Merge 3.19-rc5 into char-misc-next
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 18 Jan 2015 22:56:57 +0000 (06:56 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 18 Jan 2015 22:56:57 +0000 (06:56 +0800)
We want the 3.19-rc5 fixes in here for our testing.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
613 files changed:
.mailmap
Documentation/ABI/testing/sysfs-class-mei
Documentation/devicetree/bindings/input/gpio-keys.txt
Documentation/devicetree/bindings/input/stmpe-keypad.txt
Documentation/networking/ip-sysctl.txt
Documentation/target/tcm_mod_builder.py
Documentation/thermal/cpu-cooling-api.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/armada-370-db.dts
arch/arm/boot/dts/at91sam9263.dtsi
arch/arm/boot/dts/berlin2q-marvell-dmp.dts
arch/arm/boot/dts/berlin2q.dtsi
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/exynos5420-arndale-octa.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/ls1021a.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/rk3288-evb.dtsi
arch/arm/boot/dts/sama5d3xmb.dtsi
arch/arm/boot/dts/sama5d4.dtsi
arch/arm/boot/dts/ste-nomadik-nhk15.dts
arch/arm/boot/dts/vf610-twr.dts
arch/arm/configs/exynos_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/perf_regs.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/mach-at91/board-dt-sama5.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/clk-imx6sx.c
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/control.h
arch/arm/mach-omap2/omap-headsmp.S
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-rockchip/rockchip.c
arch/arm/mach-shmobile/setup-r8a7740.c
arch/arm/mach-shmobile/setup-sh73a0.c
arch/arm/mm/dump.c
arch/arm/mm/init.c
arch/arm/mm/mmu.c
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/module.c
arch/arm64/kernel/perf_regs.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kvm/hyp.S
arch/arm64/kvm/reset.c
arch/arm64/mm/init.c
arch/blackfin/mach-bf533/boards/stamp.c
arch/ia64/include/asm/unistd.h
arch/ia64/include/uapi/asm/unistd.h
arch/ia64/kernel/acpi.c
arch/ia64/kernel/entry.S
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/nios2/kernel/cpuinfo.c
arch/nios2/kernel/entry.S
arch/powerpc/include/asm/kexec.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/machine_kexec_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/pseries/lpar.c
arch/s390/hypfs/hypfs_vm.c
arch/s390/include/asm/irqflags.h
arch/s390/include/asm/timex.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/syscalls.S
arch/s390/kernel/uprobes.c
arch/s390/kernel/vtime.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit_comp.c
arch/um/Kconfig.common
arch/x86/boot/Makefile
arch/x86/crypto/Makefile
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
arch/x86/include/asm/vgtod.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/mkcapflags.sh
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/perf_regs.c
arch/x86/lib/insn.c
arch/x86/mm/init.c
arch/x86/um/sys_call_table_32.c
arch/x86/um/sys_call_table_64.c
arch/x86/vdso/vma.c
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/time.c
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-mq.h
block/blk-timeout.c
crypto/af_alg.c
drivers/Makefile
drivers/acpi/acpi_processor.c
drivers/acpi/device_pm.c
drivers/acpi/int340x_thermal.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/base/power/domain.c
drivers/base/power/opp.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/virtio_blk.c
drivers/bus/arm-cci.c
drivers/char/ipmi/ipmi_ssif.c
drivers/clk/at91/clk-slow.c
drivers/clk/berlin/bg2q.c
drivers/clk/clk-ppc-corenet.c
drivers/clk/clk.c
drivers/clk/rockchip/clk-cpu.c
drivers/clk/rockchip/clk-rk3188.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clocksource/arm_arch_timer.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpuidle/governors/ladder.c
drivers/cpuidle/governors/menu.c
drivers/dma/dw/core.c
drivers/dma/dw/platform.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-grgpio.c
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/nouveau/core/core/event.c
drivers/gpu/drm/nouveau/core/core/notify.c
drivers/gpu/drm/nouveau/core/engine/device/nve0.c
drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/dce3_1_afmt.c
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/radeon_kfd.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/hid/Kconfig
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-kye.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/iio/adc/ad799x.c
drivers/iio/inkern.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/gpio_keys.c
drivers/input/keyboard/hil_kbd.c
drivers/input/keyboard/stmpe-keypad.c
drivers/input/mouse/alps.c
drivers/input/mouse/trackpoint.c
drivers/input/mouse/trackpoint.h
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/iommu/intel-iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/rockchip-iommu.c
drivers/isdn/hardware/eicon/message.c
drivers/leds/leds-netxbig.c
drivers/mcb/mcb-internal.h
drivers/mcb/mcb-pci.c
drivers/mfd/stmpe.c
drivers/mfd/stmpe.h
drivers/misc/cxl/context.c
drivers/misc/cxl/file.c
drivers/misc/mei/hw-me.c
drivers/mmc/core/mmc.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pci.h
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci.c
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_virtio.c
drivers/net/ethernet/8390/ne2k-pci.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
drivers/net/ethernet/cadence/at91_ether.c
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_osdep.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/s6gmac.c [deleted file]
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/phy/micrel.c
drivers/net/team/team.c
drivers/net/usb/kaweth.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/ipw2x00/Kconfig
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/rtlwifi/pci.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/phy/phy-miphy28lp.c
drivers/phy/phy-omap-control.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-ti-pipe3.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/pinctrl-st.c
drivers/powercap/intel_rapl.c
drivers/regulator/s2mps11.c
drivers/reset/reset-sunxi.c
drivers/s390/crypto/ap_bus.c
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/spi/spi-img-spfi.c
drivers/spi/spi-sh-msiof.c
drivers/staging/vt6655/baseband.c
drivers/staging/vt6655/channel.c
drivers/staging/vt6655/device_main.c
drivers/staging/vt6655/rxtx.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_user.c
drivers/thermal/cpu_cooling.c
drivers/thermal/db8500_cpufreq_cooling.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/Makefile
drivers/thermal/int340x_thermal/acpi_thermal_rel.c
drivers/thermal/int340x_thermal/int3400_thermal.c
drivers/thermal/int340x_thermal/int3402_thermal.c
drivers/thermal/int340x_thermal/int3403_thermal.c
drivers/thermal/int340x_thermal/processor_thermal_device.c [new file with mode: 0644]
drivers/thermal/intel_powerclamp.c
drivers/thermal/of-thermal.c
drivers/thermal/rcar_thermal.c
drivers/thermal/rockchip_thermal.c
drivers/thermal/samsung/Kconfig
drivers/thermal/samsung/exynos_thermal_common.c
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_core.h
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/samsung.c
drivers/tty/serial/serial_core.c
drivers/tty/tty_io.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/host.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/function/f_uac1.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/bdc/bdc_ep.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/musb/Kconfig
drivers/usb/musb/blackfin.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/musb/musb_debugfs.c
drivers/usb/musb/musb_host.c
drivers/usb/phy/phy-mv-usb.c
drivers/usb/phy/phy.c
drivers/usb/serial/console.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/generic.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/unusual_uas.h
drivers/vfio/pci/vfio_pci.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/vhost.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/omap2/dss/hdmi_pll.c
drivers/video/fbdev/omap2/dss/pll.c
drivers/video/fbdev/omap2/dss/sdi.c
drivers/video/fbdev/simplefb.c
drivers/video/logo/logo.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
fs/btrfs/backref.c
fs/btrfs/delayed-inode.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/scrub.c
fs/ceph/addr.c
fs/cifs/cifsglob.h
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.h
fs/cifs/smb2transport.c
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/resize.c
fs/ext4/super.c
fs/fcntl.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/isofs/rock.c
fs/kernfs/dir.c
fs/lockd/svc.c
fs/locks.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfsd/nfs4state.c
fs/notify/fanotify/fanotify_user.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/namei.c
fs/udf/dir.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/symlink.c
fs/udf/udfdecl.h
fs/udf/unicode.c
include/acpi/processor.h
include/asm-generic/tlb.h
include/dt-bindings/thermal/thermal.h
include/linux/acpi.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/ceph/osd_client.h
include/linux/compiler.h
include/linux/cpu_cooling.h
include/linux/cpuidle.h
include/linux/fs.h
include/linux/kdb.h
include/linux/mfd/stmpe.h
include/linux/mm.h
include/linux/mmc/sdhci.h
include/linux/netdevice.h
include/linux/netlink.h
include/linux/nfs_fs_sb.h
include/linux/pagemap.h
include/linux/perf_event.h
include/linux/perf_regs.h
include/linux/phy/omap_control_phy.h
include/linux/pm_domain.h
include/linux/rmap.h
include/linux/thermal.h
include/linux/writeback.h
include/net/genetlink.h
include/net/mac80211.h
include/net/neighbour.h
include/net/vxlan.h
include/sound/pcm.h
include/target/target_core_backend.h
include/target/target_core_backend_configfs.h
include/target/target_core_base.h
include/uapi/asm-generic/fcntl.h
include/uapi/linux/in6.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/libc-compat.h
include/uapi/linux/openvswitch.h
include/uapi/linux/virtio_ring.h
include/xen/interface/nmi.h [new file with mode: 0644]
kernel/audit.c
kernel/auditsc.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_bp.c
kernel/debug/kdb/kdb_debugger.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/events/core.c
kernel/exit.c
kernel/locking/mutex-debug.c
kernel/range.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_kdb.c
lib/Kconfig.kgdb
lib/assoc_array.c
mm/Kconfig.debug
mm/filemap.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/page-writeback.c
mm/rmap.c
mm/vmscan.c
net/batman-adv/fragmentation.c
net/batman-adv/gateway_client.c
net/batman-adv/multicast.c
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/routing.c
net/bluetooth/6lowpan.c
net/bluetooth/bnep/core.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/core.c
net/bridge/br_input.c
net/ceph/auth_x.c
net/ceph/mon_client.c
net/core/dev.c
net/core/neighbour.c
net/core/skbuff.c
net/ipv4/geneve.c
net/ipv4/netfilter/nft_redir_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/netfilter/nft_redir_ipv6.c
net/ipv6/tcp_ipv6.c
net/mac80211/key.c
net/mpls/mpls_gso.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nft_nat.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/netlink/genetlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/flow_netlink.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.c
net/packet/af_packet.c
net/sunrpc/xdr.c
net/tipc/bcast.c
net/wireless/Kconfig
scripts/Makefile.clean
security/keys/gc.c
sound/firewire/fireworks/fireworks_transaction.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/rt5677.c
sound/soc/dwc/designware_i2s.c
sound/soc/intel/Kconfig
sound/soc/intel/bytcr_dpcm_rt5640.c
sound/soc/intel/sst-firmware.c
sound/soc/intel/sst/sst_acpi.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/rockchip/rockchip_i2s.h
sound/soc/soc-core.c
sound/usb/caiaq/audio.c
tools/include/asm-generic/bitops.h
tools/include/asm-generic/bitops/arch_hweight.h [new file with mode: 0644]
tools/include/asm-generic/bitops/const_hweight.h [new file with mode: 0644]
tools/include/asm-generic/bitops/hweight.h [new file with mode: 0644]
tools/include/linux/bitops.h
tools/lib/api/fs/debugfs.c
tools/lib/api/fs/fs.c
tools/lib/lockdep/preload.c
tools/perf/MANIFEST
tools/perf/Makefile.perf
tools/perf/arch/powerpc/util/skip-callchain-idx.c
tools/perf/bench/sched-pipe.c
tools/perf/builtin-annotate.c
tools/perf/builtin-diff.c
tools/perf/builtin-list.c
tools/perf/builtin-report.c
tools/perf/builtin-top.c
tools/perf/config/Makefile
tools/perf/config/Makefile.arch
tools/perf/perf-sys.h
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/hists_cumulate.c
tools/perf/tests/hists_filter.c
tools/perf/tests/hists_output.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/hist.c
tools/perf/ui/tui/setup.c
tools/perf/util/annotate.h
tools/perf/util/cache.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/hweight.c [deleted file]
tools/perf/util/include/asm/hweight.h [deleted file]
tools/perf/util/machine.c
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/python-ext-sources
tools/perf/util/unwind-libunwind.c
tools/power/cpupower/utils/cpupower.c
tools/power/cpupower/utils/helpers/sysfs.c
tools/testing/selftests/exec/execveat.c
tools/testing/selftests/mqueue/mq_perf_tests.c
tools/testing/selftests/vm/Makefile

index ada8ad696b2e902489c6e8a8f713f1285bf5a9c5..d357e1bd2a434665ae545d9ed970edd77f15f7d9 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
+Henrik Rydberg <rydberg@bitmath.org>
 Herbert Xu <herbert@gondor.apana.org.au>
 Jacob Shin <Jacob.Shin@amd.com>
 James Bottomley <jejb@mulgrave.(none)>
index 0ec8b8178c41305a4435b208646715c22b94154a..80d9888a8ece2673686ead1cda4504ce568a1051 100644 (file)
@@ -14,3 +14,18 @@ Description:
                The /sys/class/mei/meiN directory is created for
                each probed mei device
 
+What:          /sys/class/mei/meiN/fw_status
+Date:          Nov 2014
+KernelVersion: 3.19
+Contact:       Tomas Winkler <tomas.winkler@intel.com>
+Description:   Display fw status registers content
+
+               The ME FW writes its status information into fw status
+               registers for BIOS and OS to monitor fw health.
+
+               The register contains running state, power management
+               state, error codes, and others. The way the registers
+               are decoded depends on PCH or SoC generation.
+               Also number of registers varies between 1 and 6
+               depending on generation.
+
index a4a38fcf2ed61d1fa3db42e053fb8259f2ffcc71..44b705767aca45ea1366c6e624f0515c8cb78b99 100644 (file)
@@ -10,12 +10,13 @@ Optional properties:
 Each button (key) is represented as a sub-node of "gpio-keys":
 Subnode properties:
 
+       - gpios: OF device-tree gpio specification.
+       - interrupts: the interrupt line for that input.
        - label: Descriptive name of the key.
        - linux,code: Keycode to emit.
 
-Required mutual exclusive subnode-properties:
-       - gpios: OF device-tree gpio specification.
-       - interrupts: the interrupt line for that input
+Note that either "interrupts" or "gpios" properties can be omitted, but not
+both at the same time. Specifying both properties is allowed.
 
 Optional subnode-properties:
        - linux,input-type: Specify event type this button/key generates.
@@ -23,6 +24,9 @@ Optional subnode-properties:
        - debounce-interval: Debouncing interval time in milliseconds.
          If not specified defaults to 5.
        - gpio-key,wakeup: Boolean, button can wake-up the system.
+       - linux,can-disable: Boolean, indicates that button is connected
+         to dedicated (not shared) interrupt which can be disabled to
+         suppress events from the button.
 
 Example nodes:
 
index 1b97222e8a0bfe30d88f9d921195b721ecd65194..12bb771d66d446647722ba3e423aabc3734f77bc 100644 (file)
@@ -8,6 +8,8 @@ Optional properties:
  - debounce-interval        : Debouncing interval time in milliseconds
  - st,scan-count            : Scanning cycles elapsed before key data is updated
  - st,no-autorepeat         : If specified device will not autorepeat
+ - keypad,num-rows          : See ./matrix-keymap.txt
+ - keypad,num-columns       : See ./matrix-keymap.txt
 
 Example:
 
index 9bffdfc648dc66149401296d73eb6fc04564ebd1..85b0221791048aabb65bcee8090562384ab1c628 100644 (file)
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
 route/max_size - INTEGER
        Maximum number of routes allowed in the kernel.  Increase
        this when using large numbers of interfaces and/or routes.
+       From linux kernel 3.6 onwards, this is deprecated for ipv4
+       as route cache is no longer used.
 
 neigh/default/gc_thresh1 - INTEGER
        Minimum number of entries to keep.  Garbage collector will not
index 230ce71f4d75529ff4e071ed25e7bf0b4f72cd3c..2b47704f75cb3bfedf836cf02c75afd82c91e405 100755 (executable)
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .release_cmd                    = " + fabric_mod_name + "_release_cmd,\n"
        buf += "        .shutdown_session               = " + fabric_mod_name + "_shutdown_session,\n"
        buf += "        .close_session                  = " + fabric_mod_name + "_close_session,\n"
-       buf += "        .stop_session                   = " + fabric_mod_name + "_stop_session,\n"
-       buf += "        .fall_back_to_erl0              = " + fabric_mod_name + "_reset_nexus,\n"
-       buf += "        .sess_logged_in                 = " + fabric_mod_name + "_sess_logged_in,\n"
        buf += "        .sess_get_index                 = " + fabric_mod_name + "_sess_get_index,\n"
        buf += "        .sess_get_initiator_sid         = NULL,\n"
        buf += "        .write_pending                  = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .queue_data_in                  = " + fabric_mod_name + "_queue_data_in,\n"
        buf += "        .queue_status                   = " + fabric_mod_name + "_queue_status,\n"
        buf += "        .queue_tm_rsp                   = " + fabric_mod_name + "_queue_tm_rsp,\n"
-       buf += "        .is_state_remove                = " + fabric_mod_name + "_is_state_remove,\n"
+       buf += "        .aborted_task                   = " + fabric_mod_name + "_aborted_task,\n"
        buf += "        /*\n"
        buf += "         * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
        buf += "         */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        /*\n"
        buf += "         * Register the top level struct config_item_type with TCM core\n"
        buf += "         */\n"
-       buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+       buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
        buf += "        if (IS_ERR(fabric)) {\n"
        buf += "                printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
        buf += "                return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                if re.search('get_fabric_name', fo):
                        buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
                        buf += "{\n"
-                       buf += "        return \"" + fabric_mod_name[4:] + "\";\n"
+                       buf += "        return \"" + fabric_mod_name + "\";\n"
                        buf += "}\n\n"
                        bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
                        continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
 
-               if re.search('stop_session\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
-               if re.search('fall_back_to_erl0\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
-               if re.search('sess_logged_in\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
-                       buf += "{\n"
-                       buf += "        return 0;\n"
-                       buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
                if re.search('sess_get_index\)\(', fo):
                        buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
                        buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
 
                if re.search('queue_tm_rsp\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+                       buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
-                       buf += "        return 0;\n"
+                       buf += "        return;\n"
                        buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+                       bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
 
-               if re.search('is_state_remove\)\(', fo):
-                       buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+               if re.search('aborted_task\)\(', fo):
+                       buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
-                       buf += "        return 0;\n"
+                       buf += "        return;\n"
                        buf += "}\n\n"
-                       bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+                       bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
 
        ret = p.write(buf)
        if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
        tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
        tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
 
-       input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+       input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
        if input == "yes" or input == "y":
                tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
 
-       input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+       input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
        if input == "yes" or input == "y":
                tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
 
index fca24c931ec8dcb737012b6b67f6b88a8fef2223..753e47cc2e2036cd53e176241f579addd3b43ec1 100644 (file)
@@ -3,7 +3,7 @@ CPU cooling APIs How To
 
 Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>
 
-Updated: 12 May 2012
+Updated: 6 Jan 2015
 
 Copyright (c)  2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 
@@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer.
 
    clip_cpus: cpumask of cpus where the frequency constraints will happen.
 
-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+       struct device_node *np, const struct cpumask *clip_cpus)
+
+    This interface function registers the cpufreq cooling device with
+    the name "thermal-cpufreq-%x" linking it with a device tree node, in
+    order to bind it via the thermal DT code. This api can support multiple
+    instances of cpufreq cooling devices.
+
+    np: pointer to the cooling device device tree node
+    clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 
     This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 
index 2b41cd183b4f225cf0f9b8fdf9b3464bc3308ab0..b00535f1e74c415ed998d46bf1b74a62302e2e3c 100644 (file)
@@ -724,15 +724,15 @@ F:        include/uapi/linux/apm_bios.h
 F:     drivers/char/apm-emulation.c
 
 APPLE BCM5974 MULTITOUCH DRIVER
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     linux-input@vger.kernel.org
-S:     Maintained
+S:     Odd fixes
 F:     drivers/input/mouse/bcm5974.c
 
 APPLE SMC DRIVER
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     lm-sensors@lm-sensors.org
-S:     Maintained
+S:     Odd fixes
 F:     drivers/hwmon/applesmc.c
 
 APPLETALK NETWORK LAYER
@@ -2259,6 +2259,7 @@ F:        drivers/gpio/gpio-bt8xx.c
 BTRFS FILE SYSTEM
 M:     Chris Mason <clm@fb.com>
 M:     Josef Bacik <jbacik@fb.com>
+M:     David Sterba <dsterba@suse.cz>
 L:     linux-btrfs@vger.kernel.org
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -3183,7 +3184,7 @@ L:        dmaengine@vger.kernel.org
 Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Maintained
 F:     drivers/dma/
-F:     include/linux/dma*
+F:     include/linux/dmaengine.h
 F:     Documentation/dmaengine/
 T:     git git://git.infradead.org/users/vkoul/slave-dma.git
 
@@ -4749,7 +4750,7 @@ S:        Supported
 F:     drivers/scsi/ipr.*
 
 IBM Power Virtual Ethernet Device Driver
-M:     Santiago Leon <santil@linux.vnet.ibm.com>
+M:     Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmveth.*
@@ -4941,10 +4942,10 @@ F:      include/uapi/linux/input.h
 F:     include/linux/input/
 
 INPUT MULTITOUCH (MT) PROTOCOL
-M:     Henrik Rydberg <rydberg@euromail.se>
+M:     Henrik Rydberg <rydberg@bitmath.org>
 L:     linux-input@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
-S:     Maintained
+S:     Odd fixes
 F:     Documentation/input/multi-touch-protocol.txt
 F:     drivers/input/input-mt.c
 K:     \b(ABS|SYN)_MT_
@@ -5280,6 +5281,15 @@ W:       www.open-iscsi.org
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
 F:     drivers/infiniband/ulp/iser/
 
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M:     Sagi Grimberg <sagig@mellanox.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L:     linux-rdma@vger.kernel.org
+L:     target-devel@vger.kernel.org
+S:     Supported
+W:     http://www.linux-iscsi.org
+F:     drivers/infiniband/ulp/isert
+
 ISDN SUBSYSTEM
 M:     Karsten Keil <isdn@linux-pingi.de>
 L:     isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -7738,8 +7748,7 @@ F:        Documentation/scsi/LICENSE.qla2xxx
 F:     drivers/scsi/qla2xxx/
 
 QLOGIC QLA4XXX iSCSI DRIVER
-M:     Vikas Chaudhary <vikas.chaudhary@qlogic.com>
-M:     iscsi-driver@qlogic.com
+M:     QLogic-Storage-Upstream@qlogic.com
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     Documentation/scsi/LICENSE.qla4xxx
@@ -9534,7 +9543,8 @@ F:        drivers/platform/x86/thinkpad_acpi.c
 TI BANDGAP AND THERMAL DRIVER
 M:     Eduardo Valentin <edubezval@gmail.com>
 L:     linux-pm@vger.kernel.org
-S:     Supported
+L:     linux-omap@vger.kernel.org
+S:     Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
 TI CLOCK DRIVER
index ef748e17702f5109bf2678fb57f7929ef411d938..fb93350cf6456c89649aff15a99efae74eca5cb8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
@@ -391,6 +391,7 @@ USERINCLUDE    := \
 # Needed to be compatible with the O= option
 LINUXINCLUDE    := \
                -I$(srctree)/arch/$(hdr-arch)/include \
+               -Iarch/$(hdr-arch)/include/generated/uapi \
                -Iarch/$(hdr-arch)/include/generated \
                $(if $(KBUILD_SRC), -I$(srctree)/include) \
                -Iinclude \
index 1466580be2954996c43fc189bc263967b37a49fc..70b1943a86b104502449c1c58153d9ed68c7c469 100644 (file)
                compatible = "linux,spdif-dir";
        };
 };
-
-&pinctrl {
-       /*
-        * These pins might be muxed as I2S by
-        * the bootloader, but it conflicts
-        * with the real I2S pins that are
-        * muxed using i2s_pins. We must mux
-        * those pins to a function other than
-        * I2S.
-        */
-       pinctrl-0 = <&hog_pins1 &hog_pins2>;
-       pinctrl-names = "default";
-
-       hog_pins1: hog-pins1 {
-               marvell,pins = "mpp6",  "mpp8", "mpp10",
-                              "mpp12", "mpp13";
-               marvell,function = "gpio";
-       };
-
-       hog_pins2: hog-pins2 {
-               marvell,pins = "mpp5", "mpp7", "mpp9";
-               marvell,function = "gpo";
-       };
-};
index 1467750e3377d161bddff0cfcce1f35d91c9261b..e8c6c600a5b69335bbea0fdabf70e7ccee7ff163 100644 (file)
                        interrupts = <26 IRQ_TYPE_LEVEL_HIGH 3>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_fb>;
+                       clocks = <&lcd_clk>, <&lcd_clk>;
+                       clock-names = "lcdc_clk", "hclk";
                        status = "disabled";
                };
 
index 28e7e2060c3399c204f547b37fc325fa1069e8d8..a98ac1bd8f65124fe69d43aa8e8b467a2a7c911c 100644 (file)
@@ -65,6 +65,8 @@
 };
 
 &sdhci2 {
+       broken-cd;
+       bus-width = <8>;
        non-removable;
        status = "okay";
 };
index 35253c947a7cd0002211dac773d7f1f9723d6fce..e2f61f27944e24fd45cc65518126f932409ea610 100644 (file)
@@ -83,7 +83,8 @@
                        compatible = "mrvl,pxav3-mmc";
                        reg = <0xab1000 0x200>;
                        interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&chip CLKID_SDIO1XIN>;
+                       clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>;
+                       clock-names = "io", "core";
                        status = "disabled";
                };
 
                                interrupt-parent = <&gic>;
                                interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                        };
-
-                       gpio4: gpio@5000 {
-                               compatible = "snps,dw-apb-gpio";
-                               reg = <0x5000 0x400>;
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               porte: gpio-port@4 {
-                                       compatible = "snps,dw-apb-gpio-port";
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       snps,nr-gpios = <32>;
-                                       reg = <0>;
-                               };
-                       };
-
-                       gpio5: gpio@c000 {
-                               compatible = "snps,dw-apb-gpio";
-                               reg = <0xc000 0x400>;
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               portf: gpio-port@5 {
-                                       compatible = "snps,dw-apb-gpio-port";
-                                       gpio-controller;
-                                       #gpio-cells = <2>;
-                                       snps,nr-gpios = <32>;
-                                       reg = <0>;
-                               };
-                       };
                };
 
                chip: chip-control@ea0000 {
                        ranges = <0 0xfc0000 0x10000>;
                        interrupt-parent = <&sic>;
 
+                       sm_gpio1: gpio@5000 {
+                               compatible = "snps,dw-apb-gpio";
+                               reg = <0x5000 0x400>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               portf: gpio-port@5 {
+                                       compatible = "snps,dw-apb-gpio-port";
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       snps,nr-gpios = <32>;
+                                       reg = <0>;
+                               };
+                       };
+
                        i2c2: i2c@7000 {
                                compatible = "snps,designware-i2c";
                                #address-cells = <1>;
                                status = "disabled";
                        };
 
+                       sm_gpio0: gpio@c000 {
+                               compatible = "snps,dw-apb-gpio";
+                               reg = <0xc000 0x400>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               porte: gpio-port@4 {
+                                       compatible = "snps,dw-apb-gpio-port";
+                                       gpio-controller;
+                                       #gpio-cells = <2>;
+                                       snps,nr-gpios = <32>;
+                                       reg = <0>;
+                               };
+                       };
+
                        sysctrl: pin-controller@d000 {
                                compatible = "marvell,berlin2q-system-ctrl";
                                reg = <0xd000 0x100>;
index 10b725c7bfc02fc79e8c694150ec1e23c5493175..ad4118f7e1a6106139af2a6bfe56d3534c306af9 100644 (file)
                };
                partition@5 {
                        label = "QSPI.u-boot-spl-os";
-                       reg = <0x00140000 0x00010000>;
+                       reg = <0x00140000 0x00080000>;
                };
                partition@6 {
                        label = "QSPI.u-boot-env";
-                       reg = <0x00150000 0x00010000>;
+                       reg = <0x001c0000 0x00010000>;
                };
                partition@7 {
                        label = "QSPI.u-boot-env.backup1";
-                       reg = <0x00160000 0x0010000>;
+                       reg = <0x001d0000 0x0010000>;
                };
                partition@8 {
                        label = "QSPI.kernel";
-                       reg = <0x00170000 0x0800000>;
+                       reg = <0x001e0000 0x0800000>;
                };
                partition@9 {
                        label = "QSPI.file-system";
-                       reg = <0x00970000 0x01690000>;
+                       reg = <0x009e0000 0x01620000>;
                };
        };
 };
index 0a229fcd7acfdfff4e07b4359cf0c7776e510efc..d75c89d7666a0a0bea5fff5139439f8090eee793 100644 (file)
 
        dp_phy: video-phy@10040720 {
                compatible = "samsung,exynos5250-dp-video-phy";
-               reg = <0x10040720 4>;
+               samsung,pmu-syscon = <&pmu_system_controller>;
                #phy-cells = <0>;
        };
 
index aa7a7d727a7e80033df0cddb0b9cce8596d48ddc..db2c1c4cd90076b5c7bb47d12737e46bf4938264 100644 (file)
 &usbdrd_dwc3_1 {
        dr_mode = "host";
 };
+
+&cci {
+       status = "disabled";
+};
index 517e50f6760b0cf4d77bc55bf899f0aa5e3667a1..6d38f8bfd0e68e71358608a9af0ec32dbc5a53c2 100644 (file)
                };
        };
 
-       cci@10d20000 {
+       cci: cci@10d20000 {
                compatible = "arm,cci-400";
                #address-cells = <1>;
                #size-cells = <1>;
        };
 
        dp_phy: video-phy@10040728 {
-               compatible = "samsung,exynos5250-dp-video-phy";
-               reg = <0x10040728 4>;
+               compatible = "samsung,exynos5420-dp-video-phy";
+               samsung,pmu-syscon = <&pmu_system_controller>;
                #phy-cells = <0>;
        };
 
index 58d3c3cf2923f5ffae5e1657140fc7b94f4090c1..d238676a910753a4d7b2ff9e80d4599fe36da057 100644 (file)
                                #size-cells = <0>;
                                compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
                                reg = <0x43fa4000 0x4000>;
-                               clocks = <&clks 62>, <&clks 62>;
+                               clocks = <&clks 78>, <&clks 78>;
                                clock-names = "ipg", "per";
                                interrupts = <14>;
                                status = "disabled";
index 56569cecaa7852795ab94f6046321f13bddd837e..649befeb2cf96ef4b968f809a98d5d718227b002 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               reg_usbh1_vbus: regulator@0 {
-                       compatible = "regulator-fixed";
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_usbh1reg>;
-                       reg = <0>;
-                       regulator-name = "usbh1_vbus";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
-                       enable-active-high;
-               };
-
-               reg_usbotg_vbus: regulator@1 {
+               reg_hub_reset: regulator@0 {
                        compatible = "regulator-fixed";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_usbotgreg>;
-                       reg = <1>;
-                       regulator-name = "usbotg_vbus";
+                       reg = <0>;
+                       regulator-name = "hub_reset";
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
                        reg = <0>;
                        clocks = <&clks IMX5_CLK_DUMMY>;
                        clock-names = "main_clk";
+                       reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
                };
        };
 };
 &usbh1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usbh1>;
-       vbus-supply = <&reg_usbh1_vbus>;
+       vbus-supply = <&reg_hub_reset>;
        fsl,usbphy = <&usbh1phy>;
        phy_type = "ulpi";
        status = "okay";
        dr_mode = "otg";
        disable-over-current;
        phy_type = "utmi_wide";
-       vbus-supply = <&reg_usbotg_vbus>;
        status = "okay";
 };
 
index 4fc03b7f1ceec52fe5d327974cd2929127de21f9..2109d0763c1b6dca448449ba74709c6ced666cbb 100644 (file)
                        vpu: vpu@02040000 {
                                compatible = "cnm,coda960";
                                reg = <0x02040000 0x3c000>;
-                               interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                                            <0 12 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>,
+                                            <0 3 IRQ_TYPE_LEVEL_HIGH>;
                                interrupt-names = "bit", "jpeg";
                                clocks = <&clks IMX6QDL_CLK_VPU_AXI>,
                                         <&clks IMX6QDL_CLK_MMDC_CH0_AXI>,
index 1e6e5cc1c14cf283fb8b3bd9321f44218fbe4fb3..8c1febd7e3f2757176d1ba0ab14450133010d177 100644 (file)
        pinctrl-0 = <&pinctrl_enet1>;
        phy-supply = <&reg_enet_3v3>;
        phy-mode = "rgmii";
+       phy-handle = <&ethphy1>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ethphy1: ethernet-phy@0 {
+                       reg = <0>;
+               };
+
+               ethphy2: ethernet-phy@1 {
+                       reg = <1>;
+               };
+       };
 };
 
 &fec2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet2>;
        phy-mode = "rgmii";
+       phy-handle = <&ethphy2>;
        status = "okay";
 };
 
index 657da14cb4b5b2cc96bc1b4139c871f5fac65307..c70bb27ac65a63e1f197408e4a9ccb822d3407c4 100644 (file)
                scfg: scfg@1570000 {
                        compatible = "fsl,ls1021a-scfg", "syscon";
                        reg = <0x0 0x1570000 0x0 0x10000>;
+                       big-endian;
                };
 
                clockgen: clocking@1ee1000 {
index 53f3ca064140470866dbfe12773af1ddb76d1632..b550c41b46f1ecc83fcc9abf551fbeff4f44e2a0 100644 (file)
                };
        };
 
+       /* Ethernet is on some early development boards and qemu */
        ethernet@gpmc {
                compatible = "smsc,lan91c94";
-
-               status = "disabled";
-
                interrupt-parent = <&gpio2>;
                interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;  /* gpio54 */
                reg = <1 0x300 0xf>;            /* 16 byte IO range at offset 0x300 */
index 3e067dd65d0c87d7845809bd121859f86de1a879..6194d673e80be828c7067bccf498569c121b2a74 100644 (file)
 };
 
 &pinctrl {
+       pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+               drive-strength = <8>;
+       };
+
+       pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+               bias-pull-up;
+               drive-strength = <8>;
+       };
+
        backlight {
                bl_en: bl-en {
                        rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
                };
        };
 
+       sdmmc {
+               /*
+                * Default drive strength isn't enough to achieve even
+                * high-speed mode on EVB board so bump up to 8ma.
+                */
+               sdmmc_bus4: sdmmc-bus4 {
+                       rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+                                       <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+                                       <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+                                       <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+               };
+
+               sdmmc_clk: sdmmc-clk {
+                       rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+               };
+
+               sdmmc_cmd: sdmmc-cmd {
+                       rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+               };
+       };
+
        usb {
                host_vbus_drv: host-vbus-drv {
                        rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
index 49c10d33df302b7d967f0861c1424f636026a943..77e03655aca3626ad369b81b572b755987fd1609 100644 (file)
                        "Headphone Jack", "HPOUTR",
                        "IN2L", "Line In Jack",
                        "IN2R", "Line In Jack",
-                       "MICBIAS", "IN1L",
+                       "Mic", "MICBIAS",
                        "IN1L", "Mic";
 
                atmel,ssc-controller = <&ssc0>;
index 1b0f30c2c4a58d907aafd9d54e80d11e80d5722c..b94995d1889fc3ca780d69ac79a9c3be53620189 100644 (file)
 
                        pit: timer@fc068630 {
                                compatible = "atmel,at91sam9260-pit";
-                               reg = <0xfc068630 0xf>;
+                               reg = <0xfc068630 0x10>;
                                interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>;
                                clocks = <&h32ck>;
                        };
index a8c00ee7522a1872ee5af06debe66c6acdba49f8..3d0b8755caeee62f77ac214d343ab40cebba2ce0 100644 (file)
                stmpe2401_1 {
                        stmpe2401_1_nhk_mode: stmpe2401_1_nhk {
                                nhk_cfg1 {
-                                       ste,pins = "GPIO76_B20"; // IRQ line
+                                       pins = "GPIO76_B20"; // IRQ line
                                        ste,input = <0>;
                                };
                                nhk_cfg2 {
-                                       ste,pins = "GPIO77_B8"; // reset line
+                                       pins = "GPIO77_B8"; // reset line
                                        ste,output = <1>;
                                };
                        };
                stmpe2401_2 {
                        stmpe2401_2_nhk_mode: stmpe2401_2_nhk {
                                nhk_cfg1 {
-                                       ste,pins = "GPIO78_A8"; // IRQ line
+                                       pins = "GPIO78_A8"; // IRQ line
                                        ste,input = <0>;
                                };
                                nhk_cfg2 {
-                                       ste,pins = "GPIO79_C9"; // reset line
+                                       pins = "GPIO79_C9"; // reset line
                                        ste,output = <1>;
                                };
                        };
index a0f762159cb26501b517a1af0d529da8abc42283..f2b64b1b00fa5231fc2493164fcc89f434687cd1 100644 (file)
 
 &fec0 {
        phy-mode = "rmii";
+       phy-handle = <&ethphy0>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_fec0>;
        status = "okay";
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ethphy0: ethernet-phy@0 {
+                       reg = <0>;
+               };
+
+               ethphy1: ethernet-phy@1 {
+                       reg = <1>;
+               };
+       };
 };
 
 &fec1 {
        phy-mode = "rmii";
+       phy-handle = <&ethphy1>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_fec1>;
        status = "okay";
index 5ef14de00a29ba2f433fc93a47d95912569817c8..3d0c5d65c741933fad5947aa028dfdf11f7eda4d 100644 (file)
@@ -84,7 +84,8 @@ CONFIG_DEBUG_GPIO=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_BATTERY_SBS=y
 CONFIG_CHARGER_TPS65090=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=y
+CONFIG_SENSORS_LM90=y
 CONFIG_THERMAL=y
 CONFIG_EXYNOS_THERMAL=y
 CONFIG_EXYNOS_THERMAL_CORE=y
@@ -109,11 +110,26 @@ CONFIG_REGULATOR_S2MPA01=y
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
+CONFIG_DRM=y
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PTN3460=y
+CONFIG_DRM_PS8622=y
+CONFIG_DRM_EXYNOS=y
+CONFIG_DRM_EXYNOS_FIMD=y
+CONFIG_DRM_EXYNOS_DP=y
+CONFIG_DRM_PANEL=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_SIMPLE=y
 CONFIG_EXYNOS_VIDEO=y
 CONFIG_EXYNOS_MIPI_DSI=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FONTS=y
 CONFIG_FONT_7x14=y
index 2328fe752e9c5ed6066534a0332724b3c4f8d8e5..bc393b7e5ece1f1f985d994e01312f8cf5814837 100644 (file)
@@ -338,6 +338,7 @@ CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
 CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
index c2c3a852af9fcb28a4fc03bf69322fad6cf52f79..667d9d52aa01aaa230bd2e12a9b4575778285f38 100644 (file)
@@ -68,7 +68,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
 # CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
 CONFIG_CPU_IDLE=y
 CONFIG_BINFMT_MISC=y
index 705bb7620673a10222e3258d94158ab8ec1555c9..0c3f5a0dafd32c04af58eec20e6af09f2efca0fe 100644 (file)
 #define __NR_getrandom                 (__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create              (__NR_SYSCALL_BASE+385)
 #define __NR_bpf                       (__NR_SYSCALL_BASE+386)
+#define __NR_execveat                  (__NR_SYSCALL_BASE+387)
 
 /*
  * The following SWIs are ARM private.
index e51833f8cc387118ae3826a0a78a533f4ff90a5f..05745eb838c599dc2dd6034a71b8ebec619b9995 100644 (file)
                CALL(sys_getrandom)
 /* 385 */      CALL(sys_memfd_create)
                CALL(sys_bpf)
+               CALL(sys_execveat)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 6e4379c67cbc191e58fa28c4dbf25b10f887c536..592dda3f21fff05f7024abbcebbe2e55bc44947f 100644 (file)
@@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_32;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
index f9c863911038ac7d2cbdb5c3d154edd482df8d8a..715ae19bc7c87302350093b6894251c4519ea957 100644 (file)
@@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v)
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);
 
+#if defined(CONFIG_SMP)
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+                          (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+#else
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          loops_per_jiffy / (500000/HZ),
+                          (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");
 
index 5e6052e18850a9d04071bbaf03f595fc8ecf00e0..86ef244c5a24b4fa80b20da26c5d522832a61d59 100644 (file)
@@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+       int cpu;
+       unsigned long bogosum = 0;
+
+       for_each_online_cpu(cpu)
+               bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+       printk(KERN_INFO "SMP: Total of %d processors activated "
+              "(%lu.%02lu BogoMIPS).\n",
+              num_online_cpus(),
+              bogosum / (500000/HZ),
+              (bogosum / (5000/HZ)) % 100);
+
        hyp_mode_check();
 }
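The two hunks above derive BogoMIPS from loops_per_jiffy, both per CPU in /proc/cpuinfo and as an SMP total. A standalone sketch of the same fixed-point arithmetic (not kernel code; HZ = 100 and the loops_per_jiffy value are example assumptions):

#include <stdio.h>

int main(void)
{
        const unsigned long HZ = 100;             /* assumed tick rate */
        unsigned long loops_per_jiffy = 4980736;  /* example value */

        /* integer part and two decimal places, exactly as printed above */
        printf("BogoMIPS: %lu.%02lu\n",
               loops_per_jiffy / (500000UL / HZ),
               (loops_per_jiffy / (5000UL / HZ)) % 100);
        return 0;
}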
 
index 8fb9ef5333f17648d8a28aaa0385badc9c0ed65a..97f7367d32b8a2b9af60abb3f6b4df68dd860489 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/of_platform.h>
 #include <linux/phy.h>
 #include <linux/clk-provider.h>
+#include <linux/phy.h>
 
 #include <asm/setup.h>
 #include <asm/irq.h>
 
 #include "generic.h"
 
+static int ksz8081_phy_fixup(struct phy_device *phy)
+{
+       int value;
+
+       value = phy_read(phy, 0x16);
+       value &= ~0x20;
+       phy_write(phy, 0x16, value);
+
+       return 0;
+}
+
 static void __init sama5_dt_device_init(void)
 {
+       if (of_machine_is_compatible("atmel,sama5d4ek") &&
+          IS_ENABLED(CONFIG_PHYLIB)) {
+               phy_register_fixup_for_id("fc028000.etherne:00",
+                                               ksz8081_phy_fixup);
+       }
+
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
index 5951660d1bd2363db0326cd83b07fcec7cc908f1..2daef619d0534d626daef9ac2bfd8fcacbb91a19 100644 (file)
@@ -144,7 +144,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                post_div_table[1].div = 1;
                post_div_table[2].div = 1;
                video_div_table[1].div = 1;
-               video_div_table[2].div = 1;
+               video_div_table[3].div = 1;
        }
 
        clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
index 17354a11356fbd0291ca5629db26446370c951d2..5a3e5a159e708b35a13427b6c766d48bc8cf15b0 100644 (file)
@@ -558,6 +558,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
        clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
        clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
 
+       clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+       clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+
        /* Set initial power mode */
        imx6q_set_lpm(WAIT_CLOCKED);
 }
index 608079a1aba6774e5ff6682354ecc08ba2d7a882..b61c049f92d6a361de57b16fe11c4773c12b88c6 100644 (file)
@@ -77,6 +77,24 @@ MACHINE_END
 #endif
 
 #ifdef CONFIG_ARCH_OMAP3
+/* Some boards need board name for legacy userspace in /proc/cpuinfo */
+static const char *const n900_boards_compat[] __initconst = {
+       "nokia,omap3-n900",
+       NULL,
+};
+
+DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
+       .reserve        = omap_reserve,
+       .map_io         = omap3_map_io,
+       .init_early     = omap3430_init_early,
+       .init_machine   = omap_generic_init,
+       .init_late      = omap3_init_late,
+       .init_time      = omap3_sync32k_timer_init,
+       .dt_compat      = n900_boards_compat,
+       .restart        = omap3xxx_restart,
+MACHINE_END
+
+/* Generic omap3 boards, most boards can use these */
 static const char *const omap3_boards_compat[] __initconst = {
        "ti,omap3430",
        "ti,omap3",
index 377eea849e7bcdaf1142f6b1087ed6857a92b046..db57741c9c8ae69175f007ddca876c2bd8afe9cc 100644 (file)
@@ -249,6 +249,7 @@ extern void omap4_cpu_die(unsigned int cpu);
 extern struct smp_operations omap4_smp_ops;
 
 extern void omap5_secondary_startup(void);
+extern void omap5_secondary_hyp_startup(void);
 #endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PM)
index a3c013345c45fa3b495924edaaa8628e636e147d..a80ac2d70bb1bca42084187851c09b914cd51871 100644 (file)
 #define OMAP5XXX_CONTROL_STATUS                0x134
 #define OMAP5_DEVICETYPE_MASK          (0x7 << 6)
 
+/* DRA7XX CONTROL CORE BOOTSTRAP */
+#define DRA7_CTRL_CORE_BOOTSTRAP       0x6c4
+#define DRA7_SPEEDSELECT_MASK          (0x3 << 8)
+
 /*
  * REVISIT: This list of registers is not comprehensive - there are more
  * that should be added.
index 4993d4bfe9b2a579d7adcc37726cb6828a78f130..6d1dffca6c7b6d68f3bd6e29a16bf5334c54a5fb 100644 (file)
@@ -22,6 +22,7 @@
 
 /* Physical address needed since MMU not enabled yet on secondary core */
 #define AUX_CORE_BOOT0_PA                      0x48281800
+#define API_HYP_ENTRY                          0x102
 
 /*
  * OMAP5 specific entry point for secondary CPU to jump from ROM
@@ -40,6 +41,26 @@ wait:        ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
        bne     wait
        b       secondary_startup
 ENDPROC(omap5_secondary_startup)
+/*
+ * Same as omap5_secondary_startup except we call into the ROM to
+ * enable HYP mode first.  This is called instead of
+ * omap5_secondary_startup if the primary CPU was put into HYP mode by
+ * the boot loader.
+ */
+ENTRY(omap5_secondary_hyp_startup)
+wait_2:        ldr     r2, =AUX_CORE_BOOT0_PA  @ read from AuxCoreBoot0
+       ldr     r0, [r2]
+       mov     r0, r0, lsr #5
+       mrc     p15, 0, r4, c0, c0, 5
+       and     r4, r4, #0x0f
+       cmp     r0, r4
+       bne     wait_2
+       ldr     r12, =API_HYP_ENTRY
+       adr     r0, hyp_boot
+       smc     #0
+hyp_boot:
+       b       secondary_startup
+ENDPROC(omap5_secondary_hyp_startup)
 /*
  * OMAP4 specific entry point for secondary CPU to jump from ROM
  * code.  This routine also provides a holding flag into which
index 256e84ef0f679072324892a04d90817b81ceee36..5305ec7341eca5579398a10b72f263a2fbbe8e0e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/irqchip/arm-gic.h>
 
 #include <asm/smp_scu.h>
+#include <asm/virt.h>
 
 #include "omap-secure.h"
 #include "omap-wakeupgen.h"
@@ -227,8 +228,16 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
        if (omap_secure_apis_support())
                omap_auxcoreboot_addr(virt_to_phys(startup_addr));
        else
-               writel_relaxed(virt_to_phys(omap5_secondary_startup),
-                              base + OMAP_AUX_CORE_BOOT_1);
+               /*
+                * If the boot CPU is in HYP mode then start secondary
+                * CPU in HYP mode as well.
+                */
+               if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
+                       writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
+                                      base + OMAP_AUX_CORE_BOOT_1);
+               else
+                       writel_relaxed(virt_to_phys(omap5_secondary_startup),
+                                      base + OMAP_AUX_CORE_BOOT_1);
 
 }
 
index 4f61148ec1689b667f30a5259aa98037c5fa06ec..7d45c84c69ba38a3da362456c941bb39ef90d7a2 100644 (file)
@@ -54,6 +54,7 @@
 
 #include "soc.h"
 #include "common.h"
+#include "control.h"
 #include "powerdomain.h"
 #include "omap-secure.h"
 
@@ -496,7 +497,8 @@ static void __init realtime_counter_init(void)
        void __iomem *base;
        static struct clk *sys_clk;
        unsigned long rate;
-       unsigned int reg, num, den;
+       unsigned int reg;
+       unsigned long long num, den;
 
        base = ioremap(REALTIME_COUNTER_BASE, SZ_32);
        if (!base) {
@@ -511,13 +513,42 @@ static void __init realtime_counter_init(void)
        }
 
        rate = clk_get_rate(sys_clk);
+
+       if (soc_is_dra7xx()) {
+               /*
+                * Errata i856 says the 32.768KHz crystal does not start at
+                * power on, so the CPU falls back to an emulated 32KHz clock
+                * based on sysclk / 610 instead. This causes the master counter
+                * frequency to run not at 6.144MHz but at sysclk / 610 * 375 / 2
+                * (i.e. sysclk * 75 / 244).
+                *
+                * This affects at least the DRA7/AM572x 1.0, 1.1 revisions.
+                * Of course any board built without a populated 32.768KHz
+                * crystal would also need this fix even if the CPU is fixed
+                * later.
+                *
+                * Either case can be detected by using the two speedselect bits.
+                * If they are not 0, then the 32.768KHz clock driving the
+                * coarse counter that corrects the fine counter every time it
+                * ticks is actually rate/610 rather than 32.768KHz and we
+                * should compensate to avoid the 570ppm (at 20MHz, much worse
+                * at other rates) too fast system time.
+                */
+               reg = omap_ctrl_readl(DRA7_CTRL_CORE_BOOTSTRAP);
+               if (reg & DRA7_SPEEDSELECT_MASK) {
+                       num = 75;
+                       den = 244;
+                       goto sysclk1_based;
+               }
+       }
+
        /* Numerator/denumerator values refer TRM Realtime Counter section */
        switch (rate) {
-       case 1200000:
+       case 12000000:
                num = 64;
                den = 125;
                break;
-       case 1300000:
+       case 13000000:
                num = 768;
                den = 1625;
                break;
@@ -529,11 +560,11 @@ static void __init realtime_counter_init(void)
                num = 192;
                den = 625;
                break;
-       case 2600000:
+       case 26000000:
                num = 384;
                den = 1625;
                break;
-       case 2700000:
+       case 27000000:
                num = 256;
                den = 1125;
                break;
@@ -545,6 +576,7 @@ static void __init realtime_counter_init(void)
                break;
        }
 
+sysclk1_based:
        /* Program numerator and denumerator registers */
        reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) &
                        NUMERATOR_DENUMERATOR_MASK;
@@ -556,7 +588,7 @@ static void __init realtime_counter_init(void)
        reg |= den;
        writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);
 
-       arch_timer_freq = (rate / den) * num;
+       arch_timer_freq = DIV_ROUND_UP_ULL(rate * num, den);
        set_cntfreq();
 
        iounmap(base);
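A quick sanity check of the errata ratio used above (a standalone sketch, not kernel code; the 20 MHz sysclk is only an example value): with num = 75 and den = 244 the counter ends up near 6.148 MHz instead of the nominal 6.144 MHz, consistent with the ~570ppm figure quoted in the comment.

#include <stdio.h>

static unsigned long long div_round_up(unsigned long long n, unsigned long long d)
{
        return (n + d - 1) / d;   /* same rounding as DIV_ROUND_UP_ULL */
}

int main(void)
{
        unsigned long long rate = 20000000;   /* assumed sysclk in Hz */
        unsigned long long num = 75, den = 244;

        printf("arch_timer_freq = %llu Hz\n", div_round_up(rate * num, den));
        return 0;
}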
index d226b71d21d5c6c0bdb702af93323f59934b22d4..a611f48525828fcef5cb1dbacc9d2430f5de8867 100644 (file)
 #include <linux/init.h>
 #include <linux/of_platform.h>
 #include <linux/irqchip.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/hardware/cache-l2x0.h>
 #include "core.h"
 
+#define RK3288_GRF_SOC_CON0 0x244
+
+static void __init rockchip_timer_init(void)
+{
+       if (of_machine_is_compatible("rockchip,rk3288")) {
+               struct regmap *grf;
+
+               /*
+                * Disable the automatic jtag/sdmmc switching that causes issues
+                * with the mmc controllers, making them unreliable.
+                */
+               grf = syscon_regmap_lookup_by_compatible("rockchip,rk3288-grf");
+               if (!IS_ERR(grf))
+                       regmap_write(grf, RK3288_GRF_SOC_CON0, 0x10000000);
+               else
+                       pr_err("rockchip: could not get grf syscon\n");
+       }
+
+       of_clk_init(NULL);
+       clocksource_of_init();
+}
+
 static void __init rockchip_dt_init(void)
 {
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
@@ -42,6 +68,7 @@ static const char * const rockchip_board_dt_compat[] = {
 DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
        .l2c_aux_val    = 0,
        .l2c_aux_mask   = ~0,
+       .init_time      = rockchip_timer_init,
        .dt_compat      = rockchip_board_dt_compat,
        .init_machine   = rockchip_dt_init,
 MACHINE_END
index 79ad93dfdae4ee7083f2c2f1351ee863355d20c4..d191cf4197313482b961f1a6ed91f11954eff9a4 100644 (file)
@@ -800,7 +800,14 @@ void __init r8a7740_init_irq_of(void)
        void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
        void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000);
+
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
        irqchip_init();
+#endif
 
        /* route signals to GIC */
        iowrite32(0x0, pfc_inta_ctrl);
index 93ebe3430bfe707ac234b35f37deb54a1998a531..fb5e1bb34be80b1d5c529a728e55b7a7d41fa8c8 100644 (file)
@@ -595,6 +595,7 @@ static struct platform_device ipmmu_device = {
 
 static struct renesas_intc_irqpin_config irqpin0_platform_data = {
        .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
+       .control_parent = true,
 };
 
 static struct resource irqpin0_resources[] = {
@@ -656,6 +657,7 @@ static struct platform_device irqpin1_device = {
 
 static struct renesas_intc_irqpin_config irqpin2_platform_data = {
        .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
+       .control_parent = true,
 };
 
 static struct resource irqpin2_resources[] = {
@@ -686,6 +688,7 @@ static struct platform_device irqpin2_device = {
 
 static struct renesas_intc_irqpin_config irqpin3_platform_data = {
        .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
+       .control_parent = true,
 };
 
 static struct resource irqpin3_resources[] = {
index 59424937e52b8839c4fb4504aedbc93584b4304e..9fe8e241335c6edcb0db5077f5d4621aefb68944 100644 (file)
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
        static const char units[] = "KMGTPE";
        u64 prot = val & pg_level[level].mask;
 
-       if (addr < USER_PGTABLES_CEILING)
-               return;
-
        if (!st->level) {
                st->level = level;
                st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
        pgd_t *pgd = swapper_pg_dir;
        struct pg_state st;
        unsigned long addr;
-       unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+       unsigned i;
 
        memset(&st, 0, sizeof(st));
        st.seq = m;
        st.marker = address_markers;
 
-       pgd += pgdoff;
-
-       for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+       for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
                addr = i * PGDIR_SIZE;
                if (!pgd_none(*pgd)) {
                        walk_pud(&st, pgd, addr);
index 98ad9c79ea0e6a1e980a5f0c09e4c25389e060e2..2495c8cb47baaddcdb15a209406a9e4b9b4f1f25 100644 (file)
@@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-               .mask   = ~PMD_SECT_RDONLY,
-               .prot   = PMD_SECT_RDONLY,
+               .mask   = ~L_PMD_SECT_RDONLY,
+               .prot   = L_PMD_SECT_RDONLY,
 #else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
index cda7c40999b6692fef5cb4a4b869b5608d87c5ec..4e6ef896c6195db73f770957e9df619a0be05e06 100644 (file)
@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
-       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+       phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+       phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
index b1fa4e61471814f78e4c5f99ed349afc98541121..fbe0ca31a99cafc0769a6d6b93622016af637ee4 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <asm/barrier.h>
 
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/types.h>
 
index ace70682499b69b3e23e36215bb8760c6ea67cb3..8e797b2fcc0186b6f5f505303b51fbea0eff2e94 100644 (file)
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
        u64             reg_id_aa64pfr0;
        u64             reg_id_aa64pfr1;
 
+       u32             reg_id_dfr0;
        u32             reg_id_isar0;
        u32             reg_id_isar1;
        u32             reg_id_isar2;
@@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
        u32             reg_id_mmfr3;
        u32             reg_id_pfr0;
        u32             reg_id_pfr1;
+
+       u32             reg_mvfr0;
+       u32             reg_mvfr1;
+       u32             reg_mvfr2;
 };
 
 DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
index 8127e45e263752821c833d1c354a8033372b2a47..865a7e28ea2d166efc0f27911970fd480bce4c3d 100644 (file)
@@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+               vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
index 286b1bec547ce2a060d01cf816893b2b4aef9d12..f9be30ea1cbd8bc5b00cf2627c2e0be47ab54d98 100644 (file)
@@ -31,6 +31,7 @@
 
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
 
@@ -123,9 +124,6 @@ struct task_struct;
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk)   do { } while (0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()                    barrier()
index 49c9aefd24a50e1892c4df018e784fc44be4617e..23e9432ac11240a15b5dc4fecfe7d275cdfb10db 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           386
+#define __NR_compat_syscalls           388
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index 8893cebcea5b8d903fed25db1a6dcb6effa14cef..27224426e0bf920de713c22cd4b6ca125c84d9bf 100644 (file)
@@ -795,3 +795,5 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
 __SYSCALL(__NR_memfd_create, sys_memfd_create)
 #define __NR_bpf 386
 __SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 387
+__SYSCALL(__NR_execveat, compat_sys_execveat)
index 57b641747534a4bb7a8e0901b685d092b89fbfdc..07d435cf2eea6ee4da81e158b4e26a6be5b14c29 100644 (file)
@@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
         * If we have AArch32, we care about 32-bit features for compat. These
         * registers should be RES0 otherwise.
         */
+       diff |= CHECK(id_dfr0, boot, cur, cpu);
        diff |= CHECK(id_isar0, boot, cur, cpu);
        diff |= CHECK(id_isar1, boot, cur, cpu);
        diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
        diff |= CHECK(id_pfr0, boot, cur, cpu);
        diff |= CHECK(id_pfr1, boot, cur, cpu);
 
+       diff |= CHECK(mvfr0, boot, cur, cpu);
+       diff |= CHECK(mvfr1, boot, cur, cpu);
+       diff |= CHECK(mvfr2, boot, cur, cpu);
+
        /*
         * Mismatched CPU features are a recipe for disaster. Don't even
         * pretend to support them.
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 
+       info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
        info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
        info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
        info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
        info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
 
+       info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+       info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+       info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+
        cpuinfo_detect_icache_policy(info);
 
        check_local_cpu_errata();
index 6fac253bc783a44066630643c0b57153825d0be9..2bb4347d0edfd9d703fb96768fcdf6a58ea9aad0 100644 (file)
@@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
 
        /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
        efi_setup_idmap();
+       early_memunmap(memmap.map, memmap.map_end - memmap.map);
 }
 
 static int __init remap_region(efi_memory_desc_t *md, void **new)
@@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
        }
 
        mapsize = memmap.map_end - memmap.map;
-       early_memunmap(memmap.map, mapsize);
 
        if (efi_runtime_disabled()) {
                pr_info("EFI runtime services will be disabled.\n");
index fd027b101de59fd350ed45d2ec928693e4b9be41..9b6f71db270952ad72cbfb30c767fcf2320092f9 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
+#include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
 
index 6762ad705587fa34fff0281546273a6930ddbcbf..3f62b35fb6f157c49c1adb8b4cc3ec2744cc1e48 100644 (file)
@@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
        else
                return PERF_SAMPLE_REGS_ABI_64;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
index b8099116675459b933b290fd3fda5fd3cd3ecdb3..20fe2932ad0c47d50d0c836acd35686c8777b98a 100644 (file)
@@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
        request_standard_resources();
 
        efi_idmap_init();
+       early_ioremap_reset();
 
        unflatten_device_tree();
 
index 4f93c67e63de34293dadc0baeb23fe944b27888c..14944e5b28dace9ea083e74f4849d4f40eadef95 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
 #include <asm/cputype.h>
+#include <asm/io.h>
 #include <asm/smp_plat.h>
 
 extern void secondary_holding_pen(void);
index fbe909fb0a1a8b95ab4f6e3ade19daaa21c70436..c3ca89c27c6b351839ec62f763ca99d34ac5b3c2 100644 (file)
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
+       lsr     x1, x1, #12
        tlbi    ipas2e1is, x1
        /*
         * We have to ensure completion of the invalidation at Stage-2,
index 70a7816535cd4a9bf575b9767a9a9fd62dbe21e6..0b43265789858cbe71f761eebbc48927834b7fe8 100644 (file)
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                        if (!cpu_has_32bit_el1())
                                return -EINVAL;
                        cpu_reset = &default_regs_reset32;
-                       vcpu->arch.hcr_el2 &= ~HCR_RW;
                } else {
                        cpu_reset = &default_regs_reset;
                }
index bac492c12fcc4bd054e09f8db8022b64dcaa8f74..c95464a33f36175d1f7905ad61bd5176654efd0f 100644 (file)
@@ -335,14 +335,8 @@ static int keep_initrd;
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-       if (!keep_initrd) {
-               if (start == initrd_start)
-                       start = round_down(start, PAGE_SIZE);
-               if (end == initrd_end)
-                       end = round_up(end, PAGE_SIZE);
-
+       if (!keep_initrd)
                free_reserved_area((void *)start, (void *)end, 0, "initrd");
-       }
 }
 
 static int __init keepinitrd_setup(char *__unused)
index 6f4bac969bf72e360a6476c55a9e7f667d2d21fb..23eada79439c7abe2734ffdb0945ec66318dff9f 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
index f3b51b57740af91e097a7b4b37b4067b5873cd44..95c39b95e97e24f1ed3d7a58cf56dbbefc2ff419 100644 (file)
@@ -11,7 +11,7 @@
 
 
 
-#define NR_syscalls                    318 /* length of syscall table */
+#define NR_syscalls                    319 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about
index 4c2240c1b0cb4b81e219631750c4e61f82ffb750..461079560c78728848b7631de5efbe700d146620 100644 (file)
 #define __NR_getrandom                 1339
 #define __NR_memfd_create              1340
 #define __NR_bpf                       1341
+#define __NR_execveat                  1342
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */
index 615ef81def494ee804deb7252e343dee3cbd7e56..e795cb848154a451bf12c83ce221123686b238b5 100644 (file)
@@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
 {
        return _acpi_map_lsapic(handle, physid, pcpu);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
 
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
 {
        ia64_cpu_to_sapicid[cpu] = -1;
        set_cpu_present(cpu, false);
@@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
 
        return (0);
 }
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif                         /* CONFIG_ACPI_HOTPLUG_CPU */
 
 #ifdef CONFIG_ACPI_NUMA
index f5e96dffc63c3d0ce54399759ceb9a5fefacde15..fcf8b8cbca0be79607808c81aad2b37456bbef89 100644 (file)
@@ -1779,6 +1779,7 @@ sys_call_table:
        data8 sys_getrandom
        data8 sys_memfd_create                  // 1340
        data8 sys_bpf
+       data8 sys_execveat
 
        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
index 75e75d7b1702fb6434c59e9155dbba1d71623b17..244e0dbe45dbeda359e233cde23b4652f0ce13dc 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            355
+#define NR_syscalls            356
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 2c1bec9a14b67da42a8ed09b644373d0cf35b5ef..61fb6cb9d2ae3c66a1c0c6dec1ac95adb83dd810 100644 (file)
 #define __NR_getrandom         352
 #define __NR_memfd_create      353
 #define __NR_bpf               354
+#define __NR_execveat          355
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 2ca219e184cd16e6ebad1bd9123061695691c843..a0ec4303f2c8e57a04fb353178d43b0be6a461fe 100644 (file)
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
        .long sys_getrandom
        .long sys_memfd_create
        .long sys_bpf
+       .long sys_execveat              /* 355 */
 
index 51d5bb90d3e504e6b3480bf4cb51be0598d58a22..a223691dff4fb1699c52b3e025ebb6446394fbb0 100644 (file)
@@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
        cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
        cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
        cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
+       cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
 
        if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
                err_cpu("DIV");
index 83bca17d1008f844857ca1dca98b00a4158f6e38..0bdfd13ff98bbbbd5af7fc4251abf1c618d63a94 100644 (file)
@@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
        GET_THREAD_INFO r1
        ldw     r4, TI_PREEMPT_COUNT(r1)
        bne     r4, r0, restore_all
-
-need_resched:
        ldw     r4, TI_FLAGS(r1)                /* ? Need resched set */
        BTBZ    r10, r4, TIF_NEED_RESCHED, restore_all
        ldw     r4, PT_ESTATUS(sp)      /* ? Interrupts off */
        andi    r10, r4, ESTATUS_EPIE
        beq     r10, r0, restore_all
-       movia   r4, PREEMPT_ACTIVE
-       stw     r4, TI_PREEMPT_COUNT(r1)
-       rdctl   r10, status             /* enable intrs again */
-       ori     r10, r10 ,STATUS_PIE
-       wrctl   status, r10
-       PUSH    r1
-       call    schedule
-       POP     r1
-       mov     r4, r0
-       stw     r4, TI_PREEMPT_COUNT(r1)
-       rdctl   r10, status             /* disable intrs */
-       andi    r10, r10, %lo(~STATUS_PIE)
-       wrctl   status, r10
-       br      need_resched
-#else
-       br      restore_all
+       call    preempt_schedule_irq
 #endif
+       br      restore_all
 
 /***********************************************************************
  * A few syscall wrappers
index 19c36cba37c4acac5e63e7c4d8a093b4dbe65781..a46f5f45570c8904a5a13de12ecb3edfed5c2449 100644 (file)
@@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
 extern void reserve_crashkernel(void);
 extern void machine_kexec_mask_interrupts(void);
 
+static inline bool kdump_in_progress(void)
+{
+       return crashing_cpu >= 0;
+}
+
 #else /* !CONFIG_KEXEC */
 static inline void crash_kexec_secondary(struct pt_regs *regs) { }
 
@@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
        return 0;
 }
 
+static inline bool kdump_in_progress(void)
+{
+       return false;
+}
+
 #endif /* CONFIG_KEXEC */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
index ce9577d693be1c7849c18ac0259a35525d46c6fe..91062eef582f9c1ed8d824f9e16bcde8a0f8714c 100644 (file)
@@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
 SYSCALL_SPU(getrandom)
 SYSCALL_SPU(memfd_create)
 SYSCALL_SPU(bpf)
+COMPAT_SYS(execveat)
index ebc4f165690a9fc0113864b6857708b5df8c317a..0be6c681cab1341061c02031464d5355ff8a4d7d 100644 (file)
@@ -23,9 +23,9 @@
 #define THREAD_SIZE            (1 << THREAD_SHIFT)
 
 #ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp)  clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
 #else
-#define CURRENT_THREAD_INFO(dest, sp)  rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)  stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
 #define THREAD_SIZE_ORDER      (THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
 static inline struct thread_info *current_thread_info(void)
 {
-       /* gcc4, at least, is smart enough to turn this into a single
-        * rlwinm for ppc32 and clrrdi for ppc64 */
-       return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+       unsigned long val;
+
+       asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+       return (struct thread_info *)val;
 }
 
 #endif /* __ASSEMBLY__ */
index e0da021caa004205fc8b645d95b3047c8cd9b73e..36b79c31eedda5cb090e73cafc6a64c937fcc5f7 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          362
+#define __NR_syscalls          363
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index f55351f2e66e962097bc078c25f77a176ca52e2a..ef5b5b1f31231648135ed092af027933c8dc3f06 100644 (file)
 #define __NR_getrandom         359
 #define __NR_memfd_create      360
 #define __NR_bpf               361
+#define __NR_execveat          362
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 879b3aacac3282d8b9de5101c9349d2aea9b8edc..f96d1ec241891b9683761d3e04c2daed29d670d1 100644 (file)
@@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
         * using debugger IPI.
         */
 
-       if (crashing_cpu == -1)
+       if (!kdump_in_progress())
                kexec_prepare_cpus();
 
        pr_debug("kexec: Starting switchover sequence.\n");
index 8ec017cb44461943c90ebdb6cdf6e007936efb39..8b2d2dc8ef106ef780c9a145335e9de17b3879a7 100644 (file)
@@ -700,6 +700,7 @@ void start_secondary(void *unused)
        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
+       cpu_callin_map[cpu] = 1;
 
        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@ void start_secondary(void *unused)
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
 
-       /*
-        * CPU must be marked active and online before we signal back to the
-        * master, because the scheduler needs to see the cpu_online and
-        * cpu_active bits set.
-        */
-       smp_wmb();
-       cpu_callin_map[cpu] = 1;
-
        local_irq_enable();
 
        cpu_startup_entry(CPUHP_ONLINE);
index 54eca8b3b288f8fcca45ef5f44c5211832b3416f..0509bca5e830b656c8a553c047740b68b172f4b1 100644 (file)
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION;                                            \
        b       1f;                                             \
 END_FTR_SECTION(0, 1);                                         \
        ld      r12,opal_tracepoint_refcount@toc(r2);           \
-       std     r12,32(r1);                                     \
        cmpdi   r12,0;                                          \
        bne-    LABEL;                                          \
 1:
index 469751d9200469c5220be3da3ec573207d9c1100..b5682fd6c9846b2cdb259720e35a2326cf7c45a1 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/trace.h>
 #include <asm/firmware.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
 #include <asm/fadump.h>
 
 #include "pseries.h"
@@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
                 * out to the user, but at least this will stop us from
                 * continuing on further and creating an even more
                 * difficult to debug situation.
+                *
+                * There is a known problem when kdump'ing: if cpus are offline
+                * the above call will fail. Rather than panicking again, keep
+                * going and hope the kdump kernel is also little endian, which
+                * it usually is.
                 */
-               if (rc)
+               if (rc && !kdump_in_progress())
                        panic("Could not enable big endian exceptions");
        }
 #endif
index 32040ace00ea2431a18428dca5c34c0c4ebde10c..afbe07907c10b6304e52b5eb234d33694fd9693a 100644 (file)
@@ -231,7 +231,7 @@ failed:
 struct dbfs_d2fc_hdr {
        u64     len;            /* Length of d2fc buffer without header */
        u16     version;        /* Version of header */
-       char    tod_ext[16];    /* TOD clock for d2fc */
+       char    tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
        u64     count;          /* Number of VM guests in d2fc buffer */
        char    reserved[30];
 } __attribute__ ((packed));
index 37b9091ab8c010c88a22bb2851f0133b10c6f032..16aa0c779e0762e210fe6652a0a5efda24771381 100644 (file)
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-       return __arch_local_irq_stosm(0x00);
+       return __arch_local_irq_stnsm(0xff);
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
index 8beee1cceba4ed17831736a11c6f5167155335d6..98eb2a5792234d9d12c303bdb1301f869f706b60 100644 (file)
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
        set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-#define CLOCK_TICK_RATE        1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE                1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE   16      /* stcke writes 16 bytes */
 
 typedef unsigned long long cycles_t;
 
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
 {
-       typedef struct { char _[sizeof(clk)]; } addrtype;
+       typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
 
        asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
 }
 
 static inline unsigned long long get_tod_clock(void)
 {
-       unsigned char clk[16];
+       unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
        get_tod_clock_ext(clk);
        return *((unsigned long long *)&clk[1]);
 }
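The STORE_CLOCK_EXT_SIZE change above fixes a subtle C pitfall: the old code derived the addrtype size from sizeof(clk), but an array parameter decays to a pointer, so the asm constraint covered only the pointer size rather than the 16 bytes that stcke actually writes. A small illustration of the decay (plain C, not kernel code):

#include <stdio.h>

static void takes_array(char clk[16])
{
        /* "char clk[16]" is really "char *clk" here */
        printf("inside callee: sizeof(clk) = %zu\n", sizeof(clk));   /* 8 on 64-bit */
}

int main(void)
{
        char clk[16];

        printf("at call site:  sizeof(clk) = %zu\n", sizeof(clk));   /* 16 */
        takes_array(clk);
        return 0;
}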
index 2b446cf0cc65543d38defaf1985d4246f767449b..67878af257a083c531140f74b951019e2a1537e0 100644 (file)
 #define __NR_bpf               351
 #define __NR_s390_pci_mmio_write       352
 #define __NR_s390_pci_mmio_read                353
-#define NR_syscalls 354
+#define __NR_execveat          354
+#define NR_syscalls 355
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index a2987243bc76c89bd07a6d4a298825bea9664ab8..939ec474b1dd705e7814f94c94df4faeb8009aa2 100644 (file)
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
 SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
index f6b3cd056ec22c1c28b908c4cefc46bc4a4e2099..cc7328080b609a653b72c1ca2c6989f054e3be74 100644 (file)
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
        return false;
 }
 
+static int check_per_event(unsigned short cause, unsigned long control,
+                          struct pt_regs *regs)
+{
+       if (!(regs->psw.mask & PSW_MASK_PER))
+               return 0;
+       /* user space single step */
+       if (control == 0)
+               return 1;
+       /* over indication for storage alteration */
+       if ((control & 0x20200000) && (cause & 0x2000))
+               return 1;
+       if (cause & 0x8000) {
+               /* all branches */
+               if ((control & 0x80800000) == 0x80000000)
+                       return 1;
+               /* branch into selected range */
+               if (((control & 0x80800000) == 0x80800000) &&
+                   regs->psw.addr >= current->thread.per_user.start &&
+                   regs->psw.addr <= current->thread.per_user.end)
+                       return 1;
+       }
+       return 0;
+}
+
 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
        int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
                if (regs->psw.addr - utask->xol_vaddr == ilen)
                        regs->psw.addr = utask->vaddr + ilen;
        }
-       /* If per tracing was active generate trap */
-       if (regs->psw.mask & PSW_MASK_PER)
-               do_per_trap(regs);
+       if (check_per_event(current->thread.per_event.cause,
+                           current->thread.per_user.control, regs)) {
+               /* fix per address */
+               current->thread.per_event.address = utask->vaddr;
+               /* trigger per event */
+               set_pt_regs_flag(regs, PIF_PER_TRAP);
+       }
        return 0;
 }
 
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
        clear_thread_flag(TIF_UPROBE_SINGLESTEP);
        regs->int_code = auprobe->saved_int_code;
        regs->psw.addr = current->utask->vaddr;
+       current->thread.per_event.address = current->utask->vaddr;
 }
 
 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
        __rc;                                           \
 })
 
-#define emu_store_ril(ptr, input)                      \
+#define emu_store_ril(regs, ptr, input)                        \
 ({                                                     \
        unsigned int mask = sizeof(*(ptr)) - 1;         \
+       __typeof__(ptr) __ptr = (ptr);                  \
        int __rc = 0;                                   \
                                                        \
        if (!test_facility(34))                         \
                __rc = EMU_ILLEGAL_OP;                  \
-       else if ((u64 __force)ptr & mask)               \
+       else if ((u64 __force)__ptr & mask)             \
                __rc = EMU_SPECIFICATION;               \
-       else if (put_user(*(input), ptr))               \
+       else if (put_user(*(input), __ptr))             \
                __rc = EMU_ADDRESSING;                  \
+       if (__rc == 0)                                  \
+               sim_stor_event(regs, __ptr, mask + 1);  \
        __rc;                                           \
 })
 
@@ -197,6 +229,25 @@ union split_register {
        s16 s16[4];
 };
 
+/*
+ * If the user PER registers are set up to trace storage alterations and an
+ * emulated store took place on a matching address, a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+       if (!(regs->psw.mask & PSW_MASK_PER))
+               return;
+       if (!(current->thread.per_user.control & PER_EVENT_STORE))
+               return;
+       if ((void *)current->thread.per_user.start > (addr + len))
+               return;
+       if ((void *)current->thread.per_user.end < addr)
+               return;
+       current->thread.per_event.address = regs->psw.addr;
+       current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+       set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
 /*
  * pc relative instructions are emulated, since parameters may not be
  * accessible from the xol area due to range limitations.
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
                        rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
                        break;
                case 0x07: /* sthrl */
-                       rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+                       rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
                        break;
                case 0x0b: /* stgrl */
-                       rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+                       rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
                        break;
                case 0x0f: /* strl */
-                       rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+                       rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
                        break;
                }
                break;
index 7f0089d9a4aa47ef86e7691502281c140ee637c3..e34122e539a16bad4e5727f881f982e4829cd8f0 100644 (file)
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system;
 
-       WARN_ON_ONCE(!irqs_disabled());
-
        timer = S390_lowcore.last_update_timer;
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
index be99357d238c68e34dee133c52053e23c88a34d4..3cf8cc03fff60d7a59e7b23f4e92d477061a5f42 100644 (file)
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
 {
        struct page *page;
-       unsigned long offset;
+       unsigned long offset, mask;
 
        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-       page = pmd_to_page((pmd_t *) entry);
+       mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+       page = virt_to_page((void *)((unsigned long) entry & mask));
        return page->index + offset;
 }
 
index c52ac77408ca5cac7ad50b6a2a8a3113d8af30ca..524496d47ef506d0ca888356df21fd4bb7e25053 100644 (file)
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
                EMIT4_DISP(0x88500000, K);
                break;
        case BPF_ALU | BPF_NEG: /* A = -A */
-               /* lnr %r5,%r5 */
-               EMIT2(0x1155);
+               /* lcr %r5,%r5 */
+               EMIT2(0x1355);
                break;
        case BPF_JMP | BPF_JA: /* ip += K */
                offset = addrs[i + K] + jit->start - jit->prg;
@@ -502,8 +502,8 @@ branch:             if (filter->jt == filter->jf) {
 xbranch:       /* Emit compare if the branch targets are different */
                if (filter->jt != filter->jf) {
                        jit->seen |= SEEN_XREG;
-                       /* cr %r5,%r12 */
-                       EMIT2(0x195c);
+                       /* clr %r5,%r12 */
+                       EMIT2(0x155c);
                }
                goto branch;
        case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
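The lnr→lcr fix above matters because the two instructions differ whenever A is already negative: LOAD NEGATIVE yields -|A|, while LOAD COMPLEMENT yields the two's complement -A that BPF_NEG requires. A plain C illustration of the two semantics (not s390 code):

#include <stdio.h>

static int lnr(int a) { return a < 0 ? a : -a; }  /* LOAD NEGATIVE: -|a| */
static int lcr(int a) { return -a; }              /* LOAD COMPLEMENT: -a */

int main(void)
{
        int a = -5;

        /* lnr leaves -5 at -5, lcr correctly turns it into 5 */
        printf("lnr(%d) = %d, lcr(%d) = %d\n", a, lnr(a), a, lcr(a));
        return 0;
}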
index 87bc86821bc9b81380f46ac38557ad537fd0a442..d195a87ca542b75e919055b30a72bf60f1ba132c 100644 (file)
@@ -3,6 +3,7 @@ config UML
        default y
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_UID16
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
        select GENERIC_IO
index 5b016e2498f3d3250b4edf12584a524bcb79e968..3db07f30636fe40c4cfea973abb08af8adc3c13c 100644 (file)
@@ -51,6 +51,7 @@ targets += cpustr.h
 $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
        $(call if_changed,cpustr)
 endif
+clean-files += cpustr.h
 
 # ---------------------------------------------------------------------------
 
index fd0f848938ccd81a165c9dff570e108c84271c4d..5a4a089e8b1fd7166e396b52917424e1d9a421b5 100644 (file)
@@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
 
 obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
 obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
@@ -46,6 +45,7 @@ endif
 ifeq ($(avx2_supported),yes)
        obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
        obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
+       obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
 endif
 
 aes-i586-y := aes-i586-asm_32.o aes_glue.o
index 2df2a0298f5ad7075bc9b214438516270dc60bb5..a916c4a611652fb97e2e6d295e1a4b333ea532bd 100644 (file)
@@ -208,7 +208,7 @@ ddq_add_8:
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 3*16(p_keys), xkeyA
+                       vmovdqa 3*16(p_keys), xkey4
                .endif
        .else
                vmovdqa 3*16(p_keys), xkeyA
@@ -224,7 +224,7 @@ ddq_add_8:
        add     $(16*by), p_in
 
        .if (klen == KEY_128)
-               vmovdqa 4*16(p_keys), xkey4
+               vmovdqa 4*16(p_keys), xkeyB
        .else
                .if (load_keys)
                        vmovdqa 4*16(p_keys), xkey4
@@ -234,7 +234,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyA, var_xdata, var_xdata             /* key 3 */
+               /* key 3 */
+               .if (klen == KEY_128)
+                       vaesenc xkey4, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyA, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -243,13 +248,18 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkey4, var_xdata, var_xdata             /* key 4 */
+               /* key 4 */
+               .if (klen == KEY_128)
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .else
+                       vaesenc xkey4, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 6*16(p_keys), xkeyB
+                       vmovdqa 6*16(p_keys), xkey8
                .endif
        .else
                vmovdqa 6*16(p_keys), xkeyB
@@ -267,12 +277,17 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyB, var_xdata, var_xdata             /* key 6 */
+               /* key 6 */
+               .if (klen == KEY_128)
+                       vaesenc xkey8, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
        .if (klen == KEY_128)
-               vmovdqa 8*16(p_keys), xkey8
+               vmovdqa 8*16(p_keys), xkeyB
        .else
                .if (load_keys)
                        vmovdqa 8*16(p_keys), xkey8
@@ -288,7 +303,7 @@ ddq_add_8:
 
        .if (klen == KEY_128)
                .if (load_keys)
-                       vmovdqa 9*16(p_keys), xkeyA
+                       vmovdqa 9*16(p_keys), xkey12
                .endif
        .else
                vmovdqa 9*16(p_keys), xkeyA
@@ -297,7 +312,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkey8, var_xdata, var_xdata             /* key 8 */
+               /* key 8 */
+               .if (klen == KEY_128)
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .else
+                       vaesenc xkey8, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -306,7 +326,12 @@ ddq_add_8:
        .set i, 0
        .rept by
                club XDATA, i
-               vaesenc xkeyA, var_xdata, var_xdata             /* key 9 */
+               /* key 9 */
+               .if (klen == KEY_128)
+                       vaesenc xkey12, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyA, var_xdata, var_xdata
+               .endif
                .set i, (i +1)
        .endr
 
@@ -412,7 +437,6 @@ ddq_add_8:
 /* main body of aes ctr load */
 
 .macro do_aes_ctrmain key_len
-
        cmp     $16, num_bytes
        jb      .Ldo_return2\key_len
 
index e7e9682a33e90f350e0ce5f99c61480755614221..f556c4843aa18af74359dfeb2a41d39d9a2c3bb9 100644 (file)
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
 
        /*
         * Load per CPU data from GDT.  LSL is faster than RDTSCP and
-        * works on all CPUs.
+        * works on all CPUs.  This is volatile so that it orders
+        * correctly wrt barrier() and to keep gcc from cleverly
+        * hoisting it out of the calling function.
         */
-       asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+       asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 
        return p;
 }
index 4433a4be8171b095ff56bb6699f71d67857cc6fb..d1626364a28a16d0a04db86134ef09119e3e9a50 100644 (file)
@@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
 {
        return _acpi_map_lsapic(handle, physid, pcpu);
 }
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
 
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
 {
 #ifdef CONFIG_ACPI_NUMA
        set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
@@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
 
        return (0);
 }
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif                         /* CONFIG_ACPI_HOTPLUG_CPU */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
index e27b49d7c922a3caaa6c45446e8bc383a86d27bc..80091ae54c2b0995ea56629a3f7e6969a484fb9b 100644 (file)
@@ -66,3 +66,4 @@ targets += capflags.c
 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
        $(call if_changed,mkcapflags)
 endif
+clean-files += capflags.c
index e2b22df964cd88fc49b4cce67023b1e85122ed66..36d99a337b49f56398ca29d900638ddcedee277b 100644 (file)
@@ -28,7 +28,7 @@ function dump_array()
                # If the /* comment */ starts with a quote string, grab that.
                VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
                [ -z "$VALUE" ] && VALUE="\"$NAME\""
-               [ "$VALUE" == '""' ] && continue
+               [ "$VALUE" = '""' ] && continue
 
                # Name is uppercase, VALUE is all lowercase
                VALUE="$(echo "$VALUE" | tr A-Z a-z)"
index 3c895d480cd75b056ab24e686e765b70db1729f5..07398339836426eae32b55fb08fa108593cd7389 100644 (file)
@@ -568,8 +568,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 };
 
 struct event_constraint intel_slm_pebs_event_constraints[] = {
-       /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-       INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+       /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+       INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
        /* Allow all events as PEBS with no flags */
        INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
        EVENT_CONSTRAINT_END
index 673f930c700f3ae745d8a408111f072dcce00dd1..6e434f8e5fc8a34ea8f098280c9c0da3227b834e 100644 (file)
@@ -103,6 +103,13 @@ static struct kobj_attribute format_attr_##_var =          \
 
 #define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
 
+#define RAPL_EVENT_ATTR_STR(_name, v, str)                             \
+static struct perf_pmu_events_attr event_attr_##v = {                  \
+       .attr           = __ATTR(_name, 0444, rapl_sysfs_show, NULL),   \
+       .id             = 0,                                            \
+       .event_str      = str,                                          \
+};
+
 struct rapl_pmu {
        spinlock_t       lock;
        int              hw_unit;  /* 1/2^hw_unit Joule */
@@ -379,23 +386,36 @@ static struct attribute_group rapl_pmu_attr_group = {
        .attrs = rapl_pmu_attrs,
 };
 
-EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
-EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
-EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
-EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
+static ssize_t rapl_sysfs_show(struct device *dev,
+                              struct device_attribute *attr,
+                              char *page)
+{
+       struct perf_pmu_events_attr *pmu_attr = \
+               container_of(attr, struct perf_pmu_events_attr, attr);
+
+       if (pmu_attr->event_str)
+               return sprintf(page, "%s", pmu_attr->event_str);
+
+       return 0;
+}
+
+RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
+RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
+RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
+RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
 
-EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
-EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
-EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
-EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
 
 /*
  * we compute in 0.23 nJ increments regardless of MSR
  */
-EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
 
 static struct attribute *rapl_events_srv_attr[] = {
        EVENT_PTR(rapl_cores),
index 18eb78bbdd1003a5f7d1d8b302b608405214741f..863d9b02563e596cd6bc04005546a383179175bf 100644 (file)
@@ -17,7 +17,7 @@
 #define UNCORE_PCI_DEV_TYPE(data)      ((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)       (data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV           0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX       2
+#define UNCORE_EXTRA_PCI_DEV_MAX       3
 
 /* support up to 8 sockets */
 #define UNCORE_SOCKET_MAX              8
index 745b158e9a65768134caaba91d2f55f43200a481..21af6149edf2e79dd462a7e8f4994c8fd201fa0f 100644 (file)
@@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
 enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
+       HSWEP_PCI_PCU_3,
 };
 
 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
 {
        if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+       /* Detect 6-8 core systems with only two SBOXes */
+       if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
+               u32 capid4;
+
+               pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
+                                     0x94, &capid4);
+               if (((capid4 >> 6) & 0x3) == 0)
+                       hswep_uncore_sbox.num_boxes = 2;
+       }
+
        uncore_msr_uncores = hswep_msr_uncores;
 }
 
@@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
+       { /* PCU.3 (for Capability registers) */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  HSWEP_PCI_PCU_3),
+       },
        { /* end: all zeroes */ }
 };
 
index f7e3cd50ece02a7b0408683d113bbafca8d49479..98f654d466e585167153e58811902675bfeb5baa 100644 (file)
@@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        regs->flags &= ~X86_EFLAGS_IF;
        trace_hardirqs_off();
        regs->ip = (unsigned long)(jp->entry);
+
+       /*
+        * jprobes use jprobe_return() which skips the normal return
+        * path of the function, and this messes up the function graph
+        * tracer accounting.
+        *
+        * Pause function graph tracing while performing the jprobe function.
+        */
+       pause_graph_tracing();
        return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        u8 *addr = (u8 *) (regs->ip - 1);
        struct jprobe *jp = container_of(p, struct jprobe, kp);
+       void *saved_sp = kcb->jprobe_saved_sp;
 
        if ((addr > (u8 *) jprobe_return) &&
            (addr < (u8 *) jprobe_return_end)) {
-               if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+               if (stack_addr(regs) != saved_sp) {
                        struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
                        printk(KERN_ERR
                               "current sp %p does not match saved sp %p\n",
-                              stack_addr(regs), kcb->jprobe_saved_sp);
+                              stack_addr(regs), saved_sp);
                        printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
                        show_regs(saved_regs);
                        printk(KERN_ERR "Current registers\n");
                        show_regs(regs);
                        BUG();
                }
+               /* It's OK to start function graph tracing again */
+               unpause_graph_tracing();
                *regs = kcb->jprobe_saved_regs;
-               memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
-                      kcb->jprobes_stack,
-                      MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+               memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
                preempt_enable_no_resched();
                return 1;
        }
index e309cc5c276eaf7b2a9fa01020f14007b166875f..781861cc5ee8d7b9bbd27e9b13c380da59bb06c0 100644 (file)
@@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_32;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
 #else /* CONFIG_X86_64 */
 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
                       (1ULL << PERF_REG_X86_ES) | \
@@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
        else
                return PERF_SAMPLE_REGS_ABI_64;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy)
+{
+       struct pt_regs *user_regs = task_pt_regs(current);
+
+       /*
+        * If we're in an NMI that interrupted task_pt_regs setup, then
+        * we can't sample user regs at all.  This check isn't really
+        * sufficient, though, as we could be in an NMI inside an interrupt
+        * that happened during task_pt_regs setup.
+        */
+       if (regs->sp > (unsigned long)&user_regs->r11 &&
+           regs->sp <= (unsigned long)(user_regs + 1)) {
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+               regs_user->regs = NULL;
+               return;
+       }
+
+       /*
+        * RIP, flags, and the argument registers are usually saved.
+        * orig_ax is probably okay, too.
+        */
+       regs_user_copy->ip = user_regs->ip;
+       regs_user_copy->cx = user_regs->cx;
+       regs_user_copy->dx = user_regs->dx;
+       regs_user_copy->si = user_regs->si;
+       regs_user_copy->di = user_regs->di;
+       regs_user_copy->r8 = user_regs->r8;
+       regs_user_copy->r9 = user_regs->r9;
+       regs_user_copy->r10 = user_regs->r10;
+       regs_user_copy->r11 = user_regs->r11;
+       regs_user_copy->orig_ax = user_regs->orig_ax;
+       regs_user_copy->flags = user_regs->flags;
+
+       /*
+        * Don't even try to report the "rest" regs.
+        */
+       regs_user_copy->bx = -1;
+       regs_user_copy->bp = -1;
+       regs_user_copy->r12 = -1;
+       regs_user_copy->r13 = -1;
+       regs_user_copy->r14 = -1;
+       regs_user_copy->r15 = -1;
+
+       /*
+        * For this to be at all useful, we need a reasonable guess for
+        * sp and the ABI.  We're in NMI context and we're treating
+        * current as the current task, so be careful not to look at
+        * any other percpu variables that might
+        * change during context switches.
+        */
+       if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
+           task_thread_info(current)->status & TS_COMPAT) {
+               /* Easy case: we're in a compat syscall. */
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
+               regs_user_copy->sp = user_regs->sp;
+               regs_user_copy->cs = user_regs->cs;
+               regs_user_copy->ss = user_regs->ss;
+       } else if (user_regs->orig_ax != -1) {
+               /*
+                * We're probably in a 64-bit syscall.
+                * Warning: this code is severely racy.  At least it's better
+                * than just blindly copying user_regs.
+                */
+               regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
+               regs_user_copy->sp = this_cpu_read(old_rsp);
+               regs_user_copy->cs = __USER_CS;
+               regs_user_copy->ss = __USER_DS;
+               regs_user_copy->cx = -1;  /* usually contains garbage */
+       } else {
+               /* We're probably in an interrupt or exception. */
+               regs_user->abi = user_64bit_mode(user_regs) ?
+                       PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
+               regs_user_copy->sp = user_regs->sp;
+               regs_user_copy->cs = user_regs->cs;
+               regs_user_copy->ss = user_regs->ss;
+       }
+
+       regs_user->regs = regs_user_copy;
+}
 #endif /* CONFIG_X86_32 */
index 2480978b31cc29e5d34cd54bbd05394eeee4b484..1313ae6b478b6c439741ee032a8c33b86868ee2c 100644 (file)
@@ -28,7 +28,7 @@
 
 /* Verify next sizeof(t) bytes can be on the same instruction */
 #define validate_next(t, insn, n)      \
-       ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
+       ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
 
 #define __get_next(t, insn)    \
        ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
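
The '<' to '<=' change above is consistent with end_kaddr pointing one byte past the last valid byte of the decode buffer (an assumption read off the relaxed bound, not stated in the hunk): reading 'want' bytes starting at next_byte is safe exactly when next_byte + want <= end_kaddr, so the old comparison wrongly rejected reads ending flush with the buffer. A small standalone check of the predicate:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Bytes [next, next + want) are in range iff next + want <= end,
 * where end points one past the last valid byte. */
static int fits(uintptr_t next, size_t want, uintptr_t end)
{
	return next + want <= end;
}

int main(void)
{
	uintptr_t buf = 0x1000;			/* hypothetical 16-byte buffer */
	uintptr_t end = buf + 16;

	assert(fits(buf + 12, 4, end));		/* last 4 bytes: fine with <= */
	assert(!fits(buf + 13, 4, end));	/* would run past the buffer */
	/* the old '<' comparison rejected the first, valid, case */
	return 0;
}
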
index a97ee0801475a2e25df60f336c552408fe1d2b63..08a7d313538a72dfc51227b01546bab34a8f7bee 100644 (file)
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
 static unsigned long __init get_new_step_size(unsigned long step_size)
 {
        /*
-        * Explain why we shift by 5 and why we don't have to worry about
-        * 'step_size << 5' overflowing:
-        *
-        * initial mapped size is PMD_SIZE (2M).
+        * Initial mapped size is PMD_SIZE (2M).
         * We can not set step_size to be PUD_SIZE (1G) yet.
         * In the worst case, when we cross the 1G boundary, and
         * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
-        * to map 1G range with PTE. Use 5 as shift for now.
+        * to map 1G range with PTE. Hence we use one less than the
+        * difference of page table level shifts.
         *
-        * Don't need to worry about overflow, on 32bit, when step_size
-        * is 0, round_down() returns 0 for start, and that turns it
-        * into 0x100000000ULL.
+        * Don't need to worry about overflow in the top-down case, on 32bit,
+        * when step_size is 0, round_down() returns 0 for start, and that
+        * turns it into 0x100000000ULL.
+        * In the bottom-up case, round_up(x, 0) also returns 0, which
+        * the code below needs to take into account.
         */
-       return step_size << 5;
+       return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
 }
 
 /**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
        unsigned long step_size;
        unsigned long addr;
        unsigned long mapped_ram_size = 0;
-       unsigned long new_mapped_ram_size;
 
        /* Xen has a big reserved range near the end of RAM, skip it at first. */
        addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
                                start = map_start;
                } else
                        start = map_start;
-               new_mapped_ram_size = init_range_memory_mapping(start,
+               mapped_ram_size += init_range_memory_mapping(start,
                                                        last_start);
                last_start = start;
                min_pfn_mapped = last_start >> PAGE_SHIFT;
-               /* only increase step_size after big range get mapped */
-               if (new_mapped_ram_size > mapped_ram_size)
+               if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
-               mapped_ram_size += new_mapped_ram_size;
        }
 
        if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
 static void __init memory_map_bottom_up(unsigned long map_start,
                                        unsigned long map_end)
 {
-       unsigned long next, new_mapped_ram_size, start;
+       unsigned long next, start;
        unsigned long mapped_ram_size = 0;
        /* step_size needs to be small so pgt_buf from BRK can cover it */
        unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
         * for page table.
         */
        while (start < map_end) {
-               if (map_end - start > step_size) {
+               if (step_size && map_end - start > step_size) {
                        next = round_up(start + 1, step_size);
                        if (next > map_end)
                                next = map_end;
-               } else
+               } else {
                        next = map_end;
+               }
 
-               new_mapped_ram_size = init_range_memory_mapping(start, next);
+               mapped_ram_size += init_range_memory_mapping(start, next);
                start = next;
 
-               if (new_mapped_ram_size > mapped_ram_size)
+               if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
-               mapped_ram_size += new_mapped_ram_size;
        }
 }
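
With the usual x86 constants (assumed here: 4 KiB pages, so PAGE_SHIFT = 12, and 2 MiB PMD mappings, so PMD_SHIFT = 21), the new shift in get_new_step_size() is 21 - 12 - 1 = 8, i.e. each round multiplies the mapped window by 256 instead of the old factor of 32. A quick standalone check of how fast the window grows:

#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KiB pages */
#define PMD_SHIFT	21			/* 2 MiB PMD mappings */
#define PMD_SIZE	(1UL << PMD_SHIFT)

int main(void)
{
	unsigned long step = PMD_SIZE;		/* initial window, as above */
	int i;

	for (i = 0; i < 3; i++) {
		printf("round %d: %lu MiB\n", i, step >> 20);
		/* the replacement for 'step_size << 5' */
		step <<= PMD_SHIFT - PAGE_SHIFT - 1;
	}
	/* prints 2 MiB, 512 MiB, 131072 MiB: a handful of rounds covers
	 * even very large amounts of RAM */
	return 0;
}
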
 
index 531d4269e2e3c5303e8b40e6753dd20ddab3c405..bd16d6c370ec9aaeb3328779e277de8129ef61f3 100644 (file)
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
 
 extern asmlinkage void sys_ni_syscall(void);
 
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
        /*
         * Smells like a compiler bug -- it doesn't work
         * when the & below is removed.
index 20c3649d06915cce37ea62b5ee8c9a6d704c0a62..5cdfa9db22175ed8dc327465b4cf033a7a65d3bc 100644 (file)
@@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
 
 extern void sys_ni_syscall(void);
 
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
        /*
         * Smells like a compiler bug -- it doesn't work
         * when the & below is removed.
index 009495b9ab4bc52c0927accd29fe0b673cda5f7e..1c9f750c38592c7278c95d7f8dbe6e1a88835c0c 100644 (file)
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
        unsigned long addr, end;
        unsigned offset;
-       end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+       /*
+        * Round up the start address.  It can start out unaligned as a result
+        * of stack start randomization.
+        */
+       start = PAGE_ALIGN(start);
+
+       /* Round the lowest possible end address up to a PMD boundary. */
+       end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
-       /* This loses some more bits than a modulo, but is cheaper */
-       offset = get_random_int() & (PTRS_PER_PTE - 1);
-       addr = start + (offset << PAGE_SHIFT);
-       if (addr >= end)
-               addr = end;
+
+       if (end > start) {
+               offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+               addr = start + (offset << PAGE_SHIFT);
+       } else {
+               addr = start;
+       }
 
        /*
-        * page-align it here so that get_unmapped_area doesn't
-        * align it wrongfully again to the next page. addr can come in 4K
-        * unaligned here as a result of stack start randomization.
+        * Forcibly align the final address in case we have a hardware
+        * issue that requires alignment for performance reasons.
         */
-       addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);
 
        return addr;
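
The reworked vdso_addr() aligns the incoming start address, computes the highest start that still fits below the limit, and then draws a uniformly distributed page-aligned address from that range with a modulo. A simplified userspace sketch of that selection; rand() stands in for get_random_int(), the PMD rounding and TASK_SIZE_MAX clamp are left out, and the addresses in main() are made up:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Pick a page-aligned address in [start, limit - len]. */
static unsigned long pick_addr(unsigned long start, unsigned long limit,
			       unsigned long len)
{
	unsigned long end, pages;

	start = PAGE_ALIGN(start);		/* start may come in unaligned */
	end = limit - len;			/* highest start that still fits */
	if (end <= start)
		return start;

	pages = ((end - start) / PAGE_SIZE) + 1;	/* candidate slots */
	return start + ((unsigned long)rand() % pages) * PAGE_SIZE;
}

int main(void)
{
	srand((unsigned int)time(NULL));
	printf("vdso-like base: %#lx\n",
	       pick_addr(0x7ffd12345678UL, 0x7ffe00000000UL, 2 * PAGE_SIZE));
	return 0;
}
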
index 6bf3a13e3e0f7af10c8d984f829a512661d12c2e..78a881b7fc415e16f50f16e4381e968370f4fd9f 100644 (file)
@@ -40,6 +40,7 @@
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
 #include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
 #include <xen/interface/xen-mca.h>
 #include <xen/features.h>
 #include <xen/page.h>
@@ -66,6 +67,7 @@
 #include <asm/reboot.h>
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
 #include <asm/mwait.h>
 #include <asm/pci_x86.h>
 #include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
        .emergency_restart = xen_emergency_restart,
 };
 
+static unsigned char xen_get_nmi_reason(void)
+{
+       unsigned char reason = 0;
+
+       /* Construct a value which looks like it came from port 0x61. */
+       if (test_bit(_XEN_NMIREASON_io_error,
+                    &HYPERVISOR_shared_info->arch.nmi_reason))
+               reason |= NMI_REASON_IOCHK;
+       if (test_bit(_XEN_NMIREASON_pci_serr,
+                    &HYPERVISOR_shared_info->arch.nmi_reason))
+               reason |= NMI_REASON_SERR;
+
+       return reason;
+}
+
 static void __init xen_boot_params_init_edd(void)
 {
 #if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
        pv_apic_ops = xen_apic_ops;
-       if (!xen_pvh_domain())
+       if (!xen_pvh_domain()) {
                pv_cpu_ops = xen_cpu_ops;
 
+               x86_platform.get_nmi_reason = xen_get_nmi_reason;
+       }
+
        if (xen_feature(XENFEAT_auto_translated_physmap))
                x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
        else
index edbc7a63fd737f0ca13edac752e1f06341f9dd97..70fb5075c901f5b0c370478b156288764f609ad8 100644 (file)
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
        return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
 }
 
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
 {
-       BUG_ON(!slab_is_available());
+       if (unlikely(!slab_is_available())) {
+               free_bootmem((unsigned long)p, PAGE_SIZE);
+               return;
+       }
+
        free_page((unsigned long)p);
 }
 
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
                        p2m_missing_pte : p2m_identity_pte;
                for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
                        pmdp = populate_extra_pmd(
-                               (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+                               (unsigned long)(p2m + pfn) + i * PMD_SIZE);
                        set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
                }
        }
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
  * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
  */
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
        pte_t *ptechk;
-       pte_t *pteret = ptep;
        pte_t *pte_newpg[PMDS_PER_MID_PAGE];
        pmd_t *pmdp;
        unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
                if (ptechk == pte_pg) {
                        set_pmd(pmdp,
                                __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
-                       if (vaddr == (addr & ~(PMD_SIZE - 1)))
-                               pteret = pte_offset_kernel(pmdp, addr);
                        pte_newpg[i] = NULL;
                }
 
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
                vaddr += PMD_SIZE;
        }
 
-       return pteret;
+       return lookup_address(addr, &level);
 }
 
 /*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
 
        if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
                /* PMD level is missing, allocate a new one */
-               ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+               ptep = alloc_p2m_pmd(addr, pte_pg);
                if (!ptep)
                        return false;
        }
index dfd77dec8e2b7c1ef72d16adb1ecf7f7b80c88de..865e56cea7a0abe4d9b6feb2e1d0da27957d47e8 100644 (file)
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
 unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
 {
        int i;
-       unsigned long addr = PFN_PHYS(pfn);
+       phys_addr_t addr = PFN_PHYS(pfn);
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
        int i;
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+               if (!xen_extra_mem[i].size)
+                       continue;
                pfn_s = PFN_DOWN(xen_extra_mem[i].start);
                pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
                for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-       unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
-       unsigned long *released)
+       unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
 {
-       unsigned long len = 0;
        unsigned long pfn, end;
        int ret;
 
        WARN_ON(start_pfn > end_pfn);
 
+       /* Release pages first. */
        end = min(end_pfn, nr_pages);
        for (pfn = start_pfn; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
                if (ret == 1) {
+                       (*released)++;
                        if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                break;
-                       len++;
                } else
                        break;
        }
 
-       /* Need to release pages first */
-       *released += len;
-       *identity += set_phys_range_identity(start_pfn, end_pfn);
+       set_phys_range_identity(start_pfn, end_pfn);
 }
 
 /*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
        }
 
        /* Update kernel mapping, but not for highmem. */
-       if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+       if (pfn >= PFN_UP(__pa(high_memory - 1)))
                return;
 
        if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long ident_pfn_iter, remap_pfn_iter;
        unsigned long ident_end_pfn = start_pfn + size;
        unsigned long left = size;
-       unsigned long ident_cnt = 0;
        unsigned int i, chunk;
 
        WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
                xen_remap_mfn = mfn;
 
                /* Set identity map */
-               ident_cnt += set_phys_range_identity(ident_pfn_iter,
-                       ident_pfn_iter + chunk);
+               set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
 
                left -= chunk;
        }
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
 static unsigned long __init xen_set_identity_and_remap_chunk(
         const struct e820entry *list, size_t map_size, unsigned long start_pfn,
        unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-       unsigned long *identity, unsigned long *released)
+       unsigned long *released, unsigned long *remapped)
 {
        unsigned long pfn;
        unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Do not remap pages beyond the current allocation */
                if (cur_pfn >= nr_pages) {
                        /* Identity map remaining pages */
-                       *identity += set_phys_range_identity(cur_pfn,
-                               cur_pfn + size);
+                       set_phys_range_identity(cur_pfn, cur_pfn + size);
                        break;
                }
                if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                if (!remap_range_size) {
                        pr_warning("Unable to find available pfn range, not remapping identity pages\n");
                        xen_set_identity_and_release_chunk(cur_pfn,
-                               cur_pfn + left, nr_pages, identity, released);
+                               cur_pfn + left, nr_pages, released);
                        break;
                }
                /* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Update variables to reflect new mappings. */
                i += size;
                remap_pfn += size;
-               *identity += size;
+               *remapped += size;
        }
 
        /*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 
 static void __init xen_set_identity_and_remap(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages,
-       unsigned long *released)
+       unsigned long *released, unsigned long *remapped)
 {
        phys_addr_t start = 0;
-       unsigned long identity = 0;
        unsigned long last_pfn = nr_pages;
        const struct e820entry *entry;
        unsigned long num_released = 0;
+       unsigned long num_remapped = 0;
        int i;
 
        /*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
                                last_pfn = xen_set_identity_and_remap_chunk(
                                                list, map_size, start_pfn,
                                                end_pfn, nr_pages, last_pfn,
-                                               &identity, &num_released);
+                                               &num_released, &num_remapped);
                        start = end;
                }
        }
 
        *released = num_released;
+       *remapped = num_remapped;
 
-       pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
        pr_info("Released %ld page(s)\n", num_released);
 }
 
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long extra_pages = 0;
+       unsigned long remapped_pages;
        int i;
        int op;
 
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
         * underlying RAM.
         */
        xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-                                  &xen_released_pages);
+                                  &xen_released_pages, &remapped_pages);
 
        extra_pages += xen_released_pages;
+       extra_pages += remapped_pages;
 
        /*
         * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
index f473d268d387fcdc8f237153b378508ec0c03f56..69087341d9aed7860dfd95549c0a8af5ffbf1ed6 100644 (file)
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
 
 struct xen_clock_event_device {
        struct clock_event_device evt;
-       char *name;
+       char name[16];
 };
 static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
 
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
        if (evt->irq >= 0) {
                unbind_from_irqhandler(evt->irq, NULL);
                evt->irq = -1;
-               kfree(per_cpu(xen_clock_events, cpu).name);
-               per_cpu(xen_clock_events, cpu).name = NULL;
        }
 }
 
 void xen_setup_timer(int cpu)
 {
-       char *name;
-       struct clock_event_device *evt;
+       struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+       struct clock_event_device *evt = &xevt->evt;
        int irq;
 
-       evt = &per_cpu(xen_clock_events, cpu).evt;
        WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
        if (evt->irq >= 0)
                xen_teardown_timer(cpu);
 
        printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
 
-       name = kasprintf(GFP_KERNEL, "timer%d", cpu);
-       if (!name)
-               name = "<timer kasprintf failed>";
+       snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
 
        irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
                                      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
                                      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
-                                     name, NULL);
+                                     xevt->name, NULL);
        (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
 
        memcpy(evt, xen_clockevent, sizeof(*evt));
 
        evt->cpumask = cpumask_of(cpu);
        evt->irq = irq;
-       per_cpu(xen_clock_events, cpu).name = name;
 }
 
 
 void xen_setup_cpu_clockevents(void)
 {
-       BUG_ON(preemptible());
-
        clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
 }
 
index 30f6153a40c27c7154dd453301807c7d39d1451c..3ad405571dcc5105a52da4284477a187db936f64 100644 (file)
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+       if (q->mq_ops)
+               blk_mq_wake_waiters(q);
+       else {
+               struct request_list *rl;
+
+               blk_queue_for_each_rl(rl, q) {
+                       if (rl->rq_pool) {
+                               wake_up(&rl->wait[BLK_RW_SYNC]);
+                               wake_up(&rl->wait[BLK_RW_ASYNC]);
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
        /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
-       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+       blk_set_queue_dying(q);
        spin_lock_irq(lock);
 
        /*
index 32e8dbb9ad1c49f0078e57fae100f0e6a7eb8a73..60c9d4a93fe470ced7471cd8653d8a00fc8922d7 100644 (file)
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
 
                wake_index = bt_index_inc(wake_index);
        }
+
+       if (include_reserve) {
+               bt = &tags->breserved_tags;
+               if (waitqueue_active(&bt->bs[0].wait))
+                       wake_up(&bt->bs[0].wait);
+       }
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
        atomic_dec(&tags->active_queues);
 
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
         * static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
        return 0;
 }
 
index 6206ed17ef766714b655a715ffbc05fd34b0463a..a6fa0fc9d41a2e91c8bb4ce29bb2b1a0d952c8ed 100644 (file)
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
        BLK_MQ_TAG_CACHE_MIN    = 1,
index da1ab5641227b670faac42a84fde7e223668a4d8..2f95747c287eac350b45cc272fcd3d6e9c43ee09 100644 (file)
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
        wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        bool freeze;
 
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
                blk_mq_run_queues(q, false);
        }
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
        blk_mq_freeze_queue_wait(q);
 }
 
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
 {
        bool wake;
 
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
                wake_up_all(&q->mq_freeze_wq);
        }
 }
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       queue_for_each_hw_ctx(q, hctx, i)
+               if (blk_mq_hw_queue_mapped(hctx))
+                       blk_mq_tag_wakeup_all(hctx->tags, true);
+
+       /*
+        * If we are called because the queue has now been marked as
+        * dying, we need to ensure that processes currently waiting on
+        * the queue are notified as well.
+        */
+       wake_up_all(&q->mq_freeze_wq);
+}
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
                ctx = alloc_data.ctx;
        }
        blk_mq_put_ctx(ctx);
-       if (!rq)
+       if (!rq) {
+               blk_mq_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
+       }
        return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+int blk_mq_request_started(struct request *rq)
+{
+       return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
 void blk_mq_start_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+       cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
        kblockd_schedule_work(&q->requeue_work);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+       unsigned long flags;
+       LIST_HEAD(rq_list);
+
+       spin_lock_irqsave(&q->requeue_lock, flags);
+       list_splice_init(&q->requeue_list, &rq_list);
+       spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+       while (!list_empty(&rq_list)) {
+               struct request *rq;
+
+               rq = list_first_entry(&rq_list, struct request, queuelist);
+               list_del_init(&rq->queuelist);
+               rq->errors = -EIO;
+               blk_mq_end_request(rq, rq->errors);
+       }
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
 static inline bool is_flush_request(struct request *rq,
                struct blk_flush_queue *fq, unsigned int tag)
 {
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
                break;
        }
 }
-               
+
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                struct request *rq, void *priv, bool reserved)
 {
        struct blk_mq_timeout_data *data = priv;
 
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+               /*
+                * If a request wasn't started before the queue was
+                * marked dying, kill it here or it'll go unnoticed.
+                */
+               if (unlikely(blk_queue_dying(rq->q))) {
+                       rq->errors = -EIO;
+                       blk_mq_complete_request(rq);
+               }
+               return;
+       }
+       if (rq->cmd_flags & REQ_NO_TIMEOUT)
                return;
 
        if (time_after_eq(jiffies, rq->deadline)) {
@@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
        hctx->flags = set->flags;
-       hctx->cmd_size = set->cmd_size;
 
        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
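
blk_mq_abort_requeue_list() above follows a common pattern: detach the whole shared list while holding the lock, then complete each entry with the lock dropped so the completion work never runs under the spinlock. A minimal userspace sketch of that pattern; the list, lock and fail_request() below are illustrative, not kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

static pthread_mutex_t requeue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *requeue_list;	/* shared, protected by requeue_lock */

static void fail_request(struct req *r)
{
	printf("req %d -> -EIO\n", r->id);	/* stand-in for ending the request */
	free(r);
}

static void abort_requeue_list(void)
{
	struct req *list;

	/* Detach the whole list while holding the lock... */
	pthread_mutex_lock(&requeue_lock);
	list = requeue_list;
	requeue_list = NULL;
	pthread_mutex_unlock(&requeue_lock);

	/* ...then fail each entry without the lock held. */
	while (list) {
		struct req *r = list;

		list = r->next;
		fail_request(r);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		r->id = i;
		r->next = requeue_list;
		requeue_list = r;
	}
	abort_requeue_list();
	return 0;
}
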
index 206230e64f7915e642ce7306aec8b949deca43b1..4f4f943c22c3d1e907ef2c18224b8635f0057e82 100644 (file)
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
                struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
index 56c025894cdf2d73f78c5346c9c5987d5deb0e37..246dfb16c3d988c4f84749065a66977b825c98b5 100644 (file)
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
        struct request_queue *q = req->q;
        unsigned long expiry;
 
+       if (req->cmd_flags & REQ_NO_TIMEOUT)
+               return;
+
        /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
        if (!q->mq_ops && !q->rq_timed_out_fn)
                return;
index 1fa7bc31be63b9fb774782ebe3fbdaed0b16ff72..4665b79c729ac1d59d699907d3e5c75628231cdb 100644 (file)
@@ -455,6 +455,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
 {
        struct af_alg_completion *completion = req->data;
 
+       if (err == -EINPROGRESS)
+               return;
+
        completion->err = err;
        complete(&completion->completion);
 }
index 67d2334dc41ecd571f9eb0dfd907cc87104912a9..527a6da8d539ad2abb84c5397e0087c1914501af 100644 (file)
@@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER)       += reset/
 obj-y                          += tty/
 obj-y                          += char/
 
-# gpu/ comes after char for AGP vs DRM startup
+# iommu/ comes before gpu/ as GPUs use iommu controllers
+obj-$(CONFIG_IOMMU_SUPPORT)    += iommu/
+
+# gpu/ comes after char for AGP vs DRM startup and after iommu
 obj-y                          += gpu/
 
 obj-$(CONFIG_CONNECTOR)                += connector/
@@ -141,7 +144,6 @@ obj-y                               += clk/
 
 obj-$(CONFIG_MAILBOX)          += mailbox/
 obj-$(CONFIG_HWSPINLOCK)       += hwspinlock/
-obj-$(CONFIG_IOMMU_SUPPORT)    += iommu/
 obj-$(CONFIG_REMOTEPROC)       += remoteproc/
 obj-$(CONFIG_RPMSG)            += rpmsg/
 
index 1fdf5e07a1c7cb0440594b12f8b78408c1c25bd4..1020b1b53a174e58111056e2c3e88089d0180d83 100644 (file)
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        acpi_status status;
        int ret;
 
-       if (pr->apic_id == -1)
+       if (pr->phys_id == -1)
                return -ENODEV;
 
        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        cpu_maps_update_begin();
        cpu_hotplug_begin();
 
-       ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
+       ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
        if (ret)
                goto out;
 
        ret = arch_register_cpu(pr->id);
        if (ret) {
-               acpi_unmap_lsapic(pr->id);
+               acpi_unmap_cpu(pr->id);
                goto out;
        }
 
@@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        struct acpi_processor *pr = acpi_driver_data(device);
-       int apic_id, cpu_index, device_declaration = 0;
+       int phys_id, cpu_index, device_declaration = 0;
        acpi_status status = AE_OK;
        static int cpu0_initialized;
        unsigned long long value;
@@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
                pr->acpi_id = value;
        }
 
-       apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
-       if (apic_id < 0)
-               acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
-       pr->apic_id = apic_id;
+       phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
+       if (phys_id < 0)
+               acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
+       pr->phys_id = phys_id;
 
-       cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+       cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
        if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
                cpu0_initialized = 1;
-               /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+               /*
+                * Handle UP system running SMP kernel, with no CPU
+                * entry in MADT
+                */
                if ((cpu_index == -1) && (num_online_cpus() == 1))
                        cpu_index = 0;
        }
@@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
 
        /* Remove the CPU. */
        arch_unregister_cpu(pr->id);
-       acpi_unmap_lsapic(pr->id);
+       acpi_unmap_cpu(pr->id);
 
        cpu_hotplug_done();
        cpu_maps_update_done();
index c2daa85fc9f70fa5aca61a6f2a4fbaa6b85df100..c0d44d394ca39c63f87f212f0345d0c05d9acdc4 100644 (file)
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
 
        device->power.state = ACPI_STATE_UNKNOWN;
        if (!acpi_device_is_present(device))
-               return 0;
+               return -ENXIO;
 
        result = acpi_device_get_power(device, &state);
        if (result)
index a27d31d1ba24afcd176d1151aff62da8350e54b4..9dcf83682e367e889db67cd5ae44670878893459 100644 (file)
 
 #include "internal.h"
 
-#define DO_ENUMERATION 0x01
+#define INT3401_DEVICE 0x01
 static const struct acpi_device_id int340x_thermal_device_ids[] = {
-       {"INT3400", DO_ENUMERATION },
-       {"INT3401"},
+       {"INT3400"},
+       {"INT3401", INT3401_DEVICE},
        {"INT3402"},
        {"INT3403"},
        {"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
                                        const struct acpi_device_id *id)
 {
 #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
-       if (id->driver_data == DO_ENUMERATION)
+       acpi_create_platform_device(adev);
+#elif defined(CONFIG_INTEL_SOC_DTS_THERMAL) || defined(CONFIG_INTEL_SOC_DTS_THERMAL_MODULE)
+       /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+       if (id->driver_data == INT3401_DEVICE)
                acpi_create_platform_device(adev);
 #endif
        return 1;
index 342942f90a1031a3650306144d6858bb58a79b08..02e48394276c785aa84c72fcacf231b4b6cc4587 100644 (file)
@@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
        unsigned long madt_end, entry;
        static struct acpi_table_madt *madt;
        static int read_madt;
-       int apic_id = -1;
+       int phys_id = -1;       /* CPU hardware ID */
 
        if (!read_madt) {
                if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
        }
 
        if (!madt)
-               return apic_id;
+               return phys_id;
 
        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;
@@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
-                       if (!map_lapic_id(header, acpi_id, &apic_id))
+                       if (!map_lapic_id(header, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
-                       if (!map_x2apic_id(header, type, acpi_id, &apic_id))
+                       if (!map_x2apic_id(header, type, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
-                       if (!map_lsapic_id(header, type, acpi_id, &apic_id))
+                       if (!map_lsapic_id(header, type, acpi_id, &phys_id))
                                break;
                }
                entry += header->length;
        }
-       return apic_id;
+       return phys_id;
 }
 
 static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
-       int apic_id = -1;
+       int phys_id = -1;
 
        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;
@@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 
        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
-               map_lapic_id(header, acpi_id, &apic_id);
+               map_lapic_id(header, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
-               map_lsapic_id(header, type, acpi_id, &apic_id);
+               map_lsapic_id(header, type, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
-               map_x2apic_id(header, type, acpi_id, &apic_id);
+               map_x2apic_id(header, type, acpi_id, &phys_id);
 
 exit:
        kfree(buffer.pointer);
-       return apic_id;
+       return phys_id;
 }
 
-int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
 {
-       int apic_id;
+       int phys_id;
 
-       apic_id = map_mat_entry(handle, type, acpi_id);
-       if (apic_id == -1)
-               apic_id = map_madt_entry(type, acpi_id);
+       phys_id = map_mat_entry(handle, type, acpi_id);
+       if (phys_id == -1)
+               phys_id = map_madt_entry(type, acpi_id);
 
-       return apic_id;
+       return phys_id;
 }
 
-int acpi_map_cpuid(int apic_id, u32 acpi_id)
+int acpi_map_cpuid(int phys_id, u32 acpi_id)
 {
 #ifdef CONFIG_SMP
        int i;
 #endif
 
-       if (apic_id == -1) {
+       if (phys_id == -1) {
                /*
                 * On UP processor, there is no _MAT or MADT table.
-                * So above apic_id is always set to -1.
+                * So above phys_id is always set to -1.
                 *
                 * BIOS may define multiple CPU handles even for UP processor.
                 * For example,
@@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
-                * Ignores apic_id and always returns 0 for the processor
+                * Ignores phys_id and always returns 0 for the processor
                 * handle with acpi id 0 if nr_cpu_ids is 1.
                 * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
                if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
-                       return apic_id;
+                       return phys_id;
        }
 
 #ifdef CONFIG_SMP
        for_each_possible_cpu(i) {
-               if (cpu_physical_id(i) == apic_id)
+               if (cpu_physical_id(i) == phys_id)
                        return i;
        }
 #else
        /* In UP kernel, only processor 0 is valid */
-       if (apic_id == 0)
-               return apic_id;
+       if (phys_id == 0)
+               return phys_id;
 #endif
        return -1;
 }
 
 int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 {
-       int apic_id;
+       int phys_id;
 
-       apic_id = acpi_get_apicid(handle, type, acpi_id);
+       phys_id = acpi_get_phys_id(handle, type, acpi_id);
 
-       return acpi_map_cpuid(apic_id, acpi_id);
+       return acpi_map_cpuid(phys_id, acpi_id);
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
index 4995365046984e55855373484b47936b2569447d..87b704e41877daa488eec962f545f32cf0d079fe 100644 (file)
@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
                state->flags = 0;
                switch (cx->type) {
                        case ACPI_STATE_C1:
-                       if (cx->entry_method != ACPI_CSTATE_FFH)
-                               state->flags |= CPUIDLE_FLAG_TIME_INVALID;
 
                        state->enter = acpi_idle_enter_c1;
                        state->enter_dead = acpi_idle_play_dead;
index 16914cc308822798b091d51a15f337b28486fe51..dc4d8960684a78f12978d56b911ead4f02a2074a 100644 (file)
@@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
        if (device->wakeup.flags.valid)
                acpi_power_resources_list_free(&device->wakeup.resources);
 
-       if (!device->flags.power_manageable)
+       if (!device->power.flags.power_resources)
                return;
 
        for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
                        device->power.flags.power_resources)
                device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
 
-       if (acpi_bus_init_power(device)) {
-               acpi_free_power_resources_lists(device);
+       if (acpi_bus_init_power(device))
                device->flags.power_manageable = 0;
-       }
 }
 
 static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
        /* Skip devices that are not present. */
        if (!acpi_device_is_present(device)) {
                device->flags.visited = false;
+               device->flags.power_manageable = 0;
                return;
        }
        if (device->handler)
                goto ok;
 
        if (!device->flags.initialized) {
-               acpi_bus_update_power(device, NULL);
+               device->flags.power_manageable =
+                       device->power.states[ACPI_STATE_D0].flags.valid;
+               if (acpi_bus_init_power(device))
+                       device->flags.power_manageable = 0;
+
                device->flags.initialized = true;
        }
        device->flags.visited = false;
index 1eaadff2e198037ac9b8567517e8f37e37f09d4a..032db459370f85481ba091dc3761e727284fe0f3 100644 (file)
@@ -505,6 +505,33 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
                },
        },
+
+       {
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
+               },
+       },
+       {
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
+               },
+       },
+
+       {
+        /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
+        .callback = video_disable_native_backlight,
+        .ident = "Dell XPS15 L521X",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
+               },
+       },
        {}
 };
 
index 6a103a35ea9b375f9328573c2fdece516690c2f1..0d8780c04a5e4d7c409b2ad1aa2d7e4c0c4f424b 100644 (file)
@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
  * on failure.
  */
-static struct generic_pm_domain *of_genpd_get_from_provider(
+struct generic_pm_domain *of_genpd_get_from_provider(
                                        struct of_phandle_args *genpdspec)
 {
        struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
 
        return genpd;
 }
+EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
 
 /**
  * genpd_dev_pm_detach - Detach a device from its PM domain.
index d24dd614a0bd3208c0d88fd79c35397db5be8a71..106c69359306b595e74075853ae8cc4e1465924f 100644 (file)
@@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
 /* Lock to allow exclusive modification to the device and opp lists */
 static DEFINE_MUTEX(dev_opp_list_lock);
 
+#define opp_rcu_lockdep_assert()                                       \
+do {                                                                   \
+       rcu_lockdep_assert(rcu_read_lock_held() ||                      \
+                               lockdep_is_held(&dev_opp_list_lock),    \
+                          "Missing rcu_read_lock() or "                \
+                          "dev_opp_list_lock protection");             \
+} while (0)
+
 /**
  * find_device_opp() - find device_opp struct using device pointer
  * @dev:       device pointer used to lookup device OPPs
@@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  * This function returns the number of available opps if there are any,
  * else returns 0 if none or the corresponding error value.
  *
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
+ * Locking: This function takes rcu_read_lock().
  */
 int dev_pm_opp_get_opp_count(struct device *dev)
 {
@@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
        struct dev_pm_opp *temp_opp;
        int count = 0;
 
+       rcu_read_lock();
+
        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
-               int r = PTR_ERR(dev_opp);
-               dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
-               return r;
+               count = PTR_ERR(dev_opp);
+               dev_err(dev, "%s: device OPP not found (%d)\n",
+                       __func__, count);
+               goto out_unlock;
        }
 
        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
                        count++;
        }
 
+out_unlock:
+       rcu_read_unlock();
        return count;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
        struct device_opp *dev_opp;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+       opp_rcu_lockdep_assert();
+
        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
        struct device_opp *dev_opp;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+       opp_rcu_lockdep_assert();
+
        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
                return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
        struct device_opp *dev_opp;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+       opp_rcu_lockdep_assert();
+
        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
                return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
 
        /* Check for existing list for 'dev' */
        dev_opp = find_device_opp(dev);
-       if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
-                PTR_ERR(dev_opp)))
+       if (IS_ERR(dev_opp)) {
+               int error = PTR_ERR(dev_opp);
+               if (error != -ENODEV)
+                       WARN(1, "%s: dev_opp: %d\n",
+                            IS_ERR_OR_NULL(dev) ?
+                                       "Invalid device" : dev_name(dev),
+                            error);
                return;
+       }
 
        /* Hold our list modification lock here */
        mutex_lock(&dev_opp_list_lock);
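
Note on the OPP hunks above: the find_freq_*() helpers only assert (via opp_rcu_lockdep_assert()) that the caller already holds rcu_read_lock() or dev_opp_list_lock; they do not take the lock themselves. A minimal sketch of a conforming caller, with example_pick_opp() as a hypothetical helper, keeps the lookup and every dereference of the returned OPP inside one RCU read-side section:

#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static int example_pick_opp(struct device *dev, unsigned long *freq_hz,
			    unsigned long *volt_uv)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	/* The OPP is RCU protected, so read it before dropping the lock. */
	*volt_uv = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}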
index ae9f615382f6173c9ad6377c4bc4275403575fdf..aa2224aa7caa34d5854aebfb7ceaf4cebd29eccc 100644 (file)
@@ -530,7 +530,7 @@ static int null_add_dev(void)
                        goto out_cleanup_queues;
 
                nullb->q = blk_mq_init_queue(&nullb->tag_set);
-               if (!nullb->q) {
+               if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
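
Note: blk_mq_init_queue() reports failure with ERR_PTR() rather than NULL, which is why this check (and the matching ones in the nvme and virtio_blk hunks below) switches to IS_ERR(). A sketch of the calling convention, with example_setup_queue() as a made-up wrapper:

#include <linux/blk-mq.h>
#include <linux/err.h>

static int example_setup_queue(struct blk_mq_tag_set *set,
			       struct request_queue **qp)
{
	struct request_queue *q = blk_mq_init_queue(set);

	if (IS_ERR(q))
		return PTR_ERR(q);	/* propagate the encoded error */

	*qp = q;
	return 0;
}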
index b1d5d87973157b4c6e4a70b757519c37b3460201..cb529e9a82dd685b5b372bea2ed272c59fae5bc5 100644 (file)
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
        cmd->fn = handler;
        cmd->ctx = ctx;
        cmd->aborted = 0;
+       blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
 }
 
 /* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        if (unlikely(status)) {
                if (!(status & NVME_SC_DNR || blk_noretry_request(req))
                    && (jiffies - req->start_time) < req->timeout) {
+                       unsigned long flags;
+
                        blk_mq_requeue_request(req);
-                       blk_mq_kick_requeue_list(req->q);
+                       spin_lock_irqsave(req->q->queue_lock, flags);
+                       if (!blk_queue_stopped(req->q))
+                               blk_mq_kick_requeue_list(req->q);
+                       spin_unlock_irqrestore(req->q->queue_lock, flags);
                        return;
                }
                req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                }
        }
 
-       blk_mq_start_request(req);
-
        nvme_set_info(cmd, iod, req_completion);
        spin_lock_irq(&nvmeq->q_lock);
        if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       req->cmd_flags |= REQ_NO_TIMEOUT;
        cmd_info = blk_mq_rq_to_pdu(req);
        nvme_set_info(cmd_info, req, async_req_completion);
 
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
        struct nvme_command cmd;
 
        if (!nvmeq->qid || cmd_rq->aborted) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&dev_list_lock, flags);
                if (work_busy(&dev->reset_work))
-                       return;
+                       goto out;
                list_del_init(&dev->node);
                dev_warn(&dev->pci_dev->dev,
                        "I/O %d QID %d timeout, reset controller\n",
                                                        req->tag, nvmeq->qid);
                dev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &dev->reset_work);
+ out:
+               spin_unlock_irqrestore(&dev_list_lock, flags);
                return;
        }
 
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
        void *ctx;
        nvme_completion_fn fn;
        struct nvme_cmd_info *cmd;
-       static struct nvme_completion cqe = {
-               .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
-       };
+       struct nvme_completion cqe;
+
+       if (!blk_mq_request_started(req))
+               return;
 
        cmd = blk_mq_rq_to_pdu(req);
 
        if (cmd->ctx == CMD_CTX_CANCELLED)
                return;
 
+       if (blk_queue_dying(req->q))
+               cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+       else
+               cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
+
        dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
                                                req->tag, nvmeq->qid);
        ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;
 
-       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
-                                                       nvmeq->qid);
-       if (nvmeq->dev->initialized)
-               nvme_abort_req(req);
-
        /*
         * The aborted req will be completed on receiving the abort req.
         * We enable the timer again. If hit twice, it'll cause a device reset,
         * as the device then is in a faulty state.
         */
-       return BLK_EH_RESET_TIMER;
+       int ret = BLK_EH_RESET_TIMER;
+
+       dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+                                                       nvmeq->qid);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       if (!nvmeq->dev->initialized) {
+               /*
+                * Force cancelled command frees the request, which requires we
+                * return BLK_EH_NOT_HANDLED.
+                */
+               nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+               ret = BLK_EH_NOT_HANDLED;
+       } else
+               nvme_abort_req(req);
+       spin_unlock_irq(&nvmeq->q_lock);
+
+       return ret;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
  */
 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
-       int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+       int vector;
 
        spin_lock_irq(&nvmeq->q_lock);
+       if (nvmeq->cq_vector == -1) {
+               spin_unlock_irq(&nvmeq->q_lock);
+               return 1;
+       }
+       vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
        nvmeq->dev->online_queues--;
+       nvmeq->cq_vector = -1;
        spin_unlock_irq(&nvmeq->q_lock);
 
        irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
                adapter_delete_sq(dev, qid);
                adapter_delete_cq(dev, qid);
        }
+       if (!qid && dev->admin_q)
+               blk_mq_freeze_queue_start(dev->admin_q);
        nvme_clear_queue(nvmeq);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-                                                       int depth, int vector)
+                                                       int depth)
 {
        struct device *dmadev = &dev->pci_dev->dev;
        struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        nvmeq->cq_phase = 1;
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
-       nvmeq->cq_vector = vector;
        nvmeq->qid = qid;
        dev->queue_count++;
        dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        struct nvme_dev *dev = nvmeq->dev;
        int result;
 
+       nvmeq->cq_vector = qid - 1;
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
                return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
        .timeout        = nvme_timeout,
 };
 
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+       if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+               blk_cleanup_queue(dev->admin_q);
+               blk_mq_free_tag_set(&dev->admin_tagset);
+       }
+}
+
 static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 {
        if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
                        return -ENOMEM;
 
                dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
-               if (!dev->admin_q) {
+               if (IS_ERR(dev->admin_q)) {
                        blk_mq_free_tag_set(&dev->admin_tagset);
                        return -ENOMEM;
                }
-       }
+               if (!blk_get_queue(dev->admin_q)) {
+                       nvme_dev_remove_admin(dev);
+                       return -ENODEV;
+               }
+       } else
+               blk_mq_unfreeze_queue(dev->admin_q);
 
        return 0;
 }
 
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
-       if (dev->admin_q)
-               blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
        int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 
        nvmeq = dev->queues[0];
        if (!nvmeq) {
-               nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+               nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
                if (!nvmeq)
                        return -ENOMEM;
        }
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        if (result)
                goto free_nvmeq;
 
-       result = nvme_alloc_admin_tags(dev);
-       if (result)
-               goto free_nvmeq;
-
+       nvmeq->cq_vector = 0;
        result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
        if (result)
-               goto free_tags;
+               goto free_nvmeq;
 
        return result;
 
- free_tags:
-       nvme_free_admin_tags(dev);
  free_nvmeq:
        nvme_free_queues(dev, 0);
        return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
        unsigned i;
 
        for (i = dev->queue_count; i <= dev->max_qid; i++)
-               if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+               if (!nvme_alloc_queue(dev, i, dev->q_depth))
                        break;
 
        for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
                        break;
                if (!schedule_timeout(ADMIN_TIMEOUT) ||
                                        fatal_signal_pending(current)) {
+                       /*
+                        * Disable the controller first since we can't trust it
+                        * at this point, but leave the admin queue enabled
+                        * until all queue deletion requests are flushed.
+                        * FIXME: This may take a while if there are more h/w
+                        * queues than admin tags.
+                        */
                        set_current_state(TASK_RUNNING);
-
                        nvme_disable_ctrl(dev, readq(&dev->bar->cap));
-                       nvme_disable_queue(dev, 0);
-
-                       send_sig(SIGKILL, dq->worker->task, 1);
+                       nvme_clear_queue(dev->queues[0]);
                        flush_kthread_worker(dq->worker);
+                       nvme_disable_queue(dev, 0);
                        return;
                }
        }
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
 {
        struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
                                                        cmdinfo.work);
-       allow_signal(SIGKILL);
        if (nvme_delete_sq(nvmeq))
                nvme_del_queue_end(nvmeq);
 }
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
                kthread_stop(tmp);
 }
 
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns;
+
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               blk_mq_freeze_queue_start(ns->queue);
+
+               spin_lock(ns->queue->queue_lock);
+               queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+               spin_unlock(ns->queue->queue_lock);
+
+               blk_mq_cancel_requeue_work(ns->queue);
+               blk_mq_stop_hw_queues(ns->queue);
+       }
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns;
+
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+               blk_mq_unfreeze_queue(ns->queue);
+               blk_mq_start_stopped_hw_queues(ns->queue, true);
+               blk_mq_kick_requeue_list(ns->queue);
+       }
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
        int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        dev->initialized = 0;
        nvme_dev_list_remove(dev);
 
-       if (dev->bar)
+       if (dev->bar) {
+               nvme_freeze_queues(dev);
                csts = readl(&dev->bar->csts);
+       }
        if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
                for (i = dev->queue_count - 1; i >= 0; i--) {
                        struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        nvme_dev_unmap(dev);
 }
 
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
-       if (dev->admin_q && !blk_queue_dying(dev->admin_q))
-               blk_cleanup_queue(dev->admin_q);
-}
-
 static void nvme_dev_remove(struct nvme_dev *dev)
 {
        struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
        list_for_each_entry(ns, &dev->namespaces, list) {
                if (ns->disk->flags & GENHD_FL_UP)
                        del_gendisk(ns->disk);
-               if (!blk_queue_dying(ns->queue))
+               if (!blk_queue_dying(ns->queue)) {
+                       blk_mq_abort_requeue_list(ns->queue);
                        blk_cleanup_queue(ns->queue);
+               }
        }
 }
 
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
        nvme_free_namespaces(dev);
        nvme_release_instance(dev);
        blk_mq_free_tag_set(&dev->tagset);
+       blk_put_queue(dev->admin_q);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
        }
 
        nvme_init_queue(dev->queues[0], 0);
+       result = nvme_alloc_admin_tags(dev);
+       if (result)
+               goto disable;
 
        result = nvme_setup_io_queues(dev);
        if (result)
-               goto disable;
+               goto free_tags;
 
        nvme_set_irq_hints(dev);
 
        return result;
 
+ free_tags:
+       nvme_dev_remove_admin(dev);
  disable:
        nvme_disable_queue(dev, 0);
        nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                dev->reset_workfn = nvme_remove_disks;
                queue_work(nvme_workq, &dev->reset_work);
                spin_unlock(&dev_list_lock);
+       } else {
+               nvme_unfreeze_queues(dev);
+               nvme_set_irq_hints(dev);
        }
        dev->initialized = 1;
        return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
        flush_work(&dev->reset_work);
        misc_deregister(&dev->miscdev);
-       nvme_dev_remove(dev);
        nvme_dev_shutdown(dev);
+       nvme_dev_remove(dev);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
-       nvme_free_admin_tags(dev);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
 }
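
Note on the nvme-core.c hunks above: several of them rely on the request being started before anything that could complete, abort or time it out can run, which is why blk_mq_start_request() moves into nvme_set_info() rather than being called later in the submission path. A stripped-down sketch of that ordering in a ->queue_rq() handler (example_queue_rq() and the hardware-submit step are placeholders, not the driver's code):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	/* Arm the block layer timeout before the command can complete. */
	blk_mq_start_request(req);

	/* ... build the hardware command and post it to the queue ... */

	return BLK_MQ_RQ_QUEUE_OK;
}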
index 7ef7c098708fc4e482181724d574555bbb9db6d7..cdfbd21e35975178fa0c4cece78a354ef1d53007 100644 (file)
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
                goto out_put_disk;
 
        q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
-       if (!q) {
+       if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
index 860da40b78effb96b16a779f84f9ea21d05aba95..0ce5e2d65a06b5d4e6ecbdf3390554f08751d9e2 100644 (file)
@@ -1312,6 +1312,9 @@ static int cci_probe(void)
        if (!np)
                return -ENODEV;
 
+       if (!of_device_is_available(np))
+               return -ENODEV;
+
        cci_config = of_match_node(arm_cci_matches, np)->data;
        if (!cci_config)
                return -ENODEV;
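
Note: of_device_is_available() is the usual way to honour a status = "disabled" property before touching a node. A small sketch with a made-up compatible string:

#include <linux/of.h>

static bool example_node_usable(void)
{
	struct device_node *np;
	bool ok;

	np = of_find_compatible_node(NULL, NULL, "example,device");
	if (!np)
		return false;

	ok = of_device_is_available(np);	/* false when status = "disabled" */
	of_node_put(np);
	return ok;
}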
index fd5a5e85d7dc604e2ebe237ee34ce9355d81d866..982b96323f823b8402ede2ceec7c0cb85c042ec4 100644 (file)
@@ -969,7 +969,8 @@ static void sender(void                *send_info,
 
                do_gettimeofday(&t);
                pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
-                      msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+                      msg->data[0], msg->data[1],
+                      (long) t.tv_sec, (long) t.tv_usec);
        }
 }
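
Note: time_t and suseconds_t are not guaranteed to be long on every architecture, so the explicit casts above keep the %ld conversions portable. Illustrative helper (example_log_timeval() is made up):

#include <linux/printk.h>
#include <linux/time.h>

static void example_log_timeval(const struct timeval *t)
{
	/* Cast to long so the format string matches on all architectures. */
	pr_info("timestamp %ld.%06ld\n", (long)t->tv_sec, (long)t->tv_usec);
}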
 
index 32f7c1b36204018d0ce151601c6ca5ef6f2cf75f..2f13bd5246b5563ec399058029fcd0740c58c3d2 100644 (file)
@@ -70,6 +70,7 @@ struct clk_sam9x5_slow {
 
 #define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
 
+static struct clk *slow_clk;
 
 static int clk_slow_osc_prepare(struct clk_hw *hw)
 {
@@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
        clk = clk_register(NULL, &slowck->hw);
        if (IS_ERR(clk))
                kfree(slowck);
+       else
+               slow_clk = clk;
 
        return clk;
 }
@@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
        clk = clk_register(NULL, &slowck->hw);
        if (IS_ERR(clk))
                kfree(slowck);
+       else
+               slow_clk = clk;
 
        return clk;
 }
@@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
 
        of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
+
+/*
+ * FIXME: All slow clk users are not properly claiming it (get + prepare +
+ * enable) before using it.
+ * If all users properly claiming this clock decide that they don't need it
+ * anymore (or are removed), it is disabled while faulty users are still
+ * requiring it, and the system hangs.
+ * Prevent this clock from being disabled until all users are properly
+ * requesting it.
+ * Once this is done we should remove this function and the slow_clk variable.
+ */
+static int __init of_at91_clk_slow_retain(void)
+{
+       if (!slow_clk)
+               return 0;
+
+       __clk_get(slow_clk);
+       clk_prepare_enable(slow_clk);
+
+       return 0;
+}
+arch_initcall(of_at91_clk_slow_retain);
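
Note: the FIXME above asks slow-clock users to claim the clock themselves (get + prepare + enable). A sketch of what such a consumer would do; the "slow_clk" connection id is an assumption, not taken from any binding:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_claim_slow_clock(struct device *dev)
{
	struct clk *sck = devm_clk_get(dev, "slow_clk");

	if (IS_ERR(sck))
		return PTR_ERR(sck);

	/* get + prepare + enable, as the comment above requires */
	return clk_prepare_enable(sck);
}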
index 21784e4eb3f004af06c1b980938ab4ced9bc2894..440ef81ab15c4ba8d9f70947db7e5a0d144a97a4 100644 (file)
@@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
        { "pbridge",    "perif",        15, CLK_IGNORE_UNUSED },
        { "sdio",       "perif",        16, CLK_IGNORE_UNUSED },
        { "nfc",        "perif",        18 },
-       { "smemc",      "perif",        19 },
        { "pcie",       "perif",        22 },
 };
 
index b6e6c85507a5a7706c8c69f2c6611d908ade8611..0a47d6f49cd6f347eca03eadf283722c737c7fb2 100644 (file)
@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
        {}
 };
 
-static struct platform_driver ppc_corenet_clk_driver __initdata = {
+static struct platform_driver ppc_corenet_clk_driver = {
        .driver = {
                .name = "ppc_corenet_clock",
                .of_match_table = ppc_clk_ids,
index f4963b7d4e17d41b6a6553854c5250a7e90bfdef..d48ac71c6c8b173793a31e95ebaa93749e883a76 100644 (file)
@@ -1366,7 +1366,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
                new_rate = clk->ops->determine_rate(clk->hw, rate,
                                                    &best_parent_rate,
                                                    &parent_hw);
-               parent = parent_hw->clk;
+               parent = parent_hw ? parent_hw->clk : NULL;
        } else if (clk->ops->round_rate) {
                new_rate = clk->ops->round_rate(clk->hw, rate,
                                                &best_parent_rate);
index 75c8c45ef72849358e4b7bf2c3bbff7927761ae0..8539c4fd34cc37bd28810b93d6ffb815b0bd48d6 100644 (file)
@@ -124,10 +124,11 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
 {
        const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
        unsigned long alt_prate, alt_div;
+       unsigned long flags;
 
        alt_prate = clk_get_rate(cpuclk->alt_parent);
 
-       spin_lock(cpuclk->lock);
+       spin_lock_irqsave(cpuclk->lock, flags);
 
        /*
         * If the old parent clock speed is less than the clock speed
@@ -164,7 +165,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
                        cpuclk->reg_base + reg_data->core_reg);
        }
 
-       spin_unlock(cpuclk->lock);
+       spin_unlock_irqrestore(cpuclk->lock, flags);
        return 0;
 }
 
@@ -173,6 +174,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
 {
        const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
        const struct rockchip_cpuclk_rate_table *rate;
+       unsigned long flags;
 
        rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
        if (!rate) {
@@ -181,7 +183,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
                return -EINVAL;
        }
 
-       spin_lock(cpuclk->lock);
+       spin_lock_irqsave(cpuclk->lock, flags);
 
        if (ndata->old_rate < ndata->new_rate)
                rockchip_cpuclk_set_dividers(cpuclk, rate);
@@ -201,7 +203,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
        if (ndata->old_rate > ndata->new_rate)
                rockchip_cpuclk_set_dividers(cpuclk, rate);
 
-       spin_unlock(cpuclk->lock);
+       spin_unlock_irqrestore(cpuclk->lock, flags);
        return 0;
 }
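
Note: spin_lock_irqsave()/spin_unlock_irqrestore() save and restore the interrupt state instead of unconditionally re-enabling interrupts, so the rate-change notifier paths above are safe whether or not the caller already runs with interrupts disabled. Generic pattern (example names only):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... update state that may also be reached with IRQs off ... */
	spin_unlock_irqrestore(&example_lock, flags);
}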
 
index c54078960847c91f6f499ecb8f26924338bb2ab4..7eb684c50d42ce9f0d06ffa4de1ecef4270808b7 100644 (file)
@@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p)            = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
 PNAME(mux_mac_p)               = { "gpll", "dpll" };
 PNAME(mux_sclk_macref_p)       = { "mac_src", "ext_rmii" };
 
+static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = {
+       [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+                    RK2928_MODE_CON, 0, 5, 0, rk3188_pll_rates),
+       [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+                    RK2928_MODE_CON, 4, 4, 0, NULL),
+       [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+                    RK2928_MODE_CON, 8, 6, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+       [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+                    RK2928_MODE_CON, 12, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+};
+
 static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
        [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
                     RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
@@ -427,11 +438,11 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        /* hclk_peri gates */
        GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
        GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
-       GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
+       GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 7, GFLAGS),
        GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
        GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
-       GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS),
-       GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+       GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 5, GFLAGS),
+       GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS),
        GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS),
        GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS),
        GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
@@ -592,7 +603,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
        GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
        GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
 
-       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
+       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+                       RK2928_CLKGATE_CON(5), 14, GFLAGS),
 
        GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
 
@@ -680,7 +692,8 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
        GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
        GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
 
-       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+                       RK2928_CLKGATE_CON(7), 3, GFLAGS),
        GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
 
        GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
@@ -735,8 +748,8 @@ static void __init rk3188_common_clk_init(struct device_node *np)
 static void __init rk3066a_clk_init(struct device_node *np)
 {
        rk3188_common_clk_init(np);
-       rockchip_clk_register_plls(rk3188_pll_clks,
-                                  ARRAY_SIZE(rk3188_pll_clks),
+       rockchip_clk_register_plls(rk3066_pll_clks,
+                                  ARRAY_SIZE(rk3066_pll_clks),
                                   RK3066_GRF_SOC_STATUS);
        rockchip_clk_register_branches(rk3066a_clk_branches,
                                  ARRAY_SIZE(rk3066a_clk_branches));
index ac6be7c0132d1e27cfad2f95169212b70f3d31d2..11194b8329fe5a639a6241ed0718888f985df854 100644 (file)
@@ -145,20 +145,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
        }
 
 static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = {
-       RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4),
-       RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4),
+       RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3),
+       RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3),
 };
 
 static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
index 6a79fc4f900c4b56b4bd351cf050aa5c2b67175e..095c1774592c918d4684995a12f74b1090f17c3f 100644 (file)
@@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
 
        /* Register the CP15 based counter if we have one */
        if (type & ARCH_CP15_TIMER) {
-               if (arch_timer_use_virtual)
+               if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
                        arch_timer_read_counter = arch_counter_get_cntvct;
                else
                        arch_timer_read_counter = arch_counter_get_cntpct;
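
Note: IS_ENABLED() folds to a compile-time constant, so the added CONFIG_ARM64 test above costs nothing at run time and lets the compiler drop the dead branch. Illustrative use; CONFIG_EXAMPLE_FEATURE is a placeholder option:

#include <linux/kconfig.h>
#include <linux/types.h>

static bool example_feature_active(bool runtime_flag)
{
	/* True when the option is built in or modular, or the flag is set. */
	return IS_ENABLED(CONFIG_EXAMPLE_FEATURE) || runtime_flag;
}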
index f56147a1daed54a2e7fe4be9c983e46db95ecfc6..fde97d6e31d6d9749698aaf91bfae821f1a72f9f 100644 (file)
@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        /* OPPs might be populated at runtime, don't check for error here */
        of_init_opp_table(cpu_dev);
 
+       /*
+        * But we need OPP table to function so if it is not there let's
+        * give platform code chance to provide it for us.
+        */
+       ret = dev_pm_opp_get_opp_count(cpu_dev);
+       if (ret <= 0) {
+               pr_debug("OPP table is not ready, deferring probe\n");
+               ret = -EPROBE_DEFER;
+               goto out_free_opp;
+       }
+
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
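
Note: with the check above, cpufreq-dt defers probing until an OPP table exists. Platform code that cannot describe the OPPs in DT could satisfy it by registering them directly; a sketch using made-up frequency/voltage pairs:

#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_register_cpu_opps(struct device *cpu_dev)
{
	int ret;

	ret = dev_pm_opp_add(cpu_dev, 500000000, 1000000);   /* 500 MHz @ 1.00 V */
	if (ret)
		return ret;

	return dev_pm_opp_add(cpu_dev, 1000000000, 1200000); /* 1 GHz @ 1.20 V */
}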
index a09a29c312a9cbeef8cb43a2862c07f13c78a24e..46bed4f81cde882e8f1d3b3dbd1b9418b3de2ee5 100644 (file)
@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;
+       /*
+        * Governor might not be initiated here if ACPI _PPC changed
+        * notification happened, so check it.
+        */
+       if (!policy->governor)
+               return -EINVAL;
 
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
index 37263d9a105127079cd71baed298467495000dd6..401c0106ed345eda469a590aa345f88d8ff59eca 100644 (file)
@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        last_state = &ldev->states[last_idx];
 
-       if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
-               last_residency = cpuidle_get_last_residency(dev) - \
-                                        drv->states[last_idx].exit_latency;
-       }
-       else
-               last_residency = last_state->threshold.promotion_time + 1;
+       last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
 
        /* consider promotion */
        if (last_idx < drv->state_count - 1 &&
index 659d7b0c9ebfd1e78348d539f497112c36f2cdc5..40580794e23dc00f4d086a696c803ed35b4c0f25 100644 (file)
@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
-        * we are basically lost in the dark how much time passed.
-        * As a compromise, assume we slept for the whole expected time.
+        * we use them anyway if they are short, and if long,
+        * truncate to the whole expected time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup begun, not when it
@@ -405,22 +405,17 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */
-       if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
-               /* Use timer value as is */
-               measured_us = data->next_timer_us;
 
-       } else {
-               /* Use measured value */
-               measured_us = cpuidle_get_last_residency(dev);
+       /* measured value */
+       measured_us = cpuidle_get_last_residency(dev);
 
-               /* Deduct exit latency */
-               if (measured_us > target->exit_latency)
-                       measured_us -= target->exit_latency;
+       /* Deduct exit latency */
+       if (measured_us > target->exit_latency)
+               measured_us -= target->exit_latency;
 
-               /* Make sure our coefficients do not exceed unity */
-               if (measured_us > data->next_timer_us)
-                       measured_us = data->next_timer_us;
-       }
+       /* Make sure our coefficients do not exceed unity */
+       if (measured_us > data->next_timer_us)
+               measured_us = data->next_timer_us;
 
        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
index 380478562b7d3187d42a64c221caf5714e6e59ec..5c062548957c3183fba608e13354d204c0a4b40c 100644 (file)
@@ -1505,7 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        dw->regs = chip->regs;
        chip->dw = dw;
 
-       pm_runtime_enable(chip->dev);
        pm_runtime_get_sync(chip->dev);
 
        dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
@@ -1703,7 +1702,6 @@ int dw_dma_remove(struct dw_dma_chip *chip)
        }
 
        pm_runtime_put_sync_suspend(chip->dev);
-       pm_runtime_disable(chip->dev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(dw_dma_remove);
index a630161473a4fa69c2586a257949d7f0956d4db7..32ea1aca7a0ea27dc28ddd58c20281f482f53c45 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -185,6 +186,8 @@ static int dw_probe(struct platform_device *pdev)
        if (err)
                return err;
 
+       pm_runtime_enable(&pdev->dev);
+
        err = dw_dma_probe(chip, pdata);
        if (err)
                goto err_dw_dma_probe;
@@ -205,6 +208,7 @@ static int dw_probe(struct platform_device *pdev)
        return 0;
 
 err_dw_dma_probe:
+       pm_runtime_disable(&pdev->dev);
        clk_disable_unprepare(chip->clk);
        return err;
 }
@@ -217,6 +221,7 @@ static int dw_remove(struct platform_device *pdev)
                of_dma_controller_free(pdev->dev.of_node);
 
        dw_dma_remove(chip);
+       pm_runtime_disable(&pdev->dev);
        clk_disable_unprepare(chip->clk);
 
        return 0;
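
Note: with the two dw_dmac hunks above, pm_runtime_enable()/pm_runtime_disable() live in the platform glue that owns the struct device, and they must stay balanced on every error path and in remove. Sketch of that balance (example_hw_probe() is a stand-in for the real controller setup):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_hw_probe(struct platform_device *pdev)
{
	return 0;	/* stand-in for the real controller setup */
}

static int example_probe(struct platform_device *pdev)
{
	int err;

	pm_runtime_enable(&pdev->dev);

	err = example_hw_probe(pdev);
	if (err)
		pm_runtime_disable(&pdev->dev);	/* keep enable/disable balanced */

	return err;
}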
index 978b51eae2ec61bbba18db37cf05b8e93db2037e..ce3c1558cb0a6f6cfa5a818736da54ff8fc5ed07 100644 (file)
 
 #define DLN2_GPIO_MAX_PINS 32
 
-struct dln2_irq_work {
-       struct work_struct work;
-       struct dln2_gpio *dln2;
-       int pin;
-       int type;
-};
-
 struct dln2_gpio {
        struct platform_device *pdev;
        struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
         */
        DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
 
-       DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
-       DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
-       DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
-       struct dln2_irq_work *irq_work;
+       /* active IRQs - not synced to hardware */
+       DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
+       /* active IRQs - synced to hardware */
+       DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
+       int irq_type[DLN2_GPIO_MAX_PINS];
+       struct mutex irq_lock;
 };
 
 struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
        return !!ret;
 }
 
-static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
-                                     unsigned int pin, int value)
+static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
+                                    unsigned int pin, int value)
 {
        struct dln2_gpio_pin_val req = {
                .pin = cpu_to_le16(pin),
                .value = value,
        };
 
-       dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
-                        sizeof(req));
+       return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
+                               sizeof(req));
 }
 
 #define DLN2_GPIO_DIRECTION_IN         0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
                                      int value)
 {
+       struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
+       int ret;
+
+       ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
+       if (ret < 0)
+               return ret;
+
        return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
 }
 
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
                                &req, sizeof(req));
 }
 
-static void dln2_irq_work(struct work_struct *w)
-{
-       struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
-       struct dln2_gpio *dln2 = iw->dln2;
-       u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
-
-       if (test_bit(iw->pin, dln2->irqs_enabled))
-               dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
-       else
-               dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
-}
-
-static void dln2_irq_enable(struct irq_data *irqd)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
-       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
-       int pin = irqd_to_hwirq(irqd);
-
-       set_bit(pin, dln2->irqs_enabled);
-       schedule_work(&dln2->irq_work[pin].work);
-}
-
-static void dln2_irq_disable(struct irq_data *irqd)
+static void dln2_irq_unmask(struct irq_data *irqd)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
        struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
        int pin = irqd_to_hwirq(irqd);
 
-       clear_bit(pin, dln2->irqs_enabled);
-       schedule_work(&dln2->irq_work[pin].work);
+       set_bit(pin, dln2->unmasked_irqs);
 }
 
 static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
        struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
        int pin = irqd_to_hwirq(irqd);
 
-       set_bit(pin, dln2->irqs_masked);
-}
-
-static void dln2_irq_unmask(struct irq_data *irqd)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
-       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
-       struct device *dev = dln2->gpio.dev;
-       int pin = irqd_to_hwirq(irqd);
-
-       if (test_and_clear_bit(pin, dln2->irqs_pending)) {
-               int irq;
-
-               irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
-               if (!irq) {
-                       dev_err(dev, "pin %d not mapped to IRQ\n", pin);
-                       return;
-               }
-
-               generic_handle_irq(irq);
-       }
+       clear_bit(pin, dln2->unmasked_irqs);
 }
 
 static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
 
        switch (type) {
        case IRQ_TYPE_LEVEL_HIGH:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
                break;
        case IRQ_TYPE_LEVEL_LOW:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
                break;
        case IRQ_TYPE_EDGE_BOTH:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
                break;
        case IRQ_TYPE_EDGE_RISING:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
                break;
        case IRQ_TYPE_EDGE_FALLING:
-               dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
+               dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
                break;
        default:
                return -EINVAL;
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
        return 0;
 }
 
+static void dln2_irq_bus_lock(struct irq_data *irqd)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+
+       mutex_lock(&dln2->irq_lock);
+}
+
+static void dln2_irq_bus_unlock(struct irq_data *irqd)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+       struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+       int pin = irqd_to_hwirq(irqd);
+       int enabled, unmasked;
+       unsigned type;
+       int ret;
+
+       enabled = test_bit(pin, dln2->enabled_irqs);
+       unmasked = test_bit(pin, dln2->unmasked_irqs);
+
+       if (enabled != unmasked) {
+               if (unmasked) {
+                       type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
+                       set_bit(pin, dln2->enabled_irqs);
+               } else {
+                       type = DLN2_GPIO_EVENT_NONE;
+                       clear_bit(pin, dln2->enabled_irqs);
+               }
+
+               ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
+               if (ret)
+                       dev_err(dln2->gpio.dev, "failed to set event\n");
+       }
+
+       mutex_unlock(&dln2->irq_lock);
+}
+
 static struct irq_chip dln2_gpio_irqchip = {
        .name = "dln2-irq",
-       .irq_enable = dln2_irq_enable,
-       .irq_disable = dln2_irq_disable,
        .irq_mask = dln2_irq_mask,
        .irq_unmask = dln2_irq_unmask,
        .irq_set_type = dln2_irq_set_type,
+       .irq_bus_lock = dln2_irq_bus_lock,
+       .irq_bus_sync_unlock = dln2_irq_bus_unlock,
 };
 
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                return;
        }
 
-       if (!test_bit(pin, dln2->irqs_enabled))
-               return;
-       if (test_bit(pin, dln2->irqs_masked)) {
-               set_bit(pin, dln2->irqs_pending);
-               return;
-       }
-
-       switch (dln2->irq_work[pin].type) {
+       switch (dln2->irq_type[pin]) {
        case DLN2_GPIO_EVENT_CHANGE_RISING:
                if (event->value)
                        generic_handle_irq(irq);
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        struct dln2_gpio *dln2;
        struct device *dev = &pdev->dev;
        int pins;
-       int i, ret;
+       int ret;
 
        pins = dln2_gpio_get_pin_count(pdev);
        if (pins < 0) {
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        if (!dln2)
                return -ENOMEM;
 
-       dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
-                                     sizeof(struct dln2_irq_work), GFP_KERNEL);
-       if (!dln2->irq_work)
-               return -ENOMEM;
-       for (i = 0; i < pins; i++) {
-               INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
-               dln2->irq_work[i].pin = i;
-               dln2->irq_work[i].dln2 = dln2;
-       }
+       mutex_init(&dln2->irq_lock);
 
        dln2->pdev = pdev;
 
@@ -529,11 +510,8 @@ out:
 static int dln2_gpio_remove(struct platform_device *pdev)
 {
        struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
-       int i;
 
        dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
-       for (i = 0; i < dln2->gpio.ngpio; i++)
-               flush_work(&dln2->irq_work[i].work);
        gpiochip_remove(&dln2->gpio);
 
        return 0;
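
Note: the gpio-dln2 rework above follows the usual pattern for irqchips behind a slow (here USB) bus: mask/unmask only record intent in memory, and the actual bus transfer is deferred to irq_bus_sync_unlock(), where sleeping is allowed. Minimal shape of that pattern (example_* names are placeholders, not the driver's API):

#include <linux/irq.h>
#include <linux/mutex.h>

struct example_chip {
	struct mutex	irq_lock;	/* taken in irq_bus_lock() */
	unsigned long	wanted;		/* bits flipped by mask/unmask */
	unsigned long	programmed;	/* what the hardware currently has */
};

static void example_irq_bus_lock(struct irq_data *d)
{
	struct example_chip *c = irq_data_get_irq_chip_data(d);

	mutex_lock(&c->irq_lock);
}

static void example_irq_bus_sync_unlock(struct irq_data *d)
{
	struct example_chip *c = irq_data_get_irq_chip_data(d);

	if (c->wanted != c->programmed) {
		/* ... perform the (sleeping) bus transfer here ... */
		c->programmed = c->wanted;
	}

	mutex_unlock(&c->irq_lock);
}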
index 09daaf2aeb563d982c71807b8155df4b2c50a6c5..3a5a71050559c7c52964a5b55764ef9b16e82361 100644 (file)
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
        err = gpiochip_add(gc);
        if (err) {
                dev_err(&ofdev->dev, "Could not add gpiochip\n");
-               irq_domain_remove(priv->domain);
+               if (priv->domain)
+                       irq_domain_remove(priv->domain);
                return err;
        }
 
index 66e40398b3d32220624cb7fad671c86058c84339..e620807418ea7559eddc9d5a994a0c5e1f829a9c 100644 (file)
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
 obj-$(CONFIG_DRM_TTM)  += ttm/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_MGA)  += mga/
 obj-$(CONFIG_DRM_I810) += i810/
@@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
 obj-y                  += i2c/
 obj-y                  += panel/
 obj-y                  += bridge/
-obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
index 7d4974b83af7821649c7eff1f75ee4d0b05758e6..fcfdf23e1913ed01663b46bebfda5eec8bc4079d 100644 (file)
@@ -31,7 +31,6 @@
 #include <uapi/linux/kfd_ioctl.h>
 #include <linux/time.h>
 #include <linux/mm.h>
-#include <linux/uaccess.h>
 #include <uapi/asm-generic/mman-common.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
@@ -127,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep)
        return 0;
 }
 
-static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
-                                       void __user *arg)
+static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+                                       void *data)
 {
-       struct kfd_ioctl_get_version_args args;
+       struct kfd_ioctl_get_version_args *args = data;
        int err = 0;
 
-       args.major_version = KFD_IOCTL_MAJOR_VERSION;
-       args.minor_version = KFD_IOCTL_MINOR_VERSION;
-
-       if (copy_to_user(arg, &args, sizeof(args)))
-               err = -EFAULT;
+       args->major_version = KFD_IOCTL_MAJOR_VERSION;
+       args->minor_version = KFD_IOCTL_MINOR_VERSION;
 
        return err;
 }
@@ -221,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
        return 0;
 }
 
-static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
-                                       void __user *arg)
+static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+                                       void *data)
 {
-       struct kfd_ioctl_create_queue_args args;
+       struct kfd_ioctl_create_queue_args *args = data;
        struct kfd_dev *dev;
        int err = 0;
        unsigned int queue_id;
@@ -233,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
        memset(&q_properties, 0, sizeof(struct queue_properties));
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
        pr_debug("kfd: creating queue ioctl\n");
 
-       err = set_queue_properties_from_user(&q_properties, &args);
+       err = set_queue_properties_from_user(&q_properties, args);
        if (err)
                return err;
 
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
@@ -250,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
-               err = PTR_ERR(pdd);
+               err = -ESRCH;
                goto err_bind_process;
        }
 
@@ -263,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
        if (err != 0)
                goto err_create_queue;
 
-       args.queue_id = queue_id;
+       args->queue_id = queue_id;
 
        /* Return gpu_id as doorbell offset for mmap usage */
-       args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
-
-       if (copy_to_user(arg, &args, sizeof(args))) {
-               err = -EFAULT;
-               goto err_copy_args_out;
-       }
+       args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
 
        mutex_unlock(&p->mutex);
 
-       pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+       pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
 
        pr_debug("ring buffer address == 0x%016llX\n",
-                       args.ring_base_address);
+                       args->ring_base_address);
 
        pr_debug("read ptr address    == 0x%016llX\n",
-                       args.read_pointer_address);
+                       args->read_pointer_address);
 
        pr_debug("write ptr address   == 0x%016llX\n",
-                       args.write_pointer_address);
+                       args->write_pointer_address);
 
        return 0;
 
-err_copy_args_out:
-       pqm_destroy_queue(&p->pqm, queue_id);
 err_create_queue:
 err_bind_process:
        mutex_unlock(&p->mutex);
@@ -297,99 +283,90 @@ err_bind_process:
 }
 
 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
-                                       void __user *arg)
+                                       void *data)
 {
        int retval;
-       struct kfd_ioctl_destroy_queue_args args;
-
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
+       struct kfd_ioctl_destroy_queue_args *args = data;
 
        pr_debug("kfd: destroying queue id %d for PASID %d\n",
-                               args.queue_id,
+                               args->queue_id,
                                p->pasid);
 
        mutex_lock(&p->mutex);
 
-       retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+       retval = pqm_destroy_queue(&p->pqm, args->queue_id);
 
        mutex_unlock(&p->mutex);
        return retval;
 }
 
 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
-                                       void __user *arg)
+                                       void *data)
 {
        int retval;
-       struct kfd_ioctl_update_queue_args args;
+       struct kfd_ioctl_update_queue_args *args = data;
        struct queue_properties properties;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+       if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
                pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
                return -EINVAL;
        }
 
-       if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+       if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
                pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
                return -EINVAL;
        }
 
-       if ((args.ring_base_address) &&
+       if ((args->ring_base_address) &&
                (!access_ok(VERIFY_WRITE,
-                       (const void __user *) args.ring_base_address,
+                       (const void __user *) args->ring_base_address,
                        sizeof(uint64_t)))) {
                pr_err("kfd: can't access ring base address\n");
                return -EFAULT;
        }
 
-       if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+       if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
                pr_err("kfd: ring size must be a power of 2 or 0\n");
                return -EINVAL;
        }
 
-       properties.queue_address = args.ring_base_address;
-       properties.queue_size = args.ring_size;
-       properties.queue_percent = args.queue_percentage;
-       properties.priority = args.queue_priority;
+       properties.queue_address = args->ring_base_address;
+       properties.queue_size = args->ring_size;
+       properties.queue_percent = args->queue_percentage;
+       properties.priority = args->queue_priority;
 
        pr_debug("kfd: updating queue id %d for PASID %d\n",
-                       args.queue_id, p->pasid);
+                       args->queue_id, p->pasid);
 
        mutex_lock(&p->mutex);
 
-       retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+       retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
 
        mutex_unlock(&p->mutex);
 
        return retval;
 }
 
-static long kfd_ioctl_set_memory_policy(struct file *filep,
-                               struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_set_memory_policy(struct file *filep,
+                                       struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_set_memory_policy_args args;
+       struct kfd_ioctl_set_memory_policy_args *args = data;
        struct kfd_dev *dev;
        int err = 0;
        struct kfd_process_device *pdd;
        enum cache_policy default_policy, alternate_policy;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
-           && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+       if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+           && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
                return -EINVAL;
        }
 
-       if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
-           && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+       if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+           && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
                return -EINVAL;
        }
 
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
@@ -397,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
 
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
-               err = PTR_ERR(pdd);
+               err = -ESRCH;
                goto out;
        }
 
-       default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+       default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                         ? cache_policy_coherent : cache_policy_noncoherent;
 
        alternate_policy =
-               (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+               (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                   ? cache_policy_coherent : cache_policy_noncoherent;
 
        if (!dev->dqm->set_cache_memory_policy(dev->dqm,
                                &pdd->qpd,
                                default_policy,
                                alternate_policy,
-                               (void __user *)args.alternate_aperture_base,
-                               args.alternate_aperture_size))
+                               (void __user *)args->alternate_aperture_base,
+                               args->alternate_aperture_size))
                err = -EINVAL;
 
 out:
@@ -422,53 +399,44 @@ out:
        return err;
 }
 
-static long kfd_ioctl_get_clock_counters(struct file *filep,
-                               struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_get_clock_counters(struct file *filep,
+                               struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_get_clock_counters_args args;
+       struct kfd_ioctl_get_clock_counters_args *args = data;
        struct kfd_dev *dev;
        struct timespec time;
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       dev = kfd_device_by_id(args.gpu_id);
+       dev = kfd_device_by_id(args->gpu_id);
        if (dev == NULL)
                return -EINVAL;
 
        /* Reading GPU clock counter from KGD */
-       args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+       args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
 
        /* No access to rdtsc. Using raw monotonic time */
        getrawmonotonic(&time);
-       args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+       args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
 
        get_monotonic_boottime(&time);
-       args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+       args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
 
        /* Since the counter is in nano-seconds we use 1GHz frequency */
-       args.system_clock_freq = 1000000000;
-
-       if (copy_to_user(arg, &args, sizeof(args)))
-               return -EFAULT;
+       args->system_clock_freq = 1000000000;
 
        return 0;
 }
 
 
 static int kfd_ioctl_get_process_apertures(struct file *filp,
-                               struct kfd_process *p, void __user *arg)
+                               struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_get_process_apertures_args args;
+       struct kfd_ioctl_get_process_apertures_args *args = data;
        struct kfd_process_device_apertures *pAperture;
        struct kfd_process_device *pdd;
 
        dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
 
-       if (copy_from_user(&args, arg, sizeof(args)))
-               return -EFAULT;
-
-       args.num_of_nodes = 0;
+       args->num_of_nodes = 0;
 
        mutex_lock(&p->mutex);
 
@@ -477,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                /* Run over all pdd of the process */
                pdd = kfd_get_first_process_device_data(p);
                do {
-                       pAperture = &args.process_apertures[args.num_of_nodes];
+                       pAperture =
+                               &args->process_apertures[args->num_of_nodes];
                        pAperture->gpu_id = pdd->dev->id;
                        pAperture->lds_base = pdd->lds_base;
                        pAperture->lds_limit = pdd->lds_limit;
@@ -487,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                        pAperture->scratch_limit = pdd->scratch_limit;
 
                        dev_dbg(kfd_device,
-                               "node id %u\n", args.num_of_nodes);
+                               "node id %u\n", args->num_of_nodes);
                        dev_dbg(kfd_device,
                                "gpu id %u\n", pdd->dev->id);
                        dev_dbg(kfd_device,
@@ -503,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
                        dev_dbg(kfd_device,
                                "scratch_limit %llX\n", pdd->scratch_limit);
 
-                       args.num_of_nodes++;
+                       args->num_of_nodes++;
                } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
-                               (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+                               (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
        }
 
        mutex_unlock(&p->mutex);
 
-       if (copy_to_user(arg, &args, sizeof(args)))
-               return -EFAULT;
-
        return 0;
 }
 
+#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+       [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
+                       kfd_ioctl_get_version, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
+                       kfd_ioctl_create_queue, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
+                       kfd_ioctl_destroy_queue, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
+                       kfd_ioctl_set_memory_policy, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
+                       kfd_ioctl_get_clock_counters, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
+                       kfd_ioctl_get_process_apertures, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
+                       kfd_ioctl_update_queue, 0),
+};
+
+#define AMDKFD_CORE_IOCTL_COUNT        ARRAY_SIZE(amdkfd_ioctls)
+
 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 {
        struct kfd_process *process;
-       long err = -EINVAL;
+       amdkfd_ioctl_t *func;
+       const struct amdkfd_ioctl_desc *ioctl = NULL;
+       unsigned int nr = _IOC_NR(cmd);
+       char stack_kdata[128];
+       char *kdata = NULL;
+       unsigned int usize, asize;
+       int retcode = -EINVAL;
 
-       dev_dbg(kfd_device,
-               "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
-               cmd, _IOC_NR(cmd), arg);
+       if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+               goto err_i1;
+
+       if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
+               u32 amdkfd_size;
+
+               ioctl = &amdkfd_ioctls[nr];
+
+               amdkfd_size = _IOC_SIZE(ioctl->cmd);
+               usize = asize = _IOC_SIZE(cmd);
+               if (amdkfd_size > asize)
+                       asize = amdkfd_size;
+
+               cmd = ioctl->cmd;
+       } else
+               goto err_i1;
+
+       dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
 
        process = kfd_get_process(current);
-       if (IS_ERR(process))
-               return PTR_ERR(process);
+       if (IS_ERR(process)) {
+               dev_dbg(kfd_device, "no process\n");
+               goto err_i1;
+       }
 
-       switch (cmd) {
-       case KFD_IOC_GET_VERSION:
-               err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
-               break;
-       case KFD_IOC_CREATE_QUEUE:
-               err = kfd_ioctl_create_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_DESTROY_QUEUE:
-               err = kfd_ioctl_destroy_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_SET_MEMORY_POLICY:
-               err = kfd_ioctl_set_memory_policy(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_GET_CLOCK_COUNTERS:
-               err = kfd_ioctl_get_clock_counters(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_GET_PROCESS_APERTURES:
-               err = kfd_ioctl_get_process_apertures(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       case KFD_IOC_UPDATE_QUEUE:
-               err = kfd_ioctl_update_queue(filep, process,
-                                               (void __user *)arg);
-               break;
-
-       default:
-               dev_err(kfd_device,
-                       "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
-                       cmd, arg);
-               err = -EINVAL;
-               break;
+       /* Do not trust userspace, use our own definition */
+       func = ioctl->func;
+
+       if (unlikely(!func)) {
+               dev_dbg(kfd_device, "no function\n");
+               retcode = -EINVAL;
+               goto err_i1;
        }
 
-       if (err < 0)
-               dev_err(kfd_device,
-                       "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
-                       err, cmd, _IOC_NR(cmd));
+       if (cmd & (IOC_IN | IOC_OUT)) {
+               if (asize <= sizeof(stack_kdata)) {
+                       kdata = stack_kdata;
+               } else {
+                       kdata = kmalloc(asize, GFP_KERNEL);
+                       if (!kdata) {
+                               retcode = -ENOMEM;
+                               goto err_i1;
+                       }
+               }
+               if (asize > usize)
+                       memset(kdata + usize, 0, asize - usize);
+       }
 
-       return err;
+       if (cmd & IOC_IN) {
+               if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
+                       retcode = -EFAULT;
+                       goto err_i1;
+               }
+       } else if (cmd & IOC_OUT) {
+               memset(kdata, 0, usize);
+       }
+
+       retcode = func(filep, process, kdata);
+
+       if (cmd & IOC_OUT)
+               if (copy_to_user((void __user *)arg, kdata, usize) != 0)
+                       retcode = -EFAULT;
+
+err_i1:
+       if (!ioctl)
+               dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+                         task_pid_nr(current), cmd, nr);
+
+       if (kdata != stack_kdata)
+               kfree(kdata);
+
+       if (retcode)
+               dev_dbg(kfd_device, "ret = %d\n", retcode);
+
+       return retcode;
 }
 
 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
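The kfd_chardev.c rework above replaces the per-command switch with a table indexed by _IOC_NR(cmd): the dispatcher looks up the handler, copies the user argument into a kernel buffer sized from _IOC_SIZE(), calls the handler on that kernel copy, and copies the buffer back to user space for IOC_OUT commands, so the individual handlers no longer call copy_from_user()/copy_to_user() themselves. The user-space sketch below only shows how a command word decomposes into the table index, payload size, and direction bits that drive that flow; EXAMPLE_IOC and struct example_args are made up for illustration and are not part of the amdkfd ABI.

    #include <stdio.h>
    #include <stdint.h>
    #include <linux/ioctl.h>

    /* Hypothetical command, for illustration only. */
    struct example_args {
            uint64_t gpu_clock_counter;
            uint32_t gpu_id;
            uint32_t pad;
    };

    #define EXAMPLE_IOC _IOWR('K', 0x05, struct example_args)

    int main(void)
    {
            unsigned int cmd = EXAMPLE_IOC;

            /* nr selects the slot in the ioctl table; size and the direction
             * bits drive the copy_from/to_user() done by the dispatcher. */
            printf("nr=%u size=%u in=%d out=%d\n",
                   _IOC_NR(cmd), _IOC_SIZE(cmd),
                   !!(cmd & IOC_IN), !!(cmd & IOC_OUT));
            return 0;
    }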
index 924e90c072e513180ec8991b50333f2af663a3f5..9c8961d22360722a5382de9dde19b35d9b3d27ad 100644 (file)
@@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
 {
        int bit = qpd->vmid - KFD_VMID_START_OFFSET;
 
+       /* Release the vmid mapping */
+       set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+
        set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
        qpd->vmid = 0;
        q->properties.vmid = 0;
@@ -272,6 +275,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                return retval;
        }
 
+       pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
+                       q->pipe,
+                       q->queue);
+
+       retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+                       q->queue, q->properties.write_ptr);
+       if (retval != 0) {
+               deallocate_hqd(dqm, q);
+               mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+               return retval;
+       }
+
        return 0;
 }
 
@@ -320,6 +335,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 {
        int retval;
        struct mqd_manager *mqd;
+       bool prev_active = false;
 
        BUG_ON(!dqm || !q || !q->mqd);
 
@@ -330,10 +346,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                return -ENOMEM;
        }
 
-       retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
        if (q->properties.is_active == true)
+               prev_active = true;
+
+       /*
+        * Check active state vs. the previous state
+        * and modify counter accordingly.
+        */
+       retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+       if ((q->properties.is_active == true) && (prev_active == false))
                dqm->queue_count++;
-       else
+       else if ((q->properties.is_active == false) && (prev_active == true))
                dqm->queue_count--;
 
        if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
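The update_queue() fix above records whether the queue was active before the MQD update and only adjusts dqm->queue_count on an actual transition, so re-updating an already-active queue no longer inflates the counter. A minimal stand-alone sketch of that transition rule (names are illustrative, not driver code):

    #include <stdbool.h>

    /* Only 0->1 and 1->0 transitions of the active flag move the
     * running count of active queues. */
    int adjust_active_count(int count, bool prev_active, bool now_active)
    {
            if (now_active && !prev_active)
                    return count + 1;
            if (!now_active && prev_active)
                    return count - 1;
            return count;
    }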
index adc31474e786195bb3308150334c5a7a280f1d81..4c3828cf45bf71fbbae7e26f21f22bcd927fadce 100644 (file)
@@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
                        uint32_t queue_id)
 {
 
-       return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+       return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
                                        pipe_id, queue_id);
 
 }
index 71699ad97d74487d532cef2a168f8bb4d4fb5366..4c25ef504f79dd6be067369a1c216b11db98fc2e 100644 (file)
@@ -32,7 +32,7 @@ int kfd_pasid_init(void)
 {
        pasid_limit = max_num_of_processes;
 
-       pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
+       pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
        if (!pasid_bitmap)
                return -ENOMEM;
 
index f9fb81e3bb09b24edd7f8de6c22cf7e64a92391b..a5edb29507e310af8ebe437ef2a22fb218687ad1 100644 (file)
@@ -463,6 +463,24 @@ struct kfd_process {
        bool is_32bit_user_mode;
 };
 
+/**
+ * Ioctl function type.
+ *
+ * \param filep pointer to file structure.
+ * \param p amdkfd process pointer.
+ * \param data pointer to arg that was copied from user.
+ */
+typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
+                               void *data);
+
+struct amdkfd_ioctl_desc {
+       unsigned int cmd;
+       int flags;
+       amdkfd_ioctl_t *func;
+       unsigned int cmd_drv;
+       const char *name;
+};
+
 void kfd_process_create_wq(void);
 void kfd_process_destroy_wq(void);
 struct kfd_process *kfd_create_process(const struct task_struct *);
index b11792d7e70e2a6fca1fdc15636c4672fb3165dc..cca1708fd811be8253ab0d2989d74e405f3dab04 100644 (file)
@@ -921,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
        uint32_t i = 0;
 
        list_for_each_entry(dev, &topology_device_list, list) {
-               ret = kfd_build_sysfs_node_entry(dev, 0);
+               ret = kfd_build_sysfs_node_entry(dev, i);
                if (ret < 0)
                        return ret;
                i++;
index 47b551970a14aed723b24f7d2662293fa8914dff..96a512208fade6825da6aa8e2645f2a088d485d2 100644 (file)
@@ -183,7 +183,7 @@ struct kfd2kgd_calls {
        int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr);
 
-       bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+       bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);
 
        int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
index 70d0f0f06f1a65ccec340c0882f0481bf66903a5..e9f891c432f837b8693df84158b0b2cdf715972e 100644 (file)
@@ -1756,8 +1756,6 @@ struct drm_i915_private {
         */
        struct workqueue_struct *dp_wq;
 
-       uint32_t bios_vgacntr;
-
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct {
                int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
index 52adcb680be3a61113630493c6c5b98509965912..c11603b4cf1dc035d792a47bb3e58334e52382d9 100644 (file)
@@ -1048,6 +1048,7 @@ int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -1067,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                        return -EFAULT;
        }
 
+       intel_runtime_pm_get(dev_priv);
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
-               return ret;
+               goto put_rpm;
 
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
@@ -1121,6 +1124,9 @@ out:
        drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
+put_rpm:
+       intel_runtime_pm_put(dev_priv);
+
        return ret;
 }
 
index 996c2931c49945d86a595c6f38104c475fc32f1b..d0d3dfbe6d2adae46d968f8112675276197f15c6 100644 (file)
@@ -3725,8 +3725,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
        if ((iir & flip_pending) == 0)
                goto check_page_flip;
 
-       intel_prepare_page_flip(dev, plane);
-
        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3736,6 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
        if (I915_READ16(ISR) & flip_pending)
                goto check_page_flip;
 
+       intel_prepare_page_flip(dev, plane);
        intel_finish_page_flip(dev, pipe);
        return true;
 
@@ -3907,8 +3906,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
        if ((iir & flip_pending) == 0)
                goto check_page_flip;
 
-       intel_prepare_page_flip(dev, plane);
-
        /* We detect FlipDone by looking for the change in PendingFlip from '1'
         * to '0' on the following vblank, i.e. IIR has the Pendingflip
         * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3918,6 +3915,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
        if (I915_READ(ISR) & flip_pending)
                goto check_page_flip;
 
+       intel_prepare_page_flip(dev, plane);
        intel_finish_page_flip(dev, pipe);
        return true;
 
index fb3e3d429191247c5041af8ca0212c6ce1e2f705..e2af1383b179f498856767b5ccfd1ec8a2f8d8a4 100644 (file)
@@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
        udelay(300);
 
-       /*
-        * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
-        * from S3 without preserving (some of?) the other bits.
-        */
-       I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
+       I915_WRITE(vga_reg, VGA_DISP_DISABLE);
        POSTING_READ(vga_reg);
 }
 
@@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)
 
        intel_shared_dpll_init(dev);
 
-       /* save the BIOS value before clobbering it */
-       dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
        /* Just disable it once at startup */
        i915_disable_vga(dev);
        intel_setup_outputs(dev);
index f5a78d53e2978ed1c4f25cdaa73d9fcb0ca887bb..ac6da7102fbbdc53c74e584234c51506191da1ee 100644 (file)
@@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
                vlv_power_sequencer_reset(dev_priv);
 }
 
-static void check_power_well_state(struct drm_i915_private *dev_priv,
-                                  struct i915_power_well *power_well)
-{
-       bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
-       if (power_well->always_on || !i915.disable_power_well) {
-               if (!enabled)
-                       goto mismatch;
-
-               return;
-       }
-
-       if (enabled != (power_well->count > 0))
-               goto mismatch;
-
-       return;
-
-mismatch:
-       WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
-                 power_well->name, power_well->always_on, enabled,
-                 power_well->count, i915.disable_power_well);
-}
-
 /**
  * intel_display_power_get - grab a power domain reference
  * @dev_priv: i915 device instance
@@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
                        power_well->ops->enable(dev_priv, power_well);
                        power_well->hw_enabled = true;
                }
-
-               check_power_well_state(dev_priv, power_well);
        }
 
        power_domains->domain_use_count[domain]++;
@@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
                        power_well->hw_enabled = false;
                        power_well->ops->disable(dev_priv, power_well);
                }
-
-               check_power_well_state(dev_priv, power_well);
        }
 
        mutex_unlock(&power_domains->lock);
index ff2b434b3db480a47aa5b94515476714b2819693..760947e380c93bf429a0459622d62e376d450b13 100644 (file)
@@ -26,7 +26,7 @@
 void
 nvkm_event_put(struct nvkm_event *event, u32 types, int index)
 {
-       BUG_ON(!spin_is_locked(&event->refs_lock));
+       assert_spin_locked(&event->refs_lock);
        while (types) {
                int type = __ffs(types); types &= ~(1 << type);
                if (--event->refs[index * event->types_nr + type] == 0) {
@@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
 void
 nvkm_event_get(struct nvkm_event *event, u32 types, int index)
 {
-       BUG_ON(!spin_is_locked(&event->refs_lock));
+       assert_spin_locked(&event->refs_lock);
        while (types) {
                int type = __ffs(types); types &= ~(1 << type);
                if (++event->refs[index * event->types_nr + type] == 1) {
index d1bcde55e9d734df7573366a21f87e8b40a5f18b..839a32577680bf32eecbed59e196e6d6ea3606da 100644 (file)
@@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
        struct nvkm_event *event = notify->event;
        unsigned long flags;
 
-       BUG_ON(!spin_is_locked(&event->list_lock));
+       assert_spin_locked(&event->list_lock);
        BUG_ON(size != notify->size);
 
        spin_lock_irqsave(&event->refs_lock, flags);
index 674da1f095b29a1c1ecc524fef40eb3b60bc3a35..7329226906539fb0ed7f5f3ce6ab9280cd13a20c 100644 (file)
@@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
                break;
+       case 0x106:
+               device->cname = "GK208B";
+               device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+               device->oclass[NVDEV_SUBDEV_GPIO   ] =  nve0_gpio_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] =  nve0_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
+               device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
+               device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+               device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+               device->oclass[NVDEV_SUBDEV_DEVINIT] =  nvc0_devinit_oclass;
+               device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
+               device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
+               device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
+               device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+               device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
+               device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+               device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+               device->oclass[NVDEV_SUBDEV_PWR    ] =  nv108_pwr_oclass;
+               device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nvd0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_FIFO   ] =  nv108_fifo_oclass;
+               device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
+               device->oclass[NVDEV_ENGINE_GR     ] =  nv108_graph_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] =  nvf0_disp_oclass;
+               device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+               device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+               break;
        case 0x108:
                device->cname = "GK208";
                device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
index 5e58bba0dd5c5bbcf3527088abdc4a80eaeaec9c..a7a890fad1e537325eb24d477d75c24feb624f46 100644 (file)
@@ -44,8 +44,10 @@ static void
 pramin_fini(void *data)
 {
        struct priv *priv = data;
-       nv_wr32(priv->bios, 0x001700, priv->bar0);
-       kfree(priv);
+       if (priv) {
+               nv_wr32(priv->bios, 0x001700, priv->bar0);
+               kfree(priv);
+       }
 }
 
 static void *
index 00f2ca7e44a56af6bb2bf4db5ce159b6526f2d00..033a8e99949735866c751494fd9deae5765dc49e 100644 (file)
 
 #include "nv50.h"
 
+struct nvaa_ram_priv {
+       struct nouveau_ram base;
+       u64 poller_base;
+};
+
 static int
 nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 datasize,
              struct nouveau_object **pobject)
 {
-       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+       u32 rsvd_head = ( 256 * 1024); /* vga memory */
+       u32 rsvd_tail = (1024 * 1024); /* vbios etc */
        struct nouveau_fb *pfb = nouveau_fb(parent);
-       struct nouveau_ram *ram;
+       struct nvaa_ram_priv *priv;
        int ret;
 
-       ret = nouveau_ram_create(parent, engine, oclass, &ram);
-       *pobject = nv_object(ram);
+       ret = nouveau_ram_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       ram->size = nv_rd32(pfb, 0x10020c);
-       ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+       priv->base.type   = NV_MEM_TYPE_STOLEN;
+       priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+       priv->base.size   = (u64)nv_rd32(pfb, 0x100e14) << 12;
 
-       ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
-                             (rsvd_head + rsvd_tail), 1);
+       rsvd_tail += 0x1000;
+       priv->poller_base = priv->base.size - rsvd_tail;
+
+       ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
+                             (priv->base.size  - (rsvd_head + rsvd_tail)) >> 12,
+                             1);
        if (ret)
                return ret;
 
-       ram->type   = NV_MEM_TYPE_STOLEN;
-       ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
-       ram->get = nv50_ram_get;
-       ram->put = nv50_ram_put;
+       priv->base.get = nv50_ram_get;
+       priv->base.put = nv50_ram_put;
+       return 0;
+}
+
+static int
+nvaa_ram_init(struct nouveau_object *object)
+{
+       struct nouveau_fb *pfb = nouveau_fb(object);
+       struct nvaa_ram_priv *priv = (void *)object;
+       int ret;
+       u64 dniso, hostnb, flush;
+
+       ret = nouveau_ram_init(&priv->base);
+       if (ret)
+               return ret;
+
+       dniso  = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
+       hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
+       flush  = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+
+       /* Enable NISO poller for various clients and set their associated
+        * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+        */
+       nv_wr32(pfb, 0x100c18, dniso);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
+       nv_wr32(pfb, 0x100c1c, hostnb);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
+       nv_wr32(pfb, 0x100c24, flush);
+       nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+
        return 0;
 }
 
@@ -60,7 +97,7 @@ nvaa_ram_oclass = {
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvaa_ram_ctor,
                .dtor = _nouveau_ram_dtor,
-               .init = _nouveau_ram_init,
+               .init = nvaa_ram_init,
                .fini = _nouveau_ram_fini,
        },
 };
index a75c35ccf25c739010ac3f866f7106824e387ff6..165401c4045cfe56ee42eeba1eda1918c4ded5a3 100644 (file)
 
 #include "nv04.h"
 
-static void
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
-{
-       struct nv04_mc_priv *priv = (void *)pmc;
-       nv_wr08(priv, 0x088050, 0xff);
-}
-
 struct nouveau_oclass *
 nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
        .base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
                .fini = _nouveau_mc_fini,
        },
        .intr = nv04_mc_intr,
-       .msi_rearm = nv4c_mc_msi_rearm,
 }.base;
index 21ec561edc999458c5a8d4f1a99be19e67070ca0..bba2960d3dfbb5de9b6e69d977f72529624ab65c 100644 (file)
@@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
         * so use the DMA API for them.
         */
        if (!nv_device_is_cpu_coherent(device) &&
-           ttm->caching_state == tt_uncached)
+           ttm->caching_state == tt_uncached) {
                ttm_dma_unpopulate(ttm_dma, dev->dev);
+               return;
+       }
 
 #if __OS_HAS_AGP
        if (drm->agp.stat == ENABLED) {
index 42c34babc2e5b728959ad5c8135266f8b3c147e2..bf0f9e21d714a80248749ed0594699054e1b7fea 100644 (file)
@@ -36,7 +36,14 @@ void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
+       struct device *dev = drm->dev->dev;
+       int ret;
+
+       ret = pm_runtime_get_sync(dev);
+       if (WARN_ON(ret < 0 && ret != -EACCES))
+               return;
 
        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
        /* reset filp so nouveau_bo_del_ttm() can test for it */
        gem->filp = NULL;
        ttm_bo_unref(&bo);
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
 }
 
 int
@@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct nouveau_vma *vma;
+       struct device *dev = drm->dev->dev;
        int ret;
 
        if (!cli->vm)
@@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
                        goto out;
                }
 
+               ret = pm_runtime_get_sync(dev);
+               if (ret < 0 && ret != -EACCES)
+                       goto out;
+
                ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
-               if (ret) {
+               if (ret)
                        kfree(vma);
-                       goto out;
-               }
+
+               pm_runtime_mark_last_busy(dev);
+               pm_runtime_put_autosuspend(dev);
        } else {
                vma->refcount++;
        }
@@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+       struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+       struct device *dev = drm->dev->dev;
        struct nouveau_vma *vma;
        int ret;
 
@@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 
        vma = nouveau_bo_vma_find(nvbo, cli->vm);
        if (vma) {
-               if (--vma->refcount == 0)
-                       nouveau_gem_object_unmap(nvbo, vma);
+               if (--vma->refcount == 0) {
+                       ret = pm_runtime_get_sync(dev);
+                       if (!WARN_ON(ret < 0 && ret != -EACCES)) {
+                               nouveau_gem_object_unmap(nvbo, vma);
+                               pm_runtime_mark_last_busy(dev);
+                               pm_runtime_put_autosuspend(dev);
+                       }
+               }
        }
        ttm_bo_unreserve(&nvbo->bo);
 }
index d59ec491dbb9cba64d76369e62ef61b836c094d8..ed644a4f6f57c4254349c3881a16955cd42cbc05 100644 (file)
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                                return pll;
                }
                /* otherwise, pick one of the plls */
-               if ((rdev->family == CHIP_KAVERI) ||
-                   (rdev->family == CHIP_KABINI) ||
+               if ((rdev->family == CHIP_KABINI) ||
                    (rdev->family == CHIP_MULLINS)) {
-                       /* KB/KV/ML has PPLL1 and PPLL2 */
+                       /* KB/ML has PPLL1 and PPLL2 */
                        pll_in_use = radeon_get_pll_use_mask(crtc);
                        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                                return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
                        DRM_ERROR("unable to allocate a PPLL\n");
                        return ATOM_PPLL_INVALID;
                } else {
-                       /* CI has PPLL0, PPLL1, and PPLL2 */
+                       /* CI/KV has PPLL0, PPLL1, and PPLL2 */
                        pll_in_use = radeon_get_pll_use_mask(crtc);
                        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                                return ATOM_PPLL2;
@@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
        case ATOM_PPLL0:
                /* disable the ppll */
                if ((rdev->family == CHIP_ARUBA) ||
+                   (rdev->family == CHIP_KAVERI) ||
                    (rdev->family == CHIP_BONAIRE) ||
                    (rdev->family == CHIP_HAWAII))
                        atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
index 11ba9d21b89b608788f623822bcfb6f9f14dbbf1..db42a670f9957c7fd6c3698b2c0be178ac6a02d2 100644 (file)
@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
        struct radeon_connector_atom_dig *dig_connector;
        int dp_clock;
 
+       if ((mode->clock > 340000) &&
+           (!radeon_connector_is_dp12_capable(connector)))
+               return MODE_CLOCK_HIGH;
+
        if (!radeon_connector->con_priv)
                return MODE_CLOCK_HIGH;
        dig_connector = radeon_connector->con_priv;
index ba85986febea5054762615ee7fc5edbaa275f2cd..03003f8a6de63ba00c741824c053070a009cd319 100644 (file)
 #define ATC_VM_APERTURE1_HIGH_ADDR                             0x330Cu
 #define ATC_VM_APERTURE1_LOW_ADDR                              0x3304u
 
+#define IH_VMID_0_LUT                                          0x3D40u
+
 #endif
index 2fe8cfc966d9304b6845f6f6d29c9e236d80b0cb..bafdf92a5732dfa679f74e47ee582a699f9d635e 100644 (file)
@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
        }
 
        sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-       if (sad_count < 0) {
+       if (sad_count <= 0) {
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
                return;
        }
index 9b42001295ba587197f5bcb317daca4a0f83cb3d..e3e9c10cfba97438571b4b8bc88cbea7c3dab9f7 100644 (file)
@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
        if (radeon_bapm == -1) {
-               /* There are stability issues reported on with
-                * bapm enabled on an asrock system.
-                */
-               if (rdev->pdev->subsystem_vendor == 0x1849)
-                       pi->bapm_enable = false;
-               else
+               /* only enable bapm on KB, ML by default */
+               if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
                        pi->bapm_enable = true;
+               else
+                       pi->bapm_enable = false;
        } else if (radeon_bapm == 0) {
                pi->bapm_enable = false;
        } else {
index 242fd8b1b221d9c49459b67d775aaf3c57fea45f..8bf87f1203ccaddd85a215dc0c5e19fb48305dbf 100644 (file)
@@ -72,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
 static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr);
 
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id);
 
 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@@ -92,7 +92,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .init_memory = kgd_init_memory,
        .init_pipeline = kgd_init_pipeline,
        .hqd_load = kgd_hqd_load,
-       .hqd_is_occupies = kgd_hqd_is_occupies,
+       .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .get_fw_version = get_fw_version
 };
@@ -101,6 +101,7 @@ static const struct kgd2kfd_calls *kgd2kfd;
 
 bool radeon_kfd_init(void)
 {
+#if defined(CONFIG_HSA_AMD_MODULE)
        bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
                                const struct kgd2kfd_calls**);
 
@@ -117,6 +118,17 @@ bool radeon_kfd_init(void)
        }
 
        return true;
+#elif defined(CONFIG_HSA_AMD)
+       if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+               kgd2kfd = NULL;
+
+               return false;
+       }
+
+       return true;
+#else
+       return false;
+#endif
 }
 
 void radeon_kfd_fini(void)
@@ -378,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                cpu_relax();
        write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
 
+       /* Mapping vmid to pasid also for IH block */
+       write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
+                       pasid_mapping);
+
        return 0;
 }
 
@@ -517,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
        return 0;
 }
 
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
 {
        uint32_t act;
@@ -556,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
                if (timeout == 0) {
                        pr_err("kfd: cp queue preemption time out (%dms)\n",
                                temp);
+                       release_queue(kgd);
                        return -ETIME;
                }
                msleep(20);
index 535403e0c8a28c20011261decb68eeb3d47b6846..15aee723db77ec171a5b8a32797d75e3d49e1469 100644 (file)
@@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
        u32 format;
        u32 *buffer;
        const u8 __user *data;
-       int size, dwords, tex_width, blit_width, spitch;
+       unsigned int size, dwords, tex_width, blit_width, spitch;
        u32 height;
        int i;
        u32 texpitch, microtile;
index 230b6f887cd86e9b4d3d4bf625166c878d5524ed..dfdc26970022998adfb951f50baacb0e4df38474 100644 (file)
@@ -27,7 +27,8 @@ if HID
 
 config HID_BATTERY_STRENGTH
        bool "Battery level reporting for HID devices"
-       depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+       depends on HID
+       select POWER_SUPPLY
        default n
        ---help---
        This option adds support of reporting battery strength (for HID devices
index c3d0ac1a0988096eaacbe8063b354399b6a85e14..8b638792cb43c426c2e4fffb0bb594e76617f554 100644 (file)
@@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
index 7460f3402298c2e1925059a1ef5cbf669e9d381b..9243359c18219ab75c5e47bda83ed44f3d322f05 100644 (file)
 #define USB_DEVICE_ID_KYE_GPEN_560     0x5003
 #define USB_DEVICE_ID_KYE_EASYPEN_I405X        0x5010
 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X       0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2     0x501a
 #define USB_DEVICE_ID_KYE_EASYPEN_M610X        0x5013
 
 #define USB_VENDOR_ID_LABTEC           0x1020
index e0a0f06ac5ef6168c8fcdd5c2462df3f3130c941..9505605b6e22a72b29661d568aa6fe4dcaa32643 100644 (file)
@@ -311,6 +311,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                               USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+                              USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
index b92bf01a1ae8122f486ea333288558f082162f5d..158fcf577fae570d331a37c46d650a151141c19c 100644 (file)
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                }
                break;
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
                if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
                        rdesc = mousepen_i608x_rdesc_fixed;
                        *rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
        switch (id->product) {
        case USB_DEVICE_ID_KYE_EASYPEN_I405X:
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
        case USB_DEVICE_ID_KYE_EASYPEN_M610X:
                ret = kye_tablet_enable(hdev);
                if (ret) {
@@ -445,6 +447,8 @@ static const struct hid_device_id kye_devices[] = {
                                USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+                               USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
index c917ab61aafa6156b433ed3148a31384ac6d4292..5bc6d80d5be79f465f3cbbb686c471db6162eb63 100644 (file)
@@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
 
        switch (data[0]) {
        case REPORT_ID_DJ_SHORT:
+               if (size != DJREPORT_SHORT_LENGTH) {
+                       dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+                       return false;
+               }
                return logi_dj_dj_event(hdev, report, data, size);
        case REPORT_ID_HIDPP_SHORT:
-               /* intentional fallthrough */
+               if (size != HIDPP_REPORT_SHORT_LENGTH) {
+                       dev_err(&hdev->dev,
+                               "Short HID++ report of bad size (%d)", size);
+                       return false;
+               }
+               return logi_dj_hidpp_event(hdev, report, data, size);
        case REPORT_ID_HIDPP_LONG:
+               if (size != HIDPP_REPORT_LONG_LENGTH) {
+                       dev_err(&hdev->dev,
+                               "Long HID++ report of bad size (%d)", size);
+                       return false;
+               }
                return logi_dj_hidpp_event(hdev, report, data, size);
        }
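The hid-logitech-dj change above validates the length of each raw report against the fixed length expected for its report ID before passing it to logi_dj_dj_event()/logi_dj_hidpp_event(), so truncated or malformed reports are rejected instead of being parsed. A stand-alone sketch of that length gate follows; the report IDs and lengths are illustrative values, not the driver's definitions:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative report IDs and expected lengths. */
    enum { ID_DJ_SHORT = 0x20, ID_HIDPP_SHORT = 0x10, ID_HIDPP_LONG = 0x11 };

    bool report_size_ok(const uint8_t *data, size_t size)
    {
            if (size == 0)
                    return false;

            switch (data[0]) {
            case ID_DJ_SHORT:
                    return size == 15;
            case ID_HIDPP_SHORT:
                    return size == 7;
            case ID_HIDPP_LONG:
                    return size == 20;
            default:
                    return false;
            }
    }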
 
index 2f420c0b6609b1f197a15be182d07102323b3973..a93cefe0e522e66fe670a28a269f13da9ff25d5d 100644 (file)
@@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
                (report->rap.sub_id == 0x41);
 }
 
+/**
+ * hidpp_prefix_name() prefixes the given name with "Logitech ".
+ */
+static void hidpp_prefix_name(char **name, int name_length)
+{
+#define PREFIX_LENGTH 9 /* "Logitech " */
+
+       int new_length;
+       char *new_name;
+
+       if (name_length > PREFIX_LENGTH &&
+           strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0)
+               /* The prefix is already in the name */
+               return;
+
+       new_length = PREFIX_LENGTH + name_length;
+       new_name = kzalloc(new_length, GFP_KERNEL);
+       if (!new_name)
+               return;
+
+       snprintf(new_name, new_length, "Logitech %s", *name);
+
+       kfree(*name);
+
+       *name = new_name;
+}
+
 /* -------------------------------------------------------------------------- */
 /* HID++ 1.0 commands                                                         */
 /* -------------------------------------------------------------------------- */
@@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
                return NULL;
 
        memcpy(name, &response.rap.params[2], len);
+
+       /* include the terminating '\0' */
+       hidpp_prefix_name(&name, len + 1);
+
        return name;
 }
 
@@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
                index += ret;
        }
 
+       /* include the terminating '\0' */
+       hidpp_prefix_name(&name, __name_length + 1);
+
        return name;
 }
 
@@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
 
        switch (data[0]) {
        case 0x02:
+               if (size < 2) {
+                       hid_err(hdev, "Received HID report of bad size (%d)",
+                               size);
+                       return 1;
+               }
                if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
                        input_event(wd->input, EV_KEY, BTN_LEFT,
                                        !!(data[1] & 0x01));
                        input_event(wd->input, EV_KEY, BTN_RIGHT,
                                        !!(data[1] & 0x02));
                        input_sync(wd->input);
+                       return 0;
                } else {
                        if (size < 21)
                                return 1;
                        return wtp_mouse_raw_xy_event(hidpp, &data[7]);
                }
        case REPORT_ID_HIDPP_LONG:
+               /* size is already checked in hidpp_raw_event. */
                if ((report->fap.feature_index != wd->mt_feature_index) ||
                    (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
                        return 1;
index 1a07e07d99a06c8972a2d80b8fefa8aa4f4b3848..47d7e74231e5a3245461eb5f34a3acecc5bd67d1 100644 (file)
@@ -35,6 +35,8 @@ static struct class *pyra_class;
 static void profile_activated(struct pyra_device *pyra,
                unsigned int new_profile)
 {
+       if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return;
        pyra->actual_profile = new_profile;
        pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
 }
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
        if (off != 0 || count != PYRA_SIZE_SETTINGS)
                return -EINVAL;
 
-       mutex_lock(&pyra->pyra_lock);
-
        settings = (struct pyra_settings const *)buf;
+       if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return -EINVAL;
+
+       mutex_lock(&pyra->pyra_lock);
 
        retval = pyra_set_settings(usb_dev, settings);
        if (retval) {
index d32037cbf9db5e3bf9b1f4d96a3f8c98259a65a5..d43e967e75339ec7972e734e284c4356e31a4e38 100644 (file)
@@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid)
 
 static void i2c_hid_stop(struct hid_device *hid)
 {
-       struct i2c_client *client = hid->driver_data;
-       struct i2c_hid *ihid = i2c_get_clientdata(client);
-
        hid->claimed = 0;
-
-       i2c_hid_free_buffers(ihid);
 }
 
 static int i2c_hid_open(struct hid_device *hid)
index dc89be90b35e80f7d14d5dcaa71dda082c8ab84b..b27b3d33ebab02b9afb7d9fc9dad2e99b31afb14 100644 (file)
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
index e37412da15f5c8ea300c7a096368d564a0acfba0..b99de00e57b86ce8164eb0b3ad02a8de11b8597e 100644 (file)
@@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val)
        case ad7998:
                return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
                        val);
-       default:
+       case ad7992:
+       case ad7993:
+       case ad7994:
                return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
                        val);
+       default:
+               /* Will be written when doing a conversion */
+               st->config = val;
+               return 0;
        }
 }
 
@@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st)
        case ad7997:
        case ad7998:
                return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
-       default:
+       case ad7992:
+       case ad7993:
+       case ad7994:
                return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
+       default:
+               /* No readback support */
+               return st->config;
        }
 }
 
index 866fe904cba29e9f9f06d26fb0da16a9ce62b4d4..90c8cb727cc700b63f25c451c5eb4758bb3e400b 100644 (file)
@@ -449,6 +449,9 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
        if (val2 == NULL)
                val2 = &unused;
 
+       if (!iio_channel_has_info(chan->channel, info))
+               return -EINVAL;
+
        if (chan->indio_dev->info->read_raw_multi) {
                ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
                                        chan->channel, INDIO_MAX_RAW_ELEMENTS,
index 8afa28e4570ed099bb3fb9fc4b2d7e1c1a5ba9d6..18d4b2c8fe55092aa8e31c411faf051d968fb49d 100644 (file)
 #include <linux/cdev.h>
 #include "input-compat.h"
 
+enum evdev_clock_type {
+       EV_CLK_REAL = 0,
+       EV_CLK_MONO,
+       EV_CLK_BOOT,
+       EV_CLK_MAX
+};
+
 struct evdev {
        int open;
        struct input_handle handle;
@@ -49,12 +56,32 @@ struct evdev_client {
        struct fasync_struct *fasync;
        struct evdev *evdev;
        struct list_head node;
-       int clkid;
+       int clk_type;
        bool revoked;
        unsigned int bufsize;
        struct input_event buffer[];
 };
 
+static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
+{
+       switch (clkid) {
+
+       case CLOCK_REALTIME:
+               client->clk_type = EV_CLK_REAL;
+               break;
+       case CLOCK_MONOTONIC:
+               client->clk_type = EV_CLK_MONO;
+               break;
+       case CLOCK_BOOTTIME:
+               client->clk_type = EV_CLK_BOOT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /* flush queued events of type @type, caller must hold client->buffer_lock */
 static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
 {
@@ -108,8 +135,11 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
        struct input_event ev;
        ktime_t time;
 
-       time = (client->clkid == CLOCK_MONOTONIC) ?
-               ktime_get() : ktime_get_real();
+       time = client->clk_type == EV_CLK_REAL ?
+                       ktime_get_real() :
+                       client->clk_type == EV_CLK_MONO ?
+                               ktime_get() :
+                               ktime_get_boottime();
 
        ev.time = ktime_to_timeval(time);
        ev.type = EV_SYN;
@@ -159,7 +189,7 @@ static void __pass_event(struct evdev_client *client,
 
 static void evdev_pass_values(struct evdev_client *client,
                        const struct input_value *vals, unsigned int count,
-                       ktime_t mono, ktime_t real)
+                       ktime_t *ev_time)
 {
        struct evdev *evdev = client->evdev;
        const struct input_value *v;
@@ -169,8 +199,7 @@ static void evdev_pass_values(struct evdev_client *client,
        if (client->revoked)
                return;
 
-       event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
-                                     mono : real);
+       event.time = ktime_to_timeval(ev_time[client->clk_type]);
 
        /* Interrupts are disabled, just acquire the lock. */
        spin_lock(&client->buffer_lock);
@@ -198,21 +227,22 @@ static void evdev_events(struct input_handle *handle,
 {
        struct evdev *evdev = handle->private;
        struct evdev_client *client;
-       ktime_t time_mono, time_real;
+       ktime_t ev_time[EV_CLK_MAX];
 
-       time_mono = ktime_get();
-       time_real = ktime_mono_to_real(time_mono);
+       ev_time[EV_CLK_MONO] = ktime_get();
+       ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
+       ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
+                                                TK_OFFS_BOOT);
 
        rcu_read_lock();
 
        client = rcu_dereference(evdev->grab);
 
        if (client)
-               evdev_pass_values(client, vals, count, time_mono, time_real);
+               evdev_pass_values(client, vals, count, ev_time);
        else
                list_for_each_entry_rcu(client, &evdev->client_list, node)
-                       evdev_pass_values(client, vals, count,
-                                         time_mono, time_real);
+                       evdev_pass_values(client, vals, count, ev_time);
 
        rcu_read_unlock();
 }
@@ -877,10 +907,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
        case EVIOCSCLOCKID:
                if (copy_from_user(&i, p, sizeof(unsigned int)))
                        return -EFAULT;
-               if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
-                       return -EINVAL;
-               client->clkid = i;
-               return 0;
+
+               return evdev_set_clk_type(client, i);
 
        case EVIOCGKEYCODE:
                return evdev_handle_get_keycode(dev, p);
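The evdev change above turns the two-way CLOCK_MONOTONIC/CLOCK_REALTIME choice into a small clock-type enum: EVIOCSCLOCKID maps the requested clockid (now including CLOCK_BOOTTIME) to an index, one timestamp per supported clock is taken per event batch, and each client's events are stamped from the slot matching its clk_type. A user-space sketch of the same indexing idea; it assumes a Linux libc that exposes CLOCK_BOOTTIME:

    #include <stdio.h>
    #include <time.h>

    enum ev_clk { EV_CLK_REAL, EV_CLK_MONO, EV_CLK_BOOT, EV_CLK_MAX };

    int main(void)
    {
            struct timespec ev_time[EV_CLK_MAX];

            /* one snapshot per supported clock, taken up front */
            clock_gettime(CLOCK_REALTIME,  &ev_time[EV_CLK_REAL]);
            clock_gettime(CLOCK_MONOTONIC, &ev_time[EV_CLK_MONO]);
            clock_gettime(CLOCK_BOOTTIME,  &ev_time[EV_CLK_BOOT]);

            /* a client that requested CLOCK_BOOTTIME via EVIOCSCLOCKID
             * would have its events stamped from this slot */
            enum ev_clk clk = EV_CLK_BOOT;
            printf("%lld.%09ld\n",
                   (long long)ev_time[clk].tv_sec, ev_time[clk].tv_nsec);
            return 0;
    }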
index 04217c2e345c0ddcaa8e5b1d2525dbb14b5854fe..213e3a1903ee1ddecf84a797ba19da6fa8d7b50c 100644 (file)
@@ -1974,18 +1974,22 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
 
        events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
 
-       for (i = 0; i < ABS_CNT; i++) {
-               if (test_bit(i, dev->absbit)) {
-                       if (input_is_mt_axis(i))
-                               events += mt_slots;
-                       else
-                               events++;
+       if (test_bit(EV_ABS, dev->evbit)) {
+               for (i = 0; i < ABS_CNT; i++) {
+                       if (test_bit(i, dev->absbit)) {
+                               if (input_is_mt_axis(i))
+                                       events += mt_slots;
+                               else
+                                       events++;
+                       }
                }
        }
 
-       for (i = 0; i < REL_CNT; i++)
-               if (test_bit(i, dev->relbit))
-                       events++;
+       if (test_bit(EV_REL, dev->evbit)) {
+               for (i = 0; i < REL_CNT; i++)
+                       if (test_bit(i, dev->relbit))
+                               events++;
+       }
 
        /* Make room for KEY and MSC events */
        events += 7;
index 96ee26c555e02dd2b69530382170ae3e365facc4..a5d9b3f3c8714ee5a307e166afa4bf90e23864bf 100644 (file)
@@ -559,6 +559,7 @@ config KEYBOARD_SH_KEYSC
 config KEYBOARD_STMPE
        tristate "STMPE keypad support"
        depends on MFD_STMPE
+       depends on OF
        select INPUT_MATRIXKMAP
        help
          Say Y here if you want to use the keypad controller on STMPE I/O
index d4dd78a7d56b5b1bf3dbe8865025bd5559a82ce2..883d6aed5b9ac12f47bc3a137bbf8e61c0bd1ec2 100644 (file)
 struct gpio_button_data {
        const struct gpio_keys_button *button;
        struct input_dev *input;
-       struct timer_list timer;
-       struct work_struct work;
-       unsigned int timer_debounce;    /* in msecs */
+
+       struct timer_list release_timer;
+       unsigned int release_delay;     /* in msecs, for IRQ-only buttons */
+
+       struct delayed_work work;
+       unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
+
        unsigned int irq;
        spinlock_t lock;
        bool disabled;
@@ -116,11 +120,14 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
 {
        if (!bdata->disabled) {
                /*
-                * Disable IRQ and possible debouncing timer.
+                * Disable IRQ and associated timer/work structure.
                 */
                disable_irq(bdata->irq);
-               if (bdata->timer_debounce)
-                       del_timer_sync(&bdata->timer);
+
+               if (gpio_is_valid(bdata->button->gpio))
+                       cancel_delayed_work_sync(&bdata->work);
+               else
+                       del_timer_sync(&bdata->release_timer);
 
                bdata->disabled = true;
        }
@@ -343,7 +350,7 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
 static void gpio_keys_gpio_work_func(struct work_struct *work)
 {
        struct gpio_button_data *bdata =
-               container_of(work, struct gpio_button_data, work);
+               container_of(work, struct gpio_button_data, work.work);
 
        gpio_keys_gpio_report_event(bdata);
 
@@ -351,13 +358,6 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
                pm_relax(bdata->input->dev.parent);
 }
 
-static void gpio_keys_gpio_timer(unsigned long _data)
-{
-       struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
-
-       schedule_work(&bdata->work);
-}
-
 static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
 {
        struct gpio_button_data *bdata = dev_id;
@@ -366,11 +366,10 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
 
        if (bdata->button->wakeup)
                pm_stay_awake(bdata->input->dev.parent);
-       if (bdata->timer_debounce)
-               mod_timer(&bdata->timer,
-                       jiffies + msecs_to_jiffies(bdata->timer_debounce));
-       else
-               schedule_work(&bdata->work);
+
+       mod_delayed_work(system_wq,
+                        &bdata->work,
+                        msecs_to_jiffies(bdata->software_debounce));
 
        return IRQ_HANDLED;
 }
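
With the timer/work pair collapsed into a single delayed work item, the debounced and non-debounced GPIO paths share one call: a zero software_debounce simply becomes a zero-jiffy delay. A standalone sketch of that property (kernel context assumed; the function name and parameters are placeholders, not driver code):

/* mod_delayed_work() with a 0 delay queues the work immediately, so the
 * old "if (debounce) mod_timer() else schedule_work()" split is no longer
 * needed. */
static void queue_debounced_sketch(struct delayed_work *dwork,
				   unsigned int debounce_ms)
{
	mod_delayed_work(system_wq, dwork, msecs_to_jiffies(debounce_ms));
}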
@@ -408,7 +407,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
                input_event(input, EV_KEY, button->code, 1);
                input_sync(input);
 
-               if (!bdata->timer_debounce) {
+               if (!bdata->release_delay) {
                        input_event(input, EV_KEY, button->code, 0);
                        input_sync(input);
                        goto out;
@@ -417,9 +416,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
                bdata->key_pressed = true;
        }
 
-       if (bdata->timer_debounce)
-               mod_timer(&bdata->timer,
-                       jiffies + msecs_to_jiffies(bdata->timer_debounce));
+       if (bdata->release_delay)
+               mod_timer(&bdata->release_timer,
+                       jiffies + msecs_to_jiffies(bdata->release_delay));
 out:
        spin_unlock_irqrestore(&bdata->lock, flags);
        return IRQ_HANDLED;
@@ -429,10 +428,10 @@ static void gpio_keys_quiesce_key(void *data)
 {
        struct gpio_button_data *bdata = data;
 
-       if (bdata->timer_debounce)
-               del_timer_sync(&bdata->timer);
-
-       cancel_work_sync(&bdata->work);
+       if (gpio_is_valid(bdata->button->gpio))
+               cancel_delayed_work_sync(&bdata->work);
+       else
+               del_timer_sync(&bdata->release_timer);
 }
 
 static int gpio_keys_setup_key(struct platform_device *pdev,
@@ -466,23 +465,25 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
                                        button->debounce_interval * 1000);
                        /* use timer if gpiolib doesn't provide debounce */
                        if (error < 0)
-                               bdata->timer_debounce =
+                               bdata->software_debounce =
                                                button->debounce_interval;
                }
 
-               irq = gpio_to_irq(button->gpio);
-               if (irq < 0) {
-                       error = irq;
-                       dev_err(dev,
-                               "Unable to get irq number for GPIO %d, error %d\n",
-                               button->gpio, error);
-                       return error;
+               if (button->irq) {
+                       bdata->irq = button->irq;
+               } else {
+                       irq = gpio_to_irq(button->gpio);
+                       if (irq < 0) {
+                               error = irq;
+                               dev_err(dev,
+                                       "Unable to get irq number for GPIO %d, error %d\n",
+                                       button->gpio, error);
+                               return error;
+                       }
+                       bdata->irq = irq;
                }
-               bdata->irq = irq;
 
-               INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
-               setup_timer(&bdata->timer,
-                           gpio_keys_gpio_timer, (unsigned long)bdata);
+               INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
 
                isr = gpio_keys_gpio_isr;
                irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -499,8 +500,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
                        return -EINVAL;
                }
 
-               bdata->timer_debounce = button->debounce_interval;
-               setup_timer(&bdata->timer,
+               bdata->release_delay = button->debounce_interval;
+               setup_timer(&bdata->release_timer,
                            gpio_keys_irq_timer, (unsigned long)bdata);
 
                isr = gpio_keys_irq_isr;
@@ -510,7 +511,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
        input_set_capability(input, button->type ?: EV_KEY, button->code);
 
        /*
-        * Install custom action to cancel debounce timer and
+        * Install custom action to cancel release timer and
         * workqueue item.
         */
        error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
@@ -618,33 +619,30 @@ gpio_keys_get_devtree_pdata(struct device *dev)
 
        i = 0;
        for_each_child_of_node(node, pp) {
-               int gpio = -1;
                enum of_gpio_flags flags;
 
                button = &pdata->buttons[i++];
 
-               if (!of_find_property(pp, "gpios", NULL)) {
-                       button->irq = irq_of_parse_and_map(pp, 0);
-                       if (button->irq == 0) {
-                               i--;
-                               pdata->nbuttons--;
-                               dev_warn(dev, "Found button without gpios or irqs\n");
-                               continue;
-                       }
-               } else {
-                       gpio = of_get_gpio_flags(pp, 0, &flags);
-                       if (gpio < 0) {
-                               error = gpio;
+               button->gpio = of_get_gpio_flags(pp, 0, &flags);
+               if (button->gpio < 0) {
+                       error = button->gpio;
+                       if (error != -ENOENT) {
                                if (error != -EPROBE_DEFER)
                                        dev_err(dev,
                                                "Failed to get gpio flags, error: %d\n",
                                                error);
                                return ERR_PTR(error);
                        }
+               } else {
+                       button->active_low = flags & OF_GPIO_ACTIVE_LOW;
                }
 
-               button->gpio = gpio;
-               button->active_low = flags & OF_GPIO_ACTIVE_LOW;
+               button->irq = irq_of_parse_and_map(pp, 0);
+
+               if (!gpio_is_valid(button->gpio) && !button->irq) {
+                       dev_err(dev, "Found button without gpios or irqs\n");
+                       return ERR_PTR(-EINVAL);
+               }
 
                if (of_property_read_u32(pp, "linux,code", &button->code)) {
                        dev_err(dev, "Button without keycode: 0x%x\n",
@@ -659,6 +657,8 @@ gpio_keys_get_devtree_pdata(struct device *dev)
 
                button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
 
+               button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+
                if (of_property_read_u32(pp, "debounce-interval",
                                         &button->debounce_interval))
                        button->debounce_interval = 5;
index 610a8af795a1f5b090b73d7bb8919bf41d3e331a..5b152f25a8e1ff72e613608f08876b3098627469 100644 (file)
@@ -473,7 +473,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
        if (error)
                goto bail1;
 
-       init_completion(&dev->cmd_done);
+       reinit_completion(&dev->cmd_done);
        serio_write(serio, 0);
        serio_write(serio, 0);
        serio_write(serio, HIL_PKT_CMD >> 8);
@@ -482,7 +482,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
        if (error)
                goto bail1;
 
-       init_completion(&dev->cmd_done);
+       reinit_completion(&dev->cmd_done);
        serio_write(serio, 0);
        serio_write(serio, 0);
        serio_write(serio, HIL_PKT_CMD >> 8);
@@ -491,7 +491,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
        if (error)
                goto bail1;
 
-       init_completion(&dev->cmd_done);
+       reinit_completion(&dev->cmd_done);
        serio_write(serio, 0);
        serio_write(serio, 0);
        serio_write(serio, HIL_PKT_CMD >> 8);
index ef5e67fb567e701365767a9949dd3ea722e4f260..fe6e3f22eed76157c42a1d9b873b01e34f38b5f2 100644 (file)
 #define STMPE_KEYPAD_MAX_ROWS          8
 #define STMPE_KEYPAD_MAX_COLS          8
 #define STMPE_KEYPAD_ROW_SHIFT         3
-#define STMPE_KEYPAD_KEYMAP_SIZE       \
+#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
        (STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
 
 /**
  * struct stmpe_keypad_variant - model-specific attributes
  * @auto_increment: whether the KPC_DATA_BYTE register address
  *                 auto-increments on multiple read
+ * @set_pullup: whether the pins need to have their pull-ups set
  * @num_data: number of data bytes
  * @num_normal_data: number of normal keys' data bytes
  * @max_cols: maximum number of columns supported
@@ -61,6 +62,7 @@
  */
 struct stmpe_keypad_variant {
        bool            auto_increment;
+       bool            set_pullup;
        int             num_data;
        int             num_normal_data;
        int             max_cols;
@@ -81,6 +83,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
        },
        [STMPE2401] = {
                .auto_increment         = false,
+               .set_pullup             = true,
                .num_data               = 3,
                .num_normal_data        = 2,
                .max_cols               = 8,
@@ -90,6 +93,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
        },
        [STMPE2403] = {
                .auto_increment         = true,
+               .set_pullup             = true,
                .num_data               = 5,
                .num_normal_data        = 3,
                .max_cols               = 8,
@@ -99,16 +103,30 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
        },
 };
 
+/**
+ * struct stmpe_keypad - STMPE keypad state container
+ * @stmpe: pointer to parent STMPE device
+ * @input: spawned input device
+ * @variant: STMPE variant
+ * @debounce_ms: debounce interval, in ms.  Maximum is
+ *              %STMPE_KEYPAD_MAX_DEBOUNCE.
+ * @scan_count: number of key scanning cycles to confirm key data.
+ *             Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
+ * @no_autorepeat: disable key autorepeat
+ * @rows: bitmask for the rows
+ * @cols: bitmask for the columns
+ * @keymap: the keymap
+ */
 struct stmpe_keypad {
        struct stmpe *stmpe;
        struct input_dev *input;
        const struct stmpe_keypad_variant *variant;
-       const struct stmpe_keypad_platform_data *plat;
-
+       unsigned int debounce_ms;
+       unsigned int scan_count;
+       bool no_autorepeat;
        unsigned int rows;
        unsigned int cols;
-
-       unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
+       unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
 };
 
 static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
@@ -171,7 +189,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
        unsigned int col_gpios = variant->col_gpios;
        unsigned int row_gpios = variant->row_gpios;
        struct stmpe *stmpe = keypad->stmpe;
+       u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
        unsigned int pins = 0;
+       unsigned int pu_pins = 0;
+       int ret;
        int i;
 
        /*
@@ -188,8 +209,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
        for (i = 0; i < variant->max_cols; i++) {
                int num = __ffs(col_gpios);
 
-               if (keypad->cols & (1 << i))
+               if (keypad->cols & (1 << i)) {
                        pins |= 1 << num;
+                       pu_pins |= 1 << num;
+               }
 
                col_gpios &= ~(1 << num);
        }
@@ -203,20 +226,43 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
                row_gpios &= ~(1 << num);
        }
 
-       return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+       ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+       if (ret)
+               return ret;
+
+       /*
+        * On STMPE24xx, set pin bias to pull-up on all keypad input
+        * pins (columns); these happen to be at most 8 pins placed at
+        * GPIO0-7, so only the LSB of the pull-up register ever needs
+        * to be written.
+        */
+       if (variant->set_pullup) {
+               u8 val;
+
+               ret = stmpe_reg_read(stmpe, pureg);
+               if (ret)
+                       return ret;
+
+       /* Do not touch unused pins; they may be used as GPIO */
+               val = ret & ~pu_pins;
+               val |= pu_pins;
+
+               ret = stmpe_reg_write(stmpe, pureg, val);
+       }
+
+       return 0;
 }
 
 static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
 {
-       const struct stmpe_keypad_platform_data *plat = keypad->plat;
        const struct stmpe_keypad_variant *variant = keypad->variant;
        struct stmpe *stmpe = keypad->stmpe;
        int ret;
 
-       if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
+       if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
                return -EINVAL;
 
-       if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
+       if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
                return -EINVAL;
 
        ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
@@ -245,7 +291,7 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
 
        ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
                             STMPE_KPC_CTRL_MSB_SCAN_COUNT,
-                            plat->scan_count << 4);
+                            keypad->scan_count << 4);
        if (ret < 0)
                return ret;
 
@@ -253,17 +299,18 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
                              STMPE_KPC_CTRL_LSB_SCAN |
                              STMPE_KPC_CTRL_LSB_DEBOUNCE,
                              STMPE_KPC_CTRL_LSB_SCAN |
-                             (plat->debounce_ms << 1));
+                             (keypad->debounce_ms << 1));
 }
 
-static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
+                                       u32 used_rows, u32 used_cols)
 {
        int row, col;
 
-       for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
-               for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+       for (row = 0; row < used_rows; row++) {
+               for (col = 0; col < used_cols; col++) {
                        int code = MATRIX_SCAN_CODE(row, col,
-                                               STMPE_KEYPAD_ROW_SHIFT);
+                                                   STMPE_KEYPAD_ROW_SHIFT);
                        if (keypad->keymap[code] != KEY_RESERVED) {
                                keypad->rows |= 1 << row;
                                keypad->cols |= 1 << col;
@@ -272,51 +319,17 @@ static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
        }
 }
 
-#ifdef CONFIG_OF
-static const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
-       struct device_node *np = dev->of_node;
-       struct stmpe_keypad_platform_data *plat;
-
-       if (!np)
-               return ERR_PTR(-ENODEV);
-
-       plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
-       if (!plat)
-               return ERR_PTR(-ENOMEM);
-
-       of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
-       of_property_read_u32(np, "st,scan-count", &plat->scan_count);
-
-       plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
-
-       return plat;
-}
-#else
-static inline const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
-       return ERR_PTR(-EINVAL);
-}
-#endif
-
 static int stmpe_keypad_probe(struct platform_device *pdev)
 {
        struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
-       const struct stmpe_keypad_platform_data *plat;
+       struct device_node *np = pdev->dev.of_node;
        struct stmpe_keypad *keypad;
        struct input_dev *input;
+       u32 rows;
+       u32 cols;
        int error;
        int irq;
 
-       plat = stmpe->pdata->keypad;
-       if (!plat) {
-               plat = stmpe_keypad_of_probe(&pdev->dev);
-               if (IS_ERR(plat))
-                       return PTR_ERR(plat);
-       }
-
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
@@ -326,6 +339,13 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
        if (!keypad)
                return -ENOMEM;
 
+       keypad->stmpe = stmpe;
+       keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
+
+       of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
+       of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
+       keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
        input = devm_input_allocate_device(&pdev->dev);
        if (!input)
                return -ENOMEM;
@@ -334,23 +354,22 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
        input->id.bustype = BUS_I2C;
        input->dev.parent = &pdev->dev;
 
-       error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
-                                          STMPE_KEYPAD_MAX_ROWS,
-                                          STMPE_KEYPAD_MAX_COLS,
+       error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
+       if (error)
+               return error;
+
+       error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
                                           keypad->keymap, input);
        if (error)
                return error;
 
        input_set_capability(input, EV_MSC, MSC_SCAN);
-       if (!plat->no_autorepeat)
+       if (!keypad->no_autorepeat)
                __set_bit(EV_REP, input->evbit);
 
-       stmpe_keypad_fill_used_pins(keypad);
+       stmpe_keypad_fill_used_pins(keypad, rows, cols);
 
-       keypad->stmpe = stmpe;
-       keypad->plat = plat;
        keypad->input = input;
-       keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
 
        error = stmpe_keypad_chip_init(keypad);
        if (error < 0)
index d125a019383f10155dcafb88903f47e0f5297080..d88d73d835526a16d2e5e4e48c6a2562c802a4cf 100644 (file)
@@ -881,6 +881,34 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
                                          unsigned char *pkt,
                                          unsigned char pkt_id)
 {
+       /*
+        *       packet-fmt    b7   b6    b5   b4   b3   b2   b1   b0
+        * Byte0 TWO & MULTI    L    1     R    M    1 Y0-2 Y0-1 Y0-0
+        * Byte0 NEW            L    1  X1-5    1    1 Y0-2 Y0-1 Y0-0
+        * Byte1            Y0-10 Y0-9  Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
+        * Byte2            X0-11    1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
+        * Byte3            X1-11    1  X0-4 X0-3    1 X0-2 X0-1 X0-0
+        * Byte4 TWO        X1-10  TWO  X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
+        * Byte4 MULTI      X1-10  TWO  X1-9 X1-8 X1-7 X1-6 Y1-5    1
+        * Byte4 NEW        X1-10  TWO  X1-9 X1-8 X1-7 X1-6    0    0
+        * Byte5 TWO & NEW  Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
+        * Byte5 MULTI      Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6  F-1  F-0
+        * L:         Left button
+        * R / M:     Non-clickpads: Right / Middle button
+        *            Clickpads: When > 2 fingers are down, and some fingers
+        *            are in the button area, then the 2 coordinates reported
+        *            are for fingers outside the button area and these report
+        *            extra fingers being present in the right / left button
+        *            area. Note that these fingers are not added to the F field,
+        *            so if a TWO packet is received and R = 1 there are
+        *            3 fingers down, etc.
+        * TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
+        *            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+        *               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+        *               in NEW fmt
+        * F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
+        */
+
        mt[0].x = ((pkt[2] & 0x80) << 4);
        mt[0].x |= ((pkt[2] & 0x3F) << 5);
        mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -919,18 +947,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
 
 static int alps_get_mt_count(struct input_mt_pos *mt)
 {
-       int i;
+       int i, fingers = 0;
 
-       for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
-               /* empty */;
+       for (i = 0; i < MAX_TOUCHES; i++) {
+               if (mt[i].x != 0 || mt[i].y != 0)
+                       fingers++;
+       }
 
-       return i;
+       return fingers;
 }
 
 static int alps_decode_packet_v7(struct alps_fields *f,
                                  unsigned char *p,
                                  struct psmouse *psmouse)
 {
+       struct alps_data *priv = psmouse->private;
        unsigned char pkt_id;
 
        pkt_id = alps_get_packet_id_v7(p);
@@ -938,19 +969,52 @@ static int alps_decode_packet_v7(struct alps_fields *f,
                return 0;
        if (pkt_id == V7_PACKET_ID_UNKNOWN)
                return -1;
+       /*
+        * NEW packets are sent to indicate a discontinuity in the finger
+        * coordinate reporting. Specifically a finger may have moved from
+        * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
+        * us.
+        *
+        * NEW packets have 3 problems:
+        * 1) They do not contain middle / right button info (on non clickpads)
+        *    this can be worked around by preserving the old button state
+        * 2) They do not contain an accurate finger count, and they are
+        *    typically sent when the number of fingers changes. We cannot use
+        *    the old finger count as it may not match the number of touch
+        *    coordinates available in the NEW packet
+        * 3) Their x data for the second touch is inaccurate leading to
+        *    a possible jump of the x coordinate by 16 units when the first
+        *    non NEW packet comes in
+        * Since problems 2 & 3 cannot be worked around, just ignore them.
+        */
+       if (pkt_id == V7_PACKET_ID_NEW)
+               return 1;
 
        alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
 
-       if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
-               f->left = (p[0] & 0x80) >> 7;
+       if (pkt_id == V7_PACKET_ID_TWO)
+               f->fingers = alps_get_mt_count(f->mt);
+       else /* pkt_id == V7_PACKET_ID_MULTI */
+               f->fingers = 3 + (p[5] & 0x03);
+
+       f->left = (p[0] & 0x80) >> 7;
+       if (priv->flags & ALPS_BUTTONPAD) {
+               if (p[0] & 0x20)
+                       f->fingers++;
+               if (p[0] & 0x10)
+                       f->fingers++;
+       } else {
                f->right = (p[0] & 0x20) >> 5;
                f->middle = (p[0] & 0x10) >> 4;
        }
 
-       if (pkt_id == V7_PACKET_ID_TWO)
-               f->fingers = alps_get_mt_count(f->mt);
-       else if (pkt_id == V7_PACKET_ID_MULTI)
-               f->fingers = 3 + (p[5] & 0x03);
+       /* Sometimes a single touch is reported in mt[1] rather than mt[0] */
+       if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
+               f->mt[0].x = f->mt[1].x;
+               f->mt[0].y = f->mt[1].y;
+               f->mt[1].x = 0;
+               f->mt[1].y = 0;
+       }
 
        return 0;
 }
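
A worked example of the finger-count arithmetic above, using illustrative packet bytes (not taken from a real trace):

/* MULTI packet with p[5] & 0x03 == 1  ->  f->fingers = 3 + 1 = 4.
 * On a clickpad (ALPS_BUTTONPAD) the p[0] bits 0x20 and 0x10 report extra
 * fingers in the button area instead of right/middle buttons, so with
 * p[0] & 0x20 also set the count becomes 5; on a non-clickpad the same bit
 * would set f->right instead. */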
index 30c8b6998808fa452a19e437c33bd7e9db8a0888..354d47ecd66a01c8b0ab732eb038bbad19857d19 100644 (file)
@@ -227,6 +227,7 @@ TRACKPOINT_INT_ATTR(thresh, TP_THRESH, TP_DEF_THRESH);
 TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
 TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
 TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
+TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
 
 TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
                    TP_DEF_PTSON);
@@ -246,6 +247,7 @@ static struct attribute *trackpoint_attrs[] = {
        &psmouse_attr_upthresh.dattr.attr,
        &psmouse_attr_ztime.dattr.attr,
        &psmouse_attr_jenks.dattr.attr,
+       &psmouse_attr_drift_time.dattr.attr,
        &psmouse_attr_press_to_select.dattr.attr,
        &psmouse_attr_skipback.dattr.attr,
        &psmouse_attr_ext_dev.dattr.attr,
@@ -312,6 +314,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
        TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
        TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
        TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
+       TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);
 
        /* toggles */
        TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);
@@ -332,6 +335,7 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
        TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
        TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
        TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
+       TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
        TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);
 
        /* toggles */
index ecd0547964a570048a2ad9ecf228d9eda98290a3..5617ed3a7d7a15d0f9769086afb8c740b5739fcc 100644 (file)
@@ -70,6 +70,9 @@
 #define TP_UP_THRESH           0x5A    /* Used to generate a 'click' on Z-axis */
 #define TP_Z_TIME              0x5E    /* How sharp of a press */
 #define TP_JENKS_CURV          0x5D    /* Minimum curvature for double click */
+#define TP_DRIFT_TIME          0x5F    /* How long a 'hands off' condition */
+                                       /* must last (x*107ms) for drift */
+                                       /* correction to occur */
 
 /*
  * Toggling Flag bits
 #define TP_DEF_UP_THRESH       0xFF
 #define TP_DEF_Z_TIME          0x26
 #define TP_DEF_JENKS_CURV      0x87
+#define TP_DEF_DRIFT_TIME      0x05
 
 /* Toggles */
 #define TP_DEF_MB              0x00
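
A quick sanity check of the new default, assuming the x*107ms unit documented next to TP_DRIFT_TIME above:

/* TP_DEF_DRIFT_TIME = 0x05  ->  5 * 107 ms ~= 535 ms of a 'hands off'
 * condition before drift correction kicks in. */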
@@ -137,6 +141,7 @@ struct trackpoint_data
        unsigned char draghys, mindrag;
        unsigned char thresh, upthresh;
        unsigned char ztime, jenks;
+       unsigned char drift_time;
 
        /* toggles */
        unsigned char press_to_select;
index bb070206223c1e9eeee4f4964a8f9f6ed848806b..95ee92a91bd21353bf58020c9d87fda42fad6633 100644 (file)
 #define MXT_T6_STATUS_COMSERR  (1 << 2)
 
 /* MXT_GEN_POWER_T7 field */
-struct t7_config {
-       u8 idle;
-       u8 active;
-} __packed;
-
-#define MXT_POWER_CFG_RUN              0
-#define MXT_POWER_CFG_DEEPSLEEP                1
+#define MXT_POWER_IDLEACQINT   0
+#define MXT_POWER_ACTVACQINT   1
+#define MXT_POWER_ACTV2IDLETO  2
 
 /* MXT_GEN_ACQUIRE_T8 field */
 #define MXT_ACQUIRE_CHRGTIME   0
@@ -117,6 +113,7 @@ struct t7_config {
 #define MXT_ACQUIRE_ATCHCALSTHR        7
 
 /* MXT_TOUCH_MULTI_T9 field */
+#define MXT_TOUCH_CTRL         0
 #define MXT_T9_ORIENT          9
 #define MXT_T9_RANGE           18
 
@@ -256,7 +253,6 @@ struct mxt_data {
        bool update_input;
        u8 last_message_count;
        u8 num_touchids;
-       struct t7_config t7_cfg;
 
        /* Cached parameters from object table */
        u16 T5_address;
@@ -672,6 +668,20 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
        data->t6_status = status;
 }
 
+static int mxt_write_object(struct mxt_data *data,
+                                u8 type, u8 offset, u8 val)
+{
+       struct mxt_object *object;
+       u16 reg;
+
+       object = mxt_get_object(data, type);
+       if (!object || offset >= mxt_obj_size(object))
+               return -EINVAL;
+
+       reg = object->start_address;
+       return mxt_write_reg(data->client, reg + offset, val);
+}
+
 static void mxt_input_button(struct mxt_data *data, u8 *message)
 {
        struct input_dev *input = data->input_dev;
@@ -1742,60 +1752,6 @@ err_free_object_table:
        return error;
 }
 
-static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
-{
-       struct device *dev = &data->client->dev;
-       int error;
-       struct t7_config *new_config;
-       struct t7_config deepsleep = { .active = 0, .idle = 0 };
-
-       if (sleep == MXT_POWER_CFG_DEEPSLEEP)
-               new_config = &deepsleep;
-       else
-               new_config = &data->t7_cfg;
-
-       error = __mxt_write_reg(data->client, data->T7_address,
-                               sizeof(data->t7_cfg), new_config);
-       if (error)
-               return error;
-
-       dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
-               new_config->active, new_config->idle);
-
-       return 0;
-}
-
-static int mxt_init_t7_power_cfg(struct mxt_data *data)
-{
-       struct device *dev = &data->client->dev;
-       int error;
-       bool retry = false;
-
-recheck:
-       error = __mxt_read_reg(data->client, data->T7_address,
-                               sizeof(data->t7_cfg), &data->t7_cfg);
-       if (error)
-               return error;
-
-       if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
-               if (!retry) {
-                       dev_dbg(dev, "T7 cfg zero, resetting\n");
-                       mxt_soft_reset(data);
-                       retry = true;
-                       goto recheck;
-               } else {
-                       dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
-                       data->t7_cfg.active = 20;
-                       data->t7_cfg.idle = 100;
-                       return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
-               }
-       }
-
-       dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
-               data->t7_cfg.active, data->t7_cfg.idle);
-       return 0;
-}
-
 static int mxt_configure_objects(struct mxt_data *data,
                                 const struct firmware *cfg)
 {
@@ -1809,12 +1765,6 @@ static int mxt_configure_objects(struct mxt_data *data,
                        dev_warn(dev, "Error %d updating config\n", error);
        }
 
-       error = mxt_init_t7_power_cfg(data);
-       if (error) {
-               dev_err(dev, "Failed to initialize power cfg\n");
-               return error;
-       }
-
        error = mxt_initialize_t9_input_device(data);
        if (error)
                return error;
@@ -2093,15 +2043,16 @@ static const struct attribute_group mxt_attr_group = {
 
 static void mxt_start(struct mxt_data *data)
 {
-       mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
-
-       /* Recalibrate since chip has been in deep sleep */
-       mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+       /* Touch enable */
+       mxt_write_object(data,
+                       MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
 }
 
 static void mxt_stop(struct mxt_data *data)
 {
-       mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+       /* Touch disable */
+       mxt_write_object(data,
+                       MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
 }
 
 static int mxt_input_open(struct input_dev *dev)
@@ -2266,6 +2217,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
        struct mxt_data *data = i2c_get_clientdata(client);
        struct input_dev *input_dev = data->input_dev;
 
+       mxt_soft_reset(data);
+
        mutex_lock(&input_dev->mutex);
 
        if (input_dev->users)
index 3793fcc7e5db31117404e819272da27f4fd15d90..d4c24fb7704f5e2d87f299ac01e2aa2ba5866521 100644 (file)
@@ -850,9 +850,11 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
 }
 
 #define EDT_ATTR_CHECKSET(name, reg) \
+do {                                                           \
        if (pdata->name >= edt_ft5x06_attr_##name.limit_low &&          \
            pdata->name <= edt_ft5x06_attr_##name.limit_high)           \
-               edt_ft5x06_register_write(tsdata, reg, pdata->name)
+               edt_ft5x06_register_write(tsdata, reg, pdata->name);    \
+} while (0)
 
 #define EDT_GET_PROP(name, reg) {                              \
        u32 val;                                                \
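
The do { } while (0) wrapper added to EDT_ATTR_CHECKSET matters because the macro expands to an if statement; used bare under an if/else it would steal the caller's else clause. A self-contained illustration with placeholder macros (SET_BROKEN/SET_FIXED are not part of the driver):

#include <stdio.h>

#define SET_BROKEN(v)  if ((v) > 0) printf("set %d\n", (v))
#define SET_FIXED(v)   do { if ((v) > 0) printf("set %d\n", (v)); } while (0)

int main(void)
{
	int v = -1;

	if (v > 10)
		SET_BROKEN(v);		/* the macro's hidden 'if' captures... */
	else
		printf("skipped\n");	/* ...this else, so nothing is printed */

	if (v > 10)
		SET_FIXED(v);
	else
		printf("skipped\n");	/* with the wrapper the else binds correctly */

	return 0;
}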
index 1232336b960edb163278d59510a56e51840df9b9..40dfbc0444c0eaccdeca8cebfcd5f567e3d84ae4 100644 (file)
@@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb,
        if (action != BUS_NOTIFY_REMOVED_DEVICE)
                return 0;
 
-       /*
-        * If the device is still attached to a device driver we can't
-        * tear down the domain yet as DMA mappings may still be in use.
-        * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
-        */
-       if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
-               return 0;
-
        domain = find_domain(dev);
        if (!domain)
                return 0;
@@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                                domain_remove_one_dev_info(old_domain, dev);
                        else
                                domain_remove_dev_info(old_domain);
+
+                       if (!domain_type_is_vm_or_si(old_domain) &&
+                            list_empty(&old_domain->devices))
+                               domain_exit(old_domain);
                }
        }
 
index 68dfb0fd5ee9af38f6586ea994ef4a6ba997b282..748693192c20a0dd862f799ace8d20450bc436fe 100644 (file)
@@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
 
 static u64 ipmmu_page_prot(unsigned int prot, u64 type)
 {
-       u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+       u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
                   | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
                   | ARM_VMSA_PTE_NS | type;
 
@@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type)
        if (prot & IOMMU_CACHE)
                pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
 
-       if (prot & IOMMU_EXEC)
-               pgprot &= ~ARM_VMSA_PTE_XN;
+       if (prot & IOMMU_NOEXEC)
+               pgprot |= ARM_VMSA_PTE_XN;
        else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
                /* If no access create a faulting entry to avoid TLB fills. */
                pgprot &= ~ARM_VMSA_PTE_PAGE;
index b2023af384b9be0852c04195f75cea93582d47a8..6a8b1ec4a48a1f1100bc0f9f301fc658758a35ca 100644 (file)
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
        .remove = rk_iommu_remove,
        .driver = {
                   .name = "rk_iommu",
-                  .owner = THIS_MODULE,
                   .of_match_table = of_match_ptr(rk_iommu_dt_ids),
        },
 };
index a82e542ffc21dd4dccf9d1b810ab9373984c1d0d..0b380603a578543bcd275d248ade94f3f07b24a8 100644 (file)
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
        byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
        byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
        byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
-       byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
+       byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
        byte force_mt_info = false;
        byte dir;
        dword d;
index 26515c27ea8cca18a2e5f5ac24f82a6158194182..25e419752a7b7c5719baa69bd114d57720a62f92 100644 (file)
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
        led_dat->sata = 0;
        led_dat->cdev.brightness = LED_OFF;
        led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
-       /*
-        * If available, expose the SATA activity blink capability through
-        * a "sata" sysfs attribute.
-        */
-       if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
-               led_dat->cdev.groups = netxbig_led_groups;
        led_dat->mode_addr = template->mode_addr;
        led_dat->mode_val = template->mode_val;
        led_dat->bright_addr = template->bright_addr;
        led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
        led_dat->timer = pdata->timer;
        led_dat->num_timer = pdata->num_timer;
+       /*
+        * If available, expose the SATA activity blink capability through
+        * a "sata" sysfs attribute.
+        */
+       if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+               led_dat->cdev.groups = netxbig_led_groups;
 
        return led_classdev_register(&pdev->dev, &led_dat->cdev);
 }
index f956ef26c0ce2ddc1f81014b8ab5e0b658eb9977..fb7493dcfb79f409a8f483480f1c8d50bdba2278 100644 (file)
@@ -7,6 +7,7 @@
 #define PCI_DEVICE_ID_MEN_CHAMELEON    0x4d45
 #define CHAMELEON_FILENAME_LEN         12
 #define CHAMELEONV2_MAGIC              0xabce
+#define CHAM_HEADER_SIZE               0x200
 
 enum chameleon_descriptor_type {
        CHAMELEON_DTYPE_GENERAL = 0x0,
index b5918196564376b028e825ccc75131ae74b1cdd0..5e1bd5db02c8ee7f21e0de9b778d2d1bb17a2d6c 100644 (file)
@@ -17,6 +17,7 @@
 
 struct priv {
        struct mcb_bus *bus;
+       phys_addr_t mapbase;
        void __iomem *base;
 };
 
@@ -31,8 +32,8 @@ static int mcb_pci_get_irq(struct mcb_device *mdev)
 
 static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+       struct resource *res;
        struct priv *priv;
-       phys_addr_t mapbase;
        int ret;
        int num_cells;
        unsigned long flags;
@@ -47,19 +48,21 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENODEV;
        }
 
-       mapbase = pci_resource_start(pdev, 0);
-       if (!mapbase) {
+       priv->mapbase = pci_resource_start(pdev, 0);
+       if (!priv->mapbase) {
                dev_err(&pdev->dev, "No PCI resource\n");
                goto err_start;
        }
 
-       ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to request PCI BARs\n");
+       res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
+                                KBUILD_MODNAME);
+       if (IS_ERR(res)) {
+               dev_err(&pdev->dev, "Failed to request PCI memory\n");
+               ret = PTR_ERR(res);
                goto err_start;
        }
 
-       priv->base = pci_iomap(pdev, 0, 0);
+       priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE);
        if (!priv->base) {
                dev_err(&pdev->dev, "Cannot ioremap\n");
                ret = -ENOMEM;
@@ -84,7 +87,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        priv->bus->get_irq = mcb_pci_get_irq;
 
-       ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
+       ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
        if (ret < 0)
                goto err_drvdata;
        num_cells = ret;
@@ -93,8 +96,10 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        mcb_bus_add_devices(priv->bus);
 
+       return 0;
+
 err_drvdata:
-       pci_iounmap(pdev, priv->base);
+       iounmap(priv->base);
 err_ioremap:
        pci_release_region(pdev, 0);
 err_start:
@@ -107,6 +112,10 @@ static void mcb_pci_remove(struct pci_dev *pdev)
        struct priv *priv = pci_get_drvdata(pdev);
 
        mcb_release_bus(priv->bus);
+
+       iounmap(priv->base);
+       release_region(priv->mapbase, CHAM_HEADER_SIZE);
+       pci_disable_device(pdev);
 }
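
The probe/remove changes above stop claiming the whole BAR and instead request and ioremap only the 0x200-byte chameleon header at the start of BAR 0. A condensed sketch of that strategy, with a placeholder function name and error handling reduced to NULL checks:

static void __iomem *map_cham_header_sketch(struct pci_dev *pdev,
					    phys_addr_t *mapbase)
{
	*mapbase = pci_resource_start(pdev, 0);
	if (!*mapbase)
		return NULL;

	/* Claim only the header window, not the full BAR, so the windows
	 * described by the chameleon table remain available to MCB devices. */
	if (!request_mem_region(*mapbase, CHAM_HEADER_SIZE, KBUILD_MODNAME))
		return NULL;

	return ioremap(*mapbase, CHAM_HEADER_SIZE);
}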
 
 static const struct pci_device_id mcb_pci_tbl[] = {
index e2f9df1c0c361f0d0e66bf782817af6170e49dc5..2d7fae94c861013c594463705e05ea413275e4d0 100644 (file)
@@ -519,6 +519,7 @@ static const u8 stmpe1601_regs[] = {
        [STMPE_IDX_GPDR_LSB]    = STMPE1601_REG_GPIO_SET_DIR_LSB,
        [STMPE_IDX_GPRER_LSB]   = STMPE1601_REG_GPIO_RE_LSB,
        [STMPE_IDX_GPFER_LSB]   = STMPE1601_REG_GPIO_FE_LSB,
+       [STMPE_IDX_GPPUR_LSB]   = STMPE1601_REG_GPIO_PU_LSB,
        [STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
        [STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
@@ -667,6 +668,7 @@ static const u8 stmpe1801_regs[] = {
        [STMPE_IDX_GPDR_LSB]    = STMPE1801_REG_GPIO_SET_DIR_LOW,
        [STMPE_IDX_GPRER_LSB]   = STMPE1801_REG_GPIO_RE_LOW,
        [STMPE_IDX_GPFER_LSB]   = STMPE1801_REG_GPIO_FE_LOW,
+       [STMPE_IDX_GPPUR_LSB]   = STMPE1801_REG_GPIO_PULL_UP_LOW,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
        [STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
 };
@@ -750,6 +752,8 @@ static const u8 stmpe24xx_regs[] = {
        [STMPE_IDX_GPDR_LSB]    = STMPE24XX_REG_GPDR_LSB,
        [STMPE_IDX_GPRER_LSB]   = STMPE24XX_REG_GPRER_LSB,
        [STMPE_IDX_GPFER_LSB]   = STMPE24XX_REG_GPFER_LSB,
+       [STMPE_IDX_GPPUR_LSB]   = STMPE24XX_REG_GPPUR_LSB,
+       [STMPE_IDX_GPPDR_LSB]   = STMPE24XX_REG_GPPDR_LSB,
        [STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
        [STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
        [STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
index bee0abf82040001664c07e82c646e0e3e5afc259..84adb46b3e2fea599f069072f2f48190602092e9 100644 (file)
@@ -188,6 +188,7 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE1601_REG_GPIO_ED_MSB              0x8A
 #define STMPE1601_REG_GPIO_RE_LSB              0x8D
 #define STMPE1601_REG_GPIO_FE_LSB              0x8F
+#define STMPE1601_REG_GPIO_PU_LSB              0x91
 #define STMPE1601_REG_GPIO_AF_U_MSB            0x92
 
 #define STMPE1601_SYS_CTRL_ENABLE_GPIO         (1 << 3)
@@ -276,6 +277,8 @@ int stmpe_remove(struct stmpe *stmpe);
 #define STMPE24XX_REG_GPEDR_MSB                0x8C
 #define STMPE24XX_REG_GPRER_LSB                0x91
 #define STMPE24XX_REG_GPFER_LSB                0x94
+#define STMPE24XX_REG_GPPUR_LSB                0x97
+#define STMPE24XX_REG_GPPDR_LSB                0x9a
 #define STMPE24XX_REG_GPAFR_U_MSB      0x9B
 
 #define STMPE24XX_SYS_CTRL_ENABLE_GPIO         (1 << 3)
index 51fd6b524371ecd9ae762716e4cf33692c36ce45..d1b55fe62817dcd0261ab926a704a37f590ca67c 100644 (file)
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
        return 0;
 }
 
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct cxl_context *ctx = vma->vm_file->private_data;
+       unsigned long address = (unsigned long)vmf->virtual_address;
+       u64 area, offset;
+
+       offset = vmf->pgoff << PAGE_SHIFT;
+
+       pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+                       __func__, ctx->pe, address, offset);
+
+       if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+               area = ctx->afu->psn_phys;
+               if (offset > ctx->afu->adapter->ps_size)
+                       return VM_FAULT_SIGBUS;
+       } else {
+               area = ctx->psn_phys;
+               if (offset > ctx->psn_size)
+                       return VM_FAULT_SIGBUS;
+       }
+
+       mutex_lock(&ctx->status_mutex);
+
+       if (ctx->status != STARTED) {
+               mutex_unlock(&ctx->status_mutex);
+               pr_devel("%s: Context not started, failing problem state access\n", __func__);
+               return VM_FAULT_SIGBUS;
+       }
+
+       vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+       mutex_unlock(&ctx->status_mutex);
+
+       return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+       .fault = cxl_mmap_fault,
+};
+
 /*
  * Map a per-context mmio space into the given vma.
  */
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
        u64 len = vma->vm_end - vma->vm_start;
        len = min(len, ctx->psn_size);
 
-       if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-       }
+       if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+               /* make sure there is a valid per process space for this AFU */
+               if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+                       pr_devel("AFU doesn't support mmio space\n");
+                       return -EINVAL;
+               }
 
-       /* make sure there is a valid per process space for this AFU */
-       if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
-               pr_devel("AFU doesn't support mmio space\n");
-               return -EINVAL;
+               /* Can't mmap until the AFU is enabled */
+               if (!ctx->afu->enabled)
+                       return -EBUSY;
        }
 
-       /* Can't mmap until the AFU is enabled */
-       if (!ctx->afu->enabled)
-               return -EBUSY;
-
        pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
                 ctx->psn_phys, ctx->pe , ctx->master);
 
+       vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       return vm_iomap_memory(vma, ctx->psn_phys, len);
+       vma->vm_ops = &cxl_mmap_vmops;
+       return 0;
 }
 
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
        afu_release_irqs(ctx);
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */
        wake_up_all(&ctx->wq);
-
-       /* Release Problem State Area mapping */
-       mutex_lock(&ctx->mapping_lock);
-       if (ctx->mapping)
-               unmap_mapping_range(ctx->mapping, 0, 0, 1);
-       mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
                 * created and torn down after the IDR removed
                 */
                __detach_context(ctx);
+
+               /*
+                * We are force detaching - remove any active PSA mappings so
+                * userspace cannot interfere with the card if it comes back.
+                * Easiest way to exercise this is to unbind and rebind the
+                * driver via sysfs while it is in use.
+                */
+               mutex_lock(&ctx->mapping_lock);
+               if (ctx->mapping)
+                       unmap_mapping_range(ctx->mapping, 0, 0, 1);
+               mutex_unlock(&ctx->mapping_lock);
        }
        mutex_unlock(&afu->contexts_lock);
 }
index e9f2f10dbb3734f3de4df60cdaed583415cfd831..b15d8113877c9f6ed5c45c58fb012fa6f50276aa 100644 (file)
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
        pr_devel("%s: pe: %i\n", __func__, ctx->pe);
 
-       mutex_lock(&ctx->status_mutex);
-       if (ctx->status != OPENED) {
-               rc = -EIO;
-               goto out;
-       }
-
+       /* Do this outside the status_mutex to avoid a circular dependency with
+        * the locking in cxl_mmap_fault() */
        if (copy_from_user(&work, uwork,
                           sizeof(struct cxl_ioctl_start_work))) {
                rc = -EFAULT;
                goto out;
        }
 
+       mutex_lock(&ctx->status_mutex);
+       if (ctx->status != OPENED) {
+               rc = -EIO;
+               goto out;
+       }
+
        /*
         * if any of the reserved fields are set or any of the unused
         * flags are set it's invalid
index ff2755062b4420cf3239a80e0f767d6dc333b6a6..06ff0a2ec96071c5b2d1c2c5dffdff7a5c03d938 100644 (file)
@@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 hcsr = mei_hcsr_read(hw);
 
+       /* H_RST may be found lit before reset is started,
+        * for example if the preceding reset flow hasn't completed.
+        * In that case asserting H_RST will be ignored; therefore
+        * we need to clear the H_RST bit to start a successful reset sequence.
+        */
+       if ((hcsr & H_RST) == H_RST) {
+               dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+               hcsr &= ~H_RST;
+               mei_me_reg_write(hw, H_CSR, hcsr);
+               hcsr = mei_hcsr_read(hw);
+       }
+
        hcsr |= H_RST | H_IG | H_IS;
 
        if (intr_enable)
index 02ad79229f65ecf0b50b9bf29a1f63c0d297910e..7466ce098e60a086e4f60c5111c19c1b85cd6d60 100644 (file)
@@ -886,7 +886,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
        unsigned idx, bus_width = 0;
        int err = 0;
 
-       if (!mmc_can_ext_csd(card) &&
+       if (!mmc_can_ext_csd(card) ||
            !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
                return 0;
 
index e3e56d35f0eeee634a0930acce0500bb33cb3258..970314e0aac8f1f05e5b9ff199e28723b9d85a19 100644 (file)
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
        { "INT33BB"  , "3" , &sdhci_acpi_slot_int_sd },
        { "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "INT3436"  , NULL, &sdhci_acpi_slot_int_sdio },
+       { "INT344D"  , NULL, &sdhci_acpi_slot_int_sdio },
        { "PNP0D40"  },
        { },
 };
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
        { "INT33BB"  },
        { "INT33C6"  },
        { "INT3436"  },
+       { "INT344D"  },
        { "PNP0D40"  },
        { },
 };
index 03427755b9029b297393d1d43851b4f678f2bbae..4f38554ce6797e0a461a3ddaf76261264229f107 100644 (file)
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
                .subdevice      = PCI_ANY_ID,
                .driver_data    = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
        },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_SPT_SD,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .driver_data    = (kernel_ulong_t)&sdhci_intel_byt_sd,
+       },
+
        {
                .vendor         = PCI_VENDOR_ID_O2,
                .device         = PCI_DEVICE_ID_O2_8120,
index d57c3d169914e94e716b64b90e8b8da2b86afa87..1ec684d06d54733b35b2427d4007bfd5cfd3d52b 100644 (file)
@@ -21,6 +21,9 @@
 #define PCI_DEVICE_ID_INTEL_CLV_EMMC0  0x08e5
 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1  0x08e6
 #define PCI_DEVICE_ID_INTEL_QRK_SD     0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC   0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO   0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD     0x9d2d
 
 /*
  * PCI registers
index 45238871192da1d6553268d82659dce3cc600d68..ca3424e7ef717c59a82da1e13a8ca3cf939da7f2 100644 (file)
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (IS_ERR(host))
                return PTR_ERR(host);
 
-       if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
-               ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
-               if (ret < 0)
-                       goto err_mbus_win;
-       }
-
-
        pltfm_host = sdhci_priv(host);
        pltfm_host->priv = pxa;
 
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
        if (!IS_ERR(pxa->clk_core))
                clk_prepare_enable(pxa->clk_core);
 
+       if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+               ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+               if (ret < 0)
+                       goto err_mbus_win;
+       }
+
        /* enable 1/8V DDR capable */
        host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
@@ -396,11 +395,11 @@ err_add_host:
        pm_runtime_disable(&pdev->dev);
 err_of_parse:
 err_cd_req:
+err_mbus_win:
        clk_disable_unprepare(pxa->clk_io);
        if (!IS_ERR(pxa->clk_core))
                clk_disable_unprepare(pxa->clk_core);
 err_clk_get:
-err_mbus_win:
        sdhci_pltfm_free(pdev);
        return ret;
 }
index cbb245b5853873cbddc2f1a0967c8bcc989ea4e1..f1a488ee432f891971f79707d78ca91a631b6c51 100644 (file)
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host)
 
                del_timer_sync(&host->tuning_timer);
                host->flags &= ~SDHCI_NEEDS_RETUNING;
-               host->mmc->max_blk_count =
-                       (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
        }
        sdhci_enable_card_detection(host);
 }
@@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
                spin_unlock_irq(&host->lock);
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
                spin_lock_irq(&host->lock);
+
+               if (mode != MMC_POWER_OFF)
+                       sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+               else
+                       sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
                return;
        }
 
@@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        sdhci_runtime_pm_get(host);
 
+       present = mmc_gpio_get_cd(host->mmc);
+
        spin_lock_irqsave(&host->lock, flags);
 
        WARN_ON(host->mrq != NULL);
@@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
         *     zero: cd-gpio is used, and card is removed
         *     one: cd-gpio is used, and card is present
         */
-       present = mmc_gpio_get_cd(host->mmc);
        if (present < 0) {
                /* If polling, assume that the card is always present. */
                if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
        return !(present_state & SDHCI_DATA_LVL_MASK);
 }
 
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&host->lock, flags);
+       host->flags |= SDHCI_HS400_TUNING;
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       return 0;
+}
+
 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
        struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        int tuning_loop_counter = MAX_TUNING_LOOP;
        int err = 0;
        unsigned long flags;
+       unsigned int tuning_count = 0;
+       bool hs400_tuning;
 
        sdhci_runtime_pm_get(host);
        spin_lock_irqsave(&host->lock, flags);
 
+       hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+       host->flags &= ~SDHCI_HS400_TUNING;
+
+       if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+               tuning_count = host->tuning_count;
+
        /*
         * The Host Controller needs tuning only in case of SDR104 mode
         * and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * tuning function has to be executed.
         */
        switch (host->timing) {
+       /* HS400 tuning is done in HS200 mode */
        case MMC_TIMING_MMC_HS400:
+               err = -EINVAL;
+               goto out_unlock;
+
        case MMC_TIMING_MMC_HS200:
+               /*
+                * Periodic re-tuning for HS400 is not expected to be needed, so
+                * disable it here.
+                */
+               if (hs400_tuning)
+                       tuning_count = 0;
+               break;
+
        case MMC_TIMING_UHS_SDR104:
                break;
 
@@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
                /* FALLTHROUGH */
 
        default:
-               spin_unlock_irqrestore(&host->lock, flags);
-               sdhci_runtime_pm_put(host);
-               return 0;
+               goto out_unlock;
        }
 
        if (host->ops->platform_execute_tuning) {
@@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
        }
 
 out:
-       /*
-        * If this is the very first time we are here, we start the retuning
-        * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
-        * flag won't be set, we check this condition before actually starting
-        * the timer.
-        */
-       if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
-           (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+       host->flags &= ~SDHCI_NEEDS_RETUNING;
+
+       if (tuning_count) {
                host->flags |= SDHCI_USING_RETUNING_TIMER;
-               mod_timer(&host->tuning_timer, jiffies +
-                       host->tuning_count * HZ);
-               /* Tuning mode 1 limits the maximum data length to 4MB */
-               mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
-       } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
-               host->flags &= ~SDHCI_NEEDS_RETUNING;
-               /* Reload the new initial value for timer */
-               mod_timer(&host->tuning_timer, jiffies +
-                         host->tuning_count * HZ);
+               mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
        }
 
        /*
@@ -2070,6 +2092,7 @@ out:
 
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
        spin_unlock_irqrestore(&host->lock, flags);
        sdhci_runtime_pm_put(host);
 
@@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
 {
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
+       int present;
 
        /* First check if client has provided their own card event */
        if (host->ops->card_event)
                host->ops->card_event(host);
 
+       present = sdhci_do_get_cd(host);
+
        spin_lock_irqsave(&host->lock, flags);
 
        /* Check host->mrq first in case we are runtime suspended */
-       if (host->mrq && !sdhci_do_get_cd(host)) {
+       if (host->mrq && !present) {
                pr_err("%s: Card removed during transfer!\n",
                        mmc_hostname(host->mmc));
                pr_err("%s: Resetting controller.\n",
@@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = {
        .hw_reset       = sdhci_hw_reset,
        .enable_sdio_irq = sdhci_enable_sdio_irq,
        .start_signal_voltage_switch    = sdhci_start_signal_voltage_switch,
+       .prepare_hs400_tuning           = sdhci_prepare_hs400_tuning,
        .execute_tuning                 = sdhci_execute_tuning,
        .card_event                     = sdhci_card_event,
        .card_busy      = sdhci_card_busy,
@@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host)
                mmc->max_segs = SDHCI_MAX_SEGS;
 
        /*
-        * Maximum number of sectors in one transfer. Limited by DMA boundary
-        * size (512KiB).
+        * Maximum number of sectors in one transfer. Limited by SDMA boundary
+        * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
+        * is less anyway.
         */
        mmc->max_req_size = 524288;
 
index 184c434ae3055e4b8a586c116d1274ddbccec6c5..0dceba1a2ba15f4706922a5423f680e7cd17ef77 100644 (file)
@@ -1648,7 +1648,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* slave is not a slave or master is not master of this slave */
        if (!(slave_dev->flags & IFF_SLAVE) ||
            !netdev_has_upper_dev(slave_dev, bond_dev)) {
-               netdev_err(bond_dev, "cannot release %s\n",
+               netdev_dbg(bond_dev, "cannot release %s\n",
                           slave_dev->name);
                return -EINVAL;
        }
index a5fefb9059c592aa5134302ba79f411a2874d65a..b306210b02b7b40c717ae160e4116db8926418ce 100644 (file)
@@ -257,7 +257,6 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
        struct vringh_kiov *riov = &cfv->ctx.riov;
        unsigned int skb_len;
 
-again:
        do {
                skb = NULL;
 
@@ -322,7 +321,6 @@ exit:
                    napi_schedule_prep(napi)) {
                        vringh_notify_disable_kern(cfv->vr_rx);
                        __napi_schedule(napi);
-                       goto again;
                }
                break;
 
index 89c8d9fc97de9cbc85611de9b9f032b1641d41e7..57e97910c72811ac9e5c24428cd3e3500dd089dd 100644 (file)
@@ -246,13 +246,13 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
 
        if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
                dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
-               return -ENODEV;
+               goto err_out;
        }
 
        if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
                dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
                        NE_IO_EXTENT, ioaddr);
-               return -EBUSY;
+               goto err_out;
        }
 
        reg0 = inb(ioaddr);
@@ -392,6 +392,8 @@ err_out_free_netdev:
        free_netdev (dev);
 err_out_free_res:
        release_region (ioaddr, NE_IO_EXTENT);
+err_out:
+       pci_disable_device(pdev);
        return -ENODEV;
 }
 
index df76050d0a9d26dc5a5b106a0ac44a18d84441b9..eadcb053807e46e8ae1f5c8ebce984abf0166219 100644 (file)
@@ -156,18 +156,6 @@ source "drivers/net/ethernet/realtek/Kconfig"
 source "drivers/net/ethernet/renesas/Kconfig"
 source "drivers/net/ethernet/rdc/Kconfig"
 source "drivers/net/ethernet/rocker/Kconfig"
-
-config S6GMAC
-       tristate "S6105 GMAC ethernet support"
-       depends on XTENSA_VARIANT_S6000
-       select PHYLIB
-       ---help---
-         This driver supports the on chip ethernet device on the
-         S6105 xtensa processor.
-
-         To compile this driver as a module, choose M here. The module
-         will be called s6gmac.
-
 source "drivers/net/ethernet/samsung/Kconfig"
 source "drivers/net/ethernet/seeq/Kconfig"
 source "drivers/net/ethernet/silan/Kconfig"
index bf56f8b36e90cbb2fcef22627fadff0041fef897..1367afcd0a8b2cd29681fc1867e7f0af71889fe0 100644 (file)
@@ -66,7 +66,6 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
 obj-$(CONFIG_SH_ETH) += renesas/
 obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
 obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
-obj-$(CONFIG_S6GMAC) += s6gmac.o
 obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
 obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
 obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
index 1fcd5568a3520981fd6ac03b57a3a599c2876a6c..f3470d96837a7fb0e59307b000855fe18a882af5 100644 (file)
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
        }
 
        db->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(db->clk))
+       if (IS_ERR(db->clk)) {
+               ret = PTR_ERR(db->clk);
                goto out;
+       }
 
        clk_prepare_enable(db->clk);
 
index 3498760dc22a96c17419abbda9250cc959165a39..760c72c6e2acd50ba8472e4b4dd77170c2c381d6 100644 (file)
@@ -1170,10 +1170,6 @@ tx_request_irq_error:
 init_error:
        free_skbufs(dev);
 alloc_skbuf_error:
-       if (priv->phydev) {
-               phy_disconnect(priv->phydev);
-               priv->phydev = NULL;
-       }
 phy_error:
        return ret;
 }
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
        int ret;
        unsigned long int flags;
 
-       /* Stop and disconnect the PHY */
-       if (priv->phydev) {
+       /* Stop the PHY */
+       if (priv->phydev)
                phy_stop(priv->phydev);
-               phy_disconnect(priv->phydev);
-               priv->phydev = NULL;
-       }
 
        netif_stop_queue(dev);
        napi_disable(&priv->napi);
@@ -1525,6 +1518,10 @@ err_free_netdev:
 static int altera_tse_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
+       struct altera_tse_private *priv = netdev_priv(ndev);
+
+       if (priv->phydev)
+               phy_disconnect(priv->phydev);
 
        platform_set_drvdata(pdev, NULL);
        altera_tse_mdio_destroy(ndev);
index e398eda0729832671561490c6d18fce9db485f5e..c8af3ce3ea38d16d4c470ec5773b2d4f088b0f15 100644 (file)
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
        schedule_work(&alx->reset_wk);
 }
 
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
 {
        struct alx_rx_queue *rxq = &alx->rxq;
        struct alx_rrd *rrd;
        struct alx_buffer *rxb;
        struct sk_buff *skb;
        u16 length, rfd_cleaned = 0;
+       int work = 0;
 
-       while (budget > 0) {
+       while (work < budget) {
                rrd = &rxq->rrd[rxq->rrd_read_idx];
                if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
                        break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
                                  RRD_NOR) != 1) {
                        alx_schedule_reset(alx);
-                       return 0;
+                       return work;
                }
 
                rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                }
 
                napi_gro_receive(&alx->napi, skb);
-               budget--;
+               work++;
 
 next_pkt:
                if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
        if (rfd_cleaned)
                alx_refill_rx_ring(alx, GFP_ATOMIC);
 
-       return budget > 0;
+       return work;
 }
 
 static int alx_poll(struct napi_struct *napi, int budget)
 {
        struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
        struct alx_hw *hw = &alx->hw;
-       bool complete = true;
        unsigned long flags;
+       bool tx_complete;
+       int work;
 
-       complete = alx_clean_tx_irq(alx) &&
-                  alx_clean_rx_irq(alx, budget);
+       tx_complete = alx_clean_tx_irq(alx);
+       work = alx_clean_rx_irq(alx, budget);
 
-       if (!complete)
-               return 1;
+       if (!tx_complete || work == budget)
+               return budget;
 
        napi_complete(&alx->napi);
 
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
 
        alx_post_write(hw);
 
-       return 0;
+       return work;
 }
 
 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
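
The alx_poll() rework above (and the similar dnet and i40e poll changes later in this merge) moves to the standard NAPI accounting contract: report how many packets of the budget were consumed, and only call napi_complete() and re-enable interrupts when the budget was not exhausted. A generic sketch of that contract, with hypothetical example_* helpers standing in for driver specifics:

#include <linux/netdevice.h>

struct example_priv {
        struct napi_struct napi;
        /* ... device state ... */
};

static bool example_rx_one_packet(struct example_priv *priv);   /* hypothetical */
static void example_enable_rx_irqs(struct example_priv *priv);  /* hypothetical */

/* Return the work done; complete NAPI only when work < budget. */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int work = 0;

        while (work < budget && example_rx_one_packet(priv))
                work++;

        if (work < budget) {
                napi_complete(napi);
                example_enable_rx_irqs(priv);
        }

        return work;
}
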
index 9f5e38769a294a3d66d9562e12e2e0116bac925e..72eef9fc883e8983d7544abd685fbb36da19951d 100644 (file)
@@ -12553,9 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev,
        return 0;
 }
 
-static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
+                                             struct net_device *dev,
+                                             netdev_features_t features)
 {
-       return vxlan_gso_check(skb);
+       return vxlan_features_check(skb, features);
 }
 
 static const struct net_device_ops bnx2x_netdev_ops = {
@@ -12589,7 +12591,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #endif
        .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
        .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
-       .ndo_gso_check          = bnx2x_gso_check,
+       .ndo_features_check     = bnx2x_features_check,
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
index bb48a610b72a8db6beb501e990c5d4f1365c9495..96bf01ba32dda179b15d2c36d6168581ee43c250 100644 (file)
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
 }
 
 static void tg3_irq_quiesce(struct tg3 *tp)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        int i;
 
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
        tp->irq_sync = 1;
        smp_mb();
 
+       spin_unlock_bh(&tp->lock);
+
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
+
+       spin_lock_bh(&tp->lock);
 }
 
 /* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
 
 /* tp->lock is held. */
 static int tg3_chip_reset(struct tg3 *tp)
+       __releases(tp->lock)
+       __acquires(tp->lock)
 {
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
        }
        smp_mb();
 
+       tg3_full_unlock(tp);
+
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
 
+       tg3_full_lock(tp, 0);
+
        if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
 {
        struct tg3 *tp = (struct tg3 *) __opaque;
 
-       if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
-               goto restart_timer;
-
        spin_lock(&tp->lock);
 
+       if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+               spin_unlock(&tp->lock);
+               goto restart_timer;
+       }
+
        if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
            tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
 
+       rtnl_lock();
        tg3_full_lock(tp, 0);
 
        if (!netif_running(tp->dev)) {
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
+               rtnl_unlock();
                return;
        }
 
@@ -11138,6 +11154,7 @@ out:
                tg3_phy_start(tp);
 
        tg3_flag_clear(tp, RESET_TASK_PENDING);
+       rtnl_unlock();
 }
 
 static int tg3_request_irq(struct tg3 *tp, int irq_num)
@@ -17800,23 +17817,6 @@ static int tg3_init_one(struct pci_dev *pdev,
                goto err_out_apeunmap;
        }
 
-       /*
-        * Reset chip in case UNDI or EFI driver did not shutdown
-        * DMA self test will enable WDMAC and we'll see (spurious)
-        * pending DMA on the PCI bus at that point.
-        */
-       if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
-           (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-               tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
-               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-       }
-
-       err = tg3_test_dma(tp);
-       if (err) {
-               dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
-               goto err_out_apeunmap;
-       }
-
        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
@@ -17861,6 +17861,23 @@ static int tg3_init_one(struct pci_dev *pdev,
                        sndmbx += 0xc;
        }
 
+       /*
+        * Reset chip in case UNDI or EFI driver did not shutdown
+        * DMA self test will enable WDMAC and we'll see (spurious)
+        * pending DMA on the PCI bus at that point.
+        */
+       if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+           (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+               tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+       }
+
+       err = tg3_test_dma(tp);
+       if (err) {
+               dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+               goto err_out_apeunmap;
+       }
+
        tg3_init_coal(tp);
 
        pci_set_drvdata(pdev, dev);
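
Several tg3 hunks above drop tp->lock around synchronize_irq(), presumably because waiting for in-flight interrupt handlers may sleep and so cannot be done while holding a spinlock; the __releases()/__acquires() annotations document the temporary release for sparse. A stripped-down sketch of the pattern (struct and field names hypothetical):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_dev {
        spinlock_t lock;
        int irq_cnt;
        unsigned int irq_vec[8];        /* IRQ numbers, illustrative */
};

/* Never call synchronize_irq() with a spinlock held: release the lock,
 * wait for handlers to finish, then reacquire it.
 */
static void example_quiesce_irqs(struct example_dev *tp)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int i;

        spin_unlock_bh(&tp->lock);
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->irq_vec[i]);
        spin_lock_bh(&tp->lock);
}
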
index 7d6aa8c87df84747bea056019a89517846817b21..619083a860a4b4bfc64518619202aa515c415b71 100644 (file)
@@ -172,7 +172,7 @@ bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
 
        /* Retrieve flash partition info */
        fcomp.comp_status = 0;
-       init_completion(&fcomp.comp);
+       reinit_completion(&fcomp.comp);
        spin_lock_irqsave(&bnad->bna_lock, flags);
        ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
                                bnad_cb_completion, &fcomp);
index 55eb7f2af2b41ccf031f5018c12137daf752dc6c..7ef55f5fa664480ce052720bc55bd5ffb9ca8b57 100644 (file)
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
                res = PTR_ERR(lp->pclk);
                goto err_free_dev;
        }
-       clk_enable(lp->pclk);
+       clk_prepare_enable(lp->pclk);
 
        lp->hclk = ERR_PTR(-ENOENT);
        lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
 err_out_unregister_netdev:
        unregister_netdev(dev);
 err_disable_clock:
-       clk_disable(lp->pclk);
+       clk_disable_unprepare(lp->pclk);
 err_free_dev:
        free_netdev(dev);
        return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
        kfree(lp->mii_bus->irq);
        mdiobus_free(lp->mii_bus);
        unregister_netdev(dev);
-       clk_disable(lp->pclk);
+       clk_disable_unprepare(lp->pclk);
        free_netdev(dev);
 
        return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
                netif_stop_queue(net_dev);
                netif_device_detach(net_dev);
 
-               clk_disable(lp->pclk);
+               clk_disable_unprepare(lp->pclk);
        }
        return 0;
 }
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
        struct macb *lp = netdev_priv(net_dev);
 
        if (netif_running(net_dev)) {
-               clk_enable(lp->pclk);
+               clk_prepare_enable(lp->pclk);
 
                netif_device_attach(net_dev);
                netif_start_queue(net_dev);
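
The at91_ether hunks above switch from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), matching the common clock framework rule that a clock must be prepared (a step that may sleep) before it can be enabled. A minimal sketch of the balanced pairing, with a hypothetical consumer:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Prepare+enable at probe/resume; the caller pairs this with
 * clk_disable_unprepare() on the error path and at teardown.
 */
static int example_clk_on(struct device *dev, struct clk **clk_out)
{
        struct clk *clk = devm_clk_get(dev, NULL);
        int ret;

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        *clk_out = clk;
        return 0;
}
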
index d00a751f0588d8c65d6060352af578895e5d9f6d..6049f70e110c5701d0a6cb8685b3dbf2a78ab38d 100644 (file)
@@ -96,6 +96,9 @@ struct port_info {
        s16 xact_addr_filt;             /* index of our MAC address filter */
        u16 rss_size;                   /* size of VI's RSS table slice */
        u8 pidx;                        /* index into adapter port[] */
+       s8 mdio_addr;
+       u8 port_type;                   /* firmware port type */
+       u8 mod_type;                    /* firmware module type */
        u8 port_id;                     /* physical port ID */
        u8 nqsets;                      /* # of "Queue Sets" */
        u8 first_qset;                  /* index of first "Queue Set" */
@@ -522,6 +525,7 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
  * is "contracted" to provide for the common code.
  */
 void t4vf_os_link_changed(struct adapter *, int, int);
+void t4vf_os_portmod_changed(struct adapter *, int);
 
 /*
  * SGE function prototype declarations.
index aa74ec34a4679cbff1905e2af7da5bfcdf71999f..a936ee8958c704fa90321a1934b2c7dacbf046a6 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/etherdevice.h>
 #include <linux/debugfs.h>
 #include <linux/ethtool.h>
+#include <linux/mdio.h>
 
 #include "t4vf_common.h"
 #include "t4vf_defs.h"
@@ -209,6 +210,38 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
        }
 }
 
+/*
+ * The port module type has changed on the indicated "port" (Virtual
+ * Interface).
+ */
+void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
+{
+       static const char * const mod_str[] = {
+               NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+       };
+       const struct net_device *dev = adapter->port[pidx];
+       const struct port_info *pi = netdev_priv(dev);
+
+       if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+               dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
+                        dev->name);
+       else if (pi->mod_type < ARRAY_SIZE(mod_str))
+               dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
+                        dev->name, mod_str[pi->mod_type]);
+       else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+               dev_info(adapter->pdev_dev, "%s: unsupported optical port "
+                        "module inserted\n", dev->name);
+       else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+               dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
+                        "forcing TWINAX\n", dev->name);
+       else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+               dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
+                        dev->name);
+       else
+               dev_info(adapter->pdev_dev, "%s: unknown module type %d "
+                        "inserted\n", dev->name, pi->mod_type);
+}
+
 /*
  * Net device operations.
  * ======================
@@ -1193,24 +1226,103 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
  * state of the port to which we're linked.
  */
 
-/*
- * Return current port link settings.
- */
-static int cxgb4vf_get_settings(struct net_device *dev,
-                               struct ethtool_cmd *cmd)
-{
-       const struct port_info *pi = netdev_priv(dev);
+static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
+                                         unsigned int caps)
+{
+       unsigned int v = 0;
+
+       if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+           type == FW_PORT_TYPE_BT_XAUI) {
+               v |= SUPPORTED_TP;
+               if (caps & FW_PORT_CAP_SPEED_100M)
+                       v |= SUPPORTED_100baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseT_Full;
+       } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+               v |= SUPPORTED_Backplane;
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseKX_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseKX4_Full;
+       } else if (type == FW_PORT_TYPE_KR)
+               v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+       else if (type == FW_PORT_TYPE_BP_AP)
+               v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+                    SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+       else if (type == FW_PORT_TYPE_BP4_AP)
+               v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+                    SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+                    SUPPORTED_10000baseKX4_Full;
+       else if (type == FW_PORT_TYPE_FIBER_XFI ||
+                type == FW_PORT_TYPE_FIBER_XAUI ||
+                type == FW_PORT_TYPE_SFP ||
+                type == FW_PORT_TYPE_QSFP_10G ||
+                type == FW_PORT_TYPE_QSA) {
+               v |= SUPPORTED_FIBRE;
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseT_Full;
+       } else if (type == FW_PORT_TYPE_BP40_BA ||
+                  type == FW_PORT_TYPE_QSFP) {
+               v |= SUPPORTED_40000baseSR4_Full;
+               v |= SUPPORTED_FIBRE;
+       }
+
+       if (caps & FW_PORT_CAP_ANEG)
+               v |= SUPPORTED_Autoneg;
+       return v;
+}
+
+static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       const struct port_info *p = netdev_priv(dev);
+
+       if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+           p->port_type == FW_PORT_TYPE_BT_XFI ||
+           p->port_type == FW_PORT_TYPE_BT_XAUI)
+               cmd->port = PORT_TP;
+       else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+                p->port_type == FW_PORT_TYPE_FIBER_XAUI)
+               cmd->port = PORT_FIBRE;
+       else if (p->port_type == FW_PORT_TYPE_SFP ||
+                p->port_type == FW_PORT_TYPE_QSFP_10G ||
+                p->port_type == FW_PORT_TYPE_QSA ||
+                p->port_type == FW_PORT_TYPE_QSFP) {
+               if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+                   p->mod_type == FW_PORT_MOD_TYPE_SR ||
+                   p->mod_type == FW_PORT_MOD_TYPE_ER ||
+                   p->mod_type == FW_PORT_MOD_TYPE_LRM)
+                       cmd->port = PORT_FIBRE;
+               else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+                        p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+                       cmd->port = PORT_DA;
+               else
+                       cmd->port = PORT_OTHER;
+       } else
+               cmd->port = PORT_OTHER;
 
-       cmd->supported = pi->link_cfg.supported;
-       cmd->advertising = pi->link_cfg.advertising;
+       if (p->mdio_addr >= 0) {
+               cmd->phy_address = p->mdio_addr;
+               cmd->transceiver = XCVR_EXTERNAL;
+               cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+                       MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+       } else {
+               cmd->phy_address = 0;  /* not really, but no better option */
+               cmd->transceiver = XCVR_INTERNAL;
+               cmd->mdio_support = 0;
+       }
+
+       cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
+                                              p->link_cfg.supported);
+       cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
+                                           p->link_cfg.advertising);
        ethtool_cmd_speed_set(cmd,
-                             netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
+                             netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
        cmd->duplex = DUPLEX_FULL;
-
-       cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
-       cmd->phy_address = pi->port_id;
-       cmd->transceiver = XCVR_EXTERNAL;
-       cmd->autoneg = pi->link_cfg.autoneg;
+       cmd->autoneg = p->link_cfg.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
@@ -2318,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter)
         */
        n10g = 0;
        for_each_port(adapter, pidx)
-               n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+               n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
 
        /*
         * We default to 1 queue per non-10G port and up to # of cores queues
index 8d3237f5e36493aca04ba2fdb952ef4a148fee9c..b9debb4f29a355a54b6304ea39fa717a3b33cc14 100644 (file)
@@ -230,7 +230,7 @@ struct adapter_params {
 
 static inline bool is_10g_port(const struct link_config *lc)
 {
-       return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
+       return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
 }
 
 static inline bool is_x_10g_port(const struct link_config *lc)
index 02e8833b7797af63e3c1d43f0ac5ce75450e3c76..60426cf890a774dd07ec939a622f88991c1044f6 100644 (file)
@@ -245,6 +245,10 @@ static int hash_mac_addr(const u8 *addr)
        return a & 0x3f;
 }
 
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+                    FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+
 /**
  *     init_link_config - initialize a link's SW state
  *     @lc: structure holding the link state
@@ -259,8 +263,8 @@ static void init_link_config(struct link_config *lc, unsigned int caps)
        lc->requested_speed = 0;
        lc->speed = 0;
        lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
-       if (lc->supported & SUPPORTED_Autoneg) {
-               lc->advertising = lc->supported;
+       if (lc->supported & FW_PORT_CAP_ANEG) {
+               lc->advertising = lc->supported & ADVERT_MASK;
                lc->autoneg = AUTONEG_ENABLE;
                lc->requested_fc |= PAUSE_AUTONEG;
        } else {
@@ -280,7 +284,6 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
        struct fw_vi_cmd vi_cmd, vi_rpl;
        struct fw_port_cmd port_cmd, port_rpl;
        int v;
-       u32 word;
 
        /*
         * Execute a VI Read command to get our Virtual Interface information
@@ -319,19 +322,13 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
        if (v)
                return v;
 
-       v = 0;
-       word = be16_to_cpu(port_rpl.u.info.pcap);
-       if (word & FW_PORT_CAP_SPEED_100M)
-               v |= SUPPORTED_100baseT_Full;
-       if (word & FW_PORT_CAP_SPEED_1G)
-               v |= SUPPORTED_1000baseT_Full;
-       if (word & FW_PORT_CAP_SPEED_10G)
-               v |= SUPPORTED_10000baseT_Full;
-       if (word & FW_PORT_CAP_SPEED_40G)
-               v |= SUPPORTED_40000baseSR4_Full;
-       if (word & FW_PORT_CAP_ANEG)
-               v |= SUPPORTED_Autoneg;
-       init_link_config(&pi->link_cfg, v);
+       v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+       pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+                       FW_PORT_CMD_MDIOADDR_G(v) : -1;
+       pi->port_type = FW_PORT_CMD_PTYPE_G(v);
+       pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+       init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
 
        return 0;
 }
@@ -1491,7 +1488,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
                 */
                const struct fw_port_cmd *port_cmd =
                        (const struct fw_port_cmd *)rpl;
-               u32 word;
+               u32 stat, mod;
                int action, port_id, link_ok, speed, fc, pidx;
 
                /*
@@ -1509,21 +1506,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
                port_id = FW_PORT_CMD_PORTID_G(
                        be32_to_cpu(port_cmd->op_to_portid));
 
-               word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
-               link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0;
+               stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+               link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
                speed = 0;
                fc = 0;
-               if (word & FW_PORT_CMD_RXPAUSE_F)
+               if (stat & FW_PORT_CMD_RXPAUSE_F)
                        fc |= PAUSE_RX;
-               if (word & FW_PORT_CMD_TXPAUSE_F)
+               if (stat & FW_PORT_CMD_TXPAUSE_F)
                        fc |= PAUSE_TX;
-               if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+               if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
                        speed = 100;
-               else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
                        speed = 1000;
-               else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
                        speed = 10000;
-               else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+               else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
                        speed = 40000;
 
                /*
@@ -1540,12 +1537,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
                                continue;
 
                        lc = &pi->link_cfg;
+
+                       mod = FW_PORT_CMD_MODTYPE_G(stat);
+                       if (mod != pi->mod_type) {
+                               pi->mod_type = mod;
+                               t4vf_os_portmod_changed(adapter, pidx);
+                       }
+
                        if (link_ok != lc->link_ok || speed != lc->speed ||
                            fc != lc->fc) {
                                /* something changed */
                                lc->link_ok = link_ok;
                                lc->speed = speed;
                                lc->fc = fc;
+                               lc->supported =
+                                       be16_to_cpu(port_cmd->u.info.pcap);
                                t4vf_os_link_changed(adapter, pidx, link_ok);
                        }
                }
index 868d0f605d60524053c46d87d010bf50e3341a1d..b29e027c476e538b93a7a10b9302deee1253c0b5 100644 (file)
@@ -1060,10 +1060,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                                     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
                }
 
-               if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
-                       skb->csum = htons(checksum);
-                       skb->ip_summed = CHECKSUM_COMPLETE;
-               }
+               /* Hardware does not provide the whole packet checksum, it
+                * only provides a pseudo checksum. Since hw validates the
+                * packet checksum but does not give us the checksum value,
+                * use CHECKSUM_UNNECESSARY.
+                */
+               if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
+                   ipv4_csum_ok)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                if (vlan_stripped)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
@@ -1612,7 +1616,7 @@ static int enic_open(struct net_device *netdev)
                if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
                        netdev_err(netdev, "Unable to alloc receive buffers\n");
                        err = -ENOMEM;
-                       goto err_out_notify_unset;
+                       goto err_out_free_rq;
                }
        }
 
@@ -1645,7 +1649,9 @@ static int enic_open(struct net_device *netdev)
 
        return 0;
 
-err_out_notify_unset:
+err_out_free_rq:
+       for (i = 0; i < enic->rq_count; i++)
+               vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        enic_dev_notify_unset(enic);
 err_out_free_intr:
        enic_free_intr(enic);
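
The enic receive-path change above stops claiming CHECKSUM_COMPLETE (which would require the raw checksum value from hardware) and reports CHECKSUM_UNNECESSARY instead, since the adapter only signals that validation passed. A minimal sketch of that distinction, with illustrative flag names:

#include <linux/skbuff.h>

/* Pick ip_summed based on what the hardware actually reports. */
static void example_rx_csum(struct sk_buff *skb, bool hw_validated_ok,
                            bool rx_csum_enabled)
{
        if (rx_csum_enabled && hw_validated_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;  /* hw verified, no value given */
        else
                skb->ip_summed = CHECKSUM_NONE;         /* let the stack verify it */
}
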
index a379c3e4b57f73ef3fd133bd4bb5114e29f10cb4..13d00a38a5bd60ed1e7af054f3a22b617e64317d 100644 (file)
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                 * break out of while loop if there are no more
                 * packets waiting
                 */
-               if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
-                       napi_complete(napi);
-                       int_enable = dnet_readl(bp, INTR_ENB);
-                       int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
-                       dnet_writel(bp, int_enable, INTR_ENB);
-                       return 0;
-               }
+               if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
+                       break;
 
                cmd_word = dnet_readl(bp, RX_LEN_FIFO);
                pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                               "size %u.\n", dev->name, pkt_len);
        }
 
-       budget -= npackets;
-
        if (npackets < budget) {
                /* We processed all packets available.  Tell NAPI it can
-                * stop polling then re-enable rx interrupts */
+                * stop polling then re-enable rx interrupts.
+                */
                napi_complete(napi);
                int_enable = dnet_readl(bp, INTR_ENB);
                int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
                dnet_writel(bp, int_enable, INTR_ENB);
-               return 0;
        }
 
-       /* There are still packets waiting */
-       return 1;
+       return npackets;
 }
 
 static irqreturn_t dnet_interrupt(int irq, void *dev_id)
index 196073110e320b1bb05e8b38bf9a5e1c66b5e929..41a0a5498da74c7b9b1129b68c9173c1b15470a4 100644 (file)
@@ -4459,9 +4459,11 @@ done:
        adapter->vxlan_port_count--;
 }
 
-static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t be_features_check(struct sk_buff *skb,
+                                          struct net_device *dev,
+                                          netdev_features_t features)
 {
-       return vxlan_gso_check(skb);
+       return vxlan_features_check(skb, features);
 }
 #endif
 
@@ -4492,7 +4494,7 @@ static const struct net_device_ops be_netdev_ops = {
 #ifdef CONFIG_BE2NET_VXLAN
        .ndo_add_vxlan_port     = be_add_vxlan_port,
        .ndo_del_vxlan_port     = be_del_vxlan_port,
-       .ndo_gso_check          = be_gso_check,
+       .ndo_features_check     = be_features_check,
 #endif
 };
 
index 469691ad4a1ee25dda5ff9dbec0ef8735ae0f6bf..40132929daf7ac7713f2cd0f1f469656616fadfc 100644 (file)
@@ -424,6 +424,8 @@ struct bufdesc_ex {
  * (40ns * 6).
  */
 #define FEC_QUIRK_BUG_CAPTURE          (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO          (1 << 11)
 
 struct fec_enet_priv_tx_q {
        int index;
index 5ebdf8dc8a31300f526fd98912080dc4850937f2..bba87775419dc9c0d5adadcdda8c1e634dc8e04d 100644 (file)
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
                .driver_data = 0,
        }, {
                .name = "imx28-fec",
-               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+               .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+                               FEC_QUIRK_SINGLE_MDIO,
        }, {
                .name = "imx6q-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        int err = -ENXIO, i;
 
        /*
-        * The dual fec interfaces are not equivalent with enet-mac.
+        * The i.MX28 dual fec interfaces are not equivalent.
         * Here are the differences:
         *
         *  - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * mdio interface in board design, and need to be configured by
         * fec0 mii_bus.
         */
-       if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+       if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
                if (mii_cnt && fec0_mii_bus) {
                        fep->mii_bus = fec0_mii_bus;
@@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        mii_cnt++;
 
        /* save fec0 mii_bus */
-       if (fep->quirks & FEC_QUIRK_ENET_MAC)
+       if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
                fec0_mii_bus = fep->mii_bus;
 
        return 0;
@@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev)
                pdev->id_entry = of_id->data;
        fep->quirks = pdev->id_entry->driver_data;
 
+       fep->netdev = ndev;
        fep->num_rx_queues = num_rx_qs;
        fep->num_tx_queues = num_tx_qs;
 
index 5b8300a32bf5f5eb1df93d7262b22a00b7d77a9f..4d61ef50b465b73bd4bd87256a2ad47d83d4d666 100644 (file)
@@ -281,6 +281,17 @@ config I40E_DCB
 
          If unsure, say N.
 
+config I40E_FCOE
+       bool "Fibre Channel over Ethernet (FCoE)"
+       default n
+       depends on I40E && DCB && FCOE
+       ---help---
+         Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
+         in the driver. This will create a new netdev for exclusive FCoE
+         use with XL710 FCoE offloads enabled.
+
+         If unsure, say N.
+
 config I40EVF
        tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
        depends on PCI_MSI
index 781065eb5431c811f6fb37da81b6e6c12f495770..e9c3a87e5b115dc690ef2b81bbe16a5480dae5b1 100644 (file)
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
                mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
        } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
           (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
-               !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+               (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
                /* enable/disable MDI/MDI-X auto-switching. */
                mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
                                nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
index 4b94ddb29c248ed2571c4d90eb37d8c8ecbec935..c405819991214e21a25b5a670d0097482fd643ab 100644 (file)
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
        i40e_virtchnl_pf.o
 
 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
index 433a55886ad29bfb1b357d47ad4953f9ddac6145..cb0de455683e452810c0a404d9b1bd138418fb88 100644 (file)
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                if (desc_n >= ring->count || desc_n < 0) {
                        dev_info(&pf->pdev->dev,
                                 "descriptor %d not found\n", desc_n);
-                       return;
+                       goto out;
                }
                if (!is_rx_ring) {
                        txd = I40E_TX_DESC(ring, desc_n);
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
        } else {
                dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
        }
+
+out:
        kfree(ring);
 }
 
index 045b5c4b98b38ba74ef68104828351f2a8fdc67c..ad802dd0f67a3d4fdcb6810ebdc8d66b67706d66 100644 (file)
@@ -78,7 +78,7 @@ do {                                                            \
 } while (0)
 
 typedef enum i40e_status_code i40e_status;
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef CONFIG_I40E_FCOE
 #define I40E_FCOE
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif
 #endif /* _I40E_OSDEP_H_ */
index 04b441460bbda6e36cb64731d24247a1b954e506..cecb340898fe2a881aac449c7377110ccc56793b 100644 (file)
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
        return le32_to_cpu(*(volatile __le32 *)head);
 }
 
+#define WB_STRIDE 0x3
+
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
  * @tx_ring:  tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
+       /* check to see if there are any non-cache aligned descriptors
+        * waiting to be written back, and kick the hardware to force
+        * them to be written back in case of napi polling
+        */
+       if (budget &&
+           !((i & WB_STRIDE) == WB_STRIDE) &&
+           !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+           (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+               tx_ring->arm_wb = true;
+       else
+               tx_ring->arm_wb = false;
+
        if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
                dev_info(tx_ring->dev,
-                        "tx hang detected on queue %d, resetting adapter\n",
+                        "tx hang detected on queue %d, reset requested\n",
                         tx_ring->queue_index);
 
-               tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+               /* Do not fire the reset immediately; wait for the stack to
+                * decide we are truly stuck. This also prevents every queue
+                * from simultaneously requesting a reset
+                */
 
-               /* the adapter is about to reset, no point in enabling stuff */
-               return true;
+               /* the adapter is about to reset, no point in enabling polling */
+               budget = 1;
        }
 
        netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                }
        }
 
-       return budget > 0;
+       return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a wb on non-cache-aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+       u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
+                 /* allow 00 to be written to the index */;
+
+       wr32(&vsi->back->hw,
+            I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+            val);
 }
 
 /**
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
         * so the total length of IPv4 header is IHL*4 bytes
         * The UDP_0 bit *may* bet set if the *inner* header is UDP
         */
-       if (ipv4_tunnel &&
-           (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
-           !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+       if (ipv4_tunnel) {
                skb->transport_header = skb->mac_header +
                                        sizeof(struct ethhdr) +
                                        (ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                          skb->protocol == htons(ETH_P_8021AD))
                                          ? VLAN_HLEN : 0;
 
-               rx_udp_csum = udp_csum(skb);
-               iph = ip_hdr(skb);
-               csum = csum_tcpudp_magic(
-                               iph->saddr, iph->daddr,
-                               (skb->len - skb_transport_offset(skb)),
-                               IPPROTO_UDP, rx_udp_csum);
+               if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+                   (udp_hdr(skb)->check != 0)) {
+                       rx_udp_csum = udp_csum(skb);
+                       iph = ip_hdr(skb);
+                       csum = csum_tcpudp_magic(
+                                       iph->saddr, iph->daddr,
+                                       (skb->len - skb_transport_offset(skb)),
+                                       IPPROTO_UDP, rx_udp_csum);
 
-               if (udp_hdr(skb)->check != csum)
-                       goto checksum_fail;
+                       if (udp_hdr(skb)->check != csum)
+                               goto checksum_fail;
+
+               } /* else it's GRE and so no outer UDP header */
        }
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        struct i40e_vsi *vsi = q_vector->vsi;
        struct i40e_ring *ring;
        bool clean_complete = true;
+       bool arm_wb = false;
        int budget_per_ring;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        /* Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
-       i40e_for_each_ring(ring, q_vector->tx)
+       i40e_for_each_ring(ring, q_vector->tx) {
                clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+               arm_wb |= ring->arm_wb;
+       }
 
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
        /* If work not completed, return budget and polling will return */
-       if (!clean_complete)
+       if (!clean_complete) {
+               if (arm_wb)
+                       i40e_force_wb(vsi, q_vector);
                return budget;
+       }
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       if (protocol == htons(ETH_P_IP)) {
-               iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+       ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+       if (iph->version == 4) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
-       } else if (skb_is_gso_v6(skb)) {
-
-               ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-                                          : ipv6_hdr(skb);
+       } else if (ipv6h->version == 6) {
                tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
                ipv6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
                } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
-                               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+                       if (tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
-                       } else {
-                               *cd_tunneling |=
-                                        I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-                       }
                }
 
                /* Now set the ctx descriptor fields */
@@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                   ((skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
+               if (this_ip_hdr->version == 6) {
+                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       tx_flags |= I40E_TX_FLAGS_IPV6;
+               }
        } else {
                network_hdr_len = skb_network_header_len(skb);
                this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        /* Place RS bit on last descriptor of any packet that spans across the
         * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
         */
-#define WB_STRIDE 0x3
        if (((i & WB_STRIDE) != WB_STRIDE) &&
            (first <= &tx_ring->tx_bi[i]) &&
            (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
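
The arm_wb logic introduced above keys off WB_STRIDE: when the last used transmit descriptor does not complete a 4-descriptor (64-byte cacheline) group, the driver asks hardware for an explicit write-back via a software-triggered interrupt rather than waiting. A tiny sketch of just the stride test (names hypothetical):

#include <linux/types.h>

#define EXAMPLE_WB_STRIDE 0x3   /* 4 descriptors per 64B cacheline */

/* True when descriptor index 'i' does not land on a full-cacheline
 * boundary, i.e. a write-back should be forced rather than waited for.
 */
static inline bool example_needs_forced_wb(unsigned int i)
{
        return (i & EXAMPLE_WB_STRIDE) != EXAMPLE_WB_STRIDE;
}
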
index e60d3accb2e2ec3f2992b056da718d633b978157..18b00231d2f117d714e7e1399aecba0061ead41a 100644 (file)
@@ -241,6 +241,7 @@ struct i40e_ring {
        unsigned long last_rx_timestamp;
 
        bool ring_active;               /* is ring online or not */
+       bool arm_wb;            /* do something to arm write back */
 
        /* stats structs */
        struct i40e_queue_stats stats;
index 051ea94bdcd3e8046181b361d8985c51d15ea19c..0f69ef81751a3d8154db558cc8f3d11e882928a0 100644 (file)
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
        u32 swmask = mask;
        u32 fwmask = mask << 16;
        s32 ret_val = 0;
-       s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+       s32 i = 0, timeout = 200;
 
        while (i < timeout) {
                if (igb_get_hw_semaphore(hw)) {
index 190cbd931f6bc8527654d14d042dc85799883a81..d0d6dc1b8e46e8173cbd61f5c2e84d4292301bc4 100644 (file)
@@ -2365,9 +2365,11 @@ static void mlx4_en_del_vxlan_port(struct  net_device *dev,
        queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
 }
 
-static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+                                               struct net_device *dev,
+                                               netdev_features_t features)
 {
-       return vxlan_gso_check(skb);
+       return vxlan_features_check(skb, features);
 }
 #endif
 
@@ -2400,7 +2402,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_MLX4_EN_VXLAN
        .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
        .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
-       .ndo_gso_check          = mlx4_en_gso_check,
+       .ndo_features_check     = mlx4_en_features_check,
 #endif
 };
 
@@ -2434,7 +2436,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 #ifdef CONFIG_MLX4_EN_VXLAN
        .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
        .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
-       .ndo_gso_check          = mlx4_en_gso_check,
+       .ndo_features_check     = mlx4_en_features_check,
 #endif
 };
 
index a308d41e4de08678107b185df2c7b8b7df3b39a7..e3357bf523df866222bdabdd0ec4c31cb24bb2a7 100644 (file)
@@ -962,7 +962,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                tx_desc->ctrl.owner_opcode = op_own;
                if (send_doorbell) {
                        wmb();
-                       iowrite32(ring->doorbell_qpn,
+                       /* Since there is no iowrite*_native() that writes the
+                        * value as-is, without byteswapping, use the variant
+                        * that does not byteswap for the relevant arch
+                        * endianness.
+                        */
+#if defined(__LITTLE_ENDIAN)
+                       iowrite32(
+#else
+                       iowrite32be(
+#endif
+                                 ring->doorbell_qpn,
                                  ring->bf.uar->map + MLX4_SEND_DOORBELL);
                } else {
                        ring->xmit_more++;
index 943cbd47d832bb98719e355a727e8451c68bfbbe..03e9eb0dc761e00a6488583881deb91dfd610464 100644 (file)
@@ -1829,7 +1829,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                err = mlx4_dev_cap(dev, &dev_cap);
                if (err) {
                        mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
-                       goto err_stop_fw;
+                       return err;
                }
 
                choose_steering_mode(dev, &dev_cap);
@@ -1860,7 +1860,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                                             &init_hca);
                if ((long long) icm_size < 0) {
                        err = icm_size;
-                       goto err_stop_fw;
+                       return err;
                }
 
                dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
@@ -1874,7 +1874,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
                err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
                if (err)
-                       goto err_stop_fw;
+                       return err;
 
                err = mlx4_INIT_HCA(dev, &init_hca);
                if (err) {
@@ -1886,7 +1886,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                        err = mlx4_query_func(dev, &dev_cap);
                        if (err < 0) {
                                mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
-                               goto err_stop_fw;
+                               goto err_close;
                        } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
                                dev->caps.num_eqs = dev_cap.max_eqs;
                                dev->caps.reserved_eqs = dev_cap.reserved_eqs;
@@ -2006,11 +2006,6 @@ err_free_icm:
        if (!mlx4_is_slave(dev))
                mlx4_free_icms(dev);
 
-err_stop_fw:
-       if (!mlx4_is_slave(dev)) {
-               mlx4_UNMAP_FA(dev);
-               mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-       }
        return err;
 }
 
index d6f549685c0fcd8a5cccf0c948536ed924024892..7094a9c70fd5f8be8cafc6e245d6297effe8c22a 100644 (file)
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
        mlx4_mtt_cleanup(dev, &mr->mtt);
+       mr->mtt.order = -1;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
 
@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
        int err;
 
-       mpt_entry->start       = cpu_to_be64(iova);
-       mpt_entry->length      = cpu_to_be64(size);
-       mpt_entry->entity_size = cpu_to_be32(page_shift);
-
        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
        if (err)
                return err;
 
+       mpt_entry->start       = cpu_to_be64(mr->iova);
+       mpt_entry->length      = cpu_to_be64(mr->size);
+       mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
        mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
                                           MLX4_MPT_PD_FLAG_EN_INV);
        mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
index f1ebed6c63b1bfe8a912963413d3afba3cd0fa51..2fa6ae026e4f331341253cc898742592089b9283 100644 (file)
@@ -2303,12 +2303,6 @@ static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
 
 /* Spanning Tree */
 
-static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
-{
-       port_cfg(hw, p,
-               KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
-}
-
 static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
 {
        port_cfg(hw, p,
index af099057f0e9c263dc250924785a15cf75ea6edb..71af98bb72cbeb1cc2376013847835c4195ec805 100644 (file)
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
                                      &mgp->cmd_bus, GFP_KERNEL);
-       if (mgp->cmd == NULL)
+       if (!mgp->cmd) {
+               status = -ENOMEM;
                goto abort_with_enabled;
+       }
 
        mgp->board_span = pci_resource_len(pdev, 0);
        mgp->iomem_base = pci_resource_start(pdev, 0);
index c2f09af5c25b9f389ce2eb0bbe85487d733d3e1e..4847713211cafa2258b9511cda89f4232a623ebe 100644 (file)
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 {
        int i = 0;
 
-       while (i < 10) {
-               if (i)
-                       ssleep(1);
-
+       do {
                if (ql_sem_lock(qdev,
                                QL_DRVR_SEM_MASK,
                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
                                      "driver lock acquired\n");
                        return 1;
                }
-       }
+               ssleep(1);
+       } while (++i < 10);
 
        netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
        return 0;
index 1aa25b13ace1d2ccce0d5cf10fb07fb3a3158038..2528c3fb6b90b6976017082867327ade23a3d70e 100644 (file)
@@ -505,9 +505,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
        adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
 }
 
-static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+                                              struct net_device *dev,
+                                              netdev_features_t features)
 {
-       return vxlan_gso_check(skb);
+       return vxlan_features_check(skb, features);
 }
 #endif
 
@@ -532,7 +534,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 #ifdef CONFIG_QLCNIC_VXLAN
        .ndo_add_vxlan_port     = qlcnic_add_vxlan_port,
        .ndo_del_vxlan_port     = qlcnic_del_vxlan_port,
-       .ndo_gso_check          = qlcnic_gso_check,
+       .ndo_features_check     = qlcnic_features_check,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = qlcnic_poll_controller,
@@ -2603,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        } else {
                dev_err(&pdev->dev,
                        "%s: failed. Please Reboot\n", __func__);
+               err = -ENODEV;
                goto err_out_free_hw;
        }
 
index 6d0b9dfac313ce8aa4891a665bf5cf3c394f48f0..78bb4ceb1cdd364f3d044f746ada0ff0aef954e3 100644 (file)
@@ -787,10 +787,10 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
        if (rc)
                goto err_out;
 
+       disable_dev_on_err = 1;
        rc = pci_request_regions (pdev, DRV_NAME);
        if (rc)
                goto err_out;
-       disable_dev_on_err = 1;
 
        pci_set_master (pdev);
 
@@ -1110,6 +1110,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
        return 0;
 
 err_out:
+       netif_napi_del(&tp->napi);
        __rtl8139_cleanup_dev (dev);
        pci_disable_device (pdev);
        return i;
@@ -1124,6 +1125,7 @@ static void rtl8139_remove_one(struct pci_dev *pdev)
        assert (dev != NULL);
 
        cancel_delayed_work_sync(&tp->thread);
+       netif_napi_del(&tp->napi);
 
        unregister_netdev (dev);
 
index c29ba80ae02bfde60f41f4118c02bab642303b89..37583a9d88534346922b9f3afd9814dd6fe85a64 100644 (file)
@@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
+       .fdr_value      = 0x00000f0f,
 
        .apr            = 1,
        .mpr            = 1,
@@ -495,6 +496,7 @@ static struct sh_eth_cpu_data r8a779x_data = {
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
+       .fdr_value      = 0x00000f0f,
 
        .apr            = 1,
        .mpr            = 1,
@@ -536,6 +538,8 @@ static struct sh_eth_cpu_data sh7724_data = {
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
 
+       .trscer_err_mask = DESC_I_RINT8,
+
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
@@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 
        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+       if (!cd->trscer_err_mask)
+               cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
 }
 
 static int sh_eth_check_reset(struct net_device *ndev)
@@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        /* Frame recv control (enable multiple-packets per rx irq) */
        sh_eth_write(ndev, RMCR_RNC, RMCR);
 
-       sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+       sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
 
        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
index 22301bf9c21daeb925d75aa7ce5c7a588d977e11..71f5de1171bd93d004beacf880c387a4168eaada 100644 (file)
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
        DESC_I_RINT1 = 0x0001,
 };
 
+#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+
 /* RPADIR */
 enum RPADIR_BIT {
        RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -470,6 +472,9 @@ struct sh_eth_cpu_data {
        unsigned long tx_check;
        unsigned long eesr_err_check;
 
+       /* Error mask */
+       unsigned long trscer_err_mask;
+
        /* hardware features */
        unsigned long irq_flags; /* IRQ configuration flags */
        unsigned no_psr:1;      /* EtherC DO NOT have PSR */
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
deleted file mode 100644 (file)
index f537cbe..0000000
+++ /dev/null
@@ -1,1058 +0,0 @@
-/*
- * Ethernet driver for S6105 on chip network device
- * (c)2008 emlix GmbH http://www.emlix.com
- * Authors:    Oskar Schirmer <oskar@scara.com>
- *             Daniel Gloeckner <dg@emlix.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if.h>
-#include <linux/stddef.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <variant/hardware.h>
-#include <variant/dmac.h>
-
-#define DRV_NAME "s6gmac"
-#define DRV_PRMT DRV_NAME ": "
-
-
-/* register declarations */
-
-#define S6_GMAC_MACCONF1       0x000
-#define S6_GMAC_MACCONF1_TXENA         0
-#define S6_GMAC_MACCONF1_SYNCTX                1
-#define S6_GMAC_MACCONF1_RXENA         2
-#define S6_GMAC_MACCONF1_SYNCRX                3
-#define S6_GMAC_MACCONF1_TXFLOWCTRL    4
-#define S6_GMAC_MACCONF1_RXFLOWCTRL    5
-#define S6_GMAC_MACCONF1_LOOPBACK      8
-#define S6_GMAC_MACCONF1_RESTXFUNC     16
-#define S6_GMAC_MACCONF1_RESRXFUNC     17
-#define S6_GMAC_MACCONF1_RESTXMACCTRL  18
-#define S6_GMAC_MACCONF1_RESRXMACCTRL  19
-#define S6_GMAC_MACCONF1_SIMULRES      30
-#define S6_GMAC_MACCONF1_SOFTRES       31
-#define S6_GMAC_MACCONF2       0x004
-#define S6_GMAC_MACCONF2_FULL          0
-#define S6_GMAC_MACCONF2_CRCENA                1
-#define S6_GMAC_MACCONF2_PADCRCENA     2
-#define S6_GMAC_MACCONF2_LENGTHFCHK    4
-#define S6_GMAC_MACCONF2_HUGEFRAMENA   5
-#define S6_GMAC_MACCONF2_IFMODE                8
-#define S6_GMAC_MACCONF2_IFMODE_NIBBLE         1
-#define S6_GMAC_MACCONF2_IFMODE_BYTE           2
-#define S6_GMAC_MACCONF2_IFMODE_MASK           3
-#define S6_GMAC_MACCONF2_PREAMBLELEN   12
-#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK      0x0F
-#define S6_GMAC_MACIPGIFG      0x008
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK    0x7F
-#define S6_GMAC_MACIPGIFG_MINIFGENFORCE        8
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2        16
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1        24
-#define S6_GMAC_MACHALFDUPLEX  0x00C
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN        0
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK   0x3F
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX  12
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK     0x0F
-#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF        16
-#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF        17
-#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF        18
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA        19
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN        20
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK    0x0F
-#define S6_GMAC_MACMAXFRAMELEN 0x010
-#define S6_GMAC_MACMIICONF     0x020
-#define S6_GMAC_MACMIICONF_CSEL                0
-#define S6_GMAC_MACMIICONF_CSEL_DIV10          0
-#define S6_GMAC_MACMIICONF_CSEL_DIV12          1
-#define S6_GMAC_MACMIICONF_CSEL_DIV14          2
-#define S6_GMAC_MACMIICONF_CSEL_DIV18          3
-#define S6_GMAC_MACMIICONF_CSEL_DIV24          4
-#define S6_GMAC_MACMIICONF_CSEL_DIV34          5
-#define S6_GMAC_MACMIICONF_CSEL_DIV68          6
-#define S6_GMAC_MACMIICONF_CSEL_DIV168         7
-#define S6_GMAC_MACMIICONF_CSEL_MASK           7
-#define S6_GMAC_MACMIICONF_PREAMBLESUPR        4
-#define S6_GMAC_MACMIICONF_SCANAUTOINCR        5
-#define S6_GMAC_MACMIICMD      0x024
-#define S6_GMAC_MACMIICMD_READ         0
-#define S6_GMAC_MACMIICMD_SCAN         1
-#define S6_GMAC_MACMIIADDR     0x028
-#define S6_GMAC_MACMIIADDR_REG         0
-#define S6_GMAC_MACMIIADDR_REG_MASK            0x1F
-#define S6_GMAC_MACMIIADDR_PHY         8
-#define S6_GMAC_MACMIIADDR_PHY_MASK            0x1F
-#define S6_GMAC_MACMIICTRL     0x02C
-#define S6_GMAC_MACMIISTAT     0x030
-#define S6_GMAC_MACMIIINDI     0x034
-#define S6_GMAC_MACMIIINDI_BUSY                0
-#define S6_GMAC_MACMIIINDI_SCAN                1
-#define S6_GMAC_MACMIIINDI_INVAL       2
-#define S6_GMAC_MACINTERFSTAT  0x03C
-#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
-#define S6_GMAC_MACINTERFSTAT_EXCESSDEF        9
-#define S6_GMAC_MACSTATADDR1   0x040
-#define S6_GMAC_MACSTATADDR2   0x044
-
-#define S6_GMAC_FIFOCONF0      0x048
-#define S6_GMAC_FIFOCONF0_HSTRSTWT     0
-#define S6_GMAC_FIFOCONF0_HSTRSTSR     1
-#define S6_GMAC_FIFOCONF0_HSTRSTFR     2
-#define S6_GMAC_FIFOCONF0_HSTRSTST     3
-#define S6_GMAC_FIFOCONF0_HSTRSTFT     4
-#define S6_GMAC_FIFOCONF0_WTMENREQ     8
-#define S6_GMAC_FIFOCONF0_SRFENREQ     9
-#define S6_GMAC_FIFOCONF0_FRFENREQ     10
-#define S6_GMAC_FIFOCONF0_STFENREQ     11
-#define S6_GMAC_FIFOCONF0_FTFENREQ     12
-#define S6_GMAC_FIFOCONF0_WTMENRPLY    16
-#define S6_GMAC_FIFOCONF0_SRFENRPLY    17
-#define S6_GMAC_FIFOCONF0_FRFENRPLY    18
-#define S6_GMAC_FIFOCONF0_STFENRPLY    19
-#define S6_GMAC_FIFOCONF0_FTFENRPLY    20
-#define S6_GMAC_FIFOCONF1      0x04C
-#define S6_GMAC_FIFOCONF2      0x050
-#define S6_GMAC_FIFOCONF2_CFGLWM       0
-#define S6_GMAC_FIFOCONF2_CFGHWM       16
-#define S6_GMAC_FIFOCONF3      0x054
-#define S6_GMAC_FIFOCONF3_CFGFTTH      0
-#define S6_GMAC_FIFOCONF3_CFGHWMFT     16
-#define S6_GMAC_FIFOCONF4      0x058
-#define S6_GMAC_FIFOCONF_RSV_PREVDROP  0
-#define S6_GMAC_FIFOCONF_RSV_RUNT      1
-#define S6_GMAC_FIFOCONF_RSV_FALSECAR  2
-#define S6_GMAC_FIFOCONF_RSV_CODEERR   3
-#define S6_GMAC_FIFOCONF_RSV_CRCERR    4
-#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
-#define S6_GMAC_FIFOCONF_RSV_LENRANGE  6
-#define S6_GMAC_FIFOCONF_RSV_OK                7
-#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
-#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
-#define S6_GMAC_FIFOCONF_RSV_DRIBBLE   10
-#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
-#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
-#define S6_GMAC_FIFOCONF_RSV_UNOPCODE  13
-#define S6_GMAC_FIFOCONF_RSV_VLANTAG   14
-#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
-#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
-#define S6_GMAC_FIFOCONF_RSV_MASK              0x3FFFF
-#define S6_GMAC_FIFOCONF5      0x05C
-#define S6_GMAC_FIFOCONF5_DROPLT64     18
-#define S6_GMAC_FIFOCONF5_CFGBYTM      19
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE   20
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK      0xF
-
-#define S6_GMAC_STAT_REGS      0x080
-#define S6_GMAC_STAT_SIZE_MIN          12
-#define S6_GMAC_STATTR64       0x080
-#define S6_GMAC_STATTR64_SIZE          18
-#define S6_GMAC_STATTR127      0x084
-#define S6_GMAC_STATTR127_SIZE         18
-#define S6_GMAC_STATTR255      0x088
-#define S6_GMAC_STATTR255_SIZE         18
-#define S6_GMAC_STATTR511      0x08C
-#define S6_GMAC_STATTR511_SIZE         18
-#define S6_GMAC_STATTR1K       0x090
-#define S6_GMAC_STATTR1K_SIZE          18
-#define S6_GMAC_STATTRMAX      0x094
-#define S6_GMAC_STATTRMAX_SIZE         18
-#define S6_GMAC_STATTRMGV      0x098
-#define S6_GMAC_STATTRMGV_SIZE         18
-#define S6_GMAC_STATRBYT       0x09C
-#define S6_GMAC_STATRBYT_SIZE          24
-#define S6_GMAC_STATRPKT       0x0A0
-#define S6_GMAC_STATRPKT_SIZE          18
-#define S6_GMAC_STATRFCS       0x0A4
-#define S6_GMAC_STATRFCS_SIZE          12
-#define S6_GMAC_STATRMCA       0x0A8
-#define S6_GMAC_STATRMCA_SIZE          18
-#define S6_GMAC_STATRBCA       0x0AC
-#define S6_GMAC_STATRBCA_SIZE          22
-#define S6_GMAC_STATRXCF       0x0B0
-#define S6_GMAC_STATRXCF_SIZE          18
-#define S6_GMAC_STATRXPF       0x0B4
-#define S6_GMAC_STATRXPF_SIZE          12
-#define S6_GMAC_STATRXUO       0x0B8
-#define S6_GMAC_STATRXUO_SIZE          12
-#define S6_GMAC_STATRALN       0x0BC
-#define S6_GMAC_STATRALN_SIZE          12
-#define S6_GMAC_STATRFLR       0x0C0
-#define S6_GMAC_STATRFLR_SIZE          16
-#define S6_GMAC_STATRCDE       0x0C4
-#define S6_GMAC_STATRCDE_SIZE          12
-#define S6_GMAC_STATRCSE       0x0C8
-#define S6_GMAC_STATRCSE_SIZE          12
-#define S6_GMAC_STATRUND       0x0CC
-#define S6_GMAC_STATRUND_SIZE          12
-#define S6_GMAC_STATROVR       0x0D0
-#define S6_GMAC_STATROVR_SIZE          12
-#define S6_GMAC_STATRFRG       0x0D4
-#define S6_GMAC_STATRFRG_SIZE          12
-#define S6_GMAC_STATRJBR       0x0D8
-#define S6_GMAC_STATRJBR_SIZE          12
-#define S6_GMAC_STATRDRP       0x0DC
-#define S6_GMAC_STATRDRP_SIZE          12
-#define S6_GMAC_STATTBYT       0x0E0
-#define S6_GMAC_STATTBYT_SIZE          24
-#define S6_GMAC_STATTPKT       0x0E4
-#define S6_GMAC_STATTPKT_SIZE          18
-#define S6_GMAC_STATTMCA       0x0E8
-#define S6_GMAC_STATTMCA_SIZE          18
-#define S6_GMAC_STATTBCA       0x0EC
-#define S6_GMAC_STATTBCA_SIZE          18
-#define S6_GMAC_STATTXPF       0x0F0
-#define S6_GMAC_STATTXPF_SIZE          12
-#define S6_GMAC_STATTDFR       0x0F4
-#define S6_GMAC_STATTDFR_SIZE          12
-#define S6_GMAC_STATTEDF       0x0F8
-#define S6_GMAC_STATTEDF_SIZE          12
-#define S6_GMAC_STATTSCL       0x0FC
-#define S6_GMAC_STATTSCL_SIZE          12
-#define S6_GMAC_STATTMCL       0x100
-#define S6_GMAC_STATTMCL_SIZE          12
-#define S6_GMAC_STATTLCL       0x104
-#define S6_GMAC_STATTLCL_SIZE          12
-#define S6_GMAC_STATTXCL       0x108
-#define S6_GMAC_STATTXCL_SIZE          12
-#define S6_GMAC_STATTNCL       0x10C
-#define S6_GMAC_STATTNCL_SIZE          13
-#define S6_GMAC_STATTPFH       0x110
-#define S6_GMAC_STATTPFH_SIZE          12
-#define S6_GMAC_STATTDRP       0x114
-#define S6_GMAC_STATTDRP_SIZE          12
-#define S6_GMAC_STATTJBR       0x118
-#define S6_GMAC_STATTJBR_SIZE          12
-#define S6_GMAC_STATTFCS       0x11C
-#define S6_GMAC_STATTFCS_SIZE          12
-#define S6_GMAC_STATTXCF       0x120
-#define S6_GMAC_STATTXCF_SIZE          12
-#define S6_GMAC_STATTOVR       0x124
-#define S6_GMAC_STATTOVR_SIZE          12
-#define S6_GMAC_STATTUND       0x128
-#define S6_GMAC_STATTUND_SIZE          12
-#define S6_GMAC_STATTFRG       0x12C
-#define S6_GMAC_STATTFRG_SIZE          12
-#define S6_GMAC_STATCARRY(n)   (0x130 + 4*(n))
-#define S6_GMAC_STATCARRYMSK(n)        (0x138 + 4*(n))
-#define S6_GMAC_STATCARRY1_RDRP                0
-#define S6_GMAC_STATCARRY1_RJBR                1
-#define S6_GMAC_STATCARRY1_RFRG                2
-#define S6_GMAC_STATCARRY1_ROVR                3
-#define S6_GMAC_STATCARRY1_RUND                4
-#define S6_GMAC_STATCARRY1_RCSE                5
-#define S6_GMAC_STATCARRY1_RCDE                6
-#define S6_GMAC_STATCARRY1_RFLR                7
-#define S6_GMAC_STATCARRY1_RALN                8
-#define S6_GMAC_STATCARRY1_RXUO                9
-#define S6_GMAC_STATCARRY1_RXPF                10
-#define S6_GMAC_STATCARRY1_RXCF                11
-#define S6_GMAC_STATCARRY1_RBCA                12
-#define S6_GMAC_STATCARRY1_RMCA                13
-#define S6_GMAC_STATCARRY1_RFCS                14
-#define S6_GMAC_STATCARRY1_RPKT                15
-#define S6_GMAC_STATCARRY1_RBYT                16
-#define S6_GMAC_STATCARRY1_TRMGV       25
-#define S6_GMAC_STATCARRY1_TRMAX       26
-#define S6_GMAC_STATCARRY1_TR1K                27
-#define S6_GMAC_STATCARRY1_TR511       28
-#define S6_GMAC_STATCARRY1_TR255       29
-#define S6_GMAC_STATCARRY1_TR127       30
-#define S6_GMAC_STATCARRY1_TR64                31
-#define S6_GMAC_STATCARRY2_TDRP                0
-#define S6_GMAC_STATCARRY2_TPFH                1
-#define S6_GMAC_STATCARRY2_TNCL                2
-#define S6_GMAC_STATCARRY2_TXCL                3
-#define S6_GMAC_STATCARRY2_TLCL                4
-#define S6_GMAC_STATCARRY2_TMCL                5
-#define S6_GMAC_STATCARRY2_TSCL                6
-#define S6_GMAC_STATCARRY2_TEDF                7
-#define S6_GMAC_STATCARRY2_TDFR                8
-#define S6_GMAC_STATCARRY2_TXPF                9
-#define S6_GMAC_STATCARRY2_TBCA                10
-#define S6_GMAC_STATCARRY2_TMCA                11
-#define S6_GMAC_STATCARRY2_TPKT                12
-#define S6_GMAC_STATCARRY2_TBYT                13
-#define S6_GMAC_STATCARRY2_TFRG                14
-#define S6_GMAC_STATCARRY2_TUND                15
-#define S6_GMAC_STATCARRY2_TOVR                16
-#define S6_GMAC_STATCARRY2_TXCF                17
-#define S6_GMAC_STATCARRY2_TFCS                18
-#define S6_GMAC_STATCARRY2_TJBR                19
-
-#define S6_GMAC_HOST_PBLKCTRL  0x140
-#define S6_GMAC_HOST_PBLKCTRL_TXENA    0
-#define S6_GMAC_HOST_PBLKCTRL_RXENA    1
-#define S6_GMAC_HOST_PBLKCTRL_TXSRES   2
-#define S6_GMAC_HOST_PBLKCTRL_RXSRES   3
-#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ   8
-#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ   12
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_16           4
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_32           5
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_64           6
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_128          7
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK         0xF
-#define S6_GMAC_HOST_PBLKCTRL_STATENA  16
-#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ        17
-#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR        18
-#define S6_GMAC_HOST_PBLKCTRL_RGMII    19
-#define S6_GMAC_HOST_INTMASK   0x144
-#define S6_GMAC_HOST_INTSTAT   0x148
-#define S6_GMAC_HOST_INT_TXBURSTOVER   3
-#define S6_GMAC_HOST_INT_TXPREWOVER    4
-#define S6_GMAC_HOST_INT_RXBURSTUNDER  5
-#define S6_GMAC_HOST_INT_RXPOSTRFULL   6
-#define S6_GMAC_HOST_INT_RXPOSTRUNDER  7
-#define S6_GMAC_HOST_RXFIFOHWM 0x14C
-#define S6_GMAC_HOST_CTRLFRAMXP        0x150
-#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
-#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
-
-#define S6_GMAC_BURST_PREWR    0x1B0
-#define S6_GMAC_BURST_PREWR_LEN                0
-#define S6_GMAC_BURST_PREWR_LEN_MASK           ((1 << 20) - 1)
-#define S6_GMAC_BURST_PREWR_CFE                20
-#define S6_GMAC_BURST_PREWR_PPE                21
-#define S6_GMAC_BURST_PREWR_FCS                22
-#define S6_GMAC_BURST_PREWR_PAD                23
-#define S6_GMAC_BURST_POSTRD   0x1D0
-#define S6_GMAC_BURST_POSTRD_LEN       0
-#define S6_GMAC_BURST_POSTRD_LEN_MASK          ((1 << 20) - 1)
-#define S6_GMAC_BURST_POSTRD_DROP      20
-
-
-/* data handling */
-
-#define S6_NUM_TX_SKB  8       /* must be larger than TX fifo size */
-#define S6_NUM_RX_SKB  16
-#define S6_MAX_FRLEN   1536
-
-struct s6gmac {
-       u32 reg;
-       u32 tx_dma;
-       u32 rx_dma;
-       u32 io;
-       u8 tx_chan;
-       u8 rx_chan;
-       spinlock_t lock;
-       u8 tx_skb_i, tx_skb_o;
-       u8 rx_skb_i, rx_skb_o;
-       struct sk_buff *tx_skb[S6_NUM_TX_SKB];
-       struct sk_buff *rx_skb[S6_NUM_RX_SKB];
-       unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
-       unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
-       struct phy_device *phydev;
-       struct {
-               struct mii_bus *bus;
-               int irq[PHY_MAX_ADDR];
-       } mii;
-       struct {
-               unsigned int mbit;
-               u8 giga;
-               u8 isup;
-               u8 full;
-       } link;
-};
-
-static void s6gmac_rx_fillfifo(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       struct sk_buff *skb;
-       while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
-              (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
-              (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
-               pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
-               s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
-                       pd->io, (u32)skb->data, S6_MAX_FRLEN);
-       }
-}
-
-static void s6gmac_rx_interrupt(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       u32 pfx;
-       struct sk_buff *skb;
-       while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
-                       s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
-               skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
-               pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
-               if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
-                       dev_kfree_skb_irq(skb);
-               } else {
-                       skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
-                               & S6_GMAC_BURST_POSTRD_LEN_MASK);
-                       skb->protocol = eth_type_trans(skb, dev);
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       netif_rx(skb);
-               }
-       }
-}
-
-static void s6gmac_tx_interrupt(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
-                       s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
-               dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
-       }
-       if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
-               netif_wake_queue(dev);
-}
-
-struct s6gmac_statinf {
-       unsigned reg_size : 4; /* 0: unused */
-       unsigned reg_off : 6;
-       unsigned net_index : 6;
-};
-
-#define S6_STATS_B (8 * sizeof(u32))
-#define S6_STATS_C(b, r, f) [b] = { \
-       BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
-       BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
-                       >= (1<<4)) + \
-       r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
-       BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
-                       >= ((1<<6)-1)) + \
-       (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
-       BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
-                       % sizeof(unsigned long)) + \
-       BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
-                       / sizeof(unsigned long)) >= (1<<6))) + \
-       BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
-                       != sizeof(unsigned long))) + \
-       (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
-
-static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
-       S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
-}, {
-       S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
-       S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
-} };
-
-static void s6gmac_stats_collect(struct s6gmac *pd,
-               const struct s6gmac_statinf *inf)
-{
-       int b;
-       for (b = 0; b < S6_STATS_B; b++) {
-               if (inf[b].reg_size) {
-                       pd->stats[inf[b].net_index] +=
-                               readl(pd->reg + S6_GMAC_STAT_REGS
-                                       + sizeof(u32) * inf[b].reg_off);
-               }
-       }
-}
-
-static void s6gmac_stats_carry(struct s6gmac *pd,
-               const struct s6gmac_statinf *inf, u32 mask)
-{
-       int b;
-       while (mask) {
-               b = fls(mask) - 1;
-               mask &= ~(1 << b);
-               pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
-       }
-}
-
-static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
-{
-       int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
-               ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
-       return r;
-}
-
-static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
-{
-       u32 mask;
-       mask = s6gmac_stats_pending(pd, carry);
-       if (mask) {
-               writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
-               s6gmac_stats_carry(pd, &statinf[carry][0], mask);
-       }
-}
-
-static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
-{
-       struct net_device *dev = (struct net_device *)dev_id;
-       struct s6gmac *pd = netdev_priv(dev);
-       if (!dev)
-               return IRQ_NONE;
-       spin_lock(&pd->lock);
-       if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
-               s6gmac_rx_interrupt(dev);
-       s6gmac_rx_fillfifo(dev);
-       if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
-               s6gmac_tx_interrupt(dev);
-       s6gmac_stats_interrupt(pd, 0);
-       s6gmac_stats_interrupt(pd, 1);
-       spin_unlock(&pd->lock);
-       return IRQ_HANDLED;
-}
-
-static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
-       u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
-{
-       writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
-       writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
-       writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
-       writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
-}
-
-static inline void s6gmac_stop_device(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       writel(0, pd->reg + S6_GMAC_MACCONF1);
-}
-
-static inline void s6gmac_init_device(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       int is_rgmii = !!(pd->phydev->supported
-               & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
-#if 0
-       writel(1 << S6_GMAC_MACCONF1_SYNCTX |
-               1 << S6_GMAC_MACCONF1_SYNCRX |
-               1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
-               1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
-               1 << S6_GMAC_MACCONF1_RESTXFUNC |
-               1 << S6_GMAC_MACCONF1_RESRXFUNC |
-               1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
-               1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
-               pd->reg + S6_GMAC_MACCONF1);
-#endif
-       writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
-       udelay(1000);
-       writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
-               pd->reg + S6_GMAC_MACCONF1);
-       writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
-               1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
-               pd->reg + S6_GMAC_HOST_PBLKCTRL);
-       writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
-               S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
-               1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
-               1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
-               is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
-               pd->reg + S6_GMAC_HOST_PBLKCTRL);
-       writel(1 << S6_GMAC_MACCONF1_TXENA |
-               1 << S6_GMAC_MACCONF1_RXENA |
-               (dev->flags & IFF_LOOPBACK ? 1 : 0)
-                       << S6_GMAC_MACCONF1_LOOPBACK,
-               pd->reg + S6_GMAC_MACCONF1);
-       writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
-                       dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
-               pd->reg + S6_GMAC_MACMAXFRAMELEN);
-       writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
-               1 << S6_GMAC_MACCONF2_PADCRCENA |
-               1 << S6_GMAC_MACCONF2_LENGTHFCHK |
-               (pd->link.giga ?
-                       S6_GMAC_MACCONF2_IFMODE_BYTE :
-                       S6_GMAC_MACCONF2_IFMODE_NIBBLE)
-                       << S6_GMAC_MACCONF2_IFMODE |
-               7 << S6_GMAC_MACCONF2_PREAMBLELEN,
-               pd->reg + S6_GMAC_MACCONF2);
-       writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
-       writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
-       writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
-               1 << S6_GMAC_FIFOCONF0_SRFENREQ |
-               1 << S6_GMAC_FIFOCONF0_FRFENREQ |
-               1 << S6_GMAC_FIFOCONF0_STFENREQ |
-               1 << S6_GMAC_FIFOCONF0_FTFENREQ,
-               pd->reg + S6_GMAC_FIFOCONF0);
-       writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
-               128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
-               pd->reg + S6_GMAC_FIFOCONF3);
-       writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
-                       1 << S6_GMAC_FIFOCONF_RSV_RUNT |
-                       1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
-                       1 << S6_GMAC_FIFOCONF_RSV_OK |
-                       1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
-                       1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
-                       1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
-                       1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
-                       1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
-               1 << S6_GMAC_FIFOCONF5_DROPLT64 |
-               pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
-               1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
-               pd->reg + S6_GMAC_FIFOCONF5);
-       writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
-               1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
-               1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
-               1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
-               1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
-               1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
-               1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
-               pd->reg + S6_GMAC_FIFOCONF4);
-       s6gmac_set_dstaddr(pd, 0,
-               0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
-       s6gmac_set_dstaddr(pd, 1,
-               dev->dev_addr[5] |
-               dev->dev_addr[4] << 8 |
-               dev->dev_addr[3] << 16 |
-               dev->dev_addr[2] << 24,
-               dev->dev_addr[1] |
-               dev->dev_addr[0] << 8,
-               0xFFFFFFFF, 0x0000FFFF);
-       s6gmac_set_dstaddr(pd, 2,
-               0x00000000, 0x00000100, 0x00000000, 0x00000100);
-       s6gmac_set_dstaddr(pd, 3,
-               0x00000000, 0x00000000, 0x00000000, 0x00000000);
-       writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
-               1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
-               S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
-               S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
-               1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
-               1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
-               is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
-               pd->reg + S6_GMAC_HOST_PBLKCTRL);
-}
-
-static void s6mii_enable(struct s6gmac *pd)
-{
-       writel(readl(pd->reg + S6_GMAC_MACCONF1) &
-               ~(1 << S6_GMAC_MACCONF1_SOFTRES),
-               pd->reg + S6_GMAC_MACCONF1);
-       writel((readl(pd->reg + S6_GMAC_MACMIICONF)
-               & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
-               | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
-               pd->reg + S6_GMAC_MACMIICONF);
-}
-
-static int s6mii_busy(struct s6gmac *pd, int tmo)
-{
-       while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
-               if (--tmo == 0)
-                       return -ETIME;
-               udelay(64);
-       }
-       return 0;
-}
-
-static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
-       struct s6gmac *pd = bus->priv;
-       s6mii_enable(pd);
-       if (s6mii_busy(pd, 256))
-               return -ETIME;
-       writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
-               regnum << S6_GMAC_MACMIIADDR_REG,
-               pd->reg + S6_GMAC_MACMIIADDR);
-       writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
-       writel(0, pd->reg + S6_GMAC_MACMIICMD);
-       if (s6mii_busy(pd, 256))
-               return -ETIME;
-       return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
-}
-
-static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
-{
-       struct s6gmac *pd = bus->priv;
-       s6mii_enable(pd);
-       if (s6mii_busy(pd, 256))
-               return -ETIME;
-       writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
-               regnum << S6_GMAC_MACMIIADDR_REG,
-               pd->reg + S6_GMAC_MACMIIADDR);
-       writel(value, pd->reg + S6_GMAC_MACMIICTRL);
-       if (s6mii_busy(pd, 256))
-               return -ETIME;
-       return 0;
-}
-
-static int s6mii_reset(struct mii_bus *bus)
-{
-       struct s6gmac *pd = bus->priv;
-       s6mii_enable(pd);
-       if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
-               return -ETIME;
-       return 0;
-}
-
-static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
-{
-       u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
-       pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
-       switch (pd->link.mbit) {
-       case 10:
-               pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
-               break;
-       case 100:
-               pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
-               break;
-       case 1000:
-               pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
-               break;
-       default:
-               return;
-       }
-       writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
-}
-
-static inline void s6gmac_linkisup(struct net_device *dev, int isup)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       struct phy_device *phydev = pd->phydev;
-
-       pd->link.full = phydev->duplex;
-       pd->link.giga = (phydev->speed == 1000);
-       if (pd->link.mbit != phydev->speed) {
-               pd->link.mbit = phydev->speed;
-               s6gmac_set_rgmii_txclock(pd);
-       }
-       pd->link.isup = isup;
-       if (isup)
-               netif_carrier_on(dev);
-       phy_print_status(phydev);
-}
-
-static void s6gmac_adjust_link(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       struct phy_device *phydev = pd->phydev;
-       if (pd->link.isup &&
-                       (!phydev->link ||
-                       (pd->link.mbit != phydev->speed) ||
-                       (pd->link.full != phydev->duplex))) {
-               pd->link.isup = 0;
-               netif_tx_disable(dev);
-               if (!phydev->link) {
-                       netif_carrier_off(dev);
-                       phy_print_status(phydev);
-               }
-       }
-       if (!pd->link.isup && phydev->link) {
-               if (pd->link.full != phydev->duplex) {
-                       u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
-                       if (phydev->duplex)
-                               maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
-                       else
-                               maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
-                       writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
-               }
-
-               if (pd->link.giga != (phydev->speed == 1000)) {
-                       u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
-                       u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
-                       maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
-                                    << S6_GMAC_MACCONF2_IFMODE);
-                       if (phydev->speed == 1000) {
-                               fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
-                               maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
-                                          << S6_GMAC_MACCONF2_IFMODE;
-                       } else {
-                               fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
-                               maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
-                                          << S6_GMAC_MACCONF2_IFMODE;
-                       }
-                       writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
-                       writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
-               }
-
-               if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
-                       netif_wake_queue(dev);
-               s6gmac_linkisup(dev, 1);
-       }
-}
-
-static inline int s6gmac_phy_start(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       int i = 0;
-       struct phy_device *p = NULL;
-       while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
-               i++;
-       p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
-                       PHY_INTERFACE_MODE_RGMII);
-       if (IS_ERR(p)) {
-               printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
-               return PTR_ERR(p);
-       }
-       p->supported &= PHY_GBIT_FEATURES;
-       p->advertising = p->supported;
-       pd->phydev = p;
-       return 0;
-}
-
-static inline void s6gmac_init_stats(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       u32 mask;
-       mask =  1 << S6_GMAC_STATCARRY1_RDRP |
-               1 << S6_GMAC_STATCARRY1_RJBR |
-               1 << S6_GMAC_STATCARRY1_RFRG |
-               1 << S6_GMAC_STATCARRY1_ROVR |
-               1 << S6_GMAC_STATCARRY1_RUND |
-               1 << S6_GMAC_STATCARRY1_RCDE |
-               1 << S6_GMAC_STATCARRY1_RFLR |
-               1 << S6_GMAC_STATCARRY1_RALN |
-               1 << S6_GMAC_STATCARRY1_RMCA |
-               1 << S6_GMAC_STATCARRY1_RFCS |
-               1 << S6_GMAC_STATCARRY1_RPKT |
-               1 << S6_GMAC_STATCARRY1_RBYT;
-       writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
-       writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
-       mask =  1 << S6_GMAC_STATCARRY2_TDRP |
-               1 << S6_GMAC_STATCARRY2_TNCL |
-               1 << S6_GMAC_STATCARRY2_TXCL |
-               1 << S6_GMAC_STATCARRY2_TEDF |
-               1 << S6_GMAC_STATCARRY2_TPKT |
-               1 << S6_GMAC_STATCARRY2_TBYT |
-               1 << S6_GMAC_STATCARRY2_TFRG |
-               1 << S6_GMAC_STATCARRY2_TUND |
-               1 << S6_GMAC_STATCARRY2_TOVR |
-               1 << S6_GMAC_STATCARRY2_TFCS |
-               1 << S6_GMAC_STATCARRY2_TJBR;
-       writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
-       writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
-}
-
-static inline void s6gmac_init_dmac(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
-       s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
-       s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
-       s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
-}
-
-static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&pd->lock, flags);
-       writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
-               0 << S6_GMAC_BURST_PREWR_CFE |
-               1 << S6_GMAC_BURST_PREWR_PPE |
-               1 << S6_GMAC_BURST_PREWR_FCS |
-               ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
-               pd->reg + S6_GMAC_BURST_PREWR);
-       s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
-               (u32)skb->data, pd->io, skb->len);
-       if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
-               netif_stop_queue(dev);
-       if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
-               printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
-                       pd->tx_skb_o, pd->tx_skb_i);
-               BUG();
-       }
-       pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
-       spin_unlock_irqrestore(&pd->lock, flags);
-       return 0;
-}
-
-static void s6gmac_tx_timeout(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       unsigned long flags;
-       spin_lock_irqsave(&pd->lock, flags);
-       s6gmac_tx_interrupt(dev);
-       spin_unlock_irqrestore(&pd->lock, flags);
-}
-
-static int s6gmac_open(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       unsigned long flags;
-       phy_read_status(pd->phydev);
-       spin_lock_irqsave(&pd->lock, flags);
-       pd->link.mbit = 0;
-       s6gmac_linkisup(dev, pd->phydev->link);
-       s6gmac_init_device(dev);
-       s6gmac_init_stats(dev);
-       s6gmac_init_dmac(dev);
-       s6gmac_rx_fillfifo(dev);
-       s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
-               2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
-       s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
-               2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
-       writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
-               0 << S6_GMAC_HOST_INT_TXPREWOVER |
-               0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
-               0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
-               0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
-               pd->reg + S6_GMAC_HOST_INTMASK);
-       spin_unlock_irqrestore(&pd->lock, flags);
-       phy_start(pd->phydev);
-       netif_start_queue(dev);
-       return 0;
-}
-
-static int s6gmac_stop(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       unsigned long flags;
-       netif_stop_queue(dev);
-       phy_stop(pd->phydev);
-       spin_lock_irqsave(&pd->lock, flags);
-       s6gmac_init_dmac(dev);
-       s6gmac_stop_device(dev);
-       while (pd->tx_skb_i != pd->tx_skb_o)
-               dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
-       while (pd->rx_skb_i != pd->rx_skb_o)
-               dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
-       spin_unlock_irqrestore(&pd->lock, flags);
-       return 0;
-}
-
-static struct net_device_stats *s6gmac_stats(struct net_device *dev)
-{
-       struct s6gmac *pd = netdev_priv(dev);
-       struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
-       int i;
-       do {
-               unsigned long flags;
-               spin_lock_irqsave(&pd->lock, flags);
-               for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
-                       pd->stats[i] =
-                               pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
-               s6gmac_stats_collect(pd, &statinf[0][0]);
-               s6gmac_stats_collect(pd, &statinf[1][0]);
-               i = s6gmac_stats_pending(pd, 0) |
-                       s6gmac_stats_pending(pd, 1);
-               spin_unlock_irqrestore(&pd->lock, flags);
-       } while (i);
-       st->rx_errors = st->rx_crc_errors +
-                       st->rx_frame_errors +
-                       st->rx_length_errors +
-                       st->rx_missed_errors;
-       st->tx_errors += st->tx_aborted_errors;
-       return st;
-}
-
-static int s6gmac_probe(struct platform_device *pdev)
-{
-       struct net_device *dev;
-       struct s6gmac *pd;
-       int res;
-       unsigned long i;
-       struct mii_bus *mb;
-
-       dev = alloc_etherdev(sizeof(*pd));
-       if (!dev)
-               return -ENOMEM;
-
-       dev->open = s6gmac_open;
-       dev->stop = s6gmac_stop;
-       dev->hard_start_xmit = s6gmac_tx;
-       dev->tx_timeout = s6gmac_tx_timeout;
-       dev->watchdog_timeo = HZ;
-       dev->get_stats = s6gmac_stats;
-       dev->irq = platform_get_irq(pdev, 0);
-       pd = netdev_priv(dev);
-       memset(pd, 0, sizeof(*pd));
-       spin_lock_init(&pd->lock);
-       pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
-       i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
-       pd->tx_dma = DMA_MASK_DMAC(i);
-       pd->tx_chan = DMA_INDEX_CHNL(i);
-       i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
-       pd->rx_dma = DMA_MASK_DMAC(i);
-       pd->rx_chan = DMA_INDEX_CHNL(i);
-       pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
-       res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
-       if (res) {
-               printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
-               goto errirq;
-       }
-       res = register_netdev(dev);
-       if (res) {
-               printk(KERN_ERR DRV_PRMT "error registering device %s\n",
-                       dev->name);
-               goto errdev;
-       }
-       mb = mdiobus_alloc();
-       if (!mb) {
-               printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
-               res = -ENOMEM;
-               goto errmii;
-       }
-       mb->name = "s6gmac_mii";
-       mb->read = s6mii_read;
-       mb->write = s6mii_write;
-       mb->reset = s6mii_reset;
-       mb->priv = pd;
-       snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
-       mb->phy_mask = ~(1 << 0);
-       mb->irq = &pd->mii.irq[0];
-       for (i = 0; i < PHY_MAX_ADDR; i++) {
-               int n = platform_get_irq(pdev, i + 1);
-               if (n < 0)
-                       n = PHY_POLL;
-               pd->mii.irq[i] = n;
-       }
-       mdiobus_register(mb);
-       pd->mii.bus = mb;
-       res = s6gmac_phy_start(dev);
-       if (res)
-               return res;
-       platform_set_drvdata(pdev, dev);
-       return 0;
-errmii:
-       unregister_netdev(dev);
-errdev:
-       free_irq(dev->irq, dev);
-errirq:
-       free_netdev(dev);
-       return res;
-}
-
-static int s6gmac_remove(struct platform_device *pdev)
-{
-       struct net_device *dev = platform_get_drvdata(pdev);
-       if (dev) {
-               struct s6gmac *pd = netdev_priv(dev);
-               mdiobus_unregister(pd->mii.bus);
-               unregister_netdev(dev);
-               free_irq(dev->irq, dev);
-               free_netdev(dev);
-       }
-       return 0;
-}
-
-static struct platform_driver s6gmac_driver = {
-       .probe = s6gmac_probe,
-       .remove = s6gmac_remove,
-       .driver = {
-               .name = "s6gmac",
-       },
-};
-
-module_platform_driver(s6gmac_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
-MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>");
index 118a427d1942068f7cf37e8a384676bcbd21089e..8c6b7c1651e5f82329882a179fcca12e0a622982 100644 (file)
@@ -1671,7 +1671,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
  *  0 on success and an appropriate (-)ve integer as defined in errno.h
  *  file on failure.
  */
-static int stmmac_hw_setup(struct net_device *dev)
+static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;
@@ -1708,9 +1708,11 @@ static int stmmac_hw_setup(struct net_device *dev)
 
        stmmac_mmc_setup(priv);
 
-       ret = stmmac_init_ptp(priv);
-       if (ret && ret != -EOPNOTSUPP)
-               pr_warn("%s: failed PTP initialisation\n", __func__);
+       if (init_ptp) {
+               ret = stmmac_init_ptp(priv);
+               if (ret && ret != -EOPNOTSUPP)
+                       pr_warn("%s: failed PTP initialisation\n", __func__);
+       }
 
 #ifdef CONFIG_DEBUG_FS
        ret = stmmac_init_fs(dev);
@@ -1787,7 +1789,7 @@ static int stmmac_open(struct net_device *dev)
                goto init_error;
        }
 
-       ret = stmmac_hw_setup(dev);
+       ret = stmmac_hw_setup(dev, true);
        if (ret < 0) {
                pr_err("%s: Hw setup failed\n", __func__);
                goto init_error;
@@ -3036,7 +3038,7 @@ int stmmac_resume(struct net_device *ndev)
        netif_device_attach(ndev);
 
        init_dma_desc_rings(ndev, GFP_ATOMIC);
-       stmmac_hw_setup(ndev);
+       stmmac_hw_setup(ndev, false);
        stmmac_init_tx_coalesce(priv);
 
        napi_enable(&priv->napi);
index 4032b170fe243e230b117c49a420c7e11db21459..3039de2465bac825049e99e2130f49be13291e18 100644 (file)
@@ -430,7 +430,6 @@ static struct platform_driver stmmac_pltfr_driver = {
        .remove = stmmac_pltfr_remove,
        .driver = {
                   .name = STMMAC_RESOURCE_NAME,
-                  .owner = THIS_MODULE,
                   .pm = &stmmac_pltfr_pm_ops,
                   .of_match_table = of_match_ptr(stmmac_dt_ids),
        },
index 45c408ef67d0d904e08732c6c2b4f119a8b98ddd..d2835bf7b4fbef1744bf2bd6d840acfe47863a39 100644 (file)
@@ -1201,6 +1201,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
                segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs)) {
                dev->stats.tx_dropped++;
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
index c560f9aeb55d691f23c65dae362c18defa1e9e44..64d1cef4cda1e52167a36439256fdb7c0d967efc 100644 (file)
@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 
                        /* Clear all mcast from ALE */
                        cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
-                                                priv->host_port);
+                                                priv->host_port, -1);
 
                        /* Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
+       int vid;
+
+       if (priv->data.dual_emac)
+               vid = priv->slaves[priv->emac_port].port_vlan;
+       else
+               vid = priv->data.default_vlan;
 
        if (ndev->flags & IFF_PROMISC) {
                /* Enable promiscuous mode */
@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
        cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
 
        /* Clear all mcast from ALE */
-       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
+                                vid);
 
        if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;
@@ -757,6 +764,14 @@ requeue:
 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
 {
        struct cpsw_priv *priv = dev_id;
+       int value = irq - priv->irqs_table[0];
+
+       /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
+        * is to make sure we will always write the correct value to the EOI
+        * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
+        * for TX Interrupt and 3 for MISC Interrupt.
+        */
+       cpdma_ctlr_eoi(priv->dma, value);
 
        cpsw_intr_disable(priv);
        if (priv->irq_enabled == true) {
@@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
        int                     num_tx, num_rx;
 
        num_tx = cpdma_chan_process(priv->txch, 128);
-       if (num_tx)
-               cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
        num_rx = cpdma_chan_process(priv->rxch, budget);
        if (num_rx < budget) {
@@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
 
                napi_complete(napi);
                cpsw_intr_enable(priv);
-               cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
                prim_cpsw = cpsw_get_slave_priv(priv, 0);
                if (prim_cpsw->irq_enabled == false) {
                        prim_cpsw->irq_enabled = true;
@@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
        napi_enable(&priv->napi);
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
        prim_cpsw = cpsw_get_slave_priv(priv, 0);
        if (prim_cpsw->irq_enabled == false) {
@@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
        cpdma_chan_start(priv->txch);
        cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
 }
 
 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
@@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
        cpsw_interrupt(ndev->irq, priv);
        cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
-       cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
 }
 #endif
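
The cpsw hunks above move the EOI (end-of-interrupt) writes out of the poll, open and timeout paths and into the hard interrupt handler, where the EOI channel is derived from the offset of the firing IRQ within the driver's IRQ table. A small sketch of that offset calculation, assuming, as the added comment implies, that the four IRQ numbers are consecutive; the names below are illustrative only.

#include <assert.h>

/* 0 = RX_THRESH, 1 = RX, 2 = TX, 3 = MISC, mirroring the comment above. */
static int eoi_channel(const int irqs_table[4], int irq)
{
        return irq - irqs_table[0];
}

int main(void)
{
        const int irqs_table[4] = { 40, 41, 42, 43 }; /* made-up IRQ numbers */

        assert(eoi_channel(irqs_table, 41) == 1); /* RX interrupt -> EOI 1 */
        assert(eoi_channel(irqs_table, 42) == 2); /* TX interrupt -> EOI 2 */
        return 0;
}
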
 
index 097ebe7077ac0c8de51e3eb7e8da5809f5e6bcea..5246b3a18ff86e8494d30db90c6154ba9f7ed25f 100644 (file)
@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
                cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
 }
 
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
 {
        u32 ale_entry[ALE_ENTRY_WORDS];
        int ret, idx;
@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
                if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
                        continue;
 
+               /* If the vid passed is -1, remove all multicast entries from
+                * the table irrespective of vlan id. If a valid vlan id is
+                * passed, remove only multicast entries added to that vlan id.
+                * If the vlan id doesn't match, move on to the next entry.
+                */
+               if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+                       continue;
+
                if (cpsw_ale_get_mcast(ale_entry)) {
                        u8 addr[6];
 
index c0d4127aa549285c7e50e47214c1579e17478210..af1e7ecd87c6fbd24b80954c7977e96aa3676a0c 100644 (file)
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
 
 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
                       int flags, u16 vid);
 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
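
cpsw_ale_flush_multicast() now takes a vid argument so callers can flush only the multicast entries that belong to one VLAN, with -1 preserving the old flush-everything behaviour. A toy model of that filter over a plain array; the real code walks ALE hardware entries, so the types and names below are hypothetical.

#include <stdio.h>

struct mcast_entry {
        int vid;
        int in_use;
};

/* vid == -1 means "any VLAN", matching the convention in the hunk above. */
static int flush_multicast(struct mcast_entry *tbl, int n, int vid)
{
        int i, flushed = 0;

        for (i = 0; i < n; i++) {
                if (!tbl[i].in_use)
                        continue;
                if (vid != -1 && tbl[i].vid != vid)
                        continue;        /* keep entries from other VLANs */
                tbl[i].in_use = 0;
                flushed++;
        }
        return flushed;
}

int main(void)
{
        struct mcast_entry tbl[] = { { 100, 1 }, { 200, 1 }, { 100, 1 } };

        printf("flushed %d entries for vid 100\n",
               flush_multicast(tbl, 3, 100));   /* prints "flushed 2 ..." */
        return 0;
}
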
index 9c2d91ea0af48e35020594b73221f4354fc6dc5e..dbcbf0c5bcfa910c49ec81037892a45487aa081e 100644 (file)
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
        lp->regs = of_iomap(op->dev.of_node, 0);
        if (!lp->regs) {
                dev_err(&op->dev, "could not map temac regs.\n");
+               rc = -ENOMEM;
                goto nodev;
        }
 
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
        np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
+               rc = -ENODEV;
                goto err_iounmap;
        }
 
index 44b8d2bad8c3efd09a1d572d2ef7cf36f8d4a227..4c9b4fa1d3c1cbed1fc223634adca0a2cb7b5807 100644 (file)
@@ -388,7 +388,6 @@ struct axidma_bd {
  * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
  * @tx_irq:    Axidma TX IRQ number
  * @rx_irq:    Axidma RX IRQ number
- * @temac_type:        axienet type to identify between soft and hard temac
  * @phy_type:  Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
  * @options:   AxiEthernet option word
  * @last_link: Phy link state in which the PHY was negotiated earlier
@@ -431,7 +430,6 @@ struct axienet_local {
 
        int tx_irq;
        int rx_irq;
-       u32 temac_type;
        u32 phy_type;
 
        u32 options;                    /* Current options word */
index 4ea2d4e6f1d1894f22362e9a2b422392e959331d..a6d2860b712c732c5459bea14647da9825ff1042 100644 (file)
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
        lp->regs = of_iomap(op->dev.of_node, 0);
        if (!lp->regs) {
                dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+               ret = -ENOMEM;
                goto nodev;
        }
        /* Setup checksum offload, but default to off if not specified */
@@ -1555,10 +1556,6 @@ static int axienet_of_probe(struct platform_device *op)
                if ((be32_to_cpup(p)) >= 0x4000)
                        lp->jumbo_support = 1;
        }
-       p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
-                                      NULL);
-       if (p)
-               lp->temac_type = be32_to_cpup(p);
        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
        if (p)
                lp->phy_type = be32_to_cpup(p);
@@ -1567,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
        np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
        if (!np) {
                dev_err(&op->dev, "could not find DMA node\n");
+               ret = -ENODEV;
                goto err_iounmap;
        }
        lp->dma_regs = of_iomap(np, 0);
index 24858799c204fbe2640ad375b5ea75154b6aa795..9d4ce388510a5034b2f29d890645afdda73b23f0 100644 (file)
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(dev, "no IRQ found\n");
+               rc = -ENXIO;
                goto error;
        }
 
index 2f48f790c9b43e983f44107a97f42f2268262099..384ca4f4de4a0e6ee6b053440937d96a272c4850 100644 (file)
@@ -590,6 +590,7 @@ struct nvsp_message {
 
 
 #define NETVSC_RECEIVE_BUFFER_ID               0xcafe
+#define NETVSC_SEND_BUFFER_ID                  0
 
 #define NETVSC_PACKET_SIZE                      4096
 
index dd867e6cabd6167342b7a3875c427b94321650bf..9f49c0129a78a63f9a473012162ab74260f9449a 100644 (file)
@@ -161,8 +161,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
 
        /* Deal with the send buffer we may have setup.
         * If we got a  send section size, it means we received a
-        * SendsendBufferComplete msg (ie sent
-        * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
+        * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
+        * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
         * to send a revoke msg here
         */
        if (net_device->send_section_size) {
@@ -172,7 +172,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
 
                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
-               revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+               revoke_packet->msg.v1_msg.revoke_send_buf.id =
+                       NETVSC_SEND_BUFFER_ID;
 
                ret = vmbus_sendpacket(net_device->dev->channel,
                                       revoke_packet,
@@ -204,7 +205,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
                net_device->send_buf_gpadl_handle = 0;
        }
        if (net_device->send_buf) {
-               /* Free up the receive buffer */
+               /* Free up the send buffer */
                vfree(net_device->send_buf);
                net_device->send_buf = NULL;
        }
@@ -339,9 +340,9 @@ static int netvsc_init_buf(struct hv_device *device)
        init_packet = &net_device->channel_init_pkt;
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
-       init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+       init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
                net_device->send_buf_gpadl_handle;
-       init_packet->msg.v1_msg.send_recv_buf.id = 0;
+       init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
 
        /* Send the gpadl notification request */
        ret = vmbus_sendpacket(device->channel, init_packet,
@@ -364,7 +365,7 @@ static int netvsc_init_buf(struct hv_device *device)
                netdev_err(ndev, "Unable to complete send buffer "
                           "initialization with NetVsp - status %d\n",
                           init_packet->msg.v1_msg.
-                          send_recv_buf_complete.status);
+                          send_send_buf_complete.status);
                ret = -EINVAL;
                goto cleanup;
        }
index c530de1e63f5d5eb5bf0ee6e12a7cb25cded50bb..3ad8ca76196d8254afd41359a437a7716defeecb 100644 (file)
@@ -88,6 +88,7 @@ struct kszphy_priv {
 
 static const struct kszphy_type ksz8021_type = {
        .led_mode_reg           = MII_KSZPHY_CTRL_2,
+       .has_broadcast_disable  = true,
        .has_rmii_ref_clk_sel   = true,
 };
 
@@ -258,19 +259,6 @@ static int kszphy_config_init(struct phy_device *phydev)
        return 0;
 }
 
-static int ksz8021_config_init(struct phy_device *phydev)
-{
-       int rc;
-
-       rc = kszphy_config_init(phydev);
-       if (rc)
-               return rc;
-
-       rc = kszphy_broadcast_disable(phydev);
-
-       return rc < 0 ? rc : 0;
-}
-
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
                                       struct device_node *of_node, u16 reg,
                                       char *field1, char *field2,
@@ -584,7 +572,7 @@ static struct phy_driver ksphy_driver[] = {
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .driver_data    = &ksz8021_type,
        .probe          = kszphy_probe,
-       .config_init    = ksz8021_config_init,
+       .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
@@ -601,7 +589,7 @@ static struct phy_driver ksphy_driver[] = {
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .driver_data    = &ksz8021_type,
        .probe          = kszphy_probe,
-       .config_init    = ksz8021_config_init,
+       .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
index 93e224217e24b36b089102be11ada5921f62d83b..f7ff493f1e73dfa129dbb10ed68c6436c52a4b1b 100644 (file)
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
 static void team_notify_peers_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+       if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
 }
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+       if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
 }
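
Both team work functions now use atomic_dec_if_positive() so count_pending can never be decremented below zero, and the work only notifies and reschedules while decrements actually remain. A user-space model of that counter discipline, with the kernel helper approximated by a compare-and-swap loop:

#include <stdatomic.h>
#include <stdio.h>

/* Simplified model of atomic_dec_if_positive(): decrement only if the value
 * is currently positive and return the new value; a negative return means
 * "nothing was pending, do not touch it". */
static int dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return old - 1;
        }
        return -1;
}

int main(void)
{
        atomic_int pending = 2;

        for (;;) {
                int val = dec_if_positive(&pending);

                if (val < 0)
                        break;              /* nothing pending: skip the notifier */
                puts("notify peers");       /* stands in for call_netdevice_notifiers() */
                if (!val)
                        break;              /* last pending notification was sent */
        }
        return 0;
}
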
index dcb6d33141e0640f545555848434d8efd7822878..1e9cdca370144cffb22141e4c03aa3ff800aefdd 100644 (file)
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
         awd.done = 0;
 
         urb->context = &awd;
-        status = usb_submit_urb(urb, GFP_NOIO);
+        status = usb_submit_urb(urb, GFP_ATOMIC);
         if (status) {
                 // something went wrong
                 usb_free_urb(urb);
index b8a82b86f909095632c7d5747b9bf25cb81c970e..602dc6668c3af7ce9f6cc4ddd61437ba2f6adf29 100644 (file)
@@ -56,6 +56,8 @@ struct qmi_wwan_state {
 /* default ethernet address used by the modem */
 static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
 
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
+
 /* Make up an ethernet header if the packet doesn't have one.
  *
  * A firmware bug common among several devices cause them to send raw
@@ -332,10 +334,12 @@ next_desc:
                usb_driver_release_interface(driver, info->data);
        }
 
-       /* Never use the same address on both ends of the link, even
-        * if the buggy firmware told us to.
+       /* Never use the same address on both ends of the link, even if the
+        * buggy firmware told us to. Or, if the device is assigned the
+        * well-known buggy firmware MAC address, replace it with a random one.
         */
-       if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+       if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+           ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
                eth_hw_addr_random(dev->net);
 
        /* make MAC addr easily distinguishable from an IP header */
index 2d1c77e81836c617364d668eecf064d0256063f5..57ec23e8ccfa4396610bb7bbfe99ec68145cdc12 100644 (file)
@@ -1897,6 +1897,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
        netif_wake_queue(netdev);
 }
 
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+                      netdev_features_t features)
+{
+       u32 mss = skb_shinfo(skb)->gso_size;
+       int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+       int offset = skb_transport_offset(skb);
+
+       if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+               features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+       else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+               features &= ~NETIF_F_GSO_MASK;
+
+       return features;
+}
+
 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
                                      struct net_device *netdev)
 {
@@ -3706,6 +3722,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
        .ndo_set_mac_address    = rtl8152_set_mac_address,
        .ndo_change_mtu         = rtl8152_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
+       .ndo_features_check     = rtl8152_features_check,
 };
 
 static void r8152b_get_version(struct r8152 *tp)
index b8bd7191572dca25315c71558b434902720a63cc..5ca97713bfb33b5d5a7770f3ae6a2000a19df423 100644 (file)
@@ -760,7 +760,6 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
                container_of(napi, struct receive_queue, napi);
        unsigned int r, received = 0;
 
-again:
        received += virtnet_receive(rq, budget - received);
 
        /* Out of packets? */
@@ -771,7 +770,6 @@ again:
                    napi_schedule_prep(napi)) {
                        virtqueue_disable_cb(rq->vq);
                        __napi_schedule(napi);
-                       goto again;
                }
        }
 
index 49d9f229199851c48f5a9e6f1b282b42cedc2a41..7fbd89fbe107878f5c2be4358bb13420ca6389ec 100644 (file)
@@ -1579,8 +1579,10 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
        bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
 
        skb = udp_tunnel_handle_offloads(skb, udp_sum);
-       if (IS_ERR(skb))
-               return -EINVAL;
+       if (IS_ERR(skb)) {
+               err = -EINVAL;
+               goto err;
+       }
 
        skb_scrub_packet(skb, xnet);
 
@@ -1590,12 +1592,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 
        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err))
-               return err;
+       if (unlikely(err)) {
+               kfree_skb(skb);
+               goto err;
+       }
 
        skb = vlan_hwaccel_push_inside(skb);
-       if (WARN_ON(!skb))
-               return -ENOMEM;
+       if (WARN_ON(!skb)) {
+               err = -ENOMEM;
+               goto err;
+       }
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1606,6 +1612,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
        udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
                             ttl, src_port, dst_port);
        return 0;
+err:
+       dst_release(dst);
+       return err;
 }
 #endif
 
@@ -1621,7 +1630,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 
        skb = udp_tunnel_handle_offloads(skb, udp_sum);
        if (IS_ERR(skb))
-               return -EINVAL;
+               return PTR_ERR(skb);
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + VXLAN_HLEN + sizeof(struct iphdr)
@@ -1629,8 +1638,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 
        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err))
+       if (unlikely(err)) {
+               kfree_skb(skb);
                return err;
+       }
 
        skb = vlan_hwaccel_push_inside(skb);
        if (WARN_ON(!skb))
@@ -1776,9 +1787,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                     tos, ttl, df, src_port, dst_port,
                                     htonl(vni << 8),
                                     !net_eq(vxlan->net, dev_net(vxlan->dev)));
-
-               if (err < 0)
+               if (err < 0) {
+                       /* skb is already freed. */
+                       skb = NULL;
                        goto rt_tx_error;
+               }
+
                iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 #if IS_ENABLED(CONFIG_IPV6)
        } else {
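
The vxlan hunks tighten skb ownership on the transmit error paths: once a helper has taken the buffer it frees it itself on failure (and releases the dst), so the caller must forget its pointer before falling through to its generic error handling. A tiny stand-alone illustration of that rule, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct buf { char data[64]; };

/* The helper consumes the buffer on success and on failure alike. */
static int xmit_helper(struct buf *b, int make_it_fail)
{
        if (make_it_fail) {
                free(b);            /* failure: helper frees what it was given */
                return -1;
        }
        free(b);                    /* success: buffer handed on and consumed */
        return 0;
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));

        if (!b)
                return 1;
        if (xmit_helper(b, 1) < 0) {
                b = NULL;           /* drop our reference so the error path cannot double free */
                fprintf(stderr, "tx error\n");
        }
        free(b);                    /* free(NULL) is a no-op, like the shared cleanup path */
        return 0;
}
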
index 3c06e9365949a02e03e96d0f9fb51df9c006451c..9880dae2a56994b1e75a13acbba740e3af0aec7a 100644 (file)
@@ -1070,7 +1070,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
         */
        if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
            ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
-            (sdiodev->pdata->oob_irq_supported)))
+            (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
                bus_if->wowl_supported = true;
 #endif
 
@@ -1167,7 +1167,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 
        brcmf_dbg(SDIO, "Enter\n");
-       if (sdiodev->pdata->oob_irq_supported)
+       if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
                disable_irq_wake(sdiodev->pdata->oob_irq_nr);
        brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
        atomic_set(&sdiodev->suspend, false);
index 91c0cb3c368e07d4dfd889d0ea5c765b395742d6..21de4fe6cf2d0ff87f46ef0b280d2a53f0999dc3 100644 (file)
@@ -65,7 +65,8 @@ config IPW2100_DEBUG
 
 config IPW2200
        tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
-       depends on PCI && CFG80211 && CFG80211_WEXT
+       depends on PCI && CFG80211
+       select CFG80211_WEXT
        select WIRELESS_EXT
        select WEXT_SPY
        select WEXT_PRIV
index e5be2d21868fa975619b72fb8217cad0f54cefe6..a5f9198d57473e6ab18966ca58ec88047f4428dc 100644 (file)
@@ -69,8 +69,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  10
-#define IWL3160_UCODE_API_MAX  10
+#define IWL7260_UCODE_API_MAX  12
+#define IWL3160_UCODE_API_MAX  12
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   10
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
 #define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_7000         0
 
index bf0a95cb71535f390dfcfcc5820d39901359d440..3668fc57e7708be5e655353078f0bb44589ad6a1 100644 (file)
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  10
+#define IWL8000_UCODE_API_MAX  12
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   10
index 38de1513e4dedd5588367e44d2877ceed39234b6..850b85a478063b9a5fc8ddc1e31eedb5b83b18c2 100644 (file)
@@ -1323,10 +1323,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 
  try_again:
        /* try next, if any */
-       kfree(pieces);
        release_firmware(ucode_raw);
        if (iwl_request_firmware(drv, false))
                goto out_unbind;
+       kfree(pieces);
        return;
 
  out_free_fw:
index 9564ae173d060a46bd34c60970ce77d6eb483a73..1f7f15eb86da208df591ae8e7ca27e52c5921acf 100644 (file)
@@ -310,6 +310,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE      (0x01000000)
 
 #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT        28
+#define FH_MEM_TB_MAX_LENGTH                   (0x00020000)
 
 /* TFDB  Area - TFDs buffer table */
 #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
index f2a047f6bb3e519a6ab3ce3032be18aac3aa9aa9..1bbe4fc47b97bcbcd4c828ce094f487c81edd357 100644 (file)
@@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *     longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+ *     regardless of the band or the number of probes. FW will calculate
+ *     the actual dwell time.
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID     = BIT(0),
@@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_LMAC_SCAN             = BIT(6),
        IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF     = BIT(7),
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
+       IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
 };
 
 /**
index 1f2acf47bfb2c78ddb1ee623f9254e35fa8eb109..201846de94e7d949819afacd8f5bac3d9379a6ec 100644 (file)
@@ -672,6 +672,7 @@ struct iwl_scan_channel_opt {
  * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
  * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
  *     and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
  */
 enum iwl_mvm_lmac_scan_flags {
        IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL         = BIT(0),
@@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags {
        IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS   = BIT(4),
        IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED       = BIT(5),
        IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED     = BIT(6),
+       IWL_MVM_LMAC_SCAN_FLAG_MATCH            = BIT(9),
 };
 
 enum iwl_scan_priority {
index 31a5b3f4266c3edaf26a05dfca208ddd2082f8ca..e880f9d4717ba4de642916e252bfaf7279ade237 100644 (file)
@@ -1004,8 +1004,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
 {
        lockdep_assert_held(&mvm->mutex);
 
-       /* disallow low power states when the FW is down */
-       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+       /*
+        * Disallow low power states when the FW is down by taking
+        * the UCODE_DOWN ref. In case of an ongoing hw restart the
+        * ref is already taken, so don't take it again.
+        */
+       if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
        /* async_handlers_wk is now blocked */
 
@@ -1023,6 +1028,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
        /* the fw is stopped, the aux sta is dead: clean up driver state */
        iwl_mvm_del_aux_sta(mvm);
 
+       /*
+        * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+        * won't be called in this case).
+        */
+       clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
        mvm->ucode_loaded = false;
 }
 
index e5294d01181e404c44d78190df5ae869d8c0cd21..ec9a8e7bae1de2934d9fddcd26e7d4b481cdafb9 100644 (file)
@@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
  * already included in the probe template, so we need to set only
  * req->n_ssids - 1 bits in addition to the first bit.
  */
-static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
+                                   enum ieee80211_band band, int n_ssids)
 {
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+               return 10;
        if (band == IEEE80211_BAND_2GHZ)
                return 20  + 3 * (n_ssids + 1);
        return 10  + 2 * (n_ssids + 1);
 }
 
-static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
+                                    enum ieee80211_band band)
 {
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+                       return 110;
        return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
 
@@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                 */
                if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        u32 passive_dwell =
-                               iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ);
+                               iwl_mvm_get_passive_dwell(mvm,
+                                                         IEEE80211_BAND_2GHZ);
                        params->max_out_time = passive_dwell;
                } else {
                        params->passive_fragmented = true;
@@ -348,8 +355,8 @@ not_bound:
                        params->dwell[band].passive = frag_passive_dwell;
                else
                        params->dwell[band].passive =
-                               iwl_mvm_get_passive_dwell(band);
-               params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+                               iwl_mvm_get_passive_dwell(mvm, band);
+               params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
                                                                      n_ssids);
        }
 }
@@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
 
        if (iwl_mvm_scan_pass_all(mvm, req))
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+       else
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
 
        if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
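
The scan changes gate the dwell-time calculation on a new firmware capability bit: when IWL_UCODE_TLV_API_BASIC_DWELL is advertised the driver sends a fixed basic dwell and lets the firmware work out the real value, otherwise the old per-band formula is kept. A sketch of that check; the bit value and formulas mirror the hunks, everything else is a simplified stand-in.

#include <stdint.h>
#include <stdio.h>

#define TLV_API_BASIC_DWELL  (1u << 13)

static unsigned int active_dwell(uint32_t api_flags, int is_2ghz, int n_ssids)
{
        if (api_flags & TLV_API_BASIC_DWELL)
                return 10;                  /* firmware recalculates the real dwell */
        return is_2ghz ? 20 + 3 * (n_ssids + 1) : 10 + 2 * (n_ssids + 1);
}

int main(void)
{
        printf("old fw, 2GHz, 2 SSIDs: %u\n", active_dwell(0, 1, 2));
        printf("new fw, any band:      %u\n", active_dwell(TLV_API_BASIC_DWELL, 1, 2));
        return 0;
}
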
index 4f15d9decc81bd4fe80bd2ac716667e2c3b97aa1..4333306ccdee75a02952288196897d63a349b6b0 100644 (file)
@@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
        }
 
-       /* tid_tspec will default to 0 = BE when QOS isn't enabled */
-       ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+       /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
+       if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+               ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+       else
+               ac = tid_to_mac80211_ac[0];
+
        tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
                        TX_CMD_FLG_BT_PRIO_POS;
 
index e56e77ef5d2e3d28d9a30185ef257aa43912ed23..917431e30f747bf9411c8c33b1f013d18b14f29e 100644 (file)
@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
        if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
                return false;
 
-       if (!mvm->cfg->rx_with_siso_diversity)
+       if (mvm->cfg->rx_with_siso_diversity)
                return false;
 
        ieee80211_iterate_active_interfaces_atomic(
index 3ee8e3848876f48cce72ce98dff2e2bfde875a23..d5aadb00dd9e4a6acf4e29c72dd0d55d59d59203 100644 (file)
@@ -367,7 +367,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
 /* 3165 Series */
        {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -523,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        else if (cfg == &iwl7265_n_cfg)
                cfg_7265d = &iwl7265d_n_cfg;
        if (cfg_7265d &&
-           (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+           (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
                cfg = cfg_7265d;
+               iwl_trans->cfg = cfg_7265d;
+       }
 #endif
 
        pci_set_drvdata(pdev, iwl_trans);
index 5d79a1f44b8e43a723b9de98ca780a9ea5bc5812..523fe0c88dcb2d464ec915085371d376aec22e28 100644 (file)
@@ -614,7 +614,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
 {
        u8 *v_addr;
        dma_addr_t p_addr;
-       u32 offset, chunk_sz = section->len;
+       u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
        int ret = 0;
 
        IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
@@ -1012,16 +1012,21 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
        /* Stop the device, and put it in low power state */
        iwl_pcie_apm_stop(trans);
 
-       /* Upon stop, the APM issues an interrupt if HW RF kill is set.
-        * Clean again the interrupt here
+       /* stop and reset the on-board processor */
+       iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+       udelay(20);
+
+       /*
+        * Upon stop, the APM issues an interrupt if HW RF kill is set.
+        * This is a bug in certain versions of the hardware.
+        * Certain devices also keep sending HW RF kill interrupt all
+        * the time, unless the interrupt is ACKed even if the interrupt
+        * should be masked. Re-ACK all the interrupts here.
         */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
 
-       /* stop and reset the on-board processor */
-       iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-       udelay(20);
 
        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
index 846a2e6e34d855d62726eda65b51ee427bc1a939..c70efb9a6e78ccf22170bb8bbfe0abe89be695ef 100644 (file)
@@ -666,7 +666,8 @@ tx_status_ok:
 }
 
 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
-                                   u8 *entry, int rxring_idx, int desc_idx)
+                                   struct sk_buff *new_skb, u8 *entry,
+                                   int rxring_idx, int desc_idx)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
        u8 tmp_one = 1;
        struct sk_buff *skb;
 
+       if (likely(new_skb)) {
+               skb = new_skb;
+               goto remap;
+       }
        skb = dev_alloc_skb(rtlpci->rxbuffersize);
        if (!skb)
                return 0;
-       rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
 
+remap:
        /* just set skb->cb to mapping addr for pci_unmap_single use */
        *((dma_addr_t *)skb->cb) =
                pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
        bufferaddress = *((dma_addr_t *)skb->cb);
        if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
                return 0;
+       rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
        if (rtlpriv->use_new_trx_flow) {
                rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
                                            HW_DESC_RX_PREPARE,
@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                /*rx pkt */
                struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
                                      rtlpci->rx_ring[rxring_idx].idx];
+               struct sk_buff *new_skb;
 
                if (rtlpriv->use_new_trx_flow) {
                        rx_remained_cnt =
@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
                                 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
 
+               /* get a new skb - if fail, old one will be reused */
+               new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+               if (unlikely(!new_skb)) {
+                       pr_err("Allocation of new skb failed in %s\n",
+                              __func__);
+                       goto no_new;
+               }
                if (rtlpriv->use_new_trx_flow) {
                        buffer_desc =
                          &rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                        schedule_work(&rtlpriv->works.lps_change_work);
                }
 end:
+               skb = new_skb;
+no_new:
                if (rtlpriv->use_new_trx_flow) {
-                       _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+                       _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
                                                 rxring_idx,
-                                              rtlpci->rx_ring[rxring_idx].idx);
+                                                rtlpci->rx_ring[rxring_idx].idx);
                } else {
-                       _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+                       _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+                                                rxring_idx,
                                                 rtlpci->rx_ring[rxring_idx].idx);
-
                        if (rtlpci->rx_ring[rxring_idx].idx ==
                            rtlpci->rxringcount - 1)
                                rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
                rtlpci->rx_ring[rxring_idx].idx = 0;
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
-                       if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+                       if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
                                                      rxring_idx, i))
                                return -ENOMEM;
                }
@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
 
                for (i = 0; i < rtlpci->rxringcount; i++) {
                        entry = &rtlpci->rx_ring[rxring_idx].desc[i];
-                       if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+                       if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
                                                      rxring_idx, i))
                                return -ENOMEM;
                }
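
The rtlwifi RX path now allocates a replacement skb before recycling the descriptor: only if that allocation succeeds is the old buffer handed up the stack, otherwise the old buffer is put back so the ring never ends up with an empty slot. A user-space model of that refill rule; names and sizes are illustrative.

#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 2048

/* Returns the filled buffer to the caller only when a fresh one could be
 * installed in the ring slot; on allocation failure the old buffer stays. */
static void *refill_rx_slot(void **slot)
{
        void *old = *slot;
        void *fresh = malloc(BUF_SIZE);

        if (!fresh)
                return NULL;         /* drop the frame, keep the ring populated */

        *slot = fresh;
        return old;
}

int main(void)
{
        void *slot = malloc(BUF_SIZE);
        void *rx;

        if (!slot)
                return 1;
        rx = refill_rx_slot(&slot);
        if (rx) {
                puts("frame passed up, slot refilled");
                free(rx);
        } else {
                puts("allocation failed, frame dropped, ring still valid");
        }
        free(slot);
        return 0;
}
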
index efbaf2ae1999a97982a8e57274e3cf09f95b42ec..794204e34fba4fb74d1c21f2fef0837ef340feaa 100644 (file)
@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
                }
 
                queue->remaining_credit = credit_bytes;
+               queue->credit_usec = credit_usec;
 
                err = connect_rings(be, queue);
                if (err) {
index 22bcb4e12e2a1318fc1802fb3c5ff6b2cb4acf92..d8c10764f13061fd355e0e36abff7c83d3a458a5 100644 (file)
@@ -88,10 +88,8 @@ struct netfront_cb {
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
 struct netfront_stats {
-       u64                     rx_packets;
-       u64                     tx_packets;
-       u64                     rx_bytes;
-       u64                     tx_bytes;
+       u64                     packets;
+       u64                     bytes;
        struct u64_stats_sync   syncp;
 };
 
@@ -160,7 +158,8 @@ struct netfront_info {
        struct netfront_queue *queues;
 
        /* Statistics */
-       struct netfront_stats __percpu *stats;
+       struct netfront_stats __percpu *rx_stats;
+       struct netfront_stats __percpu *tx_stats;
 
        atomic_t rx_gso_checksum_fixup;
 };
@@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
-       struct netfront_stats *stats = this_cpu_ptr(np->stats);
+       struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
        struct xen_netif_tx_request *tx;
        char *data = skb->data;
        RING_IDX i;
@@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
 
-       u64_stats_update_begin(&stats->syncp);
-       stats->tx_bytes += skb->len;
-       stats->tx_packets++;
-       u64_stats_update_end(&stats->syncp);
+       u64_stats_update_begin(&tx_stats->syncp);
+       tx_stats->bytes += skb->len;
+       tx_stats->packets++;
+       u64_stats_update_end(&tx_stats->syncp);
 
        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(queue);
@@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 static int handle_incoming_queue(struct netfront_queue *queue,
                                 struct sk_buff_head *rxq)
 {
-       struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+       struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
        int packets_dropped = 0;
        struct sk_buff *skb;
 
@@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
                        continue;
                }
 
-               u64_stats_update_begin(&stats->syncp);
-               stats->rx_packets++;
-               stats->rx_bytes += skb->len;
-               u64_stats_update_end(&stats->syncp);
+               u64_stats_update_begin(&rx_stats->syncp);
+               rx_stats->packets++;
+               rx_stats->bytes += skb->len;
+               u64_stats_update_end(&rx_stats->syncp);
 
                /* Pass it up. */
                napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+               struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+               struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;
 
                do {
-                       start = u64_stats_fetch_begin_irq(&stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+                       tx_packets = tx_stats->packets;
+                       tx_bytes = tx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
 
-                       rx_packets = stats->rx_packets;
-                       tx_packets = stats->tx_packets;
-                       rx_bytes = stats->rx_bytes;
-                       tx_bytes = stats->tx_bytes;
-               } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+                       rx_packets = rx_stats->packets;
+                       rx_bytes = rx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
 
                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
@@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = {
 #endif
 };
 
+static void xennet_free_netdev(struct net_device *netdev)
+{
+       struct netfront_info *np = netdev_priv(netdev);
+
+       free_percpu(np->rx_stats);
+       free_percpu(np->tx_stats);
+       free_netdev(netdev);
+}
+
 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 {
        int err;
@@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        np->queues = NULL;
 
        err = -ENOMEM;
-       np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
-       if (np->stats == NULL)
+       np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+       if (np->rx_stats == NULL)
+               goto exit;
+       np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+       if (np->tx_stats == NULL)
                goto exit;
 
        netdev->netdev_ops      = &xennet_netdev_ops;
@@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        return netdev;
 
  exit:
-       free_netdev(netdev);
+       xennet_free_netdev(netdev);
        return ERR_PTR(err);
 }
 
@@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev,
        return 0;
 
  fail:
-       free_netdev(netdev);
+       xennet_free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
        return err;
 }
@@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev)
                info->queues = NULL;
        }
 
-       free_percpu(info->stats);
-
-       free_netdev(info->netdev);
+       xennet_free_netdev(info->netdev);
 
        return 0;
 }
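
xen-netfront splits its per-CPU statistics into separate rx and tx structures, each with its own u64_stats_sync, so the receive and transmit hot paths no longer share one update sequence and the 64-bit reader retries each direction independently. A minimal model of the data-layout change; the per-CPU allocation and seqcount machinery are omitted here.

#include <stdint.h>
#include <stdio.h>

struct dir_stats {
        uint64_t packets;
        uint64_t bytes;
};

struct nic_stats {
        struct dir_stats rx;   /* updated only by the receive path */
        struct dir_stats tx;   /* updated only by the transmit path */
};

static void account(struct dir_stats *s, uint64_t len)
{
        s->packets++;
        s->bytes += len;
}

int main(void)
{
        struct nic_stats st = { { 0, 0 }, { 0, 0 } };

        account(&st.tx, 1500);
        account(&st.rx, 60);
        printf("tx %llu pkts / %llu bytes, rx %llu pkts / %llu bytes\n",
               (unsigned long long)st.tx.packets, (unsigned long long)st.tx.bytes,
               (unsigned long long)st.rx.packets, (unsigned long long)st.rx.bytes);
        return 0;
}
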
index e34da13885e8c422a7fdecc153d6407c0b52999f..27fa62ce613618debb31fadb6bb50a51d9e65893 100644 (file)
@@ -1050,7 +1050,8 @@ static int miphy28lp_init(struct phy *phy)
                ret = miphy28lp_init_usb3(miphy_phy);
                break;
        default:
-               return -EINVAL;
+               ret = -EINVAL;
+               break;
        }
 
        mutex_unlock(&miphy_dev->miphy_mutex);
index c96e8183a8ffe061e017976daa40632ac5a22c56..efe724f97e02fbf9eba84c215628993fa3ea1274 100644 (file)
 /**
  * omap_control_pcie_pcs - set the PCS delay count
  * @dev: the control module device
- * @id: index of the pcie PHY (should be 1 or 2)
  * @delay: 8 bit delay value
  */
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+void omap_control_pcie_pcs(struct device *dev, u8 delay)
 {
        u32 val;
        struct omap_control_phy *control_phy;
@@ -55,8 +54,8 @@ void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
 
        val = readl(control_phy->pcie_pcs);
        val &= ~(OMAP_CTRL_PCIE_PCS_MASK <<
-               (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT));
-       val |= delay << (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+               OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+       val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
        writel(val, control_phy->pcie_pcs);
 }
 EXPORT_SYMBOL_GPL(omap_control_pcie_pcs);
index fb02a67c91811e5291757c8fbb330439c3d95e89..a2b08f3ccb031cbf3a484cb0afab5665c0fb3ad7 100644 (file)
@@ -244,7 +244,8 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
        else
                data->num_phys = 3;
 
-       if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy"))
+       if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") ||
+           of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
                data->disc_thresh = 3;
        else
                data->disc_thresh = 2;
index 1387b4d4afe376556661f49938bf71bb85572d94..465de2c800f228d7fa4961083e243349f5fbcd33 100644 (file)
@@ -82,7 +82,6 @@ struct ti_pipe3 {
        struct clk              *refclk;
        struct clk              *div_clk;
        struct pipe3_dpll_map   *dpll_map;
-       u8                      id;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -217,8 +216,13 @@ static int ti_pipe3_init(struct phy *x)
        u32 val;
        int ret = 0;
 
+       /*
+        * Set pcie_pcs register to 0x96 for proper functioning of phy
+        * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
+        * 18-1804.
+        */
        if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
-               omap_control_pcie_pcs(phy->control_dev, phy->id, 0xF1);
+               omap_control_pcie_pcs(phy->control_dev, 0x96);
                return 0;
        }
 
@@ -347,8 +351,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
        }
 
        if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
-               if (of_property_read_u8(node, "id", &phy->id) < 0)
-                       phy->id = 1;
 
                clk = devm_clk_get(phy->dev, "dpll_ref");
                if (IS_ERR(clk)) {
index ba74f0aa60c76ac76d90d2edaff8c219d280e6a3..3c22dbebc80f202087c9940b05e3887a4c002252 100644 (file)
@@ -89,6 +89,7 @@ struct rockchip_iomux {
  * @reg_pull: optional separate register for additional pull settings
  * @clk: clock of the gpio bank
  * @irq: interrupt of the gpio bank
+ * @saved_enables: Saved content of GPIO_INTEN at suspend time.
  * @pin_base: first pin number
  * @nr_pins: number of pins in this bank
  * @name: name of the bank
@@ -107,6 +108,7 @@ struct rockchip_pin_bank {
        struct regmap                   *regmap_pull;
        struct clk                      *clk;
        int                             irq;
+       u32                             saved_enables;
        u32                             pin_base;
        u8                              nr_pins;
        char                            *name;
@@ -1543,6 +1545,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
        return 0;
 }
 
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
+       irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct rockchip_pin_bank *bank = gc->private;
+
+       irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       u32 val;
+
+       irq_gc_lock(gc);
+
+       val = irq_reg_readl(gc, GPIO_INTEN);
+       val &= ~d->mask;
+       irq_reg_writel(gc, val, GPIO_INTEN);
+
+       irq_gc_unlock(gc);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       u32 val;
+
+       irq_gc_lock(gc);
+
+       val = irq_reg_readl(gc, GPIO_INTEN);
+       val |= d->mask;
+       irq_reg_writel(gc, val, GPIO_INTEN);
+
+       irq_gc_unlock(gc);
+}
+
 static int rockchip_interrupts_register(struct platform_device *pdev,
                                                struct rockchip_pinctrl *info)
 {
@@ -1581,12 +1628,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
                gc = irq_get_domain_generic_chip(bank->domain, 0);
                gc->reg_base = bank->reg_base;
                gc->private = bank;
-               gc->chip_types[0].regs.mask = GPIO_INTEN;
+               gc->chip_types[0].regs.mask = GPIO_INTMASK;
                gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
                gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-               gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
-               gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+               gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+               gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+               gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+               gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
                gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+               gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+               gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
                gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
                gc->wake_enabled = IRQ_MSK(bank->nr_pins);
 
index 7c9d51382248d9c5b3069653d72f26e20751c214..9e5ec00084bb1dcc2aad395af7c37d2bf0e3044a 100644 (file)
@@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
                                   struct seq_file *s, unsigned pin_id)
 {
        unsigned long config;
-       st_pinconf_get(pctldev, pin_id, &config);
 
+       mutex_unlock(&pctldev->mutex);
+       st_pinconf_get(pctldev, pin_id, &config);
+       mutex_lock(&pctldev->mutex);
        seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
                "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
                "de:%ld,rt-clk:%ld,rt-delay:%ld]",
@@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = {
 
 static struct irq_chip st_gpio_irqchip = {
        .name           = "GPIO",
+       .irq_disable    = st_gpio_irq_mask,
        .irq_mask       = st_gpio_irq_mask,
        .irq_unmask     = st_gpio_irq_unmask,
        .irq_set_type   = st_gpio_irq_set_type,
index c71443c4f265780b1fe7fa001e0a3fd6101a4886..97b5e4ee1ca40ae4bc5b50ab7e413833de693af7 100644 (file)
@@ -1041,6 +1041,7 @@ static const struct x86_cpu_id rapl_ids[] = {
        RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
        RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
        RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+       RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
        RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
        {}
 };
index c1444c3d84c2823a8b4eaed28e9bf5036290ce26..2809ae0d6bcd9848bd15cbc8d4d45e69df1601a3 100644 (file)
@@ -570,7 +570,7 @@ static struct regulator_ops s2mps14_reg_ops = {
        .enable_mask    = S2MPS14_ENABLE_MASK           \
 }
 
-#define regulator_desc_s2mps14_buck(num, min, step) {          \
+#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \
        .name           = "BUCK"#num,                           \
        .id             = S2MPS14_BUCK##num,                    \
        .ops            = &s2mps14_reg_ops,                     \
@@ -579,7 +579,7 @@ static struct regulator_ops s2mps14_reg_ops = {
        .min_uV         = min,                                  \
        .uV_step        = step,                                 \
        .n_voltages     = S2MPS14_BUCK_N_VOLTAGES,              \
-       .linear_min_sel = S2MPS14_BUCK1235_START_SEL,           \
+       .linear_min_sel = min_sel,                              \
        .ramp_delay     = S2MPS14_BUCK_RAMP_DELAY,              \
        .vsel_reg       = S2MPS14_REG_B1CTRL2 + (num - 1) * 2,  \
        .vsel_mask      = S2MPS14_BUCK_VSEL_MASK,               \
@@ -613,11 +613,16 @@ static const struct regulator_desc s2mps14_regulators[] = {
        regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
        regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
        regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
-       regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
-       regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
+       regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
+                                   S2MPS14_BUCK1235_START_SEL),
+       regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
+                                   S2MPS14_BUCK1235_START_SEL),
+       regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
+                                   S2MPS14_BUCK1235_START_SEL),
+       regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
+                                   S2MPS14_BUCK4_START_SEL),
+       regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
+                                   S2MPS14_BUCK1235_START_SEL),
 };
 
 static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
index eebc52cb69849ea99f07c9c6694389a92e39afa5..3d95c87160b35003635460d5d67379d3da33d27f 100644 (file)
@@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np)
                goto err_alloc;
        }
 
+       spin_lock_init(&data->lock);
+
        data->rcdev.owner = THIS_MODULE;
        data->rcdev.nr_resets = size * 32;
        data->rcdev.ops = &sunxi_reset_ops;
@@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev)
        if (IS_ERR(data->membase))
                return PTR_ERR(data->membase);
 
+       spin_lock_init(&data->lock);
+
        data->rcdev.owner = THIS_MODULE;
        data->rcdev.nr_resets = resource_size(res) * 32;
        data->rcdev.ops = &sunxi_reset_ops;
index 91e97ec0141892cbf4d1676480d5fda3223b0e6b..4d41bf75c23318577638fa45493aab748e0473a7 100644 (file)
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id)
  */
 static inline int ap_test_config_domain(unsigned int domain)
 {
-       if (!ap_configuration)
-               return 1;
-       return ap_test_config(ap_configuration->aqm, domain);
+       if (!ap_configuration)    /* QCI not supported */
+               if (domain < 16)
+                       return 1; /* then domains 0...15 are configured */
+               else
+                       return 0;
+       else
+               return ap_test_config(ap_configuration->aqm, domain);
 }
 
 /**
index 3b73b96619e2deeb82e203927f57904913a51dff..26270c351624f229cf85b6069ffe53ee26d50bef 100644 (file)
@@ -39,7 +39,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.6.0.16"
+#define DRV_VERSION            "1.6.0.17"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
index 2097de42a14775c1b164485482e834198a77f474..155b286f1a9d3cc8b366a6a5b7610ae562337f62 100644 (file)
@@ -1892,6 +1892,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
                goto fnic_abort_cmd_end;
        }
 
+       /* IO out of order */
+
+       if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+               spin_unlock_irqrestore(io_lock, flags);
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "Issuing Host reset due to out of order IO\n");
+
+               if (fnic_host_reset(sc) == FAILED) {
+                       FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                               "fnic_host_reset failed.\n");
+               }
+               ret = FAILED;
+               goto fnic_abort_cmd_end;
+       }
+
        CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
 
        /*
index 12ca291c1380845e45a40b0b4f5f4dd3356ab429..cce1cbc1a9276f4492d6efa16996313c00daabd6 100644 (file)
@@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
         * Return target busy if we've received a non-zero retry_delay_timer
         * in a FCP_RSP.
         */
-       if (time_after(jiffies, fcport->retry_delay_timestamp))
+       if (fcport->retry_delay_timestamp == 0) {
+               /* retry delay not set */
+       } else if (time_after(jiffies, fcport->retry_delay_timestamp))
                fcport->retry_delay_timestamp = 0;
        else
                goto qc24_target_busy;
index e42fff6e8c109d66891bf291c07f7cb14a875b30..8afb01604d515baff0ed4b10e2d526326935a913 100644 (file)
@@ -1041,7 +1041,7 @@ retry:
                }
                /* signal not to enter either branch of the if () below */
                timeleft = 0;
-               rtn = NEEDS_RETRY;
+               rtn = FAILED;
        } else {
                timeleft = wait_for_completion_timeout(&done, timeout);
                rtn = SUCCESS;
@@ -1081,7 +1081,7 @@ retry:
                        rtn = FAILED;
                        break;
                }
-       } else if (!rtn) {
+       } else if (rtn != FAILED) {
                scsi_abort_eh_cmnd(scmd);
                rtn = FAILED;
        }
index 9ea95dd3e2604eea2613a5a15d074c2357fac7dd..6d5c0b8cb0bb47a040d7aec986dbcbae68ffa2b5 100644 (file)
@@ -591,7 +591,6 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
 {
        struct scatterlist *first_chunk = NULL;
-       gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC;
        int ret;
 
        BUG_ON(!nents);
@@ -606,7 +605,7 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
        }
 
        ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
-                              first_chunk, gfp_mask, scsi_sg_alloc);
+                              first_chunk, GFP_ATOMIC, scsi_sg_alloc);
        if (unlikely(ret))
                scsi_free_sgtable(sdb, mq);
        return ret;
index fedab3c21ddf18adcb291a99c49b7b4ac051aa85..399516925d802fea379938b9958778daa7ee37f2 100644 (file)
@@ -2623,8 +2623,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
                                sd_config_discard(sdkp, SD_LBP_WS16);
 
                } else {        /* LBP VPD page tells us what to use */
-
-                       if (sdkp->lbpws)
+                       if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+                               sd_config_discard(sdkp, SD_LBP_UNMAP);
+                       else if (sdkp->lbpws)
                                sd_config_discard(sdkp, SD_LBP_WS16);
                        else if (sdkp->lbpws10)
                                sd_config_discard(sdkp, SD_LBP_WS10);
index b410499cddca9f391a0832ff6e6d6189b7ec1c82..aad6683db81b9a0154d12d3fc71fd5d45fb8bfc3 100644 (file)
@@ -341,7 +341,7 @@ static int img_spfi_start_dma(struct spi_master *master,
                default:
                        rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
                        rxconf.src_addr_width = 1;
-                       rxconf.src_maxburst = 1;
+                       rxconf.src_maxburst = 4;
                }
                dmaengine_slave_config(spfi->rx_ch, &rxconf);
 
@@ -368,7 +368,7 @@ static int img_spfi_start_dma(struct spi_master *master,
                default:
                        txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
                        txconf.dst_addr_width = 1;
-                       txconf.dst_maxburst = 1;
+                       txconf.dst_maxburst = 4;
                        break;
                }
                dmaengine_slave_config(spfi->tx_ch, &txconf);
@@ -390,14 +390,14 @@ static int img_spfi_start_dma(struct spi_master *master,
                dma_async_issue_pending(spfi->rx_ch);
        }
 
+       spfi_start(spfi);
+
        if (xfer->tx_buf) {
                spfi->tx_dma_busy = true;
                dmaengine_submit(txdesc);
                dma_async_issue_pending(spfi->tx_ch);
        }
 
-       spfi_start(spfi);
-
        return 1;
 
 stop_dma:
index 239be7cbe5a83ee5e5a037bec810ee3bd24bf382..96a5fc0878d86d4fc217b30d466621176b1bc24f 100644 (file)
@@ -480,6 +480,8 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
        struct device_node      *np = spi->master->dev.of_node;
        struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
 
+       pm_runtime_get_sync(&p->pdev->dev);
+
        if (!np) {
                /*
                 * Use spi->controller_data for CS (same strategy as spi_gpio),
@@ -498,6 +500,9 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
        if (spi->cs_gpio >= 0)
                gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
 
+
+       pm_runtime_put_sync(&p->pdev->dev);
+
        return 0;
 }
 
index 86c72ba0a0cd5cc019ef068dcec54886f88e7653..f8c5fc371c4cb4ff0b349e9eb2ff871bc60a0574 100644 (file)
@@ -2177,7 +2177,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
                /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
                /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
                /* Select VC1/VC2, CR215 = 0x02->0x06 */
-               bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06);
+               bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
                /* }} */
 
                for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
index c8f739dd346eea7e4fc3287eb2eb5338955a1e46..70f870541f9268b598d78c8bf04792877b635231 100644 (file)
@@ -182,6 +182,14 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
        if (pDevice->byCurrentCh == uConnectionChannel)
                return bResult;
 
+       /* Set VGA to max sensitivity */
+       if (pDevice->bUpdateBBVGA &&
+           pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
+               pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+
+               BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+       }
+
        /* clear NAV */
        MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
 
index 83e4162c0094c4b2bed6853bc8cfb5636011191a..cd1a277d853b5d4cfa6f6e4156761f5d20558e25 100644 (file)
@@ -1232,7 +1232,7 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
        head_td = priv->apCurrTD[dma_idx];
 
-       head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
+       head_td->m_td1TD1.byTCR = 0;
 
        head_td->pTDInfo->skb = skb;
 
@@ -1257,6 +1257,11 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
        priv->bPWBitOn = false;
 
+       /* Set TSR1 & ReqCount in TxDescHead */
+       head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
+       head_td->m_td1TD1.wReqCount =
+                       cpu_to_le16((u16)head_td->pTDInfo->dwReqCount);
+
        head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
 
        if (dma_idx == TYPE_AC0DMA)
@@ -1500,9 +1505,11 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
                if (conf->enable_beacon) {
                        vnt_beacon_enable(priv, vif, conf);
 
-                       MACvRegBitsOn(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+                       MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR,
+                                     TCR_AUTOBCNTX);
                } else {
-                       MACvRegBitsOff(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+                       MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR,
+                                      TCR_AUTOBCNTX);
                }
        }
 
index 61c39dd7ad013c9e40b784c70414fac62bac908b..b5b0155961f22e08959766d01cea8db241893a7e 100644 (file)
@@ -1204,13 +1204,10 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
 
        ptdCurr = (PSTxDesc)pHeadTD;
 
-       ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
+       ptdCurr->pTDInfo->dwReqCount = cbReqCount;
        ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
        ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
        ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
-       /* Set TSR1 & ReqCount in TxDescHead */
-       ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
-       ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
 
        return cbHeaderLength;
 }
index 55f6774f706f729b92fb2045abb7f9a33740c2b4..aebde3289c50de6722062dfdea21fa1c549090cd 100644 (file)
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                goto reject;
        }
        if (!strncmp("=All", text_ptr, 4)) {
-               cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+               cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
        } else if (!strncmp("=iqn.", text_ptr, 5) ||
                   !strncmp("=eui.", text_ptr, 5)) {
-               cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+               cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
        } else {
                pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
                goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                return -ENOMEM;
        }
        /*
-        * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+        * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
         * explicit case..
         */
-       if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+       if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
                text_ptr = strchr(text_in, '=');
                if (!text_ptr) {
                        pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
 
        spin_lock(&tiqn_lock);
        list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
-               if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+               if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
                     strcmp(tiqn->tiqn, text_ptr)) {
                        continue;
                }
@@ -3512,7 +3512,7 @@ eob:
                if (end_of_buf)
                        break;
 
-               if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+               if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
                        break;
        }
        spin_unlock(&tiqn_lock);
index 09a522bae222d190ec92e157a42f13d2e361da4a..cbcff38ac9b7d30cf5b21882efeb34bb4fb39641 100644 (file)
@@ -135,8 +135,8 @@ enum cmd_flags_table {
        ICF_CONTIG_MEMORY                       = 0x00000020,
        ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
        ICF_OOO_CMDSN                           = 0x00000080,
-       IFC_SENDTARGETS_ALL                     = 0x00000100,
-       IFC_SENDTARGETS_SINGLE                  = 0x00000200,
+       ICF_SENDTARGETS_ALL                     = 0x00000100,
+       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
 };
 
 /* struct iscsi_cmd->i_state */
index 7653cfb027a200cbec0dd51c95047708837c7227..58f49ff69b1424bf5feb33ed64eba495d8826851 100644 (file)
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 }
 EXPORT_SYMBOL(se_dev_set_queue_depth);
 
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
-       int block_size = dev->dev_attrib.block_size;
-
-       if (dev->export_count) {
-               pr_err("dev[%p]: Unable to change SE Device"
-                       " fabric_max_sectors while export_count is %d\n",
-                       dev, dev->export_count);
-               return -EINVAL;
-       }
-       if (!fabric_max_sectors) {
-               pr_err("dev[%p]: Illegal ZERO value for"
-                       " fabric_max_sectors\n", dev);
-               return -EINVAL;
-       }
-       if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-               pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
-                       " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
-                               DA_STATUS_MAX_SECTORS_MIN);
-               return -EINVAL;
-       }
-       if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-               pr_err("dev[%p]: Passed fabric_max_sectors: %u"
-                       " greater than DA_STATUS_MAX_SECTORS_MAX:"
-                       " %u\n", dev, fabric_max_sectors,
-                       DA_STATUS_MAX_SECTORS_MAX);
-               return -EINVAL;
-       }
-       /*
-        * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-        */
-       if (!block_size) {
-               block_size = 512;
-               pr_warn("Defaulting to 512 for zero block_size\n");
-       }
-       fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
-                                                     block_size);
-
-       dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
-       pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-                       dev, fabric_max_sectors);
-       return 0;
-}
-EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
-
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
        if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                        dev, dev->export_count);
                return -EINVAL;
        }
-       if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+       if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-                       " greater than fabric_max_sectors: %u\n", dev,
-                       optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+                       " greater than hw_max_sectors: %u\n", dev,
+                       optimal_sectors, dev->dev_attrib.hw_max_sectors);
                return -EINVAL;
        }
 
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        dev->dev_attrib.unmap_granularity_alignment =
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
-       dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
-       dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 
        xcopy_lun = &dev->xcopy_lun;
        xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
+       dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 
        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();
index c2aea099ea4adf7c0ee60ac7949fa02089162932..d836de200a03bcf24be54004df89c7d6d5039030 100644 (file)
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        struct fd_prot fd_prot;
        sense_reason_t rc;
        int ret = 0;
-
+       /*
+        * We are currently limited by the number of iovecs (2048) per
+        * single vfs_[writev,readv] call.
+        */
+       if (cmd->data_length > FD_MAX_BYTES) {
+               pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+                      " FD_MAX_BYTES: %u iovec count limitation\n",
+                       cmd->data_length, FD_MAX_BYTES);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
        &fileio_dev_attrib_hw_block_size.attr,
        &fileio_dev_attrib_block_size.attr,
        &fileio_dev_attrib_hw_max_sectors.attr,
-       &fileio_dev_attrib_fabric_max_sectors.attr,
        &fileio_dev_attrib_optimal_sectors.attr,
        &fileio_dev_attrib_hw_queue_depth.attr,
        &fileio_dev_attrib_queue_depth.attr,
index 3efff94fbd9788838f565218965d180b78a67604..78346b850968ed8da28d88f35cf6a3ac15512a1b 100644 (file)
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
        q = bdev_get_queue(bd);
 
        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
-       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
        /*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
        &iblock_dev_attrib_hw_block_size.attr,
        &iblock_dev_attrib_block_size.attr,
        &iblock_dev_attrib_hw_max_sectors.attr,
-       &iblock_dev_attrib_fabric_max_sectors.attr,
        &iblock_dev_attrib_optimal_sectors.attr,
        &iblock_dev_attrib_hw_queue_depth.attr,
        &iblock_dev_attrib_queue_depth.attr,
index d56f2aaba9af9a6bb4b89d5c1080d426cba84e63..283cf786ef98be3d0594e847cc9749a072986b80 100644 (file)
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
 
                        return 0;
                }
+       } else if (we && registered_nexus) {
+               /*
+                * Reads are allowed for Write Exclusive locks
+                * from all registrants.
+                */
+               if (cmd->data_direction == DMA_FROM_DEVICE) {
+                       pr_debug("Allowing READ CDB: 0x%02x for %s"
+                               " reservation\n", cdb[0],
+                               core_scsi3_pr_dump_type(pr_reg_type));
+
+                       return 0;
+               }
        }
        pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
                " for %s reservation\n", transport_dump_cmd_direction(cmd),
index 60ebd170a561943be8bde26cd40112bbd22d1e8e..98e83ac5661bcfe5b3b7b98d6c9fbf21bb27c14e 100644 (file)
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
        &rd_mcp_dev_attrib_hw_block_size.attr,
        &rd_mcp_dev_attrib_block_size.attr,
        &rd_mcp_dev_attrib_hw_max_sectors.attr,
-       &rd_mcp_dev_attrib_fabric_max_sectors.attr,
        &rd_mcp_dev_attrib_optimal_sectors.attr,
        &rd_mcp_dev_attrib_hw_queue_depth.attr,
        &rd_mcp_dev_attrib_queue_depth.attr,
index 11bea1952435a397172ce69804a1088567bde03f..cd4bed7b27579b14a0f6e517eed23dacb6c1fe02 100644 (file)
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 
        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;
-
-               if (sectors > dev->dev_attrib.fabric_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds fabric_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.fabric_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
-               if (sectors > dev->dev_attrib.hw_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds backend hw_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.hw_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
 check_lba:
                end_lba = dev->transport->get_blocks(dev) + 1;
                if (cmd->t_task_lba + sectors > end_lba) {
index 1307600fe7264cb55234b6b8e88d8cc15878c799..4c71657da56ab3cdc96b5c1f8d784722f10e2c1d 100644 (file)
@@ -505,7 +505,6 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
-       u32 max_sectors;
        int have_tp = 0;
        int opt, min;
 
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
        /*
         * Set MAXIMUM TRANSFER LENGTH
         */
-       max_sectors = min(dev->dev_attrib.fabric_max_sectors,
-                         dev->dev_attrib.hw_max_sectors);
-       put_unaligned_be32(max_sectors, &buf[8]);
+       put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
 
        /*
         * Set OPTIMAL TRANSFER LENGTH
index 8bfa61c9693dbef6fe6267e16a48c566eae6ac4a..1157b559683b1ff437f9eba74626a90f517d0a1e 100644 (file)
@@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
        &tcmu_dev_attrib_hw_block_size.attr,
        &tcmu_dev_attrib_block_size.attr,
        &tcmu_dev_attrib_hw_max_sectors.attr,
-       &tcmu_dev_attrib_fabric_max_sectors.attr,
        &tcmu_dev_attrib_optimal_sectors.attr,
        &tcmu_dev_attrib_hw_queue_depth.attr,
        &tcmu_dev_attrib_queue_depth.attr,
index ad09e51ffae4d097109241d9a19b97c97858109b..f65f0d109fc8c015869a78b355478afb153fde5d 100644 (file)
@@ -4,6 +4,8 @@
  *  Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
  *  Copyright (C) 2012  Amit Daniel <amit.kachhap@linaro.org>
  *
+ *  Copyright (C) 2014  Viresh Kumar <viresh.kumar@linaro.org>
+ *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
 
+/*
+ * Cooling state <-> CPUFreq frequency
+ *
+ * Cooling states are translated to frequencies throughout this driver and this
+ * is the relation between them.
+ *
+ * Highest cooling state corresponds to lowest possible frequency.
+ *
+ * i.e.
+ *     level 0 --> 1st Max Freq
+ *     level 1 --> 2nd Max Freq
+ *     ...
+ */
+
 /**
  * struct cpufreq_cooling_device - data for cooling device with cpufreq
  * @id: unique integer value corresponding to each cpufreq_cooling_device
  *     cooling devices.
  * @cpufreq_val: integer value representing the absolute value of the clipped
  *     frequency.
+ * @max_level: maximum cooling level. One less than total number of valid
+ *     cpufreq frequencies.
  * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @node: list_head to link all cpufreq_cooling_device together.
  *
- * This structure is required for keeping information of each
- * cpufreq_cooling_device registered. In order to prevent corruption of this a
- * mutex lock cooling_cpufreq_lock is used.
+ * This structure is required for keeping information of each registered
+ * cpufreq_cooling_device.
  */
 struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
        unsigned int cpufreq_val;
+       unsigned int max_level;
+       unsigned int *freq_table;       /* In descending order */
        struct cpumask allowed_cpus;
        struct list_head node;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
-static unsigned int cpufreq_dev_count;
-
 static LIST_HEAD(cpufreq_dev_list);
 
 /**
@@ -98,120 +116,30 @@ static void release_idr(struct idr *idr, int id)
 /* Below code defines functions to be used for cpufreq as cooling device */
 
 /**
- * is_cpufreq_valid - function to check frequency transitioning capability.
- * @cpu: cpu for which check is needed.
+ * get_level: Find the level for a particular frequency
+ * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @freq: Frequency
  *
- * This function will check the current state of the system if
- * it is capable of changing the frequency for a given @cpu.
- *
- * Return: 0 if the system is not currently capable of changing
- * the frequency of given cpu. !0 in case the frequency is changeable.
+ * Return: level on success, THERMAL_CSTATE_INVALID on error.
  */
-static int is_cpufreq_valid(int cpu)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+                              unsigned int freq)
 {
-       struct cpufreq_policy policy;
-
-       return !cpufreq_get_policy(&policy, cpu);
-}
-
-enum cpufreq_cooling_property {
-       GET_LEVEL,
-       GET_FREQ,
-       GET_MAXL,
-};
-
-/**
- * get_property - fetch a property of interest for a give cpu.
- * @cpu: cpu for which the property is required
- * @input: query parameter
- * @output: query return
- * @property: type of query (frequency, level, max level)
- *
- * This is the common function to
- * 1. get maximum cpu cooling states
- * 2. translate frequency to cooling state
- * 3. translate cooling state to frequency
- * Note that the code may be not in good shape
- * but it is written in this way in order to:
- * a) reduce duplicate code as most of the code can be shared.
- * b) make sure the logic is consistent when translating between
- *    cooling states and frequencies.
- *
- * Return: 0 on success, -EINVAL when invalid parameters are passed.
- */
-static int get_property(unsigned int cpu, unsigned long input,
-                       unsigned int *output,
-                       enum cpufreq_cooling_property property)
-{
-       int i;
-       unsigned long max_level = 0, level = 0;
-       unsigned int freq = CPUFREQ_ENTRY_INVALID;
-       int descend = -1;
-       struct cpufreq_frequency_table *pos, *table =
-                                       cpufreq_frequency_get_table(cpu);
-
-       if (!output)
-               return -EINVAL;
-
-       if (!table)
-               return -EINVAL;
-
-       cpufreq_for_each_valid_entry(pos, table) {
-               /* ignore duplicate entry */
-               if (freq == pos->frequency)
-                       continue;
-
-               /* get the frequency order */
-               if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
-                       descend = freq > pos->frequency;
-
-               freq = pos->frequency;
-               max_level++;
-       }
-
-       /* No valid cpu frequency entry */
-       if (max_level == 0)
-               return -EINVAL;
+       unsigned long level;
 
-       /* max_level is an index, not a counter */
-       max_level--;
+       for (level = 0; level <= cpufreq_dev->max_level; level++) {
+               if (freq == cpufreq_dev->freq_table[level])
+                       return level;
 
-       /* get max level */
-       if (property == GET_MAXL) {
-               *output = (unsigned int)max_level;
-               return 0;
+               if (freq > cpufreq_dev->freq_table[level])
+                       break;
        }
 
-       if (property == GET_FREQ)
-               level = descend ? input : (max_level - input);
-
-       i = 0;
-       cpufreq_for_each_valid_entry(pos, table) {
-               /* ignore duplicate entry */
-               if (freq == pos->frequency)
-                       continue;
-
-               /* now we have a valid frequency entry */
-               freq = pos->frequency;
-
-               if (property == GET_LEVEL && (unsigned int)input == freq) {
-                       /* get level by frequency */
-                       *output = descend ? i : (max_level - i);
-                       return 0;
-               }
-               if (property == GET_FREQ && level == i) {
-                       /* get frequency by level */
-                       *output = freq;
-                       return 0;
-               }
-               i++;
-       }
-
-       return -EINVAL;
+       return THERMAL_CSTATE_INVALID;
 }
 
 /**
- * cpufreq_cooling_get_level - for a give cpu, return the cooling level.
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
  * @cpu: cpu for which the level is required
  * @freq: the frequency of interest
  *
@@ -223,77 +151,21 @@ static int get_property(unsigned int cpu, unsigned long input,
  */
 unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
-       unsigned int val;
-
-       if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
-               return THERMAL_CSTATE_INVALID;
-
-       return (unsigned long)val;
-}
-EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
-
-/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: cooling level
- *
- * This function matches cooling level with frequency. Based on a cooling level
- * of frequency, equals cooling state of cpu cooling device, it will return
- * the corresponding frequency.
- *     e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
- *
- * Return: 0 on error, the corresponding frequency otherwise.
- */
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
-{
-       int ret = 0;
-       unsigned int freq;
-
-       ret = get_property(cpu, level, &freq, GET_FREQ);
-       if (ret)
-               return 0;
-
-       return freq;
-}
-
-/**
- * cpufreq_apply_cooling - function to apply frequency clipping.
- * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
- *     clipping data.
- * @cooling_state: value of the cooling state.
- *
- * Function used to make sure the cpufreq layer is aware of current thermal
- * limits. The limits are applied by updating the cpufreq policy.
- *
- * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
- * cooling state).
- */
-static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
-                                unsigned long cooling_state)
-{
-       unsigned int cpuid, clip_freq;
-       struct cpumask *mask = &cpufreq_device->allowed_cpus;
-       unsigned int cpu = cpumask_any(mask);
-
-
-       /* Check if the old cooling action is same as new cooling action */
-       if (cpufreq_device->cpufreq_state == cooling_state)
-               return 0;
-
-       clip_freq = get_cpu_frequency(cpu, cooling_state);
-       if (!clip_freq)
-               return -EINVAL;
-
-       cpufreq_device->cpufreq_state = cooling_state;
-       cpufreq_device->cpufreq_val = clip_freq;
+       struct cpufreq_cooling_device *cpufreq_dev;
 
-       for_each_cpu(cpuid, mask) {
-               if (is_cpufreq_valid(cpuid))
-                       cpufreq_update_policy(cpuid);
+       mutex_lock(&cooling_cpufreq_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+                       mutex_unlock(&cooling_cpufreq_lock);
+                       return get_level(cpufreq_dev, freq);
+               }
        }
+       mutex_unlock(&cooling_cpufreq_lock);
 
-       return 0;
+       pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
+       return THERMAL_CSTATE_INVALID;
 }
+EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
 
 /**
  * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
@@ -323,11 +195,6 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                        &cpufreq_dev->allowed_cpus))
                        continue;
 
-               if (!cpufreq_dev->cpufreq_val)
-                       cpufreq_dev->cpufreq_val = get_cpu_frequency(
-                                       cpumask_any(&cpufreq_dev->allowed_cpus),
-                                       cpufreq_dev->cpufreq_state);
-
                max_freq = cpufreq_dev->cpufreq_val;
 
                if (policy->max != max_freq)
@@ -354,19 +221,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
 {
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
-       struct cpumask *mask = &cpufreq_device->allowed_cpus;
-       unsigned int cpu;
-       unsigned int count = 0;
-       int ret;
-
-       cpu = cpumask_any(mask);
-
-       ret = get_property(cpu, 0, &count, GET_MAXL);
 
-       if (count > 0)
-               *state = count;
-
-       return ret;
+       *state = cpufreq_device->max_level;
+       return 0;
 }
 
 /**
@@ -403,8 +260,24 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long state)
 {
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+       unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+       unsigned int clip_freq;
+
+       /* Request state should be less than max_level */
+       if (WARN_ON(state > cpufreq_device->max_level))
+               return -EINVAL;
+
+       /* Check if the old cooling action is same as new cooling action */
+       if (cpufreq_device->cpufreq_state == state)
+               return 0;
 
-       return cpufreq_apply_cooling(cpufreq_device, state);
+       clip_freq = cpufreq_device->freq_table[state];
+       cpufreq_device->cpufreq_state = state;
+       cpufreq_device->cpufreq_val = clip_freq;
+
+       cpufreq_update_policy(cpu);
+
+       return 0;
 }
 
 /* Bind cpufreq callbacks to thermal cooling device ops */
@@ -419,10 +292,25 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
        .notifier_call = cpufreq_thermal_notifier,
 };
 
+static unsigned int find_next_max(struct cpufreq_frequency_table *table,
+                                 unsigned int prev_max)
+{
+       struct cpufreq_frequency_table *pos;
+       unsigned int max = 0;
+
+       cpufreq_for_each_valid_entry(pos, table) {
+               if (pos->frequency > max && pos->frequency < prev_max)
+                       max = pos->frequency;
+       }
+
+       return max;
+}
+
 /**
  * __cpufreq_cooling_register - helper function to create cpufreq cooling device
  * @np: a valid struct device_node to the cooling device device tree node
  * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * Normally this should be the same as cpufreq policy->related_cpus.
  *
  * This interface function registers the cpufreq cooling device with the name
  * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -437,37 +325,42 @@ __cpufreq_cooling_register(struct device_node *np,
                           const struct cpumask *clip_cpus)
 {
        struct thermal_cooling_device *cool_dev;
-       struct cpufreq_cooling_device *cpufreq_dev = NULL;
-       unsigned int min = 0, max = 0;
+       struct cpufreq_cooling_device *cpufreq_dev;
        char dev_name[THERMAL_NAME_LENGTH];
-       int ret = 0, i;
-       struct cpufreq_policy policy;
+       struct cpufreq_frequency_table *pos, *table;
+       unsigned int freq, i;
+       int ret;
 
-       /* Verify that all the clip cpus have same freq_min, freq_max limit */
-       for_each_cpu(i, clip_cpus) {
-               /* continue if cpufreq policy not found and not return error */
-               if (!cpufreq_get_policy(&policy, i))
-                       continue;
-               if (min == 0 && max == 0) {
-                       min = policy.cpuinfo.min_freq;
-                       max = policy.cpuinfo.max_freq;
-               } else {
-                       if (min != policy.cpuinfo.min_freq ||
-                           max != policy.cpuinfo.max_freq)
-                               return ERR_PTR(-EINVAL);
-               }
+       table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+       if (!table) {
+               pr_debug("%s: CPUFreq table not found\n", __func__);
+               return ERR_PTR(-EPROBE_DEFER);
        }
-       cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
-                             GFP_KERNEL);
+
+       cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
        if (!cpufreq_dev)
                return ERR_PTR(-ENOMEM);
 
+       /* Find max levels */
+       cpufreq_for_each_valid_entry(pos, table)
+               cpufreq_dev->max_level++;
+
+       cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+                                         cpufreq_dev->max_level, GFP_KERNEL);
+       if (!cpufreq_dev->freq_table) {
+               cool_dev = ERR_PTR(-ENOMEM);
+               goto free_cdev;
+       }
+
+       /* max_level is an index, not a counter */
+       cpufreq_dev->max_level--;
+
        cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
 
        ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
        if (ret) {
-               kfree(cpufreq_dev);
-               return ERR_PTR(-EINVAL);
+               cool_dev = ERR_PTR(ret);
+               goto free_table;
        }
 
        snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -475,24 +368,43 @@ __cpufreq_cooling_register(struct device_node *np,
 
        cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
                                                      &cpufreq_cooling_ops);
-       if (IS_ERR(cool_dev)) {
-               release_idr(&cpufreq_idr, cpufreq_dev->id);
-               kfree(cpufreq_dev);
-               return cool_dev;
+       if (IS_ERR(cool_dev))
+               goto remove_idr;
+
+       /* Fill freq-table in descending order of frequencies */
+       for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+               freq = find_next_max(table, freq);
+               cpufreq_dev->freq_table[i] = freq;
+
+               /* Warn for duplicate entries */
+               if (!freq)
+                       pr_warn("%s: table has duplicate entries\n", __func__);
+               else
+                       pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }
+
+       cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;
-       cpufreq_dev->cpufreq_state = 0;
+
        mutex_lock(&cooling_cpufreq_lock);
 
        /* Register the notifier for first cpufreq cooling device */
-       if (cpufreq_dev_count == 0)
+       if (list_empty(&cpufreq_dev_list))
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
-       cpufreq_dev_count++;
        list_add(&cpufreq_dev->node, &cpufreq_dev_list);
 
        mutex_unlock(&cooling_cpufreq_lock);
 
+       return cool_dev;
+
+remove_idr:
+       release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_table:
+       kfree(cpufreq_dev->freq_table);
+free_cdev:
+       kfree(cpufreq_dev);
+
        return cool_dev;
 }
 
@@ -554,16 +466,16 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
        cpufreq_dev = cdev->devdata;
        mutex_lock(&cooling_cpufreq_lock);
        list_del(&cpufreq_dev->node);
-       cpufreq_dev_count--;
 
        /* Unregister the notifier for the last cpufreq cooling device */
-       if (cpufreq_dev_count == 0)
+       if (list_empty(&cpufreq_dev_list))
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
        mutex_unlock(&cooling_cpufreq_lock);
 
        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
        release_idr(&cpufreq_idr, cpufreq_dev->id);
+       kfree(cpufreq_dev->freq_table);
        kfree(cpufreq_dev);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
index 000d53e934a0600b7570de851dc08510240208d1..607b62c7e6114cc005ccf597e4f0ccb804e0e99b 100644 (file)
@@ -18,7 +18,6 @@
  */
 
 #include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
 static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
 {
        struct thermal_cooling_device *cdev;
-       struct cpumask mask_val;
-
-       /* make sure cpufreq driver has been initialized */
-       if (!cpufreq_frequency_get_table(0))
-               return -EPROBE_DEFER;
-
-       cpumask_set_cpu(0, &mask_val);
-       cdev = cpufreq_cooling_register(&mask_val);
 
+       cdev = cpufreq_cooling_register(cpu_present_mask);
        if (IS_ERR(cdev)) {
-               dev_err(&pdev->dev, "Failed to register cooling device\n");
-               return PTR_ERR(cdev);
+               int ret = PTR_ERR(cdev);
+
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "Failed to register cooling device %d\n",
+                               ret);
+
+               return ret;
        }
 
        platform_set_drvdata(pdev, cdev);
index 88b32f942dcf72839304dc32db3c10ccf681ee73..2ccbc0788353e9488e3f730de80d52ecafaf9082 100644 (file)
@@ -9,7 +9,6 @@
 
 #include <linux/clk.h>
 #include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/init.h>
@@ -454,15 +453,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
        const struct of_device_id *of_id =
                of_match_device(of_imx_thermal_match, &pdev->dev);
        struct imx_thermal_data *data;
-       struct cpumask clip_cpus;
        struct regmap *map;
        int measure_freq;
        int ret;
 
-       if (!cpufreq_get_current_driver()) {
-               dev_dbg(&pdev->dev, "no cpufreq driver!");
-               return -EPROBE_DEFER;
-       }
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -516,12 +510,13 @@ static int imx_thermal_probe(struct platform_device *pdev)
        regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
        regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
 
-       cpumask_set_cpu(0, &clip_cpus);
-       data->cdev = cpufreq_cooling_register(&clip_cpus);
+       data->cdev = cpufreq_cooling_register(cpu_present_mask);
        if (IS_ERR(data->cdev)) {
                ret = PTR_ERR(data->cdev);
-               dev_err(&pdev->dev,
-                       "failed to register cpufreq cooling device: %d\n", ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to register cpufreq cooling device: %d\n",
+                               ret);
                return ret;
        }
 
@@ -613,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev)
        regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
        regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
        data->mode = THERMAL_DEVICE_DISABLED;
+       clk_disable_unprepare(data->thermal_clk);
 
        return 0;
 }
@@ -622,6 +618,7 @@ static int imx_thermal_resume(struct device *dev)
        struct imx_thermal_data *data = dev_get_drvdata(dev);
        struct regmap *map = data->tempmon;
 
+       clk_prepare_enable(data->thermal_clk);
        /* Enabled thermal sensor after resume */
        regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
        regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
index ffe40bffaf1a2e88acf818b4f611ca7a9e57f42f..d4413698a85f9738d226d5f82793e9d3c6f46abe 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_INT340X_THERMAL)  += int3400_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)  += int3402_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)  += int3403_thermal.o
+obj-$(CONFIG_INT340X_THERMAL)  += processor_thermal_device.o
 obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
index e4e61b3fb11e8b101abbf86cd1b58f0ed5ffb235..2c2ec7666eb182c44c24225609ab9ecd723ea04a 100644 (file)
@@ -82,7 +82,7 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
        struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
 
        if (!acpi_has_method(handle, "_TRT"))
-               return 0;
+               return -ENODEV;
 
        status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
        if (ACPI_FAILURE(status))
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
                        continue;
 
                result = acpi_bus_get_device(trt->source, &adev);
-               if (!result)
-                       acpi_create_platform_device(adev);
-               else
+               if (result)
                        pr_warn("Failed to get source ACPI device\n");
 
                result = acpi_bus_get_device(trt->target, &adev);
-               if (!result)
-                       acpi_create_platform_device(adev);
-               else
+               if (result)
                        pr_warn("Failed to get target ACPI device\n");
        }
 
@@ -167,7 +163,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
                sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
 
        if (!acpi_has_method(handle, "_ART"))
-               return 0;
+               return -ENODEV;
 
        status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
        if (ACPI_FAILURE(status))
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
 
                if (art->source) {
                        result = acpi_bus_get_device(art->source, &adev);
-                       if (!result)
-                               acpi_create_platform_device(adev);
-                       else
+                       if (result)
                                pr_warn("Failed to get source ACPI device\n");
                }
                if (art->target) {
                        result = acpi_bus_get_device(art->target, &adev);
-                       if (!result)
-                               acpi_create_platform_device(adev);
-                       else
+                       if (result)
                                pr_warn("Failed to get target ACPI device\n");
                }
        }
@@ -321,8 +313,8 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
        unsigned long length = 0;
        int count = 0;
        char __user *arg = (void __user *)__arg;
-       struct trt *trts;
-       struct art *arts;
+       struct trt *trts = NULL;
+       struct art *arts = NULL;
 
        switch (cmd) {
        case ACPI_THERMAL_GET_TRT_COUNT:
index dcb306ea14a49008be5df0e3abb521e896d9195e..65a98a97df071cdf343776bc1e959dc9808dbbc4 100644 (file)
@@ -335,7 +335,6 @@ static struct platform_driver int3400_thermal_driver = {
        .remove = int3400_thermal_remove,
        .driver = {
                   .name = "int3400 thermal",
-                  .owner = THIS_MODULE,
                   .acpi_match_table = ACPI_PTR(int3400_thermal_match),
                   },
 };
index a5d08c14ba24a79654fdd1445c90cbcc00db63b9..c5cbc3af3a0539260218492bc5aaa6199d8d5307 100644 (file)
@@ -231,7 +231,6 @@ static struct platform_driver int3402_thermal_driver = {
        .remove = int3402_thermal_remove,
        .driver = {
                   .name = "int3402 thermal",
-                  .owner = THIS_MODULE,
                   .acpi_match_table = int3402_thermal_match,
                   },
 };
index 1bfa6a69e77a15a8d021cb3ed33760b212ba9ce9..0faf500d8a77874d7c1b6c8a1b3e1195fc9e8065 100644 (file)
@@ -301,6 +301,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
 {
        struct int3403_sensor *obj = priv->priv;
 
+       acpi_remove_notify_handler(priv->adev->handle,
+                                  ACPI_DEVICE_NOTIFY, int3403_notify);
        thermal_zone_device_unregister(obj->tzone);
        return 0;
 }
@@ -369,6 +371,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
        p = buf.pointer;
        if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_WARNING "Invalid PPSS data\n");
+               kfree(buf.pointer);
                return -EFAULT;
        }
 
@@ -381,6 +384,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
 
        priv->priv = obj;
 
+       kfree(buf.pointer);
        /* TODO: add ACPI notification support */
 
        return result;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
new file mode 100644 (file)
index 0000000..0fe5dbb
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+ * processor_thermal_device.c
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+/* Broadwell-U/HSB thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
+#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
+
+/* Braswell thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
+
+struct power_config {
+       u32     index;
+       u32     min_uw;
+       u32     max_uw;
+       u32     tmin_us;
+       u32     tmax_us;
+       u32     step_uw;
+};
+
+struct proc_thermal_device {
+       struct device *dev;
+       struct acpi_device *adev;
+       struct power_config power_limits[2];
+};
+
+enum proc_thermal_emum_mode_type {
+       PROC_THERMAL_NONE,
+       PROC_THERMAL_PCI,
+       PROC_THERMAL_PLATFORM_DEV
+};
+
+/*
+ * We can have only one type of enumeration, PCI or Platform,
+ * not both. So we don't need instance specific data.
+ */
+static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
+                                                       PROC_THERMAL_NONE;
+
+#define POWER_LIMIT_SHOW(index, suffix) \
+static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
+                                       struct device_attribute *attr, \
+                                       char *buf) \
+{ \
+       struct pci_dev *pci_dev; \
+       struct platform_device *pdev; \
+       struct proc_thermal_device *proc_dev; \
+\
+       if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
+               pdev = to_platform_device(dev); \
+               proc_dev = platform_get_drvdata(pdev); \
+       } else { \
+               pci_dev = to_pci_dev(dev); \
+               proc_dev = pci_get_drvdata(pci_dev); \
+       } \
+       return sprintf(buf, "%lu\n",\
+       (unsigned long)proc_dev->power_limits[index].suffix * 1000); \
+}
+
+POWER_LIMIT_SHOW(0, min_uw)
+POWER_LIMIT_SHOW(0, max_uw)
+POWER_LIMIT_SHOW(0, step_uw)
+POWER_LIMIT_SHOW(0, tmin_us)
+POWER_LIMIT_SHOW(0, tmax_us)
+
+POWER_LIMIT_SHOW(1, min_uw)
+POWER_LIMIT_SHOW(1, max_uw)
+POWER_LIMIT_SHOW(1, step_uw)
+POWER_LIMIT_SHOW(1, tmin_us)
+POWER_LIMIT_SHOW(1, tmax_us)
+
+static DEVICE_ATTR_RO(power_limit_0_min_uw);
+static DEVICE_ATTR_RO(power_limit_0_max_uw);
+static DEVICE_ATTR_RO(power_limit_0_step_uw);
+static DEVICE_ATTR_RO(power_limit_0_tmin_us);
+static DEVICE_ATTR_RO(power_limit_0_tmax_us);
+
+static DEVICE_ATTR_RO(power_limit_1_min_uw);
+static DEVICE_ATTR_RO(power_limit_1_max_uw);
+static DEVICE_ATTR_RO(power_limit_1_step_uw);
+static DEVICE_ATTR_RO(power_limit_1_tmin_us);
+static DEVICE_ATTR_RO(power_limit_1_tmax_us);
+
+static struct attribute *power_limit_attrs[] = {
+       &dev_attr_power_limit_0_min_uw.attr,
+       &dev_attr_power_limit_1_min_uw.attr,
+       &dev_attr_power_limit_0_max_uw.attr,
+       &dev_attr_power_limit_1_max_uw.attr,
+       &dev_attr_power_limit_0_step_uw.attr,
+       &dev_attr_power_limit_1_step_uw.attr,
+       &dev_attr_power_limit_0_tmin_us.attr,
+       &dev_attr_power_limit_1_tmin_us.attr,
+       &dev_attr_power_limit_0_tmax_us.attr,
+       &dev_attr_power_limit_1_tmax_us.attr,
+       NULL
+};
+
+static struct attribute_group power_limit_attribute_group = {
+       .attrs = power_limit_attrs,
+       .name = "power_limits"
+};
+
+static int proc_thermal_add(struct device *dev,
+                           struct proc_thermal_device **priv)
+{
+       struct proc_thermal_device *proc_priv;
+       struct acpi_device *adev;
+       acpi_status status;
+       struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *elements, *ppcc;
+       union acpi_object *p;
+       int i;
+       int ret;
+
+       adev = ACPI_COMPANION(dev);
+       if (!adev)
+               return -ENODEV;
+
+       status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       p = buf.pointer;
+       if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+               dev_err(dev, "Invalid PPCC data\n");
+               ret = -EFAULT;
+               goto free_buffer;
+       }
+       if (!p->package.count) {
+               dev_err(dev, "Invalid PPCC package size\n");
+               ret = -EFAULT;
+               goto free_buffer;
+       }
+
+       proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+       if (!proc_priv) {
+               ret = -ENOMEM;
+               goto free_buffer;
+       }
+
+       proc_priv->dev = dev;
+       proc_priv->adev = adev;
+
+       for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
+               elements = &(p->package.elements[i+1]);
+               if (elements->type != ACPI_TYPE_PACKAGE ||
+                   elements->package.count != 6) {
+                       ret = -EFAULT;
+                       goto free_buffer;
+               }
+               ppcc = elements->package.elements;
+               proc_priv->power_limits[i].index = ppcc[0].integer.value;
+               proc_priv->power_limits[i].min_uw = ppcc[1].integer.value;
+               proc_priv->power_limits[i].max_uw = ppcc[2].integer.value;
+               proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value;
+               proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value;
+               proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
+       }
+
+       *priv = proc_priv;
+
+       ret = sysfs_create_group(&dev->kobj,
+                                &power_limit_attribute_group);
+
+free_buffer:
+       kfree(buf.pointer);
+
+       return ret;
+}
+
+void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+{
+       sysfs_remove_group(&proc_priv->dev->kobj,
+                          &power_limit_attribute_group);
+}
+
+static int int3401_add(struct platform_device *pdev)
+{
+       struct proc_thermal_device *proc_priv;
+       int ret;
+
+       if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
+               dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
+               return -ENODEV;
+       }
+
+       ret = proc_thermal_add(&pdev->dev, &proc_priv);
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, proc_priv);
+       proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
+
+       return 0;
+}
+
+static int int3401_remove(struct platform_device *pdev)
+{
+       proc_thermal_remove(platform_get_drvdata(pdev));
+
+       return 0;
+}
+
+static int  proc_thermal_pci_probe(struct pci_dev *pdev,
+                                  const struct pci_device_id *unused)
+{
+       struct proc_thermal_device *proc_priv;
+       int ret;
+
+       if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
+               dev_err(&pdev->dev, "error: enumerated as platform dev\n");
+               return -ENODEV;
+       }
+
+       ret = pci_enable_device(pdev);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "error: could not enable device\n");
+               return ret;
+       }
+
+       ret = proc_thermal_add(&pdev->dev, &proc_priv);
+       if (ret) {
+               pci_disable_device(pdev);
+               return ret;
+       }
+
+       pci_set_drvdata(pdev, proc_priv);
+       proc_thermal_emum_mode = PROC_THERMAL_PCI;
+
+       return 0;
+}
+
+static void proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+       proc_thermal_remove(pci_get_drvdata(pdev));
+       pci_disable_device(pdev);
+}
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
+       { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+       .name           = "proc_thermal",
+       .probe          = proc_thermal_pci_probe,
+       .remove         = proc_thermal_pci_remove,
+       .id_table       = proc_thermal_pci_ids,
+};
+
+static const struct acpi_device_id int3401_device_ids[] = {
+       {"INT3401", 0},
+       {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
+
+static struct platform_driver int3401_driver = {
+       .probe = int3401_add,
+       .remove = int3401_remove,
+       .driver = {
+               .name = "int3401 thermal",
+               .acpi_match_table = int3401_device_ids,
+       },
+};
+
+static int __init proc_thermal_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&int3401_driver);
+       if (ret)
+               return ret;
+
+       ret = pci_register_driver(&proc_thermal_pci_driver);
+
+       return ret;
+}
+
+static void __exit proc_thermal_exit(void)
+{
+       platform_driver_unregister(&int3401_driver);
+       pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
index e98b4249187c3eaec106fc3602fe520fa710c95d..6ceebd659dd400423c0640b1d0911da36b441b74 100644 (file)
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x45},
        { X86_VENDOR_INTEL, 6, 0x46},
        { X86_VENDOR_INTEL, 6, 0x4c},
+       { X86_VENDOR_INTEL, 6, 0x56},
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
index e145b66df444e65bb5cc4f9d7e7ed4dce7f76435..d717f3dab6f1410fc955daefb0497c2096298b56 100644 (file)
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
  *
  * Return: pointer to trip points table, NULL otherwise
  */
-const struct thermal_trip * const
+const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *tz)
 {
        struct __thermal_zone *data = tz->devdata;
index 8803e693fe6868a620b76abda347e0e27645d6d3..2580a4872f90febeb5af00136e16054bb59e4903 100644 (file)
@@ -63,7 +63,7 @@ struct rcar_thermal_priv {
        struct mutex lock;
        struct list_head list;
        int id;
-       int ctemp;
+       u32 ctemp;
 };
 
 #define rcar_thermal_for_each_priv(pos, common)        \
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
 {
        struct device *dev = rcar_priv_to_dev(priv);
        int i;
-       int ctemp, old, new;
+       u32 ctemp, old, new;
        int ret = -EINVAL;
 
        mutex_lock(&priv->lock);
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        int i;
        int ret = -ENODEV;
        int idle = IDLE_INTERVAL;
+       u32 enr_bits = 0;
 
        common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
        if (!common)
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 
                /*
                 * platform has IRQ support.
-                * Then, drier use common register
+                * Then, driver uses common registers
                 */
 
                ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                if (IS_ERR(common->base))
                        return PTR_ERR(common->base);
 
-               /* enable temperature comparation */
-               rcar_thermal_common_write(common, ENR, 0x00030303);
-
                idle = 0; /* polling delay is not needed */
        }
 
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                        rcar_thermal_irq_enable(priv);
 
                list_move_tail(&priv->list, &common->head);
+
+               /* update ENR bits */
+               enr_bits |= 3 << (i * 8);
        }
 
+       /* enable temperature comparison */
+       if (irq)
+               rcar_thermal_common_write(common, ENR, enr_bits);
+
        platform_set_drvdata(pdev, common);
 
        dev_info(dev, "%d sensor probed\n", i);
index 1bcddfc60e915e9e6f97594671076330af4adb5a..9c6ce548e36312f95ca49f6352cf4999a1ab0fe0 100644 (file)
@@ -677,7 +677,6 @@ static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
 static struct platform_driver rockchip_thermal_driver = {
        .driver = {
                .name = "rockchip-thermal",
-               .owner = THIS_MODULE,
                .pm = &rockchip_thermal_pm_ops,
                .of_match_table = of_rockchip_thermal_match,
        },
index f760389a204c673738e933c91ace4e742b735811..c43306ecc0abbb111dc4c6bfdda5201b64ae1738 100644 (file)
@@ -1,6 +1,6 @@
 config EXYNOS_THERMAL
        tristate "Exynos thermal management unit driver"
-       depends on ARCH_HAS_BANDGAP && OF
+       depends on OF
        help
          If you say yes here you get support for the TMU (Thermal Management
          Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
index b6be572704a4c7ff97055f1cb273ff3016399469..6dc3815cc73f514c71d16fab40552609fa12ba6d 100644 (file)
@@ -347,7 +347,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
 int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
 {
        int ret;
-       struct cpumask mask_val;
        struct exynos_thermal_zone *th_zone;
 
        if (!sensor_conf || !sensor_conf->read_temperature) {
@@ -367,13 +366,14 @@ int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
         *       sensor
         */
        if (sensor_conf->cooling_data.freq_clip_count > 0) {
-               cpumask_set_cpu(0, &mask_val);
                th_zone->cool_dev[th_zone->cool_dev_size] =
-                                       cpufreq_cooling_register(&mask_val);
+                               cpufreq_cooling_register(cpu_present_mask);
                if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
-                       dev_err(sensor_conf->dev,
-                               "Failed to register cpufreq cooling device\n");
-                       ret = -EINVAL;
+                       ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(sensor_conf->dev,
+                                       "Failed to register cpufreq cooling device: %d\n",
+                                       ret);
                        goto err_unregister;
                }
                th_zone->cool_dev_size++;
index d44d91d681d4333055526c28ff461e73ed709d0c..d2f1e62a42328095a35efb25ca461875e9f87c9f 100644 (file)
@@ -927,7 +927,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        /* Register the sensor with thermal management interface */
        ret = exynos_register_thermal(sensor_conf);
        if (ret) {
-               dev_err(&pdev->dev, "Failed to register thermal interface\n");
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "Failed to register thermal interface: %d\n",
+                               ret);
                goto err_clk;
        }
        data->reg_conf = sensor_conf;
index 84fdf0792e27cf57c979288a6a9b79543628495f..87e0b0782023cb37696a92150d9f0c10bd09b198 100644 (file)
@@ -930,7 +930,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
        struct thermal_zone_device *pos1;
        struct thermal_cooling_device *pos2;
        unsigned long max_state;
-       int result;
+       int result, ret;
 
        if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
                return -EINVAL;
@@ -947,7 +947,9 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
        if (tz != pos1 || cdev != pos2)
                return -EINVAL;
 
-       cdev->ops->get_max_state(cdev, &max_state);
+       ret = cdev->ops->get_max_state(cdev, &max_state);
+       if (ret)
+               return ret;
 
        /* lower default 0, upper default max_state */
        lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
index 9083e75206236c1953e4ad5d7858fbc5f164fad0..0531c752fbbb6680c40e939ad2a14fdc1830f357 100644 (file)
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void);
 void of_thermal_destroy_zones(void);
 int of_thermal_get_ntrips(struct thermal_zone_device *);
 bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
-const struct thermal_trip * const
+const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *);
 #else
 static inline int of_parse_thermal_zones(void) { return 0; }
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
 {
        return 0;
 }
-static inline const struct thermal_trip * const
+static inline const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *tz)
 {
        return NULL;
index 5fd03865e396e373d20e3c8e2dd54c79a26955ee..3fb054a10f6a0fde450e29a98ee4cdf90e18f6c7 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/kernel.h>
 #include <linux/workqueue.h>
 #include <linux/thermal.h>
-#include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/cpu_cooling.h>
 #include <linux/of.h>
@@ -407,17 +406,17 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
        if (!data)
                return -EINVAL;
 
-       if (!cpufreq_get_current_driver()) {
-               dev_dbg(bgp->dev, "no cpufreq driver yet\n");
-               return -EPROBE_DEFER;
-       }
-
        /* Register cooling device */
        data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
        if (IS_ERR(data->cool_dev)) {
-               dev_err(bgp->dev,
-                       "Failed to register cpufreq cooling device\n");
-               return PTR_ERR(data->cool_dev);
+               int ret = PTR_ERR(data->cool_dev);
+
+               if (ret != -EPROBE_DEFER)
+                       dev_err(bgp->dev,
+                               "Failed to register cpu cooling device %d\n",
+                               ret);
+
+               return ret;
        }
        ti_bandgap_set_sensor_data(bgp, id, data);
 
index d2b496750d590c1e06d755b0b563f40c30f8d165..4ddfa60c922205513d16ed74a770eefd111fda83 100644 (file)
@@ -2399,17 +2399,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
 
        poll_wait(file, &tty->read_wait, wait);
        poll_wait(file, &tty->write_wait, wait);
-       if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-               mask |= POLLHUP;
        if (input_available_p(tty, 1))
                mask |= POLLIN | POLLRDNORM;
-       else if (mask & POLLHUP) {
-               tty_flush_to_ldisc(tty);
-               if (input_available_p(tty, 1))
-                       mask |= POLLIN | POLLRDNORM;
-       }
        if (tty->packet && tty->link->ctrl_status)
                mask |= POLLPRI | POLLIN | POLLRDNORM;
+       if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+               mask |= POLLHUP;
        if (tty_hung_up_p(file))
                mask |= POLLHUP;
        if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
index 31feeb2d0a6688513e1dccf82521f281d9842b03..d1f8dc6aabcbe5bca9b6b6bbfc14e6f3f75e60c5 100644 (file)
@@ -1815,7 +1815,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
 }
 
 static int
-pci_wch_ch382_setup(struct serial_private *priv,
+pci_wch_ch38x_setup(struct serial_private *priv,
                     const struct pciserial_board *board,
                     struct uart_8250_port *port, int idx)
 {
@@ -1880,6 +1880,7 @@ pci_wch_ch382_setup(struct serial_private *priv,
 
 #define PCIE_VENDOR_ID_WCH             0x1c00
 #define PCIE_DEVICE_ID_WCH_CH382_2S1P  0x3250
+#define PCIE_DEVICE_ID_WCH_CH384_4S    0x3470
 
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584        0x1584
@@ -2571,13 +2572,21 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_wch_ch353_setup,
        },
-       /* WCH CH382 2S1P card (16750 clone) */
+       /* WCH CH382 2S1P card (16850 clone) */
        {
                .vendor         = PCIE_VENDOR_ID_WCH,
                .device         = PCIE_DEVICE_ID_WCH_CH382_2S1P,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
-               .setup          = pci_wch_ch382_setup,
+               .setup          = pci_wch_ch38x_setup,
+       },
+       /* WCH CH384 4S card (16850 clone) */
+       {
+               .vendor         = PCIE_VENDOR_ID_WCH,
+               .device         = PCIE_DEVICE_ID_WCH_CH384_4S,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_wch_ch38x_setup,
        },
        /*
         * ASIX devices with FIFO bug
@@ -2876,6 +2885,7 @@ enum pci_board_num_t {
        pbn_fintek_4,
        pbn_fintek_8,
        pbn_fintek_12,
+       pbn_wch384_4,
 };
 
 /*
@@ -3675,6 +3685,14 @@ static struct pciserial_board pci_boards[] = {
                .base_baud      = 115200,
                .first_offset   = 0x40,
        },
+
+       [pbn_wch384_4] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 4,
+               .base_baud      = 115200,
+               .uart_offset    = 8,
+               .first_offset   = 0xC0,
+       },
 };
 
 static const struct pci_device_id blacklist[] = {
@@ -3687,6 +3705,7 @@ static const struct pci_device_id blacklist[] = {
        { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
        { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
        { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+       { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
 };
 
 /*
@@ -5400,6 +5419,10 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_b0_bt_2_115200 },
 
+       {       PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0, 0, pbn_wch384_4 },
+
        /*
         * Commtech, Inc. Fastcom adapters
         */
index 19273e31d22426071cc445e0c76655cc6394dbb2..107e807225752623c7f8cae56b17f00d4d003d37 100644 (file)
@@ -1757,32 +1757,43 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
 #endif
 
 #if defined(CONFIG_ARCH_EXYNOS)
+#define EXYNOS_COMMON_SERIAL_DRV_DATA                          \
+       .info = &(struct s3c24xx_uart_info) {                   \
+               .name           = "Samsung Exynos UART",        \
+               .type           = PORT_S3C6400,                 \
+               .has_divslot    = 1,                            \
+               .rx_fifomask    = S5PV210_UFSTAT_RXMASK,        \
+               .rx_fifoshift   = S5PV210_UFSTAT_RXSHIFT,       \
+               .rx_fifofull    = S5PV210_UFSTAT_RXFULL,        \
+               .tx_fifofull    = S5PV210_UFSTAT_TXFULL,        \
+               .tx_fifomask    = S5PV210_UFSTAT_TXMASK,        \
+               .tx_fifoshift   = S5PV210_UFSTAT_TXSHIFT,       \
+               .def_clk_sel    = S3C2410_UCON_CLKSEL0,         \
+               .num_clks       = 1,                            \
+               .clksel_mask    = 0,                            \
+               .clksel_shift   = 0,                            \
+       },                                                      \
+       .def_cfg = &(struct s3c2410_uartcfg) {                  \
+               .ucon           = S5PV210_UCON_DEFAULT,         \
+               .ufcon          = S5PV210_UFCON_DEFAULT,        \
+               .has_fracval    = 1,                            \
+       }                                                       \
+
 static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
-       .info = &(struct s3c24xx_uart_info) {
-               .name           = "Samsung Exynos4 UART",
-               .type           = PORT_S3C6400,
-               .has_divslot    = 1,
-               .rx_fifomask    = S5PV210_UFSTAT_RXMASK,
-               .rx_fifoshift   = S5PV210_UFSTAT_RXSHIFT,
-               .rx_fifofull    = S5PV210_UFSTAT_RXFULL,
-               .tx_fifofull    = S5PV210_UFSTAT_TXFULL,
-               .tx_fifomask    = S5PV210_UFSTAT_TXMASK,
-               .tx_fifoshift   = S5PV210_UFSTAT_TXSHIFT,
-               .def_clk_sel    = S3C2410_UCON_CLKSEL0,
-               .num_clks       = 1,
-               .clksel_mask    = 0,
-               .clksel_shift   = 0,
-       },
-       .def_cfg = &(struct s3c2410_uartcfg) {
-               .ucon           = S5PV210_UCON_DEFAULT,
-               .ufcon          = S5PV210_UFCON_DEFAULT,
-               .has_fracval    = 1,
-       },
+       EXYNOS_COMMON_SERIAL_DRV_DATA,
        .fifosize = { 256, 64, 16, 16 },
 };
+
+static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
+       EXYNOS_COMMON_SERIAL_DRV_DATA,
+       .fifosize = { 64, 256, 16, 256 },
+};
+
 #define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
+#define EXYNOS5433_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos5433_serial_drv_data)
 #else
 #define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL
 #endif
 
 static struct platform_device_id s3c24xx_serial_driver_ids[] = {
@@ -1804,6 +1815,9 @@ static struct platform_device_id s3c24xx_serial_driver_ids[] = {
        }, {
                .name           = "exynos4210-uart",
                .driver_data    = EXYNOS4210_SERIAL_DRV_DATA,
+       }, {
+               .name           = "exynos5433-uart",
+               .driver_data    = EXYNOS5433_SERIAL_DRV_DATA,
        },
        { },
 };
@@ -1823,6 +1837,8 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
                .data = (void *)S5PV210_SERIAL_DRV_DATA },
        { .compatible = "samsung,exynos4210-uart",
                .data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
+       { .compatible = "samsung,exynos5433-uart",
+               .data = (void *)EXYNOS5433_SERIAL_DRV_DATA },
        {},
 };
 MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
index 57ca61b14670f1a540c3f82a24328a390edcfb3c..984605bb5bf1d593087bfffe485323538144b2c5 100644 (file)
@@ -2164,7 +2164,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
                break;
        }
 
-       dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+       printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+              port->dev ? dev_name(port->dev) : "",
+              port->dev ? ": " : "",
               drv->dev_name,
               drv->tty_driver->name_base + port->line,
               address, port->irq, port->uartclk / 16, uart_type(port));
index 4f35b43e24759c5ab2ae5bcfd7a0fd49672ed3d8..51f066aa375e64789e03b9a527679e3a0a7e1c8d 100644 (file)
@@ -1464,6 +1464,9 @@ static int tty_reopen(struct tty_struct *tty)
            driver->subtype == PTY_TYPE_MASTER)
                return -EIO;
 
+       if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+               return -EBUSY;
+
        tty->count++;
 
        WARN_ON(!tty->ldisc);
@@ -2106,10 +2109,6 @@ retry_open:
                retval = -ENODEV;
        filp->f_flags = saved_flags;
 
-       if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
-                                               !capable(CAP_SYS_ADMIN))
-               retval = -EBUSY;
-
        if (retval) {
 #ifdef TTY_DEBUG_HANGUP
                printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
index 5b9825a4538a67740a3ccf0d1ceea28d08f5d5c5..a57dc8866fc5ff938641686f7945916074723fe8 100644 (file)
@@ -669,7 +669,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
        if (!ci)
                return -ENOMEM;
 
-       platform_set_drvdata(pdev, ci);
        ci->dev = dev;
        ci->platdata = dev_get_platdata(dev);
        ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -783,6 +782,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                }
        }
 
+       platform_set_drvdata(pdev, ci);
        ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
                        ci->platdata->name, ci);
        if (ret)
index c1694cff1eafd287c600435a45c861a4406e6514..48731d0bab357a75232fdbd7b4063fe29af3ad4e 100644 (file)
@@ -91,6 +91,7 @@ static int host_start(struct ci_hdrc *ci)
        if (!hcd)
                return -ENOMEM;
 
+       dev_set_drvdata(ci->dev, ci);
        hcd->rsrc_start = ci->hw_bank.phys;
        hcd->rsrc_len = ci->hw_bank.size;
        hcd->regs = ci->hw_bank.abs;
index 200168ec2d7567e63ce9b8a8fa4afb0009b85831..79242008085bbed84a9e7caf142077215d89aab7 100644 (file)
@@ -2567,7 +2567,7 @@ error:
  * s3c_hsotg_ep_disable - disable given endpoint
  * @ep: The endpoint to disable.
  */
-static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force)
 {
        struct s3c_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hsotg = hs_ep->parent;
@@ -2588,7 +2588,7 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
 
        spin_lock_irqsave(&hsotg->lock, flags);
        /* terminate all requests with shutdown */
-       kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
+       kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, force);
 
        hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
        hs_ep->fifo_index = 0;
@@ -2609,6 +2609,10 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
        return 0;
 }
 
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+       return s3c_hsotg_ep_disable_force(ep, false);
+}
 /**
  * on_list - check request is on the given endpoint
  * @ep: The endpoint to check.
@@ -2924,7 +2928,7 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
 
        /* all endpoints should be shutdown */
        for (ep = 1; ep < hsotg->num_of_eps; ep++)
-               s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+               s3c_hsotg_ep_disable_force(&hsotg->eps[ep].ep, true);
 
        spin_lock_irqsave(&hsotg->lock, flags);
 
index 7c4faf738747bdab2bf5a73e39297f8ae437aeb7..b642a2f998f9eaf8079b36e1b22612042e353288 100644 (file)
@@ -33,6 +33,8 @@
 #define PCI_DEVICE_ID_INTEL_BYT                0x0f37
 #define PCI_DEVICE_ID_INTEL_MRFLD      0x119e
 #define PCI_DEVICE_ID_INTEL_BSW                0x22B7
+#define PCI_DEVICE_ID_INTEL_SPTLP      0x9d30
+#define PCI_DEVICE_ID_INTEL_SPTH       0xa130
 
 struct dwc3_pci {
        struct device           *dev;
@@ -219,6 +221,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
index f03b136ecfce33b3b6936314d1fdebabb4ce936d..8f65ab3a3b928f3872dcc42b1ddfef38d4ba5d26 100644 (file)
@@ -882,8 +882,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
 
                                if (i == (request->num_mapped_sgs - 1) ||
                                                sg_is_last(s)) {
-                                       if (list_is_last(&req->list,
-                                                       &dep->request_list))
+                                       if (list_empty(&dep->request_list))
                                                last_one = true;
                                        chain = false;
                                }
@@ -901,6 +900,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
                                if (last_one)
                                        break;
                        }
+
+                       if (last_one)
+                               break;
                } else {
                        dma = req->request.dma;
                        length = req->request.length;
index 6e04e302dc3a85b0dba95cc90c12e97323fa635c..a1bc3e3a0b09f740342e949024db337e030e9ed9 100644 (file)
@@ -399,8 +399,9 @@ static int hidg_setup(struct usb_function *f,
        value   = __le16_to_cpu(ctrl->wValue);
        length  = __le16_to_cpu(ctrl->wLength);
 
-       VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x "
-               "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value);
+       VDBG(cdev,
+            "%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
+            __func__, ctrl->bRequestType, ctrl->bRequest, value);
 
        switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
        case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
index a90440300735fecaa9502c0234e7abbb81db860b..259b656c0b3ec7bde9e119488f46ded351bb7300 100644 (file)
@@ -520,7 +520,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
                req = midi_alloc_ep_req(ep, midi->buflen);
 
        if (!req) {
-               ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
+               ERROR(midi, "%s: alloc_ep_request failed\n", __func__);
                return;
        }
        req->length = 0;
index f7b20329320583d05c096882d8e04cec7d905097..e9715845f82e1dc825690c05f8cc08c2d8e41df7 100644 (file)
@@ -897,7 +897,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
        struct f_uac1_opts *opts;
 
        opts = container_of(f, struct f_uac1_opts, func_inst);
-       gaudio_cleanup(opts->card);
        if (opts->fn_play_alloc)
                kfree(opts->fn_play);
        if (opts->fn_cap_alloc)
@@ -935,6 +934,7 @@ static void f_audio_free(struct usb_function *f)
        struct f_audio *audio = func_to_audio(f);
        struct f_uac1_opts *opts;
 
+       gaudio_cleanup(&audio->card);
        opts = container_of(f->fi, struct f_uac1_opts, func_inst);
        kfree(audio);
        mutex_lock(&opts->lock);
index c744e4975d744c4fb710a429ec055616fffd4833..db49ec4c748e9469bd694645c8cfc22df7c829fa 100644 (file)
@@ -441,6 +441,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        kbuf = memdup_user(buf, len);
        if (IS_ERR(kbuf)) {
                value = PTR_ERR(kbuf);
+               kbuf = NULL;
                goto free1;
        }
 
@@ -449,6 +450,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                data->name, len, (int) value);
 free1:
        mutex_unlock(&data->lock);
+       kfree (kbuf);
        return value;
 }
 
index ce882371786b184d7b02f4a451d6c8c1b7a80efa..9f93bed42052cb5c910d5f34218287e58a558c6b 100644 (file)
@@ -716,10 +716,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
        req->using_dma = 1;
        req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
                        | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
-                       | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
+                       | USBA_DMA_END_BUF_EN;
 
-       if (ep->is_in)
-               req->ctrl |= USBA_DMA_END_BUF_EN;
+       if (!ep->is_in)
+               req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
 
        /*
         * Add this request to the queue and submit for DMA if
@@ -828,7 +828,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 {
        struct usba_ep *ep = to_usba_ep(_ep);
        struct usba_udc *udc = ep->udc;
-       struct usba_request *req = to_usba_req(_req);
+       struct usba_request *req;
        unsigned long flags;
        u32 status;
 
@@ -837,6 +837,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 
        spin_lock_irqsave(&udc->lock, flags);
 
+       list_for_each_entry(req, &ep->queue, queue) {
+               if (&req->req == _req)
+                       break;
+       }
+
+       if (&req->req != _req) {
+               spin_unlock_irqrestore(&udc->lock, flags);
+               return -EINVAL;
+       }
+
        if (req->using_dma) {
                /*
                 * If this request is currently being transferred,
@@ -1563,7 +1573,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
        if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
                DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
                receive_data(ep);
-               usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
        }
 }
 
index ff67ceac77c410a8f6a9ab27fc63f63afd9dc01b..d4fe8d769bd673c384707fae714944885061911d 100644 (file)
@@ -718,10 +718,11 @@ static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
        struct bdc *bdc;
        int ret = 0;
 
-       bdc = ep->bdc;
        if (!req || !ep || !ep->usb_ep.desc)
                return -EINVAL;
 
+       bdc = ep->bdc;
+
        req->usb_req.actual = 0;
        req->usb_req.status = -EINPROGRESS;
        req->epnum = ep->ep_num;
index e113fd73aeae7148b0cbcd0d424aebb16d84a694..f9a332775c4781e57faf7bd584d5d58cbf5397e3 100644 (file)
@@ -1581,6 +1581,10 @@ iso_stream_schedule (
        else
                next = (now + 2 + 7) & ~0x07;   /* full frame cache */
 
+       /* If needed, initialize last_iso_frame so that this URB will be seen */
+       if (ehci->isoc_count == 0)
+               ehci->last_iso_frame = now >> 3;
+
        /*
         * Use ehci->last_iso_frame as the base.  There can't be any
         * TDs scheduled for earlier than that.
@@ -1600,11 +1604,11 @@ iso_stream_schedule (
         */
        now2 = (now - base) & (mod - 1);
 
-       /* Is the schedule already full? */
+       /* Is the schedule about to wrap around? */
        if (unlikely(!empty && start < period)) {
-               ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
+               ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
                                urb, stream->next_uframe, base, period, mod);
-               status = -ENOSPC;
+               status = -EFBIG;
                goto fail;
        }
 
@@ -1671,10 +1675,6 @@ iso_stream_schedule (
        urb->start_frame = start & (mod - 1);
        if (!stream->highspeed)
                urb->start_frame >>= 3;
-
-       /* Make sure scan_isoc() sees these */
-       if (ehci->isoc_count == 0)
-               ehci->last_iso_frame = now >> 3;
        return status;
 
  fail:
index 19a9af1b4d749cd577e1215a200679f9a1c8b37d..ff9af29b4e9f6b0ea4bfa1decd8050fcbede061d 100644 (file)
@@ -451,7 +451,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
 
        u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
        if (IS_ERR(u_phy)) {
-               err = PTR_ERR(u_phy);
+               err = -EPROBE_DEFER;
                goto cleanup_clk_en;
        }
        hcd->usb_phy = u_phy;
index dd483c13565bb7bbedc561d6e39fccee1477c279..ce636466edb7a390efb346d3b0f511b9d4eb5404 100644 (file)
@@ -567,7 +567,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 {
        void __iomem *base;
        u32 control;
-       u32 fminterval;
+       u32 fminterval = 0;
+       bool no_fminterval = false;
        int cnt;
 
        if (!mmio_resource_enabled(pdev, 0))
@@ -577,6 +578,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
        if (base == NULL)
                return;
 
+       /*
+        * ULi M5237 OHCI controller locks the whole system when accessing
+        * the OHCI_FMINTERVAL offset.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
+               no_fminterval = true;
+
        control = readl(base + OHCI_CONTROL);
 
 /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
@@ -615,7 +623,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
        }
 
        /* software reset of the controller, preserving HcFmInterval */
-       fminterval = readl(base + OHCI_FMINTERVAL);
+       if (!no_fminterval)
+               fminterval = readl(base + OHCI_FMINTERVAL);
+
        writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 
        /* reset requires max 10 us delay */
@@ -624,7 +634,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
                        break;
                udelay(1);
        }
-       writel(fminterval, base + OHCI_FMINTERVAL);
+
+       if (!no_fminterval)
+               writel(fminterval, base + OHCI_FMINTERVAL);
 
        /* Now the controller is safely in SUSPEND and nothing can wake it up */
        iounmap(base);
index 142b601f95636fdff622bca8c4fb1a9aef87093b..7f76c8a12f89db425e19c4f3a2a5200de542dbd7 100644 (file)
@@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                                "must be suspended extra slowly",
                                pdev->revision);
                }
+               if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
+                       xhci->quirks |= XHCI_BROKEN_STREAMS;
                /* Fresco Logic confirms: all revisions of this chip do not
                 * support MSI, even though some of them claim to in their PCI
                 * capabilities.
index 01fcbb5eb06e7ec3d03bcd81d90941f889c5654a..c50d8d202618521793b37a8bd57d7b343cd757fd 100644 (file)
@@ -3803,6 +3803,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                return -EINVAL;
        }
 
+       if (setup == SETUP_CONTEXT_ONLY) {
+               slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+               if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+                   SLOT_STATE_DEFAULT) {
+                       xhci_dbg(xhci, "Slot already in default state\n");
+                       return 0;
+               }
+       }
+
        command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
        if (!command)
                return -ENOMEM;
index 9d68372dd9aaa01d72b3a192959b3bccc2b2de93..b005010240e5b30df0823d220664f5829855ac81 100644 (file)
@@ -72,6 +72,8 @@ config USB_MUSB_DA8XX
 
 config USB_MUSB_TUSB6010
        tristate "TUSB6010"
+       depends on ARCH_OMAP2PLUS || COMPILE_TEST
+       depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
 
 config USB_MUSB_OMAP2PLUS
        tristate "OMAP2430 and onwards"
@@ -85,6 +87,7 @@ config USB_MUSB_AM35X
 config USB_MUSB_DSPS
        tristate "TI DSPS platforms"
        select USB_MUSB_AM335X_CHILD
+       depends on ARCH_OMAP2PLUS || COMPILE_TEST
        depends on OF_IRQ
 
 config USB_MUSB_BLACKFIN
@@ -93,6 +96,7 @@ config USB_MUSB_BLACKFIN
 
 config USB_MUSB_UX500
        tristate "Ux500 platforms"
+       depends on ARCH_U8500 || COMPILE_TEST
 
 config USB_MUSB_JZ4740
        tristate "JZ4740"
index a441a2de8619e51d5f639e47332f3ec120d2a4f2..1782501456139aeb8f970fa51311a215a909d555 100644 (file)
@@ -63,7 +63,7 @@ static void bfin_writew(void __iomem *addr, unsigned offset, u16 data)
        bfin_write16(addr + offset, data);
 }
 
-static void binf_writel(void __iomem *addr, unsigned offset, u32 data)
+static void bfin_writel(void __iomem *addr, unsigned offset, u32 data)
 {
        bfin_write16(addr + offset, (u16)data);
 }
index f64fd964dc6d544b0fecee86a1fd9bd85993862a..c39a16ad78329194e78135464283dea4e760a4cc 100644 (file)
@@ -628,9 +628,9 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
                ret = of_property_read_string_index(np, "dma-names", i, &str);
                if (ret)
                        goto err;
-               if (!strncmp(str, "tx", 2))
+               if (strstarts(str, "tx"))
                        is_tx = 1;
-               else if (!strncmp(str, "rx", 2))
+               else if (strstarts(str, "rx"))
                        is_tx = 0;
                else {
                        dev_err(dev, "Wrong dmatype %s\n", str);
index ad3701a9738964d5f7846e76ec69a31660badda1..48131aa8472cfef70b19d6a2a72db0a8d6b2db85 100644 (file)
@@ -59,20 +59,12 @@ static const struct musb_register_map musb_regmap[] = {
        { "RxMaxPp",    MUSB_RXMAXP,    16 },
        { "RxCSR",      MUSB_RXCSR,     16 },
        { "RxCount",    MUSB_RXCOUNT,   16 },
-       { "ConfigData", MUSB_CONFIGDATA,8 },
        { "IntrRxE",    MUSB_INTRRXE,   16 },
        { "IntrTxE",    MUSB_INTRTXE,   16 },
        { "IntrUsbE",   MUSB_INTRUSBE,  8 },
        { "DevCtl",     MUSB_DEVCTL,    8 },
-       { "BabbleCtl",  MUSB_BABBLE_CTL,8 },
-       { "TxFIFOsz",   MUSB_TXFIFOSZ,  8 },
-       { "RxFIFOsz",   MUSB_RXFIFOSZ,  8 },
-       { "TxFIFOadd",  MUSB_TXFIFOADD, 16 },
-       { "RxFIFOadd",  MUSB_RXFIFOADD, 16 },
        { "VControl",   0x68,           32 },
        { "HWVers",     0x69,           16 },
-       { "EPInfo",     MUSB_EPINFO,    8 },
-       { "RAMInfo",    MUSB_RAMINFO,   8 },
        { "LinkInfo",   MUSB_LINKINFO,  8 },
        { "VPLen",      MUSB_VPLEN,     8 },
        { "HS_EOF1",    MUSB_HS_EOF1,   8 },
@@ -103,6 +95,16 @@ static const struct musb_register_map musb_regmap[] = {
        { "DMA_CNTLch7",        0x274,  16 },
        { "DMA_ADDRch7",        0x278,  32 },
        { "DMA_COUNTch7",       0x27C,  32 },
+#ifndef CONFIG_BLACKFIN
+       { "ConfigData", MUSB_CONFIGDATA,8 },
+       { "BabbleCtl",  MUSB_BABBLE_CTL,8 },
+       { "TxFIFOsz",   MUSB_TXFIFOSZ,  8 },
+       { "RxFIFOsz",   MUSB_RXFIFOSZ,  8 },
+       { "TxFIFOadd",  MUSB_TXFIFOADD, 16 },
+       { "RxFIFOadd",  MUSB_RXFIFOADD, 16 },
+       { "EPInfo",     MUSB_EPINFO,    8 },
+       { "RAMInfo",    MUSB_RAMINFO,   8 },
+#endif
        {  }    /* Terminating Entry */
 };
 
@@ -197,30 +199,30 @@ static ssize_t musb_test_mode_write(struct file *file,
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
 
-       if (!strncmp(buf, "force host", 9))
+       if (strstarts(buf, "force host"))
                test = MUSB_TEST_FORCE_HOST;
 
-       if (!strncmp(buf, "fifo access", 11))
+       if (strstarts(buf, "fifo access"))
                test = MUSB_TEST_FIFO_ACCESS;
 
-       if (!strncmp(buf, "force full-speed", 15))
+       if (strstarts(buf, "force full-speed"))
                test = MUSB_TEST_FORCE_FS;
 
-       if (!strncmp(buf, "force high-speed", 15))
+       if (strstarts(buf, "force high-speed"))
                test = MUSB_TEST_FORCE_HS;
 
-       if (!strncmp(buf, "test packet", 10)) {
+       if (strstarts(buf, "test packet")) {
                test = MUSB_TEST_PACKET;
                musb_load_testpacket(musb);
        }
 
-       if (!strncmp(buf, "test K", 6))
+       if (strstarts(buf, "test K"))
                test = MUSB_TEST_K;
 
-       if (!strncmp(buf, "test J", 6))
+       if (strstarts(buf, "test J"))
                test = MUSB_TEST_J;
 
-       if (!strncmp(buf, "test SE0 NAK", 12))
+       if (strstarts(buf, "test SE0 NAK"))
                test = MUSB_TEST_SE0_NAK;
 
        musb_writeb(musb->mregs, MUSB_TESTMODE, test);
index 23d474d3d7f466188bcf4cee8c8f974e4274d22f..883a9adfdfff5f0c1643036e0be7d7d22d1e73a8 100644 (file)
@@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb)
        if (musb->port_mode == MUSB_PORT_MODE_GADGET)
                return;
        usb_remove_hcd(musb->hcd);
-       musb->hcd = NULL;
 }
 
 void musb_host_free(struct musb *musb)
index 699e38c73d82c2ae76feddfe07f85a54ca76dd73..697a741a0cb1ed36ff336af31d8c3cd7d2fd32a5 100644 (file)
@@ -338,7 +338,6 @@ static void mv_otg_update_inputs(struct mv_otg *mvotg)
 static void mv_otg_update_state(struct mv_otg *mvotg)
 {
        struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
-       struct usb_phy *phy = &mvotg->phy;
        int old_state = mvotg->phy.otg->state;
 
        switch (old_state) {
@@ -858,10 +857,10 @@ static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
 {
        struct mv_otg *mvotg = platform_get_drvdata(pdev);
 
-       if (mvotg->phy.state != OTG_STATE_B_IDLE) {
+       if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) {
                dev_info(&pdev->dev,
                         "OTG state is not B_IDLE, it is %d!\n",
-                        mvotg->phy.state);
+                        mvotg->phy.otg->state);
                return -EAGAIN;
        }
 
index b4066a001ba01573f9546749b7d4978180c48dc5..ccfdfb24b24017e8eda929fb30fb8363311ab7d2 100644 (file)
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list,
                return phy;
        }
 
-       return ERR_PTR(-ENODEV);
+       return ERR_PTR(-EPROBE_DEFER);
 }
 
 static struct usb_phy *__usb_find_phy_dev(struct device *dev,
@@ -59,6 +59,9 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
 {
        struct usb_phy  *phy;
 
+       if (!of_device_is_available(node))
+               return ERR_PTR(-ENODEV);
+
        list_for_each_entry(phy, &phy_list, head) {
                if (node != phy->dev->of_node)
                        continue;
@@ -66,7 +69,7 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
                return phy;
        }
 
-       return ERR_PTR(-ENODEV);
+       return ERR_PTR(-EPROBE_DEFER);
 }
 
 static void devm_usb_phy_release(struct device *dev, void *res)
@@ -190,10 +193,13 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
        spin_lock_irqsave(&phy_lock, flags);
 
        phy = __of_usb_find_phy(node);
-       if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
-               if (!IS_ERR(phy))
-                       phy = ERR_PTR(-EPROBE_DEFER);
+       if (IS_ERR(phy)) {
+               devres_free(ptr);
+               goto err1;
+       }
 
+       if (!try_module_get(phy->dev->driver->owner)) {
+               phy = ERR_PTR(-ENODEV);
                devres_free(ptr);
                goto err1;
        }
index 8d7fc48b1f307efffa1e5ec40a6e3250b852a54c..29fa1c3d0089bee738ed4f54a8b65d4f82dd0c03 100644 (file)
@@ -46,6 +46,8 @@ static struct console usbcons;
  * ------------------------------------------------------------
  */
 
+static const struct tty_operations usb_console_fake_tty_ops = {
+};
 
 /*
  * The parsing of the command line works exactly like the
@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
                                goto reset_open_count;
                        }
                        kref_init(&tty->kref);
-                       tty_port_tty_set(&port->port, tty);
                        tty->driver = usb_serial_tty_driver;
                        tty->index = co->index;
+                       init_ldsem(&tty->ldisc_sem);
+                       INIT_LIST_HEAD(&tty->tty_files);
+                       kref_get(&tty->driver->kref);
+                       tty->ops = &usb_console_fake_tty_ops;
                        if (tty_init_termios(tty)) {
                                retval = -ENOMEM;
-                               goto free_tty;
+                               goto put_tty;
                        }
+                       tty_port_tty_set(&port->port, tty);
                }
 
                /* only call the device specific open if this
@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
                        serial->type->set_termios(tty, port, &dummy);
 
                        tty_port_tty_set(&port->port, NULL);
-                       kfree(tty);
+                       tty_kref_put(tty);
                }
                set_bit(ASYNCB_INITIALIZED, &port->port.flags);
        }
@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
 
  fail:
        tty_port_tty_set(&port->port, NULL);
free_tty:
-       kfree(tty);
put_tty:
+       tty_kref_put(tty);
  reset_open_count:
        port->port.count = 0;
        usb_autopm_put_interface(serial->interface);
index 6c4eb3cf5efd599653641e5d96d20b05610a6ed5..f4c56fc1a9f64dd32fae247c351c1b34e0d6400e 100644 (file)
@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
        { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
-       { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
+       { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
+       { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+       { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
index 1bd192290b08df0fb697fa91f2517b6756680012..ccf1df7c4b80f3f7a596fa3d1d8904eb64a78db6 100644 (file)
@@ -286,7 +286,7 @@ static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
 
        res = usb_submit_urb(port->read_urbs[index], mem_flags);
        if (res) {
-               if (res != -EPERM) {
+               if (res != -EPERM && res != -ENODEV) {
                        dev_err(&port->dev,
                                        "%s - usb_submit_urb failed: %d\n",
                                        __func__, res);
@@ -373,7 +373,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
                                                        __func__, urb->status);
                return;
        default:
-               dev_err(&port->dev, "%s - nonzero urb status: %d\n",
+               dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
                                                        __func__, urb->status);
                goto resubmit;
        }
index 077c714f1285171ee3b9e4c418e0df42f60cd42c..e07b15ed58148698d939370ac8bf73b20141669a 100644 (file)
@@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
                /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->portNumber];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
 
        port = serial->port[0];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
 
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
index 7a4c21b4f67613f7bf64839b4cf4faf09804355e..efdcee15b52030e455ce3c6e17d401d4a5e2660d 100644 (file)
@@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
 
 #define QUALCOMM_VENDOR_ID                     0x05C6
 
+#define SIERRA_VENDOR_ID                       0x1199
+
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
 #define CMOTECH_PRODUCT_CMU_300                        0x6002
@@ -512,7 +514,7 @@ enum option_blacklist_reason {
                OPTION_BLACKLIST_RESERVED_IF = 2
 };
 
-#define MAX_BL_NUM  8
+#define MAX_BL_NUM  11
 struct option_blacklist_info {
        /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
        const unsigned long sendsetup;
@@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
        .reserved = BIT(1) | BIT(5),
 };
 
+static const struct option_blacklist_info sierra_mc73xx_blacklist = {
+       .sendsetup = BIT(0) | BIT(2),
+       .reserved = BIT(8) | BIT(10) | BIT(11),
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+       { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
+         .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
index cb3e14780a7e0c6182e5f9bf3ad505b61fa75684..9c63897b3a564012ea63f99b9e5e73bc48b93d36 100644 (file)
@@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x0f3d, 0x68a2)},   /* Sierra Wireless MC7700 */
        {DEVICE_SWI(0x114f, 0x68a2)},   /* Sierra Wireless MC7750 */
        {DEVICE_SWI(0x1199, 0x68a2)},   /* Sierra Wireless MC7710 */
-       {DEVICE_SWI(0x1199, 0x68c0)},   /* Sierra Wireless MC73xx */
        {DEVICE_SWI(0x1199, 0x901c)},   /* Sierra Wireless EM7700 */
        {DEVICE_SWI(0x1199, 0x901f)},   /* Sierra Wireless EM7355 */
        {DEVICE_SWI(0x1199, 0x9040)},   /* Sierra Wireless Modem */
index 8a6f371ed6e77e3ccdc99632c3cd41ebeb155213..9893d696fc973e9e4183b57b56b3ceb22570942f 100644 (file)
@@ -69,16 +69,39 @@ static int uas_use_uas_driver(struct usb_interface *intf,
                return 0;
 
        /*
-        * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
-        * broken on the ASM1051, use the number of streams to differentiate.
-        * New ASM1053-s also support 32 streams, but have a different prod-id.
+        * ASMedia has a number of usb3 to sata bridge chips, at the time of
+        * this writing the following versions exist:
+        * ASM1051 - no uas support version
+        * ASM1051 - with broken (*) uas support
+        * ASM1053 - with working uas support
+        * ASM1153 - with working uas support
+        *
+        * Devices with these chips re-use a number of device-ids over the
+        * entire line, so the device-id is useless to determine if we're
+        * dealing with an ASM1051 (which we want to avoid).
+        *
+        * The ASM1153 can be identified by config.MaxPower == 0,
+        * whereas the ASM105x models have config.MaxPower == 36.
+        *
+        * Differentiating between the ASM1053 and ASM1051 is trickier, when
+        * connected over USB-3 we can look at the number of streams supported,
+        * ASM1051 supports 32 streams, whereas early ASM1053 versions support
+        * 16 streams; newer ASM1053-s also support 32 streams, but have a
+        * different prod-id.
+        *
+        * (*) ASM1051 chips do work with UAS with some disks (with the
+        *     US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks
         */
        if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
-                       le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
-               if (udev->speed < USB_SPEED_SUPER) {
+                       (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 ||
+                        le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) {
+               if (udev->actconfig->desc.bMaxPower == 0) {
+                       /* ASM1153, do nothing */
+               } else if (udev->speed < USB_SPEED_SUPER) {
                        /* No streams info, assume ASM1051 */
                        flags |= US_FL_IGNORE_UAS;
                } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+                       /* Possibly an ASM1051, disable uas */
                        flags |= US_FL_IGNORE_UAS;
                }
        }
index 18a283d6de1c8bd18663b57bbf7499510c49fa2d..6df4357d9ee358b36d33961507e8677bd346d432 100644 (file)
  * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
  */
 
+/*
+ * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+ * commands in UAS mode.  Observed with the 1.28 firmware; are there others?
+ */
+UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
+               "Apricorn",
+               "",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_UAS),
+
 /* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
 UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999,
                "Seagate",
@@ -68,6 +78,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */
+UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
+               "Seagate",
+               "Backup Plus",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
+               "Seagate",
+               "Backup Plus Desk",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /* https://bbs.archlinux.org/viewtopic.php?id=183190 */
 UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
                "Seagate",
@@ -82,6 +106,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: G. Richard Bellamy <rbellamy@pteradigm.com> */
+UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+               "Seagate",
+               "BUP Fast HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
                "JMicron",
@@ -89,14 +120,6 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_REPORT_OPCODES),
 
-/* Most ASM1051 based devices have issues with uas, blacklist them all */
-/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
-UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
-               "ASMedia",
-               "ASM1051",
-               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_IGNORE_UAS),
-
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                "VIA",
@@ -104,6 +127,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_ATA_1X),
 
+/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
+               "JMicron",
+               "JMS566",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
+
 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
 UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
                "Hitachi",
index 255201f22126aabd9ea9178fe353cf5f42a82c68..7cc0122a18cecbb7ef45cf8e438112ec2fb4ff00 100644 (file)
@@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-       u8 type;
        struct vfio_pci_device *vdev;
        struct iommu_group *group;
        int ret;
 
-       pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
-       if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+       if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return -EINVAL;
 
        group = iommu_group_get(&pdev->dev);
index 14419a8ccbb6b138aa8bd38b7765166c1f4aa398..d415d69dc2378cbc7568bbcdcc53e601770ee761 100644 (file)
@@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
                ++headcount;
                seg += in;
        }
-       heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
+       heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
        *iovcount = seg;
        if (unlikely(log))
                *log_num = nlogs;
index 01c01cb3933fdfc5bce7e9c457d128d2e8f460e4..d695b1673ae532d9ac873bdc5661ccab84995c04 100644 (file)
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
        return 0;
 }
 
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+       switch (attr) {
+       case VIRTIO_SCSI_S_SIMPLE:
+               return TCM_SIMPLE_TAG;
+       case VIRTIO_SCSI_S_ORDERED:
+               return TCM_ORDERED_TAG;
+       case VIRTIO_SCSI_S_HEAD:
+               return TCM_HEAD_TAG;
+       case VIRTIO_SCSI_S_ACA:
+               return TCM_ACA_TAG;
+       default:
+               break;
+       }
+       return TCM_SIMPLE_TAG;
+}
+
 static void tcm_vhost_submission_work(struct work_struct *work)
 {
        struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
-                       cmd->tvc_task_attr, cmd->tvc_data_direction,
-                       TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-                       NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
+                       vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+                       cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
+                       sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+                       cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
index ed71b5347a766ee26c95039638a028ece31b53de..cb807d0ea498df3a197d0c3ed70d5548e57d97b7 100644 (file)
@@ -713,9 +713,13 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                        r = -EFAULT;
                        break;
                }
-               if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
-                   (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
-                   (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+
+               /* Make sure it's safe to cast pointers to vring types. */
+               BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+               BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+               if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+                   (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+                   (a.log_guest_addr & (sizeof(u64) - 1))) {
                        r = -EINVAL;
                        break;
                }
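
The same idea in stand-alone C11: a compile-time assertion that the C type is not more strictly aligned than the ABI constant (so casting a guest-supplied address to it stays safe), plus the power-of-two mask test applied to the addresses. The constants below are illustrative local definitions, not the real virtio headers.

#include <stdbool.h>
#include <stdint.h>

#define AVAIL_ALIGN 2u   /* illustrative stand-ins for the vring ABI values */
#define USED_ALIGN  4u

struct avail_ring { uint16_t flags; uint16_t idx; uint16_t ring[]; };

/* Analogue of BUILD_BUG_ON(): refuse to build if the C type demands more
 * alignment than the ABI guarantees for addresses coming from the guest. */
_Static_assert(_Alignof(struct avail_ring) <= AVAIL_ALIGN,
	       "avail ring type over-aligned");

/* Runtime check: addr must sit on a power-of-two 'align' boundary. */
static bool is_aligned(uint64_t addr, uint64_t align)
{
	return (addr & (align - 1)) == 0;
}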
index 1c29bd19e3d5fe9954153a8b821058a1be5ff6d5..0e5fde1d3ffbe5a152035f33063afa98bf84f33e 100644 (file)
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
                err = broadsheet_spiflash_read_range(par, start_sector_addr,
                                                data_start_addr, sector_buffer);
                if (err)
-                       return err;
+                       goto out;
        }
 
        /* now we copy our data into the right place in the sector buffer */
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
                err = broadsheet_spiflash_read_range(par, tail_start_addr,
                        tail_len, sector_buffer + tail_start_addr);
                if (err)
-                       return err;
+                       goto out;
        }
 
        /* if we got here we have the full sector that we want to rewrite. */
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
        /* first erase the sector */
        err = broadsheet_spiflash_erase_sector(par, start_sector_addr);
        if (err)
-               return err;
+               goto out;
 
        /* now write it */
        err = broadsheet_spiflash_write_sector(par, start_sector_addr,
                                        sector_buffer, sector_size);
+out:
+       kfree(sector_buffer);
        return err;
 }
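
The pattern the fix applies, as a small stand-alone sketch (step_that_may_fail() is a placeholder): every error path funnels through a single label so the buffer allocated at the top of the function is always released.

#include <stdlib.h>
#include <string.h>

static int step_that_may_fail(unsigned char *buf, size_t len)
{
	(void)buf;
	return len ? 0 : -1;    /* placeholder for I/O that can fail */
}

static int rewrite_sector(size_t sector_size)
{
	unsigned char *buffer = malloc(sector_size);
	int err;

	if (!buffer)
		return -1;

	err = step_that_may_fail(buffer, sector_size);
	if (err)
		goto out;       /* returning directly here would leak buffer */

	memset(buffer, 0, sector_size);   /* ... further work on the buffer ... */
	err = 0;
out:
	free(buffer);           /* single exit point releases the buffer */
	return err;
}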
 
index 900aa4ecd617990c8ce0caf1c46fb2993c8233b5..d6cab1fd9a4795da2fe2348c2882bc3e04a6cf30 100644 (file)
@@ -83,9 +83,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
        cancel_delayed_work_sync(&info->deferred_work);
 
        /* Run it immediately */
-       err = schedule_delayed_work(&info->deferred_work, 0);
+       schedule_delayed_work(&info->deferred_work, 0);
        mutex_unlock(&inode->i_mutex);
-       return err;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
 
index 87accdb59c81b5ed5686ad2c291c636fe541289a..ac83ef5cfd7d7f6a96848d546e3c7155dfdcd148 100644 (file)
@@ -132,7 +132,6 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
        .mX_max = 127,
        .fint_min = 500000,
        .fint_max = 2500000,
-       .clkdco_max = 1800000000,
 
        .clkdco_min = 500000000,
        .clkdco_low = 1000000000,
@@ -156,7 +155,6 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
        .mX_max = 127,
        .fint_min = 620000,
        .fint_max = 2500000,
-       .clkdco_max = 1800000000,
 
        .clkdco_min = 750000000,
        .clkdco_low = 1500000000,
index 50bc62c5d367f5586bf5431a16e2b4ec91599ca6..335ffac224b97a57b6abc043b74ae47601e27535 100644 (file)
@@ -97,7 +97,8 @@ int dss_pll_enable(struct dss_pll *pll)
        return 0;
 
 err_enable:
-       regulator_disable(pll->regulator);
+       if (pll->regulator)
+               regulator_disable(pll->regulator);
 err_reg:
        clk_disable_unprepare(pll->clkin);
        return r;
index d51a983075bc57a579664367214637f3aeb3e647..5c2ccab5a958f6d41a68cbab3a48e78e8514cbda 100644 (file)
@@ -342,6 +342,8 @@ static void sdi_init_output(struct platform_device *pdev)
        out->output_type = OMAP_DISPLAY_TYPE_SDI;
        out->name = "sdi.0";
        out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
+       /* We have SDI only on OMAP3, where it's on port 1 */
+       out->port_num = 1;
        out->ops.sdi = &sdi_ops;
        out->owner = THIS_MODULE;
 
index 92cac803dee3c2261655a34a45a6a0c2d170636f..1085c0432158c02038aba6f961ea7e300f7f950b 100644 (file)
@@ -402,7 +402,7 @@ static int __init simplefb_init(void)
        if (ret)
                return ret;
 
-       if (IS_ENABLED(CONFIG_OF) && of_chosen) {
+       if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
                for_each_child_of_node(of_chosen, np) {
                        if (of_device_is_compatible(np, "simple-framebuffer"))
                                of_platform_device_create(np, NULL, NULL);
index 940cd196eef53ab6cc02bf44d2320f610e36685d..10fbfd8ab963f9e78905a5d932b13ae810639363 100644 (file)
@@ -21,6 +21,21 @@ static bool nologo;
 module_param(nologo, bool, 0);
 MODULE_PARM_DESC(nologo, "Disables startup logo");
 
+/*
+ * Logos are located in initdata and will be freed in kernel_init.
+ * Use a late_initcall to mark the logos as freed to prevent any further use.
+ */
+
+static bool logos_freed;
+
+static int __init fb_logo_late_init(void)
+{
+       logos_freed = true;
+       return 0;
+}
+
+late_initcall(fb_logo_late_init);
+
 /* logo's are marked __initdata. Use __init_refok to tell
  * modpost that it is intended that this function uses data
  * marked __initdata.
@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
 {
        const struct linux_logo *logo = NULL;
 
-       if (nologo)
+       if (nologo || logos_freed)
                return NULL;
 
        if (depth >= 1) {
index 2ef9529809d8bd198455a1af19151c22fe4ca715..9756f21b809e080d1d1975b0734cb82cdea6e9e3 100644 (file)
@@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev)
 
        vp_free_vectors(vdev);
        kfree(vp_dev->vqs);
+       vp_dev->vqs = NULL;
 }
 
 static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
@@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
        return 0;
 }
 
-void virtio_pci_release_dev(struct device *_d)
-{
-       /*
-        * No need for a release method as we allocate/free
-        * all devices together with the pci devices.
-        * Provide an empty one to avoid getting a warning from core.
-        */
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int virtio_pci_freeze(struct device *dev)
 {
index adddb647b21d826c76ce54dac8707828c0ba381c..5a497289b7e9c336d1478db41ca5c0f60fbbafb9 100644 (file)
@@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev);
  * - ignore the affinity request if we're using INTX
  */
 int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
-void virtio_pci_release_dev(struct device *);
 
 int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id);
index 6c76f0f5658ccfcef86b865b9f9e2fa76334245c..a5486e65e04bd55d5c64a33d3dbeeadb27dd4857 100644 (file)
@@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
        .set_vq_affinity = vp_set_vq_affinity,
 };
 
+static void virtio_pci_release_dev(struct device *_d)
+{
+       struct virtio_device *vdev = dev_to_virtio(_d);
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       /* As struct device is a kobject, it is not safe to
+        * free the memory (including the reference counter itself)
+        * until its release callback runs. */
+       kfree(vp_dev);
+}
+
 /* the PCI probing function */
 int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
@@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
        pci_iounmap(pci_dev, vp_dev->ioaddr);
        pci_release_regions(pci_dev);
        pci_disable_device(pci_dev);
-       kfree(vp_dev);
 }
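
Outside the driver core, the rule being restored here looks like this: an object that other parties may still hold a reference to is freed only from its release callback when the last reference is dropped, never directly in the teardown path. A generic refcount sketch, not the kobject API itself:

#include <stdlib.h>

struct obj {
	int refcount;                       /* not thread-safe; sketch only */
	void (*release)(struct obj *obj);   /* runs when refcount reaches zero */
};

static void obj_put(struct obj *obj)
{
	if (--obj->refcount == 0)
		obj->release(obj);          /* the only place the memory dies */
}

static void obj_release(struct obj *obj)
{
	free(obj);                          /* mirrors kfree(vp_dev) above */
}

/* Teardown code drops its reference instead of calling free() directly;
 * any other holder keeps the object alive until its own obj_put(). */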
index 2d3e32ebfd15510b8e97519a006486c83755121b..8729cf68d2fef5e41540283d74beba55285f59c5 100644 (file)
@@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
 {
        int ret;
        int type;
-       struct btrfs_tree_block_info *info;
        struct btrfs_extent_inline_ref *eiref;
 
        if (*ptr == (unsigned long)-1)
@@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
        }
 
        /* we can treat both ref types equally here */
-       info = (struct btrfs_tree_block_info *)(ei + 1);
        *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
-       *out_level = btrfs_tree_block_level(eb, info);
+
+       if (key->type == BTRFS_EXTENT_ITEM_KEY) {
+               struct btrfs_tree_block_info *info;
+
+               info = (struct btrfs_tree_block_info *)(ei + 1);
+               *out_level = btrfs_tree_block_level(eb, info);
+       } else {
+               ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
+               *out_level = (u8)key->offset;
+       }
 
        if (ret == 1)
                *ptr = (unsigned long)-1;
index 054577bddaf27869d9a524a73d4df5a76072e4e1..de4e70fb3cbbd4a5c28d13f1fe3aec16733ed49f 100644 (file)
@@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
 {
        struct btrfs_delayed_node *delayed_node;
 
+       /*
+        * We don't do delayed inode updates during log recovery because it
+        * leads to enospc problems.  This means we also can't do
+        * delayed inode refs.
+        */
+       if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+               return -EAGAIN;
+
        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);
index a80b97100d90b3162d7d3688ed6b3c459bb56bc8..15116585e7142d3865d822828011ee1ac38f0519 100644 (file)
@@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        struct extent_buffer *leaf;
 
        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
-       if (ret < 0)
+       if (ret) {
+               if (ret > 0)
+                       ret = -ENOENT;
                goto fail;
-       BUG_ON(ret); /* Corruption */
+       }
 
        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
 fail:
-       if (ret) {
+       if (ret)
                btrfs_abort_transaction(trans, root, ret);
-               return ret;
-       }
-       return 0;
+       return ret;
 
 }
 
index e687bb0dc73a36724a921d40bc66c89473f7edac..8bf326affb944026a43bbc42a900d6f8355ce837 100644 (file)
@@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
 out_fail:
        btrfs_end_transaction(trans, root);
-       if (drop_on_err)
+       if (drop_on_err) {
+               inode_dec_link_count(inode);
                iput(inode);
+       }
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return err;
index f2bb13a23f860ea19d0403057395d38d8b9d2632..9e1569ffbf6ea66f1324022db9f9aba559339c73 100644 (file)
@@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
                ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
                                             flags, gen, mirror_num,
                                             have_csum ? csum : NULL);
-skip:
                if (ret)
                        return ret;
+skip:
                len -= l;
                logical += l;
                physical += l;
index f5013d92a7e6b73d9c4369683f6aac987afbd287..c81c0e004588b9e2ae03580ed6265096d6608d46 100644 (file)
@@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                }
        }
 
-       dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+       dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
             inode, ceph_vinop(inode), len, locked_page);
 
        if (len > 0) {
index 6e139111fdb250cc85f28d96d7d26fe0508850a4..22b289a3b1c4d3e12727cc0a005456fa9b295a00 100644 (file)
@@ -661,16 +661,16 @@ set_credits(struct TCP_Server_Info *server, const int val)
        server->ops->set_credits(server, val);
 }
 
-static inline __u64
+static inline __le64
 get_next_mid64(struct TCP_Server_Info *server)
 {
-       return server->ops->get_next_mid(server);
+       return cpu_to_le64(server->ops->get_next_mid(server));
 }
 
 static inline __le16
 get_next_mid(struct TCP_Server_Info *server)
 {
-       __u16 mid = get_next_mid64(server);
+       __u16 mid = server->ops->get_next_mid(server);
        /*
         * The value in the SMB header should be little endian for easy
         * on-the-wire decoding.
index b333ff60781d295809d8fa8f23366f9bcf8d9285..abae6dd2c6b998816db830f40934c86ea8c33fff 100644 (file)
@@ -926,6 +926,7 @@ cifs_NTtimeToUnix(__le64 ntutc)
 
        /* Subtract the NTFS time offset, then convert to 1s intervals. */
        s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+       u64 abs_t;
 
        /*
         * Unfortunately can not use normal 64 bit division on 32 bit arch, but
@@ -933,13 +934,14 @@ cifs_NTtimeToUnix(__le64 ntutc)
         * to special case them
         */
        if (t < 0) {
-               t = -t;
-               ts.tv_nsec = (long)(do_div(t, 10000000) * 100);
+               abs_t = -t;
+               ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100);
                ts.tv_nsec = -ts.tv_nsec;
-               ts.tv_sec = -t;
+               ts.tv_sec = -abs_t;
        } else {
-               ts.tv_nsec = (long)do_div(t, 10000000) * 100;
-               ts.tv_sec = t;
+               abs_t = t;
+               ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100;
+               ts.tv_sec = abs_t;
        }
 
        return ts;
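
For reference, a stand-alone version of the conversion: NT/SMB timestamps count 100-nanosecond ticks since 1601-01-01, so subtracting the 1601-to-1970 offset (11644473600 seconds, i.e. 116444736000000000 ticks) and dividing by 10^7 gives Unix seconds, with the remainder supplying nanoseconds. On 64-bit user space a plain division stands in for the kernel's do_div():

#include <stdint.h>
#include <stdio.h>

#define NT_EPOCH_OFFSET 116444736000000000LL  /* 100ns ticks from 1601 to 1970 */

struct unix_ts { int64_t tv_sec; long tv_nsec; };

static struct unix_ts nt_time_to_unix(uint64_t ntutc)
{
	int64_t t = (int64_t)ntutc - NT_EPOCH_OFFSET;
	struct unix_ts ts;

	if (t < 0) {                                  /* timestamps before 1970 */
		uint64_t abs_t = -(uint64_t)t;
		ts.tv_nsec = -(long)((abs_t % 10000000) * 100);
		ts.tv_sec  = -(int64_t)(abs_t / 10000000);
	} else {
		ts.tv_nsec = (long)((t % 10000000) * 100);
		ts.tv_sec  = t / 10000000;
	}
	return ts;
}

int main(void)
{
	/* 130645440000000000 ticks == 2015-01-01 00:00:00 UTC == 1420070400 s */
	struct unix_ts ts = nt_time_to_unix(130645440000000000ULL);
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}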
index 8eaf20a806494c71002a668a3e49b159b1b66d71..c295338e0a98ce95a71c60afedc87ad9bd5267b5 100644 (file)
@@ -69,7 +69,8 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
  * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
  *
  * Find the dentry that matches "name". If there isn't one, create one. If it's
- * a negative dentry or the uniqueid changed, then drop it and recreate it.
+ * a negative dentry or the uniqueid or filetype (mode) changed,
+ * then drop it and recreate it.
  */
 static void
 cifs_prime_dcache(struct dentry *parent, struct qstr *name,
@@ -97,8 +98,11 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
                        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
                                fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
 
-                       /* update inode in place if i_ino didn't change */
-                       if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+                       /* update inode in place
+                        * if both i_ino and i_mode didn't change */
+                       if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
+                           (inode->i_mode & S_IFMT) ==
+                           (fattr->cf_mode & S_IFMT)) {
                                cifs_fattr_to_inode(inode, fattr);
                                goto out;
                        }
index f1cefc9763edaeb3115ee1868d9bc4f033b7e0f5..689f035915cf70f075d71fca5e281ec009c5420a 100644 (file)
 static int
 check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
 {
+       __u64 wire_mid = le64_to_cpu(hdr->MessageId);
+
        /*
         * Make sure that this really is an SMB, that it is a response,
         * and that the message ids match.
         */
        if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
-           (mid == hdr->MessageId)) {
+           (mid == wire_mid)) {
                if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
                        return 0;
                else {
@@ -51,11 +53,11 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
                if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER)
                        cifs_dbg(VFS, "Bad protocol string signature header %x\n",
                                 *(unsigned int *) hdr->ProtocolId);
-               if (mid != hdr->MessageId)
+               if (mid != wire_mid)
                        cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
-                                mid, hdr->MessageId);
+                                mid, wire_mid);
        }
-       cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", hdr->MessageId);
+       cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
        return 1;
 }
 
@@ -95,7 +97,7 @@ smb2_check_message(char *buf, unsigned int length)
 {
        struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
        struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
-       __u64 mid = hdr->MessageId;
+       __u64 mid = le64_to_cpu(hdr->MessageId);
        __u32 len = get_rfc1002_length(buf);
        __u32 clc_len;  /* calculated length */
        int command;
index 93fd0586f9ec6e661c17de59cb66535d80e7ff51..96b5d40a2ece611b27ed19668cc4b7b665605113 100644 (file)
@@ -176,10 +176,11 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
 {
        struct mid_q_entry *mid;
        struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+       __u64 wire_mid = le64_to_cpu(hdr->MessageId);
 
        spin_lock(&GlobalMid_Lock);
        list_for_each_entry(mid, &server->pending_mid_q, qhead) {
-               if ((mid->mid == hdr->MessageId) &&
+               if ((mid->mid == wire_mid) &&
                    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
                    (mid->command == hdr->Command)) {
                        spin_unlock(&GlobalMid_Lock);
index ce858477002a6148e31a85e9f28fc012968f52c9..70867d54fb8bf485cb5ff4dcb3049f67ca86cb45 100644 (file)
@@ -110,7 +110,7 @@ struct smb2_hdr {
        __le16 CreditRequest;  /* CreditResponse */
        __le32 Flags;
        __le32 NextCommand;
-       __u64  MessageId;       /* opaque - so can stay little endian */
+       __le64 MessageId;
        __le32 ProcessId;
        __u32  TreeId;          /* opaque - so do not make little endian */
        __u64  SessionId;       /* opaque - so do not make little endian */
index 5111e7272db62e718fcb3d968af48083d598bccf..d4c5b6f109a7feaa6f2c99f21ca332ff41a2673f 100644 (file)
@@ -490,7 +490,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer,
                return temp;
        else {
                memset(temp, 0, sizeof(struct mid_q_entry));
-               temp->mid = smb_buffer->MessageId;      /* always LE */
+               temp->mid = le64_to_cpu(smb_buffer->MessageId);
                temp->pid = current->pid;
                temp->command = smb_buffer->Command;    /* Always LE */
                temp->when_alloc = jiffies;
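
The invariant these hunks establish is the usual one for on-wire integers: the 64-bit MessageId is little endian on the wire and host order in memory, so it is converted exactly once at the boundary and compared only within one domain. A portable byte-wise sketch of the two conversions (independent of host endianness):

#include <stdint.h>

/* Decode a little-endian 64-bit wire field into a host-order value. */
static uint64_t get_le64(const unsigned char *p)
{
	uint64_t v = 0;
	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

/* Encode a host-order value as little-endian bytes for the wire. */
static void put_le64(unsigned char *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (unsigned char)(v >> (8 * i));
}

/* Compare a pending mid (host order) against the header field as received. */
static int mid_matches(uint64_t pending_mid, const unsigned char *wire_mid)
{
	return pending_mid == get_le64(wire_mid);
}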
index e5d3eadf47b1e7fb6251c590016044cf8b5c4c98..bed43081720f718fc30dca204be8509ddcf5eac5 100644 (file)
@@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
        /* fallback to generic here if not in extents fmt */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-               return __generic_block_fiemap(inode, fieinfo, start, len,
-                                             ext4_get_block);
+               return generic_block_fiemap(inode, fieinfo, start, len,
+                       ext4_get_block);
 
        if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
                return -EBADR;
index 513c12cf444c239f5c34bd4d73c653029bdaca96..8131be8c0af3166aac865557baa9f0371564a397 100644 (file)
@@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
  * we determine this extent as a data or a hole according to whether the
  * page cache has data or not.
  */
-static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
-                                    loff_t endoff, loff_t *offset)
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+                                    int whence,
+                                    struct ext4_map_blocks *map,
+                                    loff_t *offset)
 {
        struct pagevec pvec;
+       unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
+       loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;
 
+       blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
-
+       endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
 
        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;
@@ -403,144 +408,147 @@ out:
 static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
 {
        struct inode *inode = file->f_mapping->host;
-       struct fiemap_extent_info fie;
-       struct fiemap_extent ext[2];
-       loff_t next;
-       int i, ret = 0;
+       struct ext4_map_blocks map;
+       struct extent_status es;
+       ext4_lblk_t start, last, end;
+       loff_t dataoff, isize;
+       int blkbits;
+       int ret = 0;
 
        mutex_lock(&inode->i_mutex);
-       if (offset >= inode->i_size) {
+
+       isize = i_size_read(inode);
+       if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }
-       fie.fi_flags = 0;
-       fie.fi_extents_max = 2;
-       fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
-       while (1) {
-               mm_segment_t old_fs = get_fs();
-
-               fie.fi_extents_mapped = 0;
-               memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
-
-               set_fs(get_ds());
-               ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
-               set_fs(old_fs);
-               if (ret)
+
+       blkbits = inode->i_sb->s_blocksize_bits;
+       start = offset >> blkbits;
+       last = start;
+       end = isize >> blkbits;
+       dataoff = offset;
+
+       do {
+               map.m_lblk = last;
+               map.m_len = end - last + 1;
+               ret = ext4_map_blocks(NULL, inode, &map, 0);
+               if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+                       if (last != start)
+                               dataoff = (loff_t)last << blkbits;
                        break;
+               }
 
-               /* No extents found, EOF */
-               if (!fie.fi_extents_mapped) {
-                       ret = -ENXIO;
+               /*
+                * If there is a delayed extent at this offset,
+                * it is treated as data.
+                */
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
+               if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+                       if (last != start)
+                               dataoff = (loff_t)last << blkbits;
                        break;
                }
-               for (i = 0; i < fie.fi_extents_mapped; i++) {
-                       next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
 
-                       if (offset < (loff_t)ext[i].fe_logical)
-                               offset = (loff_t)ext[i].fe_logical;
-                       /*
-                        * If extent is not unwritten, then it contains valid
-                        * data, mapped or delayed.
-                        */
-                       if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
-                               goto out;
+               /*
+                * If there is an unwritten extent at this offset,
+                * it is treated as data or a hole according to whether
+                * the page cache has data for it or not.
+                */
+               if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+                       int unwritten;
+                       unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+                                                             &map, &dataoff);
+                       if (unwritten)
+                               break;
+               }
 
-                       /*
-                        * If there is a unwritten extent at this offset,
-                        * it will be as a data or a hole according to page
-                        * cache that has data or not.
-                        */
-                       if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
-                                                     next, &offset))
-                               goto out;
+               last++;
+               dataoff = (loff_t)last << blkbits;
+       } while (last <= end);
 
-                       if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
-                               ret = -ENXIO;
-                               goto out;
-                       }
-                       offset = next;
-               }
-       }
-       if (offset > inode->i_size)
-               offset = inode->i_size;
-out:
        mutex_unlock(&inode->i_mutex);
-       if (ret)
-               return ret;
 
-       return vfs_setpos(file, offset, maxsize);
+       if (dataoff > isize)
+               return -ENXIO;
+
+       return vfs_setpos(file, dataoff, maxsize);
 }
 
 /*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
  */
 static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
 {
        struct inode *inode = file->f_mapping->host;
-       struct fiemap_extent_info fie;
-       struct fiemap_extent ext[2];
-       loff_t next;
-       int i, ret = 0;
+       struct ext4_map_blocks map;
+       struct extent_status es;
+       ext4_lblk_t start, last, end;
+       loff_t holeoff, isize;
+       int blkbits;
+       int ret = 0;
 
        mutex_lock(&inode->i_mutex);
-       if (offset >= inode->i_size) {
+
+       isize = i_size_read(inode);
+       if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }
 
-       fie.fi_flags = 0;
-       fie.fi_extents_max = 2;
-       fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
-       while (1) {
-               mm_segment_t old_fs = get_fs();
-
-               fie.fi_extents_mapped = 0;
-               memset(ext, 0, sizeof(*ext));
+       blkbits = inode->i_sb->s_blocksize_bits;
+       start = offset >> blkbits;
+       last = start;
+       end = isize >> blkbits;
+       holeoff = offset;
 
-               set_fs(get_ds());
-               ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
-               set_fs(old_fs);
-               if (ret)
-                       break;
+       do {
+               map.m_lblk = last;
+               map.m_len = end - last + 1;
+               ret = ext4_map_blocks(NULL, inode, &map, 0);
+               if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+                       last += ret;
+                       holeoff = (loff_t)last << blkbits;
+                       continue;
+               }
 
-               /* No extents found */
-               if (!fie.fi_extents_mapped)
-                       break;
+               /*
+                * If there is a delayed extent at this offset,
+                * skip over it.
+                */
+               ext4_es_find_delayed_extent_range(inode, last, last, &es);
+               if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+                       last = es.es_lblk + es.es_len;
+                       holeoff = (loff_t)last << blkbits;
+                       continue;
+               }
 
-               for (i = 0; i < fie.fi_extents_mapped; i++) {
-                       next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
-                       /*
-                        * If extent is not unwritten, then it contains valid
-                        * data, mapped or delayed.
-                        */
-                       if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
-                               if (offset < (loff_t)ext[i].fe_logical)
-                                       goto out;
-                               offset = next;
+               /*
+                * If there is an unwritten extent at this offset,
+                * it is treated as data or a hole according to whether
+                * the page cache has data for it or not.
+                */
+               if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+                       int unwritten;
+                       unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+                                                             &map, &holeoff);
+                       if (!unwritten) {
+                               last += ret;
+                               holeoff = (loff_t)last << blkbits;
                                continue;
                        }
-                       /*
-                        * If there is a unwritten extent at this offset,
-                        * it will be as a data or a hole according to page
-                        * cache that has data or not.
-                        */
-                       if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
-                                                     next, &offset))
-                               goto out;
-
-                       offset = next;
-                       if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
-                               goto out;
                }
-       }
-       if (offset > inode->i_size)
-               offset = inode->i_size;
-out:
+
+               /* find a hole */
+               break;
+       } while (last <= end);
+
        mutex_unlock(&inode->i_mutex);
-       if (ret)
-               return ret;
 
-       return vfs_setpos(file, offset, maxsize);
+       if (holeoff > isize)
+               holeoff = isize;
+
+       return vfs_setpos(file, holeoff, maxsize);
 }
 
 /*
index bf76f405a5f91df5f276e2a2d1dcf2935178d4a7..8a8ec6293b195f16623e716342463979427b3156 100644 (file)
@@ -23,6 +23,18 @@ int ext4_resize_begin(struct super_block *sb)
        if (!capable(CAP_SYS_RESOURCE))
                return -EPERM;
 
+       /*
+        * If we are not using the primary superblock/GDT copy, don't resize,
+        * because the user tools have no way of handling this.  Probably a
+        * bad time to do it anyway.
+        */
+       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+           le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+               ext4_warning(sb, "won't resize using backup superblock at %llu",
+                       (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+               return -EPERM;
+       }
+
        /*
         * We are not allowed to do online-resizing on a filesystem mounted
         * with error, because it can destroy the filesystem easily.
@@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
                       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
                       gdb_num);
 
-       /*
-        * If we are not using the primary superblock/GDT copy don't resize,
-         * because the user tools have no way of handling this.  Probably a
-         * bad time to do it anyways.
-         */
-       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
-           le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
-               ext4_warning(sb, "won't resize using backup superblock at %llu",
-                       (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
-               return -EPERM;
-       }
-
        gdb_bh = sb_bread(sb, gdblock);
        if (!gdb_bh)
                return -EIO;
index 43c92b1685cbff914240436f4d0901861c9fd2c2..74c5f53595fbd1d236026f0d78b3071982f89075 100644 (file)
@@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
            EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
-               ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+               ext4_warning(sb, "metadata_csum and uninit_bg are "
                             "redundant flags; please run fsck.");
 
        /* Check for a known checksum algorithm */
index 99d440a4a6ba259e5bd7ec6b167dbedb2637ac5d..ee85cd4e136abbff33409fb018343028d21578e2 100644 (file)
@@ -740,14 +740,15 @@ static int __init fcntl_init(void)
         * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
         * is defined as O_NONBLOCK on some platforms and not on others.
         */
-       BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
                O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
-               __FMODE_EXEC    | O_PATH        | __O_TMPFILE
+               __FMODE_EXEC    | O_PATH        | __O_TMPFILE   |
+               __FMODE_NONOTIFY
                ));
 
        fasync_cache = kmem_cache_create("fasync_cache",
index ba1107977f2ecafa96cafc04f6498b3fb79a3145..ed19a7d622fa35decaa08b10e83b8bdee8712419 100644 (file)
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req)
        req->in.h.pid = current->pid;
 }
 
+void fuse_set_initialized(struct fuse_conn *fc)
+{
+       /* Make sure stores before this are seen on another CPU */
+       smp_wmb();
+       fc->initialized = 1;
+}
+
 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
 {
        return !fc->initialized || (for_background && fc->blocked);
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                if (intr)
                        goto out;
        }
+       /* Matches smp_wmb() in fuse_set_initialized() */
+       smp_rmb();
 
        err = -ENOTCONN;
        if (!fc->connected)
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 
        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, fc->initialized);
+       /* Matches smp_wmb() in fuse_set_initialized() */
+       smp_rmb();
        req = fuse_request_alloc(0);
        if (!req)
                req = get_reserved_req(fc, file);
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 }
 EXPORT_SYMBOL_GPL(fuse_request_send);
 
+static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
+{
+       if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
+               args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+
+       if (fc->minor < 9) {
+               switch (args->in.h.opcode) {
+               case FUSE_LOOKUP:
+               case FUSE_CREATE:
+               case FUSE_MKNOD:
+               case FUSE_MKDIR:
+               case FUSE_SYMLINK:
+               case FUSE_LINK:
+                       args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+                       break;
+               case FUSE_GETATTR:
+               case FUSE_SETATTR:
+                       args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+                       break;
+               }
+       }
+       if (fc->minor < 12) {
+               switch (args->in.h.opcode) {
+               case FUSE_CREATE:
+                       args->in.args[0].size = sizeof(struct fuse_open_in);
+                       break;
+               case FUSE_MKNOD:
+                       args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+                       break;
+               }
+       }
+}
+
 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
 {
        struct fuse_req *req;
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       /* Needs to be done after fuse_get_req() so that fc->minor is valid */
+       fuse_adjust_compat(fc, args);
+
        req->in.h.opcode = args->in.h.opcode;
        req->in.h.nodeid = args->in.h.nodeid;
        req->in.numargs = args->in.numargs;
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
        if (fc->connected) {
                fc->connected = 0;
                fc->blocked = 0;
-               fc->initialized = 1;
+               fuse_set_initialized(fc);
                end_io_requests(fc);
                end_queued_requests(fc);
                end_polls(fc);
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
                spin_lock(&fc->lock);
                fc->connected = 0;
                fc->blocked = 0;
-               fc->initialized = 1;
+               fuse_set_initialized(fc);
                end_queued_requests(fc);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
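
The smp_wmb()/smp_rmb() pair added here is the standard publish/subscribe ordering: the writer completes its setup stores before setting the flag, and a reader that sees the flag must also see that setup. A user-space analogue using C11 release/acquire atomics (not the fuse code itself; the field is a stand-in):

#include <stdatomic.h>
#include <stdbool.h>

struct conn {
	int max_write;            /* stand-in for the state set up before publish */
	atomic_bool initialized;
};

static void set_initialized(struct conn *c)
{
	c->max_write = 4096;                    /* ordinary setup stores first */
	atomic_store_explicit(&c->initialized,  /* ... then publish the flag   */
			      true, memory_order_release);
}

static bool setup_visible(struct conn *c)
{
	/* Acquire pairs with the release above: observing the flag guarantees
	 * the earlier setup stores are visible too. */
	if (!atomic_load_explicit(&c->initialized, memory_order_acquire))
		return false;
	return c->max_write == 4096;
}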
index 252b8a5de8b57f71b841d1fc64c9f48e78b641c2..08e7b1a9d5d0edaca8b94ef386d9200078958df3 100644 (file)
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
        args->in.args[0].size = name->len + 1;
        args->in.args[0].value = name->name;
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(struct fuse_entry_out);
+       args->out.args[0].size = sizeof(struct fuse_entry_out);
        args->out.args[0].value = outarg;
 }
 
@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
        args.in.h.opcode = FUSE_CREATE;
        args.in.h.nodeid = get_node_id(dir);
        args.in.numargs = 2;
-       args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
-                                               sizeof(inarg);
+       args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.in.args[1].size = entry->d_name.len + 1;
        args.in.args[1].value = entry->d_name.name;
        args.out.numargs = 2;
-       if (fc->minor < 9)
-               args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args.out.args[0].size = sizeof(outentry);
+       args.out.args[0].size = sizeof(outentry);
        args.out.args[0].value = &outentry;
        args.out.args[1].size = sizeof(outopen);
        args.out.args[1].value = &outopen;
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
        memset(&outarg, 0, sizeof(outarg));
        args->in.h.nodeid = get_node_id(dir);
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(outarg);
+       args->out.args[0].size = sizeof(outarg);
        args->out.args[0].value = &outarg;
        err = fuse_simple_request(fc, args);
        if (err)
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
        inarg.umask = current_umask();
        args.in.h.opcode = FUSE_MKNOD;
        args.in.numargs = 2;
-       args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
-                                               sizeof(inarg);
+       args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.in.args[1].size = entry->d_name.len + 1;
        args.in.args[1].value = entry->d_name.name;
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
        args.in.args[0].size = sizeof(inarg);
        args.in.args[0].value = &inarg;
        args.out.numargs = 1;
-       if (fc->minor < 9)
-               args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
-       else
-               args.out.args[0].size = sizeof(outarg);
+       args.out.args[0].size = sizeof(outarg);
        args.out.args[0].value = &outarg;
        err = fuse_simple_request(fc, &args);
        if (!err) {
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
        args->in.args[0].size = sizeof(*inarg_p);
        args->in.args[0].value = inarg_p;
        args->out.numargs = 1;
-       if (fc->minor < 9)
-               args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
-       else
-               args->out.args[0].size = sizeof(*outarg_p);
+       args->out.args[0].size = sizeof(*outarg_p);
        args->out.args[0].value = outarg_p;
 }
 
index e0fc6725d1d0d66a4c3c7dce595239631ba353b1..1cdfb07c1376b4f4b5633e86fdbdfc4320953de2 100644 (file)
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
 int fuse_do_setattr(struct inode *inode, struct iattr *attr,
                    struct file *file);
 
+void fuse_set_initialized(struct fuse_conn *fc);
+
 #endif /* _FS_FUSE_I_H */
index 6749109f255da69a5c24825aab1f2a25140fbb47..f38256e4476ed2a9101480342bcd0fd90a99fd38 100644 (file)
@@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
        args.in.h.opcode = FUSE_STATFS;
        args.in.h.nodeid = get_node_id(dentry->d_inode);
        args.out.numargs = 1;
-       args.out.args[0].size =
-               fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
+       args.out.args[0].size = sizeof(outarg);
        args.out.args[0].value = &outarg;
        err = fuse_simple_request(fc, &args);
        if (!err)
@@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                fc->max_write = max_t(unsigned, 4096, fc->max_write);
                fc->conn_init = 1;
        }
-       fc->initialized = 1;
+       fuse_set_initialized(fc);
        wake_up_all(&fc->blocked_waitq);
 }
 
index bb63254ed8486f42200230b4bbaa80257f92700d..735d7522a3a911f19af593d6b5f7d366d6cf448d 100644 (file)
@@ -362,6 +362,9 @@ repeat:
                        rs.cont_size = isonum_733(rr->u.CE.size);
                        break;
                case SIG('E', 'R'):
+                       /* Invalid length of ER tag id? */
+                       if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
+                               goto out;
                        ISOFS_SB(inode->i_sb)->s_rock = 1;
                        printk(KERN_DEBUG "ISO 9660 Extensions: ");
                        {
index 37989f02a226ac40e104ee02efdad39d805686c5..2d881b381d2b787bbb2ff40b151e7496c0abafae 100644 (file)
@@ -201,10 +201,14 @@ static unsigned int kernfs_name_hash(const char *name, const void *ns)
 static int kernfs_name_compare(unsigned int hash, const char *name,
                               const void *ns, const struct kernfs_node *kn)
 {
-       if (hash != kn->hash)
-               return hash - kn->hash;
-       if (ns != kn->ns)
-               return ns - kn->ns;
+       if (hash < kn->hash)
+               return -1;
+       if (hash > kn->hash)
+               return 1;
+       if (ns < kn->ns)
+               return -1;
+       if (ns > kn->ns)
+               return 1;
        return strcmp(name, kn->name);
 }
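
The rewrite avoids returning the difference of two unsigned values (or of two pointers) because the subtraction can wrap, and its sign then no longer reflects the real ordering once narrowed to int. A small stand-alone illustration of the pitfall and the safe three-way form:

#include <stdint.h>

/* Broken: for 32-bit unsigned hashes the difference wraps around.
 * Example: a = 1, b = 0xffffffff gives (int)(a - b) == 2, i.e. "a > b",
 * even though a < b. Pointer subtraction has the same problem. */
static int cmp_by_subtraction(uint32_t a, uint32_t b)
{
	return (int)(a - b);
}

/* Safe: explicit three-way comparison, no wrap-around; works for pointers
 * as well once they are compared with < and > directly. */
static int cmp_three_way(uint32_t a, uint32_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}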
 
index e94c887da2d72f7043ed010bfa8675b4ad825bb6..55505cbe11afa165ec90ec934301c15a1b9a4314 100644 (file)
@@ -138,10 +138,6 @@ lockd(void *vrqstp)
 
        dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
-       if (!nlm_timeout)
-               nlm_timeout = LOCKD_DFLT_TIMEO;
-       nlmsvc_timeout = nlm_timeout * HZ;
-
        /*
         * The main request loop. We don't terminate until the last
         * NFS mount or NFS daemon has gone away.
@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
                printk(KERN_WARNING
                        "lockd_up: no pid, %d users??\n", nlmsvc_users);
 
+       if (!nlm_timeout)
+               nlm_timeout = LOCKD_DFLT_TIMEO;
+       nlmsvc_timeout = nlm_timeout * HZ;
+
        serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
        if (!serv) {
                printk(KERN_WARNING "lockd_up: create service failed\n");
index 735b8d3fa78c92bf746aff05475be1fa2a82abd6..59e2f905e4ffea324dbf44faf1b666974adc6c23 100644 (file)
@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
                        break;
        }
        trace_generic_delete_lease(inode, fl);
-       if (fl)
+       if (fl && IS_LEASE(fl))
                error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
        spin_unlock(&inode->i_lock);
        locks_dispose_list(&dispose);
index 03311259b0c45c88de37122cffc0f28c8f6c7e63..953daa44a28232d6863da375e59d44a0b42f49b6 100644 (file)
@@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
        kfree(clp->cl_serverowner);
        kfree(clp->cl_serverscope);
        kfree(clp->cl_implid);
+       kfree(clp->cl_owner_id);
 }
 
 void nfs4_free_client(struct nfs_client *clp)
@@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep,
        spin_unlock(&nn->nfs_client_lock);
 }
 
+static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
+               const struct nfs_client *clp2)
+{
+       if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL)
+               return true;
+       return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
+}
+
 /**
  * nfs40_walk_client_list - Find server that recognizes a client ID
  *
@@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->rpc_ops != new->rpc_ops)
                        continue;
 
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
                if (pos->cl_minorversion != new->cl_minorversion)
                        continue;
 
@@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
                if (pos->cl_clientid != new->cl_clientid)
                        continue;
 
+               if (!nfs4_match_client_owner_id(pos, new))
+                       continue;
+
                atomic_inc(&pos->cl_count);
                spin_unlock(&nn->nfs_client_lock);
 
@@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
 }
 
 /*
- * Returns true if the server owners match
+ * Returns true if the server major ids match
  */
 static bool
-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
 {
        struct nfs41_server_owner *o1 = a->cl_serverowner;
        struct nfs41_server_owner *o2 = b->cl_serverowner;
 
-       if (o1->minor_id != o2->minor_id) {
-               dprintk("NFS: --> %s server owner minor IDs do not match\n",
-                       __func__);
-               return false;
-       }
-
        if (o1->major_id_sz != o2->major_id_sz)
                goto out_major_mismatch;
        if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
@@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (pos->rpc_ops != new->rpc_ops)
                        continue;
 
-               if (pos->cl_proto != new->cl_proto)
-                       continue;
-
                if (pos->cl_minorversion != new->cl_minorversion)
                        continue;
 
@@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (!nfs4_match_clientids(pos, new))
                        continue;
 
-               if (!nfs4_match_serverowners(pos, new))
+               /*
+                * Note that session trunking is just a special subcase of
+                * client id trunking. In either case, we want to fall back
+                * to using the existing nfs_client.
+                */
+               if (!nfs4_check_clientid_trunking(pos, new))
+                       continue;
+
+               /* Unlike NFSv4.0, we know that NFSv4.1 always uses the
+                * uniform string; however, someone might switch the
+                * uniquifier string on us.
+                */
+               if (!nfs4_match_client_owner_id(pos, new))
                        continue;
 
                atomic_inc(&pos->cl_count);
index e7f8d5ff2581a98269a262998beb43ccaca23e3c..c347705b016104de8b0360f163e87c19d9b4d78c 100644 (file)
@@ -1117,8 +1117,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
                return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
-       if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
-               return 0;
        if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
                return 0;
        nfs_mark_delegation_referenced(delegation);
@@ -4917,11 +4915,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
 }
 
 static unsigned int
-nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
+nfs4_init_nonuniform_client_string(struct nfs_client *clp,
                                   char *buf, size_t len)
 {
        unsigned int result;
 
+       if (clp->cl_owner_id != NULL)
+               return strlcpy(buf, clp->cl_owner_id, len);
+
        rcu_read_lock();
        result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
                                clp->cl_ipaddr,
@@ -4930,24 +4931,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
                                rpc_peeraddr2str(clp->cl_rpcclient,
                                                        RPC_DISPLAY_PROTO));
        rcu_read_unlock();
+       clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
        return result;
 }
 
 static unsigned int
-nfs4_init_uniform_client_string(const struct nfs_client *clp,
+nfs4_init_uniform_client_string(struct nfs_client *clp,
                                char *buf, size_t len)
 {
        const char *nodename = clp->cl_rpcclient->cl_nodename;
+       unsigned int result;
+
+       if (clp->cl_owner_id != NULL)
+               return strlcpy(buf, clp->cl_owner_id, len);
 
        if (nfs4_client_id_uniquifier[0] != '\0')
-               return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
+               result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
                                clp->rpc_ops->version,
                                clp->cl_minorversion,
                                nfs4_client_id_uniquifier,
                                nodename);
-       return scnprintf(buf, len, "Linux NFSv%u.%u %s",
+       else
+               result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
                                clp->rpc_ops->version, clp->cl_minorversion,
                                nodename);
+       clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
+       return result;
 }
 
 /*
index 3550a9c876161afe908274c937591c5f40d9f78a..c06a1ba80d73e5fd2cd6c68f61210e8be9518993 100644 (file)
@@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
                status = nfs4_setlease(dp);
                goto out;
        }
-       atomic_inc(&fp->fi_delegees);
        if (fp->fi_had_conflict) {
                status = -EAGAIN;
                goto out_unlock;
        }
+       atomic_inc(&fp->fi_delegees);
        hash_delegation_locked(dp, fp);
        status = 0;
 out_unlock:
index c991616acca9ef86e99d1e33dfd9d427adfb8b5f..bff8567aa42d1b04cd85e6f2863de3b1f4d9a2db 100644 (file)
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        start = buf;
        group = file->private_data;
 
        pr_debug("%s: group=%p\n", __func__, group);
 
+       add_wait_queue(&group->notification_waitq, &wait);
        while (1) {
-               prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
                mutex_lock(&group->notification_mutex);
                kevent = get_one_event(group, count);
                mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 
                        if (start != buf)
                                break;
-                       schedule();
+
+                       wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
                        continue;
                }
 
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
                buf += ret;
                count -= ret;
        }
+       remove_wait_queue(&group->notification_waitq, &wait);
 
-       finish_wait(&group->notification_waitq, &wait);
        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
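
The fanotify_read() hunk above replaces the prepare_to_wait()/schedule() loop with the wait_woken() API. Below is a minimal sketch of that pattern in isolation, assuming a hypothetical wait queue and a caller-supplied ready() predicate (neither is part of this patch):

    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    /* Illustrative only: sleep until ready() reports true, using the
     * wait_woken() pattern fanotify_read() switches to above. */
    static int wait_for_event(wait_queue_head_t *waitq, bool (*ready)(void))
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            int ret = 0;

            add_wait_queue(waitq, &wait);
            while (!ready()) {
                    if (signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }
                    /* The waker does wake_up(waitq); wait_woken() closes the
                     * race between the check above and going to sleep. */
                    wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
            }
            remove_wait_queue(waitq, &wait);
            return ret;
    }
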
index 79b5af5e6a7b5d8a593a251cfa8c27aa11769447..cecd875653e4cc12d4326e7bf3e192106c0a94c0 100644 (file)
@@ -2023,11 +2023,8 @@ leave:
        dlm_lockres_drop_inflight_ref(dlm, res);
        spin_unlock(&res->spinlock);
 
-       if (ret < 0) {
+       if (ret < 0)
                mlog_errno(ret);
-               if (newlock)
-                       dlm_lock_put(newlock);
-       }
 
        return ret;
 }
index b931e04e33889742a6192255b3bd95d8779203ea..914c121ec8900380482f83728b90f7b0bd14e418 100644 (file)
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     const char *symname);
 
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+                            struct buffer_head **bh1,
+                            struct inode *inode1,
+                            struct buffer_head **bh2,
+                            struct inode *inode2,
+                            int rename);
+
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
 /* An orphan dir name is an 8 byte value, printed as a hex string */
 #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
 
@@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry,
 {
        handle_t *handle;
        struct inode *inode = old_dentry->d_inode;
+       struct inode *old_dir = old_dentry->d_parent->d_inode;
        int err;
        struct buffer_head *fe_bh = NULL;
+       struct buffer_head *old_dir_bh = NULL;
        struct buffer_head *parent_fe_bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry,
 
        dquot_initialize(dir);
 
-       err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
+       err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+                       &parent_fe_bh, dir, 0);
        if (err < 0) {
                if (err != -ENOENT)
                        mlog_errno(err);
                return err;
        }
 
+       /* make sure both dirs have bhs
+        * get an extra ref on old_dir_bh if old==new */
+       if (!parent_fe_bh) {
+               if (old_dir_bh) {
+                       parent_fe_bh = old_dir_bh;
+                       get_bh(parent_fe_bh);
+               } else {
+                       mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
+                       err = -EIO;
+                       goto out;
+               }
+       }
+
        if (!dir->i_nlink) {
                err = -ENOENT;
                goto out;
        }
 
-       err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+       err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
                        old_dentry->d_name.len, &old_de_ino);
        if (err) {
                err = -ENOENT;
@@ -801,10 +825,11 @@ out_unlock_inode:
        ocfs2_inode_unlock(inode, 1);
 
 out:
-       ocfs2_inode_unlock(dir, 1);
+       ocfs2_double_unlock(old_dir, dir);
 
        brelse(fe_bh);
        brelse(parent_fe_bh);
+       brelse(old_dir_bh);
 
        ocfs2_free_dir_lookup_result(&lookup);
 
@@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
 }
 
 /*
- * The only place this should be used is rename!
+ * The only place this should be used is rename and link!
  * if they have the same id, then the 1st one is the only one locked.
  */
 static int ocfs2_double_lock(struct ocfs2_super *osb,
                             struct buffer_head **bh1,
                             struct inode *inode1,
                             struct buffer_head **bh2,
-                            struct inode *inode2)
+                            struct inode *inode2,
+                            int rename)
 {
        int status;
        int inode1_is_ancestor, inode2_is_ancestor;
@@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                }
                /* lock id2 */
                status = ocfs2_inode_lock_nested(inode2, bh2, 1,
-                                                OI_LS_RENAME1);
+                               rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
@@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
        }
 
        /* lock id1 */
-       status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
+       status = ocfs2_inode_lock_nested(inode1, bh1, 1,
+                       rename == 1 ?  OI_LS_RENAME2 : OI_LS_PARENT);
        if (status < 0) {
                /*
                 * An error return must mean that no cluster locks
@@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir,
 
        /* if old and new are the same, this'll just do one lock. */
        status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
-                                  &new_dir_bh, new_dir);
+                                  &new_dir_bh, new_dir, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
index a012c51caffd2a195b6015b9594d0f1862dba324..05e90edd199214fd0507b5e5b79a3b60ea8a49d3 100644 (file)
@@ -57,6 +57,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        sector_t offset;
        int i, num, ret = 0;
        struct extent_position epos = { NULL, 0, {0, 0} };
+       struct super_block *sb = dir->i_sb;
 
        if (ctx->pos == 0) {
                if (!dir_emit_dot(file, ctx))
@@ -76,16 +77,16 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        if (nf_pos == 0)
                nf_pos = udf_ext0_offset(dir);
 
-       fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
+       fibh.soffset = fibh.eoffset = nf_pos & (sb->s_blocksize - 1);
        if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
-               if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
+               if (inode_bmap(dir, nf_pos >> sb->s_blocksize_bits,
                    &epos, &eloc, &elen, &offset)
                    != (EXT_RECORDED_ALLOCATED >> 30)) {
                        ret = -ENOENT;
                        goto out;
                }
-               block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
-               if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+               block = udf_get_lb_pblock(sb, &eloc, offset);
+               if ((++offset << sb->s_blocksize_bits) < elen) {
                        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                                epos.offset -= sizeof(struct short_ad);
                        else if (iinfo->i_alloc_type ==
@@ -95,18 +96,18 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                        offset = 0;
                }
 
-               if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
+               if (!(fibh.sbh = fibh.ebh = udf_tread(sb, block))) {
                        ret = -EIO;
                        goto out;
                }
 
-               if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
-                       i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
-                       if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
-                               i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
+               if (!(offset & ((16 >> (sb->s_blocksize_bits - 9)) - 1))) {
+                       i = 16 >> (sb->s_blocksize_bits - 9);
+                       if (i + offset > (elen >> sb->s_blocksize_bits))
+                               i = (elen >> sb->s_blocksize_bits) - offset;
                        for (num = 0; i > 0; i--) {
-                               block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i);
-                               tmp = udf_tgetblk(dir->i_sb, block);
+                               block = udf_get_lb_pblock(sb, &eloc, offset + i);
+                               tmp = udf_tgetblk(sb, block);
                                if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
                                        bha[num++] = tmp;
                                else
@@ -152,12 +153,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                }
 
                if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
-                       if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+                       if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
                                continue;
                }
 
                if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
-                       if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+                       if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
                                continue;
                }
 
@@ -167,12 +168,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                        continue;
                }
 
-               flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+               flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
                if (!flen)
                        continue;
 
                tloc = lelb_to_cpu(cfi.icb.extLocation);
-               iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
+               iblock = udf_get_lb_pblock(sb, &tloc, 0);
                if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
                        goto out;
        } /* end while */
index c9b4df5810d52560b084b9557150faf8cfbe6e29..5bc71d9a674a7e5dfc3ff882ee61a2591a328c7b 100644 (file)
@@ -1489,6 +1489,20 @@ reread:
        }
        inode->i_generation = iinfo->i_unique;
 
+       /* Sanity checks for files in ICB so that we don't get confused later */
+       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+               /*
+                * For a file in an ICB the data is stored in the allocation
+                * descriptor, so the sizes should match
+                */
+               if (iinfo->i_lenAlloc != inode->i_size)
+                       goto out;
+               /* File in ICB has to fit in there... */
+               if (inode->i_size > inode->i_sb->s_blocksize -
+                                       udf_file_entry_alloc_offset(inode))
+                       goto out;
+       }
+
        switch (fe->icbTag.fileType) {
        case ICBTAG_FILE_TYPE_DIRECTORY:
                inode->i_op = &udf_dir_inode_operations;
index c12e260fd6c417eb9c690782b8860f0e5eeff8d9..33b246b82c98510289d533fcdcbc59154d38589f 100644 (file)
@@ -159,18 +159,19 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
        struct udf_inode_info *dinfo = UDF_I(dir);
        int isdotdot = child->len == 2 &&
                child->name[0] == '.' && child->name[1] == '.';
+       struct super_block *sb = dir->i_sb;
 
        size = udf_ext0_offset(dir) + dir->i_size;
        f_pos = udf_ext0_offset(dir);
 
        fibh->sbh = fibh->ebh = NULL;
-       fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
+       fibh->soffset = fibh->eoffset = f_pos & (sb->s_blocksize - 1);
        if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
-               if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+               if (inode_bmap(dir, f_pos >> sb->s_blocksize_bits, &epos,
                    &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
                        goto out_err;
-               block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
-               if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+               block = udf_get_lb_pblock(sb, &eloc, offset);
+               if ((++offset << sb->s_blocksize_bits) < elen) {
                        if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                                epos.offset -= sizeof(struct short_ad);
                        else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
@@ -178,7 +179,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                } else
                        offset = 0;
 
-               fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
+               fibh->sbh = fibh->ebh = udf_tread(sb, block);
                if (!fibh->sbh)
                        goto out_err;
        }
@@ -217,12 +218,12 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                }
 
                if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
-                       if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+                       if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
                                continue;
                }
 
                if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
-                       if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+                       if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
                                continue;
                }
 
@@ -233,7 +234,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                if (!lfi)
                        continue;
 
-               flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+               flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
                if (flen && udf_match(flen, fname, child->len, child->name))
                        goto out_ok;
        }
index 6fb7945c1e6e8813afce2ad81aa41ea28a6f4565..ac10ca939f267283ba0f64da9d4ec82de0eeeb61 100644 (file)
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
-static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
-                          int fromlen, unsigned char *to)
+static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
+                         int fromlen, unsigned char *to, int tolen)
 {
        struct pathComponent *pc;
        int elen = 0;
+       int comp_len;
        unsigned char *p = to;
 
+       /* Reserve one byte for terminating \0 */
+       tolen--;
        while (elen < fromlen) {
                pc = (struct pathComponent *)(from + elen);
+               elen += sizeof(struct pathComponent);
                switch (pc->componentType) {
                case 1:
                        /*
                         * Symlink points to some place which should be agreed
                         * upon between originator and receiver of the media. Ignore.
                         */
-                       if (pc->lengthComponentIdent > 0)
+                       if (pc->lengthComponentIdent > 0) {
+                               elen += pc->lengthComponentIdent;
                                break;
+                       }
                        /* Fall through */
                case 2:
+                       if (tolen == 0)
+                               return -ENAMETOOLONG;
                        p = to;
                        *p++ = '/';
+                       tolen--;
                        break;
                case 3:
+                       if (tolen < 3)
+                               return -ENAMETOOLONG;
                        memcpy(p, "../", 3);
                        p += 3;
+                       tolen -= 3;
                        break;
                case 4:
+                       if (tolen < 2)
+                               return -ENAMETOOLONG;
                        memcpy(p, "./", 2);
                        p += 2;
+                       tolen -= 2;
                        /* that would be . - just ignore */
                        break;
                case 5:
-                       p += udf_get_filename(sb, pc->componentIdent, p,
-                                             pc->lengthComponentIdent);
+                       elen += pc->lengthComponentIdent;
+                       if (elen > fromlen)
+                               return -EIO;
+                       comp_len = udf_get_filename(sb, pc->componentIdent,
+                                                   pc->lengthComponentIdent,
+                                                   p, tolen);
+                       p += comp_len;
+                       tolen -= comp_len;
+                       if (tolen == 0)
+                               return -ENAMETOOLONG;
                        *p++ = '/';
+                       tolen--;
                        break;
                }
-               elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
        }
        if (p > to + 1)
                p[-1] = '\0';
        else
                p[0] = '\0';
+       return 0;
 }
 
 static int udf_symlink_filler(struct file *file, struct page *page)
@@ -80,11 +104,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        struct inode *inode = page->mapping->host;
        struct buffer_head *bh = NULL;
        unsigned char *symlink;
-       int err = -EIO;
+       int err;
        unsigned char *p = kmap(page);
        struct udf_inode_info *iinfo;
        uint32_t pos;
 
+       /* We don't support symlinks longer than one block */
+       if (inode->i_size > inode->i_sb->s_blocksize) {
+               err = -ENAMETOOLONG;
+               goto out_unmap;
+       }
+
        iinfo = UDF_I(inode);
        pos = udf_block_map(inode, 0);
 
@@ -94,14 +124,18 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        } else {
                bh = sb_bread(inode->i_sb, pos);
 
-               if (!bh)
-                       goto out;
+               if (!bh) {
+                       err = -EIO;
+                       goto out_unlock_inode;
+               }
 
                symlink = bh->b_data;
        }
 
-       udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
+       err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
        brelse(bh);
+       if (err)
+               goto out_unlock_inode;
 
        up_read(&iinfo->i_data_sem);
        SetPageUptodate(page);
@@ -109,9 +143,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        unlock_page(page);
        return 0;
 
-out:
+out_unlock_inode:
        up_read(&iinfo->i_data_sem);
        SetPageError(page);
+out_unmap:
        kunmap(page);
        unlock_page(page);
        return err;
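
udf_pc_to_char() now tracks the remaining output space (tolen) and fails with -ENAMETOOLONG instead of overrunning the page. A minimal sketch of the same reserve-one-byte-for-the-NUL pattern, using a hypothetical append_component() helper that is not part of the patch:

    #include <linux/errno.h>
    #include <linux/string.h>

    /* Illustrative only: append one path component plus a '/' into a fixed
     * buffer of tolen bytes, keeping one byte back for the final '\0'. */
    static int append_component(char *to, int tolen, int *used,
                                const char *comp, int comp_len)
    {
            if (*used + comp_len + 1 > tolen - 1)
                    return -ENAMETOOLONG;
            memcpy(to + *used, comp, comp_len);
            *used += comp_len;
            to[(*used)++] = '/';
            to[*used] = '\0';
            return 0;
    }
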
index 1cc3c993ebd04f4adb7b425f500e40d1185aae9f..47bb3f5ca360d4f1be8f92036685278868ab99e3 100644 (file)
@@ -211,7 +211,8 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
 }
 
 /* unicode.c */
-extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
+extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
+                           int);
 extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
                            int);
 extern int udf_build_ustr(struct ustr *, dstring *, int);
index afd470e588ffbbd24ec886b3e8a619833a5e3e9a..b84fee372734bd494ba5eb86f5ce5c8c28b99b5a 100644 (file)
@@ -28,7 +28,8 @@
 
 #include "udf_sb.h"
 
-static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
+static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
+                                 int);
 
 static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
 {
@@ -333,8 +334,8 @@ try_again:
        return u_len + 1;
 }
 
-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
-                    int flen)
+int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
+                    uint8_t *dname, int dlen)
 {
        struct ustr *filename, *unifilename;
        int len = 0;
@@ -347,7 +348,7 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
        if (!unifilename)
                goto out1;
 
-       if (udf_build_ustr_exact(unifilename, sname, flen))
+       if (udf_build_ustr_exact(unifilename, sname, slen))
                goto out2;
 
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
@@ -366,7 +367,8 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
        } else
                goto out2;
 
-       len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
+       len = udf_translate_to_linux(dname, dlen,
+                                    filename->u_name, filename->u_len,
                                     unifilename->u_name, unifilename->u_len);
 out2:
        kfree(unifilename);
@@ -403,10 +405,12 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
 #define EXT_MARK               '.'
 #define CRC_MARK               '#'
 #define EXT_SIZE               5
+/* Number of chars we need to store generated CRC to make filename unique */
+#define CRC_LEN                        5
 
-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
-                                 int udfLen, uint8_t *fidName,
-                                 int fidNameLen)
+static int udf_translate_to_linux(uint8_t *newName, int newLen,
+                                 uint8_t *udfName, int udfLen,
+                                 uint8_t *fidName, int fidNameLen)
 {
        int index, newIndex = 0, needsCRC = 0;
        int extIndex = 0, newExtIndex = 0, hasExt = 0;
@@ -439,7 +443,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
                                        newExtIndex = newIndex;
                                }
                        }
-                       if (newIndex < 256)
+                       if (newIndex < newLen)
                                newName[newIndex++] = curr;
                        else
                                needsCRC = 1;
@@ -467,13 +471,13 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
                                }
                                ext[localExtIndex++] = curr;
                        }
-                       maxFilenameLen = 250 - localExtIndex;
+                       maxFilenameLen = newLen - CRC_LEN - localExtIndex;
                        if (newIndex > maxFilenameLen)
                                newIndex = maxFilenameLen;
                        else
                                newIndex = newExtIndex;
-               } else if (newIndex > 250)
-                       newIndex = 250;
+               } else if (newIndex > newLen - CRC_LEN)
+                       newIndex = newLen - CRC_LEN;
                newName[newIndex++] = CRC_MARK;
                valueCRC = crc_itu_t(0, fidName, fidNameLen);
                newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
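
CRC_LEN is 5 because the uniquifying suffix is the CRC_MARK character followed by the four upper-case hex digits of a 16-bit CRC, as the translation code above begins to emit. A small illustrative sketch of building such a suffix (udf_crc_suffix is a hypothetical helper, not in the patch):

    #include <linux/crc-itu-t.h>
    #include <linux/kernel.h>

    /* Illustrative only: emit the 5-byte "#XXXX" suffix used to keep
     * truncated names unique. buf must have room for CRC_LEN bytes. */
    static void udf_crc_suffix(uint8_t *buf, const uint8_t *name, int len)
    {
            u16 crc = crc_itu_t(0, name, len);

            buf[0] = '#';
            buf[1] = hex_asc_upper_hi(crc >> 8);
            buf[2] = hex_asc_upper_lo(crc >> 8);
            buf[3] = hex_asc_upper_hi(crc);
            buf[4] = hex_asc_upper_lo(crc);
    }
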
index 3ca9b751f1224cfd9a6816ebebe38d7559c7bcd0..b95dc32a6e6b61aefac9fefce83c6a3da20fa111 100644 (file)
@@ -196,8 +196,8 @@ struct acpi_processor_flags {
 struct acpi_processor {
        acpi_handle handle;
        u32 acpi_id;
-       u32 apic_id;
-       u32 id;
+       u32 phys_id;    /* CPU hardware ID such as APIC ID for x86 */
+       u32 id;         /* CPU logical ID allocated by OS */
        u32 pblk;
        int performance_platform_limit;
        int throttling_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 #endif                         /* CONFIG_CPU_FREQ */
 
 /* in processor_core.c */
-int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int apic_id, u32 acpi_id);
+int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int phys_id, u32 acpi_id);
 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 
 /* in processor_pdc.c */
index 08848050922e613f28a1d8f7e4b3ad4c34b463be..db284bff29dcceb39360d458cec3a194745955f8 100644 (file)
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 
 static inline void __tlb_reset_range(struct mmu_gather *tlb)
 {
-       tlb->start = TASK_SIZE;
-       tlb->end = 0;
+       if (tlb->fullmm) {
+               tlb->start = tlb->end = ~0;
+       } else {
+               tlb->start = TASK_SIZE;
+               tlb->end = 0;
+       }
 }
 
 /*
index 59822a9958581dc5a1871ec3bc02c756656a836e..b5e6b0069ac7703b927a0c040ffcbd764c1afd89 100644 (file)
@@ -11,7 +11,7 @@
 #define _DT_BINDINGS_THERMAL_THERMAL_H
 
 /* On cooling devices upper and lower limits */
-#define THERMAL_NO_LIMIT               (-1UL)
+#define THERMAL_NO_LIMIT               (~0)
 
 #endif
 
index 856d381b1d5b83ce923be0f5d7da80ba420c1d14..d459cd17b477600cadf54ad2a227b66c5638112f 100644 (file)
@@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
-int acpi_unmap_lsapic(int cpu);
+int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_unmap_cpu(int cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
index 8aded9ab2e4e89ddb66e5920c0819bade6fb0760..5735e7130d630f94fe62d3e0a7a7078434c8ee12 100644 (file)
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
        unsigned long           flags;          /* BLK_MQ_F_* flags */
 
        struct request_queue    *queue;
-       unsigned int            queue_num;
        struct blk_flush_queue  *fq;
 
        void                    *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
        unsigned long           dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
        unsigned int            numa_node;
-       unsigned int            cmd_size;       /* per-request extra data */
+       unsigned int            queue_num;
 
        atomic_t                nr_active;
 
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
+int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
                void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 
 /*
  * Driver command data is immediately after the request. So subtract request
index 445d59231bc4cc242e2aa0e1ca073e3c320af56c..c294e3e25e37a50a953a4a0bb3cb1aa66ba904d9 100644 (file)
@@ -190,6 +190,7 @@ enum rq_flag_bits {
        __REQ_PM,               /* runtime pm request */
        __REQ_HASHED,           /* on IO scheduler merge hash */
        __REQ_MQ_INFLIGHT,      /* track inflight for MQ */
+       __REQ_NO_TIMEOUT,       /* requests may never expire */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -243,5 +244,6 @@ enum rq_flag_bits {
 #define REQ_PM                 (1ULL << __REQ_PM)
 #define REQ_HASHED             (1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT                (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT         (1ULL << __REQ_NO_TIMEOUT)
 
 #endif /* __LINUX_BLK_TYPES_H */
index 5d86416d35f2223da40fae9e9fd8e312d365026b..61b19c46bdb33d5fc2f4752df0c345350fa739e8 100644 (file)
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
                        struct ceph_osd_data osd_data;
                } extent;
                struct {
-                       __le32 name_len;
-                       __le32 value_len;
+                       u32 name_len;
+                       u32 value_len;
                        __u8 cmp_op;       /* CEPH_OSD_CMPXATTR_OP_* */
                        __u8 cmp_mode;     /* CEPH_OSD_CMPXATTR_MODE_* */
                        struct ceph_osd_data osd_data;
index a1c81f80978ee4b38bbbb1cdd781c7afa0362c5e..33063f872ee3cd698a233a0f0defa67aa1b6bd01 100644 (file)
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
        }
 }
 
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
  * compiler is aware of some particular ordering.  One way to make the
  * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
  *
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE()  will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
  * compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
 #define READ_ONCE(x) \
        ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
 
-#define ASSIGN_ONCE(val, x) \
-       ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+       ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
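
The ASSIGN_ONCE() to WRITE_ONCE() rename above keeps the documented pairing with READ_ONCE(). A minimal sketch of the intended producer/consumer use, assuming a hypothetical shared_flag variable accessed from two contexts:

    #include <linux/compiler.h>

    static int shared_flag;     /* hypothetical shared state */

    /* Producer: publish the new value in a single store, with no
     * compiler-introduced tearing or merging. */
    static void set_flag(int v)
    {
            WRITE_ONCE(shared_flag, v);
    }

    /* Consumer: force a fresh load on every call instead of letting the
     * compiler cache the value across iterations. */
    static int flag_is_set(void)
    {
            return READ_ONCE(shared_flag) != 0;
    }
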
 
index c303d383def1146589a30c980d2c8dadd1bd89cc..bd955270d5aae60f77cc8f936a5820d9690ac6ac 100644 (file)
@@ -50,7 +50,7 @@ static inline struct thermal_cooling_device *
 of_cpufreq_cooling_register(struct device_node *np,
                            const struct cpumask *clip_cpus)
 {
-       return NULL;
+       return ERR_PTR(-ENOSYS);
 }
 #endif
 
@@ -65,13 +65,13 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
 static inline struct thermal_cooling_device *
 cpufreq_cooling_register(const struct cpumask *clip_cpus)
 {
-       return NULL;
+       return ERR_PTR(-ENOSYS);
 }
 static inline struct thermal_cooling_device *
 of_cpufreq_cooling_register(struct device_node *np,
                            const struct cpumask *clip_cpus)
 {
-       return NULL;
+       return ERR_PTR(-ENOSYS);
 }
 static inline
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
index a07e087f54b205741da6c126d0ca2144257140af..ab70f3bc44ad7a2c1ddf2454dac51f7a80e95f41 100644 (file)
@@ -53,7 +53,6 @@ struct cpuidle_state {
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_TIME_INVALID      (0x01) /* is residency time measurable? */
 #define CPUIDLE_FLAG_COUPLED   (0x02) /* state applies to multiple cpus */
 #define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */
 
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
 /**
  * cpuidle_get_last_residency - retrieves the last state's residency time
  * @dev: the target CPU
- *
- * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
  */
 static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
 {
index f90c0282c11493f94a84095f61db94ac90974cc4..42efe13077b6c1b8dd139c6cea7a241e5d6b320d 100644 (file)
@@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define FMODE_CAN_WRITE         ((__force fmode_t)0x40000)
 
 /* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY         ((__force fmode_t)0x1000000)
+#define FMODE_NONOTIFY         ((__force fmode_t)0x4000000)
 
 /*
  * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
index 290db1269c4c7970ab016165c62d93eec53059b4..75ae2e2631fceaa27915f3d100b1f03244b17500 100644 (file)
  * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
  */
 
+/* Shifted versions of the command enable bits are used if the command
+ * has no arguments (see kdb_check_flags). This allows commands, such as
+ * go, to have different permissions depending upon whether they are
+ * called with an argument.
+ */
+#define KDB_ENABLE_NO_ARGS_SHIFT 10
+
 typedef enum {
-       KDB_REPEAT_NONE = 0,    /* Do not repeat this command */
-       KDB_REPEAT_NO_ARGS,     /* Repeat the command without arguments */
-       KDB_REPEAT_WITH_ARGS,   /* Repeat the command including its arguments */
-} kdb_repeat_t;
+       KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
+       KDB_ENABLE_MEM_READ = (1 << 1),
+       KDB_ENABLE_MEM_WRITE = (1 << 2),
+       KDB_ENABLE_REG_READ = (1 << 3),
+       KDB_ENABLE_REG_WRITE = (1 << 4),
+       KDB_ENABLE_INSPECT = (1 << 5),
+       KDB_ENABLE_FLOW_CTRL = (1 << 6),
+       KDB_ENABLE_SIGNAL = (1 << 7),
+       KDB_ENABLE_REBOOT = (1 << 8),
+       /* User-exposed values stop here; all remaining flags are
+        * exclusively used to describe a command's behaviour.
+        */
+
+       KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
+       KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
+
+       KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
+                                     << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
+                                     << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
+                                    << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
+                                      << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
+                                   << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
+                                   << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
+                                        << KDB_ENABLE_NO_ARGS_SHIFT,
+       KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
+
+       KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
+       KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
+} kdb_cmdflags_t;
 
 typedef int (*kdb_func_t)(int, const char **);
 
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
 #define KDB_BADLENGTH  (-19)
 #define KDB_NOBP       (-20)
 #define KDB_BADADDR    (-21)
+#define KDB_NOPERM     (-22)
 
 /*
  * kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
 
 /* Dynamic kdb shell command registration */
 extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
-                              short, kdb_repeat_t);
+extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+                             short, kdb_cmdflags_t);
 extern int kdb_unregister(char *);
 #else /* ! CONFIG_KGDB_KDB */
 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
 static inline void kdb_init(int level) {}
 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
                               char *help, short minlen) { return 0; }
-static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage,
-                                     char *help, short minlen,
-                                     kdb_repeat_t repeat) { return 0; }
+static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
+                                    char *help, short minlen,
+                                    kdb_cmdflags_t flags) { return 0; }
 static inline int kdb_unregister(char *cmd) { return 0; }
 #endif /* CONFIG_KGDB_KDB */
 enum {
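
The enum above pairs every KDB_ENABLE_* bit with a copy shifted up by KDB_ENABLE_NO_ARGS_SHIFT, so a command can carry separate permissions for its no-argument form. A rough sketch of how such a check could be expressed; this is an illustration only and does not claim to reproduce the kernel's kdb_check_flags():

    #include <linux/kdb.h>
    #include <linux/types.h>

    /* Illustrative only: test a command's flag word against the currently
     * enabled permission classes, folding the *_NO_ARGS copies back into
     * the low half when the command is invoked without arguments. */
    static bool cmd_is_permitted(kdb_cmdflags_t cmd_flags,
                                 kdb_cmdflags_t enabled, int argc)
    {
            unsigned int perm = cmd_flags & KDB_ENABLE_MASK;

            if (!argc)
                    perm |= (cmd_flags & KDB_ENABLE_MASK_NO_ARGS)
                                    >> KDB_ENABLE_NO_ARGS_SHIFT;

            /* ALWAYS_SAFE commands are permitted regardless of policy. */
            return (perm & (enabled | KDB_ENABLE_ALWAYS_SAFE)) != 0;
    }
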
index 575a86c7fcbd2d2b4168d25c3ee20c275e84fc0e..f742b6717d52af2d83aaebaa6d5ff22a72cf786b 100644 (file)
@@ -50,6 +50,8 @@ enum {
        STMPE_IDX_GPEDR_MSB,
        STMPE_IDX_GPRER_LSB,
        STMPE_IDX_GPFER_LSB,
+       STMPE_IDX_GPPUR_LSB,
+       STMPE_IDX_GPPDR_LSB,
        STMPE_IDX_GPAFR_U_MSB,
        STMPE_IDX_IEGPIOR_LSB,
        STMPE_IDX_ISGPIOR_LSB,
@@ -113,24 +115,6 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins,
 extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
 extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
 
-struct matrix_keymap_data;
-
-/**
- * struct stmpe_keypad_platform_data - STMPE keypad platform data
- * @keymap_data: key map table and size
- * @debounce_ms: debounce interval, in ms.  Maximum is
- *              %STMPE_KEYPAD_MAX_DEBOUNCE.
- * @scan_count: number of key scanning cycles to confirm key data.
- *             Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
- * @no_autorepeat: disable key autorepeat
- */
-struct stmpe_keypad_platform_data {
-       const struct matrix_keymap_data *keymap_data;
-       unsigned int debounce_ms;
-       unsigned int scan_count;
-       bool no_autorepeat;
-};
-
 #define STMPE_GPIO_NOREQ_811_TOUCH     (0xf0)
 
 /**
@@ -199,7 +183,6 @@ struct stmpe_ts_platform_data {
  * @irq_gpio: gpio number over which irq will be requested (significant only if
  *           irq_over_gpio is true)
  * @gpio: GPIO-specific platform data
- * @keypad: keypad-specific platform data
  * @ts: touchscreen-specific platform data
  */
 struct stmpe_platform_data {
@@ -212,7 +195,6 @@ struct stmpe_platform_data {
        int autosleep_timeout;
 
        struct stmpe_gpio_platform_data *gpio;
-       struct stmpe_keypad_platform_data *keypad;
        struct stmpe_ts_platform_data *ts;
 };
 
index f80d0194c9bc2fa67b73eadbf93ac65e62434000..80fc92a49649cf66ed87ac1343c153debd36619c 100644 (file)
@@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
 #if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-  #define expand_upwards(vma, address) do { } while (0)
+  #define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
index 375af80bde7d7c90bb1c09efb3edc297dbe4d864..f767a0de611f8a726df957c6595d84df92d8efb3 100644 (file)
@@ -137,6 +137,7 @@ struct sdhci_host {
 #define SDHCI_SDR104_NEEDS_TUNING (1<<10)      /* SDR104/HS200 needs tuning */
 #define SDHCI_USING_RETUNING_TIMER (1<<11)     /* Host is using a retuning timer for the card */
 #define SDHCI_USE_64_BIT_DMA   (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING     (1<<13) /* Tuning for HS400 */
 
        unsigned int version;   /* SDHCI spec. version */
 
index c31f74d76ebd3c595160a4b3b513594423f43240..52fd8e8694cfade5e844d52a70b191c3183f60a2 100644 (file)
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     3. Update dev->stats asynchronously and atomically, and define
  *        neither operation.
  *
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
  *     If device support VLAN filtering this function is called when a
  *     VLAN id is registered.
  *
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
  *     If device support VLAN filtering this function is called when a
  *     VLAN id is unregistered.
  *
@@ -1012,12 +1012,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     Callback to use for xmit over the accelerated station. This
  *     is used in place of ndo_start_xmit on accelerated net
  *     devices.
- * bool        (*ndo_gso_check) (struct sk_buff *skb,
- *                       struct net_device *dev);
+ * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ *                                         struct net_device *dev,
+ *                                         netdev_features_t features);
  *     Called by core transmit path to determine if device is capable of
- *     performing GSO on a packet. The device returns true if it is
- *     able to GSO the packet, false otherwise. If the return value is
- *     false the stack will do software GSO.
+ *     performing offload operations on a given packet. This is to give
+ *     the device an opportunity to implement any restrictions that cannot
+ *     be otherwise expressed by feature flags. The check is called with
+ *     the set of features that the stack has calculated and it returns
+ *     those the driver believes to be appropriate.
  *
  * int (*ndo_switch_parent_id_get)(struct net_device *dev,
  *                                struct netdev_phys_item_id *psid);
@@ -1178,8 +1181,9 @@ struct net_device_ops {
                                                        struct net_device *dev,
                                                        void *priv);
        int                     (*ndo_get_lock_subclass)(struct net_device *dev);
-       bool                    (*ndo_gso_check) (struct sk_buff *skb,
-                                                 struct net_device *dev);
+       netdev_features_t       (*ndo_features_check) (struct sk_buff *skb,
+                                                      struct net_device *dev,
+                                                      netdev_features_t features);
 #ifdef CONFIG_NET_SWITCHDEV
        int                     (*ndo_switch_parent_id_get)(struct net_device *dev,
                                                            struct netdev_phys_item_id *psid);
@@ -2081,7 +2085,7 @@ extern rwlock_t                           dev_base_lock;          /* Device list lock */
        list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
 #define for_each_netdev_in_bond_rcu(bond, slave)       \
                for_each_netdev_rcu(&init_net, slave)   \
-                       if (netdev_master_upper_dev_get_rcu(slave) == bond)
+                       if (netdev_master_upper_dev_get_rcu(slave) == (bond))
 #define net_device_entry(lh)   list_entry(lh, struct net_device, dev_list)
 
 static inline struct net_device *next_net_device(struct net_device *dev)
@@ -3611,8 +3615,6 @@ static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
                                   netdev_features_t features)
 {
        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
-               (dev->netdev_ops->ndo_gso_check &&
-                !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
                unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
                         (skb->ip_summed != CHECKSUM_UNNECESSARY)));
 }
index 9e572daa15d568cc0d7c82342a0d7fb3ad37c327..02fc86d2348e2157d19cd4c16c4574ef86ac4d07 100644 (file)
@@ -46,8 +46,8 @@ struct netlink_kernel_cfg {
        unsigned int    flags;
        void            (*input)(struct sk_buff *skb);
        struct mutex    *cb_mutex;
-       int             (*bind)(int group);
-       void            (*unbind)(int group);
+       int             (*bind)(struct net *net, int group);
+       void            (*unbind)(struct net *net, int group);
        bool            (*compare)(struct net *net, struct sock *sk);
 };
 
index 1e37fbb78f7afbc57b8ab3c076d66ce7555f3cb0..ddea982355f3be93947dae0b2e30f9e00209920e 100644 (file)
@@ -74,6 +74,9 @@ struct nfs_client {
        /* idmapper */
        struct idmap *          cl_idmap;
 
+       /* Client owner identifier */
+       const char *            cl_owner_id;
+
        /* Our own IP address, as a null-terminated string.
         * This is used to generate the mv0 callback address.
         */
index 7ea069cd32579caacc5953802356a62237ac0413..4b3736f7065c496601011b9474368238f9af923a 100644 (file)
@@ -251,7 +251,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
 #define FGP_NOWAIT             0x00000020
 
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
-               int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
+               int fgp_flags, gfp_t cache_gfp_mask);
 
 /**
  * find_get_page - find and get a page reference
@@ -266,13 +266,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
 {
-       return pagecache_get_page(mapping, offset, 0, 0, 0);
+       return pagecache_get_page(mapping, offset, 0, 0);
 }
 
 static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
 {
-       return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
+       return pagecache_get_page(mapping, offset, fgp_flags, 0);
 }
 
 /**
@@ -292,7 +292,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
 static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
 {
-       return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
+       return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
 }
 
 /**
@@ -319,7 +319,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
 {
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
-                                       gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+                                       gfp_mask);
 }
 
 /**
@@ -340,8 +340,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
 {
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
-                       mapping_gfp_mask(mapping),
-                       GFP_NOFS);
+                       mapping_gfp_mask(mapping));
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
index 486e84ccb1f92545ec0d4f74aaa153abf0ff8049..4f7a61ca4b393dc837cb4ad278c4a66306247cbd 100644 (file)
@@ -79,11 +79,6 @@ struct perf_branch_stack {
        struct perf_branch_entry        entries[0];
 };
 
-struct perf_regs {
-       __u64           abi;
-       struct pt_regs  *regs;
-};
-
 struct task_struct;
 
 /*
@@ -610,7 +605,14 @@ struct perf_sample_data {
                u32     reserved;
        }                               cpu_entry;
        struct perf_callchain_entry     *callchain;
+
+       /*
+        * regs_user may point to task_pt_regs or to regs_user_copy, depending
+        * on arch details.
+        */
        struct perf_regs                regs_user;
+       struct pt_regs                  regs_user_copy;
+
        struct perf_regs                regs_intr;
        u64                             stack_user_size;
 } ____cacheline_aligned;
index 3c73d5fe18be4b950628f82234b7ba58855b2c29..a5f98d53d7325b0358bd45b7b7406b4f02fef6d5 100644 (file)
@@ -1,11 +1,19 @@
 #ifndef _LINUX_PERF_REGS_H
 #define _LINUX_PERF_REGS_H
 
+struct perf_regs {
+       __u64           abi;
+       struct pt_regs  *regs;
+};
+
 #ifdef CONFIG_HAVE_PERF_REGS
 #include <asm/perf_regs.h>
 u64 perf_reg_value(struct pt_regs *regs, int idx);
 int perf_reg_validate(u64 mask);
 u64 perf_reg_abi(struct task_struct *task);
+void perf_get_regs_user(struct perf_regs *regs_user,
+                       struct pt_regs *regs,
+                       struct pt_regs *regs_user_copy);
 #else
 static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
 {
        return PERF_SAMPLE_REGS_ABI_NONE;
 }
+
+static inline void perf_get_regs_user(struct perf_regs *regs_user,
+                                     struct pt_regs *regs,
+                                     struct pt_regs *regs_user_copy)
+{
+       regs_user->regs = task_pt_regs(current);
+       regs_user->abi = perf_reg_abi(current);
+}
 #endif /* CONFIG_HAVE_PERF_REGS */
 #endif /* _LINUX_PERF_REGS_H */
index e9e6cfbfbb589d0393060e2fed0422ec402dd612..eb7d4a135a9ea71364105c0bade762b5f06b67da 100644 (file)
@@ -66,7 +66,7 @@ enum omap_control_usb_mode {
 #define        OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF      0x0
 
 #define        OMAP_CTRL_PCIE_PCS_MASK                 0xff
-#define        OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT    0x8
+#define        OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT    16
 
 #define OMAP_CTRL_USB2_PHY_PD          BIT(28)
 
@@ -79,7 +79,7 @@ enum omap_control_usb_mode {
 void omap_control_phy_power(struct device *dev, int on);
 void omap_control_usb_set_mode(struct device *dev,
                               enum omap_control_usb_mode mode);
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay);
+void omap_control_pcie_pcs(struct device *dev, u8 delay);
 #else
 
 static inline void omap_control_phy_power(struct device *dev, int on)
@@ -91,7 +91,7 @@ static inline void omap_control_usb_set_mode(struct device *dev,
 {
 }
 
-static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
 {
 }
 #endif
index 6cd20d5e651b9d751b9555000a6417d10368dee2..a9edab2c787a53e809150034128bc46448a010ad 100644 (file)
@@ -271,6 +271,8 @@ typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
                        void *data);
 void of_genpd_del_provider(struct device_node *np);
+struct generic_pm_domain *of_genpd_get_from_provider(
+                       struct of_phandle_args *genpdspec);
 
 struct generic_pm_domain *__of_genpd_xlate_simple(
                                        struct of_phandle_args *genpdspec,
@@ -288,6 +290,12 @@ static inline int __of_genpd_add_provider(struct device_node *np,
 }
 static inline void of_genpd_del_provider(struct device_node *np) {}
 
+static inline struct generic_pm_domain *of_genpd_get_from_provider(
+                       struct of_phandle_args *genpdspec)
+{
+       return NULL;
+}
+
 #define __of_genpd_xlate_simple                NULL
 #define __of_genpd_xlate_onecell       NULL
 
index c0c2bce6b0b7bab50f5c7bb5a1353d8991f8e08b..d9d7e7e56352a8855def8f86af24fdb569ee480c 100644 (file)
@@ -36,6 +36,16 @@ struct anon_vma {
         */
        atomic_t refcount;
 
+       /*
+        * Count of child anon_vmas and VMAs which point to this anon_vma.
+        *
+        * This counter is used when deciding whether to reuse an existing
+        * anon_vma instead of forking a new one. See the comments in
+        * anon_vma_clone().
+        */
+       unsigned degree;
+
+       struct anon_vma *parent;        /* Parent of this anon_vma */
+
        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
index c611a02fbc51246c9ea71e81eb9ccd6a4030d763..fc52e307efab8768effbb7880702986653e0a07c 100644 (file)
@@ -38,7 +38,7 @@
 #define THERMAL_CSTATE_INVALID -1UL
 
 /* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT       THERMAL_CSTATE_INVALID
+#define THERMAL_NO_LIMIT       ((u32)~0)
 
 /* Unit conversion macros */
 #define KELVIN_TO_CELSIUS(t)   (long)(((long)t-2732 >= 0) ?    \
index a219be961c0a2cb7ab21ce08353bdb1c968d2b92..00048339c23e4f252ee6a4b15cd38b49b8032de4 100644 (file)
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
                      struct writeback_control *wbc, writepage_t writepage,
                      void *data);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page);
 void writeback_set_ratelimit(void);
 void tag_pages_for_writeback(struct address_space *mapping,
                             pgoff_t start, pgoff_t end);
index af10c2cf8a1dce00d9cc5414f67d5917bf911570..84125088c309afb988daa4b24368c2d9e0f02588 100644 (file)
@@ -31,6 +31,9 @@ struct genl_info;
  *     do additional, common, filtering and return an error
  * @post_doit: called after an operation's doit callback, it may
  *     undo operations done by pre_doit, for example release locks
+ * @mcast_bind: a socket bound to the given multicast group (which
+ *     is given as the offset into the groups array)
+ * @mcast_unbind: a socket was unbound from the given multicast group
  * @attrbuf: buffer to store parsed attributes
  * @family_list: family list
  * @mcgrps: multicast groups used by this family (private)
@@ -53,6 +56,8 @@ struct genl_family {
        void                    (*post_doit)(const struct genl_ops *ops,
                                             struct sk_buff *skb,
                                             struct genl_info *info);
+       int                     (*mcast_bind)(struct net *net, int group);
+       void                    (*mcast_unbind)(struct net *net, int group);
        struct nlattr **        attrbuf;        /* private */
        const struct genl_ops * ops;            /* private */
        const struct genl_multicast_group *mcgrps; /* private */
@@ -395,11 +400,11 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
 }
 
 static inline int genl_has_listeners(struct genl_family *family,
-                                    struct sock *sk, unsigned int group)
+                                    struct net *net, unsigned int group)
 {
        if (WARN_ON_ONCE(group >= family->n_mcgrps))
                return -EINVAL;
        group = family->mcgrp_offset + group;
-       return netlink_has_listeners(sk, group);
+       return netlink_has_listeners(net->genl_sock, group);
 }
 #endif /* __NET_GENERIC_NETLINK_H */
index 58d719ddaa60c93d2c6424765e96abf254cb4dc6..29c7be8808d52b21e0c41949ce62d816d8b58372 100644 (file)
@@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  *
  * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
  *     driver to indicate that it requires IV generation for this
- *     particular key. Setting this flag does not necessarily mean that SKBs
- *     will have sufficient tailroom for ICV or MIC.
+ *     particular key.
  * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
  *     the driver for a TKIP key if it requires Michael MIC
  *     generation in software.
@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
  *     if space should be prepared for the IV, but the IV
  *     itself should not be generated. Do not set together with
- *     @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
- *     not necessarily mean that SKBs will have sufficient tailroom for ICV or
- *     MIC.
+ *     @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
  * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
  *     management frames. The flag can help drivers that have a hardware
  *     crypto implementation that doesn't deal with management frames
index eb070b3674a1ba346c2c779889a90b636a8d3d7a..76f708486aaec76031a24ee5ff1d02f126185304 100644 (file)
@@ -190,7 +190,6 @@ struct neigh_hash_table {
 
 
 struct neigh_table {
-       struct neigh_table      *next;
        int                     family;
        int                     entry_size;
        int                     key_len;
index 57cccd0052e58dd124ec997174d8cd9ff885be99..903461aa5644ce9d1e366818fa6cbda1ba0cc1b1 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef __NET_VXLAN_H
 #define __NET_VXLAN_H 1
 
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/udp.h>
@@ -51,16 +54,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
 
-static inline bool vxlan_gso_check(struct sk_buff *skb)
+static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
+                                                    netdev_features_t features)
 {
-       if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
+       u8 l4_hdr = 0;
+
+       if (!skb->encapsulation)
+               return features;
+
+       switch (vlan_get_protocol(skb)) {
+       case htons(ETH_P_IP):
+               l4_hdr = ip_hdr(skb)->protocol;
+               break;
+       case htons(ETH_P_IPV6):
+               l4_hdr = ipv6_hdr(skb)->nexthdr;
+               break;
+       default:
+               return features;
+       }
+
+       if ((l4_hdr == IPPROTO_UDP) &&
            (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
             skb->inner_protocol != htons(ETH_P_TEB) ||
             (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
              sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
-               return false;
+               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
 
-       return true;
+       return features;
 }
 
 /* IP header + UDP + VXLAN + Ethernet header */
index 1e7f74acc2eccb75b735b82c66d1612ce762c248..b429b73e875ea2725f87d10d21c318adab83e4d5 100644 (file)
@@ -857,7 +857,7 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the sample rate from the hw params
+ * params_rate - Get the sample rate from the hw params
  * @p: hw params
  */
 static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
@@ -866,7 +866,7 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the period size (in frames) from the hw params
+ * params_period_size - Get the period size (in frames) from the hw params
  * @p: hw params
  */
 static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
@@ -875,7 +875,7 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the number of periods from the hw params
+ * params_periods - Get the number of periods from the hw params
  * @p: hw params
  */
 static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
@@ -884,7 +884,7 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the buffer size (in frames) from the hw params
+ * params_buffer_size - Get the buffer size (in frames) from the hw params
  * @p: hw params
  */
 static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
@@ -893,7 +893,7 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
 }
 
 /**
- * params_channels - Get the buffer size (in bytes) from the hw params
+ * params_buffer_bytes - Get the buffer size (in bytes) from the hw params
  * @p: hw params
  */
 static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
index 430cfaf92285f177d977d11717599bf1ff85b70b..db81c65b8f4857c011a025e5b54026bf7d683f7c 100644 (file)
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
 int    se_dev_set_emulate_rest_reord(struct se_device *dev, int);
 int    se_dev_set_queue_depth(struct se_device *, u32);
 int    se_dev_set_max_sectors(struct se_device *, u32);
-int    se_dev_set_fabric_max_sectors(struct se_device *, u32);
 int    se_dev_set_optimal_sectors(struct se_device *, u32);
 int    se_dev_set_block_size(struct se_device *, u32);
 
index 3247d7530107968aa7c1d1957f96790c226845dd..186f7a92357094fbb4735043b2a2b8c8cd884af8 100644 (file)
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
        TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR);           \
        DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors);                 \
        TB_DEV_ATTR_RO(_backend, hw_max_sectors);                       \
-       DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors);                \
-       TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR);   \
        DEF_TB_DEV_ATTRIB(_backend, optimal_sectors);                   \
        TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR);      \
        DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth);                 \
index 397fb635766a96faa94c5b91788ad24fca0d2a34..4a8795a87b9e99f30ee07f43fdce3984ddc658e0 100644 (file)
@@ -77,8 +77,6 @@
 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
 /* Default max_write_same_len, disabled by default */
 #define DA_MAX_WRITE_SAME_LEN                  0
-/* Default max transfer length */
-#define DA_FABRIC_MAX_SECTORS                  8192
 /* Use a model alias based on the configfs backend device name */
 #define DA_EMULATE_MODEL_ALIAS                 0
 /* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
        u32             hw_block_size;
        u32             block_size;
        u32             hw_max_sectors;
-       u32             fabric_max_sectors;
        u32             optimal_sectors;
        u32             hw_queue_depth;
        u32             queue_depth;
index 7543b3e51331fcb38574e3f309713b3a6a2d31c0..e063effe0cc18f0976f01b49cb56c3aeac47bf69 100644 (file)
@@ -5,7 +5,7 @@
 
 /*
  * FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x1000000
+ * FMODE_NONOTIFY is 0x4000000
  * These cannot be used by userspace O_* until internal and external open
  * flags are split.
  * -Eric Paris
index 74a2a1773494caba153cf20fc85e56693df15038..79b12b004ade03100930e9dc08f31b329e6398d3 100644 (file)
@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
 /*
  *     IPV6 socket options
  */
-
+#if __UAPI_DEF_IPV6_OPTIONS
 #define IPV6_ADDRFORM          1
 #define IPV6_2292PKTINFO       2
 #define IPV6_2292HOPOPTS       3
@@ -196,6 +196,7 @@ struct in6_flowlabel_req {
 
 #define IPV6_IPSEC_POLICY      34
 #define IPV6_XFRM_POLICY       35
+#endif
 
 /*
  * Multicast:
index 7acef41fc2092abee6e977970ddb22df2952e8f9..af94f31e33ac9d8ccb5f68312feff34ea4eb6aa1 100644 (file)
@@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args {
        uint32_t pad;
 };
 
-#define KFD_IOC_MAGIC 'K'
+#define AMDKFD_IOCTL_BASE 'K'
+#define AMDKFD_IO(nr)                  _IO(AMDKFD_IOCTL_BASE, nr)
+#define AMDKFD_IOR(nr, type)           _IOR(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOW(nr, type)           _IOW(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOWR(nr, type)          _IOWR(AMDKFD_IOCTL_BASE, nr, type)
 
-#define KFD_IOC_GET_VERSION \
-               _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+#define AMDKFD_IOC_GET_VERSION                 \
+               AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
 
-#define KFD_IOC_CREATE_QUEUE \
-               _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+#define AMDKFD_IOC_CREATE_QUEUE                        \
+               AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
 
-#define KFD_IOC_DESTROY_QUEUE \
-       _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+#define AMDKFD_IOC_DESTROY_QUEUE               \
+               AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
 
-#define KFD_IOC_SET_MEMORY_POLICY \
-       _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+#define AMDKFD_IOC_SET_MEMORY_POLICY           \
+               AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
 
-#define KFD_IOC_GET_CLOCK_COUNTERS \
-       _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+#define AMDKFD_IOC_GET_CLOCK_COUNTERS          \
+               AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
 
-#define KFD_IOC_GET_PROCESS_APERTURES \
-       _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+#define AMDKFD_IOC_GET_PROCESS_APERTURES       \
+               AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
 
-#define KFD_IOC_UPDATE_QUEUE \
-       _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+#define AMDKFD_IOC_UPDATE_QUEUE                        \
+               AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
+
+#define AMDKFD_COMMAND_START           0x01
+#define AMDKFD_COMMAND_END             0x08
 
 #endif
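
The rework above replaces the ad-hoc _IOR(KFD_IOC_MAGIC, n, ...) definitions with AMDKFD_IO*() wrappers, explicit hex command numbers, and AMDKFD_COMMAND_START/END bounds. A minimal userspace sketch of how such a wrapper packs the magic, number and argument size, and how a dispatcher can bounds-check the number; struct demo_get_version_args is a stand-in, not the real kfd_ioctl_get_version_args layout (Linux-only, uses <linux/ioctl.h>):

#include <stdio.h>
#include <linux/ioctl.h>

#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IOR(nr, type)   _IOR(AMDKFD_IOCTL_BASE, nr, type)

struct demo_get_version_args { unsigned int major, minor; };

#define DEMO_IOC_GET_VERSION   AMDKFD_IOR(0x01, struct demo_get_version_args)

#define AMDKFD_COMMAND_START   0x01
#define AMDKFD_COMMAND_END     0x08

int main(void)
{
	unsigned int cmd = DEMO_IOC_GET_VERSION;
	unsigned int nr  = _IOC_NR(cmd);

	printf("magic=%c nr=0x%02x size=%u\n",
	       _IOC_TYPE(cmd), nr, _IOC_SIZE(cmd));

	/* the new START/END constants let a driver bounds-check commands */
	if (nr >= AMDKFD_COMMAND_START && nr < AMDKFD_COMMAND_END)
		printf("command number is inside the amdkfd table\n");
	return 0;
}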
index c140620dad921a12bc36ac477911e12665d9a736..e28807ad17fa8dffb69879e1c0eaf23bf8971f88 100644 (file)
@@ -69,6 +69,7 @@
 #define __UAPI_DEF_SOCKADDR_IN6                0
 #define __UAPI_DEF_IPV6_MREQ           0
 #define __UAPI_DEF_IPPROTO_V6          0
+#define __UAPI_DEF_IPV6_OPTIONS                0
 
 #else
 
@@ -82,6 +83,7 @@
 #define __UAPI_DEF_SOCKADDR_IN6                1
 #define __UAPI_DEF_IPV6_MREQ           1
 #define __UAPI_DEF_IPPROTO_V6          1
+#define __UAPI_DEF_IPV6_OPTIONS                1
 
 #endif /* _NETINET_IN_H */
 
 #define __UAPI_DEF_SOCKADDR_IN6                1
 #define __UAPI_DEF_IPV6_MREQ           1
 #define __UAPI_DEF_IPPROTO_V6          1
+#define __UAPI_DEF_IPV6_OPTIONS                1
 
 /* Definitions for xattr.h */
 #define __UAPI_DEF_XATTR               1
index 3a6dcaa359b768d09bfc58f71c0f9b23582d9f24..f714e863335204a4c3e29525e41605f3a9e0f0f4 100644 (file)
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
        OVS_PACKET_ATTR_USERDATA,    /* OVS_ACTION_ATTR_USERSPACE arg. */
        OVS_PACKET_ATTR_EGRESS_TUN_KEY,  /* Nested OVS_TUNNEL_KEY_ATTR_*
                                            attributes. */
+       OVS_PACKET_ATTR_UNUSED1,
+       OVS_PACKET_ATTR_UNUSED2,
+       OVS_PACKET_ATTR_PROBE,      /* Packet operation is a feature probe,
+                                      error logging should be suppressed. */
        __OVS_PACKET_ATTR_MAX
 };
 
index 61c818a7fe70dfca4d26b2ef7f4269e9da22233f..a3318f31e8e7fd05f317c595e8c4ce3bc3595fac 100644 (file)
@@ -101,6 +101,13 @@ struct vring {
        struct vring_used *used;
 };
 
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
 /* The standard layout for the ring is a continuous chunk of memory which looks
  * like this.  We assume num is a power of 2.
  *
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644 (file)
index 0000000..b47d9d0
--- /dev/null
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+#include <xen/interface/xen.h>
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error     0
+#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr     1
+#define XEN_NMIREASON_pci_serr      (1UL << _XEN_NMIREASON_pci_serr)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown      2
+#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback   0
+struct xennmi_callback {
+    unsigned long handler_address;
+    unsigned long pad;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
index 231b7dcb154bae60f16b68d5abc337853d818c77..72ab759a0b43a6400750cefa71650ed64e7a8222 100644 (file)
@@ -1100,7 +1100,7 @@ static void audit_receive(struct sk_buff  *skb)
 }
 
 /* Run custom bind function on netlink socket group connect or bind requests. */
-static int audit_bind(int group)
+static int audit_bind(struct net *net, int group)
 {
        if (!capable(CAP_AUDIT_READ))
                return -EPERM;
index 37c69ab561dad881c9fd76eaac1ed5b48e5bab52..072566dd0caf7739fc42b7d59c6791c29dc89343 100644 (file)
@@ -72,6 +72,8 @@
 #include <linux/fs_struct.h>
 #include <linux/compat.h>
 #include <linux/ctype.h>
+#include <linux/string.h>
+#include <uapi/linux/limits.h>
 
 #include "audit.h"
 
@@ -1861,8 +1863,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
        }
 
        list_for_each_entry_reverse(n, &context->names_list, list) {
-               /* does the name pointer match? */
-               if (!n->name || n->name->name != name->name)
+               if (!n->name || strcmp(n->name->name, name->name))
                        continue;
 
                /* match the correct record type */
@@ -1881,14 +1882,44 @@ out_alloc:
        n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
        if (!n)
                return;
-       if (name)
-               /* since name is not NULL we know there is already a matching
-                * name record, see audit_getname(), so there must be a type
-                * mismatch; reuse the string path since the original name
-                * record will keep the string valid until we free it in
-                * audit_free_names() */
-               n->name = name;
+       /* Unfortunately, while we may have a path name to record with the
+        * inode, we can't always rely on the string lasting until the end of
+        * the syscall, so we need to create our own copy. The copy may fail
+        * due to memory allocation issues, but we do our best. */
+       if (name) {
+               /* we can't use getname_kernel() due to size limits */
+               size_t len = strlen(name->name) + 1;
+               struct filename *new = __getname();
+
+               if (unlikely(!new))
+                       goto out;
+
+               if (len <= (PATH_MAX - sizeof(*new))) {
+                       new->name = (char *)(new) + sizeof(*new);
+                       new->separate = false;
+               } else if (len <= PATH_MAX) {
+                       /* this looks odd, but is due to final_putname() */
+                       struct filename *new2;
 
+                       new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
+                       if (unlikely(!new2)) {
+                               __putname(new);
+                               goto out;
+                       }
+                       new2->name = (char *)new;
+                       new2->separate = true;
+                       new = new2;
+               } else {
+                       /* we should never get here, but let's be safe */
+                       __putname(new);
+                       goto out;
+               }
+               strlcpy((char *)new->name, name->name, len);
+               new->uptr = NULL;
+               new->aname = n;
+               n->name = new;
+               n->name_put = true;
+       }
 out:
        if (parent) {
                n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
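
The copy logic above picks between two layouts that both fit in one __getname() page: if the string fits after the struct, it is stored inline; if it only fits on its own, the string gets the whole page and the struct is allocated separately. A minimal userspace sketch of that decision, using made-up names (demo_filename, PAGE_SZ) rather than the kernel's struct filename and __getname():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

struct demo_filename {
	char *name;
	bool separate;          /* string lives in its own allocation */
};

static struct demo_filename *copy_name(const char *src)
{
	size_t len = strlen(src) + 1;
	struct demo_filename *new;

	if (len <= PAGE_SZ - sizeof(*new)) {
		new = malloc(PAGE_SZ);          /* struct + inline string */
		if (!new)
			return NULL;
		new->name = (char *)(new + 1);
		new->separate = false;
	} else if (len <= PAGE_SZ) {
		char *page = malloc(PAGE_SZ);   /* string gets the whole page */
		if (!page)
			return NULL;
		new = malloc(sizeof(*new));
		if (!new) {
			free(page);
			return NULL;
		}
		new->name = page;
		new->separate = true;
	} else {
		return NULL;                    /* longer than the PATH_MAX-like cap */
	}
	strcpy(new->name, src);
	return new;
}

static void free_name(struct demo_filename *f)
{
	if (f && f->separate)
		free(f->name);
	free(f);
}

int main(void)
{
	struct demo_filename *f = copy_name("/etc/hostname");

	if (f)
		printf("%s (separate=%d)\n", f->name, f->separate);
	free_name(f);
	return 0;
}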
index 1adf62b39b96b496e56ca8484c6939429981277c..07ce18ca71e0cd46b70155269a77b04af23f6526 100644 (file)
@@ -27,6 +27,9 @@
  * version 2. This program is licensed "as is" without any warranty of any
  * kind, whether express or implied.
  */
+
+#define pr_fmt(fmt) "KGDB: " fmt
+
 #include <linux/pid_namespace.h>
 #include <linux/clocksource.h>
 #include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
                return err;
        err = kgdb_arch_remove_breakpoint(&tmp);
        if (err)
-               printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
-                  "memory destroyed at: %lx", addr);
+               pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
+                      addr);
        return err;
 }
 
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
                error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
                if (error) {
                        ret = error;
-                       printk(KERN_INFO "KGDB: BP install failed: %lx",
-                              kgdb_break[i].bpt_addr);
+                       pr_info("BP install failed: %lx\n",
+                               kgdb_break[i].bpt_addr);
                        continue;
                }
 
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
                        continue;
                error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error) {
-                       printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
-                              kgdb_break[i].bpt_addr);
+                       pr_info("BP remove failed: %lx\n",
+                               kgdb_break[i].bpt_addr);
                        ret = error;
                }
 
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
                        goto setundefined;
                error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error)
-                       printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+                       pr_err("breakpoint remove failed: %lx\n",
                               kgdb_break[i].bpt_addr);
 setundefined:
                kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
        if (print_wait) {
 #ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
-                       printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+                       pr_crit("waiting... or $3#33 for KDB\n");
 #else
-               printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+               pr_crit("Waiting for remote debugger\n");
 #endif
        }
        return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
                exception_level = 0;
                kgdb_skipexception(ks->ex_vector, ks->linux_regs);
                dbg_activate_sw_breakpoints();
-               printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
-                       addr);
+               pr_crit("re-enter error: breakpoint removed %lx\n", addr);
                WARN_ON_ONCE(1);
 
                return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
                panic("Recursive entry to debugger");
        }
 
-       printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+       pr_crit("re-enter exception: ALL breakpoints killed\n");
 #ifdef CONFIG_KGDB_KDB
        /* Allow kdb to debug itself one level */
        return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
        int cpu;
        int trace_on = 0;
        int online_cpus = num_online_cpus();
+       u64 time_left;
 
        kgdb_info[ks->cpu].enter_kgdb++;
        kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
        /*
         * Wait for the other CPUs to be notified and be waiting for us:
         */
-       while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
-                               atomic_read(&slaves_in_kgdb)) != online_cpus)
+       time_left = loops_per_jiffy * HZ;
+       while (kgdb_do_roundup && --time_left &&
+              (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+                  online_cpus)
                cpu_relax();
+       if (!time_left)
+               pr_crit("Timed out waiting for secondary CPUs.\n");
 
        /*
         * At this point the primary processor is completely
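
The new wait loop above bounds a previously open-ended spin with a loops_per_jiffy-based budget. A minimal sketch of the same pattern in plain C11, with an atomic counter and a made-up budget standing in for masters_in_kgdb/slaves_in_kgdb and loops_per_jiffy:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpus_in_debugger;

static int wait_for_cpus(int online_cpus, unsigned long budget)
{
	/* spin, but give up once the budget runs out instead of hanging */
	while (--budget &&
	       atomic_load(&cpus_in_debugger) != online_cpus)
		;                               /* cpu_relax() in the kernel */

	if (!budget) {
		fprintf(stderr, "timed out waiting for secondary CPUs\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	atomic_store(&cpus_in_debugger, 1);     /* only this CPU checked in */
	return wait_for_cpus(4, 1000000) ? 1 : 0;
}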
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
 static void sysrq_handle_dbg(int key)
 {
        if (!dbg_io_ops) {
-               printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+               pr_crit("ERROR: No KGDB I/O module available\n");
                return;
        }
        if (!kgdb_connected) {
 #ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
-                       printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+                       pr_crit("KGDB or $3#33 for KDB\n");
 #else
-               printk(KERN_CRIT "Entering KGDB\n");
+               pr_crit("Entering KGDB\n");
 #endif
        }
 
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
 {
        kgdb_break_asap = 0;
 
-       printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+       pr_crit("Waiting for connection from remote gdb...\n");
        kgdb_breakpoint();
 }
 
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
        if (dbg_io_ops) {
                spin_unlock(&kgdb_registration_lock);
 
-               printk(KERN_ERR "kgdb: Another I/O driver is already "
-                               "registered with KGDB.\n");
+               pr_err("Another I/O driver is already registered with KGDB\n");
                return -EBUSY;
        }
 
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
 
        spin_unlock(&kgdb_registration_lock);
 
-       printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
-              new_dbg_io_ops->name);
+       pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
 
        /* Arm KGDB now. */
        kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
 
        spin_unlock(&kgdb_registration_lock);
 
-       printk(KERN_INFO
-               "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+       pr_info("Unregistered I/O driver %s, debugger disabled\n",
                old_dbg_io_ops->name);
 }
 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
index b20d544f20c2a12c24ac492c4dae56961e0b959d..e1dbf4a2c69e4ca9721c22184cb9f800325b9194 100644 (file)
@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
        for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
                bp->bp_free = 1;
 
-       kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
-               "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
-               "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
+               "Set/Display breakpoints", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
+               "Display breakpoints", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
        if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
-               kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
-               "[datar [length]|dataw [length]]   Set hw brk", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("bc", kdb_bc, "<bpnum>",
-               "Clear Breakpoint", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("be", kdb_bc, "<bpnum>",
-               "Enable Breakpoint", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bd", kdb_bc, "<bpnum>",
-               "Disable Breakpoint", 0, KDB_REPEAT_NONE);
-
-       kdb_register_repeat("ss", kdb_ss, "",
-               "Single Step", 1, KDB_REPEAT_NO_ARGS);
+               kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
+               "[datar [length]|dataw [length]]   Set hw brk", 0,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("bc", kdb_bc, "<bpnum>",
+               "Clear Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+       kdb_register_flags("be", kdb_bc, "<bpnum>",
+               "Enable Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+       kdb_register_flags("bd", kdb_bc, "<bpnum>",
+               "Disable Breakpoint", 0,
+               KDB_ENABLE_FLOW_CTRL);
+
+       kdb_register_flags("ss", kdb_ss, "",
+               "Single Step", 1,
+               KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
        /*
         * Architecture dependent initialization.
         */
index 8859ca34dcfe0a58dbd530b8668d8d88760a3eb4..15e1a7af5dd033f130ef2b4ed96cf1e2809442f5 100644 (file)
@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
                ks->pass_exception = 1;
                KDB_FLAG_SET(CATASTROPHIC);
        }
+       /* set CATASTROPHIC if the system contains unresponsive processors */
+       for_each_online_cpu(i)
+               if (!kgdb_info[i].enter_kgdb)
+                       KDB_FLAG_SET(CATASTROPHIC);
        if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
                KDB_STATE_CLEAR(SSBPT);
                KDB_STATE_CLEAR(DOING_SS);
index 379650b984f8150bd7ead11fa57767261ce21758..f191bddf64b8ebdd0227556e119b1a804a30db88 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/ctype.h>
+#include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
 #include <linux/vmalloc.h>
 #include <linux/atomic.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/slab.h>
 #include "kdb_private.h"
 
+#undef MODULE_PARAM_PREFIX
+#define        MODULE_PARAM_PREFIX "kdb."
+
+static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
+module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
+
 #define GREP_LEN 256
 char kdb_grep_string[GREP_LEN];
 int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
        KDBMSG(BADLENGTH, "Invalid length field"),
        KDBMSG(NOBP, "No Breakpoint exists"),
        KDBMSG(BADADDR, "Invalid address"),
+       KDBMSG(NOPERM, "Permission denied"),
 };
 #undef KDBMSG
 
@@ -187,6 +196,26 @@ struct task_struct *kdb_curr_task(int cpu)
        return p;
 }
 
+/*
+ * Check whether the flags of the current command and the permissions
+ * of the kdb console allow the command to be run.
+ */
+static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+                                  bool no_args)
+{
+       /* permissions comes from userspace so needs massaging slightly */
+       permissions &= KDB_ENABLE_MASK;
+       permissions |= KDB_ENABLE_ALWAYS_SAFE;
+
+       /* some commands change group when launched with no arguments */
+       if (no_args)
+               permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
+
+       flags |= KDB_ENABLE_ALL;
+
+       return permissions & flags;
+}
+
 /*
  * kdbgetenv - This function will return the character string value of
  *     an environment variable.
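
kdb_check_flags() above is a pure bit test: the console's permission mask is clamped, the "always safe" bit is always granted, no-argument invocations get the shifted copy of the permissions, and the command is allowed if any remaining bit overlaps its flags. A minimal sketch of that shape with made-up DEMO_* constants (they are not the kernel's KDB_ENABLE_* values):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_ENABLE_ALL          0x01u   /* console may run everything */
#define DEMO_ENABLE_ALWAYS_SAFE  0x02u
#define DEMO_ENABLE_MEM_READ     0x04u
#define DEMO_ENABLE_FLOW_CTRL    0x08u
#define DEMO_NO_ARGS_SHIFT       8
#define DEMO_ENABLE_MASK         ((1u << DEMO_NO_ARGS_SHIFT) - 1)

static bool demo_check_flags(unsigned int cmd_flags, unsigned int permissions,
			     bool no_args)
{
	permissions &= DEMO_ENABLE_MASK;
	permissions |= DEMO_ENABLE_ALWAYS_SAFE;
	if (no_args)
		permissions |= permissions << DEMO_NO_ARGS_SHIFT;
	cmd_flags |= DEMO_ENABLE_ALL;    /* an "all" console matches any command */
	return permissions & cmd_flags;
}

int main(void)
{
	/* a console granted only ALWAYS_SAFE cannot run a MEM_READ command */
	printf("%d\n", demo_check_flags(DEMO_ENABLE_MEM_READ,
					DEMO_ENABLE_ALWAYS_SAFE, false));
	/* but it can still run an ALWAYS_SAFE command such as "help" */
	printf("%d\n", demo_check_flags(DEMO_ENABLE_ALWAYS_SAFE, 0, false));
	/* and a console granted ENABLE_ALL can run anything */
	printf("%d\n", demo_check_flags(DEMO_ENABLE_MEM_READ,
					DEMO_ENABLE_ALL, false));
	return 0;
}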
@@ -475,6 +504,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
        char *cp;
        kdb_symtab_t symtab;
 
+       /*
+        * If the enable flags prohibit both arbitrary memory access
+        * and flow control then there are no reasonable grounds to
+        * provide symbol lookup.
+        */
+       if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
+                            kdb_cmd_enabled, false))
+               return KDB_NOPERM;
+
        /*
         * Process arguments which follow the following syntax:
         *
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
                if (!s->count)
                        s->usable = 0;
                if (s->usable)
-                       kdb_register(s->name, kdb_exec_defcmd,
-                                    s->usage, s->help, 0);
+                       /* Macros are always safe because, when executed, each
+                        * internal command re-enters kdb_parse() and is
+                        * safety-checked individually.
+                        */
+                       kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
+                                          s->help, 0,
+                                          KDB_ENABLE_ALWAYS_SAFE);
                return 0;
        }
        if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
 
        if (i < kdb_max_commands) {
                int result;
+
+               if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
+                       return KDB_NOPERM;
+
                KDB_STATE_SET(CMD);
                result = (*tp->cmd_func)(argc-1, (const char **)argv);
                if (result && ignore_errors && result > KDB_CMD_GO)
                        result = 0;
                KDB_STATE_CLEAR(CMD);
-               switch (tp->cmd_repeat) {
-               case KDB_REPEAT_NONE:
-                       argc = 0;
-                       if (argv[0])
-                               *(argv[0]) = '\0';
-                       break;
-               case KDB_REPEAT_NO_ARGS:
-                       argc = 1;
-                       if (argv[1])
-                               *(argv[1]) = '\0';
-                       break;
-               case KDB_REPEAT_WITH_ARGS:
-                       break;
-               }
+
+               if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
+                       return result;
+
+               argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
+               if (argv[argc])
+                       *(argv[argc]) = '\0';
                return result;
        }
 
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
  */
 static int kdb_sr(int argc, const char **argv)
 {
+       bool check_mask =
+           !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
+
        if (argc != 1)
                return KDB_ARGCOUNT;
+
        kdb_trap_printk++;
-       __handle_sysrq(*argv[1], false);
+       __handle_sysrq(*argv[1], check_mask);
        kdb_trap_printk--;
 
        return 0;
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
        for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
                if (!cpu_online(i)) {
                        state = 'F';    /* cpu is offline */
+               } else if (!kgdb_info[i].enter_kgdb) {
+                       state = 'D';    /* cpu is online but unresponsive */
                } else {
                        state = ' ';    /* cpu is responding to kdb */
                        if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
        /*
         * Validate cpunum
         */
-       if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+       if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
                return KDB_BADCPUNUM;
 
        dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
                        return 0;
                if (!kt->cmd_name)
                        continue;
+               if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
+                       continue;
                if (strlen(kt->cmd_usage) > 20)
                        space = "\n                                    ";
                kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
 }
 
 /*
- * kdb_register_repeat - This function is used to register a kernel
+ * kdb_register_flags - This function is used to register a kernel
  *     debugger command.
  * Inputs:
  *     cmd     Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
  *     zero for success, one if a duplicate command.
  */
 #define kdb_command_extend 50  /* arbitrary */
-int kdb_register_repeat(char *cmd,
-                       kdb_func_t func,
-                       char *usage,
-                       char *help,
-                       short minlen,
-                       kdb_repeat_t repeat)
+int kdb_register_flags(char *cmd,
+                      kdb_func_t func,
+                      char *usage,
+                      char *help,
+                      short minlen,
+                      kdb_cmdflags_t flags)
 {
        int i;
        kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
        kp->cmd_func   = func;
        kp->cmd_usage  = usage;
        kp->cmd_help   = help;
-       kp->cmd_flags  = 0;
        kp->cmd_minlen = minlen;
-       kp->cmd_repeat = repeat;
+       kp->cmd_flags  = flags;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(kdb_register_repeat);
+EXPORT_SYMBOL_GPL(kdb_register_flags);
 
 
 /*
  * kdb_register - Compatibility register function for commands that do
  *     not need to specify a repeat state.  Equivalent to
- *     kdb_register_repeat with KDB_REPEAT_NONE.
+ *     kdb_register_flags with flags set to 0.
  * Inputs:
  *     cmd     Command name
  *     func    Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
             char *help,
             short minlen)
 {
-       return kdb_register_repeat(cmd, func, usage, help, minlen,
-                                  KDB_REPEAT_NONE);
+       return kdb_register_flags(cmd, func, usage, help, minlen, 0);
 }
 EXPORT_SYMBOL_GPL(kdb_register);
 
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
        for_each_kdbcmd(kp, i)
                kp->cmd_name = NULL;
 
-       kdb_register_repeat("md", kdb_md, "<vaddr>",
+       kdb_register_flags("md", kdb_md, "<vaddr>",
          "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
-                           KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
-         "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
-         "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mds", kdb_md, "<vaddr>",
-         "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
-         "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
-       kdb_register_repeat("go", kdb_go, "[<vaddr>]",
-         "Continue Execution", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("rd", kdb_rd, "",
-         "Display Registers", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
-         "Modify Registers", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("ef", kdb_ef, "<vaddr>",
-         "Display exception frame", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
-         "Stack traceback", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("btp", kdb_bt, "<pid>",
-         "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
-         "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("btc", kdb_bt, "",
-         "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+         "Display Raw Memory", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+         "Display Physical Memory", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mds", kdb_md, "<vaddr>",
+         "Display Memory Symbolically", 0,
+         KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+         "Modify Memory Contents", 0,
+         KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+       kdb_register_flags("go", kdb_go, "[<vaddr>]",
+         "Continue Execution", 1,
+         KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+       kdb_register_flags("rd", kdb_rd, "",
+         "Display Registers", 0,
+         KDB_ENABLE_REG_READ);
+       kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+         "Modify Registers", 0,
+         KDB_ENABLE_REG_WRITE);
+       kdb_register_flags("ef", kdb_ef, "<vaddr>",
+         "Display exception frame", 0,
+         KDB_ENABLE_MEM_READ);
+       kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+         "Stack traceback", 1,
+         KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+       kdb_register_flags("btp", kdb_bt, "<pid>",
+         "Display stack for process <pid>", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+         "Backtrace all processes matching state flag", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("btc", kdb_bt, "",
+         "Backtrace current process on each cpu", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("btt", kdb_bt, "<vaddr>",
          "Backtrace process given its struct task address", 0,
-                           KDB_REPEAT_NONE);
-       kdb_register_repeat("env", kdb_env, "",
-         "Show environment variables", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("set", kdb_set, "",
-         "Set environment variables", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("help", kdb_help, "",
-         "Display Help Message", 1, KDB_REPEAT_NONE);
-       kdb_register_repeat("?", kdb_help, "",
-         "Display Help Message", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
-         "Switch to new cpu", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("kgdb", kdb_kgdb, "",
-         "Enter kgdb mode", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
-         "Display active task list", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("pid", kdb_pid, "<pidnum>",
-         "Switch to another task", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("reboot", kdb_reboot, "",
-         "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+         KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+       kdb_register_flags("env", kdb_env, "",
+         "Show environment variables", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("set", kdb_set, "",
+         "Set environment variables", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("help", kdb_help, "",
+         "Display Help Message", 1,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("?", kdb_help, "",
+         "Display Help Message", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+         "Switch to new cpu", 0,
+         KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+       kdb_register_flags("kgdb", kdb_kgdb, "",
+         "Enter kgdb mode", 0, 0);
+       kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+         "Display active task list", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("pid", kdb_pid, "<pidnum>",
+         "Switch to another task", 0,
+         KDB_ENABLE_INSPECT);
+       kdb_register_flags("reboot", kdb_reboot, "",
+         "Reboot the machine immediately", 0,
+         KDB_ENABLE_REBOOT);
 #if defined(CONFIG_MODULES)
-       kdb_register_repeat("lsmod", kdb_lsmod, "",
-         "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("lsmod", kdb_lsmod, "",
+         "List loaded kernel modules", 0,
+         KDB_ENABLE_INSPECT);
 #endif
 #if defined(CONFIG_MAGIC_SYSRQ)
-       kdb_register_repeat("sr", kdb_sr, "<key>",
-         "Magic SysRq key", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("sr", kdb_sr, "<key>",
+         "Magic SysRq key", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 #endif
 #if defined(CONFIG_PRINTK)
-       kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
-         "Display syslog buffer", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
+         "Display syslog buffer", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 #endif
        if (arch_kgdb_ops.enable_nmi) {
-               kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
-                 "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
-       }
-       kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
-         "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
-         "Send a signal to a process", 0, KDB_REPEAT_NONE);
-       kdb_register_repeat("summary", kdb_summary, "",
-         "Summarize the system", 4, KDB_REPEAT_NONE);
-       kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
-         "Display per_cpu variables", 3, KDB_REPEAT_NONE);
-       kdb_register_repeat("grephelp", kdb_grep_help, "",
-         "Display help on | grep", 0, KDB_REPEAT_NONE);
+               kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
+                 "Disable NMI entry to KDB", 0,
+                 KDB_ENABLE_ALWAYS_SAFE);
+       }
+       kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+         "Define a set of commands, down to endefcmd", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
+         "Send a signal to a process", 0,
+         KDB_ENABLE_SIGNAL);
+       kdb_register_flags("summary", kdb_summary, "",
+         "Summarize the system", 4,
+         KDB_ENABLE_ALWAYS_SAFE);
+       kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
+         "Display per_cpu variables", 3,
+         KDB_ENABLE_MEM_READ);
+       kdb_register_flags("grephelp", kdb_grep_help, "",
+         "Display help on | grep", 0,
+         KDB_ENABLE_ALWAYS_SAFE);
 }
 
 /* Execute any commands defined in kdb_cmds.  */
index 7afd3c8c41d5d51f17a7a14566551bf2d731d068..eaacd1693954b13aa55c59028c597b6ed91488f3 100644 (file)
@@ -172,10 +172,9 @@ typedef struct _kdbtab {
        kdb_func_t cmd_func;            /* Function to execute command */
        char    *cmd_usage;             /* Usage String for this command */
        char    *cmd_help;              /* Help message for this command */
-       short    cmd_flags;             /* Parsing flags */
        short    cmd_minlen;            /* Minimum legal # command
                                         * chars required */
-       kdb_repeat_t cmd_repeat;        /* Does command auto repeat on enter? */
+       kdb_cmdflags_t cmd_flags;       /* Command behaviour flags */
 } kdbtab_t;
 
 extern int kdb_bt(int, const char **); /* KDB display back trace */
index 4c1ee7f2bebc4bfb1434fe472f0d66f120f8cdc0..882f835a0d859e011848069ed6ee716f3def4dee 100644 (file)
@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
 }
 
 static void perf_sample_regs_user(struct perf_regs *regs_user,
-                                 struct pt_regs *regs)
+                                 struct pt_regs *regs,
+                                 struct pt_regs *regs_user_copy)
 {
-       if (!user_mode(regs)) {
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-
-       if (regs) {
-               regs_user->abi  = perf_reg_abi(current);
+       if (user_mode(regs)) {
+               regs_user->abi = perf_reg_abi(current);
                regs_user->regs = regs;
+       } else if (current->mm) {
+               perf_get_regs_user(regs_user, regs, regs_user_copy);
        } else {
                regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
                regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
        }
 
        if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
-               perf_sample_regs_user(&data->regs_user, regs);
+               perf_sample_regs_user(&data->regs_user, regs,
+                                     &data->regs_user_copy);
 
        if (sample_type & PERF_SAMPLE_REGS_USER) {
                /* regs dump ABI info */
index 1ea4369890a31b6776aeb85bada4bf097594cce2..6806c55475eec17be40b1d6c53cf9fe007376279 100644 (file)
@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                struct task_struct *p)
 {
+       /*
+        * We can race with wait_task_zombie() from another thread.
+        * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+        * can't confuse the checks below.
+        */
+       int exit_state = ACCESS_ONCE(p->exit_state);
        int ret;
 
-       if (unlikely(p->exit_state == EXIT_DEAD))
+       if (unlikely(exit_state == EXIT_DEAD))
                return 0;
 
        ret = eligible_child(wo, p);
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
                return 0;
        }
 
-       if (unlikely(p->exit_state == EXIT_TRACE)) {
+       if (unlikely(exit_state == EXIT_TRACE)) {
                /*
                 * ptrace == 0 means we are the natural parent. In this case
                 * we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
        }
 
        /* slay zombie? */
-       if (p->exit_state == EXIT_ZOMBIE) {
+       if (exit_state == EXIT_ZOMBIE) {
                /* we don't reap group leaders with subthreads */
                if (!delay_group_leader(p)) {
                        /*
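
The point of the ACCESS_ONCE() snapshot above is that exit_state is compared several times while another thread may concurrently move the task from EXIT_ZOMBIE to EXIT_DEAD or EXIT_TRACE; reading it once keeps the checks mutually consistent. A minimal sketch of the snapshot-once pattern, with C11 atomics standing in for ACCESS_ONCE() and made-up ST_* states rather than the kernel's EXIT_* values:

#include <stdatomic.h>
#include <stdio.h>

enum { ST_RUNNING, ST_ZOMBIE, ST_TRACE, ST_DEAD };

struct task { _Atomic int exit_state; };

static int consider(struct task *p)
{
	/* one load, so every check below sees the same value */
	int exit_state = atomic_load_explicit(&p->exit_state,
					      memory_order_relaxed);

	if (exit_state == ST_DEAD)
		return 0;       /* nothing left to reap */
	if (exit_state == ST_TRACE)
		return 1;       /* someone else is reaping it */
	if (exit_state == ST_ZOMBIE)
		return 2;       /* we may reap it */
	return 3;               /* still running */
}

int main(void)
{
	struct task t = { .exit_state = ST_ZOMBIE };

	printf("%d\n", consider(&t));
	return 0;
}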
index 5cf6731b98e9ecf1ffffa754371701613cc64bcb..3ef3736002d895854794a4d940adcd96288640f2 100644 (file)
@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
                        DEBUG_LOCKS_WARN_ON(lock->owner != current);
 
                DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-               mutex_clear_owner(lock);
        }
 
        /*
         * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
         * mutexes so that we can do it here after we've verified state.
         */
+       mutex_clear_owner(lock);
        atomic_set(&lock->count, 1);
 }
 
index 322ea8e93e4ba36c11c0348a34c12e58c983da03..82cfc285b046d8e320559a48cf08007a19742dc0 100644 (file)
@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2)
 {
        const struct range *r1 = x1;
        const struct range *r2 = x2;
-       s64 start1, start2;
 
-       start1 = r1->start;
-       start2 = r2->start;
-
-       return start1 - start2;
+       if (r1->start < r2->start)
+               return -1;
+       if (r1->start > r2->start)
+               return 1;
+       return 0;
 }
 
 int clean_sort_range(struct range *range, int az)
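
The comparator above used to return the s64 difference truncated to int, so two starts more than 2^31 apart could compare with the wrong sign and mis-sort the table; the fix returns -1/0/1 explicitly. A minimal userspace demo of the truncation (the values are arbitrary):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct range { uint64_t start, end; };

static int cmp_broken(const void *x1, const void *x2)
{
	const struct range *r1 = x1, *r2 = x2;

	/* 0x100000000 - 1 = 0xffffffff truncates to -1: the larger start sorts first */
	return (int)((int64_t)r1->start - (int64_t)r2->start);
}

static int cmp_fixed(const void *x1, const void *x2)
{
	const struct range *r1 = x1, *r2 = x2;

	if (r1->start < r2->start)
		return -1;
	if (r1->start > r2->start)
		return 1;
	return 0;
}

int main(void)
{
	struct range r[] = { { 0x100000000ull, 0 }, { 1, 0 } };

	printf("broken: %d  fixed: %d\n",
	       cmp_broken(&r[0], &r[1]), cmp_fixed(&r[0], &r[1]));
	qsort(r, 2, sizeof(r[0]), cmp_fixed);
	printf("smallest start after sort: %llu\n",
	       (unsigned long long)r[0].start);
	return 0;
}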
index b5797b78add65d967ab22f91c9f05182da0de2b4..c0accc00566eb774a022870635c60e63da6ee198 100644 (file)
@@ -7112,9 +7112,6 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       alloc_size += num_possible_cpus() * cpumask_size();
 #endif
        if (alloc_size) {
                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
                ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+       }
 #ifdef CONFIG_CPUMASK_OFFSTACK
-               for_each_possible_cpu(i) {
-                       per_cpu(load_balance_mask, i) = (void *)ptr;
-                       ptr += cpumask_size();
-               }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+       for_each_possible_cpu(i) {
+               per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+                       cpumask_size(), GFP_KERNEL, cpu_to_node(i));
        }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
        init_rt_bandwidth(&def_rt_bandwidth,
                        global_rt_period(), global_rt_runtime());
index e5db8c6feebd7e319b20b482885a1bd5e8ad3be1..b52092f2636d50e8a816b2e7e20a648b00d6bb70 100644 (file)
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-       int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-       int rorun = dl_se->runtime <= 0;
-
-       if (!rorun && !dmiss)
-               return 0;
-
-       /*
-        * If we are beyond our current deadline and we are still
-        * executing, then we have already used some of the runtime of
-        * the next instance. Thus, if we do not account that, we are
-        * stealing bandwidth from the system at each deadline miss!
-        */
-       if (dmiss) {
-               dl_se->runtime = rorun ? dl_se->runtime : 0;
-               dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-       }
-
-       return 1;
+       return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
-       if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-               replenish_dl_entity(dl_se, pi_se);
-       else
+       if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
                update_dl_entity(dl_se, pi_se);
+       else if (flags & ENQUEUE_REPLENISH)
+               replenish_dl_entity(dl_se, pi_se);
 
        __enqueue_dl_entity(dl_se);
 }
index df2cdf77f8998d46f58cbf4bc0b3693311c96911..40667cbf371ba9e8732e6c30940cc146752ee0c3 100644 (file)
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+       /* init_cfs_bandwidth() was not called */
+       if (!cfs_b->throttled_cfs_rq.next)
+               return;
+
        hrtimer_cancel(&cfs_b->period_timer);
        hrtimer_cancel(&cfs_b->slack_timer);
 }
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                 * wl = S * s'_i; see (2)
                 */
                if (W > 0 && w < W)
-                       wl = (w * tg->shares) / W;
+                       wl = (w * (long)tg->shares) / W;
                else
                        wl = tg->shares;
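
The cast added above matters because w can be negative (a weight being removed) while tg->shares is unsigned long; without the cast the multiplication converts w to unsigned and the sign is lost. A minimal userspace demo on a 64-bit build, with arbitrary stand-ins for w, shares and W:

#include <stdio.h>

int main(void)
{
	long w = -512;                   /* weight being removed: negative */
	unsigned long shares = 1024;     /* plays the role of tg->shares */
	long W = 4096;

	long bad  = (w * shares) / W;        /* w converted to unsigned */
	long good = (w * (long)shares) / W;  /* the patched expression */

	printf("without cast: %ld\n", bad);   /* huge positive garbage */
	printf("with cast:    %ld\n", good);  /* -128, as intended */
	return 0;
}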
 
index 929a733d302e0d438d2f15f8dfaf6e2e4c56c0d0..224e768bdc738da7c47aca41fcc6d9ecd4c190b4 100644 (file)
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
 }
 
 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
-                                  struct ftrace_hash *old_hash)
+                                  struct ftrace_ops_hash *old_hash)
 {
        ops->flags |= FTRACE_OPS_FL_MODIFYING;
-       ops->old_hash.filter_hash = old_hash;
+       ops->old_hash.filter_hash = old_hash->filter_hash;
+       ops->old_hash.notrace_hash = old_hash->notrace_hash;
        ftrace_run_update_code(command);
        ops->old_hash.filter_hash = NULL;
+       ops->old_hash.notrace_hash = NULL;
        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }
 
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
 
 static int ftrace_probe_registered;
 
-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
 {
        int ret;
        int i;
@@ -3637,6 +3639,7 @@ int
 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
 {
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_probe *entry;
        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
        struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
+       old_hash_ops.filter_hash = old_hash;
+       /* Probes only have filters */
+       old_hash_ops.notrace_hash = NULL;
+
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
        if (!hash) {
                count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
 
-       __enable_ftrace_function_probe(old_hash);
+       __enable_ftrace_function_probe(&old_hash_ops);
 
        if (!ret)
                free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 }
 
 static void ftrace_ops_update_code(struct ftrace_ops *ops,
-                                  struct ftrace_hash *old_hash)
+                                  struct ftrace_ops_hash *old_hash)
 {
-       if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+       struct ftrace_ops *op;
+
+       if (!ftrace_enabled)
+               return;
+
+       if (ops->flags & FTRACE_OPS_FL_ENABLED) {
                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+               return;
+       }
+
+       /*
+        * If this is the shared global_ops filter, then we need to
+        * check if there is another ops that shares it, is enabled.
+        * If so, we still need to run the modify code.
+        */
+       if (ops->func_hash != &global_ops.local_hash)
+               return;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->func_hash == &global_ops.local_hash &&
+                   op->flags & FTRACE_OPS_FL_ENABLED) {
+                       ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+                       /* Only need to do this once */
+                       return;
+               }
+       } while_for_each_ftrace_op(op);
 }
 
 static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
                unsigned long ip, int remove, int reset, int enable)
 {
        struct ftrace_hash **orig_hash;
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *hash;
        int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 
        mutex_lock(&ftrace_lock);
        old_hash = *orig_hash;
+       old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+       old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
        if (!ret) {
-               ftrace_ops_update_code(ops, old_hash);
+               ftrace_ops_update_code(ops, &old_hash_ops);
                free_ftrace_hash_rcu(old_hash);
        }
        mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
 int ftrace_regex_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = (struct seq_file *)file->private_data;
+       struct ftrace_ops_hash old_hash_ops;
        struct ftrace_iterator *iter;
        struct ftrace_hash **orig_hash;
        struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                mutex_lock(&ftrace_lock);
                old_hash = *orig_hash;
+               old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+               old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
                ret = ftrace_hash_move(iter->ops, filter_hash,
                                       orig_hash, iter->hash);
                if (!ret) {
-                       ftrace_ops_update_code(iter->ops, old_hash);
+                       ftrace_ops_update_code(iter->ops, &old_hash_ops);
                        free_ftrace_hash_rcu(old_hash);
                }
                mutex_unlock(&ftrace_lock);
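
The ftrace hunks above widen the "old hash" snapshot from a single filter hash to a struct ftrace_ops_hash carrying both the filter and notrace hashes, taken before the hash move. The userspace sketch below models only that snapshot-before-swap pattern; the ops_hash type and snapshot_old_hashes() helper are invented for illustration and are not kernel code.

#include <stdio.h>

struct hash { const char *name; };

struct ops_hash {
        struct hash *filter_hash;
        struct hash *notrace_hash;
};

static void snapshot_old_hashes(const struct ops_hash *live, struct ops_hash *old)
{
        /* Copy both pointers; the live set may be swapped right after this. */
        old->filter_hash  = live->filter_hash;
        old->notrace_hash = live->notrace_hash;
}

int main(void)
{
        struct hash a = { "old-filter" }, b = { "old-notrace" }, c = { "new-filter" };
        struct ops_hash live = { &a, &b }, old;

        snapshot_old_hashes(&live, &old);
        live.filter_hash = &c;          /* the "hash move" */

        /* Update code can now compare the complete old state against the new one. */
        printf("old filter=%s, new filter=%s, old notrace=%s\n",
               old.filter_hash->name, live.filter_hash->name,
               old.notrace_hash->name);
        return 0;
}
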
index 2e767972e99c2e8791236d10afa717b920543b70..4a9079b9f082fd3bb14e3b46522b1540b001fea1 100644 (file)
@@ -6918,7 +6918,6 @@ void __init trace_init(void)
                        tracepoint_printk = 0;
        }
        tracer_alloc_buffers();
-       init_ftrace_syscalls();
        trace_event_init();
 }
 
index 366a78a3e61e21a94c06aa96f5b5c02a72c41e78..b03a0ea77b993cf9f175ed7b44fc239832de7def 100644 (file)
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
        return 0;
 }
 
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+       char *buf = bootup_event_buf;
+       char *token;
+       int ret;
+
+       while (true) {
+               token = strsep(&buf, ",");
+
+               if (!token)
+                       break;
+               if (!*token)
+                       continue;
+
+               /* Restarting syscalls requires that we stop them first */
+               if (disable_first)
+                       ftrace_set_clr_event(tr, token, 0);
+
+               ret = ftrace_set_clr_event(tr, token, 1);
+               if (ret)
+                       pr_warn("Failed to enable trace event: %s\n", token);
+
+               /* Put back the comma to allow this to be called again */
+               if (buf)
+                       *(buf - 1) = ',';
+       }
+}
+
 static __init int event_trace_enable(void)
 {
        struct trace_array *tr = top_trace_array();
        struct ftrace_event_call **iter, *call;
-       char *buf = bootup_event_buf;
-       char *token;
        int ret;
 
        if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
         */
        __trace_early_add_events(tr);
 
-       while (true) {
-               token = strsep(&buf, ",");
-
-               if (!token)
-                       break;
-               if (!*token)
-                       continue;
-
-               ret = ftrace_set_clr_event(tr, token, 1);
-               if (ret)
-                       pr_warn("Failed to enable trace event: %s\n", token);
-       }
+       early_enable_events(tr, false);
 
        trace_printk_start_comm();
 
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
        return 0;
 }
 
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, and this flag is never set, so
+ * the syscall tracepoint is never reached even though the event is
+ * enabled (and does nothing).
+ */
+static __init int event_trace_enable_again(void)
+{
+       struct trace_array *tr;
+
+       tr = top_trace_array();
+       if (!tr)
+               return -ENODEV;
+
+       early_enable_events(tr, true);
+
+       return 0;
+}
+
+early_initcall(event_trace_enable_again);
+
 static __init int event_trace_init(void)
 {
        struct trace_array *tr;
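
early_enable_events() above walks the comma-separated bootup_event_buf with strsep() and then writes each ',' back so the same buffer can be re-parsed by event_trace_enable_again() once pid 1 exists. A minimal userspace sketch of that tokenize-and-restore trick, assuming made-up event names and a stand-in enable_one() helper:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

static void enable_one(const char *token)
{
        printf("enable: %s\n", token);
}

static void walk_events(char *buf)
{
        char *token;

        while (1) {
                token = strsep(&buf, ",");
                if (!token)
                        break;
                if (!*token)
                        continue;

                enable_one(token);

                /* strsep() replaced the ',' with '\0'; restore it so a
                 * later pass over the same buffer still sees the list. */
                if (buf)
                        *(buf - 1) = ',';
        }
}

int main(void)
{
        char events[] = "sched:sched_switch,irq:irq_handler_entry";

        walk_events(events);    /* first pass */
        walk_events(events);    /* second pass sees the same list */
        return 0;
}
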
index b0b1c44e923a358bffcebda8c24b3d5e861b2693..3ccf5c2c1320131e5b1bc0bfe6a26b02edc402a8 100644 (file)
@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv)
 
 static __init int kdb_ftrace_register(void)
 {
-       kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
-                           "Dump ftrace log", 0, KDB_REPEAT_NONE);
+       kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+                           "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
        return 0;
 }
 
index 358eb81fa28d1951dc443410a847aeac50ea23bb..c635a107a7dece45eafa5dd30c76b41c4733ffa2 100644 (file)
@@ -73,6 +73,31 @@ config KGDB_KDB
        help
          KDB frontend for kernel
 
+config KDB_DEFAULT_ENABLE
+       hex "KDB: Select kdb command functions to be enabled by default"
+       depends on KGDB_KDB
+       default 0x1
+       help
+         Specifies which kdb commands are enabled by default. This may
+         be set to 1 or 0 to enable all commands or disable almost all
+         commands.
+
+         Alternatively the following bitmask applies:
+
+           0x0002 - allow arbitrary reads from memory and symbol lookup
+           0x0004 - allow arbitrary writes to memory
+           0x0008 - allow current register state to be inspected
+           0x0010 - allow current register state to be modified
+           0x0020 - allow passive inspection (backtrace, process list, lsmod)
+           0x0040 - allow flow control management (breakpoint, single step)
+           0x0080 - enable signalling of processes
+           0x0100 - allow machine to be rebooted
+
+         The config option merely sets the default at boot time. Either
+         issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+         setting the kdb.cmd_enable=X kernel command line option will
+         override the default settings.
+
 config KDB_KEYBOARD
        bool "KGDB_KDB: keyboard as input device"
        depends on VT && KGDB_KDB
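
The KDB_DEFAULT_ENABLE help text above defines a permission bitmask. Purely as an illustration of how those bits compose (the enum names and the allowed() helper are invented here), the value built below is the kind of number one would echo into /sys/module/kdb/parameters/cmd_enable or pass as kdb.cmd_enable=:

#include <stdio.h>

enum {
        KDB_MEM_READ  = 0x0002,
        KDB_MEM_WRITE = 0x0004,
        KDB_REG_READ  = 0x0008,
        KDB_REG_WRITE = 0x0010,
        KDB_INSPECT   = 0x0020,
        KDB_FLOW_CTRL = 0x0040,
        KDB_SIGNAL    = 0x0080,
        KDB_REBOOT    = 0x0100,
};

static int allowed(unsigned int cmd_enable, unsigned int need)
{
        return (cmd_enable & need) == need;
}

int main(void)
{
        /* read-only inspection plus reboot: 0x0002 | 0x0020 | 0x0100 */
        unsigned int cmd_enable = KDB_MEM_READ | KDB_INSPECT | KDB_REBOOT;

        printf("cmd_enable = %#x\n", cmd_enable);               /* 0x122 */
        printf("may write memory: %d\n", allowed(cmd_enable, KDB_MEM_WRITE));
        printf("may reboot:       %d\n", allowed(cmd_enable, KDB_REBOOT));
        return 0;
}
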
index 2404d03e251a64ae7634d30c46aa22a08f9b4538..03dd576e67730fb2870c44512f55c1c0c39b77f3 100644 (file)
@@ -11,6 +11,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 //#define DEBUG
+#include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/assoc_array_priv.h>
index 56badfc4810a8a4e70597ce82a0532929e6e6dda..957d3da53dddcd53b72da82e39f77d951db57a66 100644 (file)
@@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC
        depends on !KMEMCHECK
        select PAGE_EXTENSION
        select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
-       select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
        ---help---
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
@@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC
          that would result in incorrect warnings of memory corruption after
          a resume because free pages are not saved to the suspend image.
 
-config WANT_PAGE_DEBUG_FLAGS
-       bool
-
 config PAGE_POISONING
        bool
-       select WANT_PAGE_DEBUG_FLAGS
-
-config PAGE_GUARD
-       bool
-       select WANT_PAGE_DEBUG_FLAGS
index bd8543c6508fd8dfd9e6fce5f4884d1d6ab0b5bb..673e4581a2e541b44b02cd8ef201772dad5311a1 100644 (file)
@@ -1046,8 +1046,7 @@ EXPORT_SYMBOL(find_lock_entry);
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
- * @radix_gfp_mask: gfp mask to use for radix tree node allocation
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
@@ -1056,11 +1055,9 @@ EXPORT_SYMBOL(find_lock_entry);
  * FGP_ACCESSED: the page will be marked accessed
  * FGP_LOCK: Page is returned locked
  * FGP_CREAT: If page is not present then a new page is allocated using
- *             @cache_gfp_mask and added to the page cache and the VM's LRU
- *             list. If radix tree nodes are allocated during page cache
- *             insertion then @radix_gfp_mask is used. The page is returned
- *             locked and with an increased refcount. Otherwise, %NULL is
- *             returned.
+ *             @gfp_mask and added to the page cache and the VM's LRU
+ *             list. The page is returned locked and with an increased
+ *             refcount. Otherwise, %NULL is returned.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
@@ -1068,7 +1065,7 @@ EXPORT_SYMBOL(find_lock_entry);
  * If there is a page cache page, it is returned with an increased refcount.
  */
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
-       int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+       int fgp_flags, gfp_t gfp_mask)
 {
        struct page *page;
 
@@ -1105,13 +1102,11 @@ no_page:
        if (!page && (fgp_flags & FGP_CREAT)) {
                int err;
                if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
-                       cache_gfp_mask |= __GFP_WRITE;
-               if (fgp_flags & FGP_NOFS) {
-                       cache_gfp_mask &= ~__GFP_FS;
-                       radix_gfp_mask &= ~__GFP_FS;
-               }
+                       gfp_mask |= __GFP_WRITE;
+               if (fgp_flags & FGP_NOFS)
+                       gfp_mask &= ~__GFP_FS;
 
-               page = __page_cache_alloc(cache_gfp_mask);
+               page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return NULL;
 
@@ -1122,7 +1117,8 @@ no_page:
                if (fgp_flags & FGP_ACCESSED)
                        __SetPageReferenced(page);
 
-               err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+               err = add_to_page_cache_lru(page, mapping, offset,
+                               gfp_mask & GFP_RECLAIM_MASK);
                if (unlikely(err)) {
                        page_cache_release(page);
                        page = NULL;
@@ -2443,8 +2439,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
                fgp_flags |= FGP_NOFS;
 
        page = pagecache_get_page(mapping, index, fgp_flags,
-                       mapping_gfp_mask(mapping),
-                       GFP_KERNEL);
+                       mapping_gfp_mask(mapping));
        if (page)
                wait_for_stable_page(page);
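
The filemap.c change above folds the former cache and radix-tree gfp masks into a single gfp_mask, with the tree insertion taking only the reclaim-related subset. A rough userspace model of that flow, using invented F_* flag values rather than real GFP bits:

#include <stdio.h>

#define F_FS      0x01u   /* may call into the filesystem */
#define F_IO      0x02u   /* may start I/O */
#define F_WRITE   0x04u   /* allocation is for a write */
#define F_RECLAIM (F_FS | F_IO)   /* stand-in for GFP_RECLAIM_MASK */

int main(void)
{
        unsigned int gfp_mask = F_FS | F_IO;
        int fgp_write = 1, fgp_nofs = 1;

        if (fgp_write)
                gfp_mask |= F_WRITE;    /* mirrors __GFP_WRITE */
        if (fgp_nofs)
                gfp_mask &= ~F_FS;      /* mirrors ~__GFP_FS */

        /* The page allocation uses the full mask; the tree insert keeps only
         * the reclaim-related bits, as add_to_page_cache_lru() now does with
         * gfp_mask & GFP_RECLAIM_MASK. */
        printf("page alloc mask:  %#x\n", gfp_mask);
        printf("tree insert mask: %#x\n", gfp_mask & F_RECLAIM);
        return 0;
}
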
 
index ef91e856c7e456a0674e7b76e15cbd771a6c9acb..851924fa5170e177e94080da70c64a1a64bcf896 100644 (file)
@@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
        if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
                mem_cgroup_swap_statistics(from, false);
                mem_cgroup_swap_statistics(to, true);
-               /*
-                * This function is only called from task migration context now.
-                * It postpones page_counter and refcount handling till the end
-                * of task migration(mem_cgroup_clear_mc()) for performance
-                * improvement. But we cannot postpone css_get(to)  because if
-                * the process that has been moved to @to does swap-in, the
-                * refcount of @to might be decreased to 0.
-                *
-                * We are in attach() phase, so the cgroup is guaranteed to be
-                * alive, so we can just call css_get().
-                */
-               css_get(&to->css);
                return 0;
        }
        return -EINVAL;
@@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        if (parent_css == NULL) {
                root_mem_cgroup = memcg;
                page_counter_init(&memcg->memory, NULL);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
        }
@@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 
        if (parent->use_hierarchy) {
                page_counter_init(&memcg->memory, &parent->memory);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, &parent->memsw);
                page_counter_init(&memcg->kmem, &parent->kmem);
 
@@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
                 */
        } else {
                page_counter_init(&memcg->memory, NULL);
+               memcg->soft_limit = PAGE_COUNTER_MAX;
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
                /*
@@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
        mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
        mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
        memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
-       memcg->soft_limit = 0;
+       memcg->soft_limit = PAGE_COUNTER_MAX;
 }
 
 #ifdef CONFIG_MMU
index ca920d1fd314a17c7250d7916bd37403afa96b79..54f3a9b0095600749fda793d7e6067bd2c7f997d 100644 (file)
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
+       if (!tlb->end)
+               return;
+
        tlb_flush(tlb);
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
        struct mmu_gather_batch *batch;
 
-       for (batch = &tlb->local; batch; batch = batch->next) {
+       for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-       if (!tlb->end)
-               return;
-
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
 }
@@ -2137,17 +2137,24 @@ reuse:
                if (!dirty_page)
                        return ret;
 
-               /*
-                * Yes, Virginia, this is actually required to prevent a race
-                * with clear_page_dirty_for_io() from clearing the page dirty
-                * bit after it clear all dirty ptes, but before a racing
-                * do_wp_page installs a dirty pte.
-                *
-                * do_shared_fault is protected similarly.
-                */
                if (!page_mkwrite) {
-                       wait_on_page_locked(dirty_page);
-                       set_page_dirty_balance(dirty_page);
+                       struct address_space *mapping;
+                       int dirtied;
+
+                       lock_page(dirty_page);
+                       dirtied = set_page_dirty(dirty_page);
+                       VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
+                       mapping = dirty_page->mapping;
+                       unlock_page(dirty_page);
+
+                       if (dirtied && mapping) {
+                               /*
+                                * Some device drivers do not set page.mapping
+                                * but still dirty their pages
+                                */
+                               balance_dirty_pages_ratelimited(mapping);
+                       }
+
                        /* file_update_time outside page_lock */
                        if (vma->vm_file)
                                file_update_time(vma->vm_file);
@@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (prev && prev->vm_end == address)
                        return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-               expand_downwards(vma, address - PAGE_SIZE);
+               return expand_downwards(vma, address - PAGE_SIZE);
        }
        if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
                struct vm_area_struct *next = vma->vm_next;
@@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (next && next->vm_start == address + PAGE_SIZE)
                        return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-               expand_upwards(vma, address + PAGE_SIZE);
+               return expand_upwards(vma, address + PAGE_SIZE);
        }
        return 0;
 }
index 7b36aa7cc89a43c7c5909b7799b77d13106a2929..7f684d5a808738c3c645798bc6da8b110fbb2383 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -778,10 +778,12 @@ again:                    remove_next = 1 + (end > next->vm_end);
                if (exporter && exporter->anon_vma && !importer->anon_vma) {
                        int error;
 
+                       importer->anon_vma = exporter->anon_vma;
                        error = anon_vma_clone(importer, exporter);
-                       if (error)
+                       if (error) {
+                               importer->anon_vma = NULL;
                                return error;
-                       importer->anon_vma = exporter->anon_vma;
+                       }
                }
        }
 
@@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start;
+       unsigned long new_start, actual_size;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       actual_size = size;
+       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+               actual_size -= PAGE_SIZE;
+       if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
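
acct_stack_growth() now discounts one guard page from the requested size before comparing it against RLIMIT_STACK, so a stack that is exactly at its limit plus the guard page no longer fails. A tiny model of that check, with illustrative constants and helper names:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static int stack_limit_ok(unsigned long size, int grows, unsigned long rlim_cur)
{
        unsigned long actual_size = size;

        if (size && grows)
                actual_size -= PAGE_SIZE;       /* discount the guard page */

        return actual_size <= rlim_cur;
}

int main(void)
{
        unsigned long rlim = 8UL * 1024 * 1024; /* a typical 8 MiB stack limit */

        /* exactly at the limit plus one guard page: now accepted */
        printf("%d\n", stack_limit_ok(rlim + PAGE_SIZE, 1, rlim));     /* 1 */
        /* genuinely over the limit: still rejected */
        printf("%d\n", stack_limit_ok(rlim + 2 * PAGE_SIZE, 1, rlim)); /* 0 */
        return 0;
}
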
index d5d81f5384d16f09076fdb4dc06cdf45f6d238ef..6f4335238e33311de251a647fe725d06d5897060 100644 (file)
@@ -1541,16 +1541,6 @@ pause:
                bdi_start_background_writeback(bdi);
 }
 
-void set_page_dirty_balance(struct page *page)
-{
-       if (set_page_dirty(page)) {
-               struct address_space *mapping = page_mapping(page);
-
-               if (mapping)
-                       balance_dirty_pages_ratelimited(mapping);
-       }
-}
-
 static DEFINE_PER_CPU(int, bdp_ratelimits);
 
 /*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
  *
- * Most callers have locked the page, which pins the address_space in memory.
- * But zap_pte_range() does not lock the page, however in that case the
- * mapping is pinned by the vma's ->vm_file reference.
- *
- * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() inside tree_lock.
+ * The caller must ensure this doesn't race with truncation.  Most will simply
+ * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
+ * the pte lock held, which also locks out truncation.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
-               struct address_space *mapping2;
                unsigned long flags;
 
                if (!mapping)
                        return 1;
 
                spin_lock_irqsave(&mapping->tree_lock, flags);
-               mapping2 = page_mapping(page);
-               if (mapping2) { /* Race with truncate? */
-                       BUG_ON(mapping2 != mapping);
-                       WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-                       account_page_dirtied(page, mapping);
-                       radix_tree_tag_set(&mapping->page_tree,
-                               page_index(page), PAGECACHE_TAG_DIRTY);
-               }
+               BUG_ON(page_mapping(page) != mapping);
+               WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+               account_page_dirtied(page, mapping);
+               radix_tree_tag_set(&mapping->page_tree, page_index(page),
+                                  PAGECACHE_TAG_DIRTY);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
                /*
                 * We carefully synchronise fault handlers against
                 * installing a dirty pte and marking the page dirty
-                * at this point. We do this by having them hold the
-                * page lock at some point after installing their
-                * pte, but before marking the page dirty.
-                * Pages are always locked coming in here, so we get
-                * the desired exclusion. See mm/memory.c:do_wp_page()
-                * for more comments.
+                * at this point.  We do this by having them hold the
+                * page lock while dirtying the page, and pages are
+                * always locked coming in here, so we get the desired
+                * exclusion.
                 */
                if (TestClearPageDirty(page)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
index c5bc241127b205734eaef62964d6d152941174cc..71cd5bd0c17d760c6f6ab1af5991165a1ac05844 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
        anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
        if (anon_vma) {
                atomic_set(&anon_vma->refcount, 1);
+               anon_vma->degree = 1;   /* Reference for first vma */
+               anon_vma->parent = anon_vma;
                /*
                 * Initialise the anon_vma root to point to itself. If called
                 * from fork, the root will be reset to the parents anon_vma.
@@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        anon_vma_chain_link(vma, avc, anon_vma);
+                       /* vma reference or self-parent link for new root */
+                       anon_vma->degree++;
                        allocated = NULL;
                        avc = NULL;
                }
@@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
 /*
  * Attach the anon_vmas from src to dst.
  * Returns 0 on success, -ENOMEM on failure.
+ *
+ * If dst->anon_vma is NULL this function tries to find and reuse an existing
+ * anon_vma which has no vmas and only one child anon_vma. This prevents the
+ * anon_vma hierarchy from degrading into an endless linear chain in the case
+ * of a constantly forking task. On the other hand, an anon_vma with more than
+ * one child isn't reused even if there is no live vma, so the rmap walker has
+ * a good chance of avoiding a scan of the whole hierarchy when it searches
+ * for where a page is mapped.
  */
 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
@@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
                anon_vma = pavc->anon_vma;
                root = lock_anon_vma_root(root, anon_vma);
                anon_vma_chain_link(dst, avc, anon_vma);
+
+               /*
+                * Reuse the existing anon_vma if its degree is lower than two,
+                * which means it has no vma and only one anon_vma child.
+                *
+                * Do not choose the parent anon_vma, otherwise the first child
+                * will always reuse it. The root anon_vma is never reused:
+                * it has a self-parent reference and at least one child.
+                */
+               if (!dst->anon_vma && anon_vma != src->anon_vma &&
+                               anon_vma->degree < 2)
+                       dst->anon_vma = anon_vma;
        }
+       if (dst->anon_vma)
+               dst->anon_vma->degree++;
        unlock_anon_vma_root(root);
        return 0;
 
@@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        if (!pvma->anon_vma)
                return 0;
 
+       /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
+       vma->anon_vma = NULL;
+
        /*
         * First, attach the new VMA to the parent VMA's anon_vmas,
         * so rmap can find non-COWed pages in child processes.
@@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        if (error)
                return error;
 
+       /* An existing anon_vma has been reused, all done then. */
+       if (vma->anon_vma)
+               return 0;
+
        /* Then add our own anon_vma. */
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
@@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
         * lock any of the anon_vmas in this anon_vma tree.
         */
        anon_vma->root = pvma->anon_vma->root;
+       anon_vma->parent = pvma->anon_vma;
        /*
         * With refcounts, an anon_vma can stay around longer than the
         * process it belongs to. The root anon_vma needs to be pinned until
@@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        vma->anon_vma = anon_vma;
        anon_vma_lock_write(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
+       anon_vma->parent->degree++;
        anon_vma_unlock_write(anon_vma);
 
        return 0;
@@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
                 * Leave empty anon_vmas on the list - we'll need
                 * to free them outside the lock.
                 */
-               if (RB_EMPTY_ROOT(&anon_vma->rb_root))
+               if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+                       anon_vma->parent->degree--;
                        continue;
+               }
 
                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
+       if (vma->anon_vma)
+               vma->anon_vma->degree--;
        unlock_anon_vma_root(root);
 
        /*
@@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;
 
+               BUG_ON(anon_vma->degree);
                put_anon_vma(anon_vma);
 
                list_del(&avc->same_vma);
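
The rmap.c hunks add a per-anon_vma degree counter (attached vmas plus child anon_vmas) and let a forking child reuse an ancestor anon_vma only while its degree stays below two. The following is a deliberately simplified userspace model of that bookkeeping; the types and helpers are not the kernel's:

#include <stdio.h>

struct model_anon_vma {
        int degree;     /* attached vmas + child anon_vmas */
};

static void attach_vma(struct model_anon_vma *av) { av->degree++; }
static void detach_vma(struct model_anon_vma *av) { av->degree--; }

static int reusable(const struct model_anon_vma *av)
{
        /* degree < 2: no vma attached and at most one child, safe to reuse */
        return av->degree < 2;
}

int main(void)
{
        /* a non-root anon_vma that currently has one child anon_vma */
        struct model_anon_vma av = { .degree = 1 };

        attach_vma(&av);
        printf("reusable while a vma is attached: %d\n", reusable(&av)); /* 0 */

        detach_vma(&av);        /* the last vma went away */
        printf("reusable after the vma is gone:   %d\n", reusable(&av)); /* 1 */
        return 0;
}
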
index bd9a72bc4a1b81f5b4a53e630360e725f6e1347b..ab2505c3ef5460e23facf80725633e977882ec8b 100644 (file)
@@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
                return false;
 
        /*
-        * There is a potential race between when kswapd checks its watermarks
-        * and a process gets throttled. There is also a potential race if
-        * processes get throttled, kswapd wakes, a large process exits therby
-        * balancing the zones that causes kswapd to miss a wakeup. If kswapd
-        * is going to sleep, no process should be sleeping on pfmemalloc_wait
-        * so wake them now if necessary. If necessary, processes will wake
-        * kswapd and get throttled again
+        * The throttled processes are normally woken up in balance_pgdat() as
+        * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+        * race between when kswapd checks the watermarks and a process gets
+        * throttled. There is also a potential race if processes get
+        * throttled, kswapd wakes, a large process exits thereby balancing the
+        * zones, which causes kswapd to exit balance_pgdat() before reaching
+        * the wake up checks. If kswapd is going to sleep, no process should
+        * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+        * the wake up is premature, processes will wake kswapd and get
+        * throttled again. The difference from wake ups in balance_pgdat() is
+        * that here we are under prepare_to_wait().
         */
-       if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
-               wake_up(&pgdat->pfmemalloc_wait);
-               return false;
-       }
+       if (waitqueue_active(&pgdat->pfmemalloc_wait))
+               wake_up_all(&pgdat->pfmemalloc_wait);
 
        return pgdat_balanced(pgdat, order, classzone_idx);
 }
index fc1835c6bb4099e50e75f964826b0aacdd7200f0..00f9e144cc97b1afe7cefc5b682524103799e4b7 100644 (file)
@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
        kfree(entry);
 
        /* Make room for the rest of the fragments. */
-       if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+       if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
                kfree_skb(skb_out);
                skb_out = NULL;
                goto free;
@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
         * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
         */
        mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
-       max_fragment_size = (mtu - header_size - ETH_HLEN);
+       max_fragment_size = mtu - header_size;
        max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
 
        /* Don't even try to fragment, if we need more than 16 fragments */
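
The fragmentation fix above stops subtracting ETH_HLEN a second time when sizing fragments, so the usable payload per fragment is simply mtu - header_size. A quick arithmetic check of the corrected sizing, with an assumed header size and the 1400-byte fragment cap treated as an illustrative constant:

#include <stdio.h>

#define BATADV_FRAG_MAX_FRAG_SIZE 1400
#define BATADV_FRAG_MAX_FRAGMENTS   16

int main(void)
{
        unsigned int mtu = 1500, header_size = 20;
        unsigned int max_fragment_size, max_packet_size;

        if (mtu > BATADV_FRAG_MAX_FRAG_SIZE)
                mtu = BATADV_FRAG_MAX_FRAG_SIZE;

        max_fragment_size = mtu - header_size;
        max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;

        printf("max fragment payload: %u\n", max_fragment_size); /* 1380 */
        printf("max packet size:      %u\n", max_packet_size);   /* 22080 */
        return 0;
}
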
index 90cff585b37d5a3779cdb8f7b3b90a26eb2df88e..e0bcf9e842737427e8dc6d8f5e1be57f92252933 100644 (file)
@@ -810,7 +810,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
                goto out;
 
        gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
-       if (!gw_node->bandwidth_down == 0)
+       if (!gw_node)
                goto out;
 
        switch (atomic_read(&bat_priv->gw_mode)) {
index ab6bb2af1d45d51a77b93a062b8a5b59cc69b1c0..b24e4bb64fb5fd51c813df7e5801b98d27caf959 100644 (file)
@@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
                if (orig_initialized)
                        atomic_dec(&bat_priv->mcast.num_disabled);
                orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
-       /* If mcast support is being switched off increase the disabled
-        * mcast node counter.
+       /* If mcast support is being switched off or if this is an initial
+        * OGM without mcast support then increase the disabled mcast
+        * node counter.
         */
        } else if (!orig_mcast_enabled &&
-                  orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) {
+                  (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+                   !orig_initialized)) {
                atomic_inc(&bat_priv->mcast.num_disabled);
                orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
        }
@@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
 {
        struct batadv_priv *bat_priv = orig->bat_priv;
 
-       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST))
+       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
+           orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
                atomic_dec(&bat_priv->mcast.num_disabled);
 
        batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
index 8d04d174669ed29c467436d33b099ad5d0ca13c4..fab47f1f3ef9752a4fc6e2caba504631efb510d6 100644 (file)
@@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
        if (!bat_priv->nc.decoding_hash)
                goto err;
 
-       batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
+       batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
                                   &batadv_nc_decoding_hash_lock_class_key);
 
        INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
index 6a484514cd3e98b9e0b27a924b4dcb92f2682055..bea8198d0198104a2fcbf4253f631fb86708e0d6 100644 (file)
@@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 
        batadv_frag_purge_orig(orig_node, NULL);
 
-       batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
-                                 "originator timed out");
-
        if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
                orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
 
@@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
+       orig_node->last_seen = jiffies;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
 #ifdef CONFIG_BATMAN_ADV_MCAST
@@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
                                batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
+                               batadv_tt_global_del_orig(orig_node->bat_priv,
+                                                         orig_node, -1,
+                                                         "originator timed out");
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }
index 35f76f2f7824b8c2756e60bb5249fd4f5502cc8d..6648f321864d8f7b33d258f9ee2c93e339738a93 100644 (file)
@@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv,
 
        router = batadv_orig_router_get(orig_node, recv_if);
 
+       if (!router)
+               return router;
+
        /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
         * and if activated.
         */
-       if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) ||
-           !router)
+       if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
                return router;
 
        /* bonding: loop through the list of possible routers found
index 76617be1e797ba6d41c72fbc8f582d5a5614ff3e..c989253737f05985e214fbf2f272a69412f9a0fb 100644 (file)
@@ -390,7 +390,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
 drop:
        dev->stats.rx_dropped++;
-       kfree_skb(skb);
        return NET_RX_DROP;
 }
 
index 85bcc21e84d2006c4839b2b7f409f2595cb41858..ce82722d049b7c013fd06f66f726fbbf75a01f56 100644 (file)
@@ -533,6 +533,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 
        BT_DBG("");
 
+       if (!l2cap_is_socket(sock))
+               return -EBADFD;
+
        baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
        baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
 
index 67fe5e84e68f0bffb166bbcfb6cdb736f15ab05e..278a194e6af488f67197c3725ca937f554989498 100644 (file)
@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 
        BT_DBG("");
 
+       if (!l2cap_is_socket(sock))
+               return -EBADFD;
+
        session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
        if (!session)
                return -ENOMEM;
index 39a5c8a017263694f1dbdca7888abd700c12952b..3f2e8b830cbd1cf37b65431bfd76981bc2447dff 100644 (file)
@@ -242,7 +242,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
        if (rp->status)
                return;
 
-       if (test_bit(HCI_SETUP, &hdev->dev_flags))
+       if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+           test_bit(HCI_CONFIG, &hdev->dev_flags))
                memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 }
 
@@ -509,7 +510,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
        if (rp->status)
                return;
 
-       if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+       if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+           test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                hdev->hci_ver = rp->hci_ver;
                hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
                hdev->lmp_ver = rp->lmp_ver;
@@ -528,7 +530,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
        if (rp->status)
                return;
 
-       if (test_bit(HCI_SETUP, &hdev->dev_flags))
+       if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+           test_bit(HCI_CONFIG, &hdev->dev_flags))
                memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
 }
 
@@ -2194,7 +2197,12 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                return;
        }
 
-       if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+       /* Require HCI_CONNECTABLE or a whitelist entry to accept the
+        * connection. These features are only touched through mgmt so
+        * only do the checks if HCI_MGMT is set.
+        */
+       if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+           !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
            !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
                                    BDADDR_BREDR)) {
                    hci_reject_conn(hdev, &ev->bdaddr);
index cc25d0b74b3609ef800453484eef7e0f55767162..07348e142f16a783b7764d72314830f4b7844330 100644 (file)
@@ -1314,13 +1314,14 @@ int hidp_connection_add(struct hidp_connadd_req *req,
 {
        struct hidp_session *session;
        struct l2cap_conn *conn;
-       struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+       struct l2cap_chan *chan;
        int ret;
 
        ret = hidp_verify_sockets(ctrl_sock, intr_sock);
        if (ret)
                return ret;
 
+       chan = l2cap_pi(ctrl_sock->sk)->chan;
        conn = NULL;
        l2cap_chan_lock(chan);
        if (chan->conn)
index 1f1de715197c19ab12f5333e9a518a317754f041..e2aa7be3a847f448a404e0a43f6d1a09f1a0517a 100644 (file)
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
        dst = NULL;
 
        if (is_broadcast_ether_addr(dest)) {
-               if (p->flags & BR_PROXYARP &&
+               if (IS_ENABLED(CONFIG_INET) &&
+                   p->flags & BR_PROXYARP &&
                    skb->protocol == htons(ETH_P_ARP))
                        br_do_proxy_arp(skb, br, vid);
 
index 15845814a0f25eaefb95590cb848aa6032a8e038..ba6eb17226da424d59bb462a2089186bbd3233af 100644 (file)
@@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au,
        int ret;
        char tmp_enc[40];
        __le32 tmp[5] = {
-               16u, msg->hdr.crc, msg->footer.front_crc,
+               cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
                msg->footer.middle_crc, msg->footer.data_crc,
        };
        ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
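
This ceph hunk and the osd_client one below are both endianness fixes: an on-wire 32-bit length must be produced with cpu_to_le32() and read back through an __le32 pointer rather than as a native integer. A hedged userspace sketch of the same idea, using glibc's htole32()/le32toh() in place of the kernel helpers:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint32_t len = 16;
        uint32_t wire = htole32(len);           /* what cpu_to_le32(16) produces */
        unsigned char buf[4];
        uint32_t back;

        memcpy(buf, &wire, sizeof(wire));       /* bytes as they appear on the wire */
        printf("wire bytes: %02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);

        /* reading back: interpret as little-endian, not as a host integer */
        memcpy(&back, buf, sizeof(back));
        printf("decoded length: %u\n", le32toh(back));  /* 16 on any host */
        return 0;
}
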
index a83062ceeec90660ee5b384fc2a758a70bbf1049..f2148e22b14897727faeba297e045f2b933a52b1 100644 (file)
@@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len,
        if (src_len != sizeof(u32) + dst_len)
                return -EINVAL;
 
-       buf_len = le32_to_cpu(*(u32 *)src);
+       buf_len = le32_to_cpu(*(__le32 *)src);
        if (buf_len != dst_len)
                return -EINVAL;
 
index f411c28d0a66805661db0a409b140d02e8ca4041..683d493aa1bf2225ac0c029ac403841f7d3c740e 100644 (file)
@@ -1694,6 +1694,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 
        skb_scrub_packet(skb, true);
        skb->protocol = eth_type_trans(skb, dev);
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
        return 0;
 }
@@ -2522,7 +2523,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 /* If MPLS offload request, verify we are testing hardware MPLS features
  * instead of standard features for the netdev.
  */
-#ifdef CONFIG_NET_MPLS_GSO
+#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
 static netdev_features_t net_mpls_features(struct sk_buff *skb,
                                           netdev_features_t features,
                                           __be16 type)
@@ -2562,7 +2563,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
-       const struct net_device *dev = skb->dev;
+       struct net_device *dev = skb->dev;
        netdev_features_t features = dev->features;
        u16 gso_segs = skb_shinfo(skb)->gso_segs;
        __be16 protocol = skb->protocol;
@@ -2570,11 +2571,21 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
        if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
                features &= ~NETIF_F_GSO_MASK;
 
-       if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
-               struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-               protocol = veh->h_vlan_encapsulated_proto;
-       } else if (!vlan_tx_tag_present(skb)) {
-               return harmonize_features(skb, features);
+       /* If this is an encapsulation offload request, verify we are testing
+        * hardware encapsulation features instead of the standard
+        * features for the netdev.
+        */
+       if (skb->encapsulation)
+               features &= dev->hw_enc_features;
+
+       if (!vlan_tx_tag_present(skb)) {
+               if (unlikely(protocol == htons(ETH_P_8021Q) ||
+                            protocol == htons(ETH_P_8021AD))) {
+                       struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+                       protocol = veh->h_vlan_encapsulated_proto;
+               } else {
+                       goto finalize;
+               }
        }
 
        features = netdev_intersect_features(features,
@@ -2591,6 +2602,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
                                                     NETIF_F_HW_VLAN_CTAG_TX |
                                                     NETIF_F_HW_VLAN_STAG_TX);
 
+finalize:
+       if (dev->netdev_ops->ndo_features_check)
+               features &= dev->netdev_ops->ndo_features_check(skb, dev,
+                                                               features);
+
        return harmonize_features(skb, features);
 }
 EXPORT_SYMBOL(netif_skb_features);
@@ -2661,19 +2677,12 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
        if (unlikely(!skb))
                goto out_null;
 
-       /* If encapsulation offload request, verify we are testing
-        * hardware encapsulation features instead of standard
-        * features for the netdev
-        */
-       if (skb->encapsulation)
-               features &= dev->hw_enc_features;
-
        if (netif_needs_gso(dev, skb, features)) {
                struct sk_buff *segs;
 
                segs = skb_gso_segment(skb, features);
                if (IS_ERR(segs)) {
-                       segs = NULL;
+                       goto out_kfree_skb;
                } else if (segs) {
                        consume_skb(skb);
                        skb = segs;
@@ -4557,6 +4566,68 @@ void netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+       void *have;
+       int work, weight;
+
+       list_del_init(&n->poll_list);
+
+       have = netpoll_poll_lock(n);
+
+       weight = n->weight;
+
+       /* This NAPI_STATE_SCHED test is for avoiding a race
+        * with netpoll's poll_napi().  Only the entity which
+        * obtains the lock and sees NAPI_STATE_SCHED set will
+        * actually make the ->poll() call.  Therefore we avoid
+        * accidentally calling ->poll() when NAPI is not scheduled.
+        */
+       work = 0;
+       if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+               work = n->poll(n, weight);
+               trace_napi_poll(n);
+       }
+
+       WARN_ON_ONCE(work > weight);
+
+       if (likely(work < weight))
+               goto out_unlock;
+
+       /* Drivers must not modify the NAPI state if they
+        * consume the entire weight.  In such cases this code
+        * still "owns" the NAPI instance and therefore can
+        * move the instance around on the list at-will.
+        */
+       if (unlikely(napi_disable_pending(n))) {
+               napi_complete(n);
+               goto out_unlock;
+       }
+
+       if (n->gro_list) {
+               /* flush too old packets
+                * If HZ < 1000, flush all packets.
+                */
+               napi_gro_flush(n, HZ >= 1000);
+       }
+
+       /* Some drivers may have called napi_schedule
+        * prior to exhausting their budget.
+        */
+       if (unlikely(!list_empty(&n->poll_list))) {
+               pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
+                            n->dev ? n->dev->name : "backlog");
+               goto out_unlock;
+       }
+
+       list_add_tail(&n->poll_list, repoll);
+
+out_unlock:
+       netpoll_poll_unlock(have);
+
+       return work;
+}
+
 static void net_rx_action(struct softirq_action *h)
 {
        struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4564,74 +4635,34 @@ static void net_rx_action(struct softirq_action *h)
        int budget = netdev_budget;
        LIST_HEAD(list);
        LIST_HEAD(repoll);
-       void *have;
 
        local_irq_disable();
        list_splice_init(&sd->poll_list, &list);
        local_irq_enable();
 
-       while (!list_empty(&list)) {
+       for (;;) {
                struct napi_struct *n;
-               int work, weight;
-
-               /* If softirq window is exhausted then punt.
-                * Allow this to run for 2 jiffies since which will allow
-                * an average latency of 1.5/HZ.
-                */
-               if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
-                       goto softnet_break;
-
-
-               n = list_first_entry(&list, struct napi_struct, poll_list);
-               list_del_init(&n->poll_list);
 
-               have = netpoll_poll_lock(n);
-
-               weight = n->weight;
-
-               /* This NAPI_STATE_SCHED test is for avoiding a race
-                * with netpoll's poll_napi().  Only the entity which
-                * obtains the lock and sees NAPI_STATE_SCHED set will
-                * actually make the ->poll() call.  Therefore we avoid
-                * accidentally calling ->poll() when NAPI is not scheduled.
-                */
-               work = 0;
-               if (test_bit(NAPI_STATE_SCHED, &n->state)) {
-                       work = n->poll(n, weight);
-                       trace_napi_poll(n);
+               if (list_empty(&list)) {
+                       if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
+                               return;
+                       break;
                }
 
-               WARN_ON_ONCE(work > weight);
-
-               budget -= work;
+               n = list_first_entry(&list, struct napi_struct, poll_list);
+               budget -= napi_poll(n, &repoll);
 
-               /* Drivers must not modify the NAPI state if they
-                * consume the entire weight.  In such cases this code
-                * still "owns" the NAPI instance and therefore can
-                * move the instance around on the list at-will.
+               /* If the softirq window is exhausted then punt.
+                * Allow this to run for 2 jiffies, which allows
+                * an average latency of 1.5/HZ.
                 */
-               if (unlikely(work == weight)) {
-                       if (unlikely(napi_disable_pending(n))) {
-                               napi_complete(n);
-                       } else {
-                               if (n->gro_list) {
-                                       /* flush too old packets
-                                        * If HZ < 1000, flush all packets.
-                                        */
-                                       napi_gro_flush(n, HZ >= 1000);
-                               }
-                               list_add_tail(&n->poll_list, &repoll);
-                       }
+               if (unlikely(budget <= 0 ||
+                            time_after_eq(jiffies, time_limit))) {
+                       sd->time_squeeze++;
+                       break;
                }
-
-               netpoll_poll_unlock(have);
        }
 
-       if (!sd_has_rps_ipi_waiting(sd) &&
-           list_empty(&list) &&
-           list_empty(&repoll))
-               return;
-out:
        local_irq_disable();
 
        list_splice_tail_init(&sd->poll_list, &list);
@@ -4641,12 +4672,6 @@ out:
                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
 
        net_rps_action_and_irq_enable(sd);
-
-       return;
-
-softnet_break:
-       sd->time_squeeze++;
-       goto out;
 }
 
 struct netdev_adjacent {
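
net_rx_action() is reorganized above around a napi_poll() helper, with one budget and time check per loop iteration instead of per-driver bookkeeping. The sketch below models only the budget accounting, not the real NAPI machinery; all names and numbers are invented:

#include <stdio.h>

#define BUDGET 300
#define WEIGHT  64

struct source { int pending; };

/* Poll one source and return the amount of work done (at most WEIGHT). */
static int poll_one(struct source *s)
{
        int work = s->pending < WEIGHT ? s->pending : WEIGHT;

        s->pending -= work;
        return work;
}

static int total_pending(const struct source *s, int n)
{
        int i, sum = 0;

        for (i = 0; i < n; i++)
                sum += s[i].pending;
        return sum;
}

int main(void)
{
        struct source srcs[] = { { 200 }, { 50 }, { 400 } };
        int n = sizeof(srcs) / sizeof(srcs[0]);
        int budget = BUDGET, i = 0, squeezed = 0;

        while (total_pending(srcs, n)) {
                budget -= poll_one(&srcs[i++ % n]);

                if (budget <= 0) {      /* window exhausted: punt, resume later */
                        squeezed = 1;
                        break;
                }
        }

        printf("budget left=%d squeezed=%d still pending=%d\n",
               budget, squeezed, total_pending(srcs, n));
        return 0;
}
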
index 8e38f17288d3c5a475471b0e56e339d9b6d5bf9e..8d614c93f86a233a5cb3864c600d9c68fe5f9ea1 100644 (file)
@@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
                        case NDTPA_BASE_REACHABLE_TIME:
                                NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
                                              nla_get_msecs(tbp[i]));
+                               /* update reachable_time as well, otherwise the change will
+                                * only be effective after the next time neigh_periodic_work
+                                * decides to recompute it (which can take multiple minutes)
+                                */
+                               p->reachable_time =
+                                       neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
                                break;
                        case NDTPA_GC_STALETIME:
                                NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+                                         void __user *buffer,
+                                         size_t *lenp, loff_t *ppos)
+{
+       struct neigh_parms *p = ctl->extra2;
+       int ret;
+
+       if (strcmp(ctl->procname, "base_reachable_time") == 0)
+               ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+       else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
+               ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+       else
+               ret = -1;
+
+       if (write && ret == 0) {
+               /* update reachable_time as well, otherwise the change will
+                * only be effective after the next time neigh_periodic_work
+                * decides to recompute it
+                */
+               p->reachable_time =
+                       neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+       }
+       return ret;
+}
+
 #define NEIGH_PARMS_DATA_OFFSET(index) \
        (&((struct neigh_parms *) 0)->data[index])
 
@@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
                /* ReachableTime (in milliseconds) */
                t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+       } else {
+               /* These handlers will update p->reachable_time after
+                * base_reachable_time(_ms) is set, to ensure the new timer starts being
+                * applied after the next neighbour update instead of waiting for
+                * neigh_periodic_work to update its value (which can take multiple minutes).
+                * So any handler that replaces them should do this as well.
+                */
+               /* ReachableTime */
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
+                       neigh_proc_base_reachable_time;
+               /* ReachableTime (in milliseconds) */
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
+                       neigh_proc_base_reachable_time;
        }
 
        /* Don't export sysctls to unprivileged users */
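
The neighbour.c change recomputes p->reachable_time as soon as base_reachable_time(_ms) is written instead of waiting for neigh_periodic_work. The helper below only approximates neigh_rand_reach_time() (a value randomized around the base, in the spirit of RFC 4861) and is not the kernel implementation:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static unsigned long rand_reach_time(unsigned long base)
{
        /* uniform in roughly [base/2, 3*base/2), similar to the kernel helper */
        return base / 2 + (unsigned long)((double)rand() / RAND_MAX * base);
}

int main(void)
{
        unsigned long base_ms = 30000;  /* base_reachable_time_ms just written */
        unsigned long reachable;

        srand((unsigned)time(NULL));

        /* apply immediately, as the new proc handler does */
        reachable = rand_reach_time(base_ms);
        printf("new reachable_time: %lu ms\n", reachable);
        return 0;
}
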
index ae13ef6b3ea7f40b6843fa2d5b33fc06de0cb0b5..395c15b82087253cd9905d315cd689a29b89ae4d 100644 (file)
@@ -4148,6 +4148,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb->mark = 0;
+       skb_init_secmark(skb);
        secpath_reset(skb);
        nf_reset(skb);
        nf_reset_trace(skb);
index 95e47c97585e2e34635976d6a352a1484d5c091c..394a200f93c1f5b5338ccab70286545acad1ebda 100644 (file)
@@ -122,14 +122,18 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
        int err;
 
        skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
 
        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
 
        err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err))
+       if (unlikely(err)) {
+               kfree_skb(skb);
                return err;
+       }
 
        skb = vlan_hwaccel_push_inside(skb);
        if (unlikely(!skb))
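
The geneve fix above makes the caller honour the error pointer returned by udp_tunnel_handle_offloads() and free the skb on the later failure path. The sketch below re-creates the error-pointer convention in userspace with simplified macros; it is not the kernel's <linux/err.h>:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *make_buffer(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);       /* encode the errno in the pointer */
        return malloc(64);
}

int main(void)
{
        void *buf = make_buffer(1);

        if (IS_ERR(buf)) {
                printf("propagate error: %ld\n", PTR_ERR(buf));
                return 1;
        }

        /* ... use buf ...; on any later error, free it before returning */
        free(buf);
        return 0;
}
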
index ff2d23d8c87a16964e29f478640dccd5ca527a02..6ecfce63201a2753d4943813d3eccbb951f34d0b 100644 (file)
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
 
        memset(&mr, 0, sizeof(mr));
        if (priv->sreg_proto_min) {
-               mr.range[0].min.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               mr.range[0].max.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               mr.range[0].min.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               mr.range[0].max.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 7f18262e2326ac4d7963347d7458273a325caa64..65caf8b95e1722b62fcdbcfae9526f7d529a6557 100644 (file)
@@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
                        break;
 
-               if (tso_segs == 1) {
+               if (tso_segs == 1 || !max_segs) {
                        if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
                                                     (tcp_skb_is_last(sk, skb) ?
                                                      nonagle : TCP_NAGLE_PUSH))))
@@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                }
 
                limit = mss_now;
-               if (tso_segs > 1 && !tcp_urg_mode(tp))
+               if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
                                                    min_t(unsigned int,
                                                          cwnd_quota,
index 2433a6bfb191c4259bedfe2d697baa618c818b51..11820b6b36130d4ec83af3f173a1ca70df8cbf93 100644 (file)
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
 
        memset(&range, 0, sizeof(range));
        if (priv->sreg_proto_min) {
-               range.min_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               range.max_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               range.min_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               range.max_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 5ff87805258eb1f0dfa1ed14967b2e1c8f5c4c6a..9c0b54e87b472390c080857f886a2af4a7a300f8 100644 (file)
@@ -1387,6 +1387,28 @@ ipv6_pktoptions:
        return 0;
 }
 
+static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+                          const struct tcphdr *th)
+{
+       /* This is tricky: we move IP6CB at its correct location into
+        * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
+        * _decode_session6() uses IP6CB().
+        * barrier() makes sure compiler won't play aliasing games.
+        */
+       memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
+               sizeof(struct inet6_skb_parm));
+       barrier();
+
+       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+                                   skb->len - th->doff*4);
+       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+       TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
+       TCP_SKB_CB(skb)->sacked = 0;
+}
+
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
        const struct tcphdr *th;
@@ -1418,24 +1440,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 
        th = tcp_hdr(skb);
        hdr = ipv6_hdr(skb);
-       /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
-        * barrier() makes sure compiler wont play fool^Waliasing games.
-        */
-       memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
-               sizeof(struct inet6_skb_parm));
-       barrier();
-
-       TCP_SKB_CB(skb)->seq = ntohl(th->seq);
-       TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
-                                   skb->len - th->doff*4);
-       TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-       TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
-       TCP_SKB_CB(skb)->tcp_tw_isn = 0;
-       TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
-       TCP_SKB_CB(skb)->sacked = 0;
 
        sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
-                               tcp_v6_iif(skb));
+                               inet6_iif(skb));
        if (!sk)
                goto no_tcp_socket;
 
@@ -1451,6 +1458,8 @@ process:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
+       tcp_v6_fill_cb(skb, hdr, th);
+
 #ifdef CONFIG_TCP_MD5SIG
        if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
@@ -1482,6 +1491,8 @@ no_tcp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
 
+       tcp_v6_fill_cb(skb, hdr, th);
+
        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
 csum_error:
                TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
@@ -1505,6 +1516,8 @@ do_time_wait:
                goto discard_it;
        }
 
+       tcp_v6_fill_cb(skb, hdr, th);
+
        if (skb->len < (th->doff<<2)) {
                inet_twsk_put(inet_twsk(sk));
                goto bad_packet;
index 0bb7038121ac5557ba90114b706f8cbc0e404af2..bd4e46ec32bd3d43473c2ee3ac2b0b9888d397de 100644 (file)
@@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
        if (!ret) {
                key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-               if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+               if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
                        sdata->crypto_tx_tailroom_needed_cnt--;
 
                WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
        sta = key->sta;
        sdata = key->sdata;
 
-       if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+       if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+             (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+             (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
                increment_tailroom_need_count(sdata);
 
        ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
        if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
                key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-               if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+               if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
                        increment_tailroom_need_count(key->sdata);
        }
 
index ca27837974fedd82fffcd9c91aa565c60534b13c..349295d21946d0072fb6a0a6bada3bd45d54f95f 100644 (file)
@@ -31,10 +31,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
                                  SKB_GSO_TCPV6 |
                                  SKB_GSO_UDP |
                                  SKB_GSO_DODGY |
-                                 SKB_GSO_TCP_ECN |
-                                 SKB_GSO_GRE |
-                                 SKB_GSO_GRE_CSUM |
-                                 SKB_GSO_IPIP)))
+                                 SKB_GSO_TCP_ECN)))
                goto out;
 
        /* Setup inner SKB. */
index 1d5341f3761dfe1e57cc6505493bf270cca672de..5d3daae98bf0be1bc0fa699ec96208d2fa3da1d4 100644 (file)
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct nf_conn *ct;
        struct net *net;
 
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct ip_vs_conn *n_cp;
        struct net *net;
 
+       /* no diff required for incoming packets */
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       /* no diff required for incoming packets */
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
index a11674806707e18fb9d86860ad0717dfda0b0da2..46d1b26a468ed0d4cd0c9f30c02398f51c922db0 100644 (file)
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
-       /* We have to check the DYING flag inside the lock to prevent
-          a race against nf_ct_get_next_corpse() possibly called from
-          user context, else we insert an already 'dead' hash, blocking
-          further use of that particular connection -JM */
+       /* We have to check the DYING flag after unlink to prevent
+        * a race against nf_ct_get_next_corpse() possibly called from
+        * user context, else we insert an already 'dead' hash, blocking
+        * further use of that particular connection -JM.
+        */
+       nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
-       if (unlikely(nf_ct_is_dying(ct))) {
-               nf_conntrack_double_unlock(hash, reply_hash);
-               local_bh_enable();
-               return NF_ACCEPT;
-       }
+       if (unlikely(nf_ct_is_dying(ct)))
+               goto out;
 
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
 
-       nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        return NF_ACCEPT;
 
 out:
+       nf_ct_add_to_dying_list(ct);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
index 129a8daa4abf31959801e99c4f2fbfc7f1aab230..3b3ddb4fb9ee122a5b6d3a39450be38a64d6f614 100644 (file)
@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
        struct nft_chain *chain, *nc;
        struct nft_set *set, *ns;
 
-       list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+       list_for_each_entry(chain, &ctx->table->chains, list) {
                ctx->chain = chain;
 
                err = nft_delrule_by_chain(ctx);
                if (err < 0)
                        goto out;
-
-               err = nft_delchain(ctx);
-               if (err < 0)
-                       goto out;
        }
 
        list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
                        goto out;
        }
 
+       list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+               ctx->chain = chain;
+
+               err = nft_delchain(ctx);
+               if (err < 0)
+                       goto out;
+       }
+
        err = nft_deltable(ctx);
 out:
        return err;
index 13c2e17bbe279e6660a0a04fc804a6e1dd0a7707..c421d94c4652625147ba65aebe4c251561cf37ff 100644 (file)
@@ -321,7 +321,8 @@ replay:
                nlh = nlmsg_hdr(skb);
                err = 0;
 
-               if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+               if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+                   skb->len < nlh->nlmsg_len) {
                        err = -EINVAL;
                        goto ack;
                }
@@ -463,13 +464,13 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 }
 
 #ifdef CONFIG_MODULES
-static int nfnetlink_bind(int group)
+static int nfnetlink_bind(struct net *net, int group)
 {
        const struct nfnetlink_subsystem *ss;
        int type;
 
        if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
-               return -EINVAL;
+               return 0;
 
        type = nfnl_group2type[group];
 
index afe2b0b45ec41f82df6f2430958a4392aa5eb608..aff54fb1c8a09fdb99fd4caab0959a80f28ffc37 100644 (file)
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
        }
 
        if (priv->sreg_proto_min) {
-               range.min_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_min].data[0];
-               range.max_proto.all = (__force __be16)
-                                       data[priv->sreg_proto_max].data[0];
+               range.min_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_min].data[0];
+               range.max_proto.all =
+                       *(__be16 *)&data[priv->sreg_proto_max].data[0];
                range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
        }
 
index 074cf3e91c6f2d5cb27ff7599f42d5b6c0ee1fd4..84ea76ca3f1fc52da96c31d3bf27fda678a1dd19 100644 (file)
@@ -1091,8 +1091,10 @@ static void netlink_remove(struct sock *sk)
        mutex_unlock(&nl_sk_hash_lock);
 
        netlink_table_grab();
-       if (nlk_sk(sk)->subscriptions)
+       if (nlk_sk(sk)->subscriptions) {
                __sk_del_bind_node(sk);
+               netlink_update_listeners(sk);
+       }
        netlink_table_ungrab();
 }
 
@@ -1139,8 +1141,8 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
        struct module *module = NULL;
        struct mutex *cb_mutex;
        struct netlink_sock *nlk;
-       int (*bind)(int group);
-       void (*unbind)(int group);
+       int (*bind)(struct net *net, int group);
+       void (*unbind)(struct net *net, int group);
        int err = 0;
 
        sock->state = SS_UNCONNECTED;
@@ -1226,8 +1228,8 @@ static int netlink_release(struct socket *sock)
 
        module_put(nlk->module);
 
-       netlink_table_grab();
        if (netlink_is_kernel(sk)) {
+               netlink_table_grab();
                BUG_ON(nl_table[sk->sk_protocol].registered == 0);
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        struct listeners *old;
@@ -1241,11 +1243,16 @@ static int netlink_release(struct socket *sock)
                        nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
-       } else if (nlk->subscriptions) {
-               netlink_update_listeners(sk);
+               netlink_table_ungrab();
        }
-       netlink_table_ungrab();
 
+       if (nlk->netlink_unbind) {
+               int i;
+
+               for (i = 0; i < nlk->ngroups; i++)
+                       if (test_bit(i, nlk->groups))
+                               nlk->netlink_unbind(sock_net(sk), i + 1);
+       }
        kfree(nlk->groups);
        nlk->groups = NULL;
 
@@ -1410,9 +1417,10 @@ static int netlink_realloc_groups(struct sock *sk)
        return err;
 }
 
-static void netlink_unbind(int group, long unsigned int groups,
-                          struct netlink_sock *nlk)
+static void netlink_undo_bind(int group, long unsigned int groups,
+                             struct sock *sk)
 {
+       struct netlink_sock *nlk = nlk_sk(sk);
        int undo;
 
        if (!nlk->netlink_unbind)
@@ -1420,7 +1428,7 @@ static void netlink_unbind(int group, long unsigned int groups,
 
        for (undo = 0; undo < group; undo++)
                if (test_bit(undo, &groups))
-                       nlk->netlink_unbind(undo);
+                       nlk->netlink_unbind(sock_net(sk), undo);
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1458,10 +1466,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                for (group = 0; group < nlk->ngroups; group++) {
                        if (!test_bit(group, &groups))
                                continue;
-                       err = nlk->netlink_bind(group);
+                       err = nlk->netlink_bind(net, group);
                        if (!err)
                                continue;
-                       netlink_unbind(group, groups, nlk);
+                       netlink_undo_bind(group, groups, sk);
                        return err;
                }
        }
@@ -1471,7 +1479,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        netlink_insert(sk, net, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err) {
-                       netlink_unbind(nlk->ngroups, groups, nlk);
+                       netlink_undo_bind(nlk->ngroups, groups, sk);
                        return err;
                }
        }
@@ -2122,7 +2130,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                if (!val || val - 1 >= nlk->ngroups)
                        return -EINVAL;
                if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
-                       err = nlk->netlink_bind(val);
+                       err = nlk->netlink_bind(sock_net(sk), val);
                        if (err)
                                return err;
                }
@@ -2131,7 +2139,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                                         optname == NETLINK_ADD_MEMBERSHIP);
                netlink_table_ungrab();
                if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
-                       nlk->netlink_unbind(val);
+                       nlk->netlink_unbind(sock_net(sk), val);
 
                err = 0;
                break;
index b20a1731759b2e6dc8bbe3064933bb3d67134d26..f123a88496f8f5282287ba5028ae03110d25bb7c 100644 (file)
@@ -39,8 +39,8 @@ struct netlink_sock {
        struct mutex            *cb_mutex;
        struct mutex            cb_def_mutex;
        void                    (*netlink_rcv)(struct sk_buff *skb);
-       int                     (*netlink_bind)(int group);
-       void                    (*netlink_unbind)(int group);
+       int                     (*netlink_bind)(struct net *net, int group);
+       void                    (*netlink_unbind)(struct net *net, int group);
        struct module           *module;
 #ifdef CONFIG_NETLINK_MMAP
        struct mutex            pg_vec_lock;
@@ -65,8 +65,8 @@ struct netlink_table {
        unsigned int            groups;
        struct mutex            *cb_mutex;
        struct module           *module;
-       int                     (*bind)(int group);
-       void                    (*unbind)(int group);
+       int                     (*bind)(struct net *net, int group);
+       void                    (*unbind)(struct net *net, int group);
        bool                    (*compare)(struct net *net, struct sock *sock);
        int                     registered;
 };
index 76393f2f4b225713b9c85ee5aae722e109881080..2e11061ef885562d2ff2a098448a7d9c8d1b64ee 100644 (file)
@@ -983,11 +983,67 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
        { .name = "notify", },
 };
 
+static int genl_bind(struct net *net, int group)
+{
+       int i, err = 0;
+
+       down_read(&cb_lock);
+       for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+               struct genl_family *f;
+
+               list_for_each_entry(f, genl_family_chain(i), family_list) {
+                       if (group >= f->mcgrp_offset &&
+                           group < f->mcgrp_offset + f->n_mcgrps) {
+                               int fam_grp = group - f->mcgrp_offset;
+
+                               if (!f->netnsok && net != &init_net)
+                                       err = -ENOENT;
+                               else if (f->mcast_bind)
+                                       err = f->mcast_bind(net, fam_grp);
+                               else
+                                       err = 0;
+                               break;
+                       }
+               }
+       }
+       up_read(&cb_lock);
+
+       return err;
+}
+
+static void genl_unbind(struct net *net, int group)
+{
+       int i;
+       bool found = false;
+
+       down_read(&cb_lock);
+       for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+               struct genl_family *f;
+
+               list_for_each_entry(f, genl_family_chain(i), family_list) {
+                       if (group >= f->mcgrp_offset &&
+                           group < f->mcgrp_offset + f->n_mcgrps) {
+                               int fam_grp = group - f->mcgrp_offset;
+
+                               if (f->mcast_unbind)
+                                       f->mcast_unbind(net, fam_grp);
+                               found = true;
+                               break;
+                       }
+               }
+       }
+       up_read(&cb_lock);
+
+       WARN_ON(!found);
+}
+
 static int __net_init genl_pernet_init(struct net *net)
 {
        struct netlink_kernel_cfg cfg = {
                .input          = genl_rcv,
                .flags          = NL_CFG_F_NONROOT_RECV,
+               .bind           = genl_bind,
+               .unbind         = genl_unbind,
        };
 
        /* we'll bump the group number right afterwards */
index 764fdc39c63b6565d29d2c4a1d1ed7629924c412..770064c837112ca23fb1bbecf75b2d84643e88b0 100644 (file)
@@ -147,7 +147,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
        hdr = eth_hdr(skb);
        hdr->h_proto = mpls->mpls_ethertype;
 
-       skb_set_inner_protocol(skb, skb->protocol);
+       if (!skb->inner_protocol)
+               skb_set_inner_protocol(skb, skb->protocol);
        skb->protocol = mpls->mpls_ethertype;
 
        invalidate_flow_key(key);
index 332b5a0317392002a18b86dadfd4671e18fb6896..b07349e82d788dba64e768cf3ea1da77b08a9652 100644 (file)
@@ -83,8 +83,7 @@ static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
                            unsigned int group)
 {
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
-              genl_has_listeners(family, genl_info_net(info)->genl_sock,
-                                 group);
+              genl_has_listeners(family, genl_info_net(info), group);
 }
 
 static void ovs_notify(struct genl_family *family,
@@ -525,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        struct vport *input_vport;
        int len;
        int err;
-       bool log = !a[OVS_FLOW_ATTR_PROBE];
+       bool log = !a[OVS_PACKET_ATTR_PROBE];
 
        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -611,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+       [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
index 70bef2ab7f2bc6017081ad780a8c5cb1aa9feb67..da2fae0873a5d7f4ad56a85b81575152ec1727bc 100644 (file)
@@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 {
        struct flow_stats *stats;
        int node = numa_node_id();
+       int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
 
        stats = rcu_dereference(flow->stats[node]);
 
@@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
                                if (likely(new_stats)) {
                                        new_stats->used = jiffies;
                                        new_stats->packet_count = 1;
-                                       new_stats->byte_count = skb->len;
+                                       new_stats->byte_count = len;
                                        new_stats->tcp_flags = tcp_flags;
                                        spin_lock_init(&new_stats->lock);
 
@@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 
        stats->used = jiffies;
        stats->packet_count++;
-       stats->byte_count += skb->len;
+       stats->byte_count += len;
        stats->tcp_flags |= tcp_flags;
 unlock:
        spin_unlock(&stats->lock);
index 9645a21d9eaa316057c0ed137a43bce11f34b70a..d1eecf707613eb5cb0c68531f63cf46ff6f82c63 100644 (file)
@@ -1753,7 +1753,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
                                  __be16 eth_type, __be16 vlan_tci, bool log)
 {
        const struct nlattr *a;
-       bool out_tnl_port = false;
        int rem, err;
 
        if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1796,8 +1795,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
                case OVS_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
-                       out_tnl_port = false;
-
                        break;
 
                case OVS_ACTION_ATTR_HASH: {
@@ -1832,12 +1829,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
                case OVS_ACTION_ATTR_PUSH_MPLS: {
                        const struct ovs_action_push_mpls *mpls = nla_data(a);
 
-                       /* Networking stack do not allow simultaneous Tunnel
-                        * and MPLS GSO.
-                        */
-                       if (out_tnl_port)
-                               return -EINVAL;
-
                        if (!eth_p_mpls(mpls->mpls_ethertype))
                                return -EINVAL;
                        /* Prohibit push MPLS other than to a white list
@@ -1873,11 +1864,9 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 
                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key, sfa,
-                                          &out_tnl_port, eth_type, log);
+                                          &skip_copy, eth_type, log);
                        if (err)
                                return err;
-
-                       skip_copy = out_tnl_port;
                        break;
 
                case OVS_ACTION_ATTR_SAMPLE:
index 347fa2325b226309b8e3cfb239f2e1d0a6631a50..484864dd0e689290dfca8a2c204e984de1b33184 100644 (file)
@@ -219,7 +219,10 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
                              false);
        if (err < 0)
                ip_rt_put(rt);
+       return err;
+
 error:
+       kfree_skb(skb);
        return err;
 }
 
index 6b69df545b1da3dba7ffedb92e106855106fd8d6..d4168c442db5af8683c72bfb24ce95c789dadcf8 100644 (file)
@@ -73,7 +73,7 @@ static struct sk_buff *__build_header(struct sk_buff *skb,
 
        skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
        if (IS_ERR(skb))
-               return NULL;
+               return skb;
 
        tpi.flags = filter_tnl_flags(tun_key->tun_flags);
        tpi.proto = htons(ETH_P_TEB);
@@ -144,7 +144,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
                err = -EINVAL;
-               goto error;
+               goto err_free_skb;
        }
 
        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
@@ -157,8 +157,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
        fl.flowi4_proto = IPPROTO_GRE;
 
        rt = ip_route_output_key(net, &fl);
-       if (IS_ERR(rt))
-               return PTR_ERR(rt);
+       if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
+               goto err_free_skb;
+       }
 
        tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
 
@@ -183,8 +185,9 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        /* Push Tunnel header. */
        skb = __build_header(skb, tunnel_hlen);
-       if (unlikely(!skb)) {
-               err = 0;
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               skb = NULL;
                goto err_free_rt;
        }
 
@@ -198,7 +201,8 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
                             tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
 err_free_rt:
        ip_rt_put(rt);
-error:
+err_free_skb:
+       kfree_skb(skb);
        return err;
 }
 
index 38f95a52241bd53af8de06b76079e403de8413cf..d7c46b301024906cf748453d24b91d2790f2e272 100644 (file)
@@ -187,7 +187,9 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
                             false);
        if (err < 0)
                ip_rt_put(rt);
+       return err;
 error:
+       kfree_skb(skb);
        return err;
 }
 
index 9584526c077804f21d22726d4bb5bd7cc4260d36..2034c6d9cb5a51f3094747e768c93522024bea96 100644 (file)
@@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
        stats = this_cpu_ptr(vport->percpu_stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
-       stats->rx_bytes += skb->len;
+       stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
        u64_stats_update_end(&stats->syncp);
 
        OVS_CB(skb)->input_vport = vport;
@@ -519,10 +519,9 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
                u64_stats_update_end(&stats->syncp);
        } else if (sent < 0) {
                ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
-               kfree_skb(skb);
-       } else
+       } else {
                ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
-
+       }
        return sent;
 }
 
index e52a4478568198e88d1d8f8ee6a8506284c36323..9cfe2e1dd8b5099bbac7d824241bd0d06cb01cf7 100644 (file)
@@ -785,6 +785,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 
        struct tpacket3_hdr *last_pkt;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+       struct sock *sk = &po->sk;
 
        if (po->stats.stats3.tp_drops)
                status |= TP_STATUS_LOSING;
@@ -809,6 +810,8 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
        /* Flush the block */
        prb_flush_block(pkc1, pbd1, status);
 
+       sk->sk_data_ready(sk);
+
        pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 }
 
@@ -2052,12 +2055,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        smp_wmb();
 #endif
 
-       if (po->tp_version <= TPACKET_V2)
+       if (po->tp_version <= TPACKET_V2) {
                __packet_set_status(po, h.raw, status);
-       else
+               sk->sk_data_ready(sk);
+       } else {
                prb_clear_blk_fill_status(&po->rx_ring);
-
-       sk->sk_data_ready(sk);
+       }
 
 drop_n_restore:
        if (skb_head != skb->data && skb_shared(skb)) {
@@ -2514,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        err = -EINVAL;
        if (sock->type == SOCK_DGRAM) {
                offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
-               if (unlikely(offset) < 0)
+               if (unlikely(offset < 0))
                        goto out_free;
        } else {
                if (ll_header_truncated(dev, len))
index 1cb61242e55e47e66d5e50e9ad1b0fafd127ac40..4439ac4c1b53fcaf12a8a06723b4a51330dddeae 100644 (file)
@@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
        struct kvec *head = buf->head;
        struct kvec *tail = buf->tail;
        int fraglen;
-       int new, old;
+       int new;
 
        if (len > buf->len) {
                WARN_ON_ONCE(1);
@@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
        buf->len -= fraglen;
 
        new = buf->page_base + buf->page_len;
-       old = new + fraglen;
-       xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
+
+       xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
 
        if (buf->page_len) {
                xdr->p = page_address(*xdr->page_ptr);
index 96ceefeb9daf4eb780cd168b4d008418c0055dd9..a9e174fc0f91fd672eb3779f23fff9ef7decdb74 100644 (file)
@@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
        struct sk_buff *skb;
 
        skb_queue_walk(&bcl->outqueue, skb) {
-               if (more(buf_seqno(skb), after))
+               if (more(buf_seqno(skb), after)) {
+                       tipc_link_retransmit(bcl, skb, mod(to - after));
                        break;
+               }
        }
-       tipc_link_retransmit(bcl, skb, mod(to - after));
 }
 
 /**
index 22ba971741e5c998c5f314e083ceead4b2f6a94f..29c8675f9a1189db65c185f2ad04f96a67702989 100644 (file)
@@ -175,7 +175,7 @@ config CFG80211_INTERNAL_REGDB
          Most distributions have a CRDA package.  So if unsure, say N.
 
 config CFG80211_WEXT
-       bool
+       bool "cfg80211 wireless extensions compatibility"
        depends on CFG80211
        select WEXT_CORE
        help
index 1bca180db8ad0b6475464f3ca8ea78c881c1b1c5..627f8cbbedb88ca29667bbf1f88eb2004d5ee461 100644 (file)
@@ -42,19 +42,19 @@ __clean-files       := $(extra-y) $(extra-m) $(extra-)       \
 
 __clean-files   := $(filter-out $(no-clean-files), $(__clean-files))
 
-# as clean-files is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# clean-files is given relative to the current directory, unless it
+# starts with $(objtree)/ (which means "./", so do not add "./" unless
+# you want to delete a file from the toplevel object directory).
 
 __clean-files   := $(wildcard                                               \
-                   $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
-                  $(filter /%, $(__clean-files)))
+                  $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
+                  $(filter $(objtree)/%, $(__clean-files)))
 
-# as clean-dirs is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# same as clean-files
 
 __clean-dirs    := $(wildcard                                               \
-                   $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs)))    \
-                  $(filter /%, $(clean-dirs)))
+                  $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs)))    \
+                  $(filter $(objtree)/%, $(clean-dirs)))
 
 # ==========================================================================
 
index 9609a7f0faea2d53d2ca7c642cf24935012cdaae..c7952375ac5325cfb4c403fa1020671b5f31a150 100644 (file)
@@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                        atomic_dec(&key->user->nikeys);
 
-               key_user_put(key->user);
-
                /* now throw away the key memory */
                if (key->type->destroy)
                        key->type->destroy(key);
 
+               key_user_put(key->user);
+
                kfree(key->description);
 
 #ifdef KEY_DEBUGGING
index 255dabc6fc3313debc4b943d02e80a15b9e7f70a..2a85e4209f0b74d6f169dd705ffbf6fc9a2d3402 100644 (file)
@@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        spin_lock_irq(&efw->lock);
 
        t = (struct snd_efw_transaction *)data;
-       length = min_t(size_t, t->length * sizeof(t->length), length);
+       length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
 
        if (efw->push_ptr < efw->pull_ptr)
                capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
index 5f13d2d180791fb4cd674ee52ffcdb84ffc9c2d3..b422e406a9cb3ba284772d4fbd6d42854de40b95 100644 (file)
@@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
+{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
+MODULE_ALIAS("snd-hda-codec-id:10de0072");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
index 4f6413e01c133567a2c01ccee10543d6fc82a864..605d14003d257cb645b3519541d18a9b3045790a 100644 (file)
@@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec)
                        spec->gpio_mask;
        }
        if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
-               spec->gpio_mask &= spec->gpio_mask;
-       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
                spec->gpio_dir &= spec->gpio_mask;
+       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+               spec->gpio_data &= spec->gpio_mask;
        if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
                spec->eapd_mask &= spec->gpio_mask;
        if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
index 81fe1464d2686661047aad6f2b9334f5d6a65c22..c0fbe18814398b03ebfbfaabdc535424185dd3fa 100644 (file)
@@ -784,8 +784,8 @@ static unsigned int bst_tlv[] = {
 static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
 
        ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
 
@@ -795,8 +795,9 @@ static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
 static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
                struct snd_ctl_elem_value *ucontrol)
 {
-       struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-       struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+       struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
 
        rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
 
index b93168d4f6489ee667d94d89a7271e6b4eba8ad5..8d18bbda661b66412dba2b93246bc7069cc61c80 100644 (file)
@@ -209,16 +209,9 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
 
        switch (config->chan_nr) {
        case EIGHT_CHANNEL_SUPPORT:
-               ch_reg = 3;
-               break;
        case SIX_CHANNEL_SUPPORT:
-               ch_reg = 2;
-               break;
        case FOUR_CHANNEL_SUPPORT:
-               ch_reg = 1;
-               break;
        case TWO_CHANNEL_SUPPORT:
-               ch_reg = 0;
                break;
        default:
                dev_err(dev->dev, "channel not supported\n");
@@ -227,18 +220,22 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
 
        i2s_disable_channels(dev, substream->stream);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution);
-               i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
-               irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
-               i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
-               i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
-       } else {
-               i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution);
-               i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
-               irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
-               i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
-               i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+       for (ch_reg = 0; ch_reg < (config->chan_nr / 2); ch_reg++) {
+               if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+                       i2s_write_reg(dev->i2s_base, TCR(ch_reg),
+                                     xfer_resolution);
+                       i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
+                       irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+                       i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
+                       i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
+               } else {
+                       i2s_write_reg(dev->i2s_base, RCR(ch_reg),
+                                     xfer_resolution);
+                       i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
+                       irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+                       i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
+                       i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+               }
        }
 
        i2s_write_reg(dev->i2s_base, CCR, ccr);
@@ -263,6 +260,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
        snd_soc_dai_set_dma_data(dai, substream, NULL);
 }
 
+static int dw_i2s_prepare(struct snd_pcm_substream *substream,
+                         struct snd_soc_dai *dai)
+{
+       struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               i2s_write_reg(dev->i2s_base, TXFFR, 1);
+       else
+               i2s_write_reg(dev->i2s_base, RXFFR, 1);
+
+       return 0;
+}
+
 static int dw_i2s_trigger(struct snd_pcm_substream *substream,
                int cmd, struct snd_soc_dai *dai)
 {
@@ -294,6 +304,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
        .startup        = dw_i2s_startup,
        .shutdown       = dw_i2s_shutdown,
        .hw_params      = dw_i2s_hw_params,
+       .prepare        = dw_i2s_prepare,
        .trigger        = dw_i2s_trigger,
 };
 
index e989ecf046c953a7ad79e7c464123909059a7f2a..f86de1211b966c300fd43411254e8fdcb994297f 100644 (file)
@@ -89,7 +89,7 @@ config SND_SOC_INTEL_BROADWELL_MACH
 
 config SND_SOC_INTEL_BYTCR_RT5640_MACH
        tristate "ASoC Audio DSP Support for MID BYT Platform"
-       depends on X86
+       depends on X86 && I2C
        select SND_SOC_RT5640
        select SND_SST_MFLD_PLATFORM
        select SND_SST_IPC_ACPI
@@ -101,7 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
 
 config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
         tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
-        depends on X86_INTEL_LPSS
+        depends on X86_INTEL_LPSS && I2C
         select SND_SOC_RT5670
         select SND_SST_MFLD_PLATFORM
         select SND_SST_IPC_ACPI
index f5d0fc1ab10c1efea603c568219b76735fc45db0..eef0c56ec32e8d8733c9780b5dc5d07ab56fc90f 100644 (file)
@@ -227,4 +227,4 @@ module_platform_driver(snd_byt_mc_driver);
 MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver");
 MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:bytrt5640-audio");
+MODULE_ALIAS("platform:bytt100_rt5640");
index 4a5bde9c686be2bbe967fcdd899f7c10f52a74c1..ef2e8b5766a1b92df8879541888b4761f1c2a09f 100644 (file)
@@ -763,8 +763,12 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
                /* does block span more than 1 section */
                if (ba->offset >= block->offset && ba->offset < block_end) {
 
+                       /* add block */
+                       list_move(&block->list, &dsp->used_block_list);
+                       list_add(&block->module_list, block_list);
                        /* align ba to block boundary */
-                       ba->offset = block->offset;
+                       ba->size -= block_end - ba->offset;
+                       ba->offset = block_end;
 
                        err = block_alloc_contiguous(dsp, ba, block_list);
                        if (err < 0)
index 3abc29e8a9287d133636d97fe74a96687c953bc0..2ac72eb5e75d82e1a6e8e11e4b176984692215d3 100644 (file)
@@ -343,7 +343,7 @@ int sst_acpi_remove(struct platform_device *pdev)
 }
 
 static struct sst_machines sst_acpi_bytcr[] = {
-       {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin",
+       {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin",
                                                &byt_rvp_platform_data },
        {},
 };
index 26ec5117b35c1ace8b5b089200dbd367b06bbbf0..13d8507333b8f8507679350aadc12df277d35249 100644 (file)
@@ -454,11 +454,11 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
 
        i2s->playback_dma_data.addr = res->start + I2S_TXDR;
        i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       i2s->playback_dma_data.maxburst = 16;
+       i2s->playback_dma_data.maxburst = 4;
 
        i2s->capture_dma_data.addr = res->start + I2S_RXDR;
        i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       i2s->capture_dma_data.maxburst = 16;
+       i2s->capture_dma_data.maxburst = 4;
 
        i2s->dev = &pdev->dev;
        dev_set_drvdata(&pdev->dev, i2s);
index 89a5d8bc6ee7c9a356a0a9f58e1b6e938c354dce..93f456f518a97dc9a048350093acae715852e10a 100644 (file)
 #define I2S_DMACR_TDE_DISABLE  (0 << I2S_DMACR_TDE_SHIFT)
 #define I2S_DMACR_TDE_ENABLE   (1 << I2S_DMACR_TDE_SHIFT)
 #define I2S_DMACR_TDL_SHIFT    0
-#define I2S_DMACR_TDL(x)       ((x - 1) << I2S_DMACR_TDL_SHIFT)
+#define I2S_DMACR_TDL(x)       ((x) << I2S_DMACR_TDL_SHIFT)
 #define I2S_DMACR_TDL_MASK     (0x1f << I2S_DMACR_TDL_SHIFT)
 
 /*
index 985052b3fbed375dee764a64b63852998532d5e2..2c62620abca691af4033552518eb7138ea6485d5 100644 (file)
@@ -3230,7 +3230,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                                   const char *propname)
 {
        struct device_node *np = card->dev->of_node;
-       int num_routes, old_routes;
+       int num_routes;
        struct snd_soc_dapm_route *routes;
        int i, ret;
 
@@ -3248,9 +3248,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                return -EINVAL;
        }
 
-       old_routes = card->num_dapm_routes;
-       routes = devm_kzalloc(card->dev,
-                             (old_routes + num_routes) * sizeof(*routes),
+       routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes),
                              GFP_KERNEL);
        if (!routes) {
                dev_err(card->dev,
@@ -3258,11 +3256,9 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                return -EINVAL;
        }
 
-       memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes));
-
        for (i = 0; i < num_routes; i++) {
                ret = of_property_read_string_index(np, propname,
-                       2 * i, &routes[old_routes + i].sink);
+                       2 * i, &routes[i].sink);
                if (ret) {
                        dev_err(card->dev,
                                "ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3270,7 +3266,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                        return -EINVAL;
                }
                ret = of_property_read_string_index(np, propname,
-                       (2 * i) + 1, &routes[old_routes + i].source);
+                       (2 * i) + 1, &routes[i].source);
                if (ret) {
                        dev_err(card->dev,
                                "ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3279,7 +3275,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                }
        }
 
-       card->num_dapm_routes += num_routes;
+       card->num_dapm_routes = num_routes;
        card->dapm_routes = routes;
 
        return 0;
index 272844746135763faa6425aae3fb8dc3bfc9ec50..327f8642ca80e66de2d0c880034cbd530f046dd1 100644 (file)
@@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
                return -EINVAL;
        }
 
-       if (cdev->n_streams < 2) {
+       if (cdev->n_streams < 1) {
                dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams);
                return -EINVAL;
        }
index 6eedba1f773227d5df93bae06d7f3a0fcc1bd648..653d1bad77de2331ba18771f99a01dddcee0175c 100644 (file)
@@ -22,6 +22,8 @@
 #error only <linux/bitops.h> can be included directly
 #endif
 
+#include <asm-generic/bitops/hweight.h>
+
 #include <asm-generic/bitops/atomic.h>
 
 #endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
diff --git a/tools/include/asm-generic/bitops/arch_hweight.h b/tools/include/asm-generic/bitops/arch_hweight.h
new file mode 100644 (file)
index 0000000..318bb2b
--- /dev/null
@@ -0,0 +1 @@
+#include "../../../../include/asm-generic/bitops/arch_hweight.h"
diff --git a/tools/include/asm-generic/bitops/const_hweight.h b/tools/include/asm-generic/bitops/const_hweight.h
new file mode 100644 (file)
index 0000000..0afd644
--- /dev/null
@@ -0,0 +1 @@
+#include "../../../../include/asm-generic/bitops/const_hweight.h"
diff --git a/tools/include/asm-generic/bitops/hweight.h b/tools/include/asm-generic/bitops/hweight.h
new file mode 100644 (file)
index 0000000..290120c
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
+
+#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ */
index 26005a15e7e29d34332b88e68a49fe223953913b..5ad9ee1dd7f6aed579a5e631e309438a80bb472f 100644 (file)
@@ -1,9 +1,9 @@
 #ifndef _TOOLS_LINUX_BITOPS_H_
 #define _TOOLS_LINUX_BITOPS_H_
 
+#include <asm/types.h>
 #include <linux/kernel.h>
 #include <linux/compiler.h>
-#include <asm/hweight.h>
 
 #ifndef __WORDSIZE
 #define __WORDSIZE (__SIZEOF_LONG__ * 8)
 #define BITS_TO_U32(nr)                DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
 #define BITS_TO_BYTES(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE)
 
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
 /*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
index a74fba6d774353d33fac7f04b71abdd241e0218e..86ea2d7b88451c219dacad848e60564fb1a64ecf 100644 (file)
@@ -67,7 +67,7 @@ int debugfs_valid_mountpoint(const char *debugfs)
 
        if (statfs(debugfs, &st_fs) < 0)
                return -ENOENT;
-       else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+       else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC)
                return -ENOENT;
 
        return 0;
index 65d9be3f988747ae300db30d3b62cf0e8213dd69..128ef6332a6bd89c0ddbeef283c4dfccbf5f8417 100644 (file)
@@ -79,7 +79,7 @@ static int fs__valid_mount(const char *fs, long magic)
 
        if (statfs(fs, &st_fs) < 0)
                return -ENOENT;
-       else if (st_fs.f_type != magic)
+       else if ((long)st_fs.f_type != magic)
                return -ENOENT;
 
        return 0;
index 6f803609e498246d277d35829b18c7924a136eca..0b0112c80f22b390875799909ab567b42c66009e 100644 (file)
@@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
         *
         * TODO: Hook into free() and add that check there as well.
         */
-       debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex));
+       debug_check_no_locks_freed(mutex, sizeof(*mutex));
        __del_lock(__get_lock(mutex));
        return ll_pthread_mutex_destroy(mutex);
 }
@@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
 {
        try_init_preload();
 
-       debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock));
+       debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
        __del_lock(__get_lock(rwlock));
        return ll_pthread_rwlock_destroy(rwlock);
 }
index 83e2887f91a39200612290bd0b2b2fbd824e660a..fbbfdc39271dac69bd96aa932d6e62318dbce143 100644 (file)
@@ -6,12 +6,15 @@ tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
 tools/include/asm/bug.h
+tools/include/asm-generic/bitops/arch_hweight.h
 tools/include/asm-generic/bitops/atomic.h
+tools/include/asm-generic/bitops/const_hweight.h
 tools/include/asm-generic/bitops/__ffs.h
 tools/include/asm-generic/bitops/__fls.h
 tools/include/asm-generic/bitops/find.h
 tools/include/asm-generic/bitops/fls64.h
 tools/include/asm-generic/bitops/fls.h
+tools/include/asm-generic/bitops/hweight.h
 tools/include/asm-generic/bitops.h
 tools/include/linux/bitops.h
 tools/include/linux/compiler.h
@@ -19,6 +22,8 @@ tools/include/linux/export.h
 tools/include/linux/hash.h
 tools/include/linux/log2.h
 tools/include/linux/types.h
+include/asm-generic/bitops/arch_hweight.h
+include/asm-generic/bitops/const_hweight.h
 include/asm-generic/bitops/fls64.h
 include/asm-generic/bitops/__fls.h
 include/asm-generic/bitops/fls.h
@@ -29,6 +34,7 @@ include/linux/list.h
 include/linux/hash.h
 include/linux/stringify.h
 lib/find_next_bit.c
+lib/hweight.c
 lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
index 67a03a825b3c94bb894f0f557dac3bb520943e65..aa6a50447c32b63fd3f55a5ada5059610d610ec1 100644 (file)
@@ -232,12 +232,15 @@ LIB_H += ../include/linux/hash.h
 LIB_H += ../../include/linux/stringify.h
 LIB_H += util/include/linux/bitmap.h
 LIB_H += ../include/linux/bitops.h
+LIB_H += ../include/asm-generic/bitops/arch_hweight.h
 LIB_H += ../include/asm-generic/bitops/atomic.h
+LIB_H += ../include/asm-generic/bitops/const_hweight.h
 LIB_H += ../include/asm-generic/bitops/find.h
 LIB_H += ../include/asm-generic/bitops/fls64.h
 LIB_H += ../include/asm-generic/bitops/fls.h
 LIB_H += ../include/asm-generic/bitops/__ffs.h
 LIB_H += ../include/asm-generic/bitops/__fls.h
+LIB_H += ../include/asm-generic/bitops/hweight.h
 LIB_H += ../include/asm-generic/bitops.h
 LIB_H += ../include/linux/compiler.h
 LIB_H += ../include/linux/log2.h
@@ -255,7 +258,6 @@ LIB_H += util/include/linux/linkage.h
 LIB_H += util/include/asm/asm-offsets.h
 LIB_H += ../include/asm/bug.h
 LIB_H += util/include/asm/byteorder.h
-LIB_H += util/include/asm/hweight.h
 LIB_H += util/include/asm/swab.h
 LIB_H += util/include/asm/system.h
 LIB_H += util/include/asm/uaccess.h
@@ -462,10 +464,12 @@ BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
 # Benchmark modules
 BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
 BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
-ifeq ($(RAW_ARCH),x86_64)
+ifeq ($(ARCH), x86)
+ifeq ($(IS_64_BIT), 1)
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
 endif
+endif
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
 BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
 BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
@@ -743,6 +747,9 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
 $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
+$(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS
+       $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
 $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
index 3bb50eac5542fc62c4482c92f4478806e731a9f7..0c370f81e00280c6428ddfe0975584edcb9d02a7 100644 (file)
@@ -103,7 +103,7 @@ static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc)
                return NULL;
        }
 
-       result = dwarf_cfi_addrframe(cfi, pc, &frame);
+       result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
        if (result) {
                pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
                return NULL;
@@ -128,7 +128,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
                return NULL;
        }
 
-       result = dwarf_cfi_addrframe(cfi, pc, &frame);
+       result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
        if (result) {
                pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
                return NULL;
@@ -145,7 +145,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
  *             yet used)
  *     -1 in case of errors
  */
-static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
+static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
 {
        int             rc = -1;
        Dwfl            *dwfl;
@@ -155,6 +155,7 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
        Dwarf_Addr      start = pc;
        Dwarf_Addr      end = pc;
        bool            signalp;
+       const char      *exec_file = dso->long_name;
 
        dwfl = dso->dwfl;
 
@@ -165,8 +166,10 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
                        return -1;
                }
 
-               if (dwfl_report_offline(dwfl, "", dso->long_name, -1) == NULL) {
-                       pr_debug("dwfl_report_offline() failed %s\n",
+               mod = dwfl_report_elf(dwfl, exec_file, exec_file, -1,
+                                               map_start, false);
+               if (!mod) {
+                       pr_debug("dwfl_report_elf() failed %s\n",
                                                dwarf_errmsg(-1));
                        /*
                         * We normally cache the DWARF debug info and never
@@ -256,10 +259,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
                return skip_slot;
        }
 
-       rc = check_return_addr(dso, ip);
+       rc = check_return_addr(dso, al.map->start, ip);
 
-       pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n",
-                               dso->long_name, chain->nr, ip, rc);
+       pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n",
+                               dso->long_name, al.sym->name, ip, rc);
 
        if (rc == 0) {
                /*
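The hunks above make dwarf_cfi_addrframe() take a bias-adjusted address and switch from dwfl_report_offline() to dwfl_report_elf() at the map's start address. A minimal sketch of that addressing convention, assuming elfutils' libdwfl API and eliding error handling:

#include <elfutils/libdwfl.h>

/* Sketch only: the CFI returned by dwfl_module_eh_cfi() covers addresses in
 * the module's own address space, so a run-time pc must have the reported
 * load bias subtracted before dwarf_cfi_addrframe() is asked for a frame. */
static Dwarf_Frame *frame_at(Dwfl_Module *mod, Dwarf_Addr pc)
{
        Dwarf_Addr bias = 0;
        Dwarf_Frame *frame = NULL;
        Dwarf_CFI *cfi = dwfl_module_eh_cfi(mod, &bias);

        if (cfi && dwarf_cfi_addrframe(cfi, pc - bias, &frame) == 0)
                return frame;   /* caller frees the frame */
        return NULL;
}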
index 07a8d7646a1549c61699f7311285238ef5ef93cf..005cc283790cfb7cf4db014362cb5d56f2a89ca2 100644 (file)
 #include <stdlib.h>
 #include <signal.h>
 #include <sys/wait.h>
-#include <linux/unistd.h>
 #include <string.h>
 #include <errno.h>
 #include <assert.h>
 #include <sys/time.h>
 #include <sys/types.h>
+#include <sys/syscall.h>
 
 #include <pthread.h>
 
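The include change above swaps the kernel-only <linux/unistd.h> for the userspace <sys/syscall.h>, which is where libc exposes the SYS_* numbers used with syscall(3). A small standalone example (illustrative, not from the patch):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Illustrative only: <sys/syscall.h> provides SYS_* constants for raw
 * syscall(3) invocations from userspace. */
int main(void)
{
        long tid = syscall(SYS_gettid);

        printf("tid = %ld\n", tid);
        return 0;
}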
index e7417fe97a9775eae8712d5bb58be2db0d1d7363..747f86103599826b6555563d25c8be25ae8f3d36 100644 (file)
@@ -232,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
                if (nr_samples > 0) {
                        total_nr_samples += nr_samples;
                        hists__collapse_resort(hists, NULL);
-                       hists__output_resort(hists);
+                       hists__output_resort(hists, NULL);
 
                        if (symbol_conf.event_group &&
                            !perf_evsel__is_group_leader(pos))
index 1ce425d101a99691121b62a2a0c879c4f362fdf5..1fd96c13f1998a4048cbc5ab3eef1df35a5f80b8 100644 (file)
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
        return __hist_entry__cmp_compute(p_left, p_right, c);
 }
 
+static int64_t
+hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+                   struct hist_entry *right __maybe_unused)
+{
+       return 0;
+}
+
+static int64_t
+hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+{
+       if (sort_compute)
+               return 0;
+
+       if (left->stat.period == right->stat.period)
+               return 0;
+       return left->stat.period > right->stat.period ? 1 : -1;
+}
+
+static int64_t
+hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+}
+
+static int64_t
+hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+}
+
+static int64_t
+hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+}
+
 static void insert_hist_entry_by_compute(struct rb_root *root,
                                         struct hist_entry *he,
                                         int c)
@@ -605,7 +641,7 @@ static void hists__process(struct hists *hists)
                hists__precompute(hists);
                hists__compute_resort(hists);
        } else {
-               hists__output_resort(hists);
+               hists__output_resort(hists, NULL);
        }
 
        hists__fprintf(hists, true, 0, 0, 0, stdout);
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
        fmt->header = hpp__header;
        fmt->width  = hpp__width;
        fmt->entry  = hpp__entry_global;
+       fmt->cmp    = hist_entry__cmp_nop;
+       fmt->collapse = hist_entry__cmp_nop;
 
        /* TODO more colors */
        switch (idx) {
        case PERF_HPP_DIFF__BASELINE:
                fmt->color = hpp__color_baseline;
+               fmt->sort  = hist_entry__cmp_baseline;
                break;
        case PERF_HPP_DIFF__DELTA:
                fmt->color = hpp__color_delta;
+               fmt->sort  = hist_entry__cmp_delta;
                break;
        case PERF_HPP_DIFF__RATIO:
                fmt->color = hpp__color_ratio;
+               fmt->sort  = hist_entry__cmp_ratio;
                break;
        case PERF_HPP_DIFF__WEIGHTED_DIFF:
                fmt->color = hpp__color_wdiff;
+               fmt->sort  = hist_entry__cmp_wdiff;
                break;
        default:
+               fmt->sort  = hist_entry__cmp_nop;
                break;
        }
 
        init_header(d, dfmt);
        perf_hpp__column_register(fmt);
+       perf_hpp__register_sort_field(fmt);
 }
 
 static void ui_init(void)
index 011195e38f2173947550100e62927e908b429d30..198f3c3aff952358766626f5bfea9ce81a996b28 100644 (file)
@@ -19,7 +19,9 @@
 int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        int i;
-       const struct option list_options[] = {
+       bool raw_dump = false;
+       struct option list_options[] = {
+               OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
                OPT_END()
        };
        const char * const list_usage[] = {
@@ -27,11 +29,18 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                NULL
        };
 
+       set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
+
        argc = parse_options(argc, argv, list_options, list_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
 
        setup_pager();
 
+       if (raw_dump) {
+               print_events(NULL, true);
+               return 0;
+       }
+
        if (argc == 0) {
                print_events(NULL, false);
                return 0;
@@ -53,8 +62,6 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                        print_hwcache_events(NULL, false);
                else if (strcmp(argv[i], "pmu") == 0)
                        print_pmu_events(NULL, false);
-               else if (strcmp(argv[i], "--raw-dump") == 0)
-                       print_events(NULL, true);
                else {
                        char *sep = strchr(argv[i], ':'), *s;
                        int sep_idx;
index 39367609c707bc0332d4fdbd4a05cf49f4cb78d5..072ae8ad67fc1d258354b621a3ae7b2833deba0c 100644 (file)
@@ -457,6 +457,19 @@ static void report__collapse_hists(struct report *rep)
        ui_progress__finish();
 }
 
+static void report__output_resort(struct report *rep)
+{
+       struct ui_progress prog;
+       struct perf_evsel *pos;
+
+       ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
+
+       evlist__for_each(rep->session->evlist, pos)
+               hists__output_resort(evsel__hists(pos), &prog);
+
+       ui_progress__finish();
+}
+
 static int __cmd_report(struct report *rep)
 {
        int ret;
@@ -505,13 +518,20 @@ static int __cmd_report(struct report *rep)
        if (session_done())
                return 0;
 
+       /*
+        * Recalculate the number of entries after collapsing, since it
+        * may have changed during the collapse phase.
+        */
+       rep->nr_entries = 0;
+       evlist__for_each(session->evlist, pos)
+               rep->nr_entries += evsel__hists(pos)->nr_entries;
+
        if (rep->nr_entries == 0) {
                ui__error("The %s file has no samples!\n", file->path);
                return 0;
        }
 
-       evlist__for_each(session->evlist, pos)
-               hists__output_resort(evsel__hists(pos));
+       report__output_resort(rep);
 
        return report__browse_hists(rep);
 }
index 0aa7747ff1390e0995a875a6c185697901cb9632..616f0fcb47010abf68ac7e4a9e256fa559af4499 100644 (file)
@@ -66,7 +66,6 @@
 #include <sys/utsname.h>
 #include <sys/mman.h>
 
-#include <linux/unistd.h>
 #include <linux/types.h>
 
 static volatile int done;
@@ -285,7 +284,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
        }
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        hists__output_recalc_col_len(hists, top->print_entries - printed);
        putchar('\n');
@@ -554,7 +553,7 @@ static void perf_top__sort_new_samples(void *arg)
        }
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 }
 
 static void *display_thread_tui(void *arg)
index 5d4b039fe1edc6ebf6f6dafec597e125383d0c4f..648e31ff4021c2e11520ab8b00e6f89213d324a9 100644 (file)
@@ -20,7 +20,7 @@ NO_PERF_REGS := 1
 
 # Additional ARCH settings for x86
 ifeq ($(ARCH),x86)
-  ifeq (${IS_X86_64}, 1)
+  ifeq (${IS_64_BIT}, 1)
     CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
     ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
     LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
index 851cd0172a7694a0e21fd18d07c79031d5f9b253..ff95a68741d1ccdb54e54d929f2292e88963a5d1 100644 (file)
@@ -1,7 +1,7 @@
 
 uname_M := $(shell uname -m 2>/dev/null || echo not)
 
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+RAW_ARCH := $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                   -e s/arm.*/arm/ -e s/sa110/arm/ \
                                   -e s/s390x/s390/ -e s/parisc64/parisc/ \
                                   -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
@@ -9,23 +9,23 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                   -e s/tile.*/tile/ )
 
 # Additional ARCH settings for x86
-ifeq ($(ARCH),i386)
-  override ARCH := x86
+ifeq ($(RAW_ARCH),i386)
+  ARCH ?= x86
 endif
 
-ifeq ($(ARCH),x86_64)
-  override ARCH := x86
-  IS_X86_64 := 0
-  ifeq (, $(findstring m32,$(CFLAGS)))
-    IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
-    RAW_ARCH := x86_64
+ifeq ($(RAW_ARCH),x86_64)
+  ARCH ?= x86
+
+  ifneq (, $(findstring m32,$(CFLAGS)))
+    RAW_ARCH := x86_32
   endif
 endif
 
-ifeq (${IS_X86_64}, 1)
+ARCH ?= $(RAW_ARCH)
+
+LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ifeq ($(LP64), 1)
   IS_64_BIT := 1
-else ifeq ($(ARCH),x86)
-  IS_64_BIT := 0
 else
-  IS_64_BIT := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+  IS_64_BIT := 0
 endif
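The rewritten Makefile.arch above derives IS_64_BIT from the compiler's __LP64__ predefine instead of special-casing x86_64: the probe pipes __LP64__ through the preprocessor, which expands it to 1 on LP64 targets and leaves the identifier untouched elsewhere. A tiny C illustration of the same predefine (names illustrative):

#include <stdio.h>

/* Illustrative only: __LP64__ is predefined (to 1) by the compiler when long
 * and pointers are 64-bit, which is exactly what the Makefile probe
 * "echo __LP64__ | $(CC) -E -x c - | tail -n 1" keys off. */
int main(void)
{
#ifdef __LP64__
        printf("LP64 target: __LP64__ = %d\n", __LP64__);
#else
        printf("not an LP64 target\n");
#endif
        return 0;
}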
index a3b13d7dc1d43f3caf301b4d9f941c60d88ed37c..6ef68165c9db628d23bbe85b48945ac2581ec979 100644 (file)
@@ -6,7 +6,6 @@
 #include <sys/syscall.h>
 #include <linux/types.h>
 #include <linux/perf_event.h>
-#include <asm/unistd.h>
 
 #if defined(__i386__)
 #define mb()           asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
index ab28cca2cb97ad436dd7c419ee4ee7fa37ecb0b9..0bf06bec68c7e9786668990ad399b578326726c5 100644 (file)
@@ -11,6 +11,9 @@
 #include "thread.h"
 #include "callchain.h"
 
+/* For bsearch(3). We try to unwind through functions in a shared object. */
+#include <stdlib.h>
+
 static int mmap_handler(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample __maybe_unused,
@@ -28,7 +31,7 @@ static int init_live_machine(struct machine *machine)
                                                  mmap_handler, machine, true);
 }
 
-#define MAX_STACK 6
+#define MAX_STACK 8
 
 static int unwind_entry(struct unwind_entry *entry, void *arg)
 {
@@ -37,6 +40,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
        static const char *funcs[MAX_STACK] = {
                "test__arch_unwind_sample",
                "unwind_thread",
+               "compare",
+               "bsearch",
                "krava_3",
                "krava_2",
                "krava_1",
@@ -88,10 +93,37 @@ static int unwind_thread(struct thread *thread)
        return err;
 }
 
+static int global_unwind_retval = -INT_MAX;
+
+__attribute__ ((noinline))
+static int compare(void *p1, void *p2)
+{
+       /* p1 points at either the key or an array element; both are 'thread' */
+       struct thread *thread = *(struct thread **)p1;
+
+       if (global_unwind_retval == -INT_MAX)
+               global_unwind_retval = unwind_thread(thread);
+
+       return p1 - p2;
+}
+
 __attribute__ ((noinline))
 static int krava_3(struct thread *thread)
 {
-       return unwind_thread(thread);
+       struct thread *array[2] = {thread, thread};
+       void *fp = &bsearch;
+       /*
+        * Make _bsearch a volatile function pointer to prevent the
+        * compiler from optimizing the call: it might otherwise expand
+        * bsearch and call compare directly from this function rather
+        * than from the libc shared object.
+        */
+       void *(*volatile _bsearch)(void *, void *, size_t,
+                       size_t, int (*)(void *, void *));
+
+       _bsearch = fp;
+       _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
+       return global_unwind_retval;
 }
 
 __attribute__ ((noinline))
index 614d5c4978ab6509559eff9f275dc594310020f2..8d110dec393ee1a42f78cb1b440ea9d19f825e1c 100644 (file)
@@ -187,7 +187,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
         * function since TEST_ASSERT_VAL() returns in case of failure.
         */
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("use callchain: %d, cumulate callchain: %d\n",
@@ -454,12 +454,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
         *   30.00%    10.00%     perf  perf           [.] cmd_record
         *   20.00%     0.00%     bash  libc           [.] malloc
         *   10.00%    10.00%     bash  [kernel]       [k] page_fault
-        *   10.00%    10.00%     perf  [kernel]       [k] schedule
-        *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
+        *   10.00%    10.00%     bash  bash           [.] xmalloc
         *   10.00%    10.00%     perf  [kernel]       [k] page_fault
-        *   10.00%    10.00%     perf  libc           [.] free
         *   10.00%    10.00%     perf  libc           [.] malloc
-        *   10.00%    10.00%     bash  bash           [.] xmalloc
+        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *   10.00%    10.00%     perf  libc           [.] free
+        *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
         */
        struct result expected[] = {
                { 7000, 2000, "perf", "perf",     "main" },
@@ -468,12 +468,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
                { 3000, 1000, "perf", "perf",     "cmd_record" },
                { 2000,    0, "bash", "libc",     "malloc" },
                { 1000, 1000, "bash", "[kernel]", "page_fault" },
-               { 1000, 1000, "perf", "[kernel]", "schedule" },
-               { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
+               { 1000, 1000, "bash", "bash",     "xmalloc" },
                { 1000, 1000, "perf", "[kernel]", "page_fault" },
+               { 1000, 1000, "perf", "[kernel]", "schedule" },
                { 1000, 1000, "perf", "libc",     "free" },
                { 1000, 1000, "perf", "libc",     "malloc" },
-               { 1000, 1000, "bash", "bash",     "xmalloc" },
+               { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
        };
 
        symbol_conf.use_callchain = false;
@@ -537,10 +537,13 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  malloc
         *                  main
         *
-        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *   10.00%    10.00%     bash  bash           [.] xmalloc
         *              |
-        *              --- schedule
-        *                  run_command
+        *              --- xmalloc
+        *                  malloc
+        *                  xmalloc     <--- NOTE: there's a cycle
+        *                  malloc
+        *                  xmalloc
         *                  main
         *
         *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
@@ -556,6 +559,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  run_command
         *                  main
         *
+        *   10.00%    10.00%     perf  [kernel]       [k] schedule
+        *              |
+        *              --- schedule
+        *                  run_command
+        *                  main
+        *
         *   10.00%    10.00%     perf  libc           [.] free
         *              |
         *              --- free
@@ -570,15 +579,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
         *                  run_command
         *                  main
         *
-        *   10.00%    10.00%     bash  bash           [.] xmalloc
-        *              |
-        *              --- xmalloc
-        *                  malloc
-        *                  xmalloc     <--- NOTE: there's a cycle
-        *                  malloc
-        *                  xmalloc
-        *                  main
-        *
         */
        struct result expected[] = {
                { 7000, 2000, "perf", "perf",     "main" },
@@ -587,12 +587,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                { 3000, 1000, "perf", "perf",     "cmd_record" },
                { 2000,    0, "bash", "libc",     "malloc" },
                { 1000, 1000, "bash", "[kernel]", "page_fault" },
-               { 1000, 1000, "perf", "[kernel]", "schedule" },
+               { 1000, 1000, "bash", "bash",     "xmalloc" },
                { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
                { 1000, 1000, "perf", "[kernel]", "page_fault" },
+               { 1000, 1000, "perf", "[kernel]", "schedule" },
                { 1000, 1000, "perf", "libc",     "free" },
                { 1000, 1000, "perf", "libc",     "malloc" },
-               { 1000, 1000, "bash", "bash",     "xmalloc" },
        };
        struct callchain_result expected_callchain[] = {
                {
@@ -622,9 +622,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "bash",     "main" }, },
                },
                {
-                       3, {    { "[kernel]", "schedule" },
-                               { "perf",     "run_command" },
-                               { "perf",     "main" }, },
+                       6, {    { "bash",     "xmalloc" },
+                               { "libc",     "malloc" },
+                               { "bash",     "xmalloc" },
+                               { "libc",     "malloc" },
+                               { "bash",     "xmalloc" },
+                               { "bash",     "main" }, },
                },
                {
                        3, {    { "[kernel]", "sys_perf_event_open" },
@@ -637,6 +640,11 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "perf",     "run_command" },
                                { "perf",     "main" }, },
                },
+               {
+                       3, {    { "[kernel]", "schedule" },
+                               { "perf",     "run_command" },
+                               { "perf",     "main" }, },
+               },
                {
                        4, {    { "libc",     "free" },
                                { "perf",     "cmd_record" },
@@ -649,14 +657,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                                { "perf",     "run_command" },
                                { "perf",     "main" }, },
                },
-               {
-                       6, {    { "bash",     "xmalloc" },
-                               { "libc",     "malloc" },
-                               { "bash",     "xmalloc" },
-                               { "libc",     "malloc" },
-                               { "bash",     "xmalloc" },
-                               { "bash",     "main" }, },
-               },
        };
 
        symbol_conf.use_callchain = true;
index 74f257a812653177f9d334d7a25df8359d8ef3d4..59e53db7914c0ad6100ab2e616cdf21e39efea46 100644 (file)
@@ -138,7 +138,7 @@ int test__hists_filter(void)
                struct hists *hists = evsel__hists(evsel);
 
                hists__collapse_resort(hists, NULL);
-               hists__output_resort(hists);
+               hists__output_resort(hists, NULL);
 
                if (verbose > 2) {
                        pr_info("Normal histogram\n");
index a748f2be1222e3d44791eebacaf8c53174a617bf..f5547610da0200b70c0bdc1a006adaee925eba73 100644 (file)
@@ -152,7 +152,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -252,7 +252,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -306,7 +306,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -384,7 +384,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -487,7 +487,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists);
+       hists__output_resort(hists, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
index e6bb04b5b09b863013e4d361120269d59f6207c6..788506eef5671da5e64016063569b79d4e060d97 100644 (file)
@@ -550,7 +550,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
        bool need_percent;
 
        node = rb_first(root);
-       need_percent = !!rb_next(node);
+       need_percent = node && rb_next(node);
 
        while (node) {
                struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
index dc0d095f318c7da2868352d5a4c048a5dde40251..482adae3cc44a50889bb2278b323a3b6871197c6 100644 (file)
@@ -204,6 +204,9 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
                if (ret)
                        return ret;
 
+               if (a->thread != b->thread || !symbol_conf.use_callchain)
+                       return 0;
+
                ret = b->callchain->max_depth - a->callchain->max_depth;
        }
        return ret;
index 2f612562978cdc13c7e89b6dbddd24c9f928d626..3c38f25b1695cdd289808d0d9f5ea858f06fc5db 100644 (file)
@@ -1,5 +1,8 @@
 #include <signal.h>
 #include <stdbool.h>
+#ifdef HAVE_BACKTRACE_SUPPORT
+#include <execinfo.h>
+#endif
 
 #include "../../util/cache.h"
 #include "../../util/debug.h"
@@ -88,6 +91,25 @@ int ui__getch(int delay_secs)
        return SLkp_getkey();
 }
 
+#ifdef HAVE_BACKTRACE_SUPPORT
+static void ui__signal_backtrace(int sig)
+{
+       void *stackdump[32];
+       size_t size;
+
+       ui__exit(false);
+       psignal(sig, "perf");
+
+       printf("-------- backtrace --------\n");
+       size = backtrace(stackdump, ARRAY_SIZE(stackdump));
+       backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
+
+       exit(0);
+}
+#else
+# define ui__signal_backtrace  ui__signal
+#endif
+
 static void ui__signal(int sig)
 {
        ui__exit(false);
@@ -122,8 +144,8 @@ int ui__init(void)
        ui_browser__init();
        tui_progress__init();
 
-       signal(SIGSEGV, ui__signal);
-       signal(SIGFPE, ui__signal);
+       signal(SIGSEGV, ui__signal_backtrace);
+       signal(SIGFPE, ui__signal_backtrace);
        signal(SIGINT, ui__signal);
        signal(SIGQUIT, ui__signal);
        signal(SIGTERM, ui__signal);
index 0784a9420528603efa9450a8c72c78ebf271298a..cadbdc90a5cbf319385cb67aa5a88c06cdf107dc 100644 (file)
@@ -116,11 +116,6 @@ struct annotation {
        struct annotated_source *src;
 };
 
-struct sannotation {
-       struct annotation annotation;
-       struct symbol     symbol;
-};
-
 static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
 {
        return (((void *)&notes->src->histograms) +
@@ -129,8 +124,7 @@ static inline struct sym_hist *annotation__histogram(struct annotation *notes, i
 
 static inline struct annotation *symbol__annotation(struct symbol *sym)
 {
-       struct sannotation *a = container_of(sym, struct sannotation, symbol);
-       return &a->annotation;
+       return (void *)sym - symbol_conf.priv_size;
 }
 
 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
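symbol__annotation() above stops using a wrapper struct and instead steps back symbol_conf.priv_size bytes from the symbol, i.e. the annotation lives in a private area allocated just in front of it. A rough sketch of that layout, with hypothetical names:

#include <stdlib.h>

/* Minimal sketch (hypothetical names) of the "private area in front of the
 * object" pattern: priv_size extra bytes are allocated immediately before
 * each object, and recovered by stepping back priv_size bytes. */
static size_t priv_size = 64;   /* stands in for symbol_conf.priv_size */

static void *obj_new(size_t obj_size)
{
        char *p = calloc(1, priv_size + obj_size);

        return p ? p + priv_size : NULL;        /* callers only see the object */
}

static void *obj_priv(void *obj)
{
        return (char *)obj - priv_size;         /* mirrors symbol__annotation() */
}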
index 5cf9e1b5989de40cb677b1bd744d684cd3bfc871..d04d770d90f6e29bc17d4d5d1299e76278b5ee1c 100644 (file)
@@ -71,7 +71,9 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
 extern char *perf_pathdup(const char *fmt, ...)
        __attribute__((format (printf, 1, 2)));
 
+#ifndef __UCLIBC__
 /* Matches the libc/libbsd function attribute; uClibc already provides strlcpy, so the declaration is skipped there: */
 extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
 
 #endif /* __PERF_CACHE_H */
index 64b377e591e457746138173cfa59533f887e3d56..14e7a123d43b3f4ab4e04a5aba7448bd5d1106cd 100644 (file)
@@ -841,3 +841,33 @@ char *callchain_list__sym_name(struct callchain_list *cl,
 
        return bf;
 }
+
+static void free_callchain_node(struct callchain_node *node)
+{
+       struct callchain_list *list, *tmp;
+       struct callchain_node *child;
+       struct rb_node *n;
+
+       list_for_each_entry_safe(list, tmp, &node->val, list) {
+               list_del(&list->list);
+               free(list);
+       }
+
+       n = rb_first(&node->rb_root_in);
+       while (n) {
+               child = container_of(n, struct callchain_node, rb_node_in);
+               n = rb_next(n);
+               rb_erase(&child->rb_node_in, &node->rb_root_in);
+
+               free_callchain_node(child);
+               free(child);
+       }
+}
+
+void free_callchain(struct callchain_root *root)
+{
+       if (!symbol_conf.use_callchain)
+               return;
+
+       free_callchain_node(&root->node);
+}
index dbc08cf5f970a2f25e9451ca5e259a38f5cdbfe1..c0ec1acc38e404aa599b5b6635d004ac2f0e204f 100644 (file)
@@ -198,4 +198,6 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
 char *callchain_list__sym_name(struct callchain_list *cl,
                               char *bf, size_t bfsize, bool show_dso);
 
+void free_callchain(struct callchain_root *root);
+
 #endif /* __PERF_CALLCHAIN_H */
index 6e88b9e395df67abb0458eea80878112acab0b0a..182395546ddca63d919886f4b49896fbdd46e3e2 100644 (file)
@@ -6,6 +6,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "annotate.h"
+#include "ui/progress.h"
 #include <math.h>
 
 static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -303,7 +304,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
        size_t callchain_size = 0;
        struct hist_entry *he;
 
-       if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
+       if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);
 
        he = zalloc(sizeof(*he) + callchain_size);
@@ -736,7 +737,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
        iter->he = he;
        he_cache[iter->curr++] = he;
 
-       callchain_append(he->callchain, &callchain_cursor, sample->period);
+       hist_entry__append_callchain(he, sample);
 
        /*
         * We need to re-initialize the cursor since callchain_append()
@@ -809,7 +810,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
        iter->he = he;
        he_cache[iter->curr++] = he;
 
-       callchain_append(he->callchain, &cursor, sample->period);
+       if (symbol_conf.use_callchain)
+               callchain_append(he->callchain, &cursor, sample->period);
        return 0;
 }
 
@@ -945,6 +947,7 @@ void hist_entry__free(struct hist_entry *he)
        zfree(&he->mem_info);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
+       free_callchain(he->callchain);
        free(he);
 }
 
@@ -987,6 +990,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                else
                        p = &(*p)->rb_right;
        }
+       hists->nr_entries++;
 
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
@@ -1024,7 +1028,10 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
        if (!sort__need_collapse)
                return;
 
+       hists->nr_entries = 0;
+
        root = hists__get_rotate_entries_in(hists);
+
        next = rb_first(root);
 
        while (next) {
@@ -1119,7 +1126,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
        rb_insert_color(&he->rb_node, entries);
 }
 
-void hists__output_resort(struct hists *hists)
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
 {
        struct rb_root *root;
        struct rb_node *next;
@@ -1148,6 +1155,9 @@ void hists__output_resort(struct hists *hists)
 
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
+
+               if (prog)
+                       ui_progress__update(prog, 1);
        }
 }
 
index d0ef9a19a7445caaf7bdc1d21b42ea2d1087a2a9..46bd50344f853f8f55f43bc23cd95f8459e53cab 100644 (file)
@@ -121,7 +121,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
                              struct hists *hists);
 void hist_entry__free(struct hist_entry *);
 
-void hists__output_resort(struct hists *hists);
+void hists__output_resort(struct hists *hists, struct ui_progress *prog);
 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
 
 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
deleted file mode 100644 (file)
index 5c1d0d0..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <linux/bitops.h>
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-unsigned int hweight32(unsigned int w)
-{
-       unsigned int res = w - ((w >> 1) & 0x55555555);
-       res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
-       res = (res + (res >> 4)) & 0x0F0F0F0F;
-       res = res + (res >> 8);
-       return (res + (res >> 16)) & 0x000000FF;
-}
-
-unsigned long hweight64(__u64 w)
-{
-#if BITS_PER_LONG == 32
-       return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
-#elif BITS_PER_LONG == 64
-       __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
-       res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
-       res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
-       res = res + (res >> 8);
-       res = res + (res >> 16);
-       return (res + (res >> 32)) & 0x00000000000000FFul;
-#endif
-}
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
deleted file mode 100644 (file)
index 36cf26d..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef PERF_HWEIGHT_H
-#define PERF_HWEIGHT_H
-
-#include <linux/types.h>
-unsigned int hweight32(unsigned int w);
-unsigned long hweight64(__u64 w);
-
-#endif /* PERF_HWEIGHT_H */
index 94de3e48b4909a03a7e7037f779074e5bf31ff8a..1bca3a9f2b16bc91670f731e05564680d2f12a10 100644 (file)
@@ -389,7 +389,6 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &machine->threads);
-               machine->last_match = th;
 
                /*
                 * We have to initialize map_groups separately
@@ -400,9 +399,12 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
                 * leader and that would screwed the rb tree.
                 */
                if (thread__init_map_groups(th, machine)) {
+                       rb_erase(&th->rb_node, &machine->threads);
                        thread__delete(th);
                        return NULL;
                }
+
+               machine->last_match = th;
        }
 
        return th;
index 28eb1417cb2a3fc5d3acebc2280cf37ac79ad778..94a717bf007de77658032dcb03dcc6db818b451b 100644 (file)
@@ -495,9 +495,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
        }
 
        if (ntevs == 0) {       /* No error but failed to find probe point. */
-               pr_warning("Probe point '%s' not found.\n",
+               pr_warning("Probe point '%s' not found in debuginfo.\n",
                           synthesize_perf_probe_point(&pev->point));
-               return -ENOENT;
+               if (need_dwarf)
+                       return -ENOENT;
+               return 0;
        }
        /* Error path : ntevs < 0 */
        pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
@@ -2050,9 +2052,11 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
        pr_debug("Writing event: %s\n", buf);
        if (!probe_event_dry_run) {
                ret = write(fd, buf, strlen(buf));
-               if (ret <= 0)
+               if (ret <= 0) {
+                       ret = -errno;
                        pr_warning("Failed to write event: %s\n",
                                   strerror_r(errno, sbuf, sizeof(sbuf)));
+               }
        }
        free(buf);
        return ret;
index c7918f83b300086649f522bc5f663d9a5a88a5dc..b5247d777f0e9348d1b77e3f33813c5b7713bce4 100644 (file)
@@ -989,8 +989,24 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
        int ret = 0;
 
 #if _ELFUTILS_PREREQ(0, 142)
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       GElf_Shdr shdr;
+
        /* Get the call frame information from this dwarf */
-       pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg));
+       elf = dwarf_getelf(dbg->dbg);
+       if (elf == NULL)
+               return -EINVAL;
+
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               return -EINVAL;
+
+       if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+           shdr.sh_type == SHT_PROGBITS) {
+               pf->cfi = dwarf_getcfi_elf(elf);
+       } else {
+               pf->cfi = dwarf_getcfi(dbg->dbg);
+       }
 #endif
 
        off = 0;
index 16a475a7d492177623062143434488116cdf2a38..6c6a6953fa93fa5b4fe92229df7613bbe996bec9 100644 (file)
@@ -10,7 +10,7 @@ util/ctype.c
 util/evlist.c
 util/evsel.c
 util/cpumap.c
-util/hweight.c
+../../lib/hweight.c
 util/thread_map.c
 util/util.c
 util/xyarray.c
index 371219a6daf1cd8209687115bca9a67f7ffc3209..6edf535f65c23428b4982fb651ba3df997d55dfb 100644 (file)
@@ -185,6 +185,28 @@ static u64 elf_section_offset(int fd, const char *name)
        return offset;
 }
 
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int elf_is_exec(int fd, const char *name)
+{
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       int retval = 0;
+
+       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       if (elf == NULL)
+               return 0;
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               goto out;
+
+       retval = (ehdr.e_type == ET_EXEC);
+
+out:
+       elf_end(elf);
+       pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval);
+       return retval;
+}
+#endif
+
 struct table_entry {
        u32 start_ip_offset;
        u32 fde_offset;
@@ -322,8 +344,12 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
 #ifndef NO_LIBUNWIND_DEBUG_FRAME
        /* Check the .debug_frame section for unwinding info */
        if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+               int fd = dso__data_fd(map->dso, ui->machine);
+               int is_exec = elf_is_exec(fd, map->dso->name);
+               unw_word_t base = is_exec ? 0 : map->start;
+
                memset(&di, 0, sizeof(di));
-               if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+               if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
                                           map->start, map->end))
                        return dwarf_search_unwind_table(as, ip, &di, pi,
                                                         need_unwind_info, arg);
index 7cdcf88659c77d75196b3e078b89b657bf53a26a..9ea91437898598bf31982f6073ec483e3d51f687 100644 (file)
@@ -199,7 +199,7 @@ int main(int argc, const char *argv[])
        }
 
        get_cpu_info(0, &cpupower_cpu_info);
-       run_as_root = !getuid();
+       run_as_root = !geteuid();
        if (run_as_root) {
                ret = uname(&uts);
                if (!ret && !strcmp(uts.machine, "x86_64") &&
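The cpupower change above switches the root check from getuid() to geteuid(): the effective UID is what the kernel consults for access to the interfaces cpupower uses, so a privilege-elevated invocation is detected correctly. A standalone illustration (not from the patch):

#include <stdio.h>
#include <unistd.h>

/* Illustrative only: getuid() reports the real user, geteuid() the effective
 * one; for a set-uid binary the two differ, and access checks follow the
 * effective ID. */
int main(void)
{
        printf("real uid %u, effective uid %u\n",
               (unsigned)getuid(), (unsigned)geteuid());
        return 0;
}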
index 09afe5d87f2bbe34387e1257cfcc67bd5d16d4dc..4e8fe2c7b05475ca8e6d015dd20228c5a53b957f 100644 (file)
@@ -361,7 +361,7 @@ unsigned int sysfs_get_idlestate_count(unsigned int cpu)
 
        snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
        if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
-               return -ENODEV;
+               return 0;
 
        snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
        if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
index 33a5c06d95caa038f682c411ad26df0788d16f5b..e238c9559caf9a7757d2d389e0bda57cd73229a8 100644 (file)
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags,
 }
 
 static int check_execveat_invoked_rc(int fd, const char *path, int flags,
-                                    int expected_rc)
+                                    int expected_rc, int expected_rc2)
 {
        int status;
        int rc;
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
                        child, status);
                return 1;
        }
-       if (WEXITSTATUS(status) != expected_rc) {
-               printf("[FAIL] (child %d exited with %d not %d)\n",
-                       child, WEXITSTATUS(status), expected_rc);
+       if ((WEXITSTATUS(status) != expected_rc) &&
+           (WEXITSTATUS(status) != expected_rc2)) {
+               printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
+                       child, WEXITSTATUS(status), expected_rc, expected_rc2);
                return 1;
        }
        printf("[OK]\n");
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
 
 static int check_execveat(int fd, const char *path, int flags)
 {
-       return check_execveat_invoked_rc(fd, path, flags, 99);
+       return check_execveat_invoked_rc(fd, path, flags, 99, 99);
 }
 
 static char *concat(const char *left, const char *right)
@@ -179,11 +180,11 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
         */
        fd = open(longpath, O_RDONLY);
        if (fd > 0) {
-               printf("Invoke copy of '%s' via filename of length %lu:\n",
+               printf("Invoke copy of '%s' via filename of length %zu:\n",
                        src, strlen(longpath));
                fail += check_execveat(fd, "", AT_EMPTY_PATH);
        } else {
-               printf("Failed to open length %lu filename, errno=%d (%s)\n",
+               printf("Failed to open length %zu filename, errno=%d (%s)\n",
                        strlen(longpath), errno, strerror(errno));
                fail++;
        }
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
         * Execute as a long pathname relative to ".".  If this is a script,
         * the interpreter will launch but fail to open the script because its
         * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
+        *
+        * The failure code is usually 127 (POSIX: "If a command is not found,
+        * the exit status shall be 127."), but some systems give 126 (POSIX:
+        * "If the command name is found, but it is not an executable utility,
+        * the exit status shall be 126."), so allow either.
         */
        if (is_script)
-               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127);
+               fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
+                                                 127, 126);
        else
                fail += check_execveat(dot_dfd, longpath, 0);
 
index 94dae65eea4183b43bf3e52bf9bf22dc9c0d4471..8519e9ee97e3d3e4a344e798cabf5b38edfff602 100644 (file)
@@ -536,10 +536,9 @@ int main(int argc, char *argv[])
 {
        struct mq_attr attr;
        char *option, *next_option;
-       int i, cpu;
+       int i, cpu, rc;
        struct sigaction sa;
        poptContext popt_context;
-       char rc;
        void *retval;
 
        main_thread = pthread_self();
index 4c4b1f631ecf61f6e3048d746c23be39ea4f2ef9..077828c889f1377886b98c93349919d55ccb57a2 100644 (file)
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress
 
 all: $(BINARIES)
 %: %.c
-       $(CC) $(CFLAGS) -o $@ $^
+       $(CC) $(CFLAGS) -o $@ $^ -lrt
 
 run_tests: all
        @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)