Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 24 Dec 2016 00:54:46 +0000 (16:54 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 24 Dec 2016 00:54:46 +0000 (16:54 -0800)
Pull x86 fixes from Ingo Molnar:
 "There's a number of fixes:

   - a round of fixes for CPUID-less legacy CPUs
   - a number of microcode loader fixes
   - i8042 detection robustness fixes
   - stack dump/unwinder fixes
   - x86 SoC platform driver fixes
   - a GCC 7 warning fix
   - virtualization related fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  Revert "x86/unwind: Detect bad stack return address"
  x86/paravirt: Mark unused patch_default label
  x86/microcode/AMD: Reload proper initrd start address
  x86/platform/intel/quark: Add printf attribute to imr_self_test_result()
  x86/platform/intel-mid: Switch MPU3050 driver to IIO
  x86/alternatives: Do not use sync_core() to serialize I$
  x86/topology: Document cpu_llc_id
  x86/hyperv: Handle unknown NMIs on one CPU when unknown_nmi_panic
  x86/asm: Rewrite sync_core() to use IRET-to-self
  x86/microcode/intel: Replace sync_core() with native_cpuid()
  Revert "x86/boot: Fail the boot if !M486 and CPUID is missing"
  x86/asm/32: Make sync_core() handle missing CPUID on all 32-bit kernels
  x86/cpu: Probe CPUID leaf 6 even when cpuid_level == 6
  x86/tools: Fix gcc-7 warning in relocs.c
  x86/unwind: Dump stack data on warnings
  x86/unwind: Adjust last frame check for aligned function stacks
  x86/init: Fix a couple of comment typos
  x86/init: Remove i8042_detect() from platform ops
  Input: i8042 - Trust firmware a bit more when probing on X86
  x86/init: Add i8042 state to the platform data
  ...

516 files changed:
CREDITS
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/mfd/altera-a10sr.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/qcom-pm8xxx.txt
Documentation/devicetree/bindings/mfd/rn5t618.txt
Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/regulator/tps65218.txt
Documentation/features/io/dma-api-debug/arch-support.txt
Documentation/features/io/dma-contiguous/arch-support.txt
Documentation/features/io/sg-chain/arch-support.txt
Documentation/scsi/g_NCR5380.txt
Documentation/sphinx/rstFlatTable.py
Documentation/virtual/kvm/locking.txt
Documentation/x86/intel_rdt_ui.txt [new file with mode: 0644]
MAINTAINERS
arch/Kconfig
arch/arc/Kconfig
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/entry-compact.S
arch/arc/kernel/intc-arcv2.c
arch/arc/mm/cache.c
arch/arm/boot/dts/hisi-x5hd2.dtsi
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/memory.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/setup.c
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/cpu/cpuinfo.c
arch/microblaze/kernel/syscall_table.S
arch/microblaze/kernel/timer.c
arch/parisc/Kconfig
arch/parisc/include/asm/elf.h
arch/parisc/include/asm/pdcpat.h
arch/parisc/include/asm/processor.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/firmware.c
arch/parisc/kernel/inventory.c
arch/parisc/kernel/perf.c
arch/parisc/kernel/process.c
arch/parisc/kernel/processor.c
arch/parisc/kernel/sys_parisc.c
arch/parisc/kernel/time.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/ima.h [new file with mode: 0644]
arch/powerpc/include/asm/kexec.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/ima_kexec.c [new file with mode: 0644]
arch/powerpc/kernel/kexec_elf_64.c
arch/powerpc/kernel/machine_kexec_file_64.c
arch/powerpc/platforms/85xx/corenet_generic.c
arch/x86/Kconfig
arch/x86/events/intel/core.c
arch/x86/events/intel/cqm.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/intel_rdt.h [new file with mode: 0644]
arch/x86/include/asm/intel_rdt_common.h [new file with mode: 0644]
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/intel_rdt.c [new file with mode: 0644]
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c [new file with mode: 0644]
arch/x86/kernel/cpu/intel_rdt_schemata.c [new file with mode: 0644]
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/xtensa/Kconfig
arch/xtensa/boot/dts/kc705.dts
arch/xtensa/include/asm/Kbuild
arch/xtensa/kernel/Makefile
arch/xtensa/kernel/pci-dma.c
arch/xtensa/kernel/s32c1i_selftest.c [new file with mode: 0644]
arch/xtensa/kernel/setup.c
arch/xtensa/mm/init.c
block/bsg.c
block/ioctl.c
block/scsi_ioctl.c
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/bus.c
drivers/acpi/nfit/core.c
drivers/acpi/osl.c
drivers/acpi/processor_core.c
drivers/acpi/scan.c
drivers/acpi/spcr.c
drivers/acpi/tables.c
drivers/base/cacheinfo.c
drivers/clocksource/moxart_timer.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/s3c64xx-cpufreq.c
drivers/firmware/dmi_scan.c
drivers/gpio/gpio-tps65218.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-octeon-core.c
drivers/i2c/busses/i2c-octeon-core.h
drivers/i2c/busses/i2c-xgene-slimpro.c
drivers/i2c/muxes/i2c-mux-mlxcpld.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_puda.c
drivers/infiniband/hw/i40iw/i40iw_type.h
drivers/infiniband/hw/i40iw/i40iw_ucontext.h
drivers/infiniband/hw/i40iw/i40iw_uk.c
drivers/infiniband/hw/i40iw/i40iw_user.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/i40iw/i40iw_verbs.h
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/input/misc/tps65218-pwrbutton.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/dmar.c
drivers/irqchip/irq-st.c
drivers/mailbox/bcm-pdc-mailbox.c
drivers/mailbox/mailbox-sti.c
drivers/mailbox/mailbox-test.c
drivers/mailbox/pcc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/ab3100-core.c
drivers/mfd/ab8500-core.c
drivers/mfd/ab8500-debugfs.c
drivers/mfd/ab8500-gpadc.c
drivers/mfd/ab8500-sysctrl.c
drivers/mfd/abx500-core.c
drivers/mfd/arizona-core.c
drivers/mfd/arizona-irq.c
drivers/mfd/axp20x-i2c.c
drivers/mfd/axp20x.c
drivers/mfd/bcm590xx.c
drivers/mfd/cs47l24-tables.c
drivers/mfd/davinci_voicecodec.c
drivers/mfd/fsl-imx25-tsadc.c
drivers/mfd/hi655x-pmic.c
drivers/mfd/intel-lpss-pci.c
drivers/mfd/intel_soc_pmic_bxtwc.c
drivers/mfd/lpc_ich.c
drivers/mfd/palmas.c
drivers/mfd/qcom-pm8xxx.c
drivers/mfd/rk808.c
drivers/mfd/rn5t618.c
drivers/mfd/si476x-i2c.c
drivers/mfd/sun4i-gpadc.c [new file with mode: 0644]
drivers/mfd/tc3589x.c
drivers/mfd/tps65217.c
drivers/mfd/tps65218.c
drivers/mfd/tps65912-core.c
drivers/mfd/wm5102-tables.c
drivers/mfd/wm8994-core.c
drivers/mmc/core/core.c
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/freescale/fman/Kconfig
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/sfc/Kconfig
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/fddi/skfp/hwmtm.c
drivers/net/fddi/skfp/pmf.c
drivers/net/fddi/skfp/smt.c
drivers/net/phy/phy_device.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/nvme/host/pci.c
drivers/regulator/tps65218-regulator.c
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_dbf.h
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fsf.h
drivers/s390/scsi/zfcp_reqlist.h
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/3w-9xxx.c
drivers/scsi/3w-9xxx.h
drivers/scsi/3w-sas.c
drivers/scsi/3w-sas.h
drivers/scsi/3w-xxxx.c
drivers/scsi/3w-xxxx.h
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR5380.c
drivers/scsi/NCR5380.h
drivers/scsi/aacraid/linit.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/g_NCR5380.c
drivers/scsi/g_NCR5380.h
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi/ibmvscsi.h
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qedi/Kconfig [new file with mode: 0644]
drivers/scsi/qedi/Makefile [new file with mode: 0644]
drivers/scsi/qedi/qedi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_dbg.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_dbg.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_debugfs.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_fw.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_gbl.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_hsi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_iscsi.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_iscsi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_main.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_sysfs.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_version.h [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sg.c
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs-qcom.h
drivers/scsi/ufs/ufs_quirks.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/target/iscsi/cxgbit/cxgbit_target.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target.h
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_auth.h
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_datain_values.c
drivers/target/iscsi/iscsi_target_datain_values.h
drivers/target/iscsi/iscsi_target_device.h
drivers/target/iscsi/iscsi_target_erl0.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl1.h
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_erl2.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_nego.h
drivers/target/iscsi/iscsi_target_nodeattrib.h
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_parameters.h
drivers/target/iscsi/iscsi_target_seq_pdu_list.h
drivers/target/iscsi/iscsi_target_tmr.h
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_tpg.h
drivers/target/iscsi/iscsi_target_transport.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/loopback/tcm_loop.h
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_alua.h
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/target/target_core_iblock.h
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pr.h
drivers/target/target_core_pscsi.h
drivers/target/target_core_rd.c
drivers/target/target_core_rd.h
drivers/target/target_core_sbc.c
drivers/target/target_core_ua.h
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/target/target_core_xcopy.h
drivers/target/tcm_fc/tcm_fc.h
drivers/usb/gadget/function/f_tcm.c
fs/aio.c
fs/befs/befs.h
fs/befs/befs_fs_types.h
fs/befs/btree.c
fs/befs/btree.h
fs/befs/datastream.c
fs/befs/datastream.h
fs/befs/debug.c
fs/befs/inode.c
fs/befs/inode.h
fs/befs/io.c
fs/befs/io.h
fs/befs/linuxvfs.c
fs/befs/super.h
fs/compat.c
fs/exec.c
fs/ext2/inode.c
fs/namespace.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/filelayout/filelayoutdev.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.c
fs/notify/inode_mark.c
fs/ocfs2/quota_global.c
fs/ocfs2/quota_local.c
fs/ocfs2/refcounttree.c
fs/ocfs2/super.c
fs/pnode.c
fs/quota/dquot.c
fs/quota/quota.c
fs/read_write.c
fs/seq_file.c
fs/splice.c
fs/super.c
fs/ufs/inode.c
fs/xfs/xfs_reflink.c
include/acpi/acpi_io.h
include/acpi/acpixf.h
include/acpi/actbl.h
include/acpi/platform/aclinuxex.h
include/dt-bindings/net/mdio.h [deleted file]
include/linux/aio.h
include/linux/blkdev.h
include/linux/cacheinfo.h
include/linux/configfs.h
include/linux/fs.h
include/linux/ima.h
include/linux/mfd/axp20x.h
include/linux/mfd/davinci_voicecodec.h
include/linux/mfd/rk808.h
include/linux/mfd/rn5t618.h
include/linux/mfd/sun4i-gpadc.h [new file with mode: 0644]
include/linux/mfd/tps65217.h
include/linux/mfd/tps65218.h
include/linux/mfd/tps65912.h
include/linux/nfs_fs.h
include/linux/quota.h
include/linux/ratelimit.h
include/linux/sched.h
include/rdma/ib_addr.h
include/target/iscsi/iscsi_target_core.h
include/target/iscsi/iscsi_target_stat.h
include/target/iscsi/iscsi_transport.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/uapi/linux/magic.h
kernel/events/uprobes.c
kernel/kcov.c
kernel/kexec_file.c
kernel/sys_ni.c
lib/Kconfig.debug
lib/iov_iter.c
mm/fadvise.c
net/core/neighbour.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/route.c
net/ipv4/tcp_output.c
net/ipv6/datagram.c
net/ipv6/raw.c
net/openvswitch/flow_netlink.c
net/rds/rdma.c
net/sched/act_tunnel_key.c
net/sched/cls_flower.c
net/sched/sch_fq.c
net/sched/sch_netem.c
net/sctp/associola.c
net/sctp/bind_addr.c
net/sctp/protocol.c
samples/bpf/Makefile
samples/bpf/README.rst
samples/bpf/bpf_load.c
samples/bpf/bpf_load.h
samples/bpf/fds_example.c
samples/bpf/lathist_user.c
samples/bpf/libbpf.c [deleted file]
samples/bpf/libbpf.h
samples/bpf/lwt_len_hist_user.c
samples/bpf/offwaketime_user.c
samples/bpf/sampleip_user.c
samples/bpf/sock_example.c
samples/bpf/sock_example.h [new file with mode: 0644]
samples/bpf/sockex1_user.c
samples/bpf/sockex2_user.c
samples/bpf/sockex3_user.c
samples/bpf/spintest_user.c
samples/bpf/tc_l2_redirect_user.c
samples/bpf/test_cgrp2_array_pin.c
samples/bpf/test_cgrp2_attach.c
samples/bpf/test_cgrp2_attach2.c
samples/bpf/test_cgrp2_sock.c
samples/bpf/test_current_task_under_cgroup_user.c
samples/bpf/test_lru_dist.c
samples/bpf/test_probe_write_user_user.c
samples/bpf/trace_event_user.c
samples/bpf/trace_output_user.c
samples/bpf/tracex2_user.c
samples/bpf/tracex3_user.c
samples/bpf/tracex4_user.c
samples/bpf/tracex6_user.c
samples/bpf/xdp1_user.c
samples/bpf/xdp_tx_iptunnel_user.c
scripts/selinux/genheaders/Makefile
scripts/selinux/genheaders/genheaders.c
scripts/selinux/mdp/Makefile
scripts/selinux/mdp/mdp.c
security/integrity/ima/Kconfig
security/integrity/ima/Makefile
security/integrity/ima/ima.h
security/integrity/ima/ima_crypto.c
security/integrity/ima/ima_fs.c
security/integrity/ima/ima_init.c
security/integrity/ima/ima_kexec.c [new file with mode: 0644]
security/integrity/ima/ima_main.c
security/integrity/ima/ima_queue.c
security/integrity/ima/ima_template.c
security/integrity/ima/ima_template_lib.c
security/selinux/include/classmap.h
sound/usb/endpoint.c
tools/include/uapi/linux/bpf.h
tools/lib/bpf/bpf.c
tools/lib/bpf/bpf.h
tools/lib/bpf/libbpf.c
tools/perf/Documentation/perf-sched.txt
tools/perf/Makefile.perf
tools/perf/bench/futex-lock-pi.c
tools/perf/builtin-c2c.c
tools/perf/builtin-mem.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-stat.c
tools/perf/check-headers.sh [new file with mode: 0755]
tools/perf/perf.h
tools/perf/tests/builtin-test.c
tools/perf/tests/tests.h
tools/perf/tests/thread-map.c
tools/perf/trace/beauty/mmap.c
tools/perf/ui/browsers/annotate.c
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/symbol.c
tools/perf/util/thread_map.c
tools/perf/util/thread_map.h

diff --git a/CREDITS b/CREDITS
index 10a9eee807b6315540ed15062c811b8b6a7315c5..c58560701d13158f535046c09fd4b825922ced94 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3949,8 +3949,6 @@ E: gwingerde@gmail.com
 D: Ralink rt2x00 WLAN driver
 D: Minix V2 file-system
 D: Misc fixes
-S: Geessinkweg 177
-S: 7544 TX Enschede
 S: The Netherlands
 
 N: Lars Wirzenius
index 49874173705507f72fac2f5f55da46fc34c3cc52..2a4a423d08e0d3ed8bce7cc0c9bcd36bb30bb19d 100644 (file)
@@ -272,6 +272,22 @@ Description:       Parameters for the CPU cache attributes
                                     the modified cache line is written to main
                                     memory only when it is replaced
 
+
+What:          /sys/devices/system/cpu/cpu*/cache/index*/id
+Date:          September 2016
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:   Cache id
+
+               The id provides a unique number for a specific instance of
+               a cache of a particular type. E.g. there may be a level
+               3 unified cache on each socket in a server and we may
+               assign them ids 0, 1, 2, ...
+
+               Note that id values can be non-contiguous. E.g. level 1
+               caches typically exist per core, but there may not be a
+               power of two cores on a socket, so these caches may be
+               numbered 0, 1, 2, 3, 4, 5, 8, 9, 10, ...
+
 What:          /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
                /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/turbo_stat
                /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/sub_turbo_stat
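
For illustration, a minimal user-space sketch that reads the new cache "id"
attribute (hypothetical code, not part of this merge; the cpu0/index3 path is
an assumption -- index3 is commonly, but not always, the L3 cache):

/* Read the per-cache "id" attribute documented above. */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/devices/system/cpu/cpu0/cache/index3/id";
        FILE *f = fopen(path, "r");
        unsigned int id;

        if (!f) {
                perror(path);   /* attribute absent on older kernels */
                return 1;
        }
        if (fscanf(f, "%u", &id) == 1)
                printf("cpu0 index3 cache id: %u\n", id);
        fclose(f);
        return 0;
}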
index be2d6d0a03a486c340c85ed7ba73c59b22a6d41f..21e2d88637050b7a33f141e558fff43d3f23d0c9 100644 (file)
                        The builtin appraise policy appraises all files
                        owned by uid=0.
 
+       ima_canonical_fmt [IMA]
+                       Use the canonical format for the binary runtime
+                       measurements, instead of host native format.
+
        ima_hash=       [IMA]
                        Format: { md5 | sha1 | rmd160 | sha256 | sha384
                                   | sha512 | ... }
diff --git a/Documentation/devicetree/bindings/mfd/altera-a10sr.txt b/Documentation/devicetree/bindings/mfd/altera-a10sr.txt
new file mode 100644 (file)
index 0000000..ea151f2
--- /dev/null
@@ -0,0 +1,46 @@
+* Altera Arria10 Development Kit System Resource Chip
+
+Required parent device properties:
+- compatible           : "altr,a10sr"
+- spi-max-frequency    : Maximum SPI frequency.
+- reg                  : The SPI Chip Select address for the Arria10
+                         System Resource chip
+- interrupt-parent     : The parent interrupt controller.
+- interrupts           : The interrupt line the device is connected to.
+- interrupt-controller : Marks the device node as an interrupt controller.
+- #interrupt-cells     : The number of cells to describe an IRQ, should be 2.
+                           The first cell is the IRQ number.
+                           The second cell is the flags, encoded as trigger
+                           masks from ../interrupt-controller/interrupts.txt.
+
+The A10SR consists of these sub-devices:
+
+Device                   Description
+------                   ----------
+a10sr_gpio               GPIO Controller
+
+Arria10 GPIO
+Required Properties:
+- compatible        : Should be "altr,a10sr-gpio"
+- gpio-controller   : Marks the device node as a GPIO Controller.
+- #gpio-cells       : Should be two.  The first cell is the pin number and
+                      the second cell is used to specify flags.
+                      See ../gpio/gpio.txt for more information.
+
+Example:
+
+        resource-manager@0 {
+               compatible = "altr,a10sr";
+               reg = <0>;
+               spi-max-frequency = <100000>;
+               interrupt-parent = <&portb>;
+               interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+
+               a10sr_gpio: gpio-controller {
+                       compatible = "altr,a10sr-gpio";
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+       };
index 37a088f9a648ec94e9d9f2046251d3993ff09ed7..9e5eba4a4f0d0ab8387c103007079702a6d278bd 100644 (file)
@@ -10,6 +10,7 @@ voltages and other various functionality to Qualcomm SoCs.
        Value type: <string>
        Definition: must be one of:
                    "qcom,pm8058"
+                   "qcom,pm8821"
                    "qcom,pm8921"
 
 - #address-cells:
index 9e6770b105c935f8932ba39fd3351f9f4628155f..65c23263cc5418db378215612f152f8d887613f7 100644 (file)
@@ -1,21 +1,25 @@
 * Ricoh RN5T567/RN5T618 PMIC
 
-Ricoh RN5T567/RN5T618 is a power management IC family which integrates
-3 to 4 step-down DCDC converters, 7 low-dropout regulators, GPIOs and
-a watchdog timer. The RN5T618 provides additionally a Li-ion battery
-charger, fuel gauge and an ADC. It can be controlled through an I2C
-interface.
+Ricoh RN5T567/RN5T618/RC5T619 is a power management IC family which
+integrates 3 to 5 step-down DCDC converters, 7 to 10 low-dropout regulators,
+GPIOs, and a watchdog timer. It can be controlled through an I2C interface.
+The RN5T618/RC5T619 additionally provides a Li-ion battery charger,
+fuel gauge, and an ADC.
+The RC5T619 additionally includes USB charger detection and an RTC.
 
 Required properties:
  - compatible: must be one of
                "ricoh,rn5t567"
                "ricoh,rn5t618"
+               "ricoh,rc5t619"
  - reg: the I2C slave address of the device
 
 Sub-nodes:
  - regulators: the node is required if the regulator functionality is
    needed. The valid regulator names are: DCDC1, DCDC2, DCDC3, DCDC4
-   (RN5T567), LDO1, LDO2, LDO3, LDO4, LDO5, LDORTC1 and LDORTC2.
+   (RN5T567/RC5T619), LDO1, LDO2, LDO3, LDO4, LDO5, LDO6, LDO7, LDO8,
+   LDO9, LDO10, LDORTC1 and LDORTC2.
+   LDO7-10 are specific to RC5T619.
    The common bindings for each individual regulator can be found in:
    Documentation/devicetree/bindings/regulator/regulator.txt
 
index 750374fc9d945d39f4426e22eed96532e3ba3e9f..c0f37cb41a9b47516f29cafa573d7886d103a2a7 100644 (file)
@@ -1,7 +1,9 @@
 * Cadence SD/SDIO/eMMC Host Controller
 
 Required properties:
-- compatible: should be "cdns,sd4hc".
+- compatible: should be one of the following:
+    "cdns,sd4hc"               - default of the IP
+    "socionext,uniphier-sd4hc" - for Socionext UniPhier SoCs
 - reg: offset and length of the register set for the device.
 - interrupts: a single interrupt specifier.
 - clocks: phandle to the input clock.
@@ -19,7 +21,7 @@ if supported.  See mmc.txt for details.
 
 Example:
        emmc: sdhci@5a000000 {
-               compatible = "cdns,sd4hc";
+               compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
                reg = <0x5a000000 0x400>;
                interrupts = <0 78 4>;
                clocks = <&clk 4>;
index 063c02da018a6d5ca24e5a58329b1dee0c1b5c0a..eea73adc678f963888c3171006516272abd37f62 100644 (file)
@@ -2,11 +2,14 @@ Hisilicon hix5hd2 gmac controller
 
 Required properties:
 - compatible: should contain one of the following SoC strings:
-       * "hisilicon,hix5hd2-gemac"
-       * "hisilicon,hi3798cv200-gemac"
+       * "hisilicon,hix5hd2-gmac"
+       * "hisilicon,hi3798cv200-gmac"
+       * "hisilicon,hi3516a-gmac"
        and one of the following version string:
-       * "hisilicon,hisi-gemac-v1"
-       * "hisilicon,hisi-gemac-v2"
+       * "hisilicon,hisi-gmac-v1"
+       * "hisilicon,hisi-gmac-v2"
+  Version v1 covers the hix5hd2 SoC.
+  Version v2 covers the hi3798cv200 and hi3516a SoCs.
 - reg: specifies base physical address(s) and size of the device registers.
   The first region is the MAC register base and size.
   The second region is external interface control register.
@@ -35,7 +38,7 @@ Required properties:
 
 Example:
        gmac0: ethernet@f9840000 {
-               compatible = "hisilicon,hi3798cv200-gemac", "hisilicon,hisi-gemac-v2";
+               compatible = "hisilicon,hi3798cv200-gmac", "hisilicon,hisi-gmac-v2";
                reg = <0xf9840000 0x1000>,<0xf984300c 0x4>;
                interrupts = <0 71 4>;
                #address-cells = <1>;
index 54749b60a4666adb70514674761fd1031320376b..ff1bc4b1bb3b5e1d91a1747fcc73c24d587cd1a8 100644 (file)
@@ -38,8 +38,14 @@ Optional Properties:
 - enet-phy-lane-swap: If set, indicates the PHY will swap the TX/RX lanes to
   compensate for the board being designed with the lanes swapped.
 
-- eee-broken-modes: Bits to clear in the MDIO_AN_EEE_ADV register to
-  disable EEE broken modes.
+- eee-broken-100tx:
+- eee-broken-1000t:
+- eee-broken-10gt:
+- eee-broken-1000kx:
+- eee-broken-10gkx4:
+- eee-broken-10gkr:
+  Mark the corresponding Energy Efficient Ethernet mode as broken and
+  request the PHY to stop advertising it.
 
 Example:
 
index fccc1d24af58a51e5da9bfb3ec08d21ffab18b02..02f0e9bbfbf8a43a447ddb67d96924789dcd7618 100644 (file)
@@ -1,23 +1,78 @@
 TPS65218 family of regulators
 
 Required properties:
-For tps65218 regulators/LDOs
-- compatible:
-  - "ti,tps65218-dcdc1" for DCDC1
-  - "ti,tps65218-dcdc2" for DCDC2
-  - "ti,tps65218-dcdc3" for DCDC3
-  - "ti,tps65218-dcdc4" for DCDC4
-  - "ti,tps65218-dcdc5" for DCDC5
-  - "ti,tps65218-dcdc6" for DCDC6
-  - "ti,tps65218-ldo1" for LDO1
-
-Optional properties:
-- Any optional property defined in bindings/regulator/regulator.txt
+- compatible: "ti,tps65218"
+- reg: I2C slave address
+
+- List of regulators provided by this controller, must be named
+  after their hardware counterparts: dcdc[1-6], ldo1 and ls3
+- This is the list of child nodes that specify the regulator
+  initialization data for defined regulators. Not all regulators for the given
+  device need to be present. The definition for each of these nodes is defined
+  using the standard binding for regulators found at ./regulator.txt.
+
+  The valid names for regulators are:
+  tps65218: regulator-dcdc1, regulator-dcdc2, regulator-dcdc3, regulator-dcdc4,
+  regulator-dcdc5, regulator-dcdc6, regulator-ldo1, regulator-ls3.
+  Each regulator is defined using the standard binding for regulators.
 
 Example:
+tps65218: tps65218@24 {
+       reg = <0x24>;
+       compatible = "ti,tps65218";
+       interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* NMIn */
+       interrupt-controller;
+       #interrupt-cells = <2>;
+
+       dcdc1: regulator-dcdc1 {
+               regulator-name = "vdd_core";
+               regulator-min-microvolt = <912000>;
+               regulator-max-microvolt = <1144000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       dcdc2: regulator-dcdc2 {
+               regulator-name = "vdd_mpu";
+               regulator-min-microvolt = <912000>;
+               regulator-max-microvolt = <1378000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       dcdc3: regulator-dcdc3 {
+               regulator-name = "vdcdc3";
+               regulator-min-microvolt = <1500000>;
+               regulator-max-microvolt = <1500000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       dcdc5: regulator-dcdc5 {
+               regulator-name = "v1_0bat";
+               regulator-min-microvolt = <1000000>;
+               regulator-max-microvolt = <1000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       dcdc6: regulator-dcdc6 {
+               regulator-name = "v1_8bat";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       ldo1: regulator-ldo1 {
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
 
-       xyz: regulator@0 {
-               compatible = "ti,tps65218-dcdc1";
-               regulator-min-microvolt  = <1000000>;
-               regulator-max-microvolt  = <3000000>;
+       ls3: regulator-ls3 {
+               regulator-min-microvolt = <100000>;
+               regulator-max-microvolt = <1000000>;
        };
+};
index 4f4a3443b114c6ecd5a41856c86559daca0d51b6..ffa522a9bdfdcc34b017bc9a3e70be8fe2d87082 100644 (file)
@@ -36,5 +36,5 @@
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  |
-    |      xtensa: | TODO |
+    |      xtensa: |  ok  |
     -----------------------
index a97e8e3f4ebbed8d2467e19d6e736ef21f8f9991..83d2cf989ea3494dc825fc9e4da3b1ed09491646 100644 (file)
@@ -36,5 +36,5 @@
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  |
-    |      xtensa: | TODO |
+    |      xtensa: |  ok  |
     -----------------------
index b9b675539b9df20ed3a754a01a4f5819d12bf7bb..6ca98f9911bbb31693ff342039d5c84772e37d91 100644 (file)
@@ -7,7 +7,7 @@
     |         arch |status|
     -----------------------
     |       alpha: | TODO |
-    |         arc: | TODO |
+    |         arc: |  ok  |
     |         arm: |  ok  |
     |       arm64: |  ok  |
     |       avr32: | TODO |
index e2c187947e588d6146c464852e07735966f95145..37b1967a00a9f48e1d6cc870747ce3ae0ead07d6 100644 (file)
@@ -6,17 +6,15 @@ NCR53c400 extensions (c) 1994,1995,1996 Kevin Lentin
 This file documents the NCR53c400 extensions by Kevin Lentin and some
 enhancements to the NCR5380 core.
 
-This driver supports both NCR5380 and NCR53c400 cards in port or memory
-mapped modes. Currently this driver can only support one of those mapping
-modes at a time but it does support both of these chips at the same time.
-The next release of this driver will support port & memory mapped cards at
-the same time. It should be able to handle multiple different cards in the
-same machine.
+This driver supports NCR5380, NCR53c400 and compatible cards in port or
+memory mapped modes.
 
-The drivers/scsi/Makefile has an override in it for the most common
-NCR53c400 card, the Trantor T130B in its default configuration:
-       Port: 0x350
-       IRQ : 5
+Use of an interrupt is recommended, if supported by the board, as this will
+allow targets to disconnect and thereby improve SCSI bus utilization.
+
+If the irq parameter is 254 or is omitted entirely, the driver will probe
+for the correct IRQ line automatically. If the irq parameter is 0 or 255
+then no IRQ will be used.
 
 The NCR53c400 does not support DMA but it does have Pseudo-DMA which is
 supported by the driver.
@@ -47,22 +45,24 @@ These old-style parameters can support only one card:
   dtc_3181e=1  to set up for a Domex Technology Corp 3181E board
   hp_c2502=1   to set up for a Hewlett Packard C2502 board
 
-e.g.
-OLD: modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
-NEW: modprobe g_NCR5380 irq=5 base=0x350 card=0
-  for a port mapped NCR5380 board or
-
-OLD: modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
-NEW: modprobe g_NCR5380 irq=255 base=0xc8000 card=1
-  for a memory mapped NCR53C400 board with interrupts disabled or
+E.g. Trantor T130B in its default configuration:
+modprobe g_NCR5380 irq=5 base=0x350 card=1
+or alternatively, using the old syntax,
+modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_53c400=1
 
-NEW: modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
-  for two cards: DTC3181 (in non-PnP mode) at 0x240 with no IRQ
-             and HP C2502 at 0x300 with IRQ 7
+E.g. a port mapped NCR5380 board, driver to probe for IRQ:
+modprobe g_NCR5380 base=0x350 card=0
+or alternatively,
+modprobe g_NCR5380 ncr_addr=0x350 ncr_5380=1
 
-(255 should be specified for no or DMA interrupt, 254 to autoprobe for an 
-     IRQ line if overridden on the command line.)
+E.g. a memory mapped NCR53C400 board with no IRQ:
+modprobe g_NCR5380 irq=255 base=0xc8000 card=1
+or alternatively,
+modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
 
+E.g. two cards, DTC3181 (in non-PnP mode) at 0x240 with no IRQ
+and HP C2502 at 0x300 with IRQ 7:
+modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
 
 Kevin Lentin
 K.Lentin@cs.monash.edu.au
index 55f27579302830c4e07634c6b17e56f6fa14e227..25feb0d35e7abc7f6ff7797faf02e9e79321e644 100755 (executable)
@@ -157,6 +157,11 @@ class ListTableBuilder(object):
     def buildTableNode(self):
 
         colwidths    = self.directive.get_column_widths(self.max_cols)
+        if isinstance(colwidths, tuple):
+            # Since docutils 0.13, get_column_widths returns a (widths,
+            # colwidths) tuple, where widths is a string (i.e. 'auto').
+            # See https://sourceforge.net/p/docutils/patches/120/.
+            colwidths = colwidths[1]
         stub_columns = self.directive.options.get('stub-columns', 0)
         header_rows  = self.directive.options.get('header-rows', 0)
 
index e5dd9f4d61008ad6431e067b900608788e573020..fd013bf4115be70bafdb6489dc4caedf61e7600d 100644 (file)
@@ -13,8 +13,12 @@ The acquisition orders for mutexes are as follows:
 - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
   them together is quite rare.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock.  Everything
-else is a leaf: no other lock is taken inside the critical sections.
+On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
+
+Everything else is a leaf: no other lock is taken inside the critical
+sections.
 
 2: Exception
 ------------
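
To make the new x86 rule concrete, a minimal sketch of a conforming
acquisition sequence (illustrative only, not code from this merge; the
function name is hypothetical, the lock names follow the text above):

/* Take vcpu->mutex before kvm->arch.hyperv.hv_lock, per the documented
 * x86 mutex ordering; the body is a placeholder. */
static void hv_ordered_access(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;

        mutex_lock(&vcpu->mutex);                 /* outer mutex */
        mutex_lock(&kvm->arch.hyperv.hv_lock);    /* inner mutex */
        /* ... operate on per-VM Hyper-V state ... */
        mutex_unlock(&kvm->arch.hyperv.hv_lock);
        mutex_unlock(&vcpu->mutex);
}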
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
new file mode 100644 (file)
index 0000000..d918d26
--- /dev/null
@@ -0,0 +1,214 @@
+User Interface for Resource Allocation in Intel Resource Director Technology
+
+Copyright (C) 2016 Intel Corporation
+
+Fenghua Yu <fenghua.yu@intel.com>
+Tony Luck <tony.luck@intel.com>
+
+This feature is enabled by the CONFIG_INTEL_RDT_A Kconfig and the
+X86 /proc/cpuinfo flag bits "rdt", "cat_l3" and "cdp_l3".
+
+To use the feature mount the file system:
+
+ # mount -t resctrl resctrl [-o cdp] /sys/fs/resctrl
+
+mount options are:
+
+"cdp": Enable code/data prioritization in L3 cache allocations.
+
+
+Info directory
+--------------
+
+The 'info' directory contains information about the enabled
+resources. Each resource has its own subdirectory. The subdirectory
+names reflect the resource names. Each subdirectory contains the
+following files:
+
+"num_closids":  The number of CLOSIDs which are valid for this
+               resource. The kernel uses the smallest number of
+               CLOSIDs of all enabled resources as the limit.
+
+"cbm_mask":     The bitmask which is valid for this resource. This
+               mask is equivalent to 100%.
+
+"min_cbm_bits": The minimum number of consecutive bits which must be
+               set when writing a mask.
+
+
+Resource groups
+---------------
+Resource groups are represented as directories in the resctrl file
+system. The default group is the root directory. Other groups may be
+created as desired by the system administrator using the "mkdir(1)"
+command, and removed using "rmdir(1)".
+
+There are three files associated with each group:
+
+"tasks": A list of tasks that belongs to this group. Tasks can be
+       added to a group by writing the task ID to the "tasks" file
+       (which will automatically remove them from the previous
+       group to which they belonged). New tasks created by fork(2)
+       and clone(2) are added to the same group as their parent.
+       If a PID is not in any sub-partition, it is in the root
+       partition (i.e. the default partition).
+
+"cpus": A bitmask of logical CPUs assigned to this group. Writing
+       a new mask can add/remove CPUs from this group. Added CPUs
+       are removed from their previous group. Removed ones are
+       given to the default (root) group. You cannot remove CPUs
+       from the default group.
+
+"schemata": A list of all the resources available to this group.
+       Each resource has its own line and format - see below for
+       details.
+
+When a task is running the following rules define which resources
+are available to it:
+
+1) If the task is a member of a non-default group, then the schemata
+for that group is used.
+
+2) Else if the task belongs to the default group, but is running on a
+CPU that is assigned to some specific group, then the schemata for
+the CPU's group is used.
+
+3) Otherwise the schemata for the default group is used.
+
+
+Schemata files - general concepts
+---------------------------------
+Each line in the file describes one resource. The line starts with
+the name of the resource, followed by specific values to be applied
+in each of the instances of that resource on the system.
+
+Cache IDs
+---------
+On current generation systems there is one L3 cache per socket and L2
+caches are generally just shared by the hyperthreads on a core, but this
+isn't an architectural requirement. We could have multiple separate L3
+caches on a socket, and multiple cores could share an L2 cache. So instead
+of using "socket" or "core" to define the set of logical cpus sharing
+a resource we use a "Cache ID". At a given cache level this will be a
+unique number across the whole system (but it isn't guaranteed to be a
+contiguous sequence, there may be gaps).  To find the ID for each logical
+CPU, look in /sys/devices/system/cpu/cpu*/cache/index*/id
+
+Cache Bit Masks (CBM)
+---------------------
+For cache resources we describe the portion of the cache that is available
+for allocation using a bitmask. The maximum value of the mask is defined
+by each cpu model (and may be different for different cache levels). It
+is found using CPUID, but is also provided in the "info" directory of
+the resctrl file system in "info/{resource}/cbm_mask". X86 hardware
+requires that these masks have all the '1' bits in a contiguous block. So
+0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9
+and 0xA are not.  On a system with a 20-bit mask each bit represents 5%
+of the capacity of the cache. You could partition the cache into four
+equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
+
+
+L3 details (code and data prioritization disabled)
+--------------------------------------------------
+With CDP disabled the L3 schemata format is:
+
+       L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+L3 details (CDP enabled via mount option to resctrl)
+----------------------------------------------------
+When CDP is enabled L3 control is split into two separate resources
+so you can specify independent masks for code and data like this:
+
+       L3data:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+       L3code:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+L2 details
+----------
+L2 cache does not support code and data prioritization, so the
+schemata format is always:
+
+       L2:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+Example 1
+---------
+On a two socket machine (one L3 cache per socket) with just four bits
+for cache bit masks
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+# mkdir p0 p1
+# echo "L3:0=3;1=c" > /sys/fs/resctrl/p0/schemata
+# echo "L3:0=3;1=3" > /sys/fs/resctrl/p1/schemata
+
+The default resource group is unmodified, so we have access to all parts
+of all caches (its schemata file reads "L3:0=f;1=f").
+
+Tasks that are under the control of group "p0" may only allocate from the
+"lower" 50% on cache ID 0, and the "upper" 50% of cache ID 1.
+Tasks in group "p1" use the "lower" 50% of cache on both sockets.
+
+Example 2
+---------
+Again two sockets, but this time with a more realistic 20-bit mask.
+
+Two real-time tasks, pid=1234 running on processor 0 and pid=5678 running on
+processor 1 of socket 0, on a two-socket, dual-core machine. To avoid noisy
+neighbors, each of the two real-time tasks exclusively occupies one quarter
+of L3 cache on socket 0.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+
+First we reset the schemata for the default group so that the "upper"
+50% of the L3 cache on socket 0 cannot be used by ordinary tasks:
+
+# echo "L3:0=3ff;1=fffff" > schemata
+
+Next we make a resource group for our first real time task and give
+it access to the "top" 25% of the cache on socket 0.
+
+# mkdir p0
+# echo "L3:0=f8000;1=fffff" > p0/schemata
+
+Finally we move our first real time task into this resource group. We
+also use taskset(1) to ensure the task always runs on a dedicated CPU
+on socket 0. Most uses of resource groups will also constrain which
+processors tasks run on.
+
+# echo 1234 > p0/tasks
+# taskset -cp 1 1234
+
+Ditto for the second real time task (with the remaining 25% of cache):
+
+# mkdir p1
+# echo "L3:0=7c00;1=fffff" > p1/schemata
+# echo 5678 > p1/tasks
+# taskset -cp 2 5678
+
+Example 3
+---------
+
+A single socket system which has real-time tasks running on cores 4-7 and
+a non-real-time workload assigned to cores 0-3. The real-time tasks share text
+and data, so a per-task association is not required; due to interaction
+with the kernel it is desirable that the kernel on these cores shares L3 with
+the tasks.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+
+First we reset the schemata for the default group so that the "upper"
+50% of the L3 cache on socket 0 cannot be used by ordinary tasks:
+
+# echo "L3:0=3ff" > schemata
+
+Next we make a resource group for our real time cores and give
+it access to the "top" 50% of the cache on socket 0.
+
+# mkdir p0
+# echo "L3:0=ffc00;" > p0/schemata
+
+Finally we move cores 4-7 over to the new group and make sure that the
+kernel and the tasks running there get 50% of the cache.
+
+# echo f0 > p0/cpus
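
The contiguity rule in the "Cache Bit Masks (CBM)" section above is easy to
check programmatically; a hypothetical user-space helper (not part of this
merge, and relying on the GCC/Clang __builtin_ctzl builtin) might look like:

#include <stdbool.h>

/* Return true if all set bits in the CBM form one contiguous block,
 * as x86 hardware requires: 0x3, 0x6, 0xC pass; 0x5, 0x9, 0xA fail. */
static bool cbm_is_contiguous(unsigned long cbm)
{
        if (cbm == 0)
                return false;
        cbm >>= __builtin_ctzl(cbm);    /* drop trailing zero bits */
        return (cbm & (cbm + 1)) == 0;  /* rest must be 2^n - 1 */
}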
index f6eb97b35e0fd58fe353cefce963ce7ff162491f..979126a9a150028d26b3531fd5715833dbedc0b4 100644 (file)
@@ -143,7 +143,7 @@ S:  Maintained
 F:     drivers/net/ethernet/3com/typhoon*
 
 3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
-M:     Adam Radford <linuxraid@lsi.com>
+M:     Adam Radford <aradford@gmail.com>
 L:     linux-scsi@vger.kernel.org
 W:     http://www.lsi.com
 S:     Supported
@@ -1747,7 +1747,7 @@ F:        drivers/staging/media/platform/s5p-cec/
 
 ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
 M:     Andrzej Pietrasiewicz <andrzej.p@samsung.com>
-M:     Jacek Anaszewski <j.anaszewski@samsung.com>
+M:     Jacek Anaszewski <jacek.anaszewski@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org
 L:     linux-media@vger.kernel.org
 S:     Maintained
@@ -7229,7 +7229,7 @@ F:        drivers/scsi/53c700*
 
 LED SUBSYSTEM
 M:     Richard Purdie <rpurdie@rpsys.net>
-M:     Jacek Anaszewski <j.anaszewski@samsung.com>
+M:     Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M:     Pavel Machek <pavel@ucw.cz>
 L:     linux-leds@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
@@ -10136,6 +10136,12 @@ F:     drivers/net/ethernet/qlogic/qed/
 F:     include/linux/qed/
 F:     drivers/net/ethernet/qlogic/qede/
 
+QLOGIC QL41xxx ISCSI DRIVER
+M:     QLogic-Storage-Upstream@cavium.com
+L:     linux-scsi@vger.kernel.org
+S:     Supported
+F:     drivers/scsi/qedi/
+
 QNX4 FILESYSTEM
 M:     Anders Larsen <al@alarsen.net>
 W:     http://www.alarsen.net/linux/qnx4fs/
@@ -10327,6 +10333,14 @@ L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/sw/rdmavt
 
+RDT - RESOURCE ALLOCATION
+M:     Fenghua Yu <fenghua.yu@intel.com>
+L:     linux-kernel@vger.kernel.org
+S:     Supported
+F:     arch/x86/kernel/cpu/intel_rdt*
+F:     arch/x86/include/asm/intel_rdt*
+F:     Documentation/x86/intel_rdt*
+
 READ-COPY UPDATE (RCU)
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 M:     Josh Triplett <josh@joshtriplett.org>
index 19483aea4bbc9e15b51f0bc7f200880902794a74..99839c23d453fa8ded2061968aead8a6ee4b2317 100644 (file)
@@ -5,6 +5,9 @@
 config KEXEC_CORE
        bool
 
+config HAVE_IMA_KEXEC
+       bool
+
 config OPROFILE
        tristate "OProfile system profiling"
        depends on PROFILING
index ab12723d39a01d8751807728a43c7b08cdc6a814..c75d29077e4a654276219883629444deec89c955 100644 (file)
@@ -9,6 +9,7 @@
 config ARC
        def_bool y
        select ARC_TIMERS
+       select ARCH_HAS_SG_CHAIN
        select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
index da41a54ea2d747011b6c20dcc0b1738e2a2e8062..f659942744de0c474962b118292100eeed7397ce 100644 (file)
@@ -244,7 +244,7 @@ struct cpuinfo_arc_mmu {
 };
 
 struct cpuinfo_arc_cache {
-       unsigned int sz_k:14, line_len:8, assoc:4, ver:4, alias:1, vipt:1;
+       unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4;
 };
 
 struct cpuinfo_arc_bpu {
index a093adbdb017580f6da74abb551ea02e14e04da5..fc662f49c55ac91916af7cbd830b1c978f827ffe 100644 (file)
@@ -85,6 +85,10 @@ void flush_anon_page(struct vm_area_struct *vma,
  */
 #define PG_dc_clean    PG_arch_1
 
+#define CACHE_COLORS_NUM       4
+#define CACHE_COLORS_MSK       (CACHE_COLORS_NUM - 1)
+#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
+
 /*
  * Simple wrapper over config option
  * Bootup code ensures that hardware matches kernel configuration
@@ -94,8 +98,6 @@ static inline int cache_is_vipt_aliasing(void)
        return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
 }
 
-#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
-
 /*
  * checks if two addresses (after page aligning) index into same cache set
  */
index e880dfa3fcd370c0f7ae8f9010a79df9ab8c3399..a64c447b0337804568c69cf2e15998deabdd9b40 100644 (file)
 #define AUX_IRQ_ACT_BIT_U      31
 
 /*
- * User space should be interruptable even by lowest prio interrupt
- * Safe even if actual interrupt priorities is fewer or even one
+ * Hardware supports 16 priorities (0 highest, 15 lowest)
+ * Linux by default runs at 1, priority 0 reserved for NMI style interrupts
  */
-#define ARCV2_IRQ_DEF_PRIO     15
+#define ARCV2_IRQ_DEF_PRIO     1
 
 /* seed value for status register */
 #define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | STATUS_AD_MASK | \
index 7a1c124ff021d53377d24ff5d1bb9cccbb2831de..0b6388a5f0b828323c03b53f0903a35bdc25b0cb 100644 (file)
@@ -67,12 +67,23 @@ ENTRY(handle_interrupt)
 
        INTERRUPT_PROLOGUE  irq
 
-       clri            ; To make status32.IE agree with CPU internal state
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-       TRACE_ASM_IRQ_DISABLE
-#endif
-
+       # irq control APIs local_irq_save/restore/disable/enable fiddle with
+       # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio)
+       # However a taken interrupt doesn't clear these bits. Thus irqs_disabled()
+       # query in hard ISR path would return false (since .IE is set) which would
+       # trip genirq interrupt handling asserts.
+       #
+       # So do a "soft" disable of interrupts here.
+       #
+       # Note this disable is only for consistent book-keeping as further interrupts
+       # will be disabled anyway even w/o this. Hardware tracks active interrupts
+       # separately in AUX_IRQ_ACTIVE.active and will not take new interrupts
+       # unless this one returns (or higher prio becomes pending in 2-prio scheme)
+
+       IRQ_DISABLE
+
+       ; icause is banked: one per priority level
+       ; so a higher prio interrupt taken here won't clobber prev prio icause
        lr  r0, [ICAUSE]
        mov   blink, ret_from_exception
 
@@ -171,6 +182,7 @@ END(EV_TLBProtV)
 ; All 2 entry points to here already disable interrupts
 
 .Lrestore_regs:
+restore_regs:
 
        # Interrupts are actually disabled from this point on, but will get
        # reenabled after we return from interrupt/exception.
index 98812c1248dfaf85b28ed023287ae78f27286073..9211707634dcf57e1aa0ac13fa4a6bbc79b99eeb 100644 (file)
@@ -259,7 +259,7 @@ ENTRY(EV_TLBProtV)
 
        EXCEPTION_PROLOGUE
 
-       lr  r2, [ecr]
+       mov r2, r9      ; ECR set into r9 already
        lr  r0, [efa]   ; Faulting Data address (not part of pt_regs saved above)
 
        ; Exception auto-disables further Intr/exceptions.
index 62b59409a5d97eb8b95276a3f17b1e430b98899b..994dca7014db645b32cfb22753cb25bae4c46566 100644 (file)
@@ -14,8 +14,6 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
-static int irq_prio;
-
 /*
  * Early Hardware specific Interrupt setup
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,7 +22,7 @@ static int irq_prio;
  */
 void arc_init_IRQ(void)
 {
-       unsigned int tmp;
+       unsigned int tmp, irq_prio;
 
        struct irq_build {
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -67,12 +65,12 @@ void arc_init_IRQ(void)
 
        irq_prio = irq_bcr.prio;        /* Encoded as N-1 for N levels */
        pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
-               irq_prio + 1, irq_prio,
+               irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
                irq_bcr.firq ? " FIRQ (not used)":"");
 
        /* setup status32, don't enable intr yet as kernel doesn't want */
        tmp = read_aux_reg(0xa);
-       tmp |= STATUS_AD_MASK | (irq_prio << 1);
+       tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
        tmp &= ~STATUS_IE_MASK;
        asm volatile("kflag %0  \n"::"r"(tmp));
 }
@@ -93,7 +91,7 @@ void arcv2_irq_enable(struct irq_data *data)
 {
        /* set default priority */
        write_aux_reg(AUX_IRQ_SELECT, data->irq);
-       write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
+       write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
        /*
         * hw auto enables (linux unmask) all by default
index 50d71695cd4ecbeefd64f28e9f44265195b01d15..ec86ac0e33213b889cd6100e10e95fda8f3c31e4 100644 (file)
@@ -40,7 +40,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
        struct cpuinfo_arc_cache *p;
 
 #define PR_CACHE(p, cfg, str)                                          \
-       if (!(p)->ver)                                                  \
+       if (!(p)->line_len)                                             \
                n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
        else                                                            \
                n += scnprintf(buf + n, len - n,                        \
@@ -54,7 +54,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
        PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
 
        p = &cpuinfo_arc700[c].slc;
-       if (p->ver)
+       if (p->line_len)
                n += scnprintf(buf + n, len - n,
                               "SLC\t\t: %uK, %uB Line%s\n",
                               p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
@@ -104,7 +104,6 @@ static void read_decode_cache_bcr_arcv2(int cpu)
        READ_BCR(ARC_REG_SLC_BCR, sbcr);
        if (sbcr.ver) {
                READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
-               p_slc->ver = sbcr.ver;
                p_slc->sz_k = 128 << slc_cfg.sz;
                l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
        }
@@ -152,7 +151,6 @@ void read_decode_cache_bcr(void)
 
        p_ic->line_len = 8 << ibcr.line_len;
        p_ic->sz_k = 1 << (ibcr.sz - 1);
-       p_ic->ver = ibcr.ver;
        p_ic->vipt = 1;
        p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
 
@@ -176,7 +174,6 @@ dc_chk:
 
        p_dc->line_len = 16 << dbcr.line_len;
        p_dc->sz_k = 1 << (dbcr.sz - 1);
-       p_dc->ver = dbcr.ver;
 
 slc_chk:
        if (is_isa_arcv2())
@@ -945,17 +942,13 @@ void arc_cache_init(void)
        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 
-               if (!ic->ver)
+               if (!ic->line_len)
                        panic("cache support enabled but non-existent cache\n");
 
                if (ic->line_len != L1_CACHE_BYTES)
                        panic("ICache line [%d] != kernel Config [%d]",
                              ic->line_len, L1_CACHE_BYTES);
 
-               if (ic->ver != CONFIG_ARC_MMU_VER)
-                       panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
-                             ic->ver, CONFIG_ARC_MMU_VER);
-
                /*
                 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
                 * pair to provide vaddr/paddr respectively, just as in MMU v3
@@ -969,7 +962,7 @@ void arc_cache_init(void)
        if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
                struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
 
-               if (!dc->ver)
+               if (!dc->line_len)
                        panic("cache support enabled but non-existent cache\n");
 
                if (dc->line_len != L1_CACHE_BYTES)
@@ -979,11 +972,16 @@ void arc_cache_init(void)
                /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
                if (is_isa_arcompact()) {
                        int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-
-                       if (dc->alias && !handled)
-                               panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-                       else if (!dc->alias && handled)
+                       int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
+
+                       if (dc->alias) {
+                               if (!handled)
+                                       panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+                               if (CACHE_COLORS_NUM != num_colors)
+                                       panic("CACHE_COLORS_NUM not optimized for config\n");
+                       } else if (!dc->alias && handled) {
                                panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+                       }
                }
        }
 
index c02e092fad8b17bf8c8e80c29b218db7ba86ea46..6c712a97e1fef042b00c8fe55d406e78285b973e 100644 (file)
                };
 
                gmac0: ethernet@1840000 {
-                       compatible = "hisilicon,hix5hd2-gemac", "hisilicon,hisi-gemac-v1";
+                       compatible = "hisilicon,hix5hd2-gmac", "hisilicon,hisi-gmac-v1";
                        reg = <0x1840000 0x1000>,<0x184300c 0x4>;
                        interrupts = <0 71 4>;
                        clocks = <&clock HIX5HD2_MAC0_CLK>;
                };
 
                gmac1: ethernet@1841000 {
-                       compatible = "hisilicon,hix5hd2-gemac", "hisilicon,hisi-gemac-v1";
+                       compatible = "hisilicon,hix5hd2-gmac", "hisilicon,hisi-gmac-v1";
                        reg = <0x1841000 0x1000>,<0x1843010 0x4>;
                        interrupts = <0 72 4>;
                        clocks = <&clock HIX5HD2_MAC1_CLK>;
index d0de0e032bc2c689f849ee679754f7b18d9451c3..c1976c0adca73025ec4526fc40be077101f8ef62 100644 (file)
@@ -29,7 +29,7 @@
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
-/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
+/* ACPI table mapping after acpi_permanent_mmap is set */
 static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
                                            acpi_size size)
 {
index b71086d251954f7b72837899346525322dc5d724..bfe632808d7724c0a51562efb60501e67f6bf157 100644 (file)
@@ -165,6 +165,11 @@ extern u64                 kimage_vaddr;
 /* the offset between the kernel virtual and physical mappings */
 extern u64                     kimage_voffset;
 
+static inline unsigned long kaslr_offset(void)
+{
+       return kimage_vaddr - KIMAGE_VADDR;
+}
+
 /*
  * Allow all memory at the discovery stage. We will clip it later.
  */
index 252a6d9c1da5d7868fffd23ec6ed93a95944c91d..64d9cbd61678233dc24648f11bb5b242df3ee244 100644 (file)
@@ -132,14 +132,13 @@ static int __init acpi_fadt_sanity_check(void)
        struct acpi_table_header *table;
        struct acpi_table_fadt *fadt;
        acpi_status status;
-       acpi_size tbl_size;
        int ret = 0;
 
        /*
         * FADT is required on arm64; retrieve it to check its presence
         * and carry out revision and ACPI HW reduced compliancy tests
         */
-       status = acpi_get_table_with_size(ACPI_SIG_FADT, 0, &table, &tbl_size);
+       status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
        if (ACPI_FAILURE(status)) {
                const char *msg = acpi_format_exception(status);
 
@@ -170,10 +169,10 @@ static int __init acpi_fadt_sanity_check(void)
 
 out:
        /*
-        * acpi_get_table_with_size() creates FADT table mapping that
+        * acpi_get_table() creates the FADT table mapping that
         * should be released after parsing and before resuming boot
         */
-       early_acpi_os_unmap_memory(table, tbl_size);
+       acpi_put_table(table);
        return ret;
 }
 
index a53f52ac81c62ad4e6e70056949d143f7de4bfd4..b051367e21491cada86f115f4386fda8aace299a 100644 (file)
@@ -338,11 +338,11 @@ subsys_initcall(topology_init);
 static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
                              void *p)
 {
-       u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
+       const unsigned long offset = kaslr_offset();
 
-       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
-               pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
-                        kaslr_offset, KIMAGE_VADDR);
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
+               pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+                        offset, KIMAGE_VADDR);
        } else {
                pr_emerg("Kernel Offset: disabled\n");
        }
index 805ae5d712e8baa63095199f1601ce548c26d606..032fed71223f54358a467d5df14d0b5fe176667a 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         392
+#define __NR_syscalls         398
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index a8bd3fa28bc7f4e97158fff49d25443524647b0d..d8086159d996dfdec4503fa95cf1638637f8932e 100644 (file)
 #define __NR_userfaultfd       389
 #define __NR_membarrier                390
 #define __NR_mlock2            391
+#define __NR_copy_file_range   392
+#define __NR_preadv2           393
+#define __NR_pwritev2          394
+#define __NR_pkey_mprotect     395
+#define __NR_pkey_alloc                396
+#define __NR_pkey_free         397
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index b70bb538f00165df2d46ec87c27217577ec4b95a..96b3f26d16beeb56b815da8d87661223e2beae91 100644 (file)
@@ -49,6 +49,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
        {"9.3", 0x20},
        {"9.4", 0x21},
        {"9.5", 0x22},
+       {"9.6", 0x23},
+       {"10.0", 0x24},
        {NULL, 0},
 };
 
@@ -75,6 +77,10 @@ const struct family_string_key family_string_lookup[] = {
        {"zynq7000", 0x12},
        {"UltraScale Virtex", 0x13},
        {"UltraScale Kintex", 0x14},
+       {"UltraScale+ Zynq", 0x15},
+       {"UltraScale+ Virtex", 0x16},
+       {"UltraScale+ Kintex", 0x17},
+       {"Spartan7", 0x18},
        {NULL, 0},
 };
 
index 6b3dd99126d753a22a9ed270ec92761c2f936e27..6841c2df14d9acdfe30133baac0833111bf645d5 100644 (file)
@@ -392,3 +392,9 @@ ENTRY(sys_call_table)
        .long sys_userfaultfd
        .long sys_membarrier            /* 390 */
        .long sys_mlock2
+       .long sys_copy_file_range
+       .long sys_preadv2
+       .long sys_pwritev2
+       .long sys_pkey_mprotect         /* 395 */
+       .long sys_pkey_alloc
+       .long sys_pkey_free
index 5bbf38b916ef36839396c01cfd5bc105245ae934..9e954959f60504cd7cf04d1c8c58f897bf6cea76 100644 (file)
@@ -259,7 +259,7 @@ static int __init xilinx_timer_init(struct device_node *timer)
        int ret;
 
        if (initialized)
-               return;
+               return -EINVAL;
 
        initialized = 1;
 
index a14b865870131a052c995fed98cc9e3ed7a3cd2d..3a71f38cdc0553eeb8c026b2b12d3b412e2f2657 100644 (file)
@@ -7,6 +7,7 @@ config PARISC
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_SYSCALL_TRACEPOINTS
        select ARCH_WANT_FRAME_POINTERS
+       select ARCH_HAS_ELF_RANDOMIZE
        select RTC_CLASS
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
index 78c9fd32c5546b6ec91d591e853d89a761383c92..a6b2a421571edfb5f981e1558d75525a23a59404 100644 (file)
@@ -348,9 +348,10 @@ struct pt_regs;    /* forward declaration... */
 
 #define ELF_HWCAP      0
 
-#define STACK_RND_MASK (is_32bit_task() ? \
-                               0x7ff >> (PAGE_SHIFT - 12) : \
-                               0x3ffff >> (PAGE_SHIFT - 12))
+/* Masks for stack and mmap randomization */
+#define BRK_RND_MASK   (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK  (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
+#define STACK_RND_MASK MMAP_RND_MASK
 
 struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *);
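
These masks are counts of pages: with 4 KB pages, BRK_RND_MASK spans 0x800 pages (8 MB) for 32-bit tasks and 0x40000 pages (1 GB) for 64-bit ones, matching the "8MB for 32bit, 1GB for 64bit" comment deleted from brk_rnd() further down. A quick sketch of the span arithmetic, assuming a PAGE_SHIFT of 12:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;  /* assumed 4K pages */
        unsigned long span32 = (0x07ffUL + 1) << page_shift;
        unsigned long span64 = (0x3ffffUL + 1) << page_shift;

        printf("32-bit brk span: %lu MB\n", span32 >> 20);   /* 8    */
        printf("64-bit brk span: %lu MB\n", span64 >> 20);   /* 1024 */
        return 0;
    }
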
index 47539f11795835e8397bb77602d8e82d99e14a3f..e1d289092705f00431408f90ca6bd48e47c8cba4 100644 (file)
@@ -289,7 +289,7 @@ extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
 extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
 extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
 
-extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
+extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa);
 
 extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
 
index ca40741378be76c70ecf718a7240e0b96c316530..a3661ee6b060c1d258ab740e5468cfffb665f8d5 100644 (file)
@@ -93,9 +93,7 @@ struct system_cpuinfo_parisc {
 /* Per CPU data structure - ie varies per CPU.  */
 struct cpuinfo_parisc {
        unsigned long it_value;     /* Interval Timer at last timer Intr */
-       unsigned long it_delta;     /* Interval delta (tic_10ms / HZ * 100) */
        unsigned long irq_count;    /* number of IRQ's since boot */
-       unsigned long irq_max_cr16; /* longest time to handle a single IRQ */
        unsigned long cpuid;        /* aka slot_number or set to NO_PROC_ID */
        unsigned long hpa;          /* Host Physical address */
        unsigned long txn_addr;     /* MMIO addr of EIR or id_eid */
@@ -103,8 +101,6 @@ struct cpuinfo_parisc {
        unsigned long pending_ipi;  /* bitmap of type ipi_message_type */
 #endif
        unsigned long bh_count;     /* number of times bh was invoked */
-       unsigned long prof_counter; /* per CPU profiling support */
-       unsigned long prof_multiplier;  /* per CPU profiling support */
        unsigned long fp_rev;
        unsigned long fp_model;
        unsigned int state;
index 4fcff2dcc9c304decddf7b7643e90f7803968531..ad4cb1613c57a56552e32d795d5d83fdffd99672 100644 (file)
@@ -878,6 +878,9 @@ ENTRY_CFI(syscall_exit_rfi)
        STREG   %r19,PT_SR7(%r16)
 
 intr_return:
+       /* NOTE: Need to enable interrupts in case we schedule. */
+       ssm     PSW_SM_I, %r0
+
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
@@ -904,11 +907,6 @@ intr_check_sig:
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
-       /* NOTE: We need to enable interrupts if we have to deliver
-        * signals. We used to do this earlier but it caused kernel
-        * stack overflows. */
-       ssm     PSW_SM_I, %r0
-
        copy    %r0, %r25                       /* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
@@ -960,10 +958,6 @@ intr_do_resched:
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
 
-       /* NOTE: We need to enable interrupts if we schedule.  We used
-        * to do this earlier but it caused kernel stack overflows. */
-       ssm     PSW_SM_I, %r0
-
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
 #endif
index e5d71905cad567cc03e22ffdeb7f7295d635b12b..9d797ae4fa22248665a13e0ff29d72532e9e4bdb 100644 (file)
@@ -1258,7 +1258,7 @@ int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long
  *
  * Retrieve the cpu number for the cpu at the specified HPA.
  */
-int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa)
+int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa)
 {
        int retval;
        unsigned long flags;
index c05d1876d27c4975453194686976c6cb0147531d..c9789d9c73b40478bb5ff931fd2c3afb30a4840f 100644 (file)
@@ -216,9 +216,9 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
        register_parisc_device(dev);    /* advertise device */
 
 #ifdef DEBUG_PAT
-       pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
        /* dump what we see so far... */
        switch (PAT_GET_ENTITY(dev->mod_info)) {
+               pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
                unsigned long i;
 
        case PAT_ENTITY_PROC:
@@ -259,9 +259,9 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
                                pa_pdc_cell->mod[4 + i * 3]);   /* finish (ie end) */
                        printk(KERN_DEBUG 
                                "  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 
-                               i, io_pdc_cell->mod[2 + i * 3], /* type */
-                               io_pdc_cell->mod[3 + i * 3],    /* start */
-                               io_pdc_cell->mod[4 + i * 3]);   /* finish (ie end) */
+                               i, io_pdc_cell.mod[2 + i * 3],  /* type */
+                               io_pdc_cell.mod[3 + i * 3],     /* start */
+                               io_pdc_cell.mod[4 + i * 3]);    /* finish (ie end) */
                }
                printk(KERN_DEBUG "\n");
                break;
index 518f4f5f1f43ec6b2dcaceb9b2f5c9536097d59f..6eabce62463bbbcf29031143c46d7b75c841b6fa 100644 (file)
@@ -301,7 +301,6 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
 static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
        loff_t *ppos)
 {
-       int err;
        size_t image_size;
        uint32_t image_type;
        uint32_t interface_type;
@@ -320,8 +319,8 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
        if (count != sizeof(uint32_t))
                return -EIO;
 
-       if ((err = copy_from_user(&image_type, buf, sizeof(uint32_t))) != 0) 
-               return err;
+       if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
+               return -EFAULT;
 
        /* Get the interface type and test type */
        interface_type = (image_type >> 16) & 0xffff;
index 40639439d8b35c7cec7c60d978ae6aa5b070b22f..ea6603ee8d24981abe93c5bc79477e626e54254a 100644 (file)
@@ -276,11 +276,7 @@ void *dereference_function_descriptor(void *ptr)
 
 static inline unsigned long brk_rnd(void)
 {
-       /* 8MB for 32bit, 1GB for 64bit */
-       if (is_32bit_task())
-               return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-       else
-               return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+       return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
 }
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
index 0c2a94a0f7518b8082ecda3f0307ead9534ea972..85de47f4eb594564bc9639ff76a099b5ff9aa8c7 100644 (file)
@@ -78,11 +78,6 @@ DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 static void
 init_percpu_prof(unsigned long cpunum)
 {
-       struct cpuinfo_parisc *p;
-
-       p = &per_cpu(cpu_data, cpunum);
-       p->prof_counter = 1;
-       p->prof_multiplier = 1;
 }
 
 
@@ -99,6 +94,7 @@ static int processor_probe(struct parisc_device *dev)
        unsigned long txn_addr;
        unsigned long cpuid;
        struct cpuinfo_parisc *p;
+       struct pdc_pat_cpu_num cpu_info __maybe_unused;
 
 #ifdef CONFIG_SMP
        if (num_online_cpus() >= nr_cpu_ids) {
@@ -123,10 +119,6 @@ static int processor_probe(struct parisc_device *dev)
                ulong status;
                unsigned long bytecnt;
                pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
-#undef USE_PAT_CPUID
-#ifdef USE_PAT_CPUID
-               struct pdc_pat_cpu_num cpu_info;
-#endif
 
                pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
                if (!pa_pdc_cell)
@@ -145,22 +137,27 @@ static int processor_probe(struct parisc_device *dev)
 
                kfree(pa_pdc_cell);
 
+               /* get the cpu number */
+               status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
+               BUG_ON(PDC_OK != status);
+
+               pr_info("Logical CPU #%lu is physical cpu #%lu at location "
+                       "0x%lx with hpa %pa\n",
+                       cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
+                       &dev->hpa.start);
+
+#undef USE_PAT_CPUID
 #ifdef USE_PAT_CPUID
 /* We need contiguous numbers for cpuid. Firmware's notion
  * of cpuid is for physical CPUs and we just don't care yet.
  * We'll care when we need to query PAT PDC about a CPU *after*
  * boot time (ie shutdown a CPU from an OS perspective).
  */
-               /* get the cpu number */
-               status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
-
-               BUG_ON(PDC_OK != status);
-
                if (cpu_info.cpu_num >= NR_CPUS) {
-                       printk(KERN_WARNING "IGNORING CPU at 0x%x,"
+                       printk(KERN_WARNING "IGNORING CPU at %pa,"
                                " cpu_slot_id > NR_CPUS"
                                " (%ld > %d)\n",
-                               dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
+                               &dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
                        /* Ignore CPU since it will only crash */
                        boot_cpu_data.cpu_count--;
                        return 1;
index 0a393a04e89182cba498fa64774dd32177860eb7..a81e177cac7be0c7d1d0f86f0a871d350fea5fdc 100644 (file)
@@ -225,19 +225,17 @@ static unsigned long mmap_rnd(void)
 {
        unsigned long rnd = 0;
 
-       /*
-       *  8 bits of randomness in 32bit mmaps, 20 address space bits
-       * 28 bits of randomness in 64bit mmaps, 40 address space bits
-       */
-       if (current->flags & PF_RANDOMIZE) {
-               if (is_32bit_task())
-                       rnd = get_random_int() % (1<<8);
-               else
-                       rnd = get_random_int() % (1<<28);
-       }
+       if (current->flags & PF_RANDOMIZE)
+               rnd = get_random_int() & MMAP_RND_MASK;
+
        return rnd << PAGE_SHIFT;
 }
 
+unsigned long arch_mmap_rnd(void)
+{
+       return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
+}
+
 static unsigned long mmap_legacy_base(void)
 {
        return TASK_UNMAPPED_BASE + mmap_rnd();
index 325f30d82b6434368425d652402fabf66fd4f8ee..4215f5596c8b6291516a9d39190e77df1b2bf488 100644 (file)
@@ -59,10 +59,9 @@ static unsigned long clocktick __read_mostly;        /* timer cycles per tick */
  */
 irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 {
-       unsigned long now, now2;
+       unsigned long now;
        unsigned long next_tick;
-       unsigned long cycles_elapsed, ticks_elapsed = 1;
-       unsigned long cycles_remainder;
+       unsigned long ticks_elapsed = 0;
        unsigned int cpu = smp_processor_id();
        struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 
@@ -71,102 +70,49 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 
        profile_tick(CPU_PROFILING);
 
-       /* Initialize next_tick to the expected tick time. */
+       /* Initialize next_tick to the old expected tick time. */
        next_tick = cpuinfo->it_value;
 
-       /* Get current cycle counter (Control Register 16). */
-       now = mfctl(16);
-
-       cycles_elapsed = now - next_tick;
-
-       if ((cycles_elapsed >> 6) < cpt) {
-               /* use "cheap" math (add/subtract) instead
-                * of the more expensive div/mul method
-                */
-               cycles_remainder = cycles_elapsed;
-               while (cycles_remainder > cpt) {
-                       cycles_remainder -= cpt;
-                       ticks_elapsed++;
-               }
-       } else {
-               /* TODO: Reduce this to one fdiv op */
-               cycles_remainder = cycles_elapsed % cpt;
-               ticks_elapsed += cycles_elapsed / cpt;
-       }
-
-       /* convert from "division remainder" to "remainder of clock tick" */
-       cycles_remainder = cpt - cycles_remainder;
-
-       /* Determine when (in CR16 cycles) next IT interrupt will fire.
-        * We want IT to fire modulo clocktick even if we miss/skip some.
-        * But those interrupts don't in fact get delivered that regularly.
-        */
-       next_tick = now + cycles_remainder;
+       /* Calculate how many ticks have elapsed. */
+       do {
+               ++ticks_elapsed;
+               next_tick += cpt;
+               now = mfctl(16);
+       } while (next_tick - now > cpt);
 
+       /* Store (in CR16 cycles) the point up to which we have accounted. */
        cpuinfo->it_value = next_tick;
 
-       /* Program the IT when to deliver the next interrupt.
-        * Only bottom 32-bits of next_tick are writable in CR16!
-        */
-       mtctl(next_tick, 16);
+       /* Go do system housekeeping. */
+       if (cpu == 0)
+               xtime_update(ticks_elapsed);
+
+       update_process_times(user_mode(get_irq_regs()));
 
-       /* Skip one clocktick on purpose if we missed next_tick.
+       /* Skip clockticks on purpose if we know we would miss those.
         * The new CR16 must be "later" than current CR16 otherwise
         * itimer would not fire until CR16 wrapped - e.g. 4 seconds
         * later on a 1GHz processor. We'll account for the missed
-        * tick on the next timer interrupt.
+        * ticks on the next timer interrupt.
+        * We want IT to fire modulo clocktick even if we miss/skip some.
+        * But those interrupts don't in fact get delivered that regularly.
         *
         * "next_tick - now" will always give the difference regardless
         * if one or the other wrapped. If "now" is "bigger" we'll end up
         * with a very large unsigned number.
         */
-       now2 = mfctl(16);
-       if (next_tick - now2 > cpt)
-               mtctl(next_tick+cpt, 16);
+       while (next_tick - mfctl(16) > cpt)
+               next_tick += cpt;
 
-#if 1
-/*
- * GGG: DEBUG code for how many cycles programming CR16 used.
- */
-       if (unlikely(now2 - now > 0x3000))      /* 12K cycles */
-               printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
-                       " cyc %lX rem %lX "
-                       " next/now %lX/%lX\n",
-                       cpu, now2 - now, cycles_elapsed, cycles_remainder,
-                       next_tick, now );
-#endif
-
-       /* Can we differentiate between "early CR16" (aka Scenario 1) and
-        * "long delay" (aka Scenario 3)? I don't think so.
-        *
-        * Timer_interrupt will be delivered at least a few hundred cycles
-        * after the IT fires. But it's arbitrary how much time passes
-        * before we call it "late". I've picked one second.
-        *
-        * It's important NO printk's are between reading CR16 and
-        * setting up the next value. May introduce huge variance.
-        */
-       if (unlikely(ticks_elapsed > HZ)) {
-               /* Scenario 3: very long delay?  bad in any case */
-               printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
-                       " cycles %lX rem %lX "
-                       " next/now %lX/%lX\n",
-                       cpu,
-                       cycles_elapsed, cycles_remainder,
-                       next_tick, now );
-       }
-
-       /* Done mucking with unreliable delivery of interrupts.
-        * Go do system house keeping.
+       /* Program the IT when to deliver the next interrupt.
+        * Only bottom 32-bits of next_tick are writable in CR16!
+        * Timer interrupt will be delivered at least a few hundred cycles
+        * after the IT fires, so if we are too close (<= 500 cycles) to the
+        * next cycle, simply skip it.
         */
-
-       if (!--cpuinfo->prof_counter) {
-               cpuinfo->prof_counter = cpuinfo->prof_multiplier;
-               update_process_times(user_mode(get_irq_regs()));
-       }
-
-       if (cpu == 0)
-               xtime_update(ticks_elapsed);
+       if (next_tick - mfctl(16) <= 500)
+               next_tick += cpt;
+       mtctl(next_tick, 16);
 
        return IRQ_HANDLED;
 }
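
The rewritten loop depends on unsigned wrap-around: "next_tick - now" is the forward distance even when CR16 has wrapped, so a difference greater than one clocktick means the programmed time is already in the past. A small sketch of that invariant with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int cpt = 2500000;        /* hypothetical cycles per tick */
        unsigned int now = 0xfffffc00u;    /* CR16 just before wrapping */
        unsigned int next_tick = 1000;     /* target just after the wrap */

        /* Small positive distance despite next_tick < now numerically. */
        printf("distance = %u cycles, in the future: %s\n",
               next_tick - now, (next_tick - now) <= cpt ? "yes" : "no");
        return 0;
    }
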
index 3da87e19887827f55f14bd47bca849bdc9a14492..a8ee573fe610bd5e2d8191b4dffb05e134a6d3c2 100644 (file)
@@ -469,6 +469,7 @@ config KEXEC
 config KEXEC_FILE
        bool "kexec file based system call"
        select KEXEC_CORE
+       select HAVE_IMA_KEXEC
        select BUILD_BIN2C
        depends on PPC64
        depends on CRYPTO=y
diff --git a/arch/powerpc/include/asm/ima.h b/arch/powerpc/include/asm/ima.h
new file mode 100644 (file)
index 0000000..2313bdf
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_POWERPC_IMA_H
+#define _ASM_POWERPC_IMA_H
+
+struct kimage;
+
+int ima_get_kexec_buffer(void **addr, size_t *size);
+int ima_free_kexec_buffer(void);
+
+#ifdef CONFIG_IMA
+void remove_ima_buffer(void *fdt, int chosen_node);
+#else
+static inline void remove_ima_buffer(void *fdt, int chosen_node) {}
+#endif
+
+#ifdef CONFIG_IMA_KEXEC
+int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
+                             size_t size);
+
+int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node);
+#else
+static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
+                                  int chosen_node)
+{
+       remove_ima_buffer(fdt, chosen_node);
+       return 0;
+}
+#endif /* CONFIG_IMA_KEXEC */
+
+#endif /* _ASM_POWERPC_IMA_H */
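
A hedged sketch of how the freshly kexec'ed kernel might consume this interface; the function below is hypothetical, assumes kernel context, and only exercises the contract declared above (both calls return 0 on success or a negative errno):

    /* Hypothetical caller on the kexec'ed kernel. */
    static int restore_carried_measurements(void)
    {
        void *buf;
        size_t size;
        int ret;

        ret = ima_get_kexec_buffer(&buf, &size);
        if (ret)
            return ret;     /* e.g. -ENOENT if nothing was passed */

        /* ... import the measurement list from buf[0..size) ... */

        return ima_free_kexec_buffer();  /* releases the reserved memory */
    }
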
index 6c3b71502fbcbcc6566790b6f910955668bc43aa..25668bc8cb2a4c621db3c4a8b332b8c2ef9882a4 100644 (file)
@@ -94,11 +94,22 @@ static inline bool kdump_in_progress(void)
 #ifdef CONFIG_KEXEC_FILE
 extern struct kexec_file_ops kexec_elf64_ops;
 
+#ifdef CONFIG_IMA_KEXEC
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+       phys_addr_t ima_buffer_addr;
+       size_t ima_buffer_size;
+};
+#endif
+
 int setup_purgatory(struct kimage *image, const void *slave_code,
                    const void *fdt, unsigned long kernel_load_addr,
                    unsigned long fdt_load_addr);
-int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
-                 unsigned long initrd_len, const char *cmdline);
+int setup_new_fdt(const struct kimage *image, void *fdt,
+                 unsigned long initrd_load_addr, unsigned long initrd_len,
+                 const char *cmdline);
+int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size);
 #endif /* CONFIG_KEXEC_FILE */
 
 #else /* !CONFIG_KEXEC_CORE */
index a3a6047fd39502b389d5854f203426b6e79456c1..23f8082d7bfad95f4c9fbb8201e3e58c3104928f 100644 (file)
@@ -112,6 +112,10 @@ obj-$(CONFIG_PCI_MSI)              += msi.o
 obj-$(CONFIG_KEXEC_CORE)       += machine_kexec.o crash.o \
                                   machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC_FILE)       += machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o
+ifeq ($(CONFIG_HAVE_IMA_KEXEC)$(CONFIG_IMA),yy)
+obj-y                          += ima_kexec.o
+endif
+
 obj-$(CONFIG_AUDIT)            += audit.o
 obj64-$(CONFIG_AUDIT)          += compat_audit.o
 
diff --git a/arch/powerpc/kernel/ima_kexec.c b/arch/powerpc/kernel/ima_kexec.c
new file mode 100644 (file)
index 0000000..5ea42c9
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/kexec.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/libfdt.h>
+
+static int get_addr_size_cells(int *addr_cells, int *size_cells)
+{
+       struct device_node *root;
+
+       root = of_find_node_by_path("/");
+       if (!root)
+               return -EINVAL;
+
+       *addr_cells = of_n_addr_cells(root);
+       *size_cells = of_n_size_cells(root);
+
+       of_node_put(root);
+
+       return 0;
+}
+
+static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
+                              size_t *size)
+{
+       int ret, addr_cells, size_cells;
+
+       ret = get_addr_size_cells(&addr_cells, &size_cells);
+       if (ret)
+               return ret;
+
+       if (len < 4 * (addr_cells + size_cells))
+               return -ENOENT;
+
+       *addr = of_read_number(prop, addr_cells);
+       *size = of_read_number(prop + 4 * addr_cells, size_cells);
+
+       return 0;
+}
+
+/**
+ * ima_get_kexec_buffer - get IMA buffer from the previous kernel
+ * @addr:      On successful return, set to point to the buffer contents.
+ * @size:      On successful return, set to the buffer size.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int ima_get_kexec_buffer(void **addr, size_t *size)
+{
+       int ret, len;
+       unsigned long tmp_addr;
+       size_t tmp_size;
+       const void *prop;
+
+       prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len);
+       if (!prop)
+               return -ENOENT;
+
+       ret = do_get_kexec_buffer(prop, len, &tmp_addr, &tmp_size);
+       if (ret)
+               return ret;
+
+       *addr = __va(tmp_addr);
+       *size = tmp_size;
+
+       return 0;
+}
+
+/**
+ * ima_free_kexec_buffer - free memory used by the IMA buffer
+ */
+int ima_free_kexec_buffer(void)
+{
+       int ret;
+       unsigned long addr;
+       size_t size;
+       struct property *prop;
+
+       prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL);
+       if (!prop)
+               return -ENOENT;
+
+       ret = do_get_kexec_buffer(prop->value, prop->length, &addr, &size);
+       if (ret)
+               return ret;
+
+       ret = of_remove_property(of_chosen, prop);
+       if (ret)
+               return ret;
+
+       return memblock_free(addr, size);
+
+}
+
+/**
+ * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt
+ *
+ * The IMA measurement buffer is of no use to a subsequent kernel, so we always
+ * remove it from the device tree.
+ */
+void remove_ima_buffer(void *fdt, int chosen_node)
+{
+       int ret, len;
+       unsigned long addr;
+       size_t size;
+       const void *prop;
+
+       prop = fdt_getprop(fdt, chosen_node, "linux,ima-kexec-buffer", &len);
+       if (!prop)
+               return;
+
+       ret = do_get_kexec_buffer(prop, len, &addr, &size);
+       fdt_delprop(fdt, chosen_node, "linux,ima-kexec-buffer");
+       if (ret)
+               return;
+
+       ret = delete_fdt_mem_rsv(fdt, addr, size);
+       if (!ret)
+               pr_debug("Removed old IMA buffer reservation.\n");
+}
+
+#ifdef CONFIG_IMA_KEXEC
+/**
+ * arch_ima_add_kexec_buffer - do arch-specific steps to add the IMA buffer
+ *
+ * Architectures should use this function to pass on the IMA buffer
+ * information to the next kernel.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
+                             size_t size)
+{
+       image->arch.ima_buffer_addr = load_addr;
+       image->arch.ima_buffer_size = size;
+
+       return 0;
+}
+
+static int write_number(void *p, u64 value, int cells)
+{
+       if (cells == 1) {
+               u32 tmp;
+
+               if (value > U32_MAX)
+                       return -EINVAL;
+
+               tmp = cpu_to_be32(value);
+               memcpy(p, &tmp, sizeof(tmp));
+       } else if (cells == 2) {
+               u64 tmp;
+
+               tmp = cpu_to_be64(value);
+               memcpy(p, &tmp, sizeof(tmp));
+       } else
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * setup_ima_buffer - add IMA buffer information to the fdt
+ * @image:             kexec image being loaded.
+ * @fdt:               Flattened device tree for the next kernel.
+ * @chosen_node:       Offset to the chosen node.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node)
+{
+       int ret, addr_cells, size_cells, entry_size;
+       u8 value[16];
+
+       remove_ima_buffer(fdt, chosen_node);
+       if (!image->arch.ima_buffer_size)
+               return 0;
+
+       ret = get_addr_size_cells(&addr_cells, &size_cells);
+       if (ret)
+               return ret;
+
+       entry_size = 4 * (addr_cells + size_cells);
+
+       if (entry_size > sizeof(value))
+               return -EINVAL;
+
+       ret = write_number(value, image->arch.ima_buffer_addr, addr_cells);
+       if (ret)
+               return ret;
+
+       ret = write_number(value + 4 * addr_cells, image->arch.ima_buffer_size,
+                          size_cells);
+       if (ret)
+               return ret;
+
+       ret = fdt_setprop(fdt, chosen_node, "linux,ima-kexec-buffer", value,
+                         entry_size);
+       if (ret < 0)
+               return -EINVAL;
+
+       ret = fdt_add_mem_rsv(fdt, image->arch.ima_buffer_addr,
+                             image->arch.ima_buffer_size);
+       if (ret)
+               return -EINVAL;
+
+       pr_debug("IMA buffer at 0x%llx, size = 0x%zx\n",
+                image->arch.ima_buffer_addr, image->arch.ima_buffer_size);
+
+       return 0;
+}
+#endif /* CONFIG_IMA_KEXEC */
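
write_number() above packs a value as one or two big-endian 32-bit cells, and setup_ima_buffer() concatenates address then size, so with #address-cells = 2 and #size-cells = 2 the property is 16 bytes. A standalone sketch of the same packing (the address and size values here are made up):

    #include <stdio.h>
    #include <stdint.h>

    static void put_be_cells(uint8_t *p, uint64_t v, int cells)
    {
        /* Write 'cells' 4-byte big-endian words, most significant first. */
        for (int i = 4 * cells - 1; i >= 0; i--) {
            p[i] = (uint8_t)(v & 0xff);
            v >>= 8;
        }
    }

    int main(void)
    {
        uint8_t prop[16];  /* addr_cells = 2, size_cells = 2 => 16 bytes */

        put_be_cells(prop, 0x40000000ULL, 2);      /* hypothetical address */
        put_be_cells(prop + 8, 0x8000ULL, 2);      /* hypothetical size */

        for (int i = 0; i < (int)sizeof(prop); i++)
            printf("%02x%s", prop[i], (i % 8 == 7) ? "\n" : " ");
        return 0;
    }
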
index 6acffd34a70f302fb020317c3cc56bf2564beb68..9a42309b091a67c7a6bd9d62133e48f34e8e94eb 100644 (file)
@@ -627,7 +627,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
                goto out;
        }
 
-       ret = setup_new_fdt(fdt, initrd_load_addr, initrd_len, cmdline);
+       ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline);
        if (ret)
                goto out;
 
index 7abc8a75ee48cbf8b6e3bbb7f639ef9e45da06db..992c0d258e5d564c3b57526e968b26e78bb9292d 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/memblock.h>
 #include <linux/of_fdt.h>
 #include <linux/libfdt.h>
+#include <asm/ima.h>
 
 #define SLAVE_CODE_SIZE                256
 
@@ -180,7 +181,7 @@ int setup_purgatory(struct kimage *image, const void *slave_code,
  *
  * Return: 0 on success, or negative errno on error.
  */
-static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
+int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
 {
        int i, ret, num_rsvs = fdt_num_mem_rsv(fdt);
 
@@ -209,6 +210,7 @@ static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size
 
 /*
  * setup_new_fdt - modify /chosen and memory reservation for the next kernel
+ * @image:             kexec image being loaded.
  * @fdt:               Flattened device tree for the next kernel.
  * @initrd_load_addr:  Address where the next initrd will be loaded.
  * @initrd_len:                Size of the next initrd, or 0 if there will be none.
@@ -217,8 +219,9 @@ static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size
  *
  * Return: 0 on success, or negative errno on error.
  */
-int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
-                 unsigned long initrd_len, const char *cmdline)
+int setup_new_fdt(const struct kimage *image, void *fdt,
+                 unsigned long initrd_load_addr, unsigned long initrd_len,
+                 const char *cmdline)
 {
        int ret, chosen_node;
        const void *prop;
@@ -328,6 +331,12 @@ int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
                }
        }
 
+       ret = setup_ima_buffer(image, fdt, chosen_node);
+       if (ret) {
+               pr_err("Error setting up the new device tree.\n");
+               return ret;
+       }
+
        ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
        if (ret) {
                pr_err("Error setting up the new device tree.\n");
index 3803b0addf657edafae38ee217136db69a5db900..6c0ba75fb2561f730908050c18afc35ebd45eb5f 100644 (file)
@@ -117,9 +117,6 @@ static const struct of_device_id of_device_ids[] = {
        {
                .compatible     = "fsl,qe",
        },
-       {
-               .compatible    = "fsl,fman",
-       },
        /* The following two are for the Freescale hypervisor */
        {
                .name           = "hypervisor",
index 64024c9995314a9488bb83261baa1d6f7a9b5cae..e487493bbd47f0f84caed99f6c952bf6b3062413 100644 (file)
@@ -412,6 +412,19 @@ config GOLDFISH
        def_bool y
        depends on X86_GOLDFISH
 
+config INTEL_RDT_A
+       bool "Intel Resource Director Technology Allocation support"
+       default n
+       depends on X86 && CPU_SUP_INTEL
+       select KERNFS
+       help
+         Select to enable resource allocation which is a sub-feature of
+         Intel Resource Director Technology (RDT). More information about
+         RDT can be found in the Intel x86 Architecture Software
+         Developer Manual.
+
+         Say N if unsure.
+
 if X86_32
 config X86_EXTENDED_PLATFORM
        bool "Support for extended (non-PC) x86 platforms"
index cb8522290e6a3f6dc3b5baacfc618d0ee8db65e3..86138267b68a77753737ffd4c4be2ece6a24cc84 100644 (file)
@@ -2110,6 +2110,27 @@ again:
                    GLOBAL_STATUS_LBRS_FROZEN);
        if (!status)
                goto done;
+       /*
+        * In case multiple PEBS events are sampled at the same time,
+        * it is possible to have GLOBAL_STATUS bit 62 set indicating
+        * PEBS buffer overflow and also seeing at most 3 PEBS counters
+        * having their bits set in the status register. This is a sign
+        * that there was at least one PEBS record pending at the time
+        * of the PMU interrupt. PEBS counters must only be processed
+        * via the drain_pebs() calls and not via the regular sample
+        * processing loop coming after that function, otherwise
+        * phony regular samples may be generated in the sampling buffer
+        * not marked with the EXACT tag. Another possibility is to have
+        * one PEBS event and at least one non-PEBS event which overflows
+        * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
+        * not be set, yet on Skylake the overflow status bit for the PEBS
+        * counter will be.
+        *
+        * To avoid this problem, we systematically ignore the PEBS-enabled
+        * counters from the GLOBAL_STATUS mask and we always process PEBS
+        * events via drain_pebs().
+        */
+       status &= ~cpuc->pebs_enabled;
 
        /*
         * PEBS overflow sets bit 62 in the global status register
@@ -2117,15 +2138,6 @@ again:
        if (__test_and_clear_bit(62, (unsigned long *)&status)) {
                handled++;
                x86_pmu.drain_pebs(regs);
-               /*
-                * There are cases where, even though, the PEBS ovfl bit is set
-                * in GLOBAL_OVF_STATUS, the PEBS events may also have their
-                * overflow bits set for their counters. We must clear them
-                * here because they have been processed as exact samples in
-                * the drain_pebs() routine. They must not be processed again
-                * in the for_each_bit_set() loop for regular samples below.
-                */
-               status &= ~cpuc->pebs_enabled;
                status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
        }
 
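
The fix moves the masking ahead of the bit-62 test, so PEBS-enabled counters never reach the regular sampling loop regardless of which overflow bits are set. A tiny sketch of the masking itself (the counter bits are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t status = (1ULL << 62) | 0x5;  /* PEBS ovfl + counters 0,2 */
        uint64_t pebs_enabled = 0x1;           /* counter 0 uses PEBS */

        /* Always strip PEBS counters before the generic loop. */
        status &= ~pebs_enabled;

        /* Counter 2 and bit 62 remain; drain_pebs() handles the rest. */
        printf("bits left for the generic loop: 0x%llx\n",
               (unsigned long long)status);
        return 0;
    }
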
index 8f82b02934fa701a451ee5e7d8f76193eaefc2fa..0c45cc8e64ba77f6988cff5a0e5443dfe0449435 100644 (file)
@@ -7,9 +7,9 @@
 #include <linux/perf_event.h>
 #include <linux/slab.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel_rdt_common.h>
 #include "../perf_event.h"
 
-#define MSR_IA32_PQR_ASSOC     0x0c8f
 #define MSR_IA32_QM_CTR                0x0c8e
 #define MSR_IA32_QM_EVTSEL     0x0c8d
 
@@ -24,32 +24,13 @@ static unsigned int cqm_l3_scale; /* supposedly cacheline size */
 static bool cqm_enabled, mbm_enabled;
 unsigned int mbm_socket_max;
 
-/**
- * struct intel_pqr_state - State cache for the PQR MSR
- * @rmid:              The cached Resource Monitoring ID
- * @closid:            The cached Class Of Service ID
- * @rmid_usecnt:       The usage counter for rmid
- *
- * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
- * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
- * contains both parts, so we need to cache them.
- *
- * The cache also helps to avoid pointless updates if the value does
- * not change.
- */
-struct intel_pqr_state {
-       u32                     rmid;
-       u32                     closid;
-       int                     rmid_usecnt;
-};
-
 /*
  * The cached intel_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Both functions which modify the state
  * (intel_cqm_event_start and intel_cqm_event_stop) are called with
  * interrupts disabled, which is sufficient for the protection.
  */
-static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
 static struct hrtimer *mbm_timers;
 /**
  * struct sample - mbm event's (local or total) data
index 272427700d48deebdbcb582b1bd4af143e44a58c..e6832be714bc6e76965e1faa7cedcc3ad309c8fa 100644 (file)
@@ -669,7 +669,7 @@ static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
-       EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
+       UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
index 6ccbf1aaa7ce1f72021757593c99b555480f78da..eafee3161d1c0fa04cd82b12dcf15003a89d04b4 100644 (file)
 
 #define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3     ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2     ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3     ( 7*32+ 6) /* Code and Data Prioritization L3 */
 
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_RTM                ( 9*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_CQM                ( 9*32+12) /* Cache QoS Monitoring */
 #define X86_FEATURE_MPX                ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A      ( 9*32+15) /* Resource Director Technology Allocation */
 #define X86_FEATURE_AVX512F    ( 9*32+16) /* AVX-512 Foundation */
 #define X86_FEATURE_AVX512DQ   ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
 #define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
new file mode 100644 (file)
index 0000000..95ce5c8
--- /dev/null
@@ -0,0 +1,224 @@
+#ifndef _ASM_X86_INTEL_RDT_H
+#define _ASM_X86_INTEL_RDT_H
+
+#ifdef CONFIG_INTEL_RDT_A
+
+#include <linux/kernfs.h>
+#include <linux/jump_label.h>
+
+#include <asm/intel_rdt_common.h>
+
+#define IA32_L3_QOS_CFG                0xc81
+#define IA32_L3_CBM_BASE       0xc90
+#define IA32_L2_CBM_BASE       0xd10
+
+#define L3_QOS_CDP_ENABLE      0x01ULL
+
+/**
+ * struct rdtgroup - store rdtgroup's data in resctrl file system.
+ * @kn:                                kernfs node
+ * @rdtgroup_list:             linked list for all rdtgroups
+ * @closid:                    closid for this rdtgroup
+ * @cpu_mask:                  CPUs assigned to this rdtgroup
+ * @flags:                     status bits
+ * @waitcount:                 how many cpus expect to find this
+ *                             group when they acquire rdtgroup_mutex
+ */
+struct rdtgroup {
+       struct kernfs_node      *kn;
+       struct list_head        rdtgroup_list;
+       int                     closid;
+       struct cpumask          cpu_mask;
+       int                     flags;
+       atomic_t                waitcount;
+};
+
+/* rdtgroup.flags */
+#define        RDT_DELETED             1
+
+/* List of all resource groups */
+extern struct list_head rdt_all_groups;
+
+int __init rdtgroup_init(void);
+
+/**
+ * struct rftype - describe each file in the resctrl file system
+ * @name: file name
+ * @mode: access mode
+ * @kf_ops: operations
+ * @seq_show: show content of the file
+ * @write: write to the file
+ */
+struct rftype {
+       char                    *name;
+       umode_t                 mode;
+       struct kernfs_ops       *kf_ops;
+
+       int (*seq_show)(struct kernfs_open_file *of,
+                       struct seq_file *sf, void *v);
+       /*
+        * write() is the generic write callback which maps directly to
+        * kernfs write operation and overrides all other operations.
+        * Maximum write size is determined by ->max_write_len.
+        */
+       ssize_t (*write)(struct kernfs_open_file *of,
+                        char *buf, size_t nbytes, loff_t off);
+};
+
+/**
+ * struct rdt_resource - attributes of an RDT resource
+ * @enabled:                   Is this feature enabled on this machine
+ * @capable:                   Is this feature available on this machine
+ * @name:                      Name to use in "schemata" file
+ * @num_closid:                        Number of CLOSIDs available
+ * @max_cbm:                   Largest Cache Bit Mask allowed
+ * @min_cbm_bits:              Minimum number of consecutive bits to be set
+ *                             in a cache bit mask
+ * @domains:                   All domains for this resource
+ * @num_domains:               Number of domains active
+ * @msr_base:                  Base MSR address for CBMs
+ * @tmp_cbms:                  Scratch space when updating schemata
+ * @num_tmp_cbms:              Number of CBMs in tmp_cbms
+ * @cache_level:               Which cache level defines scope of this domain
+ * @cbm_idx_multi:             Multiplier of CBM index
+ * @cbm_idx_offset:            Offset of CBM index. CBM index is computed by:
+ *                             closid * cbm_idx_multi + cbm_idx_offset
+ */
+struct rdt_resource {
+       bool                    enabled;
+       bool                    capable;
+       char                    *name;
+       int                     num_closid;
+       int                     cbm_len;
+       int                     min_cbm_bits;
+       u32                     max_cbm;
+       struct list_head        domains;
+       int                     num_domains;
+       int                     msr_base;
+       u32                     *tmp_cbms;
+       int                     num_tmp_cbms;
+       int                     cache_level;
+       int                     cbm_idx_multi;
+       int                     cbm_idx_offset;
+};
+
+/**
+ * struct rdt_domain - group of cpus sharing an RDT resource
+ * @list:      all instances of this resource
+ * @id:                unique id for this instance
+ * @cpu_mask:  which cpus share this resource
+ * @cbm:       array of cache bit masks (indexed by CLOSID)
+ */
+struct rdt_domain {
+       struct list_head        list;
+       int                     id;
+       struct cpumask          cpu_mask;
+       u32                     *cbm;
+};
+
+/**
+ * struct msr_param - set a range of MSRs from a domain
+ * @res:       The resource to use
+ * @low:       Beginning index from base MSR
+ * @high:      End index
+ */
+struct msr_param {
+       struct rdt_resource     *res;
+       int                     low;
+       int                     high;
+};
+
+extern struct mutex rdtgroup_mutex;
+
+extern struct rdt_resource rdt_resources_all[];
+extern struct rdtgroup rdtgroup_default;
+DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+
+int __init rdtgroup_init(void);
+
+enum {
+       RDT_RESOURCE_L3,
+       RDT_RESOURCE_L3DATA,
+       RDT_RESOURCE_L3CODE,
+       RDT_RESOURCE_L2,
+
+       /* Must be the last */
+       RDT_NUM_RESOURCES,
+};
+
+#define for_each_capable_rdt_resource(r)                                     \
+       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+            r++)                                                             \
+               if (r->capable)
+
+#define for_each_enabled_rdt_resource(r)                                     \
+       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+            r++)                                                             \
+               if (r->enabled)
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
+union cpuid_0x10_1_eax {
+       struct {
+               unsigned int cbm_len:5;
+       } split;
+       unsigned int full;
+};
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
+union cpuid_0x10_1_edx {
+       struct {
+               unsigned int cos_max:16;
+       } split;
+       unsigned int full;
+};
+
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+
+void rdt_cbm_update(void *arg);
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
+void rdtgroup_kn_unlock(struct kernfs_node *kn);
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+                               char *buf, size_t nbytes, loff_t off);
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+                          struct seq_file *s, void *v);
+
+/*
+ * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
+ *
+ * The following considerations are made so that this has minimal impact
+ * on the scheduler hot path:
+ * - This will stay as no-op unless we are running on an Intel SKU
+ *   which supports resource control and we enable by mounting the
+ *   resctrl file system.
+ * - Caches the per cpu CLOSid values and does the MSR write only
+ *   when a task with a different CLOSid is scheduled in.
+ *
+ * Must be called with preemption disabled.
+ */
+static inline void intel_rdt_sched_in(void)
+{
+       if (static_branch_likely(&rdt_enable_key)) {
+               struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+               int closid;
+
+               /*
+                * If this task has a closid assigned, use it.
+                * Else use the closid assigned to this cpu.
+                */
+               closid = current->closid;
+               if (closid == 0)
+                       closid = this_cpu_read(cpu_closid);
+
+               if (closid != state->closid) {
+                       state->closid = closid;
+                       wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
+               }
+       }
+}
+
+#else
+
+static inline void intel_rdt_sched_in(void) {}
+
+#endif /* CONFIG_INTEL_RDT_A */
+#endif /* _ASM_X86_INTEL_RDT_H */
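
The cbm_idx_multi/cbm_idx_offset fields exist because CDP splits each CLOSID into a data/code pair that interleaves in the same MSR range; the kernel-doc above gives the formula closid * cbm_idx_multi + cbm_idx_offset. A worked sketch using the IA32_L3_CBM_BASE value from this header (the per-resource multipliers match the table later set up in intel_rdt.c):

    #include <stdio.h>

    #define IA32_L3_CBM_BASE 0xc90

    static int cbm_msr(int base, int multi, int offset, int closid)
    {
        return base + closid * multi + offset;
    }

    int main(void)
    {
        int closid = 3;

        printf("L3     -> MSR 0x%x\n", cbm_msr(IA32_L3_CBM_BASE, 1, 0, closid));
        printf("L3DATA -> MSR 0x%x\n", cbm_msr(IA32_L3_CBM_BASE, 2, 0, closid));
        printf("L3CODE -> MSR 0x%x\n", cbm_msr(IA32_L3_CBM_BASE, 2, 1, closid));
        return 0;
    }
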
diff --git a/arch/x86/include/asm/intel_rdt_common.h b/arch/x86/include/asm/intel_rdt_common.h
new file mode 100644 (file)
index 0000000..b31081b
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef _ASM_X86_INTEL_RDT_COMMON_H
+#define _ASM_X86_INTEL_RDT_COMMON_H
+
+#define MSR_IA32_PQR_ASSOC     0x0c8f
+
+/**
+ * struct intel_pqr_state - State cache for the PQR MSR
+ * @rmid:              The cached Resource Monitoring ID
+ * @closid:            The cached Class Of Service ID
+ * @rmid_usecnt:       The usage counter for rmid
+ *
+ * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
+ * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
+ * contains both parts, so we need to cache them.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
+ */
+struct intel_pqr_state {
+       u32                     rmid;
+       u32                     closid;
+       int                     rmid_usecnt;
+};
+
+DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+
+#endif /* _ASM_X86_INTEL_RDT_COMMON_H */
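
Per the layout described above, a PQR update writes both halves at once: RMID in the low word, CLOSID in the high word, which is what wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid) in intel_rdt_sched_in() amounts to. A sketch with illustrative values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t rmid = 5;      /* low 10 bits are the RMID */
        uint32_t closid = 3;    /* upper 32 bits carry the CLOSID */
        uint64_t pqr = ((uint64_t)closid << 32) | rmid;

        printf("MSR_IA32_PQR_ASSOC <- 0x%016llx\n", (unsigned long long)pqr);
        return 0;
    }
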
index 7892530cbacfb38e81684b1c5e60f90662e0d872..2e25038dbd932cfec9381eccbc1f4c30fe0c7b2e 100644 (file)
@@ -704,6 +704,7 @@ struct kvm_apic_map {
 
 /* Hyper-V emulation context */
 struct kvm_hv {
+       struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;
index 33b63670bf09e6d34c1f194cfe3600d9ea16dcb4..52000010c62ebaaf60939186128d44af757d2226 100644 (file)
@@ -32,6 +32,8 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)         += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)     += transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)           += umc.o
 
+obj-$(CONFIG_INTEL_RDT_A)      += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_schemata.o
+
 obj-$(CONFIG_X86_MCE)                  += mcheck/
 obj-$(CONFIG_MTRR)                     += mtrr/
 obj-$(CONFIG_MICROCODE)                        += microcode/
index be633715650212059e8a67eb7a4974b188d5e076..0282b0df004a86023d00abceb53cbed24fe118f6 100644 (file)
@@ -153,6 +153,7 @@ struct _cpuid4_info_regs {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
+       unsigned int id;
        unsigned long size;
        struct amd_northbridge *nb;
 };
@@ -894,6 +895,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
 static void ci_leaf_init(struct cacheinfo *this_leaf,
                         struct _cpuid4_info_regs *base)
 {
+       this_leaf->id = base->id;
+       this_leaf->attributes = CACHE_ID;
        this_leaf->level = base->eax.split.level;
        this_leaf->type = cache_type_map[base->eax.split.type];
        this_leaf->coherency_line_size =
@@ -920,6 +923,22 @@ static int __init_cache_level(unsigned int cpu)
        return 0;
 }
 
+/*
+ * The maximum number of threads sharing this cache comes from
+ * CPUID.4:EAX[25-14], with ECX as the cache index. Right-shift the apicid
+ * by that number's order to get the cache id for this cache node.
+ */
+static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
+{
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+       unsigned long num_threads_sharing;
+       int index_msb;
+
+       num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
+       index_msb = get_count_order(num_threads_sharing);
+       id4_regs->id = c->apicid >> index_msb;
+}
+
 static int __populate_cache_leaves(unsigned int cpu)
 {
        unsigned int idx, ret;
@@ -931,6 +950,7 @@ static int __populate_cache_leaves(unsigned int cpu)
                ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
                if (ret)
                        return ret;
+               get_cache_id(cpu, &id4_regs);
                ci_leaf_init(this_leaf++, &id4_regs);
                __cache_cpumap_setup(cpu, idx, &id4_regs);
        }
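
get_cache_id() thus rounds the thread-sharing count up to a power of two and drops that many low bits of the APIC ID, so all threads that share a cache collapse to one id. A standalone sketch, assuming 16 threads share the cache:

    #include <stdio.h>

    /* Behaves like the kernel's get_count_order() for n >= 1. */
    static int count_order(unsigned long n)
    {
        int order = 0;

        while ((1UL << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long num_threads_sharing = 16;  /* CPUID.4:EAX[25:14] + 1 */
        int index_msb = count_order(num_threads_sharing);

        /* APIC IDs 0..15 -> id 0, 16..31 -> id 1, 32..47 -> id 2. */
        for (int apicid = 0; apicid < 48; apicid += 16)
            printf("apicid %2d -> cache id %d\n",
                   apicid, apicid >> index_msb);
        return 0;
    }
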
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
new file mode 100644 (file)
index 0000000..5a533fe
--- /dev/null
@@ -0,0 +1,403 @@
+/*
+ * Resource Director Technology (RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ *    Fenghua Yu <fenghua.yu@intel.com>
+ *    Tony Luck <tony.luck@intel.com>
+ *    Vikas Shivappa <vikas.shivappa@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpuhotplug.h>
+
+#include <asm/intel-family.h>
+#include <asm/intel_rdt.h>
+
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+
+struct rdt_resource rdt_resources_all[] = {
+       {
+               .name           = "L3",
+               .domains        = domain_init(RDT_RESOURCE_L3),
+               .msr_base       = IA32_L3_CBM_BASE,
+               .min_cbm_bits   = 1,
+               .cache_level    = 3,
+               .cbm_idx_multi  = 1,
+               .cbm_idx_offset = 0
+       },
+       {
+               .name           = "L3DATA",
+               .domains        = domain_init(RDT_RESOURCE_L3DATA),
+               .msr_base       = IA32_L3_CBM_BASE,
+               .min_cbm_bits   = 1,
+               .cache_level    = 3,
+               .cbm_idx_multi  = 2,
+               .cbm_idx_offset = 0
+       },
+       {
+               .name           = "L3CODE",
+               .domains        = domain_init(RDT_RESOURCE_L3CODE),
+               .msr_base       = IA32_L3_CBM_BASE,
+               .min_cbm_bits   = 1,
+               .cache_level    = 3,
+               .cbm_idx_multi  = 2,
+               .cbm_idx_offset = 1
+       },
+       {
+               .name           = "L2",
+               .domains        = domain_init(RDT_RESOURCE_L2),
+               .msr_base       = IA32_L2_CBM_BASE,
+               .min_cbm_bits   = 1,
+               .cache_level    = 2,
+               .cbm_idx_multi  = 1,
+               .cbm_idx_offset = 0
+       },
+};
+
+static int cbm_idx(struct rdt_resource *r, int closid)
+{
+       return closid * r->cbm_idx_multi + r->cbm_idx_offset;
+}
+
+/*
+ * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
+ * as they do not have CPUID enumeration support for Cache allocation.
+ * The check for Vendor/Family/Model is not enough to guarantee that
+ * the MSRs won't #GP fault because only the following SKUs support
+ * CAT:
+ *     Intel(R) Xeon(R)  CPU E5-2658  v3  @  2.20GHz
+ *     Intel(R) Xeon(R)  CPU E5-2648L v3  @  1.80GHz
+ *     Intel(R) Xeon(R)  CPU E5-2628L v3  @  2.00GHz
+ *     Intel(R) Xeon(R)  CPU E5-2618L v3  @  2.30GHz
+ *     Intel(R) Xeon(R)  CPU E5-2608L v3  @  2.00GHz
+ *     Intel(R) Xeon(R)  CPU E5-2658A v3  @  2.20GHz
+ *
+ * Probe by trying to write the first of the L3 cache mask registers
+ * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
+ * is always 20 on hsw server parts. The minimum cache bitmask length
+ * allowed for HSW server is always 2 bits. Hardcode all of them.
+ */
+static inline bool cache_alloc_hsw_probe(void)
+{
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+           boot_cpu_data.x86 == 6 &&
+           boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
+               struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
+               u32 l, h, max_cbm = BIT_MASK(20) - 1;
+
+               if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+                       return false;
+               rdmsr(IA32_L3_CBM_BASE, l, h);
+
+               /* If the bits did not all stick in the MSR, the probe failed */
+               if (l != max_cbm)
+                       return false;
+
+               r->num_closid = 4;
+               r->cbm_len = 20;
+               r->max_cbm = max_cbm;
+               r->min_cbm_bits = 2;
+               r->capable = true;
+               r->enabled = true;
+
+               return true;
+       }
+
+       return false;
+}
+
+static void rdt_get_config(int idx, struct rdt_resource *r)
+{
+       union cpuid_0x10_1_eax eax;
+       union cpuid_0x10_1_edx edx;
+       u32 ebx, ecx;
+
+       cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
+       r->num_closid = edx.split.cos_max + 1;
+       r->cbm_len = eax.split.cbm_len + 1;
+       r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+       r->capable = true;
+       r->enabled = true;
+}
+
+static void rdt_get_cdp_l3_config(int type)
+{
+       struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_resource *r = &rdt_resources_all[type];
+
+       r->num_closid = r_l3->num_closid / 2;
+       r->cbm_len = r_l3->cbm_len;
+       r->max_cbm = r_l3->max_cbm;
+       r->capable = true;
+       /*
+        * CDP is disabled by default. It can be enabled with the "cdp" mount
+        * option when the resctrl filesystem is mounted.
+        */
+       r->enabled = false;
+}
+
+static inline bool get_rdt_resources(void)
+{
+       bool ret = false;
+
+       if (cache_alloc_hsw_probe())
+               return true;
+
+       if (!boot_cpu_has(X86_FEATURE_RDT_A))
+               return false;
+
+       if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
+               rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+               if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
+                       rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
+                       rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
+               }
+               ret = true;
+       }
+       if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
+               /* CPUID 0x10.2 fields have the same format as 0x10.1 */
+               rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+               ret = true;
+       }
+
+       return ret;
+}
+
+static int get_cache_id(int cpu, int level)
+{
+       struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+       int i;
+
+       for (i = 0; i < ci->num_leaves; i++) {
+               if (ci->info_list[i].level == level)
+                       return ci->info_list[i].id;
+       }
+
+       return -1;
+}
+
+void rdt_cbm_update(void *arg)
+{
+       struct msr_param *m = (struct msr_param *)arg;
+       struct rdt_resource *r = m->res;
+       int i, cpu = smp_processor_id();
+       struct rdt_domain *d;
+
+       list_for_each_entry(d, &r->domains, list) {
+               /* Find the domain that contains this CPU */
+               if (cpumask_test_cpu(cpu, &d->cpu_mask))
+                       goto found;
+       }
+       pr_info_once("cpu %d not found in any domain for resource %s\n",
+                    cpu, r->name);
+
+       return;
+
+found:
+       for (i = m->low; i < m->high; i++) {
+               int idx = cbm_idx(r, i);
+
+               wrmsrl(r->msr_base + idx, d->cbm[i]);
+       }
+}
+
+/*
+ * rdt_find_domain - Find a domain in a resource that matches input resource id
+ *
+ * Search resource r's domain list to find the resource id. If the resource
+ * id is found in a domain, return the domain. Otherwise, if requested by
+ * caller, return the first domain whose id is bigger than the input id.
+ * The domain list is sorted by id in ascending order.
+ */
+static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+                                         struct list_head **pos)
+{
+       struct rdt_domain *d;
+       struct list_head *l;
+
+       if (id < 0)
+               return ERR_PTR(id);
+
+       list_for_each(l, &r->domains) {
+               d = list_entry(l, struct rdt_domain, list);
+               /* When id is found, return its domain. */
+               if (id == d->id)
+                       return d;
+               /* Stop searching when finding id's position in sorted list. */
+               if (id < d->id)
+                       break;
+       }
+
+       if (pos)
+               *pos = l;
+
+       return NULL;
+}
+
+/*
+ * domain_add_cpu - Add a cpu to a resource's domain list.
+ *
+ * If an existing domain in the resource r's domain list matches the cpu's
+ * resource id, add the cpu to the domain.
+ *
+ * Otherwise, a new domain is allocated and inserted into the right position
+ * in the domain list sorted by id in ascending order.
+ *
+ * The order in the domain list is visible to users when we print entries
+ * in the schemata file and schemata input is validated to have the same order
+ * as this list.
+ */
+static void domain_add_cpu(int cpu, struct rdt_resource *r)
+{
+       int i, id = get_cache_id(cpu, r->cache_level);
+       struct list_head *add_pos = NULL;
+       struct rdt_domain *d;
+
+       d = rdt_find_domain(r, id, &add_pos);
+       if (IS_ERR(d)) {
+               pr_warn("Couldn't find cache id for cpu %d\n", cpu);
+               return;
+       }
+
+       if (d) {
+               cpumask_set_cpu(cpu, &d->cpu_mask);
+               return;
+       }
+
+       d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
+       if (!d)
+               return;
+
+       d->id = id;
+
+       d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
+       if (!d->cbm) {
+               kfree(d);
+               return;
+       }
+
+       for (i = 0; i < r->num_closid; i++) {
+               int idx = cbm_idx(r, i);
+
+               d->cbm[i] = r->max_cbm;
+               wrmsrl(r->msr_base + idx, d->cbm[i]);
+       }
+
+       cpumask_set_cpu(cpu, &d->cpu_mask);
+       list_add_tail(&d->list, add_pos);
+       r->num_domains++;
+}
+
+static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+{
+       int id = get_cache_id(cpu, r->cache_level);
+       struct rdt_domain *d;
+
+       d = rdt_find_domain(r, id, NULL);
+       if (IS_ERR_OR_NULL(d)) {
+               pr_warn("Couldn't find cache id for cpu %d\n", cpu);
+               return;
+       }
+
+       cpumask_clear_cpu(cpu, &d->cpu_mask);
+       if (cpumask_empty(&d->cpu_mask)) {
+               r->num_domains--;
+               kfree(d->cbm);
+               list_del(&d->list);
+               kfree(d);
+       }
+}
+
+static void clear_closid(int cpu)
+{
+       struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+
+       per_cpu(cpu_closid, cpu) = 0;
+       state->closid = 0;
+       wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
+}
+
+static int intel_rdt_online_cpu(unsigned int cpu)
+{
+       struct rdt_resource *r;
+
+       mutex_lock(&rdtgroup_mutex);
+       for_each_capable_rdt_resource(r)
+               domain_add_cpu(cpu, r);
+       /* The cpu is added to the default rdtgroup when it comes online. */
+       cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
+       clear_closid(cpu);
+       mutex_unlock(&rdtgroup_mutex);
+
+       return 0;
+}
+
+static int intel_rdt_offline_cpu(unsigned int cpu)
+{
+       struct rdtgroup *rdtgrp;
+       struct rdt_resource *r;
+
+       mutex_lock(&rdtgroup_mutex);
+       for_each_capable_rdt_resource(r)
+               domain_remove_cpu(cpu, r);
+       list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+               if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
+                       break;
+       }
+       clear_closid(cpu);
+       mutex_unlock(&rdtgroup_mutex);
+
+       return 0;
+}
+
+static int __init intel_rdt_late_init(void)
+{
+       struct rdt_resource *r;
+       int state, ret;
+
+       if (!get_rdt_resources())
+               return -ENODEV;
+
+       state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                                 "x86/rdt/cat:online:",
+                                 intel_rdt_online_cpu, intel_rdt_offline_cpu);
+       if (state < 0)
+               return state;
+
+       ret = rdtgroup_init();
+       if (ret) {
+               cpuhp_remove_state(state);
+               return ret;
+       }
+
+       for_each_capable_rdt_resource(r)
+               pr_info("Intel RDT %s allocation detected\n", r->name);
+
+       return 0;
+}
+
+late_initcall(intel_rdt_late_init);
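
For reference, the cbm_idx() helper above is what interleaves the per-CLOSID
mask MSRs once CDP splits L3 into data and code masks: L3DATA entries land at
even offsets from the base MSR and L3CODE at odd ones. A standalone sketch of
the arithmetic; the 0xc90 base is the SDM's IA32_L3_QOS_MASK_0, everything
else mirrors the rdt_resources_all[] table above:

	#include <stdio.h>

	#define IA32_L3_CBM_BASE 0xc90

	static int cbm_idx(int cbm_idx_multi, int cbm_idx_offset, int closid)
	{
		return closid * cbm_idx_multi + cbm_idx_offset;
	}

	int main(void)
	{
		int closid = 3;

		/* No CDP: one mask MSR per CLOSID */
		printf("L3     msr 0x%x\n", IA32_L3_CBM_BASE + cbm_idx(1, 0, closid));
		/* CDP: two MSRs per CLOSID, data at even index, code at odd */
		printf("L3DATA msr 0x%x\n", IA32_L3_CBM_BASE + cbm_idx(2, 0, closid));
		printf("L3CODE msr 0x%x\n", IA32_L3_CBM_BASE + cbm_idx(2, 1, closid));
		return 0;
	}
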
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
new file mode 100644 (file)
index 0000000..8af04af
--- /dev/null
@@ -0,0 +1,1115 @@
+/*
+ * User interface for Resource Allocation in Resource Director Technology (RDT)
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel(R) x86 Architecture
+ * Software Developer Manual.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/task_work.h>
+
+#include <uapi/linux/magic.h>
+
+#include <asm/intel_rdt.h>
+#include <asm/intel_rdt_common.h>
+
+DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
+struct kernfs_root *rdt_root;
+struct rdtgroup rdtgroup_default;
+LIST_HEAD(rdt_all_groups);
+
+/* Kernel fs node for "info" directory under root */
+static struct kernfs_node *kn_info;
+
+/*
+ * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
+ * we can keep a bitmap of free CLOSIDs in a single integer.
+ *
+ * Using a global CLOSID across all resources has some advantages and
+ * some drawbacks:
+ * + We can simply set "current->closid" to assign a task to a resource
+ *   group.
+ * + Context switch code can avoid extra memory references deciding which
+ *   CLOSID to load into the PQR_ASSOC MSR.
+ * - We give up some options in configuring resource groups across multi-socket
+ *   systems.
+ * - Our choices on how to configure each resource become progressively more
+ *   limited as the number of resources grows.
+ */
+static int closid_free_map;
+
+static void closid_init(void)
+{
+       struct rdt_resource *r;
+       int rdt_min_closid = 32;
+
+       /* Compute rdt_min_closid across all resources */
+       for_each_enabled_rdt_resource(r)
+               rdt_min_closid = min(rdt_min_closid, r->num_closid);
+
+       closid_free_map = BIT_MASK(rdt_min_closid) - 1;
+
+       /* CLOSID 0 is always reserved for the default group */
+       closid_free_map &= ~1;
+}
+
+int closid_alloc(void)
+{
+       int closid = ffs(closid_free_map);
+
+       if (closid == 0)
+               return -ENOSPC;
+       closid--;
+       closid_free_map &= ~(1 << closid);
+
+       return closid;
+}
+
+static void closid_free(int closid)
+{
+       closid_free_map |= 1 << closid;
+}
+
+/* set uid and gid of rdtgroup dirs and files to those of the creator */
+static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
+{
+       struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
+                               .ia_uid = current_fsuid(),
+                               .ia_gid = current_fsgid(), };
+
+       if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
+           gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
+               return 0;
+
+       return kernfs_setattr(kn, &iattr);
+}
+
+static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
+{
+       struct kernfs_node *kn;
+       int ret;
+
+       kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
+                                 0, rft->kf_ops, rft, NULL, NULL);
+       if (IS_ERR(kn))
+               return PTR_ERR(kn);
+
+       ret = rdtgroup_kn_set_ugid(kn);
+       if (ret) {
+               kernfs_remove(kn);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int rdtgroup_add_files(struct kernfs_node *kn, struct rftype *rfts,
+                             int len)
+{
+       struct rftype *rft;
+       int ret;
+
+       lockdep_assert_held(&rdtgroup_mutex);
+
+       for (rft = rfts; rft < rfts + len; rft++) {
+               ret = rdtgroup_add_file(kn, rft);
+               if (ret)
+                       goto error;
+       }
+
+       return 0;
+error:
+       pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
+       while (--rft >= rfts)
+               kernfs_remove_by_name(kn, rft->name);
+       return ret;
+}
+
+static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
+{
+       struct kernfs_open_file *of = m->private;
+       struct rftype *rft = of->kn->priv;
+
+       if (rft->seq_show)
+               return rft->seq_show(of, m, arg);
+       return 0;
+}
+
+static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
+                                  size_t nbytes, loff_t off)
+{
+       struct rftype *rft = of->kn->priv;
+
+       if (rft->write)
+               return rft->write(of, buf, nbytes, off);
+
+       return -EINVAL;
+}
+
+static struct kernfs_ops rdtgroup_kf_single_ops = {
+       .atomic_write_len       = PAGE_SIZE,
+       .write                  = rdtgroup_file_write,
+       .seq_show               = rdtgroup_seqfile_show,
+};
+
+static int rdtgroup_cpus_show(struct kernfs_open_file *of,
+                             struct seq_file *s, void *v)
+{
+       struct rdtgroup *rdtgrp;
+       int ret = 0;
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+       if (rdtgrp)
+               seq_printf(s, "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask));
+       else
+               ret = -ENOENT;
+       rdtgroup_kn_unlock(of->kn);
+
+       return ret;
+}
+
+/*
+ * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * because __switch_to() is executed with interrupts disabled. A local call
+ * from rdt_update_closid() is protected against __switch_to() because
+ * preemption is disabled.
+ */
+static void rdt_update_cpu_closid(void *closid)
+{
+       if (closid)
+               this_cpu_write(cpu_closid, *(int *)closid);
+       /*
+        * We cannot unconditionally write the MSR because the current
+        * executing task might have its own closid selected. Just reuse
+        * the context switch code.
+        */
+       intel_rdt_sched_in();
+}
+
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids must have been set up before calling this function.
+ *
+ * The per cpu closids are updated with the smp function call, when @closid
+ * is not NULL. If @closid is NULL then all affected percpu closids must
+ * have been set up before calling this function.
+ */
+static void
+rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
+{
+       int cpu = get_cpu();
+
+       if (cpumask_test_cpu(cpu, cpu_mask))
+               rdt_update_cpu_closid(closid);
+       smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
+       put_cpu();
+}
+
+static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
+                                  char *buf, size_t nbytes, loff_t off)
+{
+       cpumask_var_t tmpmask, newmask;
+       struct rdtgroup *rdtgrp, *r;
+       int ret;
+
+       if (!buf)
+               return -EINVAL;
+
+       if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+               return -ENOMEM;
+       if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
+               free_cpumask_var(tmpmask);
+               return -ENOMEM;
+       }
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               ret = -ENOENT;
+               goto unlock;
+       }
+
+       ret = cpumask_parse(buf, newmask);
+       if (ret)
+               goto unlock;
+
+       /* check that user didn't specify any offline cpus */
+       cpumask_andnot(tmpmask, newmask, cpu_online_mask);
+       if (cpumask_weight(tmpmask)) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       /* Check whether cpus are dropped from this group */
+       cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
+       if (cpumask_weight(tmpmask)) {
+               /* Can't drop from default group */
+               if (rdtgrp == &rdtgroup_default) {
+                       ret = -EINVAL;
+                       goto unlock;
+               }
+               /* Give any dropped cpus to rdtgroup_default */
+               cpumask_or(&rdtgroup_default.cpu_mask,
+                          &rdtgroup_default.cpu_mask, tmpmask);
+               rdt_update_closid(tmpmask, &rdtgroup_default.closid);
+       }
+
+       /*
+        * If we added cpus, remove them from the previous group that owned them
+        * and update per-cpu closid
+        */
+       cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
+       if (cpumask_weight(tmpmask)) {
+               list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
+                       if (r == rdtgrp)
+                               continue;
+                       cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
+               }
+               rdt_update_closid(tmpmask, &rdtgrp->closid);
+       }
+
+       /* Done pushing/pulling - update this group with new mask */
+       cpumask_copy(&rdtgrp->cpu_mask, newmask);
+
+unlock:
+       rdtgroup_kn_unlock(of->kn);
+       free_cpumask_var(tmpmask);
+       free_cpumask_var(newmask);
+
+       return ret ?: nbytes;
+}
+
+struct task_move_callback {
+       struct callback_head    work;
+       struct rdtgroup         *rdtgrp;
+};
+
+static void move_myself(struct callback_head *head)
+{
+       struct task_move_callback *callback;
+       struct rdtgroup *rdtgrp;
+
+       callback = container_of(head, struct task_move_callback, work);
+       rdtgrp = callback->rdtgrp;
+
+       /*
+        * If the resource group was deleted before this task work callback
+        * was invoked, then assign the task to the root group and free the
+        * resource group.
+        */
+       if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+           (rdtgrp->flags & RDT_DELETED)) {
+               current->closid = 0;
+               kfree(rdtgrp);
+       }
+
+       preempt_disable();
+       /* update PQR_ASSOC MSR to make the resource group take effect */
+       intel_rdt_sched_in();
+       preempt_enable();
+
+       kfree(callback);
+}
+
+static int __rdtgroup_move_task(struct task_struct *tsk,
+                               struct rdtgroup *rdtgrp)
+{
+       struct task_move_callback *callback;
+       int ret;
+
+       callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+       if (!callback)
+               return -ENOMEM;
+       callback->work.func = move_myself;
+       callback->rdtgrp = rdtgrp;
+
+       /*
+        * Take a refcount, so rdtgrp cannot be freed before the
+        * callback has been invoked.
+        */
+       atomic_inc(&rdtgrp->waitcount);
+       ret = task_work_add(tsk, &callback->work, true);
+       if (ret) {
+               /*
+                * Task is exiting. Drop the refcount and free the callback.
+                * No need to check the refcount as the group cannot be
+                * deleted before the write function unlocks rdtgroup_mutex.
+                */
+               atomic_dec(&rdtgrp->waitcount);
+               kfree(callback);
+       } else {
+               tsk->closid = rdtgrp->closid;
+       }
+       return ret;
+}
+
+static int rdtgroup_task_write_permission(struct task_struct *task,
+                                         struct kernfs_open_file *of)
+{
+       const struct cred *tcred = get_task_cred(task);
+       const struct cred *cred = current_cred();
+       int ret = 0;
+
+       /*
+        * Even if we're attaching all tasks in the thread group, we only
+        * need to check permissions on one of them.
+        */
+       if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
+           !uid_eq(cred->euid, tcred->uid) &&
+           !uid_eq(cred->euid, tcred->suid))
+               ret = -EPERM;
+
+       put_cred(tcred);
+       return ret;
+}
+
+static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
+                             struct kernfs_open_file *of)
+{
+       struct task_struct *tsk;
+       int ret;
+
+       rcu_read_lock();
+       if (pid) {
+               tsk = find_task_by_vpid(pid);
+               if (!tsk) {
+                       rcu_read_unlock();
+                       return -ESRCH;
+               }
+       } else {
+               tsk = current;
+       }
+
+       get_task_struct(tsk);
+       rcu_read_unlock();
+
+       ret = rdtgroup_task_write_permission(tsk, of);
+       if (!ret)
+               ret = __rdtgroup_move_task(tsk, rdtgrp);
+
+       put_task_struct(tsk);
+       return ret;
+}
+
+static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
+                                   char *buf, size_t nbytes, loff_t off)
+{
+       struct rdtgroup *rdtgrp;
+       int ret = 0;
+       pid_t pid;
+
+       if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+               return -EINVAL;
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+       if (rdtgrp)
+               ret = rdtgroup_move_task(pid, rdtgrp, of);
+       else
+               ret = -ENOENT;
+
+       rdtgroup_kn_unlock(of->kn);
+
+       return ret ?: nbytes;
+}
+
+static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
+{
+       struct task_struct *p, *t;
+
+       rcu_read_lock();
+       for_each_process_thread(p, t) {
+               if (t->closid == r->closid)
+                       seq_printf(s, "%d\n", t->pid);
+       }
+       rcu_read_unlock();
+}
+
+static int rdtgroup_tasks_show(struct kernfs_open_file *of,
+                              struct seq_file *s, void *v)
+{
+       struct rdtgroup *rdtgrp;
+       int ret = 0;
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (rdtgrp)
+               show_rdt_tasks(rdtgrp, s);
+       else
+               ret = -ENOENT;
+       rdtgroup_kn_unlock(of->kn);
+
+       return ret;
+}
+
+/* Files in each rdtgroup */
+static struct rftype rdtgroup_base_files[] = {
+       {
+               .name           = "cpus",
+               .mode           = 0644,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .write          = rdtgroup_cpus_write,
+               .seq_show       = rdtgroup_cpus_show,
+       },
+       {
+               .name           = "tasks",
+               .mode           = 0644,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .write          = rdtgroup_tasks_write,
+               .seq_show       = rdtgroup_tasks_show,
+       },
+       {
+               .name           = "schemata",
+               .mode           = 0644,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .write          = rdtgroup_schemata_write,
+               .seq_show       = rdtgroup_schemata_show,
+       },
+};
+
+static int rdt_num_closids_show(struct kernfs_open_file *of,
+                               struct seq_file *seq, void *v)
+{
+       struct rdt_resource *r = of->kn->parent->priv;
+
+       seq_printf(seq, "%d\n", r->num_closid);
+
+       return 0;
+}
+
+static int rdt_cbm_mask_show(struct kernfs_open_file *of,
+                            struct seq_file *seq, void *v)
+{
+       struct rdt_resource *r = of->kn->parent->priv;
+
+       seq_printf(seq, "%x\n", r->max_cbm);
+
+       return 0;
+}
+
+static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
+                            struct seq_file *seq, void *v)
+{
+       struct rdt_resource *r = of->kn->parent->priv;
+
+       seq_printf(seq, "%d\n", r->min_cbm_bits);
+
+       return 0;
+}
+
+/* rdtgroup information files for one cache resource. */
+static struct rftype res_info_files[] = {
+       {
+               .name           = "num_closids",
+               .mode           = 0444,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .seq_show       = rdt_num_closids_show,
+       },
+       {
+               .name           = "cbm_mask",
+               .mode           = 0444,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .seq_show       = rdt_cbm_mask_show,
+       },
+       {
+               .name           = "min_cbm_bits",
+               .mode           = 0444,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .seq_show       = rdt_min_cbm_bits_show,
+       },
+};
+
+static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
+{
+       struct kernfs_node *kn_subdir;
+       struct rdt_resource *r;
+       int ret;
+
+       /* create the directory */
+       kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
+       if (IS_ERR(kn_info))
+               return PTR_ERR(kn_info);
+       kernfs_get(kn_info);
+
+       for_each_enabled_rdt_resource(r) {
+               kn_subdir = kernfs_create_dir(kn_info, r->name,
+                                             kn_info->mode, r);
+               if (IS_ERR(kn_subdir)) {
+                       ret = PTR_ERR(kn_subdir);
+                       goto out_destroy;
+               }
+               kernfs_get(kn_subdir);
+               ret = rdtgroup_kn_set_ugid(kn_subdir);
+               if (ret)
+                       goto out_destroy;
+               ret = rdtgroup_add_files(kn_subdir, res_info_files,
+                                        ARRAY_SIZE(res_info_files));
+               if (ret)
+                       goto out_destroy;
+               kernfs_activate(kn_subdir);
+       }
+
+       /*
+        * This extra ref will be put in kernfs_remove() and guarantees
+        * that kn_info is always accessible.
+        */
+       kernfs_get(kn_info);
+
+       ret = rdtgroup_kn_set_ugid(kn_info);
+       if (ret)
+               goto out_destroy;
+
+       kernfs_activate(kn_info);
+
+       return 0;
+
+out_destroy:
+       kernfs_remove(kn_info);
+       return ret;
+}
+
+static void l3_qos_cfg_update(void *arg)
+{
+       bool *enable = arg;
+
+       wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+}
+
+static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
+{
+       cpumask_var_t cpu_mask;
+       struct rdt_domain *d;
+       int cpu;
+
+       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       list_for_each_entry(d, &r->domains, list) {
+               /* Pick one CPU from each domain instance to update MSR */
+               cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+       }
+       cpu = get_cpu();
+       /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
+       if (cpumask_test_cpu(cpu, cpu_mask))
+               l3_qos_cfg_update(&enable);
+       /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
+       smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
+       put_cpu();
+
+       free_cpumask_var(cpu_mask);
+
+       return 0;
+}
+
+static int cdp_enable(void)
+{
+       struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
+       struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
+       struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+       int ret;
+
+       if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
+               return -EINVAL;
+
+       ret = set_l3_qos_cfg(r_l3, true);
+       if (!ret) {
+               r_l3->enabled = false;
+               r_l3data->enabled = true;
+               r_l3code->enabled = true;
+       }
+       return ret;
+}
+
+static void cdp_disable(void)
+{
+       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+       r->enabled = r->capable;
+
+       if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
+               rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
+               rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
+               set_l3_qos_cfg(r, false);
+       }
+}
+
+static int parse_rdtgroupfs_options(char *data)
+{
+       char *token, *o = data;
+       int ret = 0;
+
+       while ((token = strsep(&o, ",")) != NULL) {
+               if (!*token)
+                       return -EINVAL;
+
+               if (!strcmp(token, "cdp"))
+                       ret = cdp_enable();
+       }
+
+       return ret;
+}
+
+/*
+ * We don't allow rdtgroup directories to be created anywhere
+ * except the root directory. Thus when looking for the rdtgroup
+ * structure for a kernfs node we are either looking at a directory,
+ * in which case the rdtgroup structure is pointed at by the "priv"
+ * field, or at a file, in which case we need only look to the parent
+ * to find the rdtgroup.
+ */
+static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
+{
+       if (kernfs_type(kn) == KERNFS_DIR) {
+               /*
+                * All the resource directories use "kn->priv"
+                * to point to the "struct rdtgroup" for the
+                * resource. "info" and its subdirectories don't
+                * have rdtgroup structures, so return NULL here.
+                */
+               if (kn == kn_info || kn->parent == kn_info)
+                       return NULL;
+               else
+                       return kn->priv;
+       } else {
+               return kn->parent->priv;
+       }
+}
+
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
+{
+       struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
+
+       if (!rdtgrp)
+               return NULL;
+
+       atomic_inc(&rdtgrp->waitcount);
+       kernfs_break_active_protection(kn);
+
+       mutex_lock(&rdtgroup_mutex);
+
+       /* Was this group deleted while we waited? */
+       if (rdtgrp->flags & RDT_DELETED)
+               return NULL;
+
+       return rdtgrp;
+}
+
+void rdtgroup_kn_unlock(struct kernfs_node *kn)
+{
+       struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
+
+       if (!rdtgrp)
+               return;
+
+       mutex_unlock(&rdtgroup_mutex);
+
+       if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+           (rdtgrp->flags & RDT_DELETED)) {
+               kernfs_unbreak_active_protection(kn);
+               kernfs_put(kn);
+               kfree(rdtgrp);
+       } else {
+               kernfs_unbreak_active_protection(kn);
+       }
+}
+
+static struct dentry *rdt_mount(struct file_system_type *fs_type,
+                               int flags, const char *unused_dev_name,
+                               void *data)
+{
+       struct dentry *dentry;
+       int ret;
+
+       mutex_lock(&rdtgroup_mutex);
+       /*
+        * resctrl file system can only be mounted once.
+        */
+       if (static_branch_unlikely(&rdt_enable_key)) {
+               dentry = ERR_PTR(-EBUSY);
+               goto out;
+       }
+
+       ret = parse_rdtgroupfs_options(data);
+       if (ret) {
+               dentry = ERR_PTR(ret);
+               goto out_cdp;
+       }
+
+       closid_init();
+
+       ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
+       if (ret) {
+               dentry = ERR_PTR(ret);
+               goto out_cdp;
+       }
+
+       dentry = kernfs_mount(fs_type, flags, rdt_root,
+                             RDTGROUP_SUPER_MAGIC, NULL);
+       if (IS_ERR(dentry))
+               goto out_cdp;
+
+       static_branch_enable(&rdt_enable_key);
+       goto out;
+
+out_cdp:
+       cdp_disable();
+out:
+       mutex_unlock(&rdtgroup_mutex);
+
+       return dentry;
+}
+
+static int reset_all_cbms(struct rdt_resource *r)
+{
+       struct msr_param msr_param;
+       cpumask_var_t cpu_mask;
+       struct rdt_domain *d;
+       int i, cpu;
+
+       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       msr_param.res = r;
+       msr_param.low = 0;
+       msr_param.high = r->num_closid;
+
+       /*
+        * Disable resource control for this resource by setting all
+        * CBMs in all domains to the maximum mask value. Pick one CPU
+        * from each domain to update the MSRs below.
+        */
+       list_for_each_entry(d, &r->domains, list) {
+               cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+
+               for (i = 0; i < r->num_closid; i++)
+                       d->cbm[i] = r->max_cbm;
+       }
+       cpu = get_cpu();
+       /* Update CBM on this cpu if it's in cpu_mask. */
+       if (cpumask_test_cpu(cpu, cpu_mask))
+               rdt_cbm_update(&msr_param);
+       /* Update CBM on all other cpus in cpu_mask. */
+       smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+       put_cpu();
+
+       free_cpumask_var(cpu_mask);
+
+       return 0;
+}
+
+/*
+ * Move tasks from one group to the other. If @from is NULL, then all tasks
+ * in the system are moved unconditionally (used for teardown).
+ *
+ * If @mask is not NULL, the cpus on which moved tasks are running are set
+ * in that mask so the update smp function call is restricted to affected
+ * cpus.
+ */
+static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+                                struct cpumask *mask)
+{
+       struct task_struct *p, *t;
+
+       read_lock(&tasklist_lock);
+       for_each_process_thread(p, t) {
+               if (!from || t->closid == from->closid) {
+                       t->closid = to->closid;
+#ifdef CONFIG_SMP
+                       /*
+                        * This is safe on x86 w/o barriers as the ordering
+                        * of writing to task_cpu() and t->on_cpu is
+                        * the reverse of the reading here. The detection is
+                        * inaccurate as tasks might move or schedule
+                        * before the smp function call takes place. In
+                        * such a case the function call is pointless, but
+                        * there is no other side effect.
+                        */
+                       if (mask && t->on_cpu)
+                               cpumask_set_cpu(task_cpu(t), mask);
+#endif
+               }
+       }
+       read_unlock(&tasklist_lock);
+}
+
+/*
+ * Forcibly remove all subdirectories under the root.
+ */
+static void rmdir_all_sub(void)
+{
+       struct rdtgroup *rdtgrp, *tmp;
+
+       /* Move all tasks to the default resource group */
+       rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
+
+       list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
+               /* Remove each rdtgroup other than root */
+               if (rdtgrp == &rdtgroup_default)
+                       continue;
+
+               /*
+                * Give any CPUs back to the default group. We cannot copy
+                * cpu_online_mask because a CPU might have executed the
+                * offline callback already, but is still marked online.
+                */
+               cpumask_or(&rdtgroup_default.cpu_mask,
+                          &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+
+               kernfs_remove(rdtgrp->kn);
+               list_del(&rdtgrp->rdtgroup_list);
+               kfree(rdtgrp);
+       }
+       /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
+       get_online_cpus();
+       rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
+       put_online_cpus();
+
+       kernfs_remove(kn_info);
+}
+
+static void rdt_kill_sb(struct super_block *sb)
+{
+       struct rdt_resource *r;
+
+       mutex_lock(&rdtgroup_mutex);
+
+       /* Put everything back to default values. */
+       for_each_enabled_rdt_resource(r)
+               reset_all_cbms(r);
+       cdp_disable();
+       rmdir_all_sub();
+       static_branch_disable(&rdt_enable_key);
+       kernfs_kill_sb(sb);
+       mutex_unlock(&rdtgroup_mutex);
+}
+
+static struct file_system_type rdt_fs_type = {
+       .name    = "resctrl",
+       .mount   = rdt_mount,
+       .kill_sb = rdt_kill_sb,
+};
+
+static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+                         umode_t mode)
+{
+       struct rdtgroup *parent, *rdtgrp;
+       struct kernfs_node *kn;
+       int ret, closid;
+
+       /* Only allow mkdir in the root directory */
+       if (parent_kn != rdtgroup_default.kn)
+               return -EPERM;
+
+       /* Do not accept '\n' to avoid an unparsable situation. */
+       if (strchr(name, '\n'))
+               return -EINVAL;
+
+       parent = rdtgroup_kn_lock_live(parent_kn);
+       if (!parent) {
+               ret = -ENODEV;
+               goto out_unlock;
+       }
+
+       ret = closid_alloc();
+       if (ret < 0)
+               goto out_unlock;
+       closid = ret;
+
+       /* allocate the rdtgroup. */
+       rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
+       if (!rdtgrp) {
+               ret = -ENOSPC;
+               goto out_closid_free;
+       }
+       rdtgrp->closid = closid;
+       list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
+
+       /* kernfs creates the directory for rdtgrp */
+       kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
+       if (IS_ERR(kn)) {
+               ret = PTR_ERR(kn);
+               goto out_cancel_ref;
+       }
+       rdtgrp->kn = kn;
+
+       /*
+        * kernfs_remove() will drop the reference count on "kn" which
+        * will free it. But we still need it to stick around for the
+        * rdtgroup_kn_unlock(kn) call below. Take one extra reference
+        * here, which will be dropped inside rdtgroup_kn_unlock().
+        */
+       kernfs_get(kn);
+
+       ret = rdtgroup_kn_set_ugid(kn);
+       if (ret)
+               goto out_destroy;
+
+       ret = rdtgroup_add_files(kn, rdtgroup_base_files,
+                                ARRAY_SIZE(rdtgroup_base_files));
+       if (ret)
+               goto out_destroy;
+
+       kernfs_activate(kn);
+
+       ret = 0;
+       goto out_unlock;
+
+out_destroy:
+       kernfs_remove(rdtgrp->kn);
+out_cancel_ref:
+       list_del(&rdtgrp->rdtgroup_list);
+       kfree(rdtgrp);
+out_closid_free:
+       closid_free(closid);
+out_unlock:
+       rdtgroup_kn_unlock(parent_kn);
+       return ret;
+}
+
+static int rdtgroup_rmdir(struct kernfs_node *kn)
+{
+       int ret, cpu, closid = rdtgroup_default.closid;
+       struct rdtgroup *rdtgrp;
+       cpumask_var_t tmpmask;
+
+       if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+               return -ENOMEM;
+
+       rdtgrp = rdtgroup_kn_lock_live(kn);
+       if (!rdtgrp) {
+               ret = -EPERM;
+               goto out;
+       }
+
+       /* Give any tasks back to the default group */
+       rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
+
+       /* Give any CPUs back to the default group */
+       cpumask_or(&rdtgroup_default.cpu_mask,
+                  &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+
+       /* Update per cpu closid of the moved CPUs first */
+       for_each_cpu(cpu, &rdtgrp->cpu_mask)
+               per_cpu(cpu_closid, cpu) = closid;
+       /*
+        * Update the MSR on moved CPUs and on CPUs which have a moved
+        * task running on them.
+        */
+       cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
+       rdt_update_closid(tmpmask, NULL);
+
+       rdtgrp->flags = RDT_DELETED;
+       closid_free(rdtgrp->closid);
+       list_del(&rdtgrp->rdtgroup_list);
+
+       /*
+        * Take one extra hold on this; it is dropped when we kfree(rdtgrp)
+        * in rdtgroup_kn_unlock().
+        */
+       kernfs_get(kn);
+       kernfs_remove(rdtgrp->kn);
+       ret = 0;
+out:
+       rdtgroup_kn_unlock(kn);
+       free_cpumask_var(tmpmask);
+       return ret;
+}
+
+static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
+{
+       if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled)
+               seq_puts(seq, ",cdp");
+       return 0;
+}
+
+static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
+       .mkdir          = rdtgroup_mkdir,
+       .rmdir          = rdtgroup_rmdir,
+       .show_options   = rdtgroup_show_options,
+};
+
+static int __init rdtgroup_setup_root(void)
+{
+       int ret;
+
+       rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
+                                     KERNFS_ROOT_CREATE_DEACTIVATED,
+                                     &rdtgroup_default);
+       if (IS_ERR(rdt_root))
+               return PTR_ERR(rdt_root);
+
+       mutex_lock(&rdtgroup_mutex);
+
+       rdtgroup_default.closid = 0;
+       list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
+
+       ret = rdtgroup_add_files(rdt_root->kn, rdtgroup_base_files,
+                                ARRAY_SIZE(rdtgroup_base_files));
+       if (ret) {
+               kernfs_destroy_root(rdt_root);
+               goto out;
+       }
+
+       rdtgroup_default.kn = rdt_root->kn;
+       kernfs_activate(rdtgroup_default.kn);
+
+out:
+       mutex_unlock(&rdtgroup_mutex);
+
+       return ret;
+}
+
+/*
+ * rdtgroup_init - rdtgroup initialization
+ *
+ * Set up the resctrl filesystem: set up the root, create the mount point,
+ * register the rdtgroup filesystem, and initialize files under the root
+ * directory.
+ *
+ * Return: 0 on success or -errno
+ */
+int __init rdtgroup_init(void)
+{
+       int ret = 0;
+
+       ret = rdtgroup_setup_root();
+       if (ret)
+               return ret;
+
+       ret = sysfs_create_mount_point(fs_kobj, "resctrl");
+       if (ret)
+               goto cleanup_root;
+
+       ret = register_filesystem(&rdt_fs_type);
+       if (ret)
+               goto cleanup_mountpoint;
+
+       return 0;
+
+cleanup_mountpoint:
+       sysfs_remove_mount_point(fs_kobj, "resctrl");
+cleanup_root:
+       kernfs_destroy_root(rdt_root);
+
+       return ret;
+}
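
Once the filesystem is registered, everything above is driven from userspace
through the files created by rdtgroup_base_files[]. A minimal sketch of a
client, assuming the /sys/fs/resctrl mount point set up in rdtgroup_init();
the group name is illustrative and error handling is trimmed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mount.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		int fd;

		/* rdt_fs_type registers "resctrl"; "cdp" is the only mount option */
		if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, NULL))
			perror("mount");

		/* a mkdir in the root allocates a CLOSID via rdtgroup_mkdir() */
		mkdir("/sys/fs/resctrl/grp0", 0755);

		/* writing a pid moves that task: rdtgroup_tasks_write() above */
		fd = open("/sys/fs/resctrl/grp0/tasks", O_WRONLY);
		if (fd >= 0) {
			dprintf(fd, "%d\n", getpid());
			close(fd);
		}
		return 0;
	}
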
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
new file mode 100644 (file)
index 0000000..f369cb8
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * Resource Director Technology(RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ *    Fenghua Yu <fenghua.yu@intel.com>
+ *    Tony Luck <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel(R) x86 Architecture
+ * Software Developer Manual, June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/intel_rdt.h>
+
+/*
+ * Check whether a cache bit mask is valid. The SDM says:
+ *     Please note that all (and only) contiguous '1' combinations
+ *     are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
+ * Additionally Haswell requires at least two bits set.
+ */
+static bool cbm_validate(unsigned long var, struct rdt_resource *r)
+{
+       unsigned long first_bit, zero_bit;
+
+       if (var == 0 || var > r->max_cbm)
+               return false;
+
+       first_bit = find_first_bit(&var, r->cbm_len);
+       zero_bit = find_next_zero_bit(&var, r->cbm_len, first_bit);
+
+       if (find_next_bit(&var, r->cbm_len, zero_bit) < r->cbm_len)
+               return false;
+
+       if ((zero_bit - first_bit) < r->min_cbm_bits)
+               return false;
+       return true;
+}
+
+/*
+ * Read one cache bit mask (hex). Check that it is valid for the current
+ * resource type.
+ */
+static int parse_cbm(char *buf, struct rdt_resource *r)
+{
+       unsigned long data;
+       int ret;
+
+       ret = kstrtoul(buf, 16, &data);
+       if (ret)
+               return ret;
+       if (!cbm_validate(data, r))
+               return -EINVAL;
+       r->tmp_cbms[r->num_tmp_cbms++] = data;
+
+       return 0;
+}
+
+/*
+ * For each domain in this resource we expect to find a series of:
+ *     id=mask
+ * separated by ";". The "id" is in decimal and must appear in the same
+ * order as the resource's domain list.
+ */
+static int parse_line(char *line, struct rdt_resource *r)
+{
+       char *dom = NULL, *id;
+       struct rdt_domain *d;
+       unsigned long dom_id;
+
+       list_for_each_entry(d, &r->domains, list) {
+               dom = strsep(&line, ";");
+               if (!dom)
+                       return -EINVAL;
+               id = strsep(&dom, "=");
+               if (kstrtoul(id, 10, &dom_id) || dom_id != d->id)
+                       return -EINVAL;
+               if (parse_cbm(dom, r))
+                       return -EINVAL;
+       }
+
+       /* Any garbage at the end of the line? */
+       if (line && line[0])
+               return -EINVAL;
+       return 0;
+}
+
+static int update_domains(struct rdt_resource *r, int closid)
+{
+       struct msr_param msr_param;
+       cpumask_var_t cpu_mask;
+       struct rdt_domain *d;
+       int cpu, idx = 0;
+
+       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       msr_param.low = closid;
+       msr_param.high = msr_param.low + 1;
+       msr_param.res = r;
+
+       list_for_each_entry(d, &r->domains, list) {
+               cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+               d->cbm[msr_param.low] = r->tmp_cbms[idx++];
+       }
+       cpu = get_cpu();
+       /* Update CBM on this cpu if it's in cpu_mask. */
+       if (cpumask_test_cpu(cpu, cpu_mask))
+               rdt_cbm_update(&msr_param);
+       /* Update CBM on other cpus. */
+       smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+       put_cpu();
+
+       free_cpumask_var(cpu_mask);
+
+       return 0;
+}
+
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+                               char *buf, size_t nbytes, loff_t off)
+{
+       struct rdtgroup *rdtgrp;
+       struct rdt_resource *r;
+       char *tok, *resname;
+       int closid, ret = 0;
+       u32 *l3_cbms = NULL;
+
+       /* Valid input requires a trailing newline */
+       if (nbytes == 0 || buf[nbytes - 1] != '\n')
+               return -EINVAL;
+       buf[nbytes - 1] = '\0';
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               rdtgroup_kn_unlock(of->kn);
+               return -ENOENT;
+       }
+
+       closid = rdtgrp->closid;
+
+       /* get scratch space to save all the masks while we validate input */
+       for_each_enabled_rdt_resource(r) {
+               r->tmp_cbms = kcalloc(r->num_domains, sizeof(*l3_cbms),
+                                     GFP_KERNEL);
+               if (!r->tmp_cbms) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               r->num_tmp_cbms = 0;
+       }
+
+       while ((tok = strsep(&buf, "\n")) != NULL) {
+               resname = strsep(&tok, ":");
+               if (!tok) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               for_each_enabled_rdt_resource(r) {
+                       if (!strcmp(resname, r->name) &&
+                           closid < r->num_closid) {
+                               ret = parse_line(tok, r);
+                               if (ret)
+                                       goto out;
+                               break;
+                       }
+               }
+               if (!r->name) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       /* Did the parser find all the masks we need? */
+       for_each_enabled_rdt_resource(r) {
+               if (r->num_tmp_cbms != r->num_domains) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       for_each_enabled_rdt_resource(r) {
+               ret = update_domains(r, closid);
+               if (ret)
+                       goto out;
+       }
+
+out:
+       rdtgroup_kn_unlock(of->kn);
+       for_each_enabled_rdt_resource(r) {
+               kfree(r->tmp_cbms);
+               r->tmp_cbms = NULL;
+       }
+       return ret ?: nbytes;
+}
+
+static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+{
+       struct rdt_domain *dom;
+       bool sep = false;
+
+       seq_printf(s, "%s:", r->name);
+       list_for_each_entry(dom, &r->domains, list) {
+               if (sep)
+                       seq_puts(s, ";");
+               seq_printf(s, "%d=%x", dom->id, dom->cbm[closid]);
+               sep = true;
+       }
+       seq_puts(s, "\n");
+}
+
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+                          struct seq_file *s, void *v)
+{
+       struct rdtgroup *rdtgrp;
+       struct rdt_resource *r;
+       int closid, ret = 0;
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (rdtgrp) {
+               closid = rdtgrp->closid;
+               for_each_enabled_rdt_resource(r) {
+                       if (closid < r->num_closid)
+                               show_doms(s, r, closid);
+               }
+       } else {
+               ret = -ENOENT;
+       }
+       rdtgroup_kn_unlock(of->kn);
+       return ret;
+}
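
Reading show_doms() and parse_line() together fixes the schemata format: one
line per enabled resource, the resource name and a colon, then "id=mask"
pairs in domain-list order separated by ";", with ids in decimal and masks in
hex. On a hypothetical two-socket part with a 20-bit L3 CBM the file would
read:

	L3:0=fffff;1=fffff

and with the "cdp" mount option the L3 line is replaced by:

	L3DATA:0=fffff;1=fffff
	L3CODE:0=fffff;1=fffff

Writing back, say, "L3:0=ff000;1=00ff0" (contiguous set bits only, as
cbm_validate() enforces) updates both domains for the group's CLOSID.
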
index d1316f9c8329846b0d3f7dede754fa0eef256bd3..d9794060fe225f72f34e78cdc74959aa1bf38934 100644 (file)
@@ -20,12 +20,15 @@ struct cpuid_bit {
 /* Please keep the leaf sorted by cpuid_bit.level for faster search. */
 static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_APERFMPERF,       CPUID_ECX,  0, 0x00000006, 0 },
-       { X86_FEATURE_EPB,              CPUID_ECX,  3, 0x00000006, 0 },
-       { X86_FEATURE_INTEL_PT,         CPUID_EBX, 25, 0x00000007, 0 },
+       { X86_FEATURE_EPB,              CPUID_ECX,  3, 0x00000006, 0 },
+       { X86_FEATURE_INTEL_PT,         CPUID_EBX, 25, 0x00000007, 0 },
        { X86_FEATURE_AVX512_4VNNIW,    CPUID_EDX,  2, 0x00000007, 0 },
        { X86_FEATURE_AVX512_4FMAPS,    CPUID_EDX,  3, 0x00000007, 0 },
-       { X86_FEATURE_HW_PSTATE,        CPUID_EDX,  7, 0x80000007, 0 },
-       { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
+       { X86_FEATURE_CAT_L3,           CPUID_EBX,  1, 0x00000010, 0 },
+       { X86_FEATURE_CAT_L2,           CPUID_EBX,  2, 0x00000010, 0 },
+       { X86_FEATURE_CDP_L3,           CPUID_ECX,  2, 0x00000010, 1 },
+       { X86_FEATURE_HW_PSTATE,        CPUID_EDX,  7, 0x80000007, 0 },
+       { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
        { X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
        { 0, 0, 0, 0, 0 }
 };
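
The new X86_FEATURE_CAT_* entries map directly onto CPUID leaf 0x10: EBX of
subleaf 0 advertises the allocation-capable resources (bit 1 = L3, bit 2 =
L2) and subleaf 1 carries the CBM length and CLOSID count that
rdt_get_config() parses. A userspace probe of the same leaf, assuming GCC's
<cpuid.h> and the field layout from the SDM:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* subleaf 0: bitmap of allocation-capable resources */
		__cpuid_count(0x10, 0, eax, ebx, ecx, edx);
		printf("L3 CAT: %u, L2 CAT: %u\n",
		       (ebx >> 1) & 1, (ebx >> 2) & 1);

		/* subleaf 1: L3 parameters, as read by rdt_get_config(1, ...) */
		__cpuid_count(0x10, 1, eax, ebx, ecx, edx);
		printf("cbm_len: %u, num_closid: %u\n",
		       (eax & 0x1f) + 1, (edx & 0xffff) + 1);
		return 0;
	}
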
index d0d7441085942fc5efe3fda3dd0c0c8fe9fdef69..a0ac3e81518ad8f633c4a3c16e17cabea38950e5 100644 (file)
@@ -53,6 +53,7 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
+#include <asm/intel_rdt.h>
 
 void __show_regs(struct pt_regs *regs, int all)
 {
@@ -296,5 +297,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        this_cpu_write(current_task, next_p);
 
+       /* Load the Intel cache allocation PQR MSR. */
+       intel_rdt_sched_in();
+
        return prev_p;
 }
index a76b65e3e615e8d511f213b2ea2a4511aff516f4..a61e141b6891ed4a08437fee3413fedfbc65e429 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/switch_to.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/vdso.h>
+#include <asm/intel_rdt.h>
 
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
@@ -476,6 +477,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                        loadsegment(ss, __KERNEL_DS);
        }
 
+       /* Load the Intel cache allocation PQR MSR. */
+       intel_rdt_sched_in();
+
        return prev_p;
 }
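
Both context-switch paths now end with intel_rdt_sched_in(). The helper
itself lives in <asm/intel_rdt.h> and is not part of this hunk; conceptually
it resolves the incoming task's CLOSID, falling back to the per-cpu default
maintained by the cpus file, and touches MSR_IA32_PQR_ASSOC only when the
value changes. A sketch of that logic in terms of the structures visible in
this diff, not the exact implementation:

	/* Illustrative only; the real helper is in asm/intel_rdt.h */
	static inline void intel_rdt_sched_in(void)
	{
		struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
		int closid = current->closid;

		if (!closid)
			closid = this_cpu_read(cpu_closid);

		if (closid != state->closid) {
			state->closid = closid;
			/* low word: RMID (monitoring), high word: CLOSID */
			wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
		}
	}
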
 
index b2d3cf1ef54ae5c1cc0bfb19c8c65b08dd6f23b4..e85f6bd7b9d526266ca5f11cfd9a23f7ce19337d 100644 (file)
@@ -373,16 +373,17 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        const u32 kvm_cpuid_7_0_ebx_x86_features =
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
-               F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
-               F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
-               F(AVX512BW) | F(AVX512VL);
+               F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
+               F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
+               F(SHA_NI) | F(AVX512BW) | F(AVX512VL);
 
        /* cpuid 0xD.1.eax */
        const u32 kvm_cpuid_D_1_eax_x86_features =
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
 
        /* cpuid 7.0.ecx*/
-       const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/;
+       const u32 kvm_cpuid_7_0_ecx_x86_features =
+               F(AVX512VBMI) | F(PKU) | 0 /*OSPKE*/;
 
        /* cpuid 7.0.edx*/
        const u32 kvm_cpuid_7_0_edx_x86_features =
index 99cde5220e0799fd84aca5a6774dce47e289a73c..1572c35b4f1a637b2ebb622ae88c5e7e14eafd63 100644 (file)
@@ -852,6 +852,10 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;
 
+       mutex_lock(&kvm->arch.hyperv.hv_lock);
+       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+               goto out_unlock;
+
        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
@@ -859,7 +863,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
-               return;
+               goto out_unlock;
 
        /*
         * While we're computing and writing the parameters, force the
@@ -868,15 +872,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               return;
+               goto out_unlock;
 
        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-               return;
+               goto out_unlock;
 
        /* Ensure sequence is zero before writing the rest of the struct.  */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-               return;
+               goto out_unlock;
 
        /*
         * Now switch to the TSC page mechanism by writing the sequence.
@@ -891,6 +895,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+out_unlock:
+       mutex_unlock(&kvm->arch.hyperv.hv_lock);
 }
 
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
@@ -1142,9 +1148,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
@@ -1155,9 +1161,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
@@ -1165,7 +1171,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 
 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
-       return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+       return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
 }
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
index aae43c6f24721e748451aea69d9c138c4be58f61..24db5fb6f575af27d3b61a67b15ce9996158ed8b 100644 (file)
@@ -1389,10 +1389,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
        return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
 }
 
-static inline bool is_exception(u32 intr_info)
+static inline bool is_nmi(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+               == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
 }
 
 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
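
The new is_nmi() helper folds the previous two-part test (event type plus valid bit) into one masked compare, and the later hunks switch the open-coded checks over to it. As an illustration only, assuming the standard VMX interruption-information encoding (valid flag in bit 31, event type in bits 10:8, NMI type 2):

    /* Illustrative expansion; the mask values are the standard VMX
     * interruption-information encoding, not defined by this patch. */
    #define INTR_INFO_VALID_MASK     (1u << 31)     /* bit 31: info is valid */
    #define INTR_INFO_INTR_TYPE_MASK (7u << 8)      /* bits 10:8: event type */
    #define INTR_TYPE_NMI_INTR       (2u << 8)      /* type 2: NMI           */

    static inline int is_nmi_example(unsigned int intr_info)
    {
            /* True only when the info is valid AND the type field is NMI. */
            return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                    == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
    }
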
@@ -5728,7 +5728,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
        if (is_machine_check(intr_info))
                return handle_machine_check(vcpu);
 
-       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
+       if (is_nmi(intr_info))
                return 1;  /* already handled by vmx_vcpu_run() */
 
        if (is_no_device(intr_info)) {
@@ -7122,7 +7122,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 
                if (vmptr == vmx->nested.vmxon_ptr) {
                        nested_vmx_failValid(vcpu,
-                                            VMXERR_VMCLEAR_VMXON_POINTER);
+                                            VMXERR_VMPTRLD_VMXON_POINTER);
                        return kvm_skip_emulated_instruction(vcpu);
                }
                break;
@@ -8170,7 +8170,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 
        switch (exit_reason) {
        case EXIT_REASON_EXCEPTION_NMI:
-               if (!is_exception(intr_info))
+               if (is_nmi(intr_info))
                        return false;
                else if (is_page_fault(intr_info))
                        return enable_ept;
@@ -8765,8 +8765,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
                kvm_machine_check();
 
        /* We need to handle NMIs before interrupts are enabled */
-       if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-           (exit_intr_info & INTR_INFO_VALID_MASK)) {
+       if (is_nmi(exit_intr_info)) {
                kvm_before_handle_nmi(&vmx->vcpu);
                asm("int $2");
                kvm_after_handle_nmi(&vmx->vcpu);
index 1f0d2383f5ee6e273751949a77882224947a7861..445c51b6cf6dc702a0da9710ee5b6eb4b996ec34 100644 (file)
@@ -2844,7 +2844,24 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+       int idx;
+       /*
+        * Disable page faults because we're in atomic context here.
+        * kvm_write_guest_offset_cached() would call might_fault(),
+        * which relies on pagefault_disable() to tell whether there is
+        * a bug. NOTE: the write to guest memory may not go through
+        * during postcopy live migration or under heavy guest paging.
+        */
+       pagefault_disable();
+       /*
+        * kvm_memslots() will be called by
+        * kvm_write_guest_offset_cached() so take the srcu lock.
+        */
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        kvm_steal_time_set_preempted(vcpu);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
+       pagefault_enable();
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
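
The comment block added above captures the constraint on this path: kvm_arch_vcpu_put() can run in atomic context, so the cached guest write must neither sleep nor trip might_fault(), and kvm_memslots() must be called under the SRCU read lock. Sketched with a hypothetical helper standing in for kvm_steal_time_set_preempted():

    /* Sketch of the pattern; write_preempted_flag() is hypothetical. */
    static void example_vcpu_put(struct kvm_vcpu *vcpu)
    {
            int idx;

            pagefault_disable();                     /* atomic context: must not sleep */
            idx = srcu_read_lock(&vcpu->kvm->srcu);  /* memslots are SRCU-protected    */
            write_preempted_flag(vcpu);              /* a failed write is tolerated    */
            srcu_read_unlock(&vcpu->kvm->srcu, idx);
            pagefault_enable();
    }
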
@@ -7881,6 +7898,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        raw_spin_lock_init(&kvm->arch.tsc_write_lock);
        mutex_init(&kvm->arch.apic_map_lock);
+       mutex_init(&kvm->arch.hyperv.hv_lock);
        spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
        kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
index f61058617ada462c6e571c7936c3ebf92621aa26..f4126cf997a469da01161244e93a320d1dd2104e 100644 (file)
@@ -15,6 +15,7 @@ config XTENSA
        select GENERIC_SCHED_CLOCK
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
+       select HAVE_DMA_CONTIGUOUS
        select HAVE_EXIT_THREAD
        select HAVE_FUNCTION_TRACER
        select HAVE_FUTEX_CMPXCHG if !MMU
index b1f4ee8c9a22371ba9abedfcc7d21fb279b585f0..6106bdc097ad2f51a257d7643770b89c8d82ab8e 100644 (file)
                device_type = "memory";
                reg = <0x00000000 0x38000000>;
        };
+
+       reserved-memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /* global autoconfigured region for contiguous allocations */
+               linux,cma {
+                       compatible = "shared-dma-pool";
+                       reusable;
+                       size = <0x04000000>;
+                       alignment = <0x2000>;
+                       alloc-ranges = <0x00000000 0x20000000>;
+                       linux,cma-default;
+               };
+       };
 };
index 28cf4c5d65efade019dbefaf809e71d13624e37c..b7fbaa56b51a573f393ce6d6abb6ec5c1895c27e 100644 (file)
@@ -3,6 +3,7 @@ generic-y += bug.h
 generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += div64.h
+generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
index c31f5d5afc7d2d969a2a0c532e9f91a4395ba49d..264fb89c444e9fde9fd5ba9157aba6d7b06eae2f 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
 obj-$(CONFIG_SMP) += smp.o mxhead.o
 obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
 
 AFLAGS_head.o += -mtext-section-literals
 AFLAGS_mxhead.o += -mtext-section-literals
index 6a16decf278fa0a7dfa4dd5200e55a31258dace3..70e362e6038e80c0e802bf6f2a1ebb3360292c07 100644 (file)
@@ -15,6 +15,7 @@
  * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
  */
 
+#include <linux/dma-contiguous.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -146,6 +147,8 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 {
        unsigned long ret;
        unsigned long uncached = 0;
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       struct page *page = NULL;
 
        /* ignore region specifiers */
 
@@ -153,11 +156,18 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 
        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                flag |= GFP_DMA;
-       ret = (unsigned long)__get_free_pages(flag, get_order(size));
 
-       if (ret == 0)
+       if (gfpflags_allow_blocking(flag))
+               page = dma_alloc_from_contiguous(dev, count, get_order(size));
+
+       if (!page)
+               page = alloc_pages(flag, get_order(size));
+
+       if (!page)
                return NULL;
 
+       ret = (unsigned long)page_address(page);
+
        /* We currently don't support coherent memory outside KSEG */
 
        BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
@@ -170,16 +180,19 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
        return (void *)uncached;
 }
 
-static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
+static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
                            dma_addr_t dma_handle, unsigned long attrs)
 {
        unsigned long addr = (unsigned long)vaddr +
                XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+       struct page *page = virt_to_page(addr);
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
        BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
               addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
-       free_pages(addr, get_order(size));
+       if (!dma_release_from_contiguous(dev, page, count))
+               __free_pages(page, get_order(size));
 }
 
 static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
diff --git a/arch/xtensa/kernel/s32c1i_selftest.c b/arch/xtensa/kernel/s32c1i_selftest.c
new file mode 100644 (file)
index 0000000..07e56e3
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * S32C1I selftest.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016 Cadence Design Systems Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/traps.h>
+
+#if XCHAL_HAVE_S32C1I
+
+static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
+
+/*
+ * Basic atomic compare-and-swap that records the PC of the S32C1I for probing.
+ *
+ * If *v == cmp, set *v = set.  Return previous *v.
+ */
+static inline int probed_compare_swap(int *v, int cmp, int set)
+{
+       int tmp;
+
+       __asm__ __volatile__(
+                       "       movi    %1, 1f\n"
+                       "       s32i    %1, %4, 0\n"
+                       "       wsr     %2, scompare1\n"
+                       "1:     s32c1i  %0, %3, 0\n"
+                       : "=a" (set), "=&a" (tmp)
+                       : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
+                       : "memory"
+                       );
+       return set;
+}
+
+/* Handle probed exception */
+
+static void __init do_probed_exception(struct pt_regs *regs,
+                                      unsigned long exccause)
+{
+       if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
+               regs->pc += 3;          /* skip the s32c1i instruction */
+               rcw_exc = exccause;
+       } else {
+               do_unhandled(regs, exccause);
+       }
+}
+
+/* Simple test of S32C1I (SoC bring-up assist) */
+
+static int __init check_s32c1i(void)
+{
+       int n, cause1, cause2;
+       void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
+
+       rcw_probe_pc = 0;
+       handbus  = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
+                       do_probed_exception);
+       handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
+                       do_probed_exception);
+       handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
+                       do_probed_exception);
+
+       /* First try an S32C1I that does not store: */
+       rcw_exc = 0;
+       rcw_word = 1;
+       n = probed_compare_swap(&rcw_word, 0, 2);
+       cause1 = rcw_exc;
+
+       /* took exception? */
+       if (cause1 != 0) {
+               /* unclean exception? */
+               if (n != 2 || rcw_word != 1)
+                       panic("S32C1I exception error");
+       } else if (rcw_word != 1 || n != 1) {
+               panic("S32C1I compare error");
+       }
+
+       /* Then an S32C1I that stores: */
+       rcw_exc = 0;
+       rcw_word = 0x1234567;
+       n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
+       cause2 = rcw_exc;
+
+       if (cause2 != 0) {
+               /* unclean exception? */
+               if (n != 0xabcde || rcw_word != 0x1234567)
+                       panic("S32C1I exception error (b)");
+       } else if (rcw_word != 0xabcde || n != 0x1234567) {
+               panic("S32C1I store error");
+       }
+
+       /* Verify consistency of exceptions: */
+       if (cause1 || cause2) {
+               pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
+               /* If emulation of S32C1I upon bus error gets implemented,
+                * we can get rid of this panic for single core (not SMP)
+                */
+               panic("S32C1I exceptions not currently supported");
+       }
+       if (cause1 != cause2)
+               panic("inconsistent S32C1I exceptions");
+
+       trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
+       trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
+       trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
+       return 0;
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+/* This condition should not occur with a commercially deployed processor.
+ * Display a reminder for early engineering test or demo chips / FPGA bitstreams
+ */
+static int __init check_s32c1i(void)
+{
+       pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
+       return 0;
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+early_initcall(check_s32c1i);
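
As the header comment of probed_compare_swap() says, S32C1I implements "if *v == cmp, set *v = set; return previous *v". Stripped of the atomicity and of the probe-PC bookkeeping that the asm adds, the plain-C semantics are roughly:

    /* Plain-C semantics only; the point of S32C1I is that the real
     * instruction does this atomically, which plain C cannot express. */
    static int cas_semantics(int *v, int cmp, int set)
    {
            int old = *v;

            if (old == cmp)
                    *v = set;
            return old;     /* caller checks old == cmp to see whether the store happened */
    }
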
index 88a044af7504dd9c47d0f97d64620ac8de4b5739..848e8568fb3c4a90c2eb89783c0420f8b5526cd6 100644 (file)
 # include <linux/console.h>
 #endif
 
-#ifdef CONFIG_RTC
-# include <linux/timex.h>
-#endif
-
 #ifdef CONFIG_PROC_FS
 # include <linux/seq_file.h>
 #endif
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <asm/param.h>
-#include <asm/traps.h>
 #include <asm/smp.h>
 #include <asm/sysmem.h>
 
 #include <platform/hardware.h>
 
 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
-struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
-#endif
-
-#ifdef CONFIG_BLK_DEV_FD
-extern struct fd_ops no_fd_ops;
-struct fd_ops *fd_ops;
+struct screen_info screen_info = {
+       .orig_x = 0,
+       .orig_y = 24,
+       .orig_video_cols = 80,
+       .orig_video_lines = 24,
+       .orig_video_isVGA = 1,
+       .orig_video_points = 16,
+};
 #endif
 
-extern struct rtc_ops no_rtc_ops;
-struct rtc_ops *rtc_ops;
-
 #ifdef CONFIG_BLK_DEV_INITRD
 extern unsigned long initrd_start;
 extern unsigned long initrd_end;
@@ -77,7 +71,6 @@ extern int initrd_below_start_ok;
 void *dtb_start = __dtb_start;
 #endif
 
-unsigned char aux_device_present;
 extern unsigned long loops_per_jiffy;
 
 /* Command line specified as configuration option. */
@@ -317,120 +310,6 @@ extern char _SecondaryResetVector_text_start;
 extern char _SecondaryResetVector_text_end;
 #endif
 
-
-#ifdef CONFIG_S32C1I_SELFTEST
-#if XCHAL_HAVE_S32C1I
-
-static int __initdata rcw_word, rcw_probe_pc, rcw_exc;
-
-/*
- * Basic atomic compare-and-swap, that records PC of S32C1I for probing.
- *
- * If *v == cmp, set *v = set.  Return previous *v.
- */
-static inline int probed_compare_swap(int *v, int cmp, int set)
-{
-       int tmp;
-
-       __asm__ __volatile__(
-                       "       movi    %1, 1f\n"
-                       "       s32i    %1, %4, 0\n"
-                       "       wsr     %2, scompare1\n"
-                       "1:     s32c1i  %0, %3, 0\n"
-                       : "=a" (set), "=&a" (tmp)
-                       : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set)
-                       : "memory"
-                       );
-       return set;
-}
-
-/* Handle probed exception */
-
-static void __init do_probed_exception(struct pt_regs *regs,
-               unsigned long exccause)
-{
-       if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */
-               regs->pc += 3;          /* skip the s32c1i instruction */
-               rcw_exc = exccause;
-       } else {
-               do_unhandled(regs, exccause);
-       }
-}
-
-/* Simple test of S32C1I (soc bringup assist) */
-
-static int __init check_s32c1i(void)
-{
-       int n, cause1, cause2;
-       void *handbus, *handdata, *handaddr; /* temporarily saved handlers */
-
-       rcw_probe_pc = 0;
-       handbus  = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
-                       do_probed_exception);
-       handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR,
-                       do_probed_exception);
-       handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR,
-                       do_probed_exception);
-
-       /* First try an S32C1I that does not store: */
-       rcw_exc = 0;
-       rcw_word = 1;
-       n = probed_compare_swap(&rcw_word, 0, 2);
-       cause1 = rcw_exc;
-
-       /* took exception? */
-       if (cause1 != 0) {
-               /* unclean exception? */
-               if (n != 2 || rcw_word != 1)
-                       panic("S32C1I exception error");
-       } else if (rcw_word != 1 || n != 1) {
-               panic("S32C1I compare error");
-       }
-
-       /* Then an S32C1I that stores: */
-       rcw_exc = 0;
-       rcw_word = 0x1234567;
-       n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde);
-       cause2 = rcw_exc;
-
-       if (cause2 != 0) {
-               /* unclean exception? */
-               if (n != 0xabcde || rcw_word != 0x1234567)
-                       panic("S32C1I exception error (b)");
-       } else if (rcw_word != 0xabcde || n != 0x1234567) {
-               panic("S32C1I store error");
-       }
-
-       /* Verify consistency of exceptions: */
-       if (cause1 || cause2) {
-               pr_warn("S32C1I took exception %d, %d\n", cause1, cause2);
-               /* If emulation of S32C1I upon bus error gets implemented,
-                  we can get rid of this panic for single core (not SMP) */
-               panic("S32C1I exceptions not currently supported");
-       }
-       if (cause1 != cause2)
-               panic("inconsistent S32C1I exceptions");
-
-       trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus);
-       trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata);
-       trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr);
-       return 0;
-}
-
-#else /* XCHAL_HAVE_S32C1I */
-
-/* This condition should not occur with a commercially deployed processor.
-   Display reminder for early engr test or demo chips / FPGA bitstreams */
-static int __init check_s32c1i(void)
-{
-       pr_warn("Processor configuration lacks atomic compare-and-swap support!\n");
-       return 0;
-}
-
-#endif /* XCHAL_HAVE_S32C1I */
-early_initcall(check_s32c1i);
-#endif /* CONFIG_S32C1I_SELFTEST */
-
 static inline int mem_reserve(unsigned long start, unsigned long end)
 {
        return memblock_reserve(start, end - start);
index 80e4cfb2471ad5af6a99433d6e8af76e16865558..720fe4e8b49712db84e4e00d5fb938c9ecb4220e 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/of_fdt.h>
+#include <linux/dma-contiguous.h>
 
 #include <asm/bootparam.h>
 #include <asm/page.h>
@@ -60,6 +61,7 @@ void __init bootmem_init(void)
        max_low_pfn = min(max_pfn, MAX_LOW_PFN);
 
        memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+       dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
 
        memblock_dump_all();
 }
index 8a05a404ae708503589bdf33643948c37d243e62..a57046de2f07f00eae78aaabe9b5e43c1ecc178f 100644 (file)
@@ -655,6 +655,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 
        dprintk("%s: write %Zd bytes\n", bd->name, count);
 
+       if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+               return -EINVAL;
+
        bsg_set_block(bd, file);
 
        bytes_written = 0;
index f856963204f4949f5197c1cb0c0a247ca12c95da..656c8c6ed206f876bbdbd0f240dedf9997a88cf0 100644 (file)
@@ -45,6 +45,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
                                    || pstart < 0 || plength < 0 || partno > 65535)
                                        return -EINVAL;
                        }
+                       /* check if partition is aligned to blocksize */
+                       if (p.start & (bdev_logical_block_size(bdev) - 1))
+                               return -EINVAL;
 
                        mutex_lock(&bdev->bd_mutex);
 
index 0774799942e06a8d890a5c88e40990cd53a15037..c6fee7437be44573ade684d064e161954485301a 100644 (file)
@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
        __set_bit(WRITE_16, filter->write_ok);
        __set_bit(WRITE_LONG, filter->write_ok);
        __set_bit(WRITE_LONG_2, filter->write_ok);
+       __set_bit(WRITE_SAME, filter->write_ok);
+       __set_bit(WRITE_SAME_16, filter->write_ok);
+       __set_bit(WRITE_SAME_32, filter->write_ok);
        __set_bit(ERASE, filter->write_ok);
        __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
        __set_bit(MODE_SELECT, filter->write_ok);
index 7dd527f8ca1d24b10915c865dc3f24e4a8e34b92..94be8a8e6c082cbe5b808b049187db0657ad5cb0 100644 (file)
@@ -166,6 +166,12 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
 
 acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
 
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+                 struct acpi_table_header **out_table);
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc);
+
 /*
  * tbxfload
  */
index 5fb838e592dc430c49b657ce6fba3dce1572676f..81473a4880ce219febb9024515cc92759362d426 100644 (file)
@@ -311,6 +311,8 @@ void acpi_tb_parse_fadt(void)
 {
        u32 length;
        struct acpi_table_header *table;
+       struct acpi_table_desc *fadt_desc;
+       acpi_status status;
 
        /*
         * The FADT has multiple versions with different lengths,
@@ -319,14 +321,12 @@ void acpi_tb_parse_fadt(void)
         * Get a local copy of the FADT and convert it to a common format
         * Map entire FADT, assumed to be smaller than one page.
         */
-       length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
-
-       table =
-           acpi_os_map_memory(acpi_gbl_root_table_list.
-                              tables[acpi_gbl_fadt_index].address, length);
-       if (!table) {
+       fadt_desc = &acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index];
+       status = acpi_tb_get_table(fadt_desc, &table);
+       if (ACPI_FAILURE(status)) {
                return;
        }
+       length = fadt_desc->length;
 
        /*
         * Validate the FADT checksum before we copy the table. Ignore
@@ -340,7 +340,7 @@ void acpi_tb_parse_fadt(void)
 
        /* All done with the real FADT, unmap it */
 
-       acpi_os_unmap_memory(table, length);
+       acpi_tb_put_table(fadt_desc);
 
        /* Obtain the DSDT and FACS tables via their addresses within the FADT */
 
index 51eb07cf989844842b6dce634a589d1e7440e154..86854e84680056e164c6adad591ad772e85a3e79 100644 (file)
@@ -381,3 +381,88 @@ next_table:
        acpi_os_unmap_memory(table, length);
        return_ACPI_STATUS(AE_OK);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_get_table
+ *
+ * PARAMETERS:  table_desc          - Table descriptor
+ *              out_table           - Where the pointer to the table is returned
+ *
+ * RETURN:      Status and pointer to the requested table
+ *
+ * DESCRIPTION: Increase a reference to a table descriptor and return the
+ *              validated table pointer.
+ *              If the table descriptor is an entry of the root table list,
+ *              this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+                 struct acpi_table_header **out_table)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_tb_get_table);
+
+       if (table_desc->validation_count == 0) {
+
+               /* Table needs to be "VALIDATED" */
+
+               status = acpi_tb_validate_table(table_desc);
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+       }
+
+       table_desc->validation_count++;
+       if (table_desc->validation_count == 0) {
+               ACPI_ERROR((AE_INFO,
+                           "Table %p, Validation count is zero after increment\n",
+                           table_desc));
+               table_desc->validation_count--;
+               return_ACPI_STATUS(AE_LIMIT);
+       }
+
+       *out_table = table_desc->pointer;
+       return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_put_table
+ *
+ * PARAMETERS:  table_desc          - Table descriptor
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Decrease a reference to a table descriptor and release the
+ *              validated table pointer if no references remain.
+ *              If the table descriptor is an entry of the root table list,
+ *              this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc)
+{
+
+       ACPI_FUNCTION_TRACE(acpi_tb_put_table);
+
+       if (table_desc->validation_count == 0) {
+               ACPI_WARNING((AE_INFO,
+                             "Table %p, Validation count is zero before decrement\n",
+                             table_desc));
+               return_VOID;
+       }
+       table_desc->validation_count--;
+
+       if (table_desc->validation_count == 0) {
+
+               /* Table needs to be "INVALIDATED" */
+
+               acpi_tb_invalidate_table(table_desc);
+       }
+
+       return_VOID;
+}
index d5adb7ac468430c82c4c1d7a1d7c75be6be40c5e..7684707b254b93cf7b7c9aa90ef3346d50609ee7 100644 (file)
@@ -282,7 +282,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_table_with_size
+ * FUNCTION:    acpi_get_table
  *
  * PARAMETERS:  signature           - ACPI signature of needed table
  *              instance            - Which instance (for SSDTs)
@@ -292,16 +292,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
  *
  * DESCRIPTION: Finds and verifies an ACPI table. Table must be in the
  *              RSDT/XSDT.
+ *              Note that an early-stage acpi_get_table() call must be paired
+ *              with an early-stage acpi_put_table() call; otherwise the table
+ *              pointer mapped by the early-stage mapping implementation may be
+ *              erroneously unmapped by the late-stage unmapping implementation
+ *              in an acpi_put_table() invoked during the late stage.
  *
  ******************************************************************************/
 acpi_status
-acpi_get_table_with_size(char *signature,
-              u32 instance, struct acpi_table_header **out_table,
-              acpi_size *tbl_size)
+acpi_get_table(char *signature,
+              u32 instance, struct acpi_table_header ** out_table)
 {
        u32 i;
        u32 j;
-       acpi_status status;
+       acpi_status status = AE_NOT_FOUND;
+       struct acpi_table_desc *table_desc;
 
        /* Parameter validation */
 
@@ -309,13 +314,22 @@ acpi_get_table_with_size(char *signature,
                return (AE_BAD_PARAMETER);
        }
 
+       /*
+        * Note that the following line is required by some OSPMs: they
+        * only check whether the returned table is NULL, instead of the
+        * returned status, to determine whether this function succeeded.
+        */
+       *out_table = NULL;
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
        /* Walk the root table list */
 
        for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
             i++) {
-               if (!ACPI_COMPARE_NAME
-                   (&(acpi_gbl_root_table_list.tables[i].signature),
-                    signature)) {
+               table_desc = &acpi_gbl_root_table_list.tables[i];
+
+               if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
                        continue;
                }
 
@@ -323,43 +337,65 @@ acpi_get_table_with_size(char *signature,
                        continue;
                }
 
-               status =
-                   acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]);
-               if (ACPI_SUCCESS(status)) {
-                       *out_table = acpi_gbl_root_table_list.tables[i].pointer;
-                       *tbl_size = acpi_gbl_root_table_list.tables[i].length;
-               }
-
-               if (!acpi_gbl_permanent_mmap) {
-                       acpi_gbl_root_table_list.tables[i].pointer = NULL;
-               }
-
-               return (status);
+               status = acpi_tb_get_table(table_desc, out_table);
+               break;
        }
 
-       return (AE_NOT_FOUND);
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+       return (status);
 }
 
-ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
+ACPI_EXPORT_SYMBOL(acpi_get_table)
 
-acpi_status
-acpi_get_table(char *signature,
-              u32 instance, struct acpi_table_header **out_table)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_put_table
+ *
+ * PARAMETERS:  table               - The pointer to the table
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Release a table returned by acpi_get_table() and its clones.
+ *              Note that it is not safe to invoke this function after the
+ *              original table descriptor has been uninstalled. Currently
+ *              no OSPM requires such situations to be handled.
+ *
+ ******************************************************************************/
+void acpi_put_table(struct acpi_table_header *table)
 {
-       acpi_size tbl_size;
+       u32 i;
+       struct acpi_table_desc *table_desc;
+
+       ACPI_FUNCTION_TRACE(acpi_put_table);
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+       /* Walk the root table list */
+
+       for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
+               table_desc = &acpi_gbl_root_table_list.tables[i];
 
-       return acpi_get_table_with_size(signature,
-                      instance, out_table, &tbl_size);
+               if (table_desc->pointer != table) {
+                       continue;
+               }
+
+               acpi_tb_put_table(table_desc);
+               break;
+       }
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+       return_VOID;
 }
 
-ACPI_EXPORT_SYMBOL(acpi_get_table)
+ACPI_EXPORT_SYMBOL(acpi_put_table)
 
 /*******************************************************************************
  *
  * FUNCTION:    acpi_get_table_by_index
  *
  * PARAMETERS:  table_index         - Table index
- *              table               - Where the pointer to the table is returned
+ *              out_table           - Where the pointer to the table is returned
  *
  * RETURN:      Status and pointer to the requested table
  *
@@ -368,7 +404,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
  *
  ******************************************************************************/
 acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table)
 {
        acpi_status status;
 
@@ -376,35 +412,33 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
 
        /* Parameter validation */
 
-       if (!table) {
+       if (!out_table) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
+       /*
+        * Note that the following line is required by some OSPMs: they
+        * only check whether the returned table is NULL, instead of the
+        * returned status, to determine whether this function succeeded.
+        */
+       *out_table = NULL;
+
        (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
        /* Validate index */
 
        if (table_index >= acpi_gbl_root_table_list.current_table_count) {
-               (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-               return_ACPI_STATUS(AE_BAD_PARAMETER);
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
        }
 
-       if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
-
-               /* Table is not mapped, map it */
+       status =
+           acpi_tb_get_table(&acpi_gbl_root_table_list.tables[table_index],
+                             out_table);
 
-               status =
-                   acpi_tb_validate_table(&acpi_gbl_root_table_list.
-                                          tables[table_index]);
-               if (ACPI_FAILURE(status)) {
-                       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-                       return_ACPI_STATUS(status);
-               }
-       }
-
-       *table = acpi_gbl_root_table_list.tables[table_index].pointer;
+unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-       return_ACPI_STATUS(AE_OK);
+       return_ACPI_STATUS(status);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
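
With acpi_get_table_with_size() and early_acpi_os_unmap_memory() gone, every caller now pairs acpi_get_table() with acpi_put_table(), as the MADT, SPCR and table-parser conversions further down show. The typical caller shape (error handling trimmed):

    /* Typical caller shape after the conversion; illustrative only. */
    static int example_parse_madt(void)
    {
            struct acpi_table_header *table;
            acpi_status status;

            status = acpi_get_table(ACPI_SIG_MADT, 0, &table);
            if (ACPI_FAILURE(status))
                    return -ENODEV;

            /* ... walk the table entries ... */

            acpi_put_table(table);          /* drop the validation reference */
            return 0;
    }
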
index 5cbefd7621f01174ff1c3ba3012d9c193dbc1e49..95855cb9d6fb772634e2dd6f683c62a1de819ffd 100644 (file)
@@ -974,7 +974,7 @@ void __init acpi_early_init(void)
        if (!acpi_strict)
                acpi_gbl_enable_interpreter_slack = TRUE;
 
-       acpi_gbl_permanent_mmap = 1;
+       acpi_permanent_mmap = true;
 
        /*
         * If the machine falls into the DMI check table,
index 312c4b4dc363fdbc4847d3db55c0cfbfbb80f5f4..2f82b8eba360e7f369338b7d7a340060d6519f4f 100644 (file)
@@ -2806,12 +2806,13 @@ static int acpi_nfit_add(struct acpi_device *adev)
        acpi_size sz;
        int rc = 0;
 
-       status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
+       status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
        if (ACPI_FAILURE(status)) {
                /* This is ok, we could have an nvdimm hotplugged later */
                dev_dbg(dev, "failed to find NFIT at startup\n");
                return 0;
        }
+       sz = tbl->length;
 
        acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
        if (!acpi_desc)
index 9a4c6abee63e0e86a6941d19665a409d3931b301..a404ff4d71511d0a9d56ce10a238682e9ffc6073 100644 (file)
@@ -76,6 +76,7 @@ static struct workqueue_struct *kacpi_notify_wq;
 static struct workqueue_struct *kacpi_hotplug_wq;
 static bool acpi_os_initialized;
 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
+bool acpi_permanent_mmap = false;
 
 /*
  * This list of permanent mappings is for memory that may be accessed from
@@ -306,7 +307,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
  * virtual address).  If not found, map it, add it to that list and return a
  * pointer to it.
  *
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_map_table() to get the job done.
  */
 void __iomem *__ref
@@ -322,7 +323,7 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
                return NULL;
        }
 
-       if (!acpi_gbl_permanent_mmap)
+       if (!acpi_permanent_mmap)
                return __acpi_map_table((unsigned long)phys, size);
 
        mutex_lock(&acpi_ioremap_lock);
@@ -392,7 +393,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
  * mappings, drop a reference to it and unmap it if there are no more active
  * references to it.
  *
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_unmap_table() to get the job done.  Since
  * __acpi_unmap_table() is an __init function, the __ref annotation is needed
  * here.
@@ -401,7 +402,7 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
 {
        struct acpi_ioremap *map;
 
-       if (!acpi_gbl_permanent_mmap) {
+       if (!acpi_permanent_mmap) {
                __acpi_unmap_table(virt, size);
                return;
        }
@@ -426,12 +427,6 @@ void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
-void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
-{
-       if (!acpi_gbl_permanent_mmap)
-               __acpi_unmap_table(virt, size);
-}
-
 int acpi_os_map_generic_address(struct acpi_generic_address *gas)
 {
        u64 addr;
index 5c78ee1860b0ad390671e8f1b1c624339f7414ed..611a5585a9024a728c71e60ada951b3a73936708 100644 (file)
@@ -154,18 +154,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
 phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
 {
        struct acpi_table_madt *madt = NULL;
-       acpi_size tbl_size;
        phys_cpuid_t rv;
 
-       acpi_get_table_with_size(ACPI_SIG_MADT, 0,
-                                (struct acpi_table_header **)&madt,
-                                &tbl_size);
+       acpi_get_table(ACPI_SIG_MADT, 0,
+                      (struct acpi_table_header **)&madt);
        if (!madt)
                return PHYS_CPUID_INVALID;
 
        rv = map_madt_entry(madt, 1, acpi_id, true);
 
-       early_acpi_os_unmap_memory(madt, tbl_size);
+       acpi_put_table((struct acpi_table_header *)madt);
 
        return rv;
 }
index 93b00cf4eb3922d541a43d200ce73d5d414f6c3f..45dec874ea55b820281457a3ae5de2fe1f12403c 100644 (file)
@@ -1120,9 +1120,6 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
                                  "support\n"));
                *cap |= ACPI_VIDEO_BACKLIGHT;
-               if (!acpi_has_method(handle, "_BQC"))
-                       printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
-                               "cannot determine initial brightness\n");
                /* We have backlight support, no need to scan further */
                return AE_CTRL_TERMINATE;
        }
index e8d7bc7d4da89b1d0ff8f86bbf5ebbb258a411e4..b8019c4c1d38908895b21a92a094074ca2bd5bd8 100644 (file)
@@ -33,7 +33,6 @@ int __init parse_spcr(bool earlycon)
 {
        static char opts[64];
        struct acpi_table_spcr *table;
-       acpi_size table_size;
        acpi_status status;
        char *uart;
        char *iotype;
@@ -43,9 +42,8 @@ int __init parse_spcr(bool earlycon)
        if (acpi_disabled)
                return -ENODEV;
 
-       status = acpi_get_table_with_size(ACPI_SIG_SPCR, 0,
-                                         (struct acpi_table_header **)&table,
-                                         &table_size);
+       status = acpi_get_table(ACPI_SIG_SPCR, 0,
+                               (struct acpi_table_header **)&table);
 
        if (ACPI_FAILURE(status))
                return -ENOENT;
@@ -106,6 +104,6 @@ int __init parse_spcr(bool earlycon)
        err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
 
 done:
-       early_acpi_os_unmap_memory((void __iomem *)table, table_size);
+       acpi_put_table((struct acpi_table_header *)table);
        return err;
 }
index cdd56c4657e05ff38ed8e11da59d0c37a91326d6..2604189d6cd156e5449c7dd071d63120ea000a0e 100644 (file)
@@ -333,7 +333,6 @@ acpi_table_parse_entries_array(char *id,
                         unsigned int max_entries)
 {
        struct acpi_table_header *table_header = NULL;
-       acpi_size tbl_size;
        int count;
        u32 instance = 0;
 
@@ -346,7 +345,7 @@ acpi_table_parse_entries_array(char *id,
        if (!strncmp(id, ACPI_SIG_MADT, 4))
                instance = acpi_apic_instance;
 
-       acpi_get_table_with_size(id, instance, &table_header, &tbl_size);
+       acpi_get_table(id, instance, &table_header);
        if (!table_header) {
                pr_warn("%4.4s not present\n", id);
                return -ENODEV;
@@ -355,7 +354,7 @@ acpi_table_parse_entries_array(char *id,
        count = acpi_parse_entries_array(id, table_size, table_header,
                        proc, proc_num, max_entries);
 
-       early_acpi_os_unmap_memory((char *)table_header, tbl_size);
+       acpi_put_table(table_header);
        return count;
 }
 
@@ -397,7 +396,6 @@ acpi_table_parse_madt(enum acpi_madt_type id,
 int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
 {
        struct acpi_table_header *table = NULL;
-       acpi_size tbl_size;
 
        if (acpi_disabled)
                return -ENODEV;
@@ -406,13 +404,13 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
                return -EINVAL;
 
        if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-               acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
+               acpi_get_table(id, acpi_apic_instance, &table);
        else
-               acpi_get_table_with_size(id, 0, &table, &tbl_size);
+               acpi_get_table(id, 0, &table);
 
        if (table) {
                handler(table);
-               early_acpi_os_unmap_memory(table, tbl_size);
+               acpi_put_table(table);
                return 0;
        } else
                return -ENODEV;
@@ -426,16 +424,15 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
 static void __init check_multiple_madt(void)
 {
        struct acpi_table_header *table = NULL;
-       acpi_size tbl_size;
 
-       acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
+       acpi_get_table(ACPI_SIG_MADT, 2, &table);
        if (table) {
                pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
                        acpi_apic_instance);
                pr_warn("If \"acpi_apic_instance=%d\" works better, "
                        "notify linux-acpi@vger.kernel.org\n",
                        acpi_apic_instance ? 0 : 2);
-               early_acpi_os_unmap_memory(table, tbl_size);
+               acpi_put_table(table);
 
        } else
                acpi_apic_instance = 0;
index 1e3903d0d99441897cae42cd032dfff319b1115f..eb3af2739537a8def39259206e2345a2adc72c71 100644 (file)
@@ -363,6 +363,7 @@ static ssize_t file_name##_show(struct device *dev,         \
        return sprintf(buf, "%u\n", this_leaf->object);         \
 }
 
+show_one(id, id);
 show_one(level, level);
 show_one(coherency_line_size, coherency_line_size);
 show_one(number_of_sets, number_of_sets);
@@ -444,6 +445,7 @@ static ssize_t write_policy_show(struct device *dev,
        return n;
 }
 
+static DEVICE_ATTR_RO(id);
 static DEVICE_ATTR_RO(level);
 static DEVICE_ATTR_RO(type);
 static DEVICE_ATTR_RO(coherency_line_size);
@@ -457,6 +459,7 @@ static DEVICE_ATTR_RO(shared_cpu_list);
 static DEVICE_ATTR_RO(physical_line_partition);
 
 static struct attribute *cache_default_attrs[] = {
+       &dev_attr_id.attr,
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
@@ -480,6 +483,8 @@ cache_default_attrs_is_visible(struct kobject *kobj,
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;
 
+       if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+               return mode;
        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
index 2a8f4705c734df7c2cc99f8a532a55cc0cbbe231..7f3430654fbd97972c8d4d74bc70644adcb6ce17 100644 (file)
@@ -161,19 +161,22 @@ static int __init moxart_timer_init(struct device_node *node)
        timer->base = of_iomap(node, 0);
        if (!timer->base) {
                pr_err("%s: of_iomap failed\n", node->full_name);
-               return -ENXIO;
+               ret = -ENXIO;
+               goto out_free;
        }
 
        irq = irq_of_parse_and_map(node, 0);
        if (irq <= 0) {
                pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_unmap;
        }
 
        clk = of_clk_get(node, 0);
        if (IS_ERR(clk))  {
                pr_err("%s: of_clk_get failed\n", node->full_name);
-               return PTR_ERR(clk);
+               ret = PTR_ERR(clk);
+               goto out_unmap;
        }
 
        pclk = clk_get_rate(clk);
@@ -186,7 +189,8 @@ static int __init moxart_timer_init(struct device_node *node)
                timer->t1_disable_val = ASPEED_TIMER1_DISABLE;
        } else {
                pr_err("%s: unknown platform\n", node->full_name);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_unmap;
        }
 
        timer->count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
@@ -208,14 +212,14 @@ static int __init moxart_timer_init(struct device_node *node)
                                    clocksource_mmio_readl_down);
        if (ret) {
                pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
-               return ret;
+               goto out_unmap;
        }
 
        ret = request_irq(irq, moxart_timer_interrupt, IRQF_TIMER,
                          node->name, &timer->clkevt);
        if (ret) {
                pr_err("%s: setup_irq failed\n", node->full_name);
-               return ret;
+               goto out_unmap;
        }
 
        /* Clear match registers */
@@ -241,6 +245,12 @@ static int __init moxart_timer_init(struct device_node *node)
        clockevents_config_and_register(&timer->clkevt, pclk, 0x4, 0xfffffffe);
 
        return 0;
+
+out_unmap:
+       iounmap(timer->base);
+out_free:
+       kfree(timer);
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
 CLOCKSOURCE_OF_DECLARE(aspeed, "aspeed,ast2400-timer", moxart_timer_init);
index 3a98702b7445f747d9a710f16753d29e312b17a6..3a2ca0f79daf281c5940222f6b9da179b35f64f3 100644 (file)
@@ -930,7 +930,7 @@ static void __init acpi_cpufreq_boost_init(void)
 
 static void acpi_cpufreq_boost_exit(void)
 {
-       if (acpi_cpufreq_online >= 0)
+       if (acpi_cpufreq_online > 0)
                cpuhp_remove_state_nocalls(acpi_cpufreq_online);
 }
 
index 176e84cc3991994871d6eee9a1b8927a3bdf95fa..0cb9040eca49c3c84fb852f674f649f213bfd3e7 100644 (file)
@@ -107,7 +107,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 }
 
 #ifdef CONFIG_REGULATOR
-static void __init s3c64xx_cpufreq_config_regulator(void)
+static void s3c64xx_cpufreq_config_regulator(void)
 {
        int count, v, i, found;
        struct cpufreq_frequency_table *freq;
index 88bebe1968b717b4060a449ff49278cee2a84315..54be60ead08f8068c18dc9bbfd5a40e3cb26685c 100644 (file)
@@ -560,7 +560,7 @@ static int __init dmi_present(const u8 *buf)
                                        dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
                        }
                        dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
-                       printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
+                       pr_info("DMI: %s\n", dmi_ids_string);
                        return 0;
                }
        }
@@ -588,7 +588,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
                                dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
                                dmi_ver & 0xFF);
                        dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
-                       pr_debug("DMI: %s\n", dmi_ids_string);
+                       pr_info("DMI: %s\n", dmi_ids_string);
                        return 0;
                }
        }
index d779307a96852b15f251c9595e035e0ee537db7e..46e6dcc089cbdd25fdf61025c53e7dc078233de3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/gpio/driver.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/mfd/tps65218.h>
 
 struct tps65218_gpio {
@@ -30,7 +31,7 @@ static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
        unsigned int val;
        int ret;
 
-       ret = tps65218_reg_read(tps65218, TPS65218_REG_ENABLE2, &val);
+       ret = regmap_read(tps65218->regmap, TPS65218_REG_ENABLE2, &val);
        if (ret)
                return ret;
 
index 4f973a9c7b8714d55229b6f712522f50fcfb1fdf..8ec1967a850b86361b18535955fd161a0808f6d4 100644 (file)
@@ -305,8 +305,9 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
        GOP_VBIOS_CONTENT *vbios;
        VFCT_IMAGE_HEADER *vhdr;
 
-       if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+       if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
                return false;
+       tbl_size = hdr->length;
        if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
                DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
                goto out_unmap;
index e564442b6393f82520aad6e2f47f99e23f7c68bc..b4e4ec630e8cfd5d38d55b61199f3cc60269e0c2 100644 (file)
@@ -1944,9 +1944,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v6_0_lock_cursor(crtc, true);
 
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height ||
-           hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -1955,8 +1953,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v6_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_width = width;
-               amdgpu_crtc->cursor_height = height;
                amdgpu_crtc->cursor_hot_x = hot_x;
                amdgpu_crtc->cursor_hot_y = hot_y;
        }
index 6ce7fb42dbef68232a8325d720d54bf99b3c8783..584abe834a3ce4658de0e54f6aae416729ad868f 100644 (file)
@@ -2438,8 +2438,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v8_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_width = width;
-               amdgpu_crtc->cursor_height = height;
                amdgpu_crtc->cursor_hot_x = hot_x;
                amdgpu_crtc->cursor_hot_y = hot_y;
        }
index 558640aee15a178f9e79ba786bd67bfc12c7d197..b323f5ef64d217813683722ec4cd25282e9185ea 100644 (file)
@@ -411,244 +411,587 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
                break;
        }
 
-       if (adev->asic_type == CHIP_VERDE ||
-           adev->asic_type == CHIP_OLAND ||
-           adev->asic_type == CHIP_HAINAN) {
+       if (adev->asic_type == CHIP_VERDE) {
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
                        switch (reg_offset) {
                        case 0:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 1:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 2:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 3:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
                        case 4:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16));
                                break;
                        case 5:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
                                break;
                        case 6:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
                                break;
                        case 7:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
                        case 8:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
                                break;
                        case 9:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16));
                                break;
                        case 10:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 11:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 12:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 13:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16));
                                break;
                        case 14:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 15:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 16:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 17:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 18:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16));
+                               break;
+                       case 19:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
                                                 NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 20:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
                        case 21:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
                                break;
                        case 22:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 23:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 24:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 25:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 26:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 27:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 28:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 29:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 30:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       default:
+                               continue;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+       } else if (adev->asic_type == CHIP_OLAND ||
+           adev->asic_type == CHIP_HAINAN) {
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 1:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 2:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 3:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 4:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 5:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 6:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 7:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
+                               break;
+                       case 9:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 10:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 11:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 12:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 13:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 14:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 15:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 16:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 17:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
                                                 NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 18:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 19:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 20:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 21:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 22:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
                                break;
                        case 23:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
                                break;
                        case 24:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
                                break;
                        case 25:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
-                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 26:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 27:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 28:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 29:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 30:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
                                break;
                        default:
-                               gb_tile_moden = 0;
-                               break;
+                               continue;
                        }
                        adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
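
How these values are built: each GB_TILE_MODEn entry is a single 32-bit word assembled by OR-ing the bitfield helper macros seen above (MICRO_TILE_MODE, ARRAY_MODE, PIPE_CONFIG, TILE_SPLIT, BANK_WIDTH, BANK_HEIGHT, MACRO_TILE_ASPECT, NUM_BANKS); the loop caches the word in adev->gfx.config.tile_mode_array[] and writes it to hardware via WREG32. Because the fields occupy disjoint bit ranges, reordering the macro terms inside an entry (as this patch does) produces the identical register value; the functional changes are the new field contents and the new "default: continue;", which skips both the array update and the register write for indices the table does not cover, where the old code wrote an explicit 0. The standalone sketch below illustrates the composition only; the shift positions mirror the SI GB_TILE_MODEn layout as defined in radeon's sid.h, and the concrete field values passed in are illustrative assumptions, not values taken from this patch.

	/* Minimal userspace sketch of composing one tile-mode word.
	 * Shifts assume the SI GB_TILE_MODEn layout (per radeon's sid.h);
	 * the field values in main() are illustrative only. */
	#include <stdint.h>
	#include <stdio.h>

	#define MICRO_TILE_MODE(x)    ((uint32_t)(x) << 0)
	#define ARRAY_MODE(x)         ((uint32_t)(x) << 2)
	#define PIPE_CONFIG(x)        ((uint32_t)(x) << 6)
	#define TILE_SPLIT(x)         ((uint32_t)(x) << 11)
	#define BANK_WIDTH(x)         ((uint32_t)(x) << 14)
	#define BANK_HEIGHT(x)        ((uint32_t)(x) << 16)
	#define MACRO_TILE_ASPECT(x)  ((uint32_t)(x) << 18)
	#define NUM_BANKS(x)          ((uint32_t)(x) << 20)

	int main(void)
	{
		/* Compose one entry the way the switch cases above do;
		 * the fields are disjoint, so the OR order is irrelevant. */
		uint32_t gb_tile_moden = MICRO_TILE_MODE(2) |  /* illustrative */
					 ARRAY_MODE(4) |
					 PIPE_CONFIG(0) |
					 TILE_SPLIT(0) |
					 BANK_WIDTH(0) |
					 BANK_HEIGHT(2) |
					 MACRO_TILE_ASPECT(2) |
					 NUM_BANKS(3);

		printf("GB_TILE_MODEn word: 0x%08x\n", gb_tile_moden);
		return 0;
	}
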
@@ -656,239 +999,291 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
        } else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
                        switch (reg_offset) {
-                       case 0:  /* non-AA compressed depth or any compressed stencil */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 0:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 1:  /* 2xAA/4xAA compressed depth only */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 1:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 2:  /* 8xAA compressed depth only */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 2:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 3:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
-                       case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                       case 4:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
                                break;
-                       case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 5:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 6:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                       case 7:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 8:  /* 1D and 1D Array Surfaces */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
                                break;
-                       case 9:  /* Displayable maps. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                       case 9:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
                                break;
-                       case 10:  /* Display 8bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                       case 10:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 11:  /* Display 16bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                       case 11:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 12:  /* Display 32bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                       case 12:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 13:  /* Thin. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                       case 13:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
                                break;
-                       case 14:  /* Thin 8 bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 14:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 15:  /* Thin 16 bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 15:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 16:  /* Thin 32 bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 16:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 17:  /* Thin 64 bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 17:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
                                                 NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 18:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
+                               break;
+                       case 19:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
-                       case 21:  /* 8 bpp PRT. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 20:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THICK) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
                                                 NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
-                       case 22:  /* 16 bpp PRT */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 21:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 22:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
                                break;
-                       case 23:  /* 32 bpp PRT */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 23:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 24:  /* 64 bpp PRT */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 24:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 25:  /* 128 bpp PRT */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                       case 25:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
-                                                NUM_BANKS(ADDR_SURF_8_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       default:
-                               gb_tile_moden = 0;
+                       case 26:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 27:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 28:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
+                       case 29:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 30:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       default:
+                               continue;
                        }
                        adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
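
[Editor's note] The switch above rebuilds this part's GB_TILE_MODE table: each entry ORs the micro-tile mode, array mode and pipe config into one register value, with tile split and bank geometry added for the 2D-tiled modes, and the rewritten "default" now skips unlisted indices entirely (leaving them at their hardware reset values) where the old code wrote 0. As an illustrative sketch, here is how one entry above (case 10) decomposes; the field macros shift each value into its bitfield of GB_TILE_MODEn, with the exact shifts defined in the gfx register headers:

        u32 moden = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
                    ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                    PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                    TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
                    BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                    BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
                    MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
                    NUM_BANKS(ADDR_SURF_16_BANK);

        WREG32(mmGB_TILE_MODE0 + 10, moden);    /* program tile mode index 10 */
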
index 6324f67bdb1fac705d79e80c4d85ce097ec7358f..d0ec00986f3826c32957ab432c677c53c24091c2 100644 (file)
@@ -3949,8 +3949,12 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
        temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
        data = mmRLC_SRM_INDEX_CNTL_DATA_0;
        for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
-               amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
-               amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+               if (unique_indices[i] != 0) {
+                       amdgpu_mm_wreg(adev, temp + i,
+                                       unique_indices[i] & 0x3FFFF, false);
+                       amdgpu_mm_wreg(adev, data + i,
+                                       unique_indices[i] >> 20, false);
+               }
        }
        kfree(register_list_format);
 
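[Editor's note] The hunk above stops programming RLC SRM index/data register pairs for empty slots: each unique_indices entry appears to pack a register address in bits 17:0 (the 0x3FFFF mask) and a data selector in the upper bits, so a zero entry is an unused slot and writing it would push a bogus address-0 pair into the hardware. A minimal sketch of the guarded loop, with ARRAY_SIZE() standing in for the sizeof division used in the source:

        for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
                if (!unique_indices[i])
                        continue;               /* unused slot, skip */
                amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
                amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
        }
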
@@ -3966,20 +3970,17 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
 {
        uint32_t data;
 
-       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
-                             AMD_PG_SUPPORT_GFX_SMG |
-                             AMD_PG_SUPPORT_GFX_DMG)) {
-               WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
+       WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
 
-               data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
-               data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
-               data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
-               data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
-               WREG32(mmRLC_PG_DELAY, data);
+       data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+       data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+       data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+       data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+       WREG32(mmRLC_PG_DELAY, data);
+
+       WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+       WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
 
-               WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
-               WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
-       }
 }
 
 static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
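
[Editor's note] gfx_v8_0_init_power_gating() loses its pg_flags guard because, after the init_pg hunk further down, it is only reached on ASICs that actually use this path, so the poll count, RLC_PG_DELAY values and auto-PG threshold are now programmed unconditionally. A sketch of the resulting call shape, assuming the hunks below:

        /* Guard moved to the caller; sketch only. */
        if (adev->asic_type == CHIP_CARRIZO ||
            adev->asic_type == CHIP_STONEY ||
            adev->asic_type == CHIP_POLARIS11)
                gfx_v8_0_init_power_gating(adev);   /* no pg_flags test inside */
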
@@ -3996,41 +3997,37 @@ static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
 
 static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
 {
-       WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
+       WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
 }
 
 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
 {
-       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
-                             AMD_PG_SUPPORT_GFX_SMG |
-                             AMD_PG_SUPPORT_GFX_DMG |
-                             AMD_PG_SUPPORT_CP |
-                             AMD_PG_SUPPORT_GDS |
-                             AMD_PG_SUPPORT_RLC_SMU_HS)) {
+       if ((adev->asic_type == CHIP_CARRIZO) ||
+           (adev->asic_type == CHIP_STONEY)) {
                gfx_v8_0_init_csb(adev);
                gfx_v8_0_init_save_restore_list(adev);
                gfx_v8_0_enable_save_restore_machine(adev);
-
-               if ((adev->asic_type == CHIP_CARRIZO) ||
-                   (adev->asic_type == CHIP_STONEY)) {
-                       WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
-                       gfx_v8_0_init_power_gating(adev);
-                       WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
-                       if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
-                               cz_enable_sck_slow_down_on_power_up(adev, true);
-                               cz_enable_sck_slow_down_on_power_down(adev, true);
-                       } else {
-                               cz_enable_sck_slow_down_on_power_up(adev, false);
-                               cz_enable_sck_slow_down_on_power_down(adev, false);
-                       }
-                       if (adev->pg_flags & AMD_PG_SUPPORT_CP)
-                               cz_enable_cp_power_gating(adev, true);
-                       else
-                               cz_enable_cp_power_gating(adev, false);
-               } else if (adev->asic_type == CHIP_POLARIS11) {
-                       gfx_v8_0_init_power_gating(adev);
+               WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+               gfx_v8_0_init_power_gating(adev);
+               WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+               if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+                       cz_enable_sck_slow_down_on_power_up(adev, true);
+                       cz_enable_sck_slow_down_on_power_down(adev, true);
+               } else {
+                       cz_enable_sck_slow_down_on_power_up(adev, false);
+                       cz_enable_sck_slow_down_on_power_down(adev, false);
                }
+               if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+                       cz_enable_cp_power_gating(adev, true);
+               else
+                       cz_enable_cp_power_gating(adev, false);
+       } else if (adev->asic_type == CHIP_POLARIS11) {
+               gfx_v8_0_init_csb(adev);
+               gfx_v8_0_init_save_restore_list(adev);
+               gfx_v8_0_enable_save_restore_machine(adev);
+               gfx_v8_0_init_power_gating(adev);
        }
+
 }
 
 static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
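
[Editor's note] Two related fixes land in the hunk above. First, cz_enable_cp_power_gating() inverts its write: CP_PG_DISABLE reads as an active-high disable bit, so enabling CP power gating must clear it, and the old "enable ? 1 : 0" did the opposite:

        /* CP_PG_DISABLE: 1 = CP power gating off, 0 = allowed. */
        WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);

Second, gfx_v8_0_init_pg() now branches on ASIC type instead of a union of pg_flags, and Polaris11 gains the same CSB, save/restore list and save/restore machine setup that Carrizo/Stoney already performed.
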
@@ -5339,14 +5336,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
 
-       if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
-               return 0;
-
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
-               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
-                       cz_update_gfx_cg_power_gating(adev, enable);
+
+               cz_update_gfx_cg_power_gating(adev, enable);
 
                if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
                        gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
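
[Editor's note] Dropping the blanket early return on AMD_PG_SUPPORT_GFX_PG means the per-feature checks below still run when only the other PG flags are set, and the now redundant inner GFX_PG test disappears with it. Sketch of the resulting Carrizo/Stoney path, reproducing only the lines visible in the hunk:

        cz_update_gfx_cg_power_gating(adev, enable);
        if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
                gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
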
@@ -5791,25 +5785,49 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
                                          enum amd_clockgating_state state)
 {
-       uint32_t msg_id, pp_state;
+       uint32_t msg_id, pp_state = 0;
+       uint32_t pp_support_state = 0;
        void *pp_handle = adev->powerplay.pp_handle;
 
-       if (state == AMD_CG_STATE_UNGATE)
-               pp_state = 0;
-       else
-               pp_state = PP_STATE_CG | PP_STATE_LS;
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_CG,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_CG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_MG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
+
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_MG,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
        return 0;
 }
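
[Editor's note] Both this hunk and the Polaris variant below replace a blind "claim CG and LS support for every block" message with one built from cg_flags, so the SMU is only told about states the ASIC actually supports. A hypothetical helper (not in the driver, shown only to condense the repeated pattern) would look like:

        static void set_gfx_cg_msg(struct amdgpu_device *adev, u32 block,
                                   u32 cg_mask, u32 ls_mask,
                                   enum amd_clockgating_state state)
        {
                u32 support = 0, pp_state = 0, msg_id;

                if (adev->cg_flags & ls_mask) {
                        support |= PP_STATE_SUPPORT_LS;
                        pp_state |= PP_STATE_LS;
                }
                if (adev->cg_flags & cg_mask) {
                        support |= PP_STATE_SUPPORT_CG;
                        pp_state |= PP_STATE_CG;
                }
                if (!support)
                        return;         /* block not supported, send nothing */
                if (state == AMD_CG_STATE_UNGATE)
                        pp_state = 0;   /* keep support bits, request ungated */

                msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, block, support, pp_state);
                amd_set_clockgating_by_smu(adev->powerplay.pp_handle, msg_id);
        }

With it, the Tonga body above reduces to two calls, e.g.
set_gfx_cg_msg(adev, PP_BLOCK_GFX_CG, AMD_CG_SUPPORT_GFX_CGCG, AMD_CG_SUPPORT_GFX_CGLS, state).
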
@@ -5817,43 +5835,98 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
 static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
                                          enum amd_clockgating_state state)
 {
-       uint32_t msg_id, pp_state;
+
+       uint32_t msg_id, pp_state = 0;
+       uint32_t pp_support_state = 0;
        void *pp_handle = adev->powerplay.pp_handle;
 
-       if (state == AMD_CG_STATE_UNGATE)
-               pp_state = 0;
-       else
-               pp_state = PP_STATE_CG | PP_STATE_LS;
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_CG,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_CG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_3D,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_3D,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_MG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_RLC,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_MG,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+               pp_support_state = PP_STATE_SUPPORT_LS;
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_LS;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_RLC,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+               pp_support_state = PP_STATE_SUPPORT_LS;
+
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_LS;
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
                        PP_BLOCK_GFX_CP,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_support_state,
                        pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
        return 0;
 }
index 3ed8ad8725b9cf09c965876a482b0699e5704058..c46b0159007d976406d64557316f45e045d82191 100644 (file)
 
 static const u32 tahiti_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
        0x260e, 0x0002021c, 0x00020200,
        0x031e, 0x00000080, 0x00000000,
-       0x340c, 0x000300c0, 0x00800040,
-       0x360c, 0x000300c0, 0x00800040,
+       0x340c, 0x000000c0, 0x00800040,
+       0x360c, 0x000000c0, 0x00800040,
        0x16ec, 0x000000f0, 0x00000070,
        0x16f0, 0x00200000, 0x50100000,
        0x1c0c, 0x31000311, 0x00000011,
@@ -60,7 +61,7 @@ static const u32 tahiti_golden_registers[] =
        0x22c4, 0x0000ff0f, 0x00000000,
        0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x2a00126a,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x07ffffff, 0x03000000,
        0x23a2, 0x01ff1f3f, 0x00000000,
@@ -73,7 +74,11 @@ static const u32 tahiti_golden_registers[] =
        0x2234, 0xffffffff, 0x000fff40,
        0x2235, 0x0000001f, 0x00000010,
        0x0504, 0x20000000, 0x20fffed8,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
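
[Editor's note] These golden register tables are (offset, and-mask, or-value) triples applied once at init. Roughly, the sequence programmer reads each register, clears the masked bits and ORs in the new value, with a full 0xffffffff mask treated as a plain overwrite; a sketch from memory of how amdgpu consumes such a table (treat details as an approximation, not the driver's exact code):

        for (i = 0; i < array_size; i += 3) {
                u32 reg      = registers[i + 0];
                u32 and_mask = registers[i + 1];
                u32 or_mask  = registers[i + 2];
                u32 tmp;

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;          /* full overwrite */
                } else {
                        tmp = RREG32(reg);      /* read-modify-write */
                        tmp &= ~and_mask;
                        tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }

That also explains the 0x000c mask widening in the hunks above: changing 0x000000ff to 0xffffffff turns a partial update into a full overwrite of the register.
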
 
 static const u32 tahiti_golden_registers2[] =
@@ -83,16 +88,18 @@ static const u32 tahiti_golden_registers2[] =
 
 static const u32 tahiti_golden_rlc_registers[] =
 {
+       0x263e, 0xffffffff, 0x12011003,
        0x3109, 0xffffffff, 0x00601005,
        0x311f, 0xffffffff, 0x10104040,
        0x3122, 0xffffffff, 0x0100000a,
        0x30c5, 0xffffffff, 0x00000800,
        0x30c3, 0xffffffff, 0x800000f4,
-       0x3d2a, 0xffffffff, 0x00000000
+       0x3d2a, 0x00000008, 0x00000000
 };
 
 static const u32 pitcairn_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
@@ -110,7 +117,7 @@ static const u32 pitcairn_golden_registers[] =
        0x22c4, 0x0000ff0f, 0x00000000,
        0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x2a00126a,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x07ffffff, 0x03000000,
        0x2418, 0x0000007f, 0x00000020,
@@ -119,11 +126,16 @@ static const u32 pitcairn_golden_registers[] =
        0x2b04, 0xffffffff, 0x00000000,
        0x2b03, 0xffffffff, 0x32761054,
        0x2235, 0x0000001f, 0x00000010,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
 
 static const u32 pitcairn_golden_rlc_registers[] =
 {
+       0x263e, 0xffffffff, 0x12011003,
        0x3109, 0xffffffff, 0x00601004,
        0x311f, 0xffffffff, 0x10102020,
        0x3122, 0xffffffff, 0x01000020,
@@ -133,133 +145,134 @@ static const u32 pitcairn_golden_rlc_registers[] =
 
 static const u32 verde_pg_init[] =
 {
-       0xd4f, 0xffffffff, 0x40000,
-       0xd4e, 0xffffffff, 0x200010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x7007,
-       0xd4e, 0xffffffff, 0x300010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x400000,
-       0xd4e, 0xffffffff, 0x100010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x120200,
-       0xd4e, 0xffffffff, 0x500010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x1e1e16,
-       0xd4e, 0xffffffff, 0x600010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x171f1e,
-       0xd4e, 0xffffffff, 0x700010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4e, 0xffffffff, 0x9ff,
-       0xd40, 0xffffffff, 0x0,
-       0xd41, 0xffffffff, 0x10000800,
-       0xd41, 0xffffffff, 0xf,
-       0xd41, 0xffffffff, 0xf,
-       0xd40, 0xffffffff, 0x4,
-       0xd41, 0xffffffff, 0x1000051e,
-       0xd41, 0xffffffff, 0xffff,
-       0xd41, 0xffffffff, 0xffff,
-       0xd40, 0xffffffff, 0x8,
-       0xd41, 0xffffffff, 0x80500,
-       0xd40, 0xffffffff, 0x12,
-       0xd41, 0xffffffff, 0x9050c,
-       0xd40, 0xffffffff, 0x1d,
-       0xd41, 0xffffffff, 0xb052c,
-       0xd40, 0xffffffff, 0x2a,
-       0xd41, 0xffffffff, 0x1053e,
-       0xd40, 0xffffffff, 0x2d,
-       0xd41, 0xffffffff, 0x10546,
-       0xd40, 0xffffffff, 0x30,
-       0xd41, 0xffffffff, 0xa054e,
-       0xd40, 0xffffffff, 0x3c,
-       0xd41, 0xffffffff, 0x1055f,
-       0xd40, 0xffffffff, 0x3f,
-       0xd41, 0xffffffff, 0x10567,
-       0xd40, 0xffffffff, 0x42,
-       0xd41, 0xffffffff, 0x1056f,
-       0xd40, 0xffffffff, 0x45,
-       0xd41, 0xffffffff, 0x10572,
-       0xd40, 0xffffffff, 0x48,
-       0xd41, 0xffffffff, 0x20575,
-       0xd40, 0xffffffff, 0x4c,
-       0xd41, 0xffffffff, 0x190801,
-       0xd40, 0xffffffff, 0x67,
-       0xd41, 0xffffffff, 0x1082a,
-       0xd40, 0xffffffff, 0x6a,
-       0xd41, 0xffffffff, 0x1b082d,
-       0xd40, 0xffffffff, 0x87,
-       0xd41, 0xffffffff, 0x310851,
-       0xd40, 0xffffffff, 0xba,
-       0xd41, 0xffffffff, 0x891,
-       0xd40, 0xffffffff, 0xbc,
-       0xd41, 0xffffffff, 0x893,
-       0xd40, 0xffffffff, 0xbe,
-       0xd41, 0xffffffff, 0x20895,
-       0xd40, 0xffffffff, 0xc2,
-       0xd41, 0xffffffff, 0x20899,
-       0xd40, 0xffffffff, 0xc6,
-       0xd41, 0xffffffff, 0x2089d,
-       0xd40, 0xffffffff, 0xca,
-       0xd41, 0xffffffff, 0x8a1,
-       0xd40, 0xffffffff, 0xcc,
-       0xd41, 0xffffffff, 0x8a3,
-       0xd40, 0xffffffff, 0xce,
-       0xd41, 0xffffffff, 0x308a5,
-       0xd40, 0xffffffff, 0xd3,
-       0xd41, 0xffffffff, 0x6d08cd,
-       0xd40, 0xffffffff, 0x142,
-       0xd41, 0xffffffff, 0x2000095a,
-       0xd41, 0xffffffff, 0x1,
-       0xd40, 0xffffffff, 0x144,
-       0xd41, 0xffffffff, 0x301f095b,
-       0xd40, 0xffffffff, 0x165,
-       0xd41, 0xffffffff, 0xc094d,
-       0xd40, 0xffffffff, 0x173,
-       0xd41, 0xffffffff, 0xf096d,
-       0xd40, 0xffffffff, 0x184,
-       0xd41, 0xffffffff, 0x15097f,
-       0xd40, 0xffffffff, 0x19b,
-       0xd41, 0xffffffff, 0xc0998,
-       0xd40, 0xffffffff, 0x1a9,
-       0xd41, 0xffffffff, 0x409a7,
-       0xd40, 0xffffffff, 0x1af,
-       0xd41, 0xffffffff, 0xcdc,
-       0xd40, 0xffffffff, 0x1b1,
-       0xd41, 0xffffffff, 0x800,
-       0xd42, 0xffffffff, 0x6c9b2000,
-       0xd44, 0xfc00, 0x2000,
-       0xd51, 0xffffffff, 0xfc0,
-       0xa35, 0x00000100, 0x100
+       0x0d4f, 0xffffffff, 0x40000,
+       0x0d4e, 0xffffffff, 0x200010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x7007,
+       0x0d4e, 0xffffffff, 0x300010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x400000,
+       0x0d4e, 0xffffffff, 0x100010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x120200,
+       0x0d4e, 0xffffffff, 0x500010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x1e1e16,
+       0x0d4e, 0xffffffff, 0x600010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x171f1e,
+       0x0d4e, 0xffffffff, 0x700010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4e, 0xffffffff, 0x9ff,
+       0x0d40, 0xffffffff, 0x0,
+       0x0d41, 0xffffffff, 0x10000800,
+       0x0d41, 0xffffffff, 0xf,
+       0x0d41, 0xffffffff, 0xf,
+       0x0d40, 0xffffffff, 0x4,
+       0x0d41, 0xffffffff, 0x1000051e,
+       0x0d41, 0xffffffff, 0xffff,
+       0x0d41, 0xffffffff, 0xffff,
+       0x0d40, 0xffffffff, 0x8,
+       0x0d41, 0xffffffff, 0x80500,
+       0x0d40, 0xffffffff, 0x12,
+       0x0d41, 0xffffffff, 0x9050c,
+       0x0d40, 0xffffffff, 0x1d,
+       0x0d41, 0xffffffff, 0xb052c,
+       0x0d40, 0xffffffff, 0x2a,
+       0x0d41, 0xffffffff, 0x1053e,
+       0x0d40, 0xffffffff, 0x2d,
+       0x0d41, 0xffffffff, 0x10546,
+       0x0d40, 0xffffffff, 0x30,
+       0x0d41, 0xffffffff, 0xa054e,
+       0x0d40, 0xffffffff, 0x3c,
+       0x0d41, 0xffffffff, 0x1055f,
+       0x0d40, 0xffffffff, 0x3f,
+       0x0d41, 0xffffffff, 0x10567,
+       0x0d40, 0xffffffff, 0x42,
+       0x0d41, 0xffffffff, 0x1056f,
+       0x0d40, 0xffffffff, 0x45,
+       0x0d41, 0xffffffff, 0x10572,
+       0x0d40, 0xffffffff, 0x48,
+       0x0d41, 0xffffffff, 0x20575,
+       0x0d40, 0xffffffff, 0x4c,
+       0x0d41, 0xffffffff, 0x190801,
+       0x0d40, 0xffffffff, 0x67,
+       0x0d41, 0xffffffff, 0x1082a,
+       0x0d40, 0xffffffff, 0x6a,
+       0x0d41, 0xffffffff, 0x1b082d,
+       0x0d40, 0xffffffff, 0x87,
+       0x0d41, 0xffffffff, 0x310851,
+       0x0d40, 0xffffffff, 0xba,
+       0x0d41, 0xffffffff, 0x891,
+       0x0d40, 0xffffffff, 0xbc,
+       0x0d41, 0xffffffff, 0x893,
+       0x0d40, 0xffffffff, 0xbe,
+       0x0d41, 0xffffffff, 0x20895,
+       0x0d40, 0xffffffff, 0xc2,
+       0x0d41, 0xffffffff, 0x20899,
+       0x0d40, 0xffffffff, 0xc6,
+       0x0d41, 0xffffffff, 0x2089d,
+       0x0d40, 0xffffffff, 0xca,
+       0x0d41, 0xffffffff, 0x8a1,
+       0x0d40, 0xffffffff, 0xcc,
+       0x0d41, 0xffffffff, 0x8a3,
+       0x0d40, 0xffffffff, 0xce,
+       0x0d41, 0xffffffff, 0x308a5,
+       0x0d40, 0xffffffff, 0xd3,
+       0x0d41, 0xffffffff, 0x6d08cd,
+       0x0d40, 0xffffffff, 0x142,
+       0x0d41, 0xffffffff, 0x2000095a,
+       0x0d41, 0xffffffff, 0x1,
+       0x0d40, 0xffffffff, 0x144,
+       0x0d41, 0xffffffff, 0x301f095b,
+       0x0d40, 0xffffffff, 0x165,
+       0x0d41, 0xffffffff, 0xc094d,
+       0x0d40, 0xffffffff, 0x173,
+       0x0d41, 0xffffffff, 0xf096d,
+       0x0d40, 0xffffffff, 0x184,
+       0x0d41, 0xffffffff, 0x15097f,
+       0x0d40, 0xffffffff, 0x19b,
+       0x0d41, 0xffffffff, 0xc0998,
+       0x0d40, 0xffffffff, 0x1a9,
+       0x0d41, 0xffffffff, 0x409a7,
+       0x0d40, 0xffffffff, 0x1af,
+       0x0d41, 0xffffffff, 0xcdc,
+       0x0d40, 0xffffffff, 0x1b1,
+       0x0d41, 0xffffffff, 0x800,
+       0x0d42, 0xffffffff, 0x6c9b2000,
+       0x0d44, 0xfc00, 0x2000,
+       0x0d51, 0xffffffff, 0xfc0,
+       0x0a35, 0x00000100, 0x100
 };
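
[Editor's note] The verde_pg_init hunk above is purely cosmetic: every offset is zero-padded to four hex digits (0xd4f becomes 0x0d4f and so on) while the masks and values are unchanged, matching the padding style used by the other tables in this file.
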
 
 static const u32 verde_golden_rlc_registers[] =
 {
+       0x263e, 0xffffffff, 0x02010002,
        0x3109, 0xffffffff, 0x033f1005,
        0x311f, 0xffffffff, 0x10808020,
        0x3122, 0xffffffff, 0x00800008,
@@ -269,65 +282,45 @@ static const u32 verde_golden_rlc_registers[] =
 
 static const u32 verde_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
        0x260e, 0x0002021c, 0x00020200,
        0x031e, 0x00000080, 0x00000000,
        0x340c, 0x000300c0, 0x00800040,
-       0x340c, 0x000300c0, 0x00800040,
-       0x360c, 0x000300c0, 0x00800040,
        0x360c, 0x000300c0, 0x00800040,
        0x16ec, 0x000000f0, 0x00000070,
        0x16f0, 0x00200000, 0x50100000,
-
        0x1c0c, 0x31000311, 0x00000011,
        0x0ab9, 0x00073ffe, 0x000022a2,
-       0x0ab9, 0x00073ffe, 0x000022a2,
-       0x0ab9, 0x00073ffe, 0x000022a2,
-       0x0903, 0x000007ff, 0x00000000,
-       0x0903, 0x000007ff, 0x00000000,
        0x0903, 0x000007ff, 0x00000000,
        0x2285, 0xf000001f, 0x00000007,
-       0x2285, 0xf000001f, 0x00000007,
-       0x2285, 0xf000001f, 0x00000007,
-       0x2285, 0xffffffff, 0x00ffffff,
+       0x22c9, 0xffffffff, 0x00ffffff,
        0x22c4, 0x0000ff0f, 0x00000000,
-
        0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x0000124a,
-       0xa0d4, 0x3f3f3fff, 0x0000124a,
-       0xa0d4, 0x3f3f3fff, 0x0000124a,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x07ffffff, 0x03000000,
-       0x2440, 0x07ffffff, 0x03000000,
-       0x23a2, 0x01ff1f3f, 0x00000000,
-       0x23a3, 0x01ff1f3f, 0x00000000,
        0x23a2, 0x01ff1f3f, 0x00000000,
-       0x23a1, 0x01ff1f3f, 0x00000000,
-       0x23a1, 0x01ff1f3f, 0x00000000,
-
        0x23a1, 0x01ff1f3f, 0x00000000,
        0x2418, 0x0000007f, 0x00000020,
        0x2542, 0x00010000, 0x00010000,
-       0x2b01, 0x000003ff, 0x00000003,
-       0x2b05, 0x000003ff, 0x00000003,
        0x2b05, 0x000003ff, 0x00000003,
        0x2b04, 0xffffffff, 0x00000000,
-       0x2b04, 0xffffffff, 0x00000000,
-       0x2b04, 0xffffffff, 0x00000000,
-       0x2b03, 0xffffffff, 0x00001032,
-       0x2b03, 0xffffffff, 0x00001032,
        0x2b03, 0xffffffff, 0x00001032,
        0x2235, 0x0000001f, 0x00000010,
-       0x2235, 0x0000001f, 0x00000010,
-       0x2235, 0x0000001f, 0x00000010,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
 
 static const u32 oland_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
@@ -336,7 +329,7 @@ static const u32 oland_golden_registers[] =
        0x340c, 0x000300c0, 0x00800040,
        0x360c, 0x000300c0, 0x00800040,
        0x16ec, 0x000000f0, 0x00000070,
-       0x16f9, 0x00200000, 0x50100000,
+       0x16f0, 0x00200000, 0x50100000,
        0x1c0c, 0x31000311, 0x00000011,
        0x0ab9, 0x00073ffe, 0x000022a2,
        0x0903, 0x000007ff, 0x00000000,
@@ -345,7 +338,7 @@ static const u32 oland_golden_registers[] =
        0x22c4, 0x0000ff0f, 0x00000000,
        0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x00000082,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x07ffffff, 0x03000000,
        0x2418, 0x0000007f, 0x00000020,
@@ -354,11 +347,16 @@ static const u32 oland_golden_registers[] =
        0x2b04, 0xffffffff, 0x00000000,
        0x2b03, 0xffffffff, 0x00003210,
        0x2235, 0x0000001f, 0x00000010,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
 
 static const u32 oland_golden_rlc_registers[] =
 {
+       0x263e, 0xffffffff, 0x02010002,
        0x3109, 0xffffffff, 0x00601005,
        0x311f, 0xffffffff, 0x10104040,
        0x3122, 0xffffffff, 0x0100000a,
@@ -368,22 +366,27 @@ static const u32 oland_golden_rlc_registers[] =
 
 static const u32 hainan_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
        0x260e, 0x0002021c, 0x00020200,
-       0x4595, 0xff000fff, 0x00000100,
+       0x031e, 0x00000080, 0x00000000,
+       0x3430, 0xff000fff, 0x00000100,
        0x340c, 0x000300c0, 0x00800040,
        0x3630, 0xff000fff, 0x00000100,
        0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f0, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
        0x0ab9, 0x00073ffe, 0x000022a2,
        0x0903, 0x000007ff, 0x00000000,
        0x2285, 0xf000001f, 0x00000007,
        0x22c9, 0xffffffff, 0x00ffffff,
        0x22c4, 0x0000ff0f, 0x00000000,
-       0xa393, 0x07ffffff, 0x4e000000,
+       0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x00000000,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x03e00000, 0x03600000,
        0x2418, 0x0000007f, 0x00000020,
@@ -392,12 +395,16 @@ static const u32 hainan_golden_registers[] =
        0x2b04, 0xffffffff, 0x00000000,
        0x2b03, 0xffffffff, 0x00003210,
        0x2235, 0x0000001f, 0x00000010,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
 
 static const u32 hainan_golden_registers2[] =
 {
-       0x263e, 0xffffffff, 0x02010001
+       0x263e, 0xffffffff, 0x02011003
 };
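
For orientation: every row in these golden-register tables is an (offset, and_mask, or_mask) triple. A minimal sketch of how a driver applies such a table (modeled on amdgpu's register-sequence programming; RREG32()/WREG32() are the usual MMIO accessors, and the loop shape here is an assumption, not part of this patch):

static void program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers, u32 array_size)
{
	u32 i, reg, and_mask, or_mask, tmp;

	for (i = 0; i + 2 < array_size; i += 3) {
		reg      = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask  = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;			/* full overwrite */
		} else {
			tmp = RREG32(reg);		/* read-modify-write */
			tmp &= ~and_mask;
			tmp |= (or_mask & and_mask);
		}
		WREG32(reg, tmp);
	}
}

An and_mask of 0xffffffff thus means "replace the whole register", which is why the 0x000c entries above were widened from a 0x000000ff mask to 0xffffffff.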
 
 static const u32 tahiti_mgcg_cgcg_init[] =
@@ -513,18 +520,18 @@ static const u32 tahiti_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x40b, 0x00000101, 0x00000000,
-       0x82a, 0xffffffff, 0x00000104,
-       0x993, 0x000c0000, 0x000c0000,
-       0x992, 0x000c0000, 0x000c0000,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x040b, 0x00000101, 0x00000000,
+       0x082a, 0xffffffff, 0x00000104,
+       0x0993, 0x000c0000, 0x000c0000,
+       0x0992, 0x000c0000, 0x000c0000,
        0x1579, 0xff000fff, 0x00000100,
        0x157a, 0x00000001, 0x00000001,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -612,16 +619,16 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x40b, 0x00000101, 0x00000000,
-       0x82a, 0xffffffff, 0x00000104,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x040b, 0x00000101, 0x00000000,
+       0x082a, 0xffffffff, 0x00000104,
        0x1579, 0xff000fff, 0x00000100,
        0x157a, 0x00000001, 0x00000001,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -709,18 +716,18 @@ static const u32 verde_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x40b, 0x00000101, 0x00000000,
-       0x82a, 0xffffffff, 0x00000104,
-       0x993, 0x000c0000, 0x000c0000,
-       0x992, 0x000c0000, 0x000c0000,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x040b, 0x00000101, 0x00000000,
+       0x082a, 0xffffffff, 0x00000104,
+       0x0993, 0x000c0000, 0x000c0000,
+       0x0992, 0x000c0000, 0x000c0000,
        0x1579, 0xff000fff, 0x00000100,
        0x157a, 0x00000001, 0x00000001,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -788,18 +795,18 @@ static const u32 oland_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x40b, 0x00000101, 0x00000000,
-       0x82a, 0xffffffff, 0x00000104,
-       0x993, 0x000c0000, 0x000c0000,
-       0x992, 0x000c0000, 0x000c0000,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x040b, 0x00000101, 0x00000000,
+       0x082a, 0xffffffff, 0x00000104,
+       0x0993, 0x000c0000, 0x000c0000,
+       0x0992, 0x000c0000, 0x000c0000,
        0x1579, 0xff000fff, 0x00000100,
        0x157a, 0x00000001, 0x00000001,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -867,15 +874,15 @@ static const u32 hainan_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x82a, 0xffffffff, 0x00000104,
-       0x993, 0x000c0000, 0x000c0000,
-       0x992, 0x000c0000, 0x000c0000,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x082a, 0xffffffff, 0x00000104,
+       0x0993, 0x000c0000, 0x000c0000,
+       0x0992, 0x000c0000, 0x000c0000,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -1179,6 +1186,8 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
                        adev->pg_flags = 0;
+               adev->external_rev_id = (adev->rev_id == 0) ? 1 :
+                                       (adev->rev_id == 1) ? 5 : 6;
                break;
        case CHIP_PITCAIRN:
                adev->cg_flags =
@@ -1198,6 +1207,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
                adev->pg_flags = 0;
+               adev->external_rev_id = adev->rev_id + 20;
                break;
 
        case CHIP_VERDE:
@@ -1219,7 +1229,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_MGCG;
                adev->pg_flags = 0;
                //???
-               adev->external_rev_id = adev->rev_id + 0x14;
+               adev->external_rev_id = adev->rev_id + 40;
                break;
        case CHIP_OLAND:
                adev->cg_flags =
@@ -1238,6 +1248,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
                adev->pg_flags = 0;
+               adev->external_rev_id = 60;
                break;
        case CHIP_HAINAN:
                adev->cg_flags =
@@ -1255,6 +1266,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
                adev->pg_flags = 0;
+               adev->external_rev_id = 70;
                break;
 
        default:
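
Taken together, the external_rev_id assignments added above give each SI variant a disjoint revision range; a condensed recap (values straight from the hunks, the grouping itself is only an observation):

/* CHIP_TAHITI:   rev 0 -> 1, rev 1 -> 5, otherwise 6
 * CHIP_PITCAIRN: rev_id + 20
 * CHIP_VERDE:    rev_id + 40  (was rev_id + 0x14)
 * CHIP_OLAND:    60
 * CHIP_HAINAN:   70
 */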
index 9f771f4ffcb71bf1267ff3aae9c5853fb944e149..bf088d6d9bf1f96430d7afda1ec3fbeae6e82768 100644 (file)
@@ -932,18 +932,64 @@ static int vi_common_early_init(void *handle)
                adev->external_rev_id = adev->rev_id + 0x3c;
                break;
        case CHIP_TONGA:
-               adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
-               adev->pg_flags = AMD_PG_SUPPORT_UVD;
+               adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_ROM_MGCG |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_DRM_LS |
+                       AMD_CG_SUPPORT_UVD_MGCG;
+               adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x14;
                break;
        case CHIP_POLARIS11:
-               adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+               adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_3D_CGCG |
+                       AMD_CG_SUPPORT_GFX_3D_CGLS |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_BIF_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_ROM_MGCG |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_DRM_LS |
+                       AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_VCE_MGCG;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x5A;
                break;
        case CHIP_POLARIS10:
-               adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+               adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_3D_CGCG |
+                       AMD_CG_SUPPORT_GFX_3D_CGLS |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_BIF_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_ROM_MGCG |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_DRM_LS |
+                       AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_VCE_MGCG;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x50;
@@ -971,6 +1017,7 @@ static int vi_common_early_init(void *handle)
                        adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
                                AMD_PG_SUPPORT_GFX_SMG |
                                AMD_PG_SUPPORT_GFX_PIPELINE |
+                               AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_UVD |
                                AMD_PG_SUPPORT_VCE;
                }
@@ -996,6 +1043,7 @@ static int vi_common_early_init(void *handle)
                adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
                        AMD_PG_SUPPORT_GFX_SMG |
                        AMD_PG_SUPPORT_GFX_PIPELINE |
+                       AMD_PG_SUPPORT_CP |
                        AMD_PG_SUPPORT_UVD |
                        AMD_PG_SUPPORT_VCE;
                adev->external_rev_id = adev->rev_id + 0x61;
@@ -1155,57 +1203,118 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
 static int vi_common_set_clockgating_state_by_smu(void *handle,
                                           enum amd_clockgating_state state)
 {
-       uint32_t msg_id, pp_state;
+       uint32_t msg_id, pp_state = 0;
+       uint32_t pp_support_state = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        void *pp_handle = adev->powerplay.pp_handle;
 
-       if (state == AMD_CG_STATE_UNGATE)
-               pp_state = 0;
-       else
-               pp_state = PP_STATE_CG | PP_STATE_LS;
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_MC,
-                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_SDMA,
-                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_HDP,
-                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_BIF,
-                      PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_BIF,
-                      PP_STATE_SUPPORT_CG,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_DRM,
-                      PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_ROM,
-                      PP_STATE_SUPPORT_CG,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+       if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
+                       pp_support_state = AMD_CG_SUPPORT_MC_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
+                       pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_MC,
+                              pp_support_state,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
+                       pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
+                       pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_SDMA,
+                              pp_support_state,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+                       pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
+                       pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_HDP,
+                              pp_support_state,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_LS;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_BIF,
+                              PP_STATE_SUPPORT_LS,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+       if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_CG;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_BIF,
+                              PP_STATE_SUPPORT_CG,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
+
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_LS;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_DRM,
+                              PP_STATE_SUPPORT_LS,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
+
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_CG;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_ROM,
+                              PP_STATE_SUPPORT_CG,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
        return 0;
 }
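
Each gated block above follows the same shape: accumulate the supported LS/CG bits, derive the requested state (forced to 0 on ungate), and send one PP_CG_MSG_ID per IP block. A hypothetical helper (names and factoring are not part of this patch) that captures the repeated pattern:

static void vi_set_block_cg_by_smu(struct amdgpu_device *adev,
				   enum amd_clockgating_state state,
				   u32 block, u32 ls_flag, u32 cg_flag)
{
	void *pp_handle = adev->powerplay.pp_handle;
	u32 support = 0, pp_state = 0, msg_id;

	if (!(adev->cg_flags & (ls_flag | cg_flag)))
		return;

	if (ls_flag && (adev->cg_flags & ls_flag)) {
		support |= ls_flag;		/* mirrors pp_support_state above */
		pp_state |= PP_STATE_LS;
	}
	if (cg_flag && (adev->cg_flags & cg_flag)) {
		support |= cg_flag;
		pp_state |= PP_STATE_CG;
	}
	if (state == AMD_CG_STATE_UNGATE)
		pp_state = 0;

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, block, support, pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);
}

e.g. vi_set_block_cg_by_smu(adev, state, PP_BLOCK_SYS_MC, AMD_CG_SUPPORT_MC_LS, AMD_CG_SUPPORT_MC_MGCG); the LS-only DRM and CG-only ROM cases would pass 0 for the flag they lack (the BIF paths above keep LS and CG as two separate messages).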
 
index d1986276dbbd71cc144a48ec52bb14b908095509..c02469ada9f131f417e2a4fdb1d0c6b1fac4eafa 100644 (file)
@@ -126,6 +126,10 @@ enum amd_vce_level {
 #define AMD_CG_SUPPORT_HDP_LS                  (1 << 15)
 #define AMD_CG_SUPPORT_HDP_MGCG                        (1 << 16)
 #define AMD_CG_SUPPORT_ROM_MGCG                        (1 << 17)
+#define AMD_CG_SUPPORT_DRM_LS                  (1 << 18)
+#define AMD_CG_SUPPORT_BIF_MGCG                        (1 << 19)
+#define AMD_CG_SUPPORT_GFX_3D_CGCG             (1 << 20)
+#define AMD_CG_SUPPORT_GFX_3D_CGLS             (1 << 21)
 
 /* PG flags */
 #define AMD_PG_SUPPORT_GFX_PG                  (1 << 0)
index 904beaa932d03f0b5ce7d89a13d16729582052e0..f75c6421db6239c9435ed39dc7d6244d13894920 100644 (file)
@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
        ast_write32(ast, 0x10000, 0xfc600309);
 
        do {
-               ;
+               if (pci_channel_offline(dev->pdev))
+                       return -EIO;
        } while (ast_read32(ast, 0x10000) != 0x01);
        data = ast_read32(ast, 0x10004);
 
@@ -428,7 +429,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
        ast_detect_chip(dev, &need_post);
 
        if (ast->chip != AST1180) {
-               ast_get_dram_info(dev);
+               ret = ast_get_dram_info(dev);
+               if (ret)
+                       goto out_free;
                ast->vram_size = ast_get_vram_info(dev);
                DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
        }
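
The old busy-wait could spin forever if the device fell off the bus; checking pci_channel_offline() gives the loop an exit path, and ast_driver_load() now propagates the failure. A bounded variant (hypothetical, with an arbitrary retry budget) would be another way to cap the wait:

	unsigned int tries = 1000;

	do {
		if (pci_channel_offline(dev->pdev))
			return -EIO;		/* device gone */
		if (--tries == 0)
			return -ETIMEDOUT;	/* register never became ready */
		udelay(1);
	} while (ast_read32(ast, 0x10000) != 0x01);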
index 56002a52936dcfc4a7dd5539973ce316353d0b1c..243224aeabf82f111ab14c4d6995ce868c0d36c1 100644 (file)
@@ -3509,6 +3509,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+                     u32 reply_mask, u32 reply, int timeout_base_ms);
 
 /* intel_sideband.c */
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
index 412f3513f269b4a96217f814aa0d882a27a3e1e5..4a31b7a891ecaf3e2732c9f2d6ce62fa633419f6 100644 (file)
@@ -174,21 +174,35 @@ static struct sg_table *
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping = obj->base.filp->f_mapping;
-       char *vaddr = obj->phys_handle->vaddr;
+       drm_dma_handle_t *phys;
        struct sg_table *st;
        struct scatterlist *sg;
+       char *vaddr;
        int i;
 
        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return ERR_PTR(-EINVAL);
 
+       /* Always aligning to the object size allows a single allocation
+        * to handle all possible callers; given typical object sizes,
+        * the alignment of the buddy allocation will naturally match.
+        */
+       phys = drm_pci_alloc(obj->base.dev,
+                            obj->base.size,
+                            roundup_pow_of_two(obj->base.size));
+       if (!phys)
+               return ERR_PTR(-ENOMEM);
+
+       vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;
 
                page = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(page))
-                       return ERR_CAST(page);
+               if (IS_ERR(page)) {
+                       st = ERR_CAST(page);
+                       goto err_phys;
+               }
 
                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
@@ -202,21 +216,29 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
-       if (st == NULL)
-               return ERR_PTR(-ENOMEM);
+       if (!st) {
+               st = ERR_PTR(-ENOMEM);
+               goto err_phys;
+       }
 
        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
-               return ERR_PTR(-ENOMEM);
+               st = ERR_PTR(-ENOMEM);
+               goto err_phys;
        }
 
        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;
 
-       sg_dma_address(sg) = obj->phys_handle->busaddr;
+       sg_dma_address(sg) = phys->busaddr;
        sg_dma_len(sg) = obj->base.size;
 
+       obj->phys_handle = phys;
+       return st;
+
+err_phys:
+       drm_pci_free(obj->base.dev, phys);
        return st;
 }
 
@@ -272,12 +294,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 
        sg_free_table(pages);
        kfree(pages);
+
+       drm_pci_free(obj->base.dev, obj->phys_handle);
 }
 
 static void
 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
 {
-       drm_pci_free(obj->base.dev, obj->phys_handle);
        i915_gem_object_unpin_pages(obj);
 }
 
@@ -538,15 +561,13 @@ int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
 {
-       drm_dma_handle_t *phys;
        int ret;
 
-       if (obj->phys_handle) {
-               if ((unsigned long)obj->phys_handle->vaddr & (align -1))
-                       return -EBUSY;
+       if (align > obj->base.size)
+               return -EINVAL;
 
+       if (obj->ops == &i915_gem_phys_ops)
                return 0;
-       }
 
        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;
@@ -562,12 +583,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
        if (obj->mm.pages)
                return -EBUSY;
 
-       /* create a new object */
-       phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
-       if (!phys)
-               return -ENOMEM;
-
-       obj->phys_handle = phys;
        obj->ops = &i915_gem_phys_ops;
 
        return i915_gem_object_pin_pages(obj);
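
After this change the physical backing is owned by the page-pinning cycle rather than by attach: i915_gem_object_attach_phys() just swaps in i915_gem_phys_ops, get_pages allocates the DMA buffer, and put_pages frees it. A hedged caller's-eye sketch (not from the patch):

	ret = i915_gem_object_attach_phys(obj, align);	/* pins pages, allocating phys */
	if (ret)
		return ret;
	vaddr = obj->phys_handle->vaddr;	/* valid while the pages stay pinned */
	/* ... use the contiguous mapping ... */
	i915_gem_object_unpin_pages(obj);	/* final unpin lets put_pages free it */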
@@ -2326,7 +2341,8 @@ static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       int page_count, i;
+       const unsigned long page_count = obj->base.size / PAGE_SIZE;
+       unsigned long i;
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
@@ -2352,7 +2368,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        if (st == NULL)
                return ERR_PTR(-ENOMEM);
 
-       page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
@@ -2411,8 +2427,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        i915_sg_trim(st);
 
        ret = i915_gem_gtt_prepare_pages(obj, st);
-       if (ret)
-               goto err_pages;
+       if (ret) {
+               /* DMA remapping failed? One possible cause is that
+                * it could not reserve enough large entries, asking
+                * for PAGE_SIZE chunks instead may be helpful.
+                */
+               if (max_segment > PAGE_SIZE) {
+                       for_each_sgt_page(page, sgt_iter, st)
+                               put_page(page);
+                       sg_free_table(st);
+
+                       max_segment = PAGE_SIZE;
+                       goto rebuild_st;
+               } else {
+                       dev_warn(&dev_priv->drm.pdev->dev,
+                                "Failed to DMA remap %lu pages\n",
+                                page_count);
+                       goto err_pages;
+               }
+       }
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);
index ebaa941c83afd7843a56287f2b857d8f8fd3a9fa..abc78bbfc1dcd0fbc6373222c3801929d1dfd2d4 100644 (file)
@@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                return -ENODEV;
 
        /* See the comment at the drm_mm_init() call for more about this check.
-        * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+        * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
         */
-       if (start < 4096 && (IS_GEN8(dev_priv) ||
-                            IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
+       if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
                start = 4096;
 
        mutex_lock(&dev_priv->mm.stolen_lock);
index 47590ab08d7ea65e7cc94594853117f78e04946b..3df8d3dd31cd0fdb8972b5f3d2ddb5aa1c61d556 100644 (file)
@@ -460,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
 static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
 
index 8405b5a367d7164a15df16ea5c453b472e965916..7e3545f65257c415fa7fab7a65e27a6858e80a09 100644 (file)
@@ -46,14 +46,20 @@ struct edp_power_seq {
        u16 t11_t12;
 } __packed;
 
-/* MIPI Sequence Block definitions */
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming; we use the proper names here to avoid confusion when
+ * reading the code.
+ */
 enum mipi_seq {
        MIPI_SEQ_END = 0,
-       MIPI_SEQ_ASSERT_RESET,
+       MIPI_SEQ_DEASSERT_RESET,        /* Spec says MipiAssertResetPin */
        MIPI_SEQ_INIT_OTP,
        MIPI_SEQ_DISPLAY_ON,
        MIPI_SEQ_DISPLAY_OFF,
-       MIPI_SEQ_DEASSERT_RESET,
+       MIPI_SEQ_ASSERT_RESET,          /* Spec says MipiDeassertResetPin */
        MIPI_SEQ_BACKLIGHT_ON,          /* sequence block v2+ */
        MIPI_SEQ_BACKLIGHT_OFF,         /* sequence block v2+ */
        MIPI_SEQ_TEAR_ON,               /* sequence block v2+ */
index cf5cff7b03b8528ce10590a1dd973bbfa2b87776..6daad86137606d700b6101ee0ffde7a7ee35a1cd 100644 (file)
@@ -6244,35 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
        dev_priv->cdclk_pll.vco = 0;
 }
 
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
-       int ret;
-       u32 val;
-
-       /* inform PCU we want to change CDCLK */
-       val = SKL_CDCLK_PREPARE_FOR_CHANGE;
-       mutex_lock(&dev_priv->rps.hw_lock);
-       ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-
-       return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
-       return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
        u32 freq_select, pcu_ack;
+       int ret;
 
        WARN_ON((cdclk == 24000) != (vco == 0));
 
        DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
 
-       if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
-               DRM_ERROR("failed to inform PCU about cdclk change\n");
+       mutex_lock(&dev_priv->rps.hw_lock);
+       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               SKL_CDCLK_PREPARE_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE, 3);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       if (ret) {
+               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+                         ret);
                return;
        }
 
index 90283edcafba6786f79c509df64aa194e490bc3c..d9bc19be855e76b9bab0f1cee082bc170bccff26 100644 (file)
@@ -4014,8 +4014,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                return;
 
        /* FIXME: we need to synchronize this sort of stuff with hardware
-        * readout */
-       if (WARN_ON_ONCE(!intel_dp->lane_count))
+        * readout. Currently fast link training doesn't work on boot-up. */
+       if (!intel_dp->lane_count)
                return;
 
        /* if link training is requested we should perform it always */
index 0d8ff0034b88567369861dc9ee5aa8de6651b05b..47cd1b20fb3e958fc89c83981e29496ecdfbff49 100644 (file)
@@ -300,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
        mutex_lock(&dev_priv->sb_lock);
        vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
        vlv_iosf_sb_write(dev_priv, port, cfg0,
-                         CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+                         CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+                         CHV_GPIO_GPIOTXSTATE(value));
        mutex_unlock(&dev_priv->sb_lock);
 }
 
@@ -376,11 +377,11 @@ static const fn_mipi_elem_exec exec_elem[] = {
  */
 
 static const char * const seq_name[] = {
-       [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+       [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
        [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
        [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
        [MIPI_SEQ_DISPLAY_OFF]  = "MIPI_SEQ_DISPLAY_OFF",
-       [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+       [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
        [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
        [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
        [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
index 0a09024d6ca3ebc502f0b99cef07ed91f0b4cd64..d4961fa20c73d0e2d390673889ae5fa82f04dd07 100644 (file)
@@ -1968,12 +1968,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
                          ret);
        }
 
-       ret = logical_ring_init(engine);
-       if (ret) {
-               lrc_destroy_wa_ctx_obj(engine);
-       }
-
-       return ret;
+       return logical_ring_init(engine);
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)
index d67974eb127a0f19caa005c1f5a69687ec88ae84..ae2c0bb4b2e8b384c6fc8858490143064ec4dec3 100644 (file)
@@ -2964,24 +2964,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
-       int ret;
-       uint32_t temp = GEN9_SAGV_DISABLE;
-
-       ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
-                                    &temp);
-       if (ret)
-               return ret;
-       else
-               return temp & GEN9_SAGV_IS_DISABLED;
-}
-
 int
 intel_disable_sagv(struct drm_i915_private *dev_priv)
 {
-       int ret, result;
+       int ret;
 
        if (!intel_has_sagv(dev_priv))
                return 0;
@@ -2993,25 +2979,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
        mutex_lock(&dev_priv->rps.hw_lock);
 
        /* bspec says to keep retrying for at least 1 ms */
-       ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+       ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+                               GEN9_SAGV_DISABLE,
+                               GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+                               1);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       if (ret == -ETIMEDOUT) {
-               DRM_ERROR("Request to disable SAGV timed out\n");
-               return -ETIMEDOUT;
-       }
-
        /*
         * Some skl systems, pre-release machines in particular,
         * don't actually have an SAGV.
         */
-       if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+       if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
                DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
                dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
                return 0;
-       } else if (result < 0) {
-               DRM_ERROR("Failed to disable the SAGV\n");
-               return result;
+       } else if (ret < 0) {
+               DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+               return ret;
        }
 
        dev_priv->sagv_status = I915_SAGV_DISABLED;
@@ -7890,6 +7874,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
        return 0;
 }
 
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+                                 u32 request, u32 reply_mask, u32 reply,
+                                 u32 *status)
+{
+       u32 val = request;
+
+       *status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+       return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and, if that times out, for another 10 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+                     u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+       u32 status;
+       int ret;
+
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+                                  &status)
+
+       /*
+        * Prime the PCODE by doing a request first. Normally it guarantees
+        * that a subsequent request, at most @timeout_base_ms later, succeeds.
+        * _wait_for() doesn't guarantee when its passed condition is first
+        * evaluated, so send the first request explicitly.
+        */
+       if (COND) {
+               ret = 0;
+               goto out;
+       }
+       ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+       if (!ret)
+               goto out;
+
+       /*
+        * The above can time out if the number of requests was low (2 in the
+        * worst case) _and_ PCODE was busy for some reason even after a
+        * (queued) request and @timeout_base_ms delay. As a workaround, retry
+        * the poll with preemption disabled to maximize the number of
+        * requests. Increase the timeout from @timeout_base_ms to 10 ms to
+        * account for interrupts that could reduce the number of these
+        * requests.
+        */
+       DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+       WARN_ON_ONCE(timeout_base_ms > 3);
+       preempt_disable();
+       ret = wait_for_atomic(COND, 10);
+       preempt_enable();
+
+out:
+       return ret ? ret : status;
+#undef COND
+}
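
A usage sketch, mirroring the skl_set_cdclk() call earlier in this patch: the caller holds rps.hw_lock and keeps resending SKL_PCODE_CDCLK_CONTROL until the ready bit comes back (or roughly timeout_base_ms + 10 ms elapse):

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret)
		DRM_ERROR("PCU did not ack the cdclk change (%d)\n", ret);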
+
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
        /*
index 7b488e2793d98457c2ae09dd988d49707288acf5..c6be70686b4af869a41db55d01b6f562a9355ce1 100644 (file)
@@ -825,13 +825,9 @@ void intel_psr_init(struct drm_device *dev)
        dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
                HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
-       /* Per platform default */
-       if (i915.enable_psr == -1) {
-               if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-                       i915.enable_psr = 1;
-               else
-                       i915.enable_psr = 0;
-       }
+       /* Per platform default: all disabled. */
+       if (i915.enable_psr == -1)
+               i915.enable_psr = 0;
 
        /* Set link_standby x link_off defaults */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
index 356c662ad45325f6185a7eb6518e33641479d4c3..87b4af092d5487d219024c858585b5448dd3ea1c 100644 (file)
@@ -1039,7 +1039,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 
 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 {
-       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+       u32 val;
+
+       /*
+        * On driver load, a pipe may be active and driving a DSI display.
+        * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+        * (and never recovering) in this case. intel_dsi_post_disable() will
+        * clear it when we turn off the display.
+        */
+       val = I915_READ(DSPCLK_GATE_D);
+       val &= DPOUNIT_CLOCK_GATE_DISABLE;
+       val |= VRHUNIT_CLOCK_GATE_DISABLE;
+       I915_WRITE(DSPCLK_GATE_D, val);
 
        /*
         * Disable trickle feed and enable pnd deadline calculation
index 21b6732425c50d4b368d6b0fb016ea507a8aa6a1..c829cfb02fc4c994a1167e9daa7b4c1010a97ab0 100644 (file)
@@ -603,8 +603,9 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
        GOP_VBIOS_CONTENT *vbios;
        VFCT_IMAGE_HEADER *vhdr;
 
-       if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+       if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
                return false;
+       tbl_size = hdr->length;
        if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
                DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
                goto out_unmap;
index 08153ea4d848097ef5659f40c6bb71964cbc99e2..6ce4313231257f8251b62e02f5aaa390a4619edf 100644 (file)
@@ -150,6 +150,29 @@ static int i2c_dw_plat_prepare_clk(struct dw_i2c_dev *i_dev, bool prepare)
        return 0;
 }
 
+static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
+{
+       u32 param, tx_fifo_depth, rx_fifo_depth;
+
+       /*
+        * Try to detect the FIFO depth if it is not set by the interface
+        * driver; per the HW spec, the depth can be anywhere from 2 to 256.
+        */
+       param = i2c_dw_read_comp_param(dev);
+       tx_fifo_depth = ((param >> 16) & 0xff) + 1;
+       rx_fifo_depth = ((param >> 8)  & 0xff) + 1;
+       if (!dev->tx_fifo_depth) {
+               dev->tx_fifo_depth = tx_fifo_depth;
+               dev->rx_fifo_depth = rx_fifo_depth;
+               dev->adapter.nr = id;
+       } else if (tx_fifo_depth >= 2) {
+               dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
+                               tx_fifo_depth);
+               dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
+                               rx_fifo_depth);
+       }
+}
+
 static int dw_i2c_plat_probe(struct platform_device *pdev)
 {
        struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -245,13 +268,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
                                1000000);
        }
 
-       if (!dev->tx_fifo_depth) {
-               u32 param1 = i2c_dw_read_comp_param(dev);
-
-               dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
-               dev->rx_fifo_depth = ((param1 >> 8)  & 0xff) + 1;
-               dev->adapter.nr = pdev->id;
-       }
+       dw_i2c_set_fifo_size(dev, pdev->id);
 
        adap = &dev->adapter;
        adap->owner = THIS_MODULE;
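
For reference, the "+ 1" in dw_i2c_set_fifo_size() comes from the DesignWare component-parameter register encoding FIFO depths as depth - 1; hypothetical decode macros (not in the driver) make the layout explicit:

/* IC_COMP_PARAM_1: bits 23:16 = TX FIFO depth - 1, bits 15:8 = RX FIFO depth - 1 */
#define DW_COMP_TX_DEPTH(p)	((((p) >> 16) & 0xff) + 1)
#define DW_COMP_RX_DEPTH(p)	((((p) >> 8) & 0xff) + 1)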
index 3d10f1a802be4b8df5488535fda6e32f5d2b2fca..1d87757990568c40b9cebe4df7dda54fe21918f7 100644 (file)
@@ -342,7 +342,9 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
                if (result)
                        return result;
 
-               data[i] = octeon_i2c_data_read(i2c);
+               data[i] = octeon_i2c_data_read(i2c, &result);
+               if (result)
+                       return result;
                if (recv_len && i == 0) {
                        if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
                                return -EPROTO;
index 87151ea74acd475a37726fd43fa927f486ddc76e..e160f838c25461f111f89871eac87beebca61a09 100644 (file)
@@ -141,11 +141,14 @@ static inline void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
  */
 static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
 {
+       int tries = 1000;
        u64 tmp;
 
        __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
        do {
                tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+               if (--tries < 0)
+                       return;
        } while ((tmp & SW_TWSI_V) != 0);
 }
 
@@ -163,24 +166,32 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
  *
  * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
  */
-static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
+static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
+                                     int *error)
 {
+       int tries = 1000;
        u64 tmp;
 
        __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
        do {
                tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+               if (--tries < 0) {
+                       /* signal that the returned data is invalid */
+                       if (error)
+                               *error = -EIO;
+                       return 0;
+               }
        } while ((tmp & SW_TWSI_V) != 0);
 
        return tmp & 0xFF;
 }
 
 #define octeon_i2c_ctl_read(i2c)                                       \
-       octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
-#define octeon_i2c_data_read(i2c)                                      \
-       octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
+       octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
+#define octeon_i2c_data_read(i2c, error)                               \
+       octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
 #define octeon_i2c_stat_read(i2c)                                      \
-       octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
+       octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
 
 /**
  * octeon_i2c_read_int - read the TWSI_INT register
index 05cf192ef1acae340397d9ff67f942bca6d08d3e..0ab1e55558bcd7a520afd8dcf07ae3d8cb77e1c0 100644 (file)
@@ -415,6 +415,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
        adapter->algo = &xgene_slimpro_i2c_algorithm;
        adapter->class = I2C_CLASS_HWMON;
        adapter->dev.parent = &pdev->dev;
+       adapter->dev.of_node = pdev->dev.of_node;
        i2c_set_adapdata(adapter, ctx);
        rc = i2c_add_adapter(adapter);
        if (rc) {
index 3ab654bbfab56f4fe299800e2372fbee86b1385d..b7ca249ec9c38b884ed48fbb8387dcc6729f1cc6 100644 (file)
@@ -95,6 +95,7 @@ static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
                                 struct i2c_client *client, u8 val)
 {
        struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+       int ret = -ENODEV;
 
        if (adap->algo->master_xfer) {
                struct i2c_msg msg;
@@ -104,17 +105,21 @@ static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
                msg.flags = 0;
                msg.len = 2;
                msg.buf = msgbuf;
-               return __i2c_transfer(adap, &msg, 1);
+               ret = __i2c_transfer(adap, &msg, 1);
+
+               if (ret >= 0 && ret != 1)
+                       ret = -EREMOTEIO;
        } else if (adap->algo->smbus_xfer) {
                union i2c_smbus_data data;
 
                data.byte = val;
-               return adap->algo->smbus_xfer(adap, client->addr,
-                                             client->flags, I2C_SMBUS_WRITE,
-                                             pdata->sel_reg_addr,
-                                             I2C_SMBUS_BYTE_DATA, &data);
-       } else
-               return -ENODEV;
+               ret = adap->algo->smbus_xfer(adap, client->addr,
+                                            client->flags, I2C_SMBUS_WRITE,
+                                            pdata->sel_reg_addr,
+                                            I2C_SMBUS_BYTE_DATA, &data);
+       }
+
+       return ret;
 }
 
 static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
@@ -127,10 +132,7 @@ static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
        /* Only select the channel if it's different from the last channel */
        if (data->last_chan != regval) {
                err = mlxcpld_mux_reg_write(muxc->parent, client, regval);
-               if (err)
-                       data->last_chan = 0;
-               else
-                       data->last_chan = regval;
+               data->last_chan = err < 0 ? 0 : regval;
        }
 
        return err;
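
The convention being handled here: __i2c_transfer() returns the number of messages transferred on success (or a negative errno), so for this single-message write any non-negative return other than 1 means the transfer silently fell short. The check both mux drivers in this series now apply:

	ret = __i2c_transfer(adap, &msg, 1);
	if (ret >= 0 && ret != 1)
		ret = -EREMOTEIO;	/* fewer messages than requested */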
index 9a348ee4dc14deb9eb1686fd1a20dc672b73a5df..dd18b9ccb1f40b4f6ddf5904e1f6d529931801df 100644 (file)
@@ -167,6 +167,9 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
                buf[0] = val;
                msg.buf = buf;
                ret = __i2c_transfer(adap, &msg, 1);
+
+               if (ret >= 0 && ret != 1)
+                       ret = -EREMOTEIO;
        } else {
                union i2c_smbus_data data;
                ret = adap->algo->smbus_xfer(adap, client->addr,
@@ -195,7 +198,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
        /* Only select the channel if it's different from the last channel */
        if (data->last_chan != regval) {
                ret = pca954x_reg_write(muxc->parent, client, regval);
-               data->last_chan = ret ? 0 : regval;
+               data->last_chan = ret < 0 ? 0 : regval;
        }
 
        return ret;
index b7ac97b27c88c2fe11ad11f564ce786085d3217c..cda5542e13a206347447a49f18f9e8cb930e7c8c 100644 (file)
@@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                FW_RI_RES_WR_DCAEN_V(0) |
                FW_RI_RES_WR_DCACPU_V(0) |
                FW_RI_RES_WR_FBMIN_V(2) |
-               FW_RI_RES_WR_FBMAX_V(2) |
+               (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
+                                        FW_RI_RES_WR_FBMAX_V(3)) |
                FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
                FW_RI_RES_WR_CIDXFTHRESH_V(0) |
                FW_RI_RES_WR_EQSIZE_V(eqsize));
@@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                FW_RI_RES_WR_DCAEN_V(0) |
                FW_RI_RES_WR_DCACPU_V(0) |
                FW_RI_RES_WR_FBMIN_V(2) |
-               FW_RI_RES_WR_FBMAX_V(2) |
+               FW_RI_RES_WR_FBMAX_V(3) |
                FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
                FW_RI_RES_WR_CIDXFTHRESH_V(0) |
                FW_RI_RES_WR_EQSIZE_V(eqsize));
index 392f78384a604ad9175f5376845a0f8e854f5405..98923a8cf86d83361d0a93afd1783b57fc4e5995 100644 (file)
@@ -358,13 +358,16 @@ void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
  * @dev: sc device struct
  * @pd: sc pd ptr
  * @pd_id: pd_id for allocated pd
+ * @abi_ver: ABI version from user context, -1 if not valid
  */
 static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
                             struct i40iw_sc_pd *pd,
-                            u16 pd_id)
+                            u16 pd_id,
+                            int abi_ver)
 {
        pd->size = sizeof(*pd);
        pd->pd_id = pd_id;
+       pd->abi_ver = abi_ver;
        pd->dev = dev;
 }
 
@@ -2252,6 +2255,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
                                              offset);
 
        info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
+       info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
        ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
        if (ret_code)
                return ret_code;
@@ -2270,10 +2274,21 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
                                                    false);
        i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
                    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
-       ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
-                                              &wqe_size);
-       if (ret_code)
-               return ret_code;
+
+       switch (qp->pd->abi_ver) {
+       case 4:
+               ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+                                                      &wqe_size);
+               if (ret_code)
+                       return ret_code;
+               break;
+       case 5: /* fallthrough until next ABI version */
+       default:
+               if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+                       return I40IW_ERR_INVALID_FRAG_COUNT;
+               wqe_size = I40IW_MAX_WQE_SIZE_RQ;
+               break;
+       }
        qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
                                (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
        i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
index 449ba8c81ce786d1442a0808d72041b0edfb710d..db41ab40da9cea375b087d02fccbfe07adf0356a 100644 (file)
@@ -930,7 +930,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
        INIT_LIST_HEAD(&rsrc->txpend);
 
        rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
-       dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
+       dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
        rsrc->qp_id = info->qp_id;
        rsrc->cq_id = info->cq_id;
        rsrc->sq_size = info->sq_size;
index f3f8e9cc3c058fe0a1b9fddbe06b64bca26daf9f..7b76259752b0062e5cf16f7bc097f5cd4b66098e 100644 (file)
@@ -280,6 +280,7 @@ struct i40iw_sc_pd {
        u32 size;
        struct i40iw_sc_dev *dev;
        u16 pd_id;
+       int abi_ver;
 };
 
 struct i40iw_cqp_quanta {
@@ -852,6 +853,7 @@ struct i40iw_qp_init_info {
        u64 host_ctx_pa;
        u64 q2_pa;
        u64 shadow_area_pa;
+       int abi_ver;
        u8 sq_tph_val;
        u8 rq_tph_val;
        u8 type;
@@ -1051,7 +1053,7 @@ struct i40iw_aeq_ops {
 };
 
 struct i40iw_pd_ops {
-       void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
+       void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int);
 };
 
 struct i40iw_priv_qp_ops {
index 12acd688def4707fa018a9673fe5ea8b6408f9cb..57d3f1d11ff1f5bcbd0108a4ffb48727f5778b56 100644 (file)
@@ -39,8 +39,8 @@
 
 #include <linux/types.h>
 
-#define I40IW_ABI_USERSPACE_VER 4
-#define I40IW_ABI_KERNEL_VER    4
+#define I40IW_ABI_VER 5
+
 struct i40iw_alloc_ucontext_req {
        __u32 reserved32;
        __u8 userspace_ver;
index 4376cd628774248dbc18e7bf1ea731dcede4945c..2800f796271c4a89fccfbea526ff00395269f63c 100644 (file)
@@ -966,10 +966,6 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
        if (ret_code)
                return ret_code;
 
-       ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
-       if (ret_code)
-               return ret_code;
-
        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->shadow_area = info->shadow_area;
@@ -998,8 +994,19 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
        if (!qp->use_srq) {
                qp->rq_size = info->rq_size;
                qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
-               qp->rq_wqe_size = rqshift;
                I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
+               switch (info->abi_ver) {
+               case 4:
+                       ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
+                       if (ret_code)
+                               return ret_code;
+                       break;
+               case 5: /* fallthrough until next ABI version */
+               default:
+                       rqshift = I40IW_MAX_RQ_WQE_SHIFT;
+                       break;
+               }
+               qp->rq_wqe_size = rqshift;
                qp->rq_wqe_size_multiplier = 4 << rqshift;
        }
        qp->ops = iw_qp_uk_ops;
index 80d9f464f65ea31813a2850298b443f9904dbac6..84be6f13b9c5264f27237a0eb6a2810f9d61b0ab 100644 (file)
@@ -76,6 +76,7 @@ enum i40iw_device_capabilities_const {
        I40IW_MAX_ORD_SIZE =                    127,
        I40IW_MAX_WQ_ENTRIES =                  2048,
        I40IW_Q2_BUFFER_SIZE =                  (248 + 100),
+       I40IW_MAX_WQE_SIZE_RQ =                 128,
        I40IW_QP_CTX_SIZE =                     248,
        I40IW_MAX_PDS =                         32768
 };
@@ -97,6 +98,7 @@ enum i40iw_device_capabilities_const {
 #define i40iw_address_list u64 *
 
 #define        I40IW_MAX_MR_SIZE       0x10000000000L
+#define        I40IW_MAX_RQ_WQE_SHIFT  2
 
 struct i40iw_qp_uk;
 struct i40iw_cq_uk;
@@ -405,7 +407,7 @@ struct i40iw_qp_uk_init_info {
        u32 max_sq_frag_cnt;
        u32 max_rq_frag_cnt;
        u32 max_inline_data;
-
+       int abi_ver;
 };
 
 struct i40iw_cq_uk_init_info {
index 7368a50bbdaa09abdfae87783f239d1bac5497e7..29e97df9e1a7f87c784ebf33f4ebccfae217f433 100644 (file)
@@ -145,9 +145,8 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
        if (ib_copy_from_udata(&req, udata, sizeof(req)))
                return ERR_PTR(-EINVAL);
 
-       if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
-               i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
-                            req.userspace_ver, I40IW_ABI_USERSPACE_VER);
+       if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
+               i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
                return ERR_PTR(-EINVAL);
        }
 
@@ -155,13 +154,14 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
        uresp.max_qps = iwdev->max_qp;
        uresp.max_pds = iwdev->max_pd;
        uresp.wq_size = iwdev->max_qp_wr * 2;
-       uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
+       uresp.kernel_ver = req.userspace_ver;
 
        ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
        if (!ucontext)
                return ERR_PTR(-ENOMEM);
 
        ucontext->iwdev = iwdev;
+       ucontext->abi_ver = req.userspace_ver;
 
        if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                kfree(ucontext);
@@ -333,6 +333,7 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
        struct i40iw_alloc_pd_resp uresp;
        struct i40iw_sc_pd *sc_pd;
+       struct i40iw_ucontext *ucontext;
        u32 pd_id = 0;
        int err;
 
@@ -353,15 +354,18 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
        }
 
        sc_pd = &iwpd->sc_pd;
-       dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
 
        if (context) {
+               ucontext = to_ucontext(context);
+               dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
                memset(&uresp, 0, sizeof(uresp));
                uresp.pd_id = pd_id;
                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        err = -EFAULT;
                        goto error;
                }
+       } else {
+               dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
        }
 
        i40iw_add_pdusecount(iwpd);
@@ -518,7 +522,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
        struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
        u32 sqdepth, rqdepth;
        u32 sq_size, rq_size;
-       u8 sqshift, rqshift;
+       u8 sqshift;
        u32 size;
        enum i40iw_status_code status;
        struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
@@ -527,14 +531,11 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
        rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
 
        status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
-       if (!status)
-               status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
-
        if (status)
                return -ENOMEM;
 
        sqdepth = sq_size << sqshift;
-       rqdepth = rq_size << rqshift;
+       rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
 
        size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
        iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
index 6549c939500f47068567e9bb275dfc8ebc829a70..07c3fec77de6a1fcbb3a52a95e4937b6e23ea8aa 100644 (file)
@@ -42,6 +42,7 @@ struct i40iw_ucontext {
        spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
        struct list_head qp_reg_mem_list;
        spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
+       int abi_ver;
 };
 
 struct i40iw_pd {
index 302fb05e6e6fb1d17d5700363b02e70f42eba192..57c8de2080773b161272774a69eaebf02cc411ed 100644 (file)
@@ -890,6 +890,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 
                pbl_ptr = cq->q.pbl_tbl->pa;
                page_cnt = cq->q.pbl_info.num_pbes;
+
+               cq->ibcq.cqe = chain_entries;
        } else {
                cq->cq_type = QEDR_CQ_TYPE_KERNEL;
 
@@ -905,6 +907,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 
                page_cnt = qed_chain_get_page_cnt(&cq->pbl);
                pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+               cq->ibcq.cqe = cq->pbl.capacity;
        }
 
        qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
@@ -982,8 +985,13 @@ int qedr_destroy_cq(struct ib_cq *ibcq)
 
        /* GSI CQs are handled by the driver, so they don't exist in the FW */
        if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+               int rc;
+
                iparams.icid = cq->icid;
-               dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+               rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
+                                              &oparams);
+               if (rc)
+                       return rc;
                dev->ops->common->chain_free(dev->cdev, &cq->pbl);
        }
 
@@ -1966,7 +1974,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        if (attr_mask & IB_QP_STATE) {
                if ((qp->qp_type != IB_QPT_GSI) && (!udata))
-                       qedr_update_qp_state(dev, qp, qp_params.new_state);
+                       rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
                qp->state = qp_params.new_state;
        }
 
@@ -2070,8 +2078,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
        DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
                 qp, qp->qp_type);
 
-       if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR |
-                         QED_ROCE_QP_STATE_INIT)) {
+       if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+           (qp->state != QED_ROCE_QP_STATE_ERR) &&
+           (qp->state != QED_ROCE_QP_STATE_INIT)) {
+
                attr.qp_state = IB_QPS_ERR;
                attr_mask |= IB_QP_STATE;
 
@@ -2626,7 +2636,9 @@ static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
        rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
        DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
 
-       if (wr->send_flags & IB_SEND_INLINE) {
+       if (wr->send_flags & IB_SEND_INLINE &&
+           (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+            wr->opcode == IB_WR_RDMA_WRITE)) {
                u8 flags = 0;
 
                SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
@@ -2977,8 +2989,9 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
        spin_lock_irqsave(&qp->q_lock, flags);
 
-       if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
-           (qp->state == QED_ROCE_QP_STATE_ERR)) {
+       if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+           (qp->state != QED_ROCE_QP_STATE_ERR) &&
+           (qp->state != QED_ROCE_QP_STATE_SQD)) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                *bad_wr = wr;
                DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -3031,8 +3044,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
        spin_lock_irqsave(&qp->q_lock, flags);
 
-       if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
-           (qp->state == QED_ROCE_QP_STATE_ERR)) {
+       if (qp->state == QED_ROCE_QP_STATE_RESET) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                *bad_wr = wr;
                return -EINVAL;
@@ -3174,6 +3186,7 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
 
                /* fill WC */
                wc->status = status;
+               wc->vendor_err = 0;
                wc->wc_flags = 0;
                wc->src_qp = qp->id;
                wc->qp = &qp->ibqp;
@@ -3225,7 +3238,7 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
                       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
                       cq->icid, qp->icid);
                cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
-                                 IB_WC_WR_FLUSH_ERR, 0);
+                                 IB_WC_WR_FLUSH_ERR, 1);
                break;
        default:
                /* process all WQEs before the consumer */
@@ -3363,6 +3376,7 @@ static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
 
        /* fill WC */
        wc->status = wc_status;
+       wc->vendor_err = 0;
        wc->src_qp = qp->id;
        wc->qp = &qp->ibqp;
        wc->wr_id = wr_id;
@@ -3391,6 +3405,7 @@ static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
        while (num_entries && qp->rq.wqe_cons != hw_cons) {
                /* fill WC */
                wc->status = IB_WC_WR_FLUSH_ERR;
+               wc->vendor_err = 0;
                wc->wc_flags = 0;
                wc->src_qp = qp->id;
                wc->byte_len = 0;
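
Worth pausing on the qedr_destroy_qp hunk above: the old test compared qp->state against the bitwise OR of three enum values, a single number that usually equals none of the individual states, so the condition fired even for quiesced QPs. A toy demonstration of the pitfall; the enum values here are invented, not the real QED_ROCE_QP_STATE_* ones:

    #include <stdio.h>

    enum qp_state { STATE_RESET = 0, STATE_INIT = 1, STATE_ERR = 2, STATE_RTS = 4 };

    int main(void)
    {
        enum qp_state state = STATE_RESET;

        /* buggy: (0 | 2 | 1) == 3, and RESET (0) != 3, so RESET slips through */
        if (state != (STATE_RESET | STATE_ERR | STATE_INIT))
            printf("buggy check: would flush a QP that is already in RESET\n");

        /* fixed: each excluded state tested explicitly */
        if (state != STATE_RESET && state != STATE_ERR && state != STATE_INIT)
            printf("fixed check: flush\n");
        else
            printf("fixed check: skip, QP already quiesced\n");
        return 0;
    }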
index cd27cbde765249b98331fd1da8f023d134dddd38..d369f24425f94008878b0d9514de16952cbae4af 100644 (file)
@@ -224,7 +224,7 @@ static inline enum comp_state check_psn(struct rxe_qp *qp,
                else
                        return COMPST_DONE;
        } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
-               return COMPST_ERROR_RETRY;
+               return COMPST_DONE;
        } else {
                return COMPST_CHECK_ACK;
        }
index 16967cdb45dffb6c9841748ac0b13f379d02adb9..342e78163613dfdc719b171e1396d01fd44432eb 100644 (file)
@@ -455,8 +455,7 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                return -EAGAIN;
        }
 
-       if (pkt->qp)
-               atomic_inc(&pkt->qp->skb_out);
+       atomic_inc(&pkt->qp->skb_out);
        kfree_skb(skb);
 
        return 0;
index c3e60e4bde6e2a3ba5e0953b531a42f65927b717..486d576e55bc016dda1f8ddad6b8f00941f66727 100644 (file)
@@ -855,4 +855,5 @@ void rxe_qp_cleanup(void *arg)
        free_rd_atomic_resources(qp);
 
        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+       sock_release(qp->sk);
 }
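
The one-liner above pairs kernel_sock_shutdown() with the sock_release() that was missing, so the QP no longer leaks its socket on cleanup. The userspace analog of that pairing, for illustration (shutdown stops traffic; only the release call frees the object):

    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        shutdown(fd, SHUT_RDWR); /* stop I/O, like kernel_sock_shutdown() */
        close(fd);               /* free the socket itself, like sock_release() */
        return 0;
    }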
index 7a36ec9dbc0c98cc9907f378e593814fbe26ffcc..3435efff879960cece0c7e122b5960a057f2d4a1 100644 (file)
@@ -1070,12 +1070,13 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt)
 {
        enum resp_states rc;
+       u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
 
        if (pkt->mask & RXE_SEND_MASK ||
            pkt->mask & RXE_WRITE_MASK) {
                /* SEND. Ack again and cleanup. C9-105. */
                if (bth_ack(pkt))
-                       send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
+                       send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
                rc = RESPST_CLEANUP;
                goto out;
        } else if (pkt->mask & RXE_READ_MASK) {
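
The duplicate_request hunk masks the previous PSN with BTH_PSN_MASK because the PSN field in the BTH is 24 bits wide; without the mask, acking a duplicate at PSN 0 would compute an out-of-range value. A standalone illustration of the wraparound (the mask value matches rxe's 24-bit PSN):

    #include <stdio.h>

    #define BTH_PSN_MASK 0xffffff   /* PSN occupies 24 bits of the BTH */

    int main(void)
    {
        unsigned int psn = 0;
        unsigned int prev_psn = (psn - 1) & BTH_PSN_MASK;

        /* unmasked, psn - 1 underflows to 0xffffffff, not a valid PSN */
        printf("prev of PSN 0 is 0x%06x\n", prev_psn); /* prints 0xffffff */
        return 0;
    }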
index 3273217ce80c8feecdf7d1b4d4c98c63264901b1..cc74a41bdb0d24d7e5eddd0f1e019ab01cea71c4 100644 (file)
@@ -150,12 +150,20 @@ static int tps6521x_pb_probe(struct platform_device *pdev)
        return 0;
 }
 
+static const struct platform_device_id tps6521x_pwrbtn_id_table[] = {
+       { "tps65218-pwrbutton", },
+       { "tps65217-pwrbutton", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6521x_pwrbtn_id_table);
+
 static struct platform_driver tps6521x_pb_driver = {
        .probe  = tps6521x_pb_probe,
        .driver = {
                .name   = "tps6521x_pwrbutton",
                .of_match_table = of_tps6521x_pb_match,
        },
+       .id_table = tps6521x_pwrbtn_id_table,
 };
 module_platform_driver(tps6521x_pb_driver);
 
index 971154cbbb03eb36efc7fb60608afe23d48f38a2..6799cf9713f77f460f990e6bc0f38b31422c0745 100644 (file)
@@ -2209,14 +2209,13 @@ static void __init free_dma_resources(void)
 static int __init early_amd_iommu_init(void)
 {
        struct acpi_table_header *ivrs_base;
-       acpi_size ivrs_size;
        acpi_status status;
        int i, remap_cache_sz, ret = 0;
 
        if (!amd_iommu_detected)
                return -ENODEV;
 
-       status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+       status = acpi_get_table("IVRS", 0, &ivrs_base);
        if (status == AE_NOT_FOUND)
                return -ENODEV;
        else if (ACPI_FAILURE(status)) {
@@ -2338,7 +2337,7 @@ static int __init early_amd_iommu_init(void)
 
 out:
        /* Don't leak any ACPI memory */
-       early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+       acpi_put_table(ivrs_base);
        ivrs_base = NULL;
 
        return ret;
@@ -2362,10 +2361,9 @@ out:
 static bool detect_ivrs(void)
 {
        struct acpi_table_header *ivrs_base;
-       acpi_size ivrs_size;
        acpi_status status;
 
-       status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+       status = acpi_get_table("IVRS", 0, &ivrs_base);
        if (status == AE_NOT_FOUND)
                return false;
        else if (ACPI_FAILURE(status)) {
@@ -2374,7 +2372,7 @@ static bool detect_ivrs(void)
                return false;
        }
 
-       early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+       acpi_put_table(ivrs_base);
 
        /* Make sure ACS will be enabled during PCI probe */
        pci_request_acs();
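
Both IVRS call sites switch from the acpi_get_table_with_size()/early_acpi_os_unmap_memory() pair to acpi_get_table()/acpi_put_table(), which reference-count the mapping so callers no longer carry a size around. A userspace-style analogy of that get/put discipline (not ACPI code; everything here is illustrative):

    #include <stdio.h>

    struct table { const char *sig; int refcnt; };

    static struct table ivrs = { "IVRS", 0 };

    static struct table *get_table(void)
    {
        ivrs.refcnt++;          /* mapping stays valid while refcnt > 0 */
        return &ivrs;
    }

    static void put_table(struct table *t)
    {
        if (--t->refcnt == 0)
            printf("last user gone: unmap %s\n", t->sig);
    }

    int main(void)
    {
        struct table *t = get_table();
        /* ... parse the table ... */
        put_table(t);           /* no size bookkeeping required */
        return 0;
    }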
index 8c53748a769d447fac83622725c305a3ee6bc92f..a88576d50740b2dbdbe6e65b2bfe1985f01c81ca 100644 (file)
@@ -68,7 +68,6 @@ DECLARE_RWSEM(dmar_global_lock);
 LIST_HEAD(dmar_drhd_units);
 
 struct acpi_table_header * __initdata dmar_tbl;
-static acpi_size dmar_tbl_size;
 static int dmar_dev_scope_status = 1;
 static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
 
@@ -543,9 +542,7 @@ static int __init dmar_table_detect(void)
        acpi_status status = AE_OK;
 
        /* if we could find DMAR table, then there are DMAR devices */
-       status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
-                               (struct acpi_table_header **)&dmar_tbl,
-                               &dmar_tbl_size);
+       status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
 
        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
@@ -906,7 +903,7 @@ int __init detect_intel_iommu(void)
                x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
 
-       early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
+       acpi_put_table(dmar_tbl);
        dmar_tbl = NULL;
        up_write(&dmar_global_lock);
 
index 9af48a85c16fcc557e10387ebcc53d346262483c..5e0e250db0be9ed1d12955c35b963ce8fd16f8b2 100644 (file)
@@ -180,7 +180,7 @@ static int st_irq_syscfg_probe(struct platform_device *pdev)
        return st_irq_syscfg_enable(pdev);
 }
 
-static int st_irq_syscfg_resume(struct device *dev)
+static int __maybe_unused st_irq_syscfg_resume(struct device *dev)
 {
        struct st_irq_syscfg *ddata = dev_get_drvdata(dev);
 
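
Tagging the resume hook __maybe_unused silences the gcc warning that appears when the only reference to the function is compiled out (here, when CONFIG_PM_SLEEP is off). A minimal out-of-kernel demonstration of the attribute; the macro spelling mirrors the kernel's:

    #include <stdio.h>

    #define __maybe_unused __attribute__((unused))

    /* referenced only when the PM configuration is enabled, so mark it */
    static int __maybe_unused resume_handler(void)
    {
        return 0;
    }

    int main(void)
    {
        puts("builds warning-free with -Wall even with resume_handler unused");
        return 0;
    }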
index c19dd820ea9b4baafb1b2d15ebb5031f464af9d0..2aeb034d5fb9cd4d0ce14e5c26a10fb6abf62538 100644 (file)
 #define RING_ENTRY_SIZE   sizeof(struct dma64dd)
 
 /* # entries in PDC dma ring */
-#define PDC_RING_ENTRIES  128
+#define PDC_RING_ENTRIES  512
+/*
+ * Minimum number of ring descriptor entries that must be free to tell the
+ * mailbox framework that it can submit another request
+ */
+#define PDC_RING_SPACE_MIN  15
+
 #define PDC_RING_SIZE    (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
 /* Rings are 8k aligned */
 #define RING_ALIGN_ORDER  13
  * Interrupt mask and status definitions. Enable the receive interrupt on
  * ring 0
  */
-#define PDC_XMTINT_0         (24 + PDC_RINGSET)
 #define PDC_RCVINT_0         (16 + PDC_RINGSET)
-#define PDC_XMTINTEN_0       BIT(PDC_XMTINT_0)
 #define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
-#define PDC_INTMASK  (PDC_XMTINTEN_0 | PDC_RCVINTEN_0)
+#define PDC_INTMASK         (PDC_RCVINTEN_0)
 #define PDC_LAZY_FRAMECOUNT  1
 #define PDC_LAZY_TIMEOUT     10000
 #define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
 
 /*
  * Sets the following bits for write to transmit control reg:
- *  0    - XmtEn - enable activity on the tx channel
  * 11    - PtyChkDisable - parity check is disabled
  * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
  */
-#define PDC_TX_CTL              0x000C0801
+#define PDC_TX_CTL             0x000C0800
+
+/* Bit in tx control reg to enable tx channel */
+#define PDC_TX_ENABLE          0x1
 
 /*
  * Sets the following bits for write to receive control reg:
- * 0     - RcvEn - enable activity on the rx channel
  * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
  * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
  *                          that have StartOfFrame set
  * 11    - PtyChkDisable - parity check is disabled
  * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
  */
-#define PDC_RX_CTL              0x000C0E01
+#define PDC_RX_CTL             0x000C0E00
+
+/* Bit in rx control reg to enable rx channel */
+#define PDC_RX_ENABLE          0x1
 
 #define CRYPTO_D64_RS0_CD_MASK   ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
 
@@ -252,11 +260,29 @@ struct pdc_ring_alloc {
        u32         size;    /* ring allocation size in bytes */
 };
 
+/**
+ * struct pdc_rx_ctx - context associated with a receive descriptor
+ * @rxp_ctx: opaque context associated with frame that starts at each
+ *           rx ring index.
+ * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
+ *           index. Retained in order to unmap each sg after reply is processed.
+ * @rxin_numd: Number of rx descriptors associated with the message that starts
+ *             at a descriptor index. Not set for every index. For example,
+ *             if descriptor index i points to a scatterlist with 4 entries,
+ *             then the next three descriptor indexes don't have a value set.
+ * @resp_hdr: Virtual address of buffer used to catch DMA rx status
+ * @resp_hdr_daddr: physical address of DMA rx status buffer
+ */
+struct pdc_rx_ctx {
+       void *rxp_ctx;
+       struct scatterlist *dst_sg;
+       u32  rxin_numd;
+       void *resp_hdr;
+       dma_addr_t resp_hdr_daddr;
+};
+
 /* PDC state structure */
 struct pdc_state {
-       /* synchronize access to this PDC state structure */
-       spinlock_t pdc_lock;
-
        /* Index of the PDC whose state is in this structure instance */
        u8 pdc_idx;
 
@@ -272,13 +298,8 @@ struct pdc_state {
 
        unsigned int pdc_irq;
 
-       /*
-        * Last interrupt status read from PDC device. Saved in interrupt
-        * handler so the handler can clear the interrupt in the device,
-        * and the interrupt thread called later can know which interrupt
-        * bits are active.
-        */
-       unsigned long intstatus;
+       /* tasklet for deferred processing after DMA rx interrupt */
+       struct tasklet_struct rx_tasklet;
 
        /* Number of bytes of receive status prior to each rx frame */
        u32 rx_status_len;
@@ -369,11 +390,7 @@ struct pdc_state {
        /* Index of next rx descriptor to post. */
        u32  rxout;
 
-       /*
-        * opaque context associated with frame that starts at each
-        * rx ring index.
-        */
-       void *rxp_ctx[PDC_RING_ENTRIES];
+       struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];
 
        /*
         * Scatterlists used to form request and reply frames beginning at a
@@ -381,27 +398,18 @@ struct pdc_state {
         * is processed
         */
        struct scatterlist *src_sg[PDC_RING_ENTRIES];
-       struct scatterlist *dst_sg[PDC_RING_ENTRIES];
-
-       /*
-        * Number of rx descriptors associated with the message that starts
-        * at this descriptor index. Not set for every index. For example,
-        * if descriptor index i points to a scatterlist with 4 entries, then
-        * the next three descriptor indexes don't have a value set.
-        */
-       u32  rxin_numd[PDC_RING_ENTRIES];
-
-       void *resp_hdr[PDC_RING_ENTRIES];
-       dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES];
 
        struct dentry *debugfs_stats;  /* debug FS stats file for this PDC */
 
        /* counters */
-       u32  pdc_requests;    /* number of request messages submitted */
-       u32  pdc_replies;     /* number of reply messages received */
-       u32  txnobuf;         /* count of tx ring full */
-       u32  rxnobuf;         /* count of rx ring full */
-       u32  rx_oflow;        /* count of rx overflows */
+       u32  pdc_requests;     /* number of request messages submitted */
+       u32  pdc_replies;      /* number of reply messages received */
+       u32  last_tx_not_done; /* too few tx descriptors to indicate done */
+       u32  tx_ring_full;     /* unable to accept msg because tx ring full */
+       u32  rx_ring_full;     /* unable to accept msg because rx ring full */
+       u32  txnobuf;          /* unable to create tx descriptor */
+       u32  rxnobuf;          /* unable to create rx descriptor */
+       u32  rx_oflow;         /* count of rx overflows */
 };
 
 /* Global variables */
@@ -434,20 +442,33 @@ static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "SPU %u stats:\n", pdcs->pdc_idx);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
-                              "PDC requests............%u\n",
+                              "PDC requests....................%u\n",
                               pdcs->pdc_requests);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
-                              "PDC responses...........%u\n",
+                              "PDC responses...................%u\n",
                               pdcs->pdc_replies);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
-                              "Tx err ring full........%u\n",
+                              "Tx not done.....................%u\n",
+                              pdcs->last_tx_not_done);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "Tx ring full....................%u\n",
+                              pdcs->tx_ring_full);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "Rx ring full....................%u\n",
+                              pdcs->rx_ring_full);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "Tx desc write fail. Ring full...%u\n",
                               pdcs->txnobuf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
-                              "Rx err ring full........%u\n",
+                              "Rx desc write fail. Ring full...%u\n",
                               pdcs->rxnobuf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
-                              "Receive overflow........%u\n",
+                              "Receive overflow................%u\n",
                               pdcs->rx_oflow);
+       out_offset += snprintf(buf + out_offset, out_count - out_offset,
+                              "Num frags in rx ring............%u\n",
+                              NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
+                                         pdcs->nrxpost));
 
        if (out_offset > out_count)
                out_offset = out_count;
@@ -480,17 +501,16 @@ static void pdc_setup_debugfs(struct pdc_state *pdcs)
        if (!debugfs_dir)
                debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
 
-       pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR,
+       /* S_IRUSR == 0400 */
+       pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
                                                  debugfs_dir, pdcs,
                                                  &pdc_debugfs_stats);
 }
 
 static void pdc_free_debugfs(void)
 {
-       if (debugfs_dir && simple_empty(debugfs_dir)) {
-               debugfs_remove_recursive(debugfs_dir);
-               debugfs_dir = NULL;
-       }
+       debugfs_remove_recursive(debugfs_dir);
+       debugfs_dir = NULL;
 }
 
 /**
@@ -505,17 +525,17 @@ pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
              u32 buf_len, u32 flags)
 {
        struct device *dev = &pdcs->pdev->dev;
+       struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];
 
        dev_dbg(dev,
                "Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
                pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
 
-       iowrite32(lower_32_bits(dma_addr),
-                 (void *)&pdcs->rxd_64[pdcs->rxout].addrlow);
-       iowrite32(upper_32_bits(dma_addr),
-                 (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh);
-       iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1);
-       iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2);
+       rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+       rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+       rxd->ctrl1 = cpu_to_le32(flags);
+       rxd->ctrl2 = cpu_to_le32(buf_len);
+
        /* bump ring index and return */
        pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
 }
@@ -533,53 +553,50 @@ pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
              u32 flags)
 {
        struct device *dev = &pdcs->pdev->dev;
+       struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];
 
        dev_dbg(dev,
                "Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
                pdcs->pdc_idx, pdcs->txout, buf_len, flags);
 
-       iowrite32(lower_32_bits(dma_addr),
-                 (void *)&pdcs->txd_64[pdcs->txout].addrlow);
-       iowrite32(upper_32_bits(dma_addr),
-                 (void *)&pdcs->txd_64[pdcs->txout].addrhigh);
-       iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1);
-       iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2);
+       txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
+       txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
+       txd->ctrl1 = cpu_to_le32(flags);
+       txd->ctrl2 = cpu_to_le32(buf_len);
 
        /* bump ring index and return */
        pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
 }
 
 /**
- * pdc_receive() - Receive a response message from a given SPU.
+ * pdc_receive_one() - Receive a response message from a given SPU.
  * @pdcs:    PDC state for the SPU to receive from
- * @mssg:    mailbox message to be returned to client
  *
  * When the return code indicates success, the response message is available in
  * the receive buffers provided prior to submission of the request.
  *
- * Input:
- *   pdcs - PDC state structure for the SPU to be polled
- *   mssg - mailbox message to be returned to client. This function sets the
- *         context pointer on the message to help the client associate the
- *         response with a request.
- *
  * Return:  PDC_SUCCESS if one or more receive descriptors were processed
  *          -EAGAIN indicates that no response message is available
  *          -EIO an error occurred
  */
 static int
-pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
+pdc_receive_one(struct pdc_state *pdcs)
 {
        struct device *dev = &pdcs->pdev->dev;
+       struct mbox_controller *mbc;
+       struct mbox_chan *chan;
+       struct brcm_message mssg;
        u32 len, rx_status;
        u32 num_frags;
-       int i;
        u8 *resp_hdr;    /* virtual addr of start of resp message DMA header */
        u32 frags_rdy;   /* number of fragments ready to read */
        u32 rx_idx;      /* ring index of start of receive frame */
        dma_addr_t resp_hdr_daddr;
+       struct pdc_rx_ctx *rx_ctx;
 
-       spin_lock(&pdcs->pdc_lock);
+       mbc = &pdcs->mbc;
+       chan = &mbc->chans[0];
+       mssg.type = BRCM_MESSAGE_SPU;
 
        /*
         * return if a complete response message is not yet ready.
@@ -587,47 +604,34 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
         * to read.
         */
        frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
-       if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
-               /* See if the hw has written more fragments than we know */
-               pdcs->last_rx_curr =
-                   (ioread32((void *)&pdcs->rxregs_64->status0) &
-                    CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
-               frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
-                                      pdcs->nrxpost);
-               if ((frags_rdy == 0) ||
-                   (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
-                       /* No response ready */
-                       spin_unlock(&pdcs->pdc_lock);
-                       return -EAGAIN;
-               }
-               /* can't read descriptors/data until write index is read */
-               rmb();
-       }
+       if ((frags_rdy == 0) ||
+           (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
+               /* No response ready */
+               return -EAGAIN;
 
        num_frags = pdcs->txin_numd[pdcs->txin];
+       WARN_ON(num_frags == 0);
+
        dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
                     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
 
-       for (i = 0; i < num_frags; i++)
-               pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost);
+       pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;
 
        dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
                pdcs->pdc_idx, num_frags);
 
        rx_idx = pdcs->rxin;
-       num_frags = pdcs->rxin_numd[rx_idx];
+       rx_ctx = &pdcs->rx_ctx[rx_idx];
+       num_frags = rx_ctx->rxin_numd;
        /* Return opaque context with result */
-       mssg->ctx = pdcs->rxp_ctx[rx_idx];
-       pdcs->rxp_ctx[rx_idx] = NULL;
-       resp_hdr = pdcs->resp_hdr[rx_idx];
-       resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx];
-       dma_unmap_sg(dev, pdcs->dst_sg[rx_idx],
-                    sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE);
-
-       for (i = 0; i < num_frags; i++)
-               pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost);
+       mssg.ctx = rx_ctx->rxp_ctx;
+       rx_ctx->rxp_ctx = NULL;
+       resp_hdr = rx_ctx->resp_hdr;
+       resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
+       dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
+                    DMA_FROM_DEVICE);
 
-       spin_unlock(&pdcs->pdc_lock);
+       pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;
 
        dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
                pdcs->pdc_idx, num_frags);
@@ -659,12 +663,35 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
 
        dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
 
+       mbox_chan_received_data(chan, &mssg);
+
        pdcs->pdc_replies++;
-       /* if we read one or more rx descriptors, claim success */
-       if (num_frags > 0)
-               return PDC_SUCCESS;
-       else
-               return -EIO;
+       return PDC_SUCCESS;
+}
+
+/**
+ * pdc_receive() - Process as many responses as are available in the rx ring.
+ * @pdcs:  PDC state
+ *
+ * Called from the rx tasklet after a receive interrupt.
+ * Return: 0 when all available responses have been processed
+ */
+static int
+pdc_receive(struct pdc_state *pdcs)
+{
+       int rx_status;
+
+       /* read last_rx_curr from register once */
+       pdcs->last_rx_curr =
+           (ioread32(&pdcs->rxregs_64->status0) &
+            CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+
+       do {
+               /* Could be many frames ready */
+               rx_status = pdc_receive_one(pdcs);
+       } while (rx_status == PDC_SUCCESS);
+
+       return 0;
 }
 
 /**
@@ -766,8 +793,8 @@ static int pdc_tx_list_final(struct pdc_state *pdcs)
         * before chip starts to process new request
         */
        wmb();
-       iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr);
-       iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr);
+       iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
+       iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
        pdcs->pdc_requests++;
 
        return PDC_SUCCESS;
@@ -796,6 +823,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
        u32 rx_pkt_cnt = 1;     /* Adding a single rx buffer */
        dma_addr_t daddr;
        void *vaddr;
+       struct pdc_rx_ctx *rx_ctx;
 
        rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
                                              pdcs->nrxpost);
@@ -806,7 +834,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
 
        /* allocate a buffer for the dma rx status */
        vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
-       if (!vaddr)
+       if (unlikely(!vaddr))
                return -ENOMEM;
 
        /*
@@ -819,15 +847,16 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
 
        /* This is always the first descriptor in the receive sequence */
        flags = D64_CTRL1_SOF;
-       pdcs->rxin_numd[pdcs->rx_msg_start] = 1;
+       pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;
 
        if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
                flags |= D64_CTRL1_EOT;
 
-       pdcs->rxp_ctx[pdcs->rxout] = ctx;
-       pdcs->dst_sg[pdcs->rxout] = dst_sg;
-       pdcs->resp_hdr[pdcs->rxout] = vaddr;
-       pdcs->resp_hdr_daddr[pdcs->rxout] = daddr;
+       rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
+       rx_ctx->rxp_ctx = ctx;
+       rx_ctx->dst_sg = dst_sg;
+       rx_ctx->resp_hdr = vaddr;
+       rx_ctx->resp_hdr_daddr = daddr;
        pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
        return PDC_SUCCESS;
 }
@@ -895,7 +924,7 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
                desc_w++;
                sg = sg_next(sg);
        }
-       pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w;
+       pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;
 
        return PDC_SUCCESS;
 }
@@ -903,7 +932,7 @@ static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
 /**
  * pdc_irq_handler() - Interrupt handler called in interrupt context.
  * @irq:      Interrupt number that has fired
- * @cookie:   PDC state for DMA engine that generated the interrupt
+ * @data:     device struct for DMA engine that generated the interrupt
  *
  * Clear the device interrupt status flags here, mask further interrupts,
  * and schedule the rx tasklet that performs the deferred processing.
  *
  * Return: IRQ_HANDLED if interrupt is ours
  *         IRQ_NONE otherwise
  */
-static irqreturn_t pdc_irq_handler(int irq, void *cookie)
+static irqreturn_t pdc_irq_handler(int irq, void *data)
 {
-       struct pdc_state *pdcs = cookie;
+       struct device *dev = (struct device *)data;
+       struct pdc_state *pdcs = dev_get_drvdata(dev);
        u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
 
-       if (intstatus & PDC_XMTINTEN_0)
-               set_bit(PDC_XMTINT_0, &pdcs->intstatus);
-       if (intstatus & PDC_RCVINTEN_0)
-               set_bit(PDC_RCVINT_0, &pdcs->intstatus);
+       if (unlikely(intstatus == 0))
+               return IRQ_NONE;
+
+       /* Disable interrupts until soft handler runs */
+       iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
 
        /* Clear interrupt flags in device */
        iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
 
        /* Wakeup IRQ thread */
-       if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
-               return IRQ_WAKE_THREAD;
-
-       return IRQ_NONE;
+       tasklet_schedule(&pdcs->rx_tasklet);
+       return IRQ_HANDLED;
 }
 
 /**
- * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has
- * completed or data is available to receive.
- * @irq:    Interrupt number
- * @cookie: PDC state for PDC that generated the interrupt
- *
- * On DMA tx complete, notify the mailbox client. On DMA rx complete, process
- * as many SPU response messages as are available and send each to the mailbox
- * client.
- *
- * Return: IRQ_HANDLED if we recognized and handled the interrupt
- *         IRQ_NONE otherwise
+ * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * a DMA receive interrupt. Reenables the receive interrupt.
+ * @data: PDC state structure
  */
-static irqreturn_t pdc_irq_thread(int irq, void *cookie)
+static void pdc_tasklet_cb(unsigned long data)
 {
-       struct pdc_state *pdcs = cookie;
-       struct mbox_controller *mbc;
-       struct mbox_chan *chan;
-       bool tx_int;
-       bool rx_int;
-       int rx_status;
-       struct brcm_message mssg;
+       struct pdc_state *pdcs = (struct pdc_state *)data;
 
-       tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus);
-       rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
+       pdc_receive(pdcs);
 
-       if (pdcs && (tx_int || rx_int)) {
-               dev_dbg(&pdcs->pdev->dev,
-                       "%s() got irq %d with tx_int %s, rx_int %s",
-                       __func__, irq,
-                       tx_int ? "set" : "clear", rx_int ? "set" : "clear");
-
-               mbc = &pdcs->mbc;
-               chan = &mbc->chans[0];
-
-               if (tx_int) {
-                       dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__);
-                       /* only one frame in flight at a time */
-                       mbox_chan_txdone(chan, PDC_SUCCESS);
-               }
-               if (rx_int) {
-                       while (1) {
-                               /* Could be many frames ready */
-                               memset(&mssg, 0, sizeof(mssg));
-                               mssg.type = BRCM_MESSAGE_SPU;
-                               rx_status = pdc_receive(pdcs, &mssg);
-                               if (rx_status >= 0) {
-                                       dev_dbg(&pdcs->pdev->dev,
-                                               "%s(): invoking client rx cb",
-                                               __func__);
-                                       mbox_chan_received_data(chan, &mssg);
-                               } else {
-                                       dev_dbg(&pdcs->pdev->dev,
-                                               "%s(): no SPU response available",
-                                               __func__);
-                                       break;
-                               }
-                       }
-               }
-               return IRQ_HANDLED;
-       }
-       return IRQ_NONE;
+       /* reenable interrupts */
+       iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
 }
 
 /**
@@ -1016,14 +996,14 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
 
        /* Allocate tx ring */
        tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
-       if (!tx.vbase) {
+       if (unlikely(!tx.vbase)) {
                err = -ENOMEM;
                goto done;
        }
 
        /* Allocate rx ring */
        rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
-       if (!rx.vbase) {
+       if (unlikely(!rx.vbase)) {
                err = -ENOMEM;
                goto fail_dealloc;
        }
@@ -1033,9 +1013,6 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
        dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
        dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);
 
-       /* lock after ring allocation to avoid scheduling while atomic */
-       spin_lock(&pdcs->pdc_lock);
-
        memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
        memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
 
@@ -1053,40 +1030,52 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
 
        /* Tell device the base DMA address of each ring */
        dma_reg = &pdcs->regs->dmaregs[ringset];
+
+       /* But first disable DMA and set curptr to 0 for both TX & RX */
+       iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+       iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
+                 &dma_reg->dmarcv.control);
+       iowrite32(0, &dma_reg->dmaxmt.ptr);
+       iowrite32(0, &dma_reg->dmarcv.ptr);
+
+       /* Set base DMA addresses */
        iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
-                 (void *)&dma_reg->dmaxmt.addrlow);
+                 &dma_reg->dmaxmt.addrlow);
        iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
-                 (void *)&dma_reg->dmaxmt.addrhigh);
+                 &dma_reg->dmaxmt.addrhigh);
 
        iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
-                 (void *)&dma_reg->dmarcv.addrlow);
+                 &dma_reg->dmarcv.addrlow);
        iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
-                 (void *)&dma_reg->dmarcv.addrhigh);
+                 &dma_reg->dmarcv.addrhigh);
+
+       /* Re-enable DMA */
+       iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
+       iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
+                 &dma_reg->dmarcv.control);
 
        /* Initialize descriptors */
        for (i = 0; i < PDC_RING_ENTRIES; i++) {
                /* Every tx descriptor can be used for start of frame. */
                if (i != pdcs->ntxpost) {
                        iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
-                                 (void *)&pdcs->txd_64[i].ctrl1);
+                                 &pdcs->txd_64[i].ctrl1);
                } else {
                        /* Last descriptor in ringset. Set End of Table. */
                        iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
-                                 D64_CTRL1_EOT,
-                                 (void *)&pdcs->txd_64[i].ctrl1);
+                                 D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
                }
 
                /* Every rx descriptor can be used for start of frame */
                if (i != pdcs->nrxpost) {
                        iowrite32(D64_CTRL1_SOF,
-                                 (void *)&pdcs->rxd_64[i].ctrl1);
+                                 &pdcs->rxd_64[i].ctrl1);
                } else {
                        /* Last descriptor in ringset. Set End of Table. */
                        iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
-                                 (void *)&pdcs->rxd_64[i].ctrl1);
+                                 &pdcs->rxd_64[i].ctrl1);
                }
        }
-       spin_unlock(&pdcs->pdc_lock);
        return PDC_SUCCESS;
 
 fail_dealloc:
@@ -1110,6 +1099,80 @@ static void pdc_ring_free(struct pdc_state *pdcs)
        }
 }
 
+/**
+ * pdc_desc_count() - Count the number of DMA descriptors that will be required
+ * for a given scatterlist. Account for the max length of a DMA buffer.
+ * @sg:    Scatterlist to be DMA'd
+ * Return: Number of descriptors required
+ */
+static u32 pdc_desc_count(struct scatterlist *sg)
+{
+       u32 cnt = 0;
+
+       while (sg) {
+               cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
+               sg = sg_next(sg);
+       }
+       return cnt;
+}
+
+/**
+ * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
+ * and the rx ring has room for rx_cnt descriptors.
+ * @pdcs:  PDC state
+ * @tx_cnt: The number of descriptors required in the tx ring
+ * @rx_cnt: The number of descriptors required in the rx ring
+ *
+ * Return: true if one of the rings does not have enough space
+ *         false if sufficient space is available in both rings
+ */
+static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
+{
+       u32 rx_avail;
+       u32 tx_avail;
+       bool full = false;
+
+       /* Check if the tx and rx rings are likely to have enough space */
+       rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+                                             pdcs->nrxpost);
+       if (unlikely(rx_cnt > rx_avail)) {
+               pdcs->rx_ring_full++;
+               full = true;
+       }
+
+       if (likely(!full)) {
+               tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+                                                     pdcs->ntxpost);
+               if (unlikely(tx_cnt > tx_avail)) {
+                       pdcs->tx_ring_full++;
+                       full = true;
+               }
+       }
+       return full;
+}
+
+/**
+ * pdc_last_tx_done() - If both the tx and rx rings have at least
+ * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
+ * framework can submit another message.
+ * @chan:  mailbox channel to check
+ * Return: true if PDC can accept another message on this channel
+ */
+static bool pdc_last_tx_done(struct mbox_chan *chan)
+{
+       struct pdc_state *pdcs = chan->con_priv;
+       bool ret;
+
+       if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
+                                   PDC_RING_SPACE_MIN))) {
+               pdcs->last_tx_not_done++;
+               ret = false;
+       } else {
+               ret = true;
+       }
+       return ret;
+}
+
 /**
  * pdc_send_data() - mailbox send_data function
  * @chan:      The mailbox channel on which the data is sent. The channel
@@ -1141,29 +1204,43 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
        int src_nent;
        int dst_nent;
        int nent;
+       u32 tx_desc_req;
+       u32 rx_desc_req;
 
-       if (mssg->type != BRCM_MESSAGE_SPU)
+       if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
                return -ENOTSUPP;
 
        src_nent = sg_nents(mssg->spu.src);
-       if (src_nent) {
+       if (likely(src_nent)) {
                nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
-               if (nent == 0)
+               if (unlikely(nent == 0))
                        return -EIO;
        }
 
        dst_nent = sg_nents(mssg->spu.dst);
-       if (dst_nent) {
+       if (likely(dst_nent)) {
                nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
                                  DMA_FROM_DEVICE);
-               if (nent == 0) {
+               if (unlikely(nent == 0)) {
                        dma_unmap_sg(dev, mssg->spu.src, src_nent,
                                     DMA_TO_DEVICE);
                        return -EIO;
                }
        }
 
-       spin_lock(&pdcs->pdc_lock);
+       /*
+        * Check if the tx and rx rings have enough space. Do this prior to
+        * writing any tx or rx descriptors. Need to ensure that we do not write
+        * a partial set of descriptors, or write just rx descriptors but
+        * corresponding tx descriptors don't fit. Note that we want this check
+        * and the entire sequence of descriptor to happen without another
+        * thread getting in. The channel spin lock in the mailbox framework
+        * ensures this.
+        */
+       tx_desc_req = pdc_desc_count(mssg->spu.src);
+       rx_desc_req = pdc_desc_count(mssg->spu.dst);
+       if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
+               return -ENOSPC;
 
        /* Create rx descriptors to SPU catch response */
        err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
@@ -1173,9 +1250,7 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
        err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
        err |= pdc_tx_list_final(pdcs); /* initiate transfer */
 
-       spin_unlock(&pdcs->pdc_lock);
-
-       if (err)
+       if (unlikely(err))
                dev_err(&pdcs->pdev->dev,
                        "%s failed with error %d", __func__, err);
 
@@ -1224,32 +1299,50 @@ void pdc_hw_init(struct pdc_state *pdcs)
        /* initialize data structures */
        pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
        pdcs->txregs_64 = (struct dma64_regs *)
-           (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+           (((u8 *)pdcs->pdc_reg_vbase) +
                     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
        pdcs->rxregs_64 = (struct dma64_regs *)
-           (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+           (((u8 *)pdcs->pdc_reg_vbase) +
                     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
 
        pdcs->ntxd = PDC_RING_ENTRIES;
        pdcs->nrxd = PDC_RING_ENTRIES;
        pdcs->ntxpost = PDC_RING_ENTRIES - 1;
        pdcs->nrxpost = PDC_RING_ENTRIES - 1;
-       pdcs->regs->intmask = 0;
+       iowrite32(0, &pdcs->regs->intmask);
 
        dma_reg = &pdcs->regs->dmaregs[ringset];
-       iowrite32(0, (void *)&dma_reg->dmaxmt.ptr);
-       iowrite32(0, (void *)&dma_reg->dmarcv.ptr);
 
-       iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control);
+       /* Configure DMA but will enable later in pdc_ring_init() */
+       iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
 
        iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
-                 (void *)&dma_reg->dmarcv.control);
+                 &dma_reg->dmarcv.control);
+
+       /* Reset current index pointers after making sure DMA is disabled */
+       iowrite32(0, &dma_reg->dmaxmt.ptr);
+       iowrite32(0, &dma_reg->dmarcv.ptr);
 
        if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
                iowrite32(PDC_CKSUM_CTRL,
                          pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
 }
 
+/**
+ * pdc_hw_disable() - Disable the tx and rx control in the hw.
+ * @pdcs: PDC state structure
+ *
+ */
+static void pdc_hw_disable(struct pdc_state *pdcs)
+{
+       struct dma64 *dma_reg;
+
+       dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
+       iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
+       iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+                 &dma_reg->dmarcv.control);
+}
+
 /**
  * pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata
  * header returned with each response message.
@@ -1301,8 +1394,6 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
        struct device_node *dn = pdev->dev.of_node;
        int err;
 
-       pdcs->intstatus = 0;
-
        /* interrupt configuration */
        iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
        iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);
@@ -1311,11 +1402,11 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
        pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
        dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
                dev_name(dev), pdcs->pdc_irq, pdcs);
-       err = devm_request_threaded_irq(dev, pdcs->pdc_irq,
-                                       pdc_irq_handler,
-                                       pdc_irq_thread, 0, dev_name(dev), pdcs);
+
+       err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
+                              dev_name(dev), dev);
        if (err) {
-               dev_err(dev, "threaded tx IRQ %u request failed with err %d\n",
+               dev_err(dev, "IRQ %u request failed with err %d\n",
                        pdcs->pdc_irq, err);
                return err;
        }
@@ -1324,6 +1415,7 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
 
 static const struct mbox_chan_ops pdc_mbox_chan_ops = {
        .send_data = pdc_send_data,
+       .last_tx_done = pdc_last_tx_done,
        .startup = pdc_startup,
        .shutdown = pdc_shutdown
 };
@@ -1356,8 +1448,9 @@ static int pdc_mb_init(struct pdc_state *pdcs)
        if (!mbc->chans)
                return -ENOMEM;
 
-       mbc->txdone_irq = true;
-       mbc->txdone_poll = false;
+       mbc->txdone_irq = false;
+       mbc->txdone_poll = true;
+       mbc->txpoll_period = 1;
        for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
                mbc->chans[chan_index].con_priv = pdcs;
 
@@ -1427,7 +1520,6 @@ static int pdc_probe(struct platform_device *pdev)
                goto cleanup;
        }
 
-       spin_lock_init(&pdcs->pdc_lock);
        pdcs->pdev = pdev;
        platform_set_drvdata(pdev, pdcs);
        pdcs->pdc_idx = pdcg.num_spu;
@@ -1473,6 +1565,9 @@ static int pdc_probe(struct platform_device *pdev)
 
        pdc_hw_init(pdcs);
 
+       /* Init tasklet for deferred DMA rx processing */
+       tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);
+
        err = pdc_interrupts_init(pdcs);
        if (err)
                goto cleanup_buf_pool;
@@ -1489,6 +1584,7 @@ static int pdc_probe(struct platform_device *pdev)
        return PDC_SUCCESS;
 
 cleanup_buf_pool:
+       tasklet_kill(&pdcs->rx_tasklet);
        dma_pool_destroy(pdcs->rx_buf_pool);
 
 cleanup_ring_pool:
@@ -1504,6 +1600,10 @@ static int pdc_remove(struct platform_device *pdev)
 
        pdc_free_debugfs();
 
+       tasklet_kill(&pdcs->rx_tasklet);
+
+       pdc_hw_disable(pdcs);
+
        mbox_controller_unregister(&pdcs->mbc);
 
        dma_pool_destroy(pdcs->rx_buf_pool);
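
Most of the PDC rework above leans on one invariant: ring sizes are powers of two, so occupancy is (out - in) masked by size - 1, and both pdc_rings_full() and the new pdc_last_tx_done() poll reduce to that arithmetic. A standalone sketch of the math (macro shapes echo NTXDACTIVE/NRXDACTIVE, but this is an illustration, not driver source):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_ENTRIES   512
    #define RING_POST      (RING_ENTRIES - 1)   /* mask; ring size is 2^n */
    #define RING_SPACE_MIN 15

    static unsigned int active(unsigned int in, unsigned int out)
    {
        return (out - in) & RING_POST;          /* wraps correctly mod 2^n */
    }

    static bool ring_full(unsigned int in, unsigned int out, unsigned int need)
    {
        unsigned int avail = RING_POST - active(in, out);

        return need > avail;
    }

    int main(void)
    {
        unsigned int in = 500, out = 10;        /* out has wrapped past in */

        printf("active descriptors: %u\n", active(in, out));       /* 22 */
        printf("room for %u more: %s\n", (unsigned int)RING_SPACE_MIN,
               ring_full(in, out, RING_SPACE_MIN) ? "no" : "yes"); /* yes */
        return 0;
    }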
index a334db5c9f1c126939873555671302b97ebecae9..41bcd339b68a1987eb1a62c1a6653c711341e350 100644 (file)
@@ -403,6 +403,7 @@ static const struct of_device_id sti_mailbox_match[] = {
        },
        { }
 };
+MODULE_DEVICE_TABLE(of, sti_mailbox_match);
 
 static int sti_mbox_probe(struct platform_device *pdev)
 {
index 9ca96e9db6bfb137863250f8b27b2232fc1c449d..9c79f8019d2a5f2f08df00ea6762f4398013c482 100644 (file)
 
 #include <linux/debugfs.h>
 #include <linux/err.h>
+#include <linux/fs.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/mailbox_client.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/poll.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
@@ -39,6 +41,8 @@ struct mbox_test_device {
        char                    *signal;
        char                    *message;
        spinlock_t              lock;
+       wait_queue_head_t       waitq;
+       struct fasync_struct    *async_queue;
 };
 
 static ssize_t mbox_test_signal_write(struct file *filp,
@@ -81,6 +85,13 @@ static const struct file_operations mbox_test_signal_ops = {
        .llseek = generic_file_llseek,
 };
 
+static int mbox_test_message_fasync(int fd, struct file *filp, int on)
+{
+       struct mbox_test_device *tdev = filp->private_data;
+
+       return fasync_helper(fd, filp, on, &tdev->async_queue);
+}
+
 static ssize_t mbox_test_message_write(struct file *filp,
                                       const char __user *userbuf,
                                       size_t count, loff_t *ppos)
@@ -138,6 +149,20 @@ out:
        return ret < 0 ? ret : count;
 }
 
+static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
+{
+       unsigned char data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tdev->lock, flags);
+       data = tdev->rx_buffer[0];
+       spin_unlock_irqrestore(&tdev->lock, flags);
+
+       if (data != '\0')
+               return true;
+       return false;
+}
+
 static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
                                      size_t count, loff_t *ppos)
 {
@@ -147,6 +172,8 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
        int l = 0;
        int ret;
 
+       DECLARE_WAITQUEUE(wait, current);
+
        touser = kzalloc(MBOX_HEXDUMP_MAX_LEN + 1, GFP_KERNEL);
        if (!touser)
                return -ENOMEM;
@@ -155,15 +182,29 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
                ret = snprintf(touser, 20, "<NO RX CAPABILITY>\n");
                ret = simple_read_from_buffer(userbuf, count, ppos,
                                              touser, ret);
-               goto out;
+               goto kfree_err;
        }
 
-       if (tdev->rx_buffer[0] == '\0') {
-               ret = snprintf(touser, 9, "<EMPTY>\n");
-               ret = simple_read_from_buffer(userbuf, count, ppos,
-                                             touser, ret);
-               goto out;
-       }
+       add_wait_queue(&tdev->waitq, &wait);
+
+       do {
+               __set_current_state(TASK_INTERRUPTIBLE);
+
+               if (mbox_test_message_data_ready(tdev))
+                       break;
+
+               if (filp->f_flags & O_NONBLOCK) {
+                       ret = -EAGAIN;
+                       goto waitq_err;
+               }
+
+               if (signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       goto waitq_err;
+               }
+               schedule();
+
+       } while (1);
 
        spin_lock_irqsave(&tdev->lock, flags);
 
@@ -185,14 +226,31 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
        spin_unlock_irqrestore(&tdev->lock, flags);
 
        ret = simple_read_from_buffer(userbuf, count, ppos, touser, MBOX_HEXDUMP_MAX_LEN);
-out:
+waitq_err:
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&tdev->waitq, &wait);
+kfree_err:
        kfree(touser);
        return ret;
 }
 
+static unsigned int
+mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
+{
+       struct mbox_test_device *tdev = filp->private_data;
+
+       poll_wait(filp, &tdev->waitq, wait);
+
+       if (mbox_test_message_data_ready(tdev))
+               return POLLIN | POLLRDNORM;
+       return 0;
+}
+
 static const struct file_operations mbox_test_message_ops = {
        .write  = mbox_test_message_write,
        .read   = mbox_test_message_read,
+       .fasync = mbox_test_message_fasync,
+       .poll   = mbox_test_message_poll,
        .open   = simple_open,
        .llseek = generic_file_llseek,
 };
@@ -234,6 +292,10 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
                memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
        }
        spin_unlock_irqrestore(&tdev->lock, flags);
+
+       wake_up_interruptible(&tdev->waitq);
+
+       kill_fasync(&tdev->async_queue, SIGIO, POLL_IN);
 }
 
 static void mbox_test_prepare_message(struct mbox_client *client, void *message)
@@ -290,6 +352,7 @@ static int mbox_test_probe(struct platform_device *pdev)
 {
        struct mbox_test_device *tdev;
        struct resource *res;
+       resource_size_t size;
        int ret;
 
        tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
@@ -298,14 +361,21 @@ static int mbox_test_probe(struct platform_device *pdev)
 
        /* It's okay for MMIO to be NULL */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       size = resource_size(res);
        tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(tdev->tx_mmio))
+       if (PTR_ERR(tdev->tx_mmio) == -EBUSY)
+               /* if reserved area in SRAM, try just ioremap */
+               tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+       else if (IS_ERR(tdev->tx_mmio))
                tdev->tx_mmio = NULL;
 
        /* If specified, second reg entry is Rx MMIO */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       size = resource_size(res);
        tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(tdev->rx_mmio))
+       if (PTR_ERR(tdev->rx_mmio) == -EBUSY)
+               tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
+       else if (IS_ERR(tdev->rx_mmio))
                tdev->rx_mmio = tdev->tx_mmio;
 
        tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
@@ -334,6 +404,7 @@ static int mbox_test_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       init_waitqueue_head(&tdev->waitq);
        dev_info(&pdev->dev, "Successfully registered\n");
 
        return 0;
@@ -357,6 +428,7 @@ static const struct of_device_id mbox_test_match[] = {
        { .compatible = "mailbox-test" },
        {},
 };
+MODULE_DEVICE_TABLE(of, mbox_test_match);
 
 static struct platform_driver mbox_test_driver = {
        .driver = {
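
With poll() and fasync support wired up as above, userspace no longer has to
busy-read the debugfs "message" file: a reader can block until
mbox_test_receive_message() wakes the queue. A minimal consumer sketch,
assuming the default debugfs mount point (the exact directory the driver
creates may differ):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[512];
            struct pollfd pfd;
            /* path is illustrative */
            int fd = open("/sys/kernel/debug/mailbox-test/message", O_RDONLY);

            if (fd < 0)
                    return 1;
            pfd.fd = fd;
            pfd.events = POLLIN;
            /* blocks until the driver signals received data */
            if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
                    ssize_t n = read(fd, buf, sizeof(buf) - 1);

                    if (n > 0) {
                            buf[n] = '\0';
                            printf("%s", buf);
                    }
            }
            close(fd);
            return 0;
    }
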
index 1f32688c312d717639ecfe7de3ca94b35d31a637..dd9ecd354a3e001a1b4037f0e1ca2c92c5672957 100644 (file)
@@ -447,7 +447,6 @@ static int pcc_parse_subspace_irq(int id,
  */
 static int __init acpi_pcc_probe(void)
 {
-       acpi_size pcct_tbl_header_size;
        struct acpi_table_header *pcct_tbl;
        struct acpi_subtable_header *pcct_entry;
        struct acpi_table_pcct *acpi_pcct_tbl;
@@ -456,9 +455,7 @@ static int __init acpi_pcc_probe(void)
        acpi_status status = AE_OK;
 
        /* Search for PCCT */
-       status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
-                       &pcct_tbl,
-                       &pcct_tbl_header_size);
+       status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
 
        if (ACPI_FAILURE(status) || !pcct_tbl) {
                pr_warn("PCCT header not found.\n");
index 6b420a55c7459f1af42b9dd9c920dce63bd7d089..c3ea03c9a1a8ef603a25934ccbe32ea4bfca3d66 100644 (file)
@@ -425,7 +425,7 @@ struct cache {
         * until a gc finishes - otherwise we could pointlessly burn a ton of
         * cpu
         */
-       unsigned                invalidate_needs_gc:1;
+       unsigned                invalidate_needs_gc;
 
        bool                    discard; /* Get rid of? */
 
@@ -593,8 +593,8 @@ struct cache_set {
 
        /* Counts how many sectors bio_insert has added to the cache */
        atomic_t                sectors_to_gc;
+       wait_queue_head_t       gc_wait;
 
-       wait_queue_head_t       moving_gc_wait;
        struct keybuf           moving_gc_keys;
        /* Number of moving GC bios in flight */
        struct semaphore        moving_in_flight;
index 6fdd8e252760cbc11ff8cceb1c38fb85eccbcbad..a43eedd5804dd8a9c13b60d96c5bca59fd355b81 100644 (file)
@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
        bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-       struct cache_set *c = arg;
        struct cache *ca;
        unsigned i;
 
-       while (1) {
-again:
-               bch_btree_gc(c);
+       for_each_cache(ca, c, i)
+               if (ca->invalidate_needs_gc)
+                       return true;
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (kthread_should_stop())
-                       break;
+       if (atomic_read(&c->sectors_to_gc) < 0)
+               return true;
 
-               mutex_lock(&c->bucket_lock);
+       return false;
+}
 
-               for_each_cache(ca, c, i)
-                       if (ca->invalidate_needs_gc) {
-                               mutex_unlock(&c->bucket_lock);
-                               set_current_state(TASK_RUNNING);
-                               goto again;
-                       }
+static int bch_gc_thread(void *arg)
+{
+       struct cache_set *c = arg;
 
-               mutex_unlock(&c->bucket_lock);
+       while (1) {
+               wait_event_interruptible(c->gc_wait,
+                          kthread_should_stop() || gc_should_run(c));
 
-               schedule();
+               if (kthread_should_stop())
+                       break;
+
+               set_gc_sectors(c);
+               bch_btree_gc(c);
        }
 
        return 0;
@@ -1790,11 +1792,10 @@ again:
 
 int bch_gc_thread_start(struct cache_set *c)
 {
-       c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+       c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
        if (IS_ERR(c->gc_thread))
                return PTR_ERR(c->gc_thread);
 
-       set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
        return 0;
 }
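
The gc thread rework above swaps an open-coded set_current_state()/schedule()
loop for wait_event_interruptible(), which re-checks its condition every time
it is woken, so a wake_up() arriving between the condition test and the sleep
can no longer be lost. The underlying idiom, sketched with a hypothetical
condition flag:

    /* sleeper: blocks until told to stop or until work is pending */
    wait_event_interruptible(wq, kthread_should_stop() || work_pending);

    /* waker: make the condition observable before waking */
    work_pending = true;
    wake_up(&wq);
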
 
index 5c391fa01bedbfba3f1dea062605460ccadc1c6a..9b80417cd547f52c264c1b4b993f3ee2155405f2 100644 (file)
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
-       if (c->gc_thread)
-               wake_up_process(c->gc_thread);
+       wake_up(&c->gc_wait);
 }
 
 #define MAP_DONE       0
index f49c5417527dcbb8e0a839a32ec5d354323d63a4..76d20875503c17c6f5956d7f7bfde4eaa90ffbcd 100644 (file)
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;
 
-       if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-               set_gc_sectors(op->c);
+       if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
                wake_up_gc(op->c);
-       }
 
        if (op->bypass)
                return bch_data_invalidate(cl);
index 2fb5bfeb43e2e1668051667d6582a087b6bb14ad..3a19cbc8b230e5a7b77cafa89427b8e282c251dc 100644 (file)
@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
 
 #define BTREE_MAX_PAGES                (256 * 1024 / PAGE_SIZE)
+#define BCACHE_MINORS          16 /* partition support */
 
 /* Superblock */
 
@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
        if (minor < 0)
                return minor;
 
+       minor *= BCACHE_MINORS;
+
        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(d->disk = alloc_disk(1))) {
+           !(d->disk = alloc_disk(BCACHE_MINORS))) {
                ida_simple_remove(&bcache_minor, minor);
                return -ENOMEM;
        }
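
With BCACHE_MINORS at 16, the ida still hands out consecutive ids (0, 1,
2, ...); the multiply turns those into first-minor numbers 0, 16, 32, ...,
and alloc_disk(BCACHE_MINORS) reserves the whole 16-minor stride, giving each
bcache device one whole-disk minor plus room for up to 15 partitions.
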
@@ -1489,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->btree_cache_wait);
        init_waitqueue_head(&c->bucket_wait);
+       init_waitqueue_head(&c->gc_wait);
        sema_init(&c->uuid_write_mutex, 1);
 
        spin_lock_init(&c->btree_gc_time.lock);
@@ -1548,6 +1552,7 @@ static void run_cache_set(struct cache_set *c)
 
        for_each_cache(ca, c, i)
                c->nbuckets += ca->sb.nbuckets;
+       set_gc_sectors(c);
 
        if (CACHE_SYNC(&c->sb)) {
                LIST_HEAD(journal);
index 1ed0584f494e415d5529cdf28612c1e701b6e721..4ce3b6f118304048c0fb4d0db7c1d4da7463e8e7 100644 (file)
@@ -40,6 +40,22 @@ config MFD_ACT8945A
          linear regulators, along with a complete ActivePath battery
          charger.
 
+config MFD_SUN4I_GPADC
+       tristate "Allwinner sunxi platforms' GPADC MFD driver"
+       select MFD_CORE
+       select REGMAP_MMIO
+       select REGMAP_IRQ
+       depends on ARCH_SUNXI || COMPILE_TEST
+       help
+         Select this to get support for the ADC found in Allwinner SoCs
+         (A10, A13 and A31). This driver only maps the hardware interrupt
+         and registers; you have to select individual drivers based on
+         this MFD to be able to use the ADC or the thermal sensor. It will
+         try to probe the ADC driver sun4i-gpadc-iio and the hwmon driver
+         iio_hwmon.
+
+         To compile this driver as a module, choose M here: the module will be
+         called sun4i-gpadc.
+
 config MFD_AS3711
        bool "AMS AS3711"
        select MFD_CORE
@@ -293,6 +309,7 @@ config MFD_DLN2
 
 config MFD_EXYNOS_LPASS
        tristate "Samsung Exynos SoC Low Power Audio Subsystem"
+       depends on ARCH_EXYNOS || COMPILE_TEST
        select MFD_CORE
        select REGMAP_MMIO
        help
@@ -563,7 +580,7 @@ config MFD_MAX14577
 config MFD_MAX77620
        bool "Maxim Semiconductor MAX77620 and MAX20024 PMIC Support"
        depends on I2C=y
-       depends on OF
+       depends on OF || COMPILE_TEST
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
@@ -578,7 +595,7 @@ config MFD_MAX77620
 config MFD_MAX77686
        tristate "Maxim Semiconductor MAX77686/802 PMIC Support"
        depends on I2C
-       depends on OF
+       depends on OF || COMPILE_TEST
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
@@ -877,7 +894,8 @@ config MFD_RN5T618
        select MFD_CORE
        select REGMAP_I2C
        help
-         Say yes here to add support for the Ricoh RN5T567 or R5T618 PMIC.
+         Say yes here to add support for the Ricoh RN5T567,
+         RN5T618 or RC5T619 PMIC.
          This driver provides common support for accessing the device,
          additional drivers must be enabled in order to use the
          functionality of the device.
@@ -951,7 +969,7 @@ config MFD_SMSC
 
 config ABX500_CORE
        bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
-       default y if ARCH_U300 || ARCH_U8500
+       default y if ARCH_U300 || ARCH_U8500 || COMPILE_TEST
        help
          Say yes here if you have the ABX500 Mixed Signal IC family
          chips. This core driver exposes register access functions.
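
Several of the dependency tweaks above follow one pattern: restrict a driver
to the platforms it is useful on, but keep it buildable everywhere so that
allyesconfig/allmodconfig runs exercise the code. A generic Kconfig sketch of
the idiom (symbol names hypothetical):

    config MFD_FOO
            tristate "Foo PMIC support"
            depends on ARCH_FOO || COMPILE_TEST
            select MFD_CORE
            help
              Only useful on Foo platforms, but COMPILE_TEST keeps the
              driver compiling on every architecture for build coverage.
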
index 7bb5a50127cbb32bf3c2959705eafbb7c741a65b..dda4d4f73ad743b7cdde1f085dd494c0a1701656 100644 (file)
@@ -211,3 +211,4 @@ obj-$(CONFIG_INTEL_SOC_PMIC)        += intel-soc-pmic.o
 obj-$(CONFIG_MFD_MT6397)       += mt6397-core.o
 
 obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
+obj-$(CONFIG_MFD_SUN4I_GPADC)  += sun4i-gpadc.o
index 6a5a98806cb817a28c782117a2a171c6975525a5..099635bed18850a7efc4e781688cee434e48e04a 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/notifier.h>
 #include <linux/slab.h>
 #include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
@@ -628,20 +628,10 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100)
  exit_no_debugfs:
        return;
 }
-static inline void ab3100_remove_debugfs(void)
-{
-       debugfs_remove(ab3100_set_reg_file);
-       debugfs_remove(ab3100_get_reg_file);
-       debugfs_remove(ab3100_reg_file);
-       debugfs_remove(ab3100_dir);
-}
 #else
 static inline void ab3100_setup_debugfs(struct ab3100 *ab3100)
 {
 }
-static inline void ab3100_remove_debugfs(void)
-{
-}
 #endif
 
 /*
@@ -949,45 +939,22 @@ static int ab3100_probe(struct i2c_client *client,
        return err;
 }
 
-static int ab3100_remove(struct i2c_client *client)
-{
-       struct ab3100 *ab3100 = i2c_get_clientdata(client);
-
-       /* Unregister subdevices */
-       mfd_remove_devices(&client->dev);
-       ab3100_remove_debugfs();
-       i2c_unregister_device(ab3100->testreg_client);
-       return 0;
-}
-
 static const struct i2c_device_id ab3100_id[] = {
        { "ab3100", 0 },
        { }
 };
-MODULE_DEVICE_TABLE(i2c, ab3100_id);
 
 static struct i2c_driver ab3100_driver = {
        .driver = {
-               .name   = "ab3100",
+               .name                   = "ab3100",
+               .suppress_bind_attrs    = true,
        },
        .id_table       = ab3100_id,
        .probe          = ab3100_probe,
-       .remove         = ab3100_remove,
 };
 
 static int __init ab3100_i2c_init(void)
 {
        return i2c_add_driver(&ab3100_driver);
 }
-
-static void __exit ab3100_i2c_exit(void)
-{
-       i2c_del_driver(&ab3100_driver);
-}
-
 subsys_initcall(ab3100_i2c_init);
-module_exit(ab3100_i2c_exit);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
-MODULE_DESCRIPTION("AB3100 core driver");
-MODULE_LICENSE("GPL");
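
The ab3100 changes above (and the ab8500/abx500/gpadc/sysctrl ones that
follow) are the standard "explicitly non-modular" conversion: a driver that
can only be built in drops its remove path, module_exit() and MODULE_*
boilerplate, and sets suppress_bind_attrs so sysfs cannot unbind a device the
driver can no longer tear down. The resulting skeleton, sketched with
hypothetical names:

    static struct i2c_driver foo_driver = {
            .driver = {
                    .name                = "foo",
                    /* no remove path, so forbid sysfs-triggered unbind */
                    .suppress_bind_attrs = true,
            },
            .probe = foo_probe,
    };

    static int __init foo_init(void)
    {
            return i2c_add_driver(&foo_driver);
    }
    /* built-in only: registered early, never unregistered */
    subsys_initcall(foo_init);
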
index 589eebfc13df9faf8ef5fed5c993e428a5a5ccf9..6e00124cef01a0772f24c4c5d880ec3f66332ef2 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/irqdomain.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/abx500.h>
@@ -123,6 +123,10 @@ static DEFINE_SPINLOCK(on_stat_lock);
 static u8 turn_on_stat_mask = 0xFF;
 static u8 turn_on_stat_set;
 static bool no_bm; /* No battery management */
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param here.
+ */
 module_param(no_bm, bool, S_IRUGO);
 
 #define AB9540_MODEM_CTRL2_REG                 0x23
@@ -1324,25 +1328,6 @@ static int ab8500_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int ab8500_remove(struct platform_device *pdev)
-{
-       struct ab8500 *ab8500 = platform_get_drvdata(pdev);
-
-       if (((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
-                       ab8500->chip_id >= AB8500_CUT2P0) || is_ab8540(ab8500))
-               sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
-       else
-               sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
-
-       if ((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
-                       ab8500->chip_id >= AB8500_CUT2P0)
-               sysfs_remove_group(&ab8500->dev->kobj, &ab8505_attr_group);
-
-       mfd_remove_devices(ab8500->dev);
-
-       return 0;
-}
-
 static const struct platform_device_id ab8500_id[] = {
        { "ab8500-core", AB8500_VERSION_AB8500 },
        { "ab8505-i2c", AB8500_VERSION_AB8505 },
@@ -1354,9 +1339,9 @@ static const struct platform_device_id ab8500_id[] = {
 static struct platform_driver ab8500_core_driver = {
        .driver = {
                .name = "ab8500-core",
+               .suppress_bind_attrs = true,
        },
        .probe  = ab8500_probe,
-       .remove = ab8500_remove,
        .id_table = ab8500_id,
 };
 
@@ -1364,14 +1349,4 @@ static int __init ab8500_core_init(void)
 {
        return platform_driver_register(&ab8500_core_driver);
 }
-
-static void __exit ab8500_core_exit(void)
-{
-       platform_driver_unregister(&ab8500_core_driver);
-}
 core_initcall(ab8500_core_init);
-module_exit(ab8500_core_exit);
-
-MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
-MODULE_DESCRIPTION("AB8500 MFD core");
-MODULE_LICENSE("GPL v2");
index acf6c00b14b92c3bfb8f990df5778e5267c281ad..c1c815241e028375e3820e861251a8288bac730d 100644 (file)
@@ -74,7 +74,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/fs.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/debugfs.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
@@ -3234,33 +3234,16 @@ err:
        return -ENOMEM;
 }
 
-static int ab8500_debug_remove(struct platform_device *plf)
-{
-       debugfs_remove_recursive(ab8500_dir);
-
-       return 0;
-}
-
 static struct platform_driver ab8500_debug_driver = {
        .driver = {
                .name = "ab8500-debug",
+               .suppress_bind_attrs = true,
        },
        .probe  = ab8500_debug_probe,
-       .remove = ab8500_debug_remove
 };
 
 static int __init ab8500_debug_init(void)
 {
        return platform_driver_register(&ab8500_debug_driver);
 }
-
-static void __exit ab8500_debug_exit(void)
-{
-       platform_driver_unregister(&ab8500_debug_driver);
-}
 subsys_initcall(ab8500_debug_init);
-module_exit(ab8500_debug_exit);
-
-MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
-MODULE_DESCRIPTION("AB8500 DEBUG");
-MODULE_LICENSE("GPL v2");
index 97dcadc8fa8bfd898c16f788851f6b43a06b979e..f4e94869d6129a60d1da63cd851cbf0183ceb8fd 100644 (file)
@@ -5,9 +5,9 @@
  * Author: Arun R Murthy <arun.murthy@stericsson.com>
  * Author: Daniel Willerud <daniel.willerud@stericsson.com>
  * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: M'boumba Cedric Madianga
  */
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
@@ -1054,11 +1054,7 @@ static int __init ab8500_gpadc_init(void)
 {
        return platform_driver_register(&ab8500_gpadc_driver);
 }
-
-static void __exit ab8500_gpadc_exit(void)
-{
-       platform_driver_unregister(&ab8500_gpadc_driver);
-}
+subsys_initcall_sync(ab8500_gpadc_init);
 
 /**
  * ab8540_gpadc_get_otp() - returns OTP values
@@ -1077,14 +1073,3 @@ void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
        *ibat_l  = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo;
        *ibat_h  = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi;
 }
-
-subsys_initcall_sync(ab8500_gpadc_init);
-module_exit(ab8500_gpadc_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Arun R Murthy");
-MODULE_AUTHOR("Daniel Willerud");
-MODULE_AUTHOR("Johan Palsson");
-MODULE_AUTHOR("M'boumba Cedric Madianga");
-MODULE_ALIAS("platform:ab8500_gpadc");
-MODULE_DESCRIPTION("AB8500 GPADC driver");
index 207cc497958a855c0abe012bbee749dfac650f93..80c0efa66ac13d1e5485cd9cee9461d79b43e594 100644 (file)
@@ -1,11 +1,14 @@
 /*
+ * AB8500 system control driver
+ *
  * Copyright (C) ST-Ericsson SA 2010
  * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
  * License terms: GNU General Public License (GPL) version 2
  */
 
 #include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
 #include <linux/reboot.h>
@@ -158,7 +161,3 @@ static int __init ab8500_sysctrl_init(void)
        return platform_driver_register(&ab8500_sysctrl_driver);
 }
 arch_initcall(ab8500_sysctrl_init);
-
-MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com");
-MODULE_DESCRIPTION("AB8500 system control driver");
-MODULE_LICENSE("GPL v2");
index fe418995108c6f75ea23908d97e8e6cefd6ecefe..0d3846a4767cba7479f68d7ab4badded7bb3019b 100644 (file)
@@ -8,7 +8,8 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
 #include <linux/mfd/abx500.h>
 
 static LIST_HEAD(abx500_list);
@@ -150,7 +151,3 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
                return -ENOTSUPP;
 }
 EXPORT_SYMBOL(abx500_startup_irq_enabled);
-
-MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
-MODULE_DESCRIPTION("ABX500 core driver");
-MODULE_LICENSE("GPL");
index 41767f7239bbec98916da2de1ea1924b1b3beeba..b6d4bc63c42624388efc230355b2f0a0fd192f92 100644 (file)
@@ -1553,6 +1553,7 @@ EXPORT_SYMBOL_GPL(arizona_dev_init);
 
 int arizona_dev_exit(struct arizona *arizona)
 {
+       disable_irq(arizona->irq);
        pm_runtime_disable(arizona->dev);
 
        regulator_disable(arizona->dcvdd);
index 5e18d3c77582b02050da45db97ee87fc1777bab9..2e01975f042d5f63d0a29cdccb0000d0d1f358ad 100644 (file)
@@ -398,10 +398,10 @@ err_ctrlif:
 err_boot_done:
        free_irq(arizona->irq, arizona);
 err_main_irq:
-       regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
+       regmap_del_irq_chip(irq_find_mapping(arizona->virq, 1),
                            arizona->irq_chip);
 err_aod:
-       regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
+       regmap_del_irq_chip(irq_find_mapping(arizona->virq, 0),
                            arizona->aod_irq_chip);
 err:
        return ret;
@@ -413,9 +413,9 @@ int arizona_irq_exit(struct arizona *arizona)
                free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR),
                         arizona);
        free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
-       regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
+       regmap_del_irq_chip(irq_find_mapping(arizona->virq, 1),
                            arizona->irq_chip);
-       regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
+       regmap_del_irq_chip(irq_find_mapping(arizona->virq, 0),
                            arizona->aod_irq_chip);
        free_irq(arizona->irq, arizona);
 
index b1b865822c07e854050354d5c18b9010edeca6ca..d35a5fe6c950299787def746155a6c3fa61a99bc 100644 (file)
@@ -69,10 +69,11 @@ static const struct of_device_id axp20x_i2c_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
 
-/*
- * This is useless for OF-enabled devices, but it is needed by I2C subsystem
- */
 static const struct i2c_device_id axp20x_i2c_id[] = {
+       { "axp152", 0 },
+       { "axp202", 0 },
+       { "axp209", 0 },
+       { "axp221", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
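
Filling in the previously empty i2c_device_id table lets the I2C core match
these PMICs by name when no OF node is present, for instance when a board
file or userspace instantiates the device. An illustrative manual
instantiation through sysfs (bus number and address are examples only):

    echo axp209 0x34 > /sys/bus/i2c/devices/i2c-0/new_device
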
index ba130be32e61363bef79298e5aa90c46bf8fb915..ed918de84238c33d83c9f60f26db069bec14a82d 100644 (file)
@@ -98,6 +98,7 @@ static const struct regmap_range axp22x_volatile_ranges[] = {
        regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_PWR_OP_MODE),
        regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
        regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
+       regmap_reg_range(AXP22X_PMIC_ADC_H, AXP20X_IPSOUT_V_HIGH_L),
        regmap_reg_range(AXP20X_FG_RES, AXP20X_FG_RES),
 };
 
@@ -135,6 +136,7 @@ static const struct regmap_range axp806_writeable_ranges[] = {
        regmap_reg_range(AXP806_PWR_OUT_CTRL1, AXP806_CLDO3_V_CTRL),
        regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ2_EN),
        regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE),
+       regmap_reg_range(AXP806_REG_ADDR_EXT, AXP806_REG_ADDR_EXT),
 };
 
 static const struct regmap_range axp806_volatile_ranges[] = {
@@ -305,7 +307,7 @@ static const struct regmap_config axp806_regmap_config = {
        .val_bits       = 8,
        .wr_table       = &axp806_writeable_table,
        .volatile_table = &axp806_volatile_table,
-       .max_register   = AXP806_VREF_TEMP_WARN_L,
+       .max_register   = AXP806_REG_ADDR_EXT,
        .cache_type     = REGCACHE_RBTREE,
 };
 
index 0d76d690176b4efd46c04246a5ccfff0a223b0c0..c572a35a934136f1c23190a2f58f5fc1bb9a8192 100644 (file)
@@ -67,7 +67,7 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri,
        /* Secondary I2C slave address is the base address with A(2) asserted */
        bcm590xx->i2c_sec = i2c_new_dummy(i2c_pri->adapter,
                                          i2c_pri->addr | BIT(2));
-       if (IS_ERR_OR_NULL(bcm590xx->i2c_sec)) {
+       if (!bcm590xx->i2c_sec) {
                dev_err(&i2c_pri->dev, "failed to add secondary I2C device\n");
                return -ENODEV;
        }
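
The simplified check works because i2c_new_dummy() signals failure by
returning NULL, never an ERR_PTR; the old IS_ERR_OR_NULL() test only
obscured that no error pointer can ever reach this point.
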
index f6b78aafdb557ac2f8f97b0b8392a0b682b528ad..c090974340ad38c2013fbf716f9be6576c960012 100644 (file)
@@ -292,6 +292,7 @@ static const struct reg_default cs47l24_reg_default[] = {
        { 0x00000502, 0x0000 },    /* R1282  - AIF1 Rx Pin Ctrl */
        { 0x00000503, 0x0000 },    /* R1283  - AIF1 Rate Ctrl */
        { 0x00000504, 0x0000 },    /* R1284  - AIF1 Format */
+       { 0x00000505, 0x0040 },    /* R1285  - AIF1 Tx BCLK Rate */
        { 0x00000506, 0x0040 },    /* R1286  - AIF1 Rx BCLK Rate */
        { 0x00000507, 0x1818 },    /* R1287  - AIF1 Frame Ctrl 1 */
        { 0x00000508, 0x1818 },    /* R1288  - AIF1 Frame Ctrl 2 */
@@ -318,6 +319,7 @@ static const struct reg_default cs47l24_reg_default[] = {
        { 0x00000542, 0x0000 },    /* R1346  - AIF2 Rx Pin Ctrl */
        { 0x00000543, 0x0000 },    /* R1347  - AIF2 Rate Ctrl */
        { 0x00000544, 0x0000 },    /* R1348  - AIF2 Format */
+       { 0x00000545, 0x0040 },    /* R1349  - AIF2 Tx BCLK Rate */
        { 0x00000546, 0x0040 },    /* R1350  - AIF2 Rx BCLK Rate */
        { 0x00000547, 0x1818 },    /* R1351  - AIF2 Frame Ctrl 1 */
        { 0x00000548, 0x1818 },    /* R1352  - AIF2 Frame Ctrl 2 */
@@ -340,6 +342,7 @@ static const struct reg_default cs47l24_reg_default[] = {
        { 0x00000582, 0x0000 },    /* R1410  - AIF3 Rx Pin Ctrl */
        { 0x00000583, 0x0000 },    /* R1411  - AIF3 Rate Ctrl */
        { 0x00000584, 0x0000 },    /* R1412  - AIF3 Format */
+       { 0x00000585, 0x0040 },    /* R1413  - AIF3 Tx BCLK Rate */
        { 0x00000586, 0x0040 },    /* R1414  - AIF3 Rx BCLK Rate */
        { 0x00000587, 0x1818 },    /* R1415  - AIF3 Frame Ctrl 1 */
        { 0x00000588, 0x1818 },    /* R1416  - AIF3 Frame Ctrl 2 */
@@ -923,6 +926,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_AIF1_RX_PIN_CTRL:
        case ARIZONA_AIF1_RATE_CTRL:
        case ARIZONA_AIF1_FORMAT:
+       case ARIZONA_AIF1_TX_BCLK_RATE:
        case ARIZONA_AIF1_RX_BCLK_RATE:
        case ARIZONA_AIF1_FRAME_CTRL_1:
        case ARIZONA_AIF1_FRAME_CTRL_2:
@@ -949,6 +953,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_AIF2_RX_PIN_CTRL:
        case ARIZONA_AIF2_RATE_CTRL:
        case ARIZONA_AIF2_FORMAT:
+       case ARIZONA_AIF2_TX_BCLK_RATE:
        case ARIZONA_AIF2_RX_BCLK_RATE:
        case ARIZONA_AIF2_FRAME_CTRL_1:
        case ARIZONA_AIF2_FRAME_CTRL_2:
@@ -971,6 +976,7 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_AIF3_RX_PIN_CTRL:
        case ARIZONA_AIF3_RATE_CTRL:
        case ARIZONA_AIF3_FORMAT:
+       case ARIZONA_AIF3_TX_BCLK_RATE:
        case ARIZONA_AIF3_RX_BCLK_RATE:
        case ARIZONA_AIF3_FRAME_CTRL_1:
        case ARIZONA_AIF3_FRAME_CTRL_2:
index dff2f19296b881801a00afde363a53acf939b633..4d0a5f38038a75f893c20fb07e944dfc04375e29 100644 (file)
@@ -32,6 +32,7 @@
 #include <sound/pcm.h>
 
 #include <linux/mfd/davinci_voicecodec.h>
+#include <mach/hardware.h>
 
 static const struct regmap_config davinci_vc_regmap = {
        .reg_bits = 32,
index 77b2675cf8f5df53b035ae1f3a373b7cb50754b8..ac430a396a89945b0cad1542db8a11b68b497fa5 100644 (file)
@@ -187,6 +187,7 @@ static const struct of_device_id mx25_tsadc_ids[] = {
        { .compatible = "fsl,imx25-tsadc" },
        { /* Sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, mx25_tsadc_ids);
 
 static struct platform_driver mx25_tsadc_driver = {
        .driver = {
index 0fc62995695ba8e6912e85e62ac49a31a2cf8244..ba706adee38b551379e429f42b1de558d333c5b7 100644 (file)
@@ -169,6 +169,7 @@ static const struct of_device_id hi655x_pmic_match[] = {
        { .compatible = "hisilicon,hi655x-pmic", },
        {},
 };
+MODULE_DEVICE_TABLE(of, hi655x_pmic_match);
 
 static struct platform_driver hi655x_pmic_driver = {
        .driver = {
index 9ff243970e93ef1c025df40ca3e4474f59c371f5..78dbcf8b0befc90dea990644f24bf7b32e1c035e 100644 (file)
@@ -41,6 +41,7 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
 
        /* Probably it is enough to set this for iDMA capable devices only */
        pci_set_master(pdev);
+       pci_try_set_mwi(pdev);
 
        ret = intel_lpss_probe(&pdev->dev, info);
        if (ret)
index b99772b804d9e972296b6efef5786cab7b46097b..699c8c7c90528759c673192d14d804d9c46db89a 100644 (file)
@@ -519,7 +519,7 @@ static const struct acpi_device_id bxtwc_acpi_ids[] = {
        { "INT34D3", },
        { }
 };
-MODULE_DEVICE_TABLE(acpi, pmic_acpi_ids);
+MODULE_DEVICE_TABLE(acpi, bxtwc_acpi_ids);
 
 static struct platform_driver bxtwc_driver = {
        .probe = bxtwc_probe,
index c8dee47b45d96f6e4aca9862cfaad7555de41082..1ef7575547e69d715972685fa46bdf10eb91f619 100644 (file)
@@ -493,6 +493,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
        [LPC_LPT] = {
                .name = "Lynx Point",
                .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
        },
        [LPC_LPT_LP] = {
                .name = "Lynx Point_LP",
@@ -530,6 +531,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
        [LPC_9S] = {
                .name = "9 Series",
                .iTCO_version = 2,
+               .gpio_version = ICH_V5_GPIO,
        },
 };
 
index 8f8bacb67a15a4608de39efa80f30a0f7a550d02..ee9e9ea104447a47c7fd222a61e9f78d098bc121 100644 (file)
@@ -431,9 +431,6 @@ static void palmas_power_off(void)
        unsigned int addr;
        int ret, slave;
 
-       if (!palmas_dev)
-               return;
-
        slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
        addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL);
 
index 7f9620ec61e8f28f6954273c687f19e233d3c3d7..f08758f6b418f02fc1772770d321e9770b9d9435 100644 (file)
 #define        SSBI_REG_ADDR_IRQ_CONFIG        (SSBI_REG_ADDR_IRQ_BASE + 7)
 #define        SSBI_REG_ADDR_IRQ_RT_STATUS     (SSBI_REG_ADDR_IRQ_BASE + 8)
 
+#define        PM8821_SSBI_REG_ADDR_IRQ_BASE   0x100
+#define        PM8821_SSBI_REG_ADDR_IRQ_MASTER0 (PM8821_SSBI_REG_ADDR_IRQ_BASE + 0x30)
+#define        PM8821_SSBI_REG_ADDR_IRQ_MASTER1 (PM8821_SSBI_REG_ADDR_IRQ_BASE + 0xb0)
+#define        PM8821_SSBI_REG(m, b, offset) \
+                       ((m == 0) ? \
+                       (PM8821_SSBI_REG_ADDR_IRQ_MASTER0 + b + offset) : \
+                       (PM8821_SSBI_REG_ADDR_IRQ_MASTER1 + b + offset))
+#define        PM8821_SSBI_ADDR_IRQ_ROOT(m, b)         PM8821_SSBI_REG(m, b, 0x0)
+#define        PM8821_SSBI_ADDR_IRQ_CLEAR(m, b)        PM8821_SSBI_REG(m, b, 0x01)
+#define        PM8821_SSBI_ADDR_IRQ_MASK(m, b)         PM8821_SSBI_REG(m, b, 0x08)
+#define        PM8821_SSBI_ADDR_IRQ_RT_STATUS(m, b)    PM8821_SSBI_REG(m, b, 0x0f)
+
+#define        PM8821_BLOCKS_PER_MASTER        7
+
 #define        PM_IRQF_LVL_SEL                 0x01    /* level select */
 #define        PM_IRQF_MASK_FE                 0x02    /* mask falling edge */
 #define        PM_IRQF_MASK_RE                 0x04    /* mask rising edge */
@@ -54,6 +68,7 @@
 #define REG_HWREV_2            0x0E8  /* PMIC4 revision 2 */
 
 #define PM8XXX_NR_IRQS         256
+#define PM8821_NR_IRQS         112
 
 struct pm_irq_chip {
        struct regmap           *regmap;
@@ -65,6 +80,12 @@ struct pm_irq_chip {
        u8                      config[0];
 };
 
+struct pm_irq_data {
+       int num_irqs;
+       const struct irq_domain_ops  *irq_domain_ops;
+       void (*irq_handler)(struct irq_desc *desc);
+};
+
 static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp,
                                 unsigned int *ip)
 {
@@ -182,6 +203,78 @@ static void pm8xxx_irq_handler(struct irq_desc *desc)
        chained_irq_exit(irq_chip, desc);
 }
 
+static void pm8821_irq_block_handler(struct pm_irq_chip *chip,
+                                    int master, int block)
+{
+       int pmirq, irq, i, ret;
+       unsigned int bits;
+
+       ret = regmap_read(chip->regmap,
+                         PM8821_SSBI_ADDR_IRQ_ROOT(master, block), &bits);
+       if (ret) {
+               pr_err("Reading block %d failed ret=%d", block, ret);
+               return;
+       }
+
+       /* Convert block offset to global block number */
+       block += (master * PM8821_BLOCKS_PER_MASTER) - 1;
+
+       /* Check IRQ bits */
+       for (i = 0; i < 8; i++) {
+               if (bits & BIT(i)) {
+                       pmirq = block * 8 + i;
+                       irq = irq_find_mapping(chip->irqdomain, pmirq);
+                       generic_handle_irq(irq);
+               }
+       }
+}
+
+static inline void pm8821_irq_master_handler(struct pm_irq_chip *chip,
+                                            int master, u8 master_val)
+{
+       int block;
+
+       for (block = 1; block < 8; block++)
+               if (master_val & BIT(block))
+                       pm8821_irq_block_handler(chip, master, block);
+}
+
+static void pm8821_irq_handler(struct irq_desc *desc)
+{
+       struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+       struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+       unsigned int master;
+       int ret;
+
+       chained_irq_enter(irq_chip, desc);
+       ret = regmap_read(chip->regmap,
+                         PM8821_SSBI_REG_ADDR_IRQ_MASTER0, &master);
+       if (ret) {
+               pr_err("Failed to read master 0 ret=%d\n", ret);
+               goto done;
+       }
+
+       /* bits 1 through 7 mark the first 7 blocks in master 0 */
+       if (master & GENMASK(7, 1))
+               pm8821_irq_master_handler(chip, 0, master);
+
+       /* bit 0 marks if master 1 contains any bits */
+       if (!(master & BIT(0)))
+               goto done;
+
+       ret = regmap_read(chip->regmap,
+                         PM8821_SSBI_REG_ADDR_IRQ_MASTER1, &master);
+       if (ret) {
+               pr_err("Failed to read master 1 ret=%d\n", ret);
+               goto done;
+       }
+
+       pm8821_irq_master_handler(chip, 1, master);
+
+done:
+       chained_irq_exit(irq_chip, desc);
+}
+
 static void pm8xxx_irq_mask_ack(struct irq_data *d)
 {
        struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
@@ -299,6 +392,104 @@ static const struct irq_domain_ops pm8xxx_irq_domain_ops = {
        .map = pm8xxx_irq_domain_map,
 };
 
+static void pm8821_irq_mask_ack(struct irq_data *d)
+{
+       struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+       unsigned int pmirq = irqd_to_hwirq(d);
+       u8 block, master;
+       int irq_bit, rc;
+
+       block = pmirq / 8;
+       master = block / PM8821_BLOCKS_PER_MASTER;
+       irq_bit = pmirq % 8;
+       block %= PM8821_BLOCKS_PER_MASTER;
+
+       rc = regmap_update_bits(chip->regmap,
+                               PM8821_SSBI_ADDR_IRQ_MASK(master, block),
+                               BIT(irq_bit), BIT(irq_bit));
+       if (rc) {
+               pr_err("Failed to mask IRQ:%d rc=%d\n", pmirq, rc);
+               return;
+       }
+
+       rc = regmap_update_bits(chip->regmap,
+                               PM8821_SSBI_ADDR_IRQ_CLEAR(master, block),
+                               BIT(irq_bit), BIT(irq_bit));
+       if (rc)
+               pr_err("Failed to CLEAR IRQ:%d rc=%d\n", pmirq, rc);
+}
+
+static void pm8821_irq_unmask(struct irq_data *d)
+{
+       struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+       unsigned int pmirq = irqd_to_hwirq(d);
+       int irq_bit, rc;
+       u8 block, master;
+
+       block = pmirq / 8;
+       master = block / PM8821_BLOCKS_PER_MASTER;
+       irq_bit = pmirq % 8;
+       block %= PM8821_BLOCKS_PER_MASTER;
+
+       rc = regmap_update_bits(chip->regmap,
+                               PM8821_SSBI_ADDR_IRQ_MASK(master, block),
+                               BIT(irq_bit), ~BIT(irq_bit));
+       if (rc)
+               pr_err("Failed to read/write unmask IRQ:%d rc=%d\n", pmirq, rc);
+}
+
+static int pm8821_irq_get_irqchip_state(struct irq_data *d,
+                                       enum irqchip_irq_state which,
+                                       bool *state)
+{
+       struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+       int rc, pmirq = irqd_to_hwirq(d);
+       u8 block, irq_bit, master;
+       unsigned int bits;
+
+       block = pmirq / 8;
+       master = block / PM8821_BLOCKS_PER_MASTER;
+       irq_bit = pmirq % 8;
+       block %= PM8821_BLOCKS_PER_MASTER;
+
+       rc = regmap_read(chip->regmap,
+               PM8821_SSBI_ADDR_IRQ_RT_STATUS(master, block), &bits);
+       if (rc) {
+               pr_err("Reading Status of IRQ %d failed rc=%d\n", pmirq, rc);
+               return rc;
+       }
+
+       *state = !!(bits & BIT(irq_bit));
+
+       return rc;
+}
+
+static struct irq_chip pm8821_irq_chip = {
+       .name           = "pm8821",
+       .irq_mask_ack   = pm8821_irq_mask_ack,
+       .irq_unmask     = pm8821_irq_unmask,
+       .irq_get_irqchip_state = pm8821_irq_get_irqchip_state,
+       .flags          = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int pm8821_irq_domain_map(struct irq_domain *d, unsigned int irq,
+                                  irq_hw_number_t hwirq)
+{
+       struct pm_irq_chip *chip = d->host_data;
+
+       irq_set_chip_and_handler(irq, &pm8821_irq_chip, handle_level_irq);
+       irq_set_chip_data(irq, chip);
+       irq_set_noprobe(irq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops pm8821_irq_domain_ops = {
+       .xlate = irq_domain_xlate_twocell,
+       .map = pm8821_irq_domain_map,
+};
+
 static const struct regmap_config ssbi_regmap_config = {
        .reg_bits = 16,
        .val_bits = 8,
@@ -308,22 +499,41 @@ static const struct regmap_config ssbi_regmap_config = {
        .reg_write = ssbi_reg_write
 };
 
+static const struct pm_irq_data pm8xxx_data = {
+       .num_irqs = PM8XXX_NR_IRQS,
+       .irq_domain_ops = &pm8xxx_irq_domain_ops,
+       .irq_handler = pm8xxx_irq_handler,
+};
+
+static const struct pm_irq_data pm8821_data = {
+       .num_irqs = PM8821_NR_IRQS,
+       .irq_domain_ops = &pm8821_irq_domain_ops,
+       .irq_handler = pm8821_irq_handler,
+};
+
 static const struct of_device_id pm8xxx_id_table[] = {
-       { .compatible = "qcom,pm8018", },
-       { .compatible = "qcom,pm8058", },
-       { .compatible = "qcom,pm8921", },
+       { .compatible = "qcom,pm8018", .data = &pm8xxx_data},
+       { .compatible = "qcom,pm8058", .data = &pm8xxx_data},
+       { .compatible = "qcom,pm8821", .data = &pm8821_data},
+       { .compatible = "qcom,pm8921", .data = &pm8xxx_data},
        { }
 };
 MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
 
 static int pm8xxx_probe(struct platform_device *pdev)
 {
+       const struct pm_irq_data *data;
        struct regmap *regmap;
        int irq, rc;
        unsigned int val;
        u32 rev;
        struct pm_irq_chip *chip;
-       unsigned int nirqs = PM8XXX_NR_IRQS;
+
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "No matching driver data found\n");
+               return -EINVAL;
+       }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
@@ -354,25 +564,26 @@ static int pm8xxx_probe(struct platform_device *pdev)
        rev |= val << BITS_PER_BYTE;
 
        chip = devm_kzalloc(&pdev->dev, sizeof(*chip) +
-                                       sizeof(chip->config[0]) * nirqs,
-                                       GFP_KERNEL);
+                           sizeof(chip->config[0]) * data->num_irqs,
+                           GFP_KERNEL);
        if (!chip)
                return -ENOMEM;
 
        platform_set_drvdata(pdev, chip);
        chip->regmap = regmap;
-       chip->num_irqs = nirqs;
+       chip->num_irqs = data->num_irqs;
        chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
        chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
        spin_lock_init(&chip->pm_irq_lock);
 
-       chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node, nirqs,
-                                               &pm8xxx_irq_domain_ops,
+       chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
+                                               data->num_irqs,
+                                               data->irq_domain_ops,
                                                chip);
        if (!chip->irqdomain)
                return -ENODEV;
 
-       irq_set_chained_handler_and_data(irq, pm8xxx_irq_handler, chip);
+       irq_set_chained_handler_and_data(irq, data->irq_handler, chip);
        irq_set_irq_wake(irq, 1);
 
        rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
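
All of the PM8821 handlers above share the same hwirq decode: 8 IRQs per
block and PM8821_BLOCKS_PER_MASTER (7) blocks per master, for 112 IRQs
across two masters. Worked example: hwirq 61 gives global block 61/8 = 7,
master 7/7 = 1, bit 61%8 = 5, and block-within-master 7%7 = 0. The decode as
a standalone sketch (helper name hypothetical):

    /* split a PM8821 hwirq into (master, block within master, bit) */
    static void pm8821_decode(unsigned int pmirq,
                              u8 *master, u8 *block, int *bit)
    {
            u8 b = pmirq / 8;       /* global block number */

            *master = b / PM8821_BLOCKS_PER_MASTER;
            *block  = b % PM8821_BLOCKS_PER_MASTER;
            *bit    = pmirq % 8;
    }
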
index 0f8acc5882a45261ffb5f90cffc23807d90b3412..2c9acdba7c2d36d5d5853bd3073a724d991e1426 100644 (file)
@@ -290,6 +290,24 @@ static void rk808_device_shutdown(void)
                dev_err(&rk808_i2c_client->dev, "power off error!\n");
 }
 
+static void rk818_device_shutdown(void)
+{
+       int ret;
+       struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+
+       if (!rk808) {
+               dev_warn(&rk808_i2c_client->dev,
+                        "no rk818 found, so do nothing here\n");
+               return;
+       }
+
+       ret = regmap_update_bits(rk808->regmap,
+                                RK818_DEVCTRL_REG,
+                                DEV_OFF, DEV_OFF);
+       if (ret)
+               dev_err(&rk808_i2c_client->dev, "power off error!\n");
+}
+
 static const struct of_device_id rk808_of_match[] = {
        { .compatible = "rockchip,rk808" },
        { .compatible = "rockchip,rk818" },
@@ -304,6 +322,7 @@ static int rk808_probe(struct i2c_client *client,
        struct rk808 *rk808;
        const struct rk808_reg_data *pre_init_reg;
        const struct mfd_cell *cells;
+       void (*pm_pwroff_fn)(void);
        int nr_pre_init_regs;
        int nr_cells;
        int pm_off = 0;
@@ -331,6 +350,7 @@ static int rk808_probe(struct i2c_client *client,
                nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg);
                cells = rk808s;
                nr_cells = ARRAY_SIZE(rk808s);
+               pm_pwroff_fn = rk808_device_shutdown;
                break;
        case RK818_ID:
                rk808->regmap_cfg = &rk818_regmap_config;
@@ -339,6 +359,7 @@ static int rk808_probe(struct i2c_client *client,
                nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg);
                cells = rk818s;
                nr_cells = ARRAY_SIZE(rk818s);
+               pm_pwroff_fn = rk818_device_shutdown;
                break;
        default:
                dev_err(&client->dev, "Unsupported RK8XX ID %lu\n",
@@ -393,7 +414,7 @@ static int rk808_probe(struct i2c_client *client,
                                "rockchip,system-power-controller");
        if (pm_off && !pm_power_off) {
                rk808_i2c_client = client;
-               pm_power_off = rk808_device_shutdown;
+               pm_power_off = pm_pwroff_fn;
        }
 
        return 0;
index ee94080e1cbb704a6106b97d63fffe74e64a5daa..8131d1975745ec51a6219979f517827508c65497 100644 (file)
@@ -87,6 +87,7 @@ static int rn5t618_restart(struct notifier_block *this,
 static const struct of_device_id rn5t618_of_match[] = {
        { .compatible = "ricoh,rn5t567", .data = (void *)RN5T567 },
        { .compatible = "ricoh,rn5t618", .data = (void *)RN5T618 },
+       { .compatible = "ricoh,rc5t619", .data = (void *)RC5T619 },
        { }
 };
 MODULE_DEVICE_TABLE(of, rn5t618_of_match);
index c180b7533bbad7e953db2ff4c627cf7102a5fb62..e6a3d999a376a68877933b6e3d7b8403e299e1d4 100644 (file)
@@ -753,7 +753,7 @@ static int si476x_core_probe(struct i2c_client *client,
                                       ARRAY_SIZE(core->supplies),
                                       core->supplies);
        if (rval) {
-               dev_err(&client->dev, "Failet to gett all of the regulators\n");
+               dev_err(&client->dev, "Failed to get all of the regulators\n");
                goto free_gpio;
        }
 
diff --git a/drivers/mfd/sun4i-gpadc.c b/drivers/mfd/sun4i-gpadc.c
new file mode 100644 (file)
index 0000000..9cfc881
--- /dev/null
@@ -0,0 +1,181 @@
+/* ADC MFD core driver for sunxi platforms
+ *
+ * Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/sun4i-gpadc.h>
+
+#define ARCH_SUN4I_A10 0
+#define ARCH_SUN5I_A13 1
+#define ARCH_SUN6I_A31 2
+
+static struct resource adc_resources[] = {
+       DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_FIFO_DATA, "FIFO_DATA_PENDING"),
+       DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_TEMP_DATA, "TEMP_DATA_PENDING"),
+};
+
+static const struct regmap_irq sun4i_gpadc_regmap_irq[] = {
+       REGMAP_IRQ_REG(SUN4I_GPADC_IRQ_FIFO_DATA, 0,
+                      SUN4I_GPADC_INT_FIFOC_TP_DATA_IRQ_EN),
+       REGMAP_IRQ_REG(SUN4I_GPADC_IRQ_TEMP_DATA, 0,
+                      SUN4I_GPADC_INT_FIFOC_TEMP_IRQ_EN),
+};
+
+static const struct regmap_irq_chip sun4i_gpadc_regmap_irq_chip = {
+       .name = "sun4i_gpadc_irq_chip",
+       .status_base = SUN4I_GPADC_INT_FIFOS,
+       .ack_base = SUN4I_GPADC_INT_FIFOS,
+       .mask_base = SUN4I_GPADC_INT_FIFOC,
+       .init_ack_masked = true,
+       .mask_invert = true,
+       .irqs = sun4i_gpadc_regmap_irq,
+       .num_irqs = ARRAY_SIZE(sun4i_gpadc_regmap_irq),
+       .num_regs = 1,
+};
+
+static struct mfd_cell sun4i_gpadc_cells[] = {
+       {
+               .name   = "sun4i-a10-gpadc-iio",
+               .resources = adc_resources,
+               .num_resources = ARRAY_SIZE(adc_resources),
+       },
+       { .name = "iio_hwmon" }
+};
+
+static struct mfd_cell sun5i_gpadc_cells[] = {
+       {
+               .name   = "sun5i-a13-gpadc-iio",
+               .resources = adc_resources,
+               .num_resources = ARRAY_SIZE(adc_resources),
+       },
+       { .name = "iio_hwmon" },
+};
+
+static struct mfd_cell sun6i_gpadc_cells[] = {
+       {
+               .name   = "sun6i-a31-gpadc-iio",
+               .resources = adc_resources,
+               .num_resources = ARRAY_SIZE(adc_resources),
+       },
+       { .name = "iio_hwmon" },
+};
+
+static const struct regmap_config sun4i_gpadc_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .fast_io = true,
+};
+
+static const struct of_device_id sun4i_gpadc_of_match[] = {
+       {
+               .compatible = "allwinner,sun4i-a10-ts",
+               .data = (void *)ARCH_SUN4I_A10,
+       }, {
+               .compatible = "allwinner,sun5i-a13-ts",
+               .data = (void *)ARCH_SUN5I_A13,
+       }, {
+               .compatible = "allwinner,sun6i-a31-ts",
+               .data = (void *)ARCH_SUN6I_A31,
+       }, { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_match);
+
+static int sun4i_gpadc_probe(struct platform_device *pdev)
+{
+       struct sun4i_gpadc_dev *dev;
+       struct resource *mem;
+       const struct of_device_id *of_id;
+       const struct mfd_cell *cells;
+       unsigned int irq, size;
+       int ret;
+
+       of_id = of_match_node(sun4i_gpadc_of_match, pdev->dev.of_node);
+       if (!of_id)
+               return -EINVAL;
+
+       switch ((long)of_id->data) {
+       case ARCH_SUN4I_A10:
+               cells = sun4i_gpadc_cells;
+               size = ARRAY_SIZE(sun4i_gpadc_cells);
+               break;
+       case ARCH_SUN5I_A13:
+               cells = sun5i_gpadc_cells;
+               size = ARRAY_SIZE(sun5i_gpadc_cells);
+               break;
+       case ARCH_SUN6I_A31:
+               cells = sun6i_gpadc_cells;
+               size = ARRAY_SIZE(sun6i_gpadc_cells);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dev->base = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(dev->base))
+               return PTR_ERR(dev->base);
+
+       dev->dev = &pdev->dev;
+       dev_set_drvdata(dev->dev, dev);
+
+       dev->regmap = devm_regmap_init_mmio(dev->dev, dev->base,
+                                           &sun4i_gpadc_regmap_config);
+       if (IS_ERR(dev->regmap)) {
+               ret = PTR_ERR(dev->regmap);
+               dev_err(&pdev->dev, "failed to init regmap: %d\n", ret);
+               return ret;
+       }
+
+       /* Disable all interrupts */
+       regmap_write(dev->regmap, SUN4I_GPADC_INT_FIFOC, 0);
+
+       irq = platform_get_irq(pdev, 0);
+       ret = devm_regmap_add_irq_chip(&pdev->dev, dev->regmap, irq,
+                                      IRQF_ONESHOT, 0,
+                                      &sun4i_gpadc_regmap_irq_chip,
+                                      &dev->regmap_irqc);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to add irq chip: %d\n", ret);
+               return ret;
+       }
+
+       ret = devm_mfd_add_devices(dev->dev, 0, cells, size, NULL, 0, NULL);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to add MFD devices: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static struct platform_driver sun4i_gpadc_driver = {
+       .driver = {
+               .name = "sun4i-gpadc",
+               .of_match_table = of_match_ptr(sun4i_gpadc_of_match),
+       },
+       .probe = sun4i_gpadc_probe,
+};
+
+module_platform_driver(sun4i_gpadc_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi platforms' GPADC MFD core driver");
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_LICENSE("GPL v2");
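
As the new Kconfig help earlier notes, this MFD core only maps the interrupt
and the registers; the ADC and hwmon function drivers bind to the cells and
reach the shared state through the parent's drvdata, which probe stored with
dev_set_drvdata(). A minimal sketch of the child side (function name
hypothetical, mirroring the cell names above):

    static int sun4i_gpadc_iio_probe(struct platform_device *pdev)
    {
            /* the MFD core set this on the parent device in its probe */
            struct sun4i_gpadc_dev *gpadc = dev_get_drvdata(pdev->dev.parent);

            if (!gpadc)
                    return -ENODEV;
            /* gpadc->regmap and gpadc->regmap_irqc are now usable */
            return 0;
    }
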
index 274bf39968aaa1a9c9527fe0093cc7790d05f1f6..cc9e563f23aa6072d6160c9b33abe4c901bc1584 100644 (file)
@@ -53,7 +53,7 @@ int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg)
 EXPORT_SYMBOL_GPL(tc3589x_reg_read);
 
 /**
- * tc3589x_reg_read() - write a single TC3589x register
+ * tc3589x_reg_write() - write a single TC3589x register
  * @tc3589x:   Device to write to
  * @reg:       Register to read
  * @data:      Value to write
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(tc3589x_block_write);
  * @tc3589x:   Device to write to
  * @reg:       Register to write
  * @mask:      Mask of bits to set
- * @values:    Value to set
+ * @val:       Value to set
  */
 int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val)
 {
index 9a4d8684dd32bd5beaf73547271bd2b1ee42431f..f769c7d4e335ac20da4db26ae5d7eb024d0b9987 100644 (file)
@@ -42,26 +42,6 @@ static struct resource pb_resources[] = {
        DEFINE_RES_IRQ_NAMED(TPS65217_IRQ_PB, "PB"),
 };
 
-struct tps65217_irq {
-       int mask;
-       int interrupt;
-};
-
-static const struct tps65217_irq tps65217_irqs[] = {
-       [TPS65217_IRQ_PB] = {
-               .mask = TPS65217_INT_PBM,
-               .interrupt = TPS65217_INT_PBI,
-       },
-       [TPS65217_IRQ_AC] = {
-               .mask = TPS65217_INT_ACM,
-               .interrupt = TPS65217_INT_ACI,
-       },
-       [TPS65217_IRQ_USB] = {
-               .mask = TPS65217_INT_USBM,
-               .interrupt = TPS65217_INT_USBI,
-       },
-};
-
 static void tps65217_irq_lock(struct irq_data *data)
 {
        struct tps65217 *tps = irq_data_get_irq_chip_data(data);
@@ -74,37 +54,32 @@ static void tps65217_irq_sync_unlock(struct irq_data *data)
        struct tps65217 *tps = irq_data_get_irq_chip_data(data);
        int ret;
 
-       ret = tps65217_reg_write(tps, TPS65217_REG_INT, tps->irq_mask,
-                               TPS65217_PROTECT_NONE);
+       ret = tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK,
+                               tps->irq_mask, TPS65217_PROTECT_NONE);
        if (ret != 0)
                dev_err(tps->dev, "Failed to sync IRQ masks\n");
 
        mutex_unlock(&tps->irq_lock);
 }
 
-static inline const struct tps65217_irq *
-irq_to_tps65217_irq(struct tps65217 *tps, struct irq_data *data)
-{
-       return &tps65217_irqs[data->hwirq];
-}
-
 static void tps65217_irq_enable(struct irq_data *data)
 {
        struct tps65217 *tps = irq_data_get_irq_chip_data(data);
-       const struct tps65217_irq *irq_data = irq_to_tps65217_irq(tps, data);
+       u8 mask = BIT(data->hwirq) << TPS65217_INT_SHIFT;
 
-       tps->irq_mask &= ~irq_data->mask;
+       tps->irq_mask &= ~mask;
 }
 
 static void tps65217_irq_disable(struct irq_data *data)
 {
        struct tps65217 *tps = irq_data_get_irq_chip_data(data);
-       const struct tps65217_irq *irq_data = irq_to_tps65217_irq(tps, data);
+       u8 mask = BIT(data->hwirq) << TPS65217_INT_SHIFT;
 
-       tps->irq_mask |= irq_data->mask;
+       tps->irq_mask |= mask;
 }
 
 static struct irq_chip tps65217_irq_chip = {
+       .name                   = "tps65217",
        .irq_bus_lock           = tps65217_irq_lock,
        .irq_bus_sync_unlock    = tps65217_irq_sync_unlock,
        .irq_enable             = tps65217_irq_enable,
@@ -149,8 +124,8 @@ static irqreturn_t tps65217_irq_thread(int irq, void *data)
                return IRQ_NONE;
        }
 
-       for (i = 0; i < ARRAY_SIZE(tps65217_irqs); i++) {
-               if (status & tps65217_irqs[i].interrupt) {
+       for (i = 0; i < TPS65217_NUM_IRQ; i++) {
+               if (status & BIT(i)) {
                        handle_nested_irq(irq_find_mapping(tps->irq_domain, i));
                        handled = true;
                }
@@ -188,10 +163,9 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq)
        tps->irq = irq;
 
        /* Mask all interrupt sources */
-       tps->irq_mask = (TPS65217_INT_RESERVEDM | TPS65217_INT_PBM
-                       | TPS65217_INT_ACM | TPS65217_INT_USBM);
-       tps65217_reg_write(tps, TPS65217_REG_INT, tps->irq_mask,
-                       TPS65217_PROTECT_NONE);
+       tps->irq_mask = TPS65217_INT_MASK;
+       tps65217_set_bits(tps, TPS65217_REG_INT, TPS65217_INT_MASK,
+                         TPS65217_INT_MASK, TPS65217_PROTECT_NONE);
 
        tps->irq_domain = irq_domain_add_linear(tps->dev->of_node,
                TPS65217_NUM_IRQ, &tps65217_irq_domain_ops, tps);
@@ -209,6 +183,8 @@ static int tps65217_irq_init(struct tps65217 *tps, int irq)
                return ret;
        }
 
+       enable_irq_wake(irq);
+
        return 0;
 }
 
@@ -424,6 +400,24 @@ static int tps65217_probe(struct i2c_client *client,
        return 0;
 }
 
+static int tps65217_remove(struct i2c_client *client)
+{
+       struct tps65217 *tps = i2c_get_clientdata(client);
+       unsigned int virq;
+       int i;
+
+       for (i = 0; i < TPS65217_NUM_IRQ; i++) {
+               virq = irq_find_mapping(tps->irq_domain, i);
+               if (virq)
+                       irq_dispose_mapping(virq);
+       }
+
+       irq_domain_remove(tps->irq_domain);
+       tps->irq_domain = NULL;
+
+       return 0;
+}
+
 static const struct i2c_device_id tps65217_id_table[] = {
        {"tps65217", TPS65217},
        { /* sentinel */ }
@@ -437,6 +431,7 @@ static struct i2c_driver tps65217_driver = {
        },
        .id_table       = tps65217_id_table,
        .probe          = tps65217_probe,
+       .remove         = tps65217_remove,
 };
 
 static int __init tps65217_init(void)
index ba610adbdbff33503f65cea2ceda2273f95fc2fb..13834a0d28172fe5334f0385571fafb34cc21248 100644 (file)
 
 #define TPS65218_PASSWORD_REGS_UNLOCK   0x7D
 
-/**
- * tps65218_reg_read: Read a single tps65218 register.
- *
- * @tps: Device to read from.
- * @reg: Register to read.
- * @val: Contains the value.
- */
-int tps65218_reg_read(struct tps65218 *tps, unsigned int reg,
-                       unsigned int *val)
-{
-       return regmap_read(tps->regmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(tps65218_reg_read);
+static const struct mfd_cell tps65218_cells[] = {
+       {
+               .name = "tps65218-pwrbutton",
+               .of_compatible = "ti,tps65218-pwrbutton",
+       },
+       {
+               .name = "tps65218-gpio",
+               .of_compatible = "ti,tps65218-gpio",
+       },
+       { .name = "tps65218-regulator", },
+};
 
 /**
  * tps65218_reg_write: Write a single tps65218 register.
@@ -93,7 +91,7 @@ static int tps65218_update_bits(struct tps65218 *tps, unsigned int reg,
        int ret;
        unsigned int data;
 
-       ret = tps65218_reg_read(tps, reg, &data);
+       ret = regmap_read(tps->regmap, reg, &data);
        if (ret) {
                dev_err(tps->dev, "Read from reg 0x%x failed\n", reg);
                return ret;
@@ -251,7 +249,7 @@ static int tps65218_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
 
-       ret = tps65218_reg_read(tps, TPS65218_REG_CHIPID, &chipid);
+       ret = regmap_read(tps->regmap, TPS65218_REG_CHIPID, &chipid);
        if (ret) {
                dev_err(tps->dev, "Failed to read chipid: %d\n", ret);
                return ret;
@@ -259,8 +257,10 @@ static int tps65218_probe(struct i2c_client *client,
 
        tps->rev = chipid & TPS65218_CHIPID_REV_MASK;
 
-       ret = of_platform_populate(client->dev.of_node, NULL, NULL,
-                                  &client->dev);
+       ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65218_cells,
+                             ARRAY_SIZE(tps65218_cells), NULL, 0,
+                             regmap_irq_get_domain(tps->irq_data));
+
        if (ret < 0)
                goto err_irq;
 
index a88cfa80dbc4816f3c236d4c4282f59af09893e0..f33567bc428d1f7a2a6e00ef1b962ea8851d417c 100644 (file)
@@ -77,6 +77,23 @@ static struct regmap_irq_chip tps65912_irq_chip = {
        .init_ack_masked = true,
 };
 
+static const struct regmap_range tps65912_yes_ranges[] = {
+       regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5),
+};
+
+static const struct regmap_access_table tps65912_volatile_table = {
+       .yes_ranges = tps65912_yes_ranges,
+       .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges),
+};
+
+const struct regmap_config tps65912_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .cache_type = REGCACHE_RBTREE,
+       .volatile_table = &tps65912_volatile_table,
+};
+EXPORT_SYMBOL_GPL(tps65912_regmap_config);
+
 int tps65912_device_init(struct tps65912 *tps)
 {
        int ret;
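
Exporting tps65912_regmap_config lets the I2C and SPI glue drivers share one definition; the volatile table punches the INT_STS..GPIO5 window out of the rbtree cache so interrupt and GPIO status reads always hit the silicon. How a bus glue driver might consume it (a sketch; only tps65912_regmap_config is from the patch):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static int demo_i2c_probe(struct i2c_client *client)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &tps65912_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* hand the shared regmap on to tps65912_device_init() */
	return 0;
}
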
index ab8b23b5bd22b085d5fbd1a16338cd36a82db729..853113d97c1e8311e812625f79be82a862e71704 100644 (file)
@@ -244,752 +244,752 @@ const struct regmap_irq_chip wm5102_irq = {
 };
 
 static const struct reg_default wm5102_reg_default[] = {
-       { 0x00000008, 0x0019 },   /* R8     - Ctrl IF SPI CFG 1 */ 
-       { 0x00000009, 0x0001 },   /* R9     - Ctrl IF I2C1 CFG 1 */ 
-       { 0x00000020, 0x0000 },   /* R32    - Tone Generator 1 */ 
-       { 0x00000021, 0x1000 },   /* R33    - Tone Generator 2 */ 
-       { 0x00000022, 0x0000 },   /* R34    - Tone Generator 3 */ 
-       { 0x00000023, 0x1000 },   /* R35    - Tone Generator 4 */ 
-       { 0x00000024, 0x0000 },   /* R36    - Tone Generator 5 */ 
-       { 0x00000030, 0x0000 },   /* R48    - PWM Drive 1 */ 
-       { 0x00000031, 0x0100 },   /* R49    - PWM Drive 2 */ 
-       { 0x00000032, 0x0100 },   /* R50    - PWM Drive 3 */ 
-       { 0x00000040, 0x0000 },   /* R64    - Wake control */ 
-       { 0x00000041, 0x0000 },   /* R65    - Sequence control */ 
-       { 0x00000061, 0x01FF },   /* R97    - Sample Rate Sequence Select 1 */ 
-       { 0x00000062, 0x01FF },   /* R98    - Sample Rate Sequence Select 2 */ 
-       { 0x00000063, 0x01FF },   /* R99    - Sample Rate Sequence Select 3 */ 
-       { 0x00000064, 0x01FF },   /* R100   - Sample Rate Sequence Select 4 */ 
+       { 0x00000008, 0x0019 },   /* R8     - Ctrl IF SPI CFG 1 */
+       { 0x00000009, 0x0001 },   /* R9     - Ctrl IF I2C1 CFG 1 */
+       { 0x00000020, 0x0000 },   /* R32    - Tone Generator 1 */
+       { 0x00000021, 0x1000 },   /* R33    - Tone Generator 2 */
+       { 0x00000022, 0x0000 },   /* R34    - Tone Generator 3 */
+       { 0x00000023, 0x1000 },   /* R35    - Tone Generator 4 */
+       { 0x00000024, 0x0000 },   /* R36    - Tone Generator 5 */
+       { 0x00000030, 0x0000 },   /* R48    - PWM Drive 1 */
+       { 0x00000031, 0x0100 },   /* R49    - PWM Drive 2 */
+       { 0x00000032, 0x0100 },   /* R50    - PWM Drive 3 */
+       { 0x00000040, 0x0000 },   /* R64    - Wake control */
+       { 0x00000041, 0x0000 },   /* R65    - Sequence control */
+       { 0x00000061, 0x01FF },   /* R97    - Sample Rate Sequence Select 1 */
+       { 0x00000062, 0x01FF },   /* R98    - Sample Rate Sequence Select 2 */
+       { 0x00000063, 0x01FF },   /* R99    - Sample Rate Sequence Select 3 */
+       { 0x00000064, 0x01FF },   /* R100   - Sample Rate Sequence Select 4 */
        { 0x00000066, 0x01FF },   /* R102   - Always On Triggers Sequence Select 1 */
        { 0x00000067, 0x01FF },   /* R103   - Always On Triggers Sequence Select 2 */
        { 0x00000068, 0x01FF },   /* R104   - Always On Triggers Sequence Select 3 */
        { 0x00000069, 0x01FF },   /* R105   - Always On Triggers Sequence Select 4 */
        { 0x0000006A, 0x01FF },   /* R106   - Always On Triggers Sequence Select 5 */
        { 0x0000006B, 0x01FF },   /* R107   - Always On Triggers Sequence Select 6 */
-       { 0x00000070, 0x0000 },   /* R112   - Comfort Noise Generator */ 
-       { 0x00000090, 0x0000 },   /* R144   - Haptics Control 1 */ 
-       { 0x00000091, 0x7FFF },   /* R145   - Haptics Control 2 */ 
-       { 0x00000092, 0x0000 },   /* R146   - Haptics phase 1 intensity */ 
-       { 0x00000093, 0x0000 },   /* R147   - Haptics phase 1 duration */ 
-       { 0x00000094, 0x0000 },   /* R148   - Haptics phase 2 intensity */ 
-       { 0x00000095, 0x0000 },   /* R149   - Haptics phase 2 duration */ 
-       { 0x00000096, 0x0000 },   /* R150   - Haptics phase 3 intensity */ 
-       { 0x00000097, 0x0000 },   /* R151   - Haptics phase 3 duration */ 
+       { 0x00000070, 0x0000 },   /* R112   - Comfort Noise Generator */
+       { 0x00000090, 0x0000 },   /* R144   - Haptics Control 1 */
+       { 0x00000091, 0x7FFF },   /* R145   - Haptics Control 2 */
+       { 0x00000092, 0x0000 },   /* R146   - Haptics phase 1 intensity */
+       { 0x00000093, 0x0000 },   /* R147   - Haptics phase 1 duration */
+       { 0x00000094, 0x0000 },   /* R148   - Haptics phase 2 intensity */
+       { 0x00000095, 0x0000 },   /* R149   - Haptics phase 2 duration */
+       { 0x00000096, 0x0000 },   /* R150   - Haptics phase 3 intensity */
+       { 0x00000097, 0x0000 },   /* R151   - Haptics phase 3 duration */
        { 0x00000100, 0x0002 },   /* R256   - Clock 32k 1 */
-       { 0x00000101, 0x0304 },   /* R257   - System Clock 1 */ 
-       { 0x00000102, 0x0011 },   /* R258   - Sample rate 1 */ 
-       { 0x00000103, 0x0011 },   /* R259   - Sample rate 2 */ 
-       { 0x00000104, 0x0011 },   /* R260   - Sample rate 3 */ 
-       { 0x00000112, 0x0305 },   /* R274   - Async clock 1 */ 
-       { 0x00000113, 0x0011 },   /* R275   - Async sample rate 1 */ 
+       { 0x00000101, 0x0304 },   /* R257   - System Clock 1 */
+       { 0x00000102, 0x0011 },   /* R258   - Sample rate 1 */
+       { 0x00000103, 0x0011 },   /* R259   - Sample rate 2 */
+       { 0x00000104, 0x0011 },   /* R260   - Sample rate 3 */
+       { 0x00000112, 0x0305 },   /* R274   - Async clock 1 */
+       { 0x00000113, 0x0011 },   /* R275   - Async sample rate 1 */
        { 0x00000114, 0x0011 },   /* R276   - Async sample rate 2 */
-       { 0x00000149, 0x0000 },   /* R329   - Output system clock */ 
-       { 0x0000014A, 0x0000 },   /* R330   - Output async clock */ 
-       { 0x00000152, 0x0000 },   /* R338   - Rate Estimator 1 */ 
-       { 0x00000153, 0x0000 },   /* R339   - Rate Estimator 2 */ 
-       { 0x00000154, 0x0000 },   /* R340   - Rate Estimator 3 */ 
-       { 0x00000155, 0x0000 },   /* R341   - Rate Estimator 4 */ 
-       { 0x00000156, 0x0000 },   /* R342   - Rate Estimator 5 */ 
-       { 0x00000161, 0x0000 },   /* R353   - Dynamic Frequency Scaling 1 */ 
+       { 0x00000149, 0x0000 },   /* R329   - Output system clock */
+       { 0x0000014A, 0x0000 },   /* R330   - Output async clock */
+       { 0x00000152, 0x0000 },   /* R338   - Rate Estimator 1 */
+       { 0x00000153, 0x0000 },   /* R339   - Rate Estimator 2 */
+       { 0x00000154, 0x0000 },   /* R340   - Rate Estimator 3 */
+       { 0x00000155, 0x0000 },   /* R341   - Rate Estimator 4 */
+       { 0x00000156, 0x0000 },   /* R342   - Rate Estimator 5 */
+       { 0x00000161, 0x0000 },   /* R353   - Dynamic Frequency Scaling 1 */
        { 0x00000171, 0x0000 },   /* R369   - FLL1 Control 1 */
-       { 0x00000172, 0x0008 },   /* R370   - FLL1 Control 2 */ 
-       { 0x00000173, 0x0018 },   /* R371   - FLL1 Control 3 */ 
-       { 0x00000174, 0x007D },   /* R372   - FLL1 Control 4 */ 
-       { 0x00000175, 0x0004 },   /* R373   - FLL1 Control 5 */ 
-       { 0x00000176, 0x0000 },   /* R374   - FLL1 Control 6 */ 
+       { 0x00000172, 0x0008 },   /* R370   - FLL1 Control 2 */
+       { 0x00000173, 0x0018 },   /* R371   - FLL1 Control 3 */
+       { 0x00000174, 0x007D },   /* R372   - FLL1 Control 4 */
+       { 0x00000175, 0x0004 },   /* R373   - FLL1 Control 5 */
+       { 0x00000176, 0x0000 },   /* R374   - FLL1 Control 6 */
        { 0x00000179, 0x0000 },   /* R377   - FLL1 Control 7 */
-       { 0x00000181, 0x0000 },   /* R385   - FLL1 Synchroniser 1 */ 
-       { 0x00000182, 0x0000 },   /* R386   - FLL1 Synchroniser 2 */ 
-       { 0x00000183, 0x0000 },   /* R387   - FLL1 Synchroniser 3 */ 
-       { 0x00000184, 0x0000 },   /* R388   - FLL1 Synchroniser 4 */ 
-       { 0x00000185, 0x0000 },   /* R389   - FLL1 Synchroniser 5 */ 
-       { 0x00000186, 0x0000 },   /* R390   - FLL1 Synchroniser 6 */ 
+       { 0x00000181, 0x0000 },   /* R385   - FLL1 Synchroniser 1 */
+       { 0x00000182, 0x0000 },   /* R386   - FLL1 Synchroniser 2 */
+       { 0x00000183, 0x0000 },   /* R387   - FLL1 Synchroniser 3 */
+       { 0x00000184, 0x0000 },   /* R388   - FLL1 Synchroniser 4 */
+       { 0x00000185, 0x0000 },   /* R389   - FLL1 Synchroniser 5 */
+       { 0x00000186, 0x0000 },   /* R390   - FLL1 Synchroniser 6 */
        { 0x00000187, 0x0001 },   /* R391   - FLL1 Synchroniser 7 */
-       { 0x00000189, 0x0000 },   /* R393   - FLL1 Spread Spectrum */ 
-       { 0x0000018A, 0x0004 },   /* R394   - FLL1 GPIO Clock */ 
-       { 0x00000191, 0x0000 },   /* R401   - FLL2 Control 1 */ 
-       { 0x00000192, 0x0008 },   /* R402   - FLL2 Control 2 */ 
-       { 0x00000193, 0x0018 },   /* R403   - FLL2 Control 3 */ 
-       { 0x00000194, 0x007D },   /* R404   - FLL2 Control 4 */ 
-       { 0x00000195, 0x0004 },   /* R405   - FLL2 Control 5 */ 
-       { 0x00000196, 0x0000 },   /* R406   - FLL2 Control 6 */ 
+       { 0x00000189, 0x0000 },   /* R393   - FLL1 Spread Spectrum */
+       { 0x0000018A, 0x0004 },   /* R394   - FLL1 GPIO Clock */
+       { 0x00000191, 0x0000 },   /* R401   - FLL2 Control 1 */
+       { 0x00000192, 0x0008 },   /* R402   - FLL2 Control 2 */
+       { 0x00000193, 0x0018 },   /* R403   - FLL2 Control 3 */
+       { 0x00000194, 0x007D },   /* R404   - FLL2 Control 4 */
+       { 0x00000195, 0x0004 },   /* R405   - FLL2 Control 5 */
+       { 0x00000196, 0x0000 },   /* R406   - FLL2 Control 6 */
        { 0x00000199, 0x0000 },   /* R409   - FLL2 Control 7 */
-       { 0x000001A1, 0x0000 },   /* R417   - FLL2 Synchroniser 1 */ 
-       { 0x000001A2, 0x0000 },   /* R418   - FLL2 Synchroniser 2 */ 
-       { 0x000001A3, 0x0000 },   /* R419   - FLL2 Synchroniser 3 */ 
-       { 0x000001A4, 0x0000 },   /* R420   - FLL2 Synchroniser 4 */ 
-       { 0x000001A5, 0x0000 },   /* R421   - FLL2 Synchroniser 5 */ 
-       { 0x000001A6, 0x0000 },   /* R422   - FLL2 Synchroniser 6 */ 
+       { 0x000001A1, 0x0000 },   /* R417   - FLL2 Synchroniser 1 */
+       { 0x000001A2, 0x0000 },   /* R418   - FLL2 Synchroniser 2 */
+       { 0x000001A3, 0x0000 },   /* R419   - FLL2 Synchroniser 3 */
+       { 0x000001A4, 0x0000 },   /* R420   - FLL2 Synchroniser 4 */
+       { 0x000001A5, 0x0000 },   /* R421   - FLL2 Synchroniser 5 */
+       { 0x000001A6, 0x0000 },   /* R422   - FLL2 Synchroniser 6 */
        { 0x000001A7, 0x0001 },   /* R423   - FLL2 Synchroniser 7 */
-       { 0x000001A9, 0x0000 },   /* R425   - FLL2 Spread Spectrum */ 
-       { 0x000001AA, 0x0004 },   /* R426   - FLL2 GPIO Clock */ 
-       { 0x00000200, 0x0006 },   /* R512   - Mic Charge Pump 1 */ 
-       { 0x00000210, 0x00D4 },   /* R528   - LDO1 Control 1 */ 
+       { 0x000001A9, 0x0000 },   /* R425   - FLL2 Spread Spectrum */
+       { 0x000001AA, 0x0004 },   /* R426   - FLL2 GPIO Clock */
+       { 0x00000200, 0x0006 },   /* R512   - Mic Charge Pump 1 */
+       { 0x00000210, 0x00D4 },   /* R528   - LDO1 Control 1 */
        { 0x00000212, 0x0000 },   /* R530   - LDO1 Control 2 */
-       { 0x00000213, 0x0344 },   /* R531   - LDO2 Control 1 */ 
-       { 0x00000218, 0x01A6 },   /* R536   - Mic Bias Ctrl 1 */ 
-       { 0x00000219, 0x01A6 },   /* R537   - Mic Bias Ctrl 2 */ 
-       { 0x0000021A, 0x01A6 },   /* R538   - Mic Bias Ctrl 3 */ 
-       { 0x00000293, 0x0000 },   /* R659   - Accessory Detect Mode 1 */ 
-       { 0x0000029B, 0x0020 },   /* R667   - Headphone Detect 1 */ 
+       { 0x00000213, 0x0344 },   /* R531   - LDO2 Control 1 */
+       { 0x00000218, 0x01A6 },   /* R536   - Mic Bias Ctrl 1 */
+       { 0x00000219, 0x01A6 },   /* R537   - Mic Bias Ctrl 2 */
+       { 0x0000021A, 0x01A6 },   /* R538   - Mic Bias Ctrl 3 */
+       { 0x00000293, 0x0000 },   /* R659   - Accessory Detect Mode 1 */
+       { 0x0000029B, 0x0020 },   /* R667   - Headphone Detect 1 */
        { 0x000002A2, 0x0000 },   /* R674   - Micd clamp control */
-       { 0x000002A3, 0x1102 },   /* R675   - Mic Detect 1 */ 
-       { 0x000002A4, 0x009F },   /* R676   - Mic Detect 2 */ 
+       { 0x000002A3, 0x1102 },   /* R675   - Mic Detect 1 */
+       { 0x000002A4, 0x009F },   /* R676   - Mic Detect 2 */
        { 0x000002A6, 0x3737 },   /* R678   - Mic Detect Level 1 */
        { 0x000002A7, 0x2C37 },   /* R679   - Mic Detect Level 2 */
        { 0x000002A8, 0x1422 },   /* R680   - Mic Detect Level 3 */
        { 0x000002A9, 0x030A },   /* R681   - Mic Detect Level 4 */
-       { 0x000002C3, 0x0000 },   /* R707   - Mic noise mix control 1 */ 
-       { 0x000002CB, 0x0000 },   /* R715   - Isolation control */ 
-       { 0x000002D3, 0x0000 },   /* R723   - Jack detect analogue */ 
-       { 0x00000300, 0x0000 },   /* R768   - Input Enables */ 
-       { 0x00000308, 0x0000 },   /* R776   - Input Rate */ 
-       { 0x00000309, 0x0022 },   /* R777   - Input Volume Ramp */ 
-       { 0x00000310, 0x2080 },   /* R784   - IN1L Control */ 
-       { 0x00000311, 0x0180 },   /* R785   - ADC Digital Volume 1L */ 
-       { 0x00000312, 0x0000 },   /* R786   - DMIC1L Control */ 
-       { 0x00000314, 0x0080 },   /* R788   - IN1R Control */ 
-       { 0x00000315, 0x0180 },   /* R789   - ADC Digital Volume 1R */ 
-       { 0x00000316, 0x0000 },   /* R790   - DMIC1R Control */ 
-       { 0x00000318, 0x2080 },   /* R792   - IN2L Control */ 
-       { 0x00000319, 0x0180 },   /* R793   - ADC Digital Volume 2L */ 
-       { 0x0000031A, 0x0000 },   /* R794   - DMIC2L Control */ 
-       { 0x0000031C, 0x0080 },   /* R796   - IN2R Control */ 
-       { 0x0000031D, 0x0180 },   /* R797   - ADC Digital Volume 2R */ 
-       { 0x0000031E, 0x0000 },   /* R798   - DMIC2R Control */ 
-       { 0x00000320, 0x2080 },   /* R800   - IN3L Control */ 
-       { 0x00000321, 0x0180 },   /* R801   - ADC Digital Volume 3L */ 
-       { 0x00000322, 0x0000 },   /* R802   - DMIC3L Control */ 
-       { 0x00000324, 0x0080 },   /* R804   - IN3R Control */ 
-       { 0x00000325, 0x0180 },   /* R805   - ADC Digital Volume 3R */ 
-       { 0x00000326, 0x0000 },   /* R806   - DMIC3R Control */ 
-       { 0x00000400, 0x0000 },   /* R1024  - Output Enables 1 */ 
-       { 0x00000408, 0x0000 },   /* R1032  - Output Rate 1 */ 
-       { 0x00000409, 0x0022 },   /* R1033  - Output Volume Ramp */ 
+       { 0x000002C3, 0x0000 },   /* R707   - Mic noise mix control 1 */
+       { 0x000002CB, 0x0000 },   /* R715   - Isolation control */
+       { 0x000002D3, 0x0000 },   /* R723   - Jack detect analogue */
+       { 0x00000300, 0x0000 },   /* R768   - Input Enables */
+       { 0x00000308, 0x0000 },   /* R776   - Input Rate */
+       { 0x00000309, 0x0022 },   /* R777   - Input Volume Ramp */
+       { 0x00000310, 0x2080 },   /* R784   - IN1L Control */
+       { 0x00000311, 0x0180 },   /* R785   - ADC Digital Volume 1L */
+       { 0x00000312, 0x0000 },   /* R786   - DMIC1L Control */
+       { 0x00000314, 0x0080 },   /* R788   - IN1R Control */
+       { 0x00000315, 0x0180 },   /* R789   - ADC Digital Volume 1R */
+       { 0x00000316, 0x0000 },   /* R790   - DMIC1R Control */
+       { 0x00000318, 0x2080 },   /* R792   - IN2L Control */
+       { 0x00000319, 0x0180 },   /* R793   - ADC Digital Volume 2L */
+       { 0x0000031A, 0x0000 },   /* R794   - DMIC2L Control */
+       { 0x0000031C, 0x0080 },   /* R796   - IN2R Control */
+       { 0x0000031D, 0x0180 },   /* R797   - ADC Digital Volume 2R */
+       { 0x0000031E, 0x0000 },   /* R798   - DMIC2R Control */
+       { 0x00000320, 0x2080 },   /* R800   - IN3L Control */
+       { 0x00000321, 0x0180 },   /* R801   - ADC Digital Volume 3L */
+       { 0x00000322, 0x0000 },   /* R802   - DMIC3L Control */
+       { 0x00000324, 0x0080 },   /* R804   - IN3R Control */
+       { 0x00000325, 0x0180 },   /* R805   - ADC Digital Volume 3R */
+       { 0x00000326, 0x0000 },   /* R806   - DMIC3R Control */
+       { 0x00000400, 0x0000 },   /* R1024  - Output Enables 1 */
+       { 0x00000408, 0x0000 },   /* R1032  - Output Rate 1 */
+       { 0x00000409, 0x0022 },   /* R1033  - Output Volume Ramp */
        { 0x00000410, 0x6080 },   /* R1040  - Output Path Config 1L */
-       { 0x00000411, 0x0180 },   /* R1041  - DAC Digital Volume 1L */ 
+       { 0x00000411, 0x0180 },   /* R1041  - DAC Digital Volume 1L */
        { 0x00000412, 0x0081 },   /* R1042  - DAC Volume Limit 1L */
-       { 0x00000413, 0x0001 },   /* R1043  - Noise Gate Select 1L */ 
-       { 0x00000414, 0x0080 },   /* R1044  - Output Path Config 1R */ 
-       { 0x00000415, 0x0180 },   /* R1045  - DAC Digital Volume 1R */ 
+       { 0x00000413, 0x0001 },   /* R1043  - Noise Gate Select 1L */
+       { 0x00000414, 0x0080 },   /* R1044  - Output Path Config 1R */
+       { 0x00000415, 0x0180 },   /* R1045  - DAC Digital Volume 1R */
        { 0x00000416, 0x0081 },   /* R1046  - DAC Volume Limit 1R */
-       { 0x00000417, 0x0002 },   /* R1047  - Noise Gate Select 1R */ 
+       { 0x00000417, 0x0002 },   /* R1047  - Noise Gate Select 1R */
        { 0x00000418, 0xA080 },   /* R1048  - Output Path Config 2L */
-       { 0x00000419, 0x0180 },   /* R1049  - DAC Digital Volume 2L */ 
+       { 0x00000419, 0x0180 },   /* R1049  - DAC Digital Volume 2L */
        { 0x0000041A, 0x0081 },   /* R1050  - DAC Volume Limit 2L */
-       { 0x0000041B, 0x0004 },   /* R1051  - Noise Gate Select 2L */ 
-       { 0x0000041C, 0x0080 },   /* R1052  - Output Path Config 2R */ 
-       { 0x0000041D, 0x0180 },   /* R1053  - DAC Digital Volume 2R */ 
+       { 0x0000041B, 0x0004 },   /* R1051  - Noise Gate Select 2L */
+       { 0x0000041C, 0x0080 },   /* R1052  - Output Path Config 2R */
+       { 0x0000041D, 0x0180 },   /* R1053  - DAC Digital Volume 2R */
        { 0x0000041E, 0x0081 },   /* R1054  - DAC Volume Limit 2R */
-       { 0x0000041F, 0x0008 },   /* R1055  - Noise Gate Select 2R */ 
+       { 0x0000041F, 0x0008 },   /* R1055  - Noise Gate Select 2R */
        { 0x00000420, 0xA080 },   /* R1056  - Output Path Config 3L */
-       { 0x00000421, 0x0180 },   /* R1057  - DAC Digital Volume 3L */ 
+       { 0x00000421, 0x0180 },   /* R1057  - DAC Digital Volume 3L */
        { 0x00000422, 0x0081 },   /* R1058  - DAC Volume Limit 3L */
-       { 0x00000423, 0x0010 },   /* R1059  - Noise Gate Select 3L */ 
+       { 0x00000423, 0x0010 },   /* R1059  - Noise Gate Select 3L */
        { 0x00000428, 0xE000 },   /* R1064  - Output Path Config 4L */
-       { 0x00000429, 0x0180 },   /* R1065  - DAC Digital Volume 4L */ 
+       { 0x00000429, 0x0180 },   /* R1065  - DAC Digital Volume 4L */
        { 0x0000042A, 0x0081 },   /* R1066  - Out Volume 4L */
-       { 0x0000042B, 0x0040 },   /* R1067  - Noise Gate Select 4L */ 
-       { 0x0000042D, 0x0180 },   /* R1069  - DAC Digital Volume 4R */ 
+       { 0x0000042B, 0x0040 },   /* R1067  - Noise Gate Select 4L */
+       { 0x0000042D, 0x0180 },   /* R1069  - DAC Digital Volume 4R */
        { 0x0000042E, 0x0081 },   /* R1070  - Out Volume 4R */
-       { 0x0000042F, 0x0080 },   /* R1071  - Noise Gate Select 4R */ 
-       { 0x00000430, 0x0000 },   /* R1072  - Output Path Config 5L */ 
-       { 0x00000431, 0x0180 },   /* R1073  - DAC Digital Volume 5L */ 
+       { 0x0000042F, 0x0080 },   /* R1071  - Noise Gate Select 4R */
+       { 0x00000430, 0x0000 },   /* R1072  - Output Path Config 5L */
+       { 0x00000431, 0x0180 },   /* R1073  - DAC Digital Volume 5L */
        { 0x00000432, 0x0081 },   /* R1074  - DAC Volume Limit 5L */
-       { 0x00000433, 0x0100 },   /* R1075  - Noise Gate Select 5L */ 
-       { 0x00000435, 0x0180 },   /* R1077  - DAC Digital Volume 5R */ 
+       { 0x00000433, 0x0100 },   /* R1075  - Noise Gate Select 5L */
+       { 0x00000435, 0x0180 },   /* R1077  - DAC Digital Volume 5R */
        { 0x00000436, 0x0081 },   /* R1078  - DAC Volume Limit 5R */
        { 0x00000437, 0x0200 },   /* R1079  - Noise Gate Select 5R */
        { 0x00000440, 0x0FFF },   /* R1088  - DRE Enable */
        { 0x00000442, 0x3F0A },   /* R1090  - DRE Control 2 */
        { 0x00000443, 0xDC1F },   /* R1091  - DRE Control 3 */
-       { 0x00000450, 0x0000 },   /* R1104  - DAC AEC Control 1 */ 
+       { 0x00000450, 0x0000 },   /* R1104  - DAC AEC Control 1 */
        { 0x00000458, 0x000B },   /* R1112  - Noise Gate Control */
-       { 0x00000490, 0x0069 },   /* R1168  - PDM SPK1 CTRL 1 */ 
-       { 0x00000491, 0x0000 },   /* R1169  - PDM SPK1 CTRL 2 */ 
-       { 0x00000500, 0x000C },   /* R1280  - AIF1 BCLK Ctrl */ 
-       { 0x00000501, 0x0008 },   /* R1281  - AIF1 Tx Pin Ctrl */ 
-       { 0x00000502, 0x0000 },   /* R1282  - AIF1 Rx Pin Ctrl */ 
-       { 0x00000503, 0x0000 },   /* R1283  - AIF1 Rate Ctrl */ 
-       { 0x00000504, 0x0000 },   /* R1284  - AIF1 Format */ 
-       { 0x00000505, 0x0040 },   /* R1285  - AIF1 Tx BCLK Rate */ 
-       { 0x00000506, 0x0040 },   /* R1286  - AIF1 Rx BCLK Rate */ 
-       { 0x00000507, 0x1818 },   /* R1287  - AIF1 Frame Ctrl 1 */ 
-       { 0x00000508, 0x1818 },   /* R1288  - AIF1 Frame Ctrl 2 */ 
-       { 0x00000509, 0x0000 },   /* R1289  - AIF1 Frame Ctrl 3 */ 
-       { 0x0000050A, 0x0001 },   /* R1290  - AIF1 Frame Ctrl 4 */ 
-       { 0x0000050B, 0x0002 },   /* R1291  - AIF1 Frame Ctrl 5 */ 
-       { 0x0000050C, 0x0003 },   /* R1292  - AIF1 Frame Ctrl 6 */ 
-       { 0x0000050D, 0x0004 },   /* R1293  - AIF1 Frame Ctrl 7 */ 
-       { 0x0000050E, 0x0005 },   /* R1294  - AIF1 Frame Ctrl 8 */ 
-       { 0x0000050F, 0x0006 },   /* R1295  - AIF1 Frame Ctrl 9 */ 
-       { 0x00000510, 0x0007 },   /* R1296  - AIF1 Frame Ctrl 10 */ 
-       { 0x00000511, 0x0000 },   /* R1297  - AIF1 Frame Ctrl 11 */ 
-       { 0x00000512, 0x0001 },   /* R1298  - AIF1 Frame Ctrl 12 */ 
-       { 0x00000513, 0x0002 },   /* R1299  - AIF1 Frame Ctrl 13 */ 
-       { 0x00000514, 0x0003 },   /* R1300  - AIF1 Frame Ctrl 14 */ 
-       { 0x00000515, 0x0004 },   /* R1301  - AIF1 Frame Ctrl 15 */ 
-       { 0x00000516, 0x0005 },   /* R1302  - AIF1 Frame Ctrl 16 */ 
-       { 0x00000517, 0x0006 },   /* R1303  - AIF1 Frame Ctrl 17 */ 
-       { 0x00000518, 0x0007 },   /* R1304  - AIF1 Frame Ctrl 18 */ 
-       { 0x00000519, 0x0000 },   /* R1305  - AIF1 Tx Enables */ 
-       { 0x0000051A, 0x0000 },   /* R1306  - AIF1 Rx Enables */ 
-       { 0x00000540, 0x000C },   /* R1344  - AIF2 BCLK Ctrl */ 
-       { 0x00000541, 0x0008 },   /* R1345  - AIF2 Tx Pin Ctrl */ 
-       { 0x00000542, 0x0000 },   /* R1346  - AIF2 Rx Pin Ctrl */ 
-       { 0x00000543, 0x0000 },   /* R1347  - AIF2 Rate Ctrl */ 
-       { 0x00000544, 0x0000 },   /* R1348  - AIF2 Format */ 
-       { 0x00000545, 0x0040 },   /* R1349  - AIF2 Tx BCLK Rate */ 
-       { 0x00000546, 0x0040 },   /* R1350  - AIF2 Rx BCLK Rate */ 
-       { 0x00000547, 0x1818 },   /* R1351  - AIF2 Frame Ctrl 1 */ 
-       { 0x00000548, 0x1818 },   /* R1352  - AIF2 Frame Ctrl 2 */ 
-       { 0x00000549, 0x0000 },   /* R1353  - AIF2 Frame Ctrl 3 */ 
-       { 0x0000054A, 0x0001 },   /* R1354  - AIF2 Frame Ctrl 4 */ 
-       { 0x00000551, 0x0000 },   /* R1361  - AIF2 Frame Ctrl 11 */ 
-       { 0x00000552, 0x0001 },   /* R1362  - AIF2 Frame Ctrl 12 */ 
-       { 0x00000559, 0x0000 },   /* R1369  - AIF2 Tx Enables */ 
-       { 0x0000055A, 0x0000 },   /* R1370  - AIF2 Rx Enables */ 
-       { 0x00000580, 0x000C },   /* R1408  - AIF3 BCLK Ctrl */ 
-       { 0x00000581, 0x0008 },   /* R1409  - AIF3 Tx Pin Ctrl */ 
-       { 0x00000582, 0x0000 },   /* R1410  - AIF3 Rx Pin Ctrl */ 
-       { 0x00000583, 0x0000 },   /* R1411  - AIF3 Rate Ctrl */ 
-       { 0x00000584, 0x0000 },   /* R1412  - AIF3 Format */ 
-       { 0x00000585, 0x0040 },   /* R1413  - AIF3 Tx BCLK Rate */ 
-       { 0x00000586, 0x0040 },   /* R1414  - AIF3 Rx BCLK Rate */ 
-       { 0x00000587, 0x1818 },   /* R1415  - AIF3 Frame Ctrl 1 */ 
-       { 0x00000588, 0x1818 },   /* R1416  - AIF3 Frame Ctrl 2 */ 
-       { 0x00000589, 0x0000 },   /* R1417  - AIF3 Frame Ctrl 3 */ 
-       { 0x0000058A, 0x0001 },   /* R1418  - AIF3 Frame Ctrl 4 */ 
-       { 0x00000591, 0x0000 },   /* R1425  - AIF3 Frame Ctrl 11 */ 
-       { 0x00000592, 0x0001 },   /* R1426  - AIF3 Frame Ctrl 12 */ 
-       { 0x00000599, 0x0000 },   /* R1433  - AIF3 Tx Enables */ 
-       { 0x0000059A, 0x0000 },   /* R1434  - AIF3 Rx Enables */ 
-       { 0x000005E3, 0x0004 },   /* R1507  - SLIMbus Framer Ref Gear */ 
-       { 0x000005E5, 0x0000 },   /* R1509  - SLIMbus Rates 1 */ 
-       { 0x000005E6, 0x0000 },   /* R1510  - SLIMbus Rates 2 */ 
-       { 0x000005E7, 0x0000 },   /* R1511  - SLIMbus Rates 3 */ 
-       { 0x000005E8, 0x0000 },   /* R1512  - SLIMbus Rates 4 */ 
-       { 0x000005E9, 0x0000 },   /* R1513  - SLIMbus Rates 5 */ 
-       { 0x000005EA, 0x0000 },   /* R1514  - SLIMbus Rates 6 */ 
-       { 0x000005EB, 0x0000 },   /* R1515  - SLIMbus Rates 7 */ 
-       { 0x000005EC, 0x0000 },   /* R1516  - SLIMbus Rates 8 */ 
-       { 0x000005F5, 0x0000 },   /* R1525  - SLIMbus RX Channel Enable */ 
-       { 0x000005F6, 0x0000 },   /* R1526  - SLIMbus TX Channel Enable */ 
-       { 0x00000640, 0x0000 },   /* R1600  - PWM1MIX Input 1 Source */ 
-       { 0x00000641, 0x0080 },   /* R1601  - PWM1MIX Input 1 Volume */ 
-       { 0x00000642, 0x0000 },   /* R1602  - PWM1MIX Input 2 Source */ 
-       { 0x00000643, 0x0080 },   /* R1603  - PWM1MIX Input 2 Volume */ 
-       { 0x00000644, 0x0000 },   /* R1604  - PWM1MIX Input 3 Source */ 
-       { 0x00000645, 0x0080 },   /* R1605  - PWM1MIX Input 3 Volume */ 
-       { 0x00000646, 0x0000 },   /* R1606  - PWM1MIX Input 4 Source */ 
-       { 0x00000647, 0x0080 },   /* R1607  - PWM1MIX Input 4 Volume */ 
-       { 0x00000648, 0x0000 },   /* R1608  - PWM2MIX Input 1 Source */ 
-       { 0x00000649, 0x0080 },   /* R1609  - PWM2MIX Input 1 Volume */ 
-       { 0x0000064A, 0x0000 },   /* R1610  - PWM2MIX Input 2 Source */ 
-       { 0x0000064B, 0x0080 },   /* R1611  - PWM2MIX Input 2 Volume */ 
-       { 0x0000064C, 0x0000 },   /* R1612  - PWM2MIX Input 3 Source */ 
-       { 0x0000064D, 0x0080 },   /* R1613  - PWM2MIX Input 3 Volume */ 
-       { 0x0000064E, 0x0000 },   /* R1614  - PWM2MIX Input 4 Source */ 
-       { 0x0000064F, 0x0080 },   /* R1615  - PWM2MIX Input 4 Volume */ 
-       { 0x00000660, 0x0000 },   /* R1632  - MICMIX Input 1 Source */ 
-       { 0x00000661, 0x0080 },   /* R1633  - MICMIX Input 1 Volume */ 
-       { 0x00000662, 0x0000 },   /* R1634  - MICMIX Input 2 Source */ 
-       { 0x00000663, 0x0080 },   /* R1635  - MICMIX Input 2 Volume */ 
-       { 0x00000664, 0x0000 },   /* R1636  - MICMIX Input 3 Source */ 
-       { 0x00000665, 0x0080 },   /* R1637  - MICMIX Input 3 Volume */ 
-       { 0x00000666, 0x0000 },   /* R1638  - MICMIX Input 4 Source */ 
-       { 0x00000667, 0x0080 },   /* R1639  - MICMIX Input 4 Volume */ 
-       { 0x00000668, 0x0000 },   /* R1640  - NOISEMIX Input 1 Source */ 
-       { 0x00000669, 0x0080 },   /* R1641  - NOISEMIX Input 1 Volume */ 
-       { 0x0000066A, 0x0000 },   /* R1642  - NOISEMIX Input 2 Source */ 
-       { 0x0000066B, 0x0080 },   /* R1643  - NOISEMIX Input 2 Volume */ 
-       { 0x0000066C, 0x0000 },   /* R1644  - NOISEMIX Input 3 Source */ 
-       { 0x0000066D, 0x0080 },   /* R1645  - NOISEMIX Input 3 Volume */ 
-       { 0x0000066E, 0x0000 },   /* R1646  - NOISEMIX Input 4 Source */ 
-       { 0x0000066F, 0x0080 },   /* R1647  - NOISEMIX Input 4 Volume */ 
-       { 0x00000680, 0x0000 },   /* R1664  - OUT1LMIX Input 1 Source */ 
-       { 0x00000681, 0x0080 },   /* R1665  - OUT1LMIX Input 1 Volume */ 
-       { 0x00000682, 0x0000 },   /* R1666  - OUT1LMIX Input 2 Source */ 
-       { 0x00000683, 0x0080 },   /* R1667  - OUT1LMIX Input 2 Volume */ 
-       { 0x00000684, 0x0000 },   /* R1668  - OUT1LMIX Input 3 Source */ 
-       { 0x00000685, 0x0080 },   /* R1669  - OUT1LMIX Input 3 Volume */ 
-       { 0x00000686, 0x0000 },   /* R1670  - OUT1LMIX Input 4 Source */ 
-       { 0x00000687, 0x0080 },   /* R1671  - OUT1LMIX Input 4 Volume */ 
-       { 0x00000688, 0x0000 },   /* R1672  - OUT1RMIX Input 1 Source */ 
-       { 0x00000689, 0x0080 },   /* R1673  - OUT1RMIX Input 1 Volume */ 
-       { 0x0000068A, 0x0000 },   /* R1674  - OUT1RMIX Input 2 Source */ 
-       { 0x0000068B, 0x0080 },   /* R1675  - OUT1RMIX Input 2 Volume */ 
-       { 0x0000068C, 0x0000 },   /* R1676  - OUT1RMIX Input 3 Source */ 
-       { 0x0000068D, 0x0080 },   /* R1677  - OUT1RMIX Input 3 Volume */ 
-       { 0x0000068E, 0x0000 },   /* R1678  - OUT1RMIX Input 4 Source */ 
-       { 0x0000068F, 0x0080 },   /* R1679  - OUT1RMIX Input 4 Volume */ 
-       { 0x00000690, 0x0000 },   /* R1680  - OUT2LMIX Input 1 Source */ 
-       { 0x00000691, 0x0080 },   /* R1681  - OUT2LMIX Input 1 Volume */ 
-       { 0x00000692, 0x0000 },   /* R1682  - OUT2LMIX Input 2 Source */ 
-       { 0x00000693, 0x0080 },   /* R1683  - OUT2LMIX Input 2 Volume */ 
-       { 0x00000694, 0x0000 },   /* R1684  - OUT2LMIX Input 3 Source */ 
-       { 0x00000695, 0x0080 },   /* R1685  - OUT2LMIX Input 3 Volume */ 
-       { 0x00000696, 0x0000 },   /* R1686  - OUT2LMIX Input 4 Source */ 
-       { 0x00000697, 0x0080 },   /* R1687  - OUT2LMIX Input 4 Volume */ 
-       { 0x00000698, 0x0000 },   /* R1688  - OUT2RMIX Input 1 Source */ 
-       { 0x00000699, 0x0080 },   /* R1689  - OUT2RMIX Input 1 Volume */ 
-       { 0x0000069A, 0x0000 },   /* R1690  - OUT2RMIX Input 2 Source */ 
-       { 0x0000069B, 0x0080 },   /* R1691  - OUT2RMIX Input 2 Volume */ 
-       { 0x0000069C, 0x0000 },   /* R1692  - OUT2RMIX Input 3 Source */ 
-       { 0x0000069D, 0x0080 },   /* R1693  - OUT2RMIX Input 3 Volume */ 
-       { 0x0000069E, 0x0000 },   /* R1694  - OUT2RMIX Input 4 Source */ 
-       { 0x0000069F, 0x0080 },   /* R1695  - OUT2RMIX Input 4 Volume */ 
-       { 0x000006A0, 0x0000 },   /* R1696  - OUT3LMIX Input 1 Source */ 
-       { 0x000006A1, 0x0080 },   /* R1697  - OUT3LMIX Input 1 Volume */ 
-       { 0x000006A2, 0x0000 },   /* R1698  - OUT3LMIX Input 2 Source */ 
-       { 0x000006A3, 0x0080 },   /* R1699  - OUT3LMIX Input 2 Volume */ 
-       { 0x000006A4, 0x0000 },   /* R1700  - OUT3LMIX Input 3 Source */ 
-       { 0x000006A5, 0x0080 },   /* R1701  - OUT3LMIX Input 3 Volume */ 
-       { 0x000006A6, 0x0000 },   /* R1702  - OUT3LMIX Input 4 Source */ 
-       { 0x000006A7, 0x0080 },   /* R1703  - OUT3LMIX Input 4 Volume */ 
-       { 0x000006B0, 0x0000 },   /* R1712  - OUT4LMIX Input 1 Source */ 
-       { 0x000006B1, 0x0080 },   /* R1713  - OUT4LMIX Input 1 Volume */ 
-       { 0x000006B2, 0x0000 },   /* R1714  - OUT4LMIX Input 2 Source */ 
-       { 0x000006B3, 0x0080 },   /* R1715  - OUT4LMIX Input 2 Volume */ 
-       { 0x000006B4, 0x0000 },   /* R1716  - OUT4LMIX Input 3 Source */ 
-       { 0x000006B5, 0x0080 },   /* R1717  - OUT4LMIX Input 3 Volume */ 
-       { 0x000006B6, 0x0000 },   /* R1718  - OUT4LMIX Input 4 Source */ 
-       { 0x000006B7, 0x0080 },   /* R1719  - OUT4LMIX Input 4 Volume */ 
-       { 0x000006B8, 0x0000 },   /* R1720  - OUT4RMIX Input 1 Source */ 
-       { 0x000006B9, 0x0080 },   /* R1721  - OUT4RMIX Input 1 Volume */ 
-       { 0x000006BA, 0x0000 },   /* R1722  - OUT4RMIX Input 2 Source */ 
-       { 0x000006BB, 0x0080 },   /* R1723  - OUT4RMIX Input 2 Volume */ 
-       { 0x000006BC, 0x0000 },   /* R1724  - OUT4RMIX Input 3 Source */ 
-       { 0x000006BD, 0x0080 },   /* R1725  - OUT4RMIX Input 3 Volume */ 
-       { 0x000006BE, 0x0000 },   /* R1726  - OUT4RMIX Input 4 Source */ 
-       { 0x000006BF, 0x0080 },   /* R1727  - OUT4RMIX Input 4 Volume */ 
-       { 0x000006C0, 0x0000 },   /* R1728  - OUT5LMIX Input 1 Source */ 
-       { 0x000006C1, 0x0080 },   /* R1729  - OUT5LMIX Input 1 Volume */ 
-       { 0x000006C2, 0x0000 },   /* R1730  - OUT5LMIX Input 2 Source */ 
-       { 0x000006C3, 0x0080 },   /* R1731  - OUT5LMIX Input 2 Volume */ 
-       { 0x000006C4, 0x0000 },   /* R1732  - OUT5LMIX Input 3 Source */ 
-       { 0x000006C5, 0x0080 },   /* R1733  - OUT5LMIX Input 3 Volume */ 
-       { 0x000006C6, 0x0000 },   /* R1734  - OUT5LMIX Input 4 Source */ 
-       { 0x000006C7, 0x0080 },   /* R1735  - OUT5LMIX Input 4 Volume */ 
-       { 0x000006C8, 0x0000 },   /* R1736  - OUT5RMIX Input 1 Source */ 
-       { 0x000006C9, 0x0080 },   /* R1737  - OUT5RMIX Input 1 Volume */ 
-       { 0x000006CA, 0x0000 },   /* R1738  - OUT5RMIX Input 2 Source */ 
-       { 0x000006CB, 0x0080 },   /* R1739  - OUT5RMIX Input 2 Volume */ 
-       { 0x000006CC, 0x0000 },   /* R1740  - OUT5RMIX Input 3 Source */ 
-       { 0x000006CD, 0x0080 },   /* R1741  - OUT5RMIX Input 3 Volume */ 
-       { 0x000006CE, 0x0000 },   /* R1742  - OUT5RMIX Input 4 Source */ 
-       { 0x000006CF, 0x0080 },   /* R1743  - OUT5RMIX Input 4 Volume */ 
-       { 0x00000700, 0x0000 },   /* R1792  - AIF1TX1MIX Input 1 Source */ 
-       { 0x00000701, 0x0080 },   /* R1793  - AIF1TX1MIX Input 1 Volume */ 
-       { 0x00000702, 0x0000 },   /* R1794  - AIF1TX1MIX Input 2 Source */ 
-       { 0x00000703, 0x0080 },   /* R1795  - AIF1TX1MIX Input 2 Volume */ 
-       { 0x00000704, 0x0000 },   /* R1796  - AIF1TX1MIX Input 3 Source */ 
-       { 0x00000705, 0x0080 },   /* R1797  - AIF1TX1MIX Input 3 Volume */ 
-       { 0x00000706, 0x0000 },   /* R1798  - AIF1TX1MIX Input 4 Source */ 
-       { 0x00000707, 0x0080 },   /* R1799  - AIF1TX1MIX Input 4 Volume */ 
-       { 0x00000708, 0x0000 },   /* R1800  - AIF1TX2MIX Input 1 Source */ 
-       { 0x00000709, 0x0080 },   /* R1801  - AIF1TX2MIX Input 1 Volume */ 
-       { 0x0000070A, 0x0000 },   /* R1802  - AIF1TX2MIX Input 2 Source */ 
-       { 0x0000070B, 0x0080 },   /* R1803  - AIF1TX2MIX Input 2 Volume */ 
-       { 0x0000070C, 0x0000 },   /* R1804  - AIF1TX2MIX Input 3 Source */ 
-       { 0x0000070D, 0x0080 },   /* R1805  - AIF1TX2MIX Input 3 Volume */ 
-       { 0x0000070E, 0x0000 },   /* R1806  - AIF1TX2MIX Input 4 Source */ 
-       { 0x0000070F, 0x0080 },   /* R1807  - AIF1TX2MIX Input 4 Volume */ 
-       { 0x00000710, 0x0000 },   /* R1808  - AIF1TX3MIX Input 1 Source */ 
-       { 0x00000711, 0x0080 },   /* R1809  - AIF1TX3MIX Input 1 Volume */ 
-       { 0x00000712, 0x0000 },   /* R1810  - AIF1TX3MIX Input 2 Source */ 
-       { 0x00000713, 0x0080 },   /* R1811  - AIF1TX3MIX Input 2 Volume */ 
-       { 0x00000714, 0x0000 },   /* R1812  - AIF1TX3MIX Input 3 Source */ 
-       { 0x00000715, 0x0080 },   /* R1813  - AIF1TX3MIX Input 3 Volume */ 
-       { 0x00000716, 0x0000 },   /* R1814  - AIF1TX3MIX Input 4 Source */ 
-       { 0x00000717, 0x0080 },   /* R1815  - AIF1TX3MIX Input 4 Volume */ 
-       { 0x00000718, 0x0000 },   /* R1816  - AIF1TX4MIX Input 1 Source */ 
-       { 0x00000719, 0x0080 },   /* R1817  - AIF1TX4MIX Input 1 Volume */ 
-       { 0x0000071A, 0x0000 },   /* R1818  - AIF1TX4MIX Input 2 Source */ 
-       { 0x0000071B, 0x0080 },   /* R1819  - AIF1TX4MIX Input 2 Volume */ 
-       { 0x0000071C, 0x0000 },   /* R1820  - AIF1TX4MIX Input 3 Source */ 
-       { 0x0000071D, 0x0080 },   /* R1821  - AIF1TX4MIX Input 3 Volume */ 
-       { 0x0000071E, 0x0000 },   /* R1822  - AIF1TX4MIX Input 4 Source */ 
-       { 0x0000071F, 0x0080 },   /* R1823  - AIF1TX4MIX Input 4 Volume */ 
-       { 0x00000720, 0x0000 },   /* R1824  - AIF1TX5MIX Input 1 Source */ 
-       { 0x00000721, 0x0080 },   /* R1825  - AIF1TX5MIX Input 1 Volume */ 
-       { 0x00000722, 0x0000 },   /* R1826  - AIF1TX5MIX Input 2 Source */ 
-       { 0x00000723, 0x0080 },   /* R1827  - AIF1TX5MIX Input 2 Volume */ 
-       { 0x00000724, 0x0000 },   /* R1828  - AIF1TX5MIX Input 3 Source */ 
-       { 0x00000725, 0x0080 },   /* R1829  - AIF1TX5MIX Input 3 Volume */ 
-       { 0x00000726, 0x0000 },   /* R1830  - AIF1TX5MIX Input 4 Source */ 
-       { 0x00000727, 0x0080 },   /* R1831  - AIF1TX5MIX Input 4 Volume */ 
-       { 0x00000728, 0x0000 },   /* R1832  - AIF1TX6MIX Input 1 Source */ 
-       { 0x00000729, 0x0080 },   /* R1833  - AIF1TX6MIX Input 1 Volume */ 
-       { 0x0000072A, 0x0000 },   /* R1834  - AIF1TX6MIX Input 2 Source */ 
-       { 0x0000072B, 0x0080 },   /* R1835  - AIF1TX6MIX Input 2 Volume */ 
-       { 0x0000072C, 0x0000 },   /* R1836  - AIF1TX6MIX Input 3 Source */ 
-       { 0x0000072D, 0x0080 },   /* R1837  - AIF1TX6MIX Input 3 Volume */ 
-       { 0x0000072E, 0x0000 },   /* R1838  - AIF1TX6MIX Input 4 Source */ 
-       { 0x0000072F, 0x0080 },   /* R1839  - AIF1TX6MIX Input 4 Volume */ 
-       { 0x00000730, 0x0000 },   /* R1840  - AIF1TX7MIX Input 1 Source */ 
-       { 0x00000731, 0x0080 },   /* R1841  - AIF1TX7MIX Input 1 Volume */ 
-       { 0x00000732, 0x0000 },   /* R1842  - AIF1TX7MIX Input 2 Source */ 
-       { 0x00000733, 0x0080 },   /* R1843  - AIF1TX7MIX Input 2 Volume */ 
-       { 0x00000734, 0x0000 },   /* R1844  - AIF1TX7MIX Input 3 Source */ 
-       { 0x00000735, 0x0080 },   /* R1845  - AIF1TX7MIX Input 3 Volume */ 
-       { 0x00000736, 0x0000 },   /* R1846  - AIF1TX7MIX Input 4 Source */ 
-       { 0x00000737, 0x0080 },   /* R1847  - AIF1TX7MIX Input 4 Volume */ 
-       { 0x00000738, 0x0000 },   /* R1848  - AIF1TX8MIX Input 1 Source */ 
-       { 0x00000739, 0x0080 },   /* R1849  - AIF1TX8MIX Input 1 Volume */ 
-       { 0x0000073A, 0x0000 },   /* R1850  - AIF1TX8MIX Input 2 Source */ 
-       { 0x0000073B, 0x0080 },   /* R1851  - AIF1TX8MIX Input 2 Volume */ 
-       { 0x0000073C, 0x0000 },   /* R1852  - AIF1TX8MIX Input 3 Source */ 
-       { 0x0000073D, 0x0080 },   /* R1853  - AIF1TX8MIX Input 3 Volume */ 
-       { 0x0000073E, 0x0000 },   /* R1854  - AIF1TX8MIX Input 4 Source */ 
-       { 0x0000073F, 0x0080 },   /* R1855  - AIF1TX8MIX Input 4 Volume */ 
-       { 0x00000740, 0x0000 },   /* R1856  - AIF2TX1MIX Input 1 Source */ 
-       { 0x00000741, 0x0080 },   /* R1857  - AIF2TX1MIX Input 1 Volume */ 
-       { 0x00000742, 0x0000 },   /* R1858  - AIF2TX1MIX Input 2 Source */ 
-       { 0x00000743, 0x0080 },   /* R1859  - AIF2TX1MIX Input 2 Volume */ 
-       { 0x00000744, 0x0000 },   /* R1860  - AIF2TX1MIX Input 3 Source */ 
-       { 0x00000745, 0x0080 },   /* R1861  - AIF2TX1MIX Input 3 Volume */ 
-       { 0x00000746, 0x0000 },   /* R1862  - AIF2TX1MIX Input 4 Source */ 
-       { 0x00000747, 0x0080 },   /* R1863  - AIF2TX1MIX Input 4 Volume */ 
-       { 0x00000748, 0x0000 },   /* R1864  - AIF2TX2MIX Input 1 Source */ 
-       { 0x00000749, 0x0080 },   /* R1865  - AIF2TX2MIX Input 1 Volume */ 
-       { 0x0000074A, 0x0000 },   /* R1866  - AIF2TX2MIX Input 2 Source */ 
-       { 0x0000074B, 0x0080 },   /* R1867  - AIF2TX2MIX Input 2 Volume */ 
-       { 0x0000074C, 0x0000 },   /* R1868  - AIF2TX2MIX Input 3 Source */ 
-       { 0x0000074D, 0x0080 },   /* R1869  - AIF2TX2MIX Input 3 Volume */ 
-       { 0x0000074E, 0x0000 },   /* R1870  - AIF2TX2MIX Input 4 Source */ 
-       { 0x0000074F, 0x0080 },   /* R1871  - AIF2TX2MIX Input 4 Volume */ 
-       { 0x00000780, 0x0000 },   /* R1920  - AIF3TX1MIX Input 1 Source */ 
-       { 0x00000781, 0x0080 },   /* R1921  - AIF3TX1MIX Input 1 Volume */ 
-       { 0x00000782, 0x0000 },   /* R1922  - AIF3TX1MIX Input 2 Source */ 
-       { 0x00000783, 0x0080 },   /* R1923  - AIF3TX1MIX Input 2 Volume */ 
-       { 0x00000784, 0x0000 },   /* R1924  - AIF3TX1MIX Input 3 Source */ 
-       { 0x00000785, 0x0080 },   /* R1925  - AIF3TX1MIX Input 3 Volume */ 
-       { 0x00000786, 0x0000 },   /* R1926  - AIF3TX1MIX Input 4 Source */ 
-       { 0x00000787, 0x0080 },   /* R1927  - AIF3TX1MIX Input 4 Volume */ 
-       { 0x00000788, 0x0000 },   /* R1928  - AIF3TX2MIX Input 1 Source */ 
-       { 0x00000789, 0x0080 },   /* R1929  - AIF3TX2MIX Input 1 Volume */ 
-       { 0x0000078A, 0x0000 },   /* R1930  - AIF3TX2MIX Input 2 Source */ 
-       { 0x0000078B, 0x0080 },   /* R1931  - AIF3TX2MIX Input 2 Volume */ 
-       { 0x0000078C, 0x0000 },   /* R1932  - AIF3TX2MIX Input 3 Source */ 
-       { 0x0000078D, 0x0080 },   /* R1933  - AIF3TX2MIX Input 3 Volume */ 
-       { 0x0000078E, 0x0000 },   /* R1934  - AIF3TX2MIX Input 4 Source */ 
-       { 0x0000078F, 0x0080 },   /* R1935  - AIF3TX2MIX Input 4 Volume */ 
-       { 0x000007C0, 0x0000 },   /* R1984  - SLIMTX1MIX Input 1 Source */ 
-       { 0x000007C1, 0x0080 },   /* R1985  - SLIMTX1MIX Input 1 Volume */ 
-       { 0x000007C2, 0x0000 },   /* R1986  - SLIMTX1MIX Input 2 Source */ 
-       { 0x000007C3, 0x0080 },   /* R1987  - SLIMTX1MIX Input 2 Volume */ 
-       { 0x000007C4, 0x0000 },   /* R1988  - SLIMTX1MIX Input 3 Source */ 
-       { 0x000007C5, 0x0080 },   /* R1989  - SLIMTX1MIX Input 3 Volume */ 
-       { 0x000007C6, 0x0000 },   /* R1990  - SLIMTX1MIX Input 4 Source */ 
-       { 0x000007C7, 0x0080 },   /* R1991  - SLIMTX1MIX Input 4 Volume */ 
-       { 0x000007C8, 0x0000 },   /* R1992  - SLIMTX2MIX Input 1 Source */ 
-       { 0x000007C9, 0x0080 },   /* R1993  - SLIMTX2MIX Input 1 Volume */ 
-       { 0x000007CA, 0x0000 },   /* R1994  - SLIMTX2MIX Input 2 Source */ 
-       { 0x000007CB, 0x0080 },   /* R1995  - SLIMTX2MIX Input 2 Volume */ 
-       { 0x000007CC, 0x0000 },   /* R1996  - SLIMTX2MIX Input 3 Source */ 
-       { 0x000007CD, 0x0080 },   /* R1997  - SLIMTX2MIX Input 3 Volume */ 
-       { 0x000007CE, 0x0000 },   /* R1998  - SLIMTX2MIX Input 4 Source */ 
-       { 0x000007CF, 0x0080 },   /* R1999  - SLIMTX2MIX Input 4 Volume */ 
-       { 0x000007D0, 0x0000 },   /* R2000  - SLIMTX3MIX Input 1 Source */ 
-       { 0x000007D1, 0x0080 },   /* R2001  - SLIMTX3MIX Input 1 Volume */ 
-       { 0x000007D2, 0x0000 },   /* R2002  - SLIMTX3MIX Input 2 Source */ 
-       { 0x000007D3, 0x0080 },   /* R2003  - SLIMTX3MIX Input 2 Volume */ 
-       { 0x000007D4, 0x0000 },   /* R2004  - SLIMTX3MIX Input 3 Source */ 
-       { 0x000007D5, 0x0080 },   /* R2005  - SLIMTX3MIX Input 3 Volume */ 
-       { 0x000007D6, 0x0000 },   /* R2006  - SLIMTX3MIX Input 4 Source */ 
-       { 0x000007D7, 0x0080 },   /* R2007  - SLIMTX3MIX Input 4 Volume */ 
-       { 0x000007D8, 0x0000 },   /* R2008  - SLIMTX4MIX Input 1 Source */ 
-       { 0x000007D9, 0x0080 },   /* R2009  - SLIMTX4MIX Input 1 Volume */ 
-       { 0x000007DA, 0x0000 },   /* R2010  - SLIMTX4MIX Input 2 Source */ 
-       { 0x000007DB, 0x0080 },   /* R2011  - SLIMTX4MIX Input 2 Volume */ 
-       { 0x000007DC, 0x0000 },   /* R2012  - SLIMTX4MIX Input 3 Source */ 
-       { 0x000007DD, 0x0080 },   /* R2013  - SLIMTX4MIX Input 3 Volume */ 
-       { 0x000007DE, 0x0000 },   /* R2014  - SLIMTX4MIX Input 4 Source */ 
-       { 0x000007DF, 0x0080 },   /* R2015  - SLIMTX4MIX Input 4 Volume */ 
-       { 0x000007E0, 0x0000 },   /* R2016  - SLIMTX5MIX Input 1 Source */ 
-       { 0x000007E1, 0x0080 },   /* R2017  - SLIMTX5MIX Input 1 Volume */ 
-       { 0x000007E2, 0x0000 },   /* R2018  - SLIMTX5MIX Input 2 Source */ 
-       { 0x000007E3, 0x0080 },   /* R2019  - SLIMTX5MIX Input 2 Volume */ 
-       { 0x000007E4, 0x0000 },   /* R2020  - SLIMTX5MIX Input 3 Source */ 
-       { 0x000007E5, 0x0080 },   /* R2021  - SLIMTX5MIX Input 3 Volume */ 
-       { 0x000007E6, 0x0000 },   /* R2022  - SLIMTX5MIX Input 4 Source */ 
-       { 0x000007E7, 0x0080 },   /* R2023  - SLIMTX5MIX Input 4 Volume */ 
-       { 0x000007E8, 0x0000 },   /* R2024  - SLIMTX6MIX Input 1 Source */ 
-       { 0x000007E9, 0x0080 },   /* R2025  - SLIMTX6MIX Input 1 Volume */ 
-       { 0x000007EA, 0x0000 },   /* R2026  - SLIMTX6MIX Input 2 Source */ 
-       { 0x000007EB, 0x0080 },   /* R2027  - SLIMTX6MIX Input 2 Volume */ 
-       { 0x000007EC, 0x0000 },   /* R2028  - SLIMTX6MIX Input 3 Source */ 
-       { 0x000007ED, 0x0080 },   /* R2029  - SLIMTX6MIX Input 3 Volume */ 
-       { 0x000007EE, 0x0000 },   /* R2030  - SLIMTX6MIX Input 4 Source */ 
-       { 0x000007EF, 0x0080 },   /* R2031  - SLIMTX6MIX Input 4 Volume */ 
-       { 0x000007F0, 0x0000 },   /* R2032  - SLIMTX7MIX Input 1 Source */ 
-       { 0x000007F1, 0x0080 },   /* R2033  - SLIMTX7MIX Input 1 Volume */ 
-       { 0x000007F2, 0x0000 },   /* R2034  - SLIMTX7MIX Input 2 Source */ 
-       { 0x000007F3, 0x0080 },   /* R2035  - SLIMTX7MIX Input 2 Volume */ 
-       { 0x000007F4, 0x0000 },   /* R2036  - SLIMTX7MIX Input 3 Source */ 
-       { 0x000007F5, 0x0080 },   /* R2037  - SLIMTX7MIX Input 3 Volume */ 
-       { 0x000007F6, 0x0000 },   /* R2038  - SLIMTX7MIX Input 4 Source */ 
-       { 0x000007F7, 0x0080 },   /* R2039  - SLIMTX7MIX Input 4 Volume */ 
-       { 0x000007F8, 0x0000 },   /* R2040  - SLIMTX8MIX Input 1 Source */ 
-       { 0x000007F9, 0x0080 },   /* R2041  - SLIMTX8MIX Input 1 Volume */ 
-       { 0x000007FA, 0x0000 },   /* R2042  - SLIMTX8MIX Input 2 Source */ 
-       { 0x000007FB, 0x0080 },   /* R2043  - SLIMTX8MIX Input 2 Volume */ 
-       { 0x000007FC, 0x0000 },   /* R2044  - SLIMTX8MIX Input 3 Source */ 
-       { 0x000007FD, 0x0080 },   /* R2045  - SLIMTX8MIX Input 3 Volume */ 
-       { 0x000007FE, 0x0000 },   /* R2046  - SLIMTX8MIX Input 4 Source */ 
-       { 0x000007FF, 0x0080 },   /* R2047  - SLIMTX8MIX Input 4 Volume */ 
-       { 0x00000880, 0x0000 },   /* R2176  - EQ1MIX Input 1 Source */ 
-       { 0x00000881, 0x0080 },   /* R2177  - EQ1MIX Input 1 Volume */ 
-       { 0x00000882, 0x0000 },   /* R2178  - EQ1MIX Input 2 Source */ 
-       { 0x00000883, 0x0080 },   /* R2179  - EQ1MIX Input 2 Volume */ 
-       { 0x00000884, 0x0000 },   /* R2180  - EQ1MIX Input 3 Source */ 
-       { 0x00000885, 0x0080 },   /* R2181  - EQ1MIX Input 3 Volume */ 
-       { 0x00000886, 0x0000 },   /* R2182  - EQ1MIX Input 4 Source */ 
-       { 0x00000887, 0x0080 },   /* R2183  - EQ1MIX Input 4 Volume */ 
-       { 0x00000888, 0x0000 },   /* R2184  - EQ2MIX Input 1 Source */ 
-       { 0x00000889, 0x0080 },   /* R2185  - EQ2MIX Input 1 Volume */ 
-       { 0x0000088A, 0x0000 },   /* R2186  - EQ2MIX Input 2 Source */ 
-       { 0x0000088B, 0x0080 },   /* R2187  - EQ2MIX Input 2 Volume */ 
-       { 0x0000088C, 0x0000 },   /* R2188  - EQ2MIX Input 3 Source */ 
-       { 0x0000088D, 0x0080 },   /* R2189  - EQ2MIX Input 3 Volume */ 
-       { 0x0000088E, 0x0000 },   /* R2190  - EQ2MIX Input 4 Source */ 
-       { 0x0000088F, 0x0080 },   /* R2191  - EQ2MIX Input 4 Volume */ 
-       { 0x00000890, 0x0000 },   /* R2192  - EQ3MIX Input 1 Source */ 
-       { 0x00000891, 0x0080 },   /* R2193  - EQ3MIX Input 1 Volume */ 
-       { 0x00000892, 0x0000 },   /* R2194  - EQ3MIX Input 2 Source */ 
-       { 0x00000893, 0x0080 },   /* R2195  - EQ3MIX Input 2 Volume */ 
-       { 0x00000894, 0x0000 },   /* R2196  - EQ3MIX Input 3 Source */ 
-       { 0x00000895, 0x0080 },   /* R2197  - EQ3MIX Input 3 Volume */ 
-       { 0x00000896, 0x0000 },   /* R2198  - EQ3MIX Input 4 Source */ 
-       { 0x00000897, 0x0080 },   /* R2199  - EQ3MIX Input 4 Volume */ 
-       { 0x00000898, 0x0000 },   /* R2200  - EQ4MIX Input 1 Source */ 
-       { 0x00000899, 0x0080 },   /* R2201  - EQ4MIX Input 1 Volume */ 
-       { 0x0000089A, 0x0000 },   /* R2202  - EQ4MIX Input 2 Source */ 
-       { 0x0000089B, 0x0080 },   /* R2203  - EQ4MIX Input 2 Volume */ 
-       { 0x0000089C, 0x0000 },   /* R2204  - EQ4MIX Input 3 Source */ 
-       { 0x0000089D, 0x0080 },   /* R2205  - EQ4MIX Input 3 Volume */ 
-       { 0x0000089E, 0x0000 },   /* R2206  - EQ4MIX Input 4 Source */ 
-       { 0x0000089F, 0x0080 },   /* R2207  - EQ4MIX Input 4 Volume */ 
-       { 0x000008C0, 0x0000 },   /* R2240  - DRC1LMIX Input 1 Source */ 
-       { 0x000008C1, 0x0080 },   /* R2241  - DRC1LMIX Input 1 Volume */ 
-       { 0x000008C2, 0x0000 },   /* R2242  - DRC1LMIX Input 2 Source */ 
-       { 0x000008C3, 0x0080 },   /* R2243  - DRC1LMIX Input 2 Volume */ 
-       { 0x000008C4, 0x0000 },   /* R2244  - DRC1LMIX Input 3 Source */ 
-       { 0x000008C5, 0x0080 },   /* R2245  - DRC1LMIX Input 3 Volume */ 
-       { 0x000008C6, 0x0000 },   /* R2246  - DRC1LMIX Input 4 Source */ 
-       { 0x000008C7, 0x0080 },   /* R2247  - DRC1LMIX Input 4 Volume */ 
-       { 0x000008C8, 0x0000 },   /* R2248  - DRC1RMIX Input 1 Source */ 
-       { 0x000008C9, 0x0080 },   /* R2249  - DRC1RMIX Input 1 Volume */ 
-       { 0x000008CA, 0x0000 },   /* R2250  - DRC1RMIX Input 2 Source */ 
-       { 0x000008CB, 0x0080 },   /* R2251  - DRC1RMIX Input 2 Volume */ 
-       { 0x000008CC, 0x0000 },   /* R2252  - DRC1RMIX Input 3 Source */ 
-       { 0x000008CD, 0x0080 },   /* R2253  - DRC1RMIX Input 3 Volume */ 
-       { 0x000008CE, 0x0000 },   /* R2254  - DRC1RMIX Input 4 Source */ 
-       { 0x000008CF, 0x0080 },   /* R2255  - DRC1RMIX Input 4 Volume */ 
-       { 0x00000900, 0x0000 },   /* R2304  - HPLP1MIX Input 1 Source */ 
-       { 0x00000901, 0x0080 },   /* R2305  - HPLP1MIX Input 1 Volume */ 
-       { 0x00000902, 0x0000 },   /* R2306  - HPLP1MIX Input 2 Source */ 
-       { 0x00000903, 0x0080 },   /* R2307  - HPLP1MIX Input 2 Volume */ 
-       { 0x00000904, 0x0000 },   /* R2308  - HPLP1MIX Input 3 Source */ 
-       { 0x00000905, 0x0080 },   /* R2309  - HPLP1MIX Input 3 Volume */ 
-       { 0x00000906, 0x0000 },   /* R2310  - HPLP1MIX Input 4 Source */ 
-       { 0x00000907, 0x0080 },   /* R2311  - HPLP1MIX Input 4 Volume */ 
-       { 0x00000908, 0x0000 },   /* R2312  - HPLP2MIX Input 1 Source */ 
-       { 0x00000909, 0x0080 },   /* R2313  - HPLP2MIX Input 1 Volume */ 
-       { 0x0000090A, 0x0000 },   /* R2314  - HPLP2MIX Input 2 Source */ 
-       { 0x0000090B, 0x0080 },   /* R2315  - HPLP2MIX Input 2 Volume */ 
-       { 0x0000090C, 0x0000 },   /* R2316  - HPLP2MIX Input 3 Source */ 
-       { 0x0000090D, 0x0080 },   /* R2317  - HPLP2MIX Input 3 Volume */ 
-       { 0x0000090E, 0x0000 },   /* R2318  - HPLP2MIX Input 4 Source */ 
-       { 0x0000090F, 0x0080 },   /* R2319  - HPLP2MIX Input 4 Volume */ 
-       { 0x00000910, 0x0000 },   /* R2320  - HPLP3MIX Input 1 Source */ 
-       { 0x00000911, 0x0080 },   /* R2321  - HPLP3MIX Input 1 Volume */ 
-       { 0x00000912, 0x0000 },   /* R2322  - HPLP3MIX Input 2 Source */ 
-       { 0x00000913, 0x0080 },   /* R2323  - HPLP3MIX Input 2 Volume */ 
-       { 0x00000914, 0x0000 },   /* R2324  - HPLP3MIX Input 3 Source */ 
-       { 0x00000915, 0x0080 },   /* R2325  - HPLP3MIX Input 3 Volume */ 
-       { 0x00000916, 0x0000 },   /* R2326  - HPLP3MIX Input 4 Source */ 
-       { 0x00000917, 0x0080 },   /* R2327  - HPLP3MIX Input 4 Volume */ 
-       { 0x00000918, 0x0000 },   /* R2328  - HPLP4MIX Input 1 Source */ 
-       { 0x00000919, 0x0080 },   /* R2329  - HPLP4MIX Input 1 Volume */ 
-       { 0x0000091A, 0x0000 },   /* R2330  - HPLP4MIX Input 2 Source */ 
-       { 0x0000091B, 0x0080 },   /* R2331  - HPLP4MIX Input 2 Volume */ 
-       { 0x0000091C, 0x0000 },   /* R2332  - HPLP4MIX Input 3 Source */ 
-       { 0x0000091D, 0x0080 },   /* R2333  - HPLP4MIX Input 3 Volume */ 
-       { 0x0000091E, 0x0000 },   /* R2334  - HPLP4MIX Input 4 Source */ 
-       { 0x0000091F, 0x0080 },   /* R2335  - HPLP4MIX Input 4 Volume */ 
-       { 0x00000940, 0x0000 },   /* R2368  - DSP1LMIX Input 1 Source */ 
-       { 0x00000941, 0x0080 },   /* R2369  - DSP1LMIX Input 1 Volume */ 
-       { 0x00000942, 0x0000 },   /* R2370  - DSP1LMIX Input 2 Source */ 
-       { 0x00000943, 0x0080 },   /* R2371  - DSP1LMIX Input 2 Volume */ 
-       { 0x00000944, 0x0000 },   /* R2372  - DSP1LMIX Input 3 Source */ 
-       { 0x00000945, 0x0080 },   /* R2373  - DSP1LMIX Input 3 Volume */ 
-       { 0x00000946, 0x0000 },   /* R2374  - DSP1LMIX Input 4 Source */ 
-       { 0x00000947, 0x0080 },   /* R2375  - DSP1LMIX Input 4 Volume */ 
-       { 0x00000948, 0x0000 },   /* R2376  - DSP1RMIX Input 1 Source */ 
-       { 0x00000949, 0x0080 },   /* R2377  - DSP1RMIX Input 1 Volume */ 
-       { 0x0000094A, 0x0000 },   /* R2378  - DSP1RMIX Input 2 Source */ 
-       { 0x0000094B, 0x0080 },   /* R2379  - DSP1RMIX Input 2 Volume */ 
-       { 0x0000094C, 0x0000 },   /* R2380  - DSP1RMIX Input 3 Source */ 
-       { 0x0000094D, 0x0080 },   /* R2381  - DSP1RMIX Input 3 Volume */ 
-       { 0x0000094E, 0x0000 },   /* R2382  - DSP1RMIX Input 4 Source */ 
-       { 0x0000094F, 0x0080 },   /* R2383  - DSP1RMIX Input 4 Volume */ 
-       { 0x00000950, 0x0000 },   /* R2384  - DSP1AUX1MIX Input 1 Source */ 
-       { 0x00000958, 0x0000 },   /* R2392  - DSP1AUX2MIX Input 1 Source */ 
-       { 0x00000960, 0x0000 },   /* R2400  - DSP1AUX3MIX Input 1 Source */ 
-       { 0x00000968, 0x0000 },   /* R2408  - DSP1AUX4MIX Input 1 Source */ 
-       { 0x00000970, 0x0000 },   /* R2416  - DSP1AUX5MIX Input 1 Source */ 
-       { 0x00000978, 0x0000 },   /* R2424  - DSP1AUX6MIX Input 1 Source */ 
-       { 0x00000A80, 0x0000 },   /* R2688  - ASRC1LMIX Input 1 Source */ 
-       { 0x00000A88, 0x0000 },   /* R2696  - ASRC1RMIX Input 1 Source */ 
-       { 0x00000A90, 0x0000 },   /* R2704  - ASRC2LMIX Input 1 Source */ 
-       { 0x00000A98, 0x0000 },   /* R2712  - ASRC2RMIX Input 1 Source */ 
-       { 0x00000B00, 0x0000 },   /* R2816  - ISRC1DEC1MIX Input 1 Source */ 
-       { 0x00000B08, 0x0000 },   /* R2824  - ISRC1DEC2MIX Input 1 Source */ 
-       { 0x00000B20, 0x0000 },   /* R2848  - ISRC1INT1MIX Input 1 Source */ 
-       { 0x00000B28, 0x0000 },   /* R2856  - ISRC1INT2MIX Input 1 Source */ 
-       { 0x00000B40, 0x0000 },   /* R2880  - ISRC2DEC1MIX Input 1 Source */ 
-       { 0x00000B48, 0x0000 },   /* R2888  - ISRC2DEC2MIX Input 1 Source */ 
-       { 0x00000B60, 0x0000 },   /* R2912  - ISRC2INT1MIX Input 1 Source */ 
-       { 0x00000B68, 0x0000 },   /* R2920  - ISRC2INT2MIX Input 1 Source */ 
-       { 0x00000C00, 0xA101 },   /* R3072  - GPIO1 CTRL */ 
-       { 0x00000C01, 0xA101 },   /* R3073  - GPIO2 CTRL */ 
-       { 0x00000C02, 0xA101 },   /* R3074  - GPIO3 CTRL */ 
-       { 0x00000C03, 0xA101 },   /* R3075  - GPIO4 CTRL */ 
-       { 0x00000C04, 0xA101 },   /* R3076  - GPIO5 CTRL */ 
-       { 0x00000C0F, 0x0400 },   /* R3087  - IRQ CTRL 1 */ 
-       { 0x00000C10, 0x1000 },   /* R3088  - GPIO Debounce Config */ 
-       { 0x00000C20, 0x8002 },   /* R3104  - Misc Pad Ctrl 1 */ 
+       { 0x00000490, 0x0069 },   /* R1168  - PDM SPK1 CTRL 1 */
+       { 0x00000491, 0x0000 },   /* R1169  - PDM SPK1 CTRL 2 */
+       { 0x00000500, 0x000C },   /* R1280  - AIF1 BCLK Ctrl */
+       { 0x00000501, 0x0008 },   /* R1281  - AIF1 Tx Pin Ctrl */
+       { 0x00000502, 0x0000 },   /* R1282  - AIF1 Rx Pin Ctrl */
+       { 0x00000503, 0x0000 },   /* R1283  - AIF1 Rate Ctrl */
+       { 0x00000504, 0x0000 },   /* R1284  - AIF1 Format */
+       { 0x00000505, 0x0040 },   /* R1285  - AIF1 Tx BCLK Rate */
+       { 0x00000506, 0x0040 },   /* R1286  - AIF1 Rx BCLK Rate */
+       { 0x00000507, 0x1818 },   /* R1287  - AIF1 Frame Ctrl 1 */
+       { 0x00000508, 0x1818 },   /* R1288  - AIF1 Frame Ctrl 2 */
+       { 0x00000509, 0x0000 },   /* R1289  - AIF1 Frame Ctrl 3 */
+       { 0x0000050A, 0x0001 },   /* R1290  - AIF1 Frame Ctrl 4 */
+       { 0x0000050B, 0x0002 },   /* R1291  - AIF1 Frame Ctrl 5 */
+       { 0x0000050C, 0x0003 },   /* R1292  - AIF1 Frame Ctrl 6 */
+       { 0x0000050D, 0x0004 },   /* R1293  - AIF1 Frame Ctrl 7 */
+       { 0x0000050E, 0x0005 },   /* R1294  - AIF1 Frame Ctrl 8 */
+       { 0x0000050F, 0x0006 },   /* R1295  - AIF1 Frame Ctrl 9 */
+       { 0x00000510, 0x0007 },   /* R1296  - AIF1 Frame Ctrl 10 */
+       { 0x00000511, 0x0000 },   /* R1297  - AIF1 Frame Ctrl 11 */
+       { 0x00000512, 0x0001 },   /* R1298  - AIF1 Frame Ctrl 12 */
+       { 0x00000513, 0x0002 },   /* R1299  - AIF1 Frame Ctrl 13 */
+       { 0x00000514, 0x0003 },   /* R1300  - AIF1 Frame Ctrl 14 */
+       { 0x00000515, 0x0004 },   /* R1301  - AIF1 Frame Ctrl 15 */
+       { 0x00000516, 0x0005 },   /* R1302  - AIF1 Frame Ctrl 16 */
+       { 0x00000517, 0x0006 },   /* R1303  - AIF1 Frame Ctrl 17 */
+       { 0x00000518, 0x0007 },   /* R1304  - AIF1 Frame Ctrl 18 */
+       { 0x00000519, 0x0000 },   /* R1305  - AIF1 Tx Enables */
+       { 0x0000051A, 0x0000 },   /* R1306  - AIF1 Rx Enables */
+       { 0x00000540, 0x000C },   /* R1344  - AIF2 BCLK Ctrl */
+       { 0x00000541, 0x0008 },   /* R1345  - AIF2 Tx Pin Ctrl */
+       { 0x00000542, 0x0000 },   /* R1346  - AIF2 Rx Pin Ctrl */
+       { 0x00000543, 0x0000 },   /* R1347  - AIF2 Rate Ctrl */
+       { 0x00000544, 0x0000 },   /* R1348  - AIF2 Format */
+       { 0x00000545, 0x0040 },   /* R1349  - AIF2 Tx BCLK Rate */
+       { 0x00000546, 0x0040 },   /* R1350  - AIF2 Rx BCLK Rate */
+       { 0x00000547, 0x1818 },   /* R1351  - AIF2 Frame Ctrl 1 */
+       { 0x00000548, 0x1818 },   /* R1352  - AIF2 Frame Ctrl 2 */
+       { 0x00000549, 0x0000 },   /* R1353  - AIF2 Frame Ctrl 3 */
+       { 0x0000054A, 0x0001 },   /* R1354  - AIF2 Frame Ctrl 4 */
+       { 0x00000551, 0x0000 },   /* R1361  - AIF2 Frame Ctrl 11 */
+       { 0x00000552, 0x0001 },   /* R1362  - AIF2 Frame Ctrl 12 */
+       { 0x00000559, 0x0000 },   /* R1369  - AIF2 Tx Enables */
+       { 0x0000055A, 0x0000 },   /* R1370  - AIF2 Rx Enables */
+       { 0x00000580, 0x000C },   /* R1408  - AIF3 BCLK Ctrl */
+       { 0x00000581, 0x0008 },   /* R1409  - AIF3 Tx Pin Ctrl */
+       { 0x00000582, 0x0000 },   /* R1410  - AIF3 Rx Pin Ctrl */
+       { 0x00000583, 0x0000 },   /* R1411  - AIF3 Rate Ctrl */
+       { 0x00000584, 0x0000 },   /* R1412  - AIF3 Format */
+       { 0x00000585, 0x0040 },   /* R1413  - AIF3 Tx BCLK Rate */
+       { 0x00000586, 0x0040 },   /* R1414  - AIF3 Rx BCLK Rate */
+       { 0x00000587, 0x1818 },   /* R1415  - AIF3 Frame Ctrl 1 */
+       { 0x00000588, 0x1818 },   /* R1416  - AIF3 Frame Ctrl 2 */
+       { 0x00000589, 0x0000 },   /* R1417  - AIF3 Frame Ctrl 3 */
+       { 0x0000058A, 0x0001 },   /* R1418  - AIF3 Frame Ctrl 4 */
+       { 0x00000591, 0x0000 },   /* R1425  - AIF3 Frame Ctrl 11 */
+       { 0x00000592, 0x0001 },   /* R1426  - AIF3 Frame Ctrl 12 */
+       { 0x00000599, 0x0000 },   /* R1433  - AIF3 Tx Enables */
+       { 0x0000059A, 0x0000 },   /* R1434  - AIF3 Rx Enables */
+       { 0x000005E3, 0x0004 },   /* R1507  - SLIMbus Framer Ref Gear */
+       { 0x000005E5, 0x0000 },   /* R1509  - SLIMbus Rates 1 */
+       { 0x000005E6, 0x0000 },   /* R1510  - SLIMbus Rates 2 */
+       { 0x000005E7, 0x0000 },   /* R1511  - SLIMbus Rates 3 */
+       { 0x000005E8, 0x0000 },   /* R1512  - SLIMbus Rates 4 */
+       { 0x000005E9, 0x0000 },   /* R1513  - SLIMbus Rates 5 */
+       { 0x000005EA, 0x0000 },   /* R1514  - SLIMbus Rates 6 */
+       { 0x000005EB, 0x0000 },   /* R1515  - SLIMbus Rates 7 */
+       { 0x000005EC, 0x0000 },   /* R1516  - SLIMbus Rates 8 */
+       { 0x000005F5, 0x0000 },   /* R1525  - SLIMbus RX Channel Enable */
+       { 0x000005F6, 0x0000 },   /* R1526  - SLIMbus TX Channel Enable */
+       { 0x00000640, 0x0000 },   /* R1600  - PWM1MIX Input 1 Source */
+       { 0x00000641, 0x0080 },   /* R1601  - PWM1MIX Input 1 Volume */
+       { 0x00000642, 0x0000 },   /* R1602  - PWM1MIX Input 2 Source */
+       { 0x00000643, 0x0080 },   /* R1603  - PWM1MIX Input 2 Volume */
+       { 0x00000644, 0x0000 },   /* R1604  - PWM1MIX Input 3 Source */
+       { 0x00000645, 0x0080 },   /* R1605  - PWM1MIX Input 3 Volume */
+       { 0x00000646, 0x0000 },   /* R1606  - PWM1MIX Input 4 Source */
+       { 0x00000647, 0x0080 },   /* R1607  - PWM1MIX Input 4 Volume */
+       { 0x00000648, 0x0000 },   /* R1608  - PWM2MIX Input 1 Source */
+       { 0x00000649, 0x0080 },   /* R1609  - PWM2MIX Input 1 Volume */
+       { 0x0000064A, 0x0000 },   /* R1610  - PWM2MIX Input 2 Source */
+       { 0x0000064B, 0x0080 },   /* R1611  - PWM2MIX Input 2 Volume */
+       { 0x0000064C, 0x0000 },   /* R1612  - PWM2MIX Input 3 Source */
+       { 0x0000064D, 0x0080 },   /* R1613  - PWM2MIX Input 3 Volume */
+       { 0x0000064E, 0x0000 },   /* R1614  - PWM2MIX Input 4 Source */
+       { 0x0000064F, 0x0080 },   /* R1615  - PWM2MIX Input 4 Volume */
+       { 0x00000660, 0x0000 },   /* R1632  - MICMIX Input 1 Source */
+       { 0x00000661, 0x0080 },   /* R1633  - MICMIX Input 1 Volume */
+       { 0x00000662, 0x0000 },   /* R1634  - MICMIX Input 2 Source */
+       { 0x00000663, 0x0080 },   /* R1635  - MICMIX Input 2 Volume */
+       { 0x00000664, 0x0000 },   /* R1636  - MICMIX Input 3 Source */
+       { 0x00000665, 0x0080 },   /* R1637  - MICMIX Input 3 Volume */
+       { 0x00000666, 0x0000 },   /* R1638  - MICMIX Input 4 Source */
+       { 0x00000667, 0x0080 },   /* R1639  - MICMIX Input 4 Volume */
+       { 0x00000668, 0x0000 },   /* R1640  - NOISEMIX Input 1 Source */
+       { 0x00000669, 0x0080 },   /* R1641  - NOISEMIX Input 1 Volume */
+       { 0x0000066A, 0x0000 },   /* R1642  - NOISEMIX Input 2 Source */
+       { 0x0000066B, 0x0080 },   /* R1643  - NOISEMIX Input 2 Volume */
+       { 0x0000066C, 0x0000 },   /* R1644  - NOISEMIX Input 3 Source */
+       { 0x0000066D, 0x0080 },   /* R1645  - NOISEMIX Input 3 Volume */
+       { 0x0000066E, 0x0000 },   /* R1646  - NOISEMIX Input 4 Source */
+       { 0x0000066F, 0x0080 },   /* R1647  - NOISEMIX Input 4 Volume */
+       { 0x00000680, 0x0000 },   /* R1664  - OUT1LMIX Input 1 Source */
+       { 0x00000681, 0x0080 },   /* R1665  - OUT1LMIX Input 1 Volume */
+       { 0x00000682, 0x0000 },   /* R1666  - OUT1LMIX Input 2 Source */
+       { 0x00000683, 0x0080 },   /* R1667  - OUT1LMIX Input 2 Volume */
+       { 0x00000684, 0x0000 },   /* R1668  - OUT1LMIX Input 3 Source */
+       { 0x00000685, 0x0080 },   /* R1669  - OUT1LMIX Input 3 Volume */
+       { 0x00000686, 0x0000 },   /* R1670  - OUT1LMIX Input 4 Source */
+       { 0x00000687, 0x0080 },   /* R1671  - OUT1LMIX Input 4 Volume */
+       { 0x00000688, 0x0000 },   /* R1672  - OUT1RMIX Input 1 Source */
+       { 0x00000689, 0x0080 },   /* R1673  - OUT1RMIX Input 1 Volume */
+       { 0x0000068A, 0x0000 },   /* R1674  - OUT1RMIX Input 2 Source */
+       { 0x0000068B, 0x0080 },   /* R1675  - OUT1RMIX Input 2 Volume */
+       { 0x0000068C, 0x0000 },   /* R1676  - OUT1RMIX Input 3 Source */
+       { 0x0000068D, 0x0080 },   /* R1677  - OUT1RMIX Input 3 Volume */
+       { 0x0000068E, 0x0000 },   /* R1678  - OUT1RMIX Input 4 Source */
+       { 0x0000068F, 0x0080 },   /* R1679  - OUT1RMIX Input 4 Volume */
+       { 0x00000690, 0x0000 },   /* R1680  - OUT2LMIX Input 1 Source */
+       { 0x00000691, 0x0080 },   /* R1681  - OUT2LMIX Input 1 Volume */
+       { 0x00000692, 0x0000 },   /* R1682  - OUT2LMIX Input 2 Source */
+       { 0x00000693, 0x0080 },   /* R1683  - OUT2LMIX Input 2 Volume */
+       { 0x00000694, 0x0000 },   /* R1684  - OUT2LMIX Input 3 Source */
+       { 0x00000695, 0x0080 },   /* R1685  - OUT2LMIX Input 3 Volume */
+       { 0x00000696, 0x0000 },   /* R1686  - OUT2LMIX Input 4 Source */
+       { 0x00000697, 0x0080 },   /* R1687  - OUT2LMIX Input 4 Volume */
+       { 0x00000698, 0x0000 },   /* R1688  - OUT2RMIX Input 1 Source */
+       { 0x00000699, 0x0080 },   /* R1689  - OUT2RMIX Input 1 Volume */
+       { 0x0000069A, 0x0000 },   /* R1690  - OUT2RMIX Input 2 Source */
+       { 0x0000069B, 0x0080 },   /* R1691  - OUT2RMIX Input 2 Volume */
+       { 0x0000069C, 0x0000 },   /* R1692  - OUT2RMIX Input 3 Source */
+       { 0x0000069D, 0x0080 },   /* R1693  - OUT2RMIX Input 3 Volume */
+       { 0x0000069E, 0x0000 },   /* R1694  - OUT2RMIX Input 4 Source */
+       { 0x0000069F, 0x0080 },   /* R1695  - OUT2RMIX Input 4 Volume */
+       { 0x000006A0, 0x0000 },   /* R1696  - OUT3LMIX Input 1 Source */
+       { 0x000006A1, 0x0080 },   /* R1697  - OUT3LMIX Input 1 Volume */
+       { 0x000006A2, 0x0000 },   /* R1698  - OUT3LMIX Input 2 Source */
+       { 0x000006A3, 0x0080 },   /* R1699  - OUT3LMIX Input 2 Volume */
+       { 0x000006A4, 0x0000 },   /* R1700  - OUT3LMIX Input 3 Source */
+       { 0x000006A5, 0x0080 },   /* R1701  - OUT3LMIX Input 3 Volume */
+       { 0x000006A6, 0x0000 },   /* R1702  - OUT3LMIX Input 4 Source */
+       { 0x000006A7, 0x0080 },   /* R1703  - OUT3LMIX Input 4 Volume */
+       { 0x000006B0, 0x0000 },   /* R1712  - OUT4LMIX Input 1 Source */
+       { 0x000006B1, 0x0080 },   /* R1713  - OUT4LMIX Input 1 Volume */
+       { 0x000006B2, 0x0000 },   /* R1714  - OUT4LMIX Input 2 Source */
+       { 0x000006B3, 0x0080 },   /* R1715  - OUT4LMIX Input 2 Volume */
+       { 0x000006B4, 0x0000 },   /* R1716  - OUT4LMIX Input 3 Source */
+       { 0x000006B5, 0x0080 },   /* R1717  - OUT4LMIX Input 3 Volume */
+       { 0x000006B6, 0x0000 },   /* R1718  - OUT4LMIX Input 4 Source */
+       { 0x000006B7, 0x0080 },   /* R1719  - OUT4LMIX Input 4 Volume */
+       { 0x000006B8, 0x0000 },   /* R1720  - OUT4RMIX Input 1 Source */
+       { 0x000006B9, 0x0080 },   /* R1721  - OUT4RMIX Input 1 Volume */
+       { 0x000006BA, 0x0000 },   /* R1722  - OUT4RMIX Input 2 Source */
+       { 0x000006BB, 0x0080 },   /* R1723  - OUT4RMIX Input 2 Volume */
+       { 0x000006BC, 0x0000 },   /* R1724  - OUT4RMIX Input 3 Source */
+       { 0x000006BD, 0x0080 },   /* R1725  - OUT4RMIX Input 3 Volume */
+       { 0x000006BE, 0x0000 },   /* R1726  - OUT4RMIX Input 4 Source */
+       { 0x000006BF, 0x0080 },   /* R1727  - OUT4RMIX Input 4 Volume */
+       { 0x000006C0, 0x0000 },   /* R1728  - OUT5LMIX Input 1 Source */
+       { 0x000006C1, 0x0080 },   /* R1729  - OUT5LMIX Input 1 Volume */
+       { 0x000006C2, 0x0000 },   /* R1730  - OUT5LMIX Input 2 Source */
+       { 0x000006C3, 0x0080 },   /* R1731  - OUT5LMIX Input 2 Volume */
+       { 0x000006C4, 0x0000 },   /* R1732  - OUT5LMIX Input 3 Source */
+       { 0x000006C5, 0x0080 },   /* R1733  - OUT5LMIX Input 3 Volume */
+       { 0x000006C6, 0x0000 },   /* R1734  - OUT5LMIX Input 4 Source */
+       { 0x000006C7, 0x0080 },   /* R1735  - OUT5LMIX Input 4 Volume */
+       { 0x000006C8, 0x0000 },   /* R1736  - OUT5RMIX Input 1 Source */
+       { 0x000006C9, 0x0080 },   /* R1737  - OUT5RMIX Input 1 Volume */
+       { 0x000006CA, 0x0000 },   /* R1738  - OUT5RMIX Input 2 Source */
+       { 0x000006CB, 0x0080 },   /* R1739  - OUT5RMIX Input 2 Volume */
+       { 0x000006CC, 0x0000 },   /* R1740  - OUT5RMIX Input 3 Source */
+       { 0x000006CD, 0x0080 },   /* R1741  - OUT5RMIX Input 3 Volume */
+       { 0x000006CE, 0x0000 },   /* R1742  - OUT5RMIX Input 4 Source */
+       { 0x000006CF, 0x0080 },   /* R1743  - OUT5RMIX Input 4 Volume */
+       { 0x00000700, 0x0000 },   /* R1792  - AIF1TX1MIX Input 1 Source */
+       { 0x00000701, 0x0080 },   /* R1793  - AIF1TX1MIX Input 1 Volume */
+       { 0x00000702, 0x0000 },   /* R1794  - AIF1TX1MIX Input 2 Source */
+       { 0x00000703, 0x0080 },   /* R1795  - AIF1TX1MIX Input 2 Volume */
+       { 0x00000704, 0x0000 },   /* R1796  - AIF1TX1MIX Input 3 Source */
+       { 0x00000705, 0x0080 },   /* R1797  - AIF1TX1MIX Input 3 Volume */
+       { 0x00000706, 0x0000 },   /* R1798  - AIF1TX1MIX Input 4 Source */
+       { 0x00000707, 0x0080 },   /* R1799  - AIF1TX1MIX Input 4 Volume */
+       { 0x00000708, 0x0000 },   /* R1800  - AIF1TX2MIX Input 1 Source */
+       { 0x00000709, 0x0080 },   /* R1801  - AIF1TX2MIX Input 1 Volume */
+       { 0x0000070A, 0x0000 },   /* R1802  - AIF1TX2MIX Input 2 Source */
+       { 0x0000070B, 0x0080 },   /* R1803  - AIF1TX2MIX Input 2 Volume */
+       { 0x0000070C, 0x0000 },   /* R1804  - AIF1TX2MIX Input 3 Source */
+       { 0x0000070D, 0x0080 },   /* R1805  - AIF1TX2MIX Input 3 Volume */
+       { 0x0000070E, 0x0000 },   /* R1806  - AIF1TX2MIX Input 4 Source */
+       { 0x0000070F, 0x0080 },   /* R1807  - AIF1TX2MIX Input 4 Volume */
+       { 0x00000710, 0x0000 },   /* R1808  - AIF1TX3MIX Input 1 Source */
+       { 0x00000711, 0x0080 },   /* R1809  - AIF1TX3MIX Input 1 Volume */
+       { 0x00000712, 0x0000 },   /* R1810  - AIF1TX3MIX Input 2 Source */
+       { 0x00000713, 0x0080 },   /* R1811  - AIF1TX3MIX Input 2 Volume */
+       { 0x00000714, 0x0000 },   /* R1812  - AIF1TX3MIX Input 3 Source */
+       { 0x00000715, 0x0080 },   /* R1813  - AIF1TX3MIX Input 3 Volume */
+       { 0x00000716, 0x0000 },   /* R1814  - AIF1TX3MIX Input 4 Source */
+       { 0x00000717, 0x0080 },   /* R1815  - AIF1TX3MIX Input 4 Volume */
+       { 0x00000718, 0x0000 },   /* R1816  - AIF1TX4MIX Input 1 Source */
+       { 0x00000719, 0x0080 },   /* R1817  - AIF1TX4MIX Input 1 Volume */
+       { 0x0000071A, 0x0000 },   /* R1818  - AIF1TX4MIX Input 2 Source */
+       { 0x0000071B, 0x0080 },   /* R1819  - AIF1TX4MIX Input 2 Volume */
+       { 0x0000071C, 0x0000 },   /* R1820  - AIF1TX4MIX Input 3 Source */
+       { 0x0000071D, 0x0080 },   /* R1821  - AIF1TX4MIX Input 3 Volume */
+       { 0x0000071E, 0x0000 },   /* R1822  - AIF1TX4MIX Input 4 Source */
+       { 0x0000071F, 0x0080 },   /* R1823  - AIF1TX4MIX Input 4 Volume */
+       { 0x00000720, 0x0000 },   /* R1824  - AIF1TX5MIX Input 1 Source */
+       { 0x00000721, 0x0080 },   /* R1825  - AIF1TX5MIX Input 1 Volume */
+       { 0x00000722, 0x0000 },   /* R1826  - AIF1TX5MIX Input 2 Source */
+       { 0x00000723, 0x0080 },   /* R1827  - AIF1TX5MIX Input 2 Volume */
+       { 0x00000724, 0x0000 },   /* R1828  - AIF1TX5MIX Input 3 Source */
+       { 0x00000725, 0x0080 },   /* R1829  - AIF1TX5MIX Input 3 Volume */
+       { 0x00000726, 0x0000 },   /* R1830  - AIF1TX5MIX Input 4 Source */
+       { 0x00000727, 0x0080 },   /* R1831  - AIF1TX5MIX Input 4 Volume */
+       { 0x00000728, 0x0000 },   /* R1832  - AIF1TX6MIX Input 1 Source */
+       { 0x00000729, 0x0080 },   /* R1833  - AIF1TX6MIX Input 1 Volume */
+       { 0x0000072A, 0x0000 },   /* R1834  - AIF1TX6MIX Input 2 Source */
+       { 0x0000072B, 0x0080 },   /* R1835  - AIF1TX6MIX Input 2 Volume */
+       { 0x0000072C, 0x0000 },   /* R1836  - AIF1TX6MIX Input 3 Source */
+       { 0x0000072D, 0x0080 },   /* R1837  - AIF1TX6MIX Input 3 Volume */
+       { 0x0000072E, 0x0000 },   /* R1838  - AIF1TX6MIX Input 4 Source */
+       { 0x0000072F, 0x0080 },   /* R1839  - AIF1TX6MIX Input 4 Volume */
+       { 0x00000730, 0x0000 },   /* R1840  - AIF1TX7MIX Input 1 Source */
+       { 0x00000731, 0x0080 },   /* R1841  - AIF1TX7MIX Input 1 Volume */
+       { 0x00000732, 0x0000 },   /* R1842  - AIF1TX7MIX Input 2 Source */
+       { 0x00000733, 0x0080 },   /* R1843  - AIF1TX7MIX Input 2 Volume */
+       { 0x00000734, 0x0000 },   /* R1844  - AIF1TX7MIX Input 3 Source */
+       { 0x00000735, 0x0080 },   /* R1845  - AIF1TX7MIX Input 3 Volume */
+       { 0x00000736, 0x0000 },   /* R1846  - AIF1TX7MIX Input 4 Source */
+       { 0x00000737, 0x0080 },   /* R1847  - AIF1TX7MIX Input 4 Volume */
+       { 0x00000738, 0x0000 },   /* R1848  - AIF1TX8MIX Input 1 Source */
+       { 0x00000739, 0x0080 },   /* R1849  - AIF1TX8MIX Input 1 Volume */
+       { 0x0000073A, 0x0000 },   /* R1850  - AIF1TX8MIX Input 2 Source */
+       { 0x0000073B, 0x0080 },   /* R1851  - AIF1TX8MIX Input 2 Volume */
+       { 0x0000073C, 0x0000 },   /* R1852  - AIF1TX8MIX Input 3 Source */
+       { 0x0000073D, 0x0080 },   /* R1853  - AIF1TX8MIX Input 3 Volume */
+       { 0x0000073E, 0x0000 },   /* R1854  - AIF1TX8MIX Input 4 Source */
+       { 0x0000073F, 0x0080 },   /* R1855  - AIF1TX8MIX Input 4 Volume */
+       { 0x00000740, 0x0000 },   /* R1856  - AIF2TX1MIX Input 1 Source */
+       { 0x00000741, 0x0080 },   /* R1857  - AIF2TX1MIX Input 1 Volume */
+       { 0x00000742, 0x0000 },   /* R1858  - AIF2TX1MIX Input 2 Source */
+       { 0x00000743, 0x0080 },   /* R1859  - AIF2TX1MIX Input 2 Volume */
+       { 0x00000744, 0x0000 },   /* R1860  - AIF2TX1MIX Input 3 Source */
+       { 0x00000745, 0x0080 },   /* R1861  - AIF2TX1MIX Input 3 Volume */
+       { 0x00000746, 0x0000 },   /* R1862  - AIF2TX1MIX Input 4 Source */
+       { 0x00000747, 0x0080 },   /* R1863  - AIF2TX1MIX Input 4 Volume */
+       { 0x00000748, 0x0000 },   /* R1864  - AIF2TX2MIX Input 1 Source */
+       { 0x00000749, 0x0080 },   /* R1865  - AIF2TX2MIX Input 1 Volume */
+       { 0x0000074A, 0x0000 },   /* R1866  - AIF2TX2MIX Input 2 Source */
+       { 0x0000074B, 0x0080 },   /* R1867  - AIF2TX2MIX Input 2 Volume */
+       { 0x0000074C, 0x0000 },   /* R1868  - AIF2TX2MIX Input 3 Source */
+       { 0x0000074D, 0x0080 },   /* R1869  - AIF2TX2MIX Input 3 Volume */
+       { 0x0000074E, 0x0000 },   /* R1870  - AIF2TX2MIX Input 4 Source */
+       { 0x0000074F, 0x0080 },   /* R1871  - AIF2TX2MIX Input 4 Volume */
+       { 0x00000780, 0x0000 },   /* R1920  - AIF3TX1MIX Input 1 Source */
+       { 0x00000781, 0x0080 },   /* R1921  - AIF3TX1MIX Input 1 Volume */
+       { 0x00000782, 0x0000 },   /* R1922  - AIF3TX1MIX Input 2 Source */
+       { 0x00000783, 0x0080 },   /* R1923  - AIF3TX1MIX Input 2 Volume */
+       { 0x00000784, 0x0000 },   /* R1924  - AIF3TX1MIX Input 3 Source */
+       { 0x00000785, 0x0080 },   /* R1925  - AIF3TX1MIX Input 3 Volume */
+       { 0x00000786, 0x0000 },   /* R1926  - AIF3TX1MIX Input 4 Source */
+       { 0x00000787, 0x0080 },   /* R1927  - AIF3TX1MIX Input 4 Volume */
+       { 0x00000788, 0x0000 },   /* R1928  - AIF3TX2MIX Input 1 Source */
+       { 0x00000789, 0x0080 },   /* R1929  - AIF3TX2MIX Input 1 Volume */
+       { 0x0000078A, 0x0000 },   /* R1930  - AIF3TX2MIX Input 2 Source */
+       { 0x0000078B, 0x0080 },   /* R1931  - AIF3TX2MIX Input 2 Volume */
+       { 0x0000078C, 0x0000 },   /* R1932  - AIF3TX2MIX Input 3 Source */
+       { 0x0000078D, 0x0080 },   /* R1933  - AIF3TX2MIX Input 3 Volume */
+       { 0x0000078E, 0x0000 },   /* R1934  - AIF3TX2MIX Input 4 Source */
+       { 0x0000078F, 0x0080 },   /* R1935  - AIF3TX2MIX Input 4 Volume */
+       { 0x000007C0, 0x0000 },   /* R1984  - SLIMTX1MIX Input 1 Source */
+       { 0x000007C1, 0x0080 },   /* R1985  - SLIMTX1MIX Input 1 Volume */
+       { 0x000007C2, 0x0000 },   /* R1986  - SLIMTX1MIX Input 2 Source */
+       { 0x000007C3, 0x0080 },   /* R1987  - SLIMTX1MIX Input 2 Volume */
+       { 0x000007C4, 0x0000 },   /* R1988  - SLIMTX1MIX Input 3 Source */
+       { 0x000007C5, 0x0080 },   /* R1989  - SLIMTX1MIX Input 3 Volume */
+       { 0x000007C6, 0x0000 },   /* R1990  - SLIMTX1MIX Input 4 Source */
+       { 0x000007C7, 0x0080 },   /* R1991  - SLIMTX1MIX Input 4 Volume */
+       { 0x000007C8, 0x0000 },   /* R1992  - SLIMTX2MIX Input 1 Source */
+       { 0x000007C9, 0x0080 },   /* R1993  - SLIMTX2MIX Input 1 Volume */
+       { 0x000007CA, 0x0000 },   /* R1994  - SLIMTX2MIX Input 2 Source */
+       { 0x000007CB, 0x0080 },   /* R1995  - SLIMTX2MIX Input 2 Volume */
+       { 0x000007CC, 0x0000 },   /* R1996  - SLIMTX2MIX Input 3 Source */
+       { 0x000007CD, 0x0080 },   /* R1997  - SLIMTX2MIX Input 3 Volume */
+       { 0x000007CE, 0x0000 },   /* R1998  - SLIMTX2MIX Input 4 Source */
+       { 0x000007CF, 0x0080 },   /* R1999  - SLIMTX2MIX Input 4 Volume */
+       { 0x000007D0, 0x0000 },   /* R2000  - SLIMTX3MIX Input 1 Source */
+       { 0x000007D1, 0x0080 },   /* R2001  - SLIMTX3MIX Input 1 Volume */
+       { 0x000007D2, 0x0000 },   /* R2002  - SLIMTX3MIX Input 2 Source */
+       { 0x000007D3, 0x0080 },   /* R2003  - SLIMTX3MIX Input 2 Volume */
+       { 0x000007D4, 0x0000 },   /* R2004  - SLIMTX3MIX Input 3 Source */
+       { 0x000007D5, 0x0080 },   /* R2005  - SLIMTX3MIX Input 3 Volume */
+       { 0x000007D6, 0x0000 },   /* R2006  - SLIMTX3MIX Input 4 Source */
+       { 0x000007D7, 0x0080 },   /* R2007  - SLIMTX3MIX Input 4 Volume */
+       { 0x000007D8, 0x0000 },   /* R2008  - SLIMTX4MIX Input 1 Source */
+       { 0x000007D9, 0x0080 },   /* R2009  - SLIMTX4MIX Input 1 Volume */
+       { 0x000007DA, 0x0000 },   /* R2010  - SLIMTX4MIX Input 2 Source */
+       { 0x000007DB, 0x0080 },   /* R2011  - SLIMTX4MIX Input 2 Volume */
+       { 0x000007DC, 0x0000 },   /* R2012  - SLIMTX4MIX Input 3 Source */
+       { 0x000007DD, 0x0080 },   /* R2013  - SLIMTX4MIX Input 3 Volume */
+       { 0x000007DE, 0x0000 },   /* R2014  - SLIMTX4MIX Input 4 Source */
+       { 0x000007DF, 0x0080 },   /* R2015  - SLIMTX4MIX Input 4 Volume */
+       { 0x000007E0, 0x0000 },   /* R2016  - SLIMTX5MIX Input 1 Source */
+       { 0x000007E1, 0x0080 },   /* R2017  - SLIMTX5MIX Input 1 Volume */
+       { 0x000007E2, 0x0000 },   /* R2018  - SLIMTX5MIX Input 2 Source */
+       { 0x000007E3, 0x0080 },   /* R2019  - SLIMTX5MIX Input 2 Volume */
+       { 0x000007E4, 0x0000 },   /* R2020  - SLIMTX5MIX Input 3 Source */
+       { 0x000007E5, 0x0080 },   /* R2021  - SLIMTX5MIX Input 3 Volume */
+       { 0x000007E6, 0x0000 },   /* R2022  - SLIMTX5MIX Input 4 Source */
+       { 0x000007E7, 0x0080 },   /* R2023  - SLIMTX5MIX Input 4 Volume */
+       { 0x000007E8, 0x0000 },   /* R2024  - SLIMTX6MIX Input 1 Source */
+       { 0x000007E9, 0x0080 },   /* R2025  - SLIMTX6MIX Input 1 Volume */
+       { 0x000007EA, 0x0000 },   /* R2026  - SLIMTX6MIX Input 2 Source */
+       { 0x000007EB, 0x0080 },   /* R2027  - SLIMTX6MIX Input 2 Volume */
+       { 0x000007EC, 0x0000 },   /* R2028  - SLIMTX6MIX Input 3 Source */
+       { 0x000007ED, 0x0080 },   /* R2029  - SLIMTX6MIX Input 3 Volume */
+       { 0x000007EE, 0x0000 },   /* R2030  - SLIMTX6MIX Input 4 Source */
+       { 0x000007EF, 0x0080 },   /* R2031  - SLIMTX6MIX Input 4 Volume */
+       { 0x000007F0, 0x0000 },   /* R2032  - SLIMTX7MIX Input 1 Source */
+       { 0x000007F1, 0x0080 },   /* R2033  - SLIMTX7MIX Input 1 Volume */
+       { 0x000007F2, 0x0000 },   /* R2034  - SLIMTX7MIX Input 2 Source */
+       { 0x000007F3, 0x0080 },   /* R2035  - SLIMTX7MIX Input 2 Volume */
+       { 0x000007F4, 0x0000 },   /* R2036  - SLIMTX7MIX Input 3 Source */
+       { 0x000007F5, 0x0080 },   /* R2037  - SLIMTX7MIX Input 3 Volume */
+       { 0x000007F6, 0x0000 },   /* R2038  - SLIMTX7MIX Input 4 Source */
+       { 0x000007F7, 0x0080 },   /* R2039  - SLIMTX7MIX Input 4 Volume */
+       { 0x000007F8, 0x0000 },   /* R2040  - SLIMTX8MIX Input 1 Source */
+       { 0x000007F9, 0x0080 },   /* R2041  - SLIMTX8MIX Input 1 Volume */
+       { 0x000007FA, 0x0000 },   /* R2042  - SLIMTX8MIX Input 2 Source */
+       { 0x000007FB, 0x0080 },   /* R2043  - SLIMTX8MIX Input 2 Volume */
+       { 0x000007FC, 0x0000 },   /* R2044  - SLIMTX8MIX Input 3 Source */
+       { 0x000007FD, 0x0080 },   /* R2045  - SLIMTX8MIX Input 3 Volume */
+       { 0x000007FE, 0x0000 },   /* R2046  - SLIMTX8MIX Input 4 Source */
+       { 0x000007FF, 0x0080 },   /* R2047  - SLIMTX8MIX Input 4 Volume */
+       { 0x00000880, 0x0000 },   /* R2176  - EQ1MIX Input 1 Source */
+       { 0x00000881, 0x0080 },   /* R2177  - EQ1MIX Input 1 Volume */
+       { 0x00000882, 0x0000 },   /* R2178  - EQ1MIX Input 2 Source */
+       { 0x00000883, 0x0080 },   /* R2179  - EQ1MIX Input 2 Volume */
+       { 0x00000884, 0x0000 },   /* R2180  - EQ1MIX Input 3 Source */
+       { 0x00000885, 0x0080 },   /* R2181  - EQ1MIX Input 3 Volume */
+       { 0x00000886, 0x0000 },   /* R2182  - EQ1MIX Input 4 Source */
+       { 0x00000887, 0x0080 },   /* R2183  - EQ1MIX Input 4 Volume */
+       { 0x00000888, 0x0000 },   /* R2184  - EQ2MIX Input 1 Source */
+       { 0x00000889, 0x0080 },   /* R2185  - EQ2MIX Input 1 Volume */
+       { 0x0000088A, 0x0000 },   /* R2186  - EQ2MIX Input 2 Source */
+       { 0x0000088B, 0x0080 },   /* R2187  - EQ2MIX Input 2 Volume */
+       { 0x0000088C, 0x0000 },   /* R2188  - EQ2MIX Input 3 Source */
+       { 0x0000088D, 0x0080 },   /* R2189  - EQ2MIX Input 3 Volume */
+       { 0x0000088E, 0x0000 },   /* R2190  - EQ2MIX Input 4 Source */
+       { 0x0000088F, 0x0080 },   /* R2191  - EQ2MIX Input 4 Volume */
+       { 0x00000890, 0x0000 },   /* R2192  - EQ3MIX Input 1 Source */
+       { 0x00000891, 0x0080 },   /* R2193  - EQ3MIX Input 1 Volume */
+       { 0x00000892, 0x0000 },   /* R2194  - EQ3MIX Input 2 Source */
+       { 0x00000893, 0x0080 },   /* R2195  - EQ3MIX Input 2 Volume */
+       { 0x00000894, 0x0000 },   /* R2196  - EQ3MIX Input 3 Source */
+       { 0x00000895, 0x0080 },   /* R2197  - EQ3MIX Input 3 Volume */
+       { 0x00000896, 0x0000 },   /* R2198  - EQ3MIX Input 4 Source */
+       { 0x00000897, 0x0080 },   /* R2199  - EQ3MIX Input 4 Volume */
+       { 0x00000898, 0x0000 },   /* R2200  - EQ4MIX Input 1 Source */
+       { 0x00000899, 0x0080 },   /* R2201  - EQ4MIX Input 1 Volume */
+       { 0x0000089A, 0x0000 },   /* R2202  - EQ4MIX Input 2 Source */
+       { 0x0000089B, 0x0080 },   /* R2203  - EQ4MIX Input 2 Volume */
+       { 0x0000089C, 0x0000 },   /* R2204  - EQ4MIX Input 3 Source */
+       { 0x0000089D, 0x0080 },   /* R2205  - EQ4MIX Input 3 Volume */
+       { 0x0000089E, 0x0000 },   /* R2206  - EQ4MIX Input 4 Source */
+       { 0x0000089F, 0x0080 },   /* R2207  - EQ4MIX Input 4 Volume */
+       { 0x000008C0, 0x0000 },   /* R2240  - DRC1LMIX Input 1 Source */
+       { 0x000008C1, 0x0080 },   /* R2241  - DRC1LMIX Input 1 Volume */
+       { 0x000008C2, 0x0000 },   /* R2242  - DRC1LMIX Input 2 Source */
+       { 0x000008C3, 0x0080 },   /* R2243  - DRC1LMIX Input 2 Volume */
+       { 0x000008C4, 0x0000 },   /* R2244  - DRC1LMIX Input 3 Source */
+       { 0x000008C5, 0x0080 },   /* R2245  - DRC1LMIX Input 3 Volume */
+       { 0x000008C6, 0x0000 },   /* R2246  - DRC1LMIX Input 4 Source */
+       { 0x000008C7, 0x0080 },   /* R2247  - DRC1LMIX Input 4 Volume */
+       { 0x000008C8, 0x0000 },   /* R2248  - DRC1RMIX Input 1 Source */
+       { 0x000008C9, 0x0080 },   /* R2249  - DRC1RMIX Input 1 Volume */
+       { 0x000008CA, 0x0000 },   /* R2250  - DRC1RMIX Input 2 Source */
+       { 0x000008CB, 0x0080 },   /* R2251  - DRC1RMIX Input 2 Volume */
+       { 0x000008CC, 0x0000 },   /* R2252  - DRC1RMIX Input 3 Source */
+       { 0x000008CD, 0x0080 },   /* R2253  - DRC1RMIX Input 3 Volume */
+       { 0x000008CE, 0x0000 },   /* R2254  - DRC1RMIX Input 4 Source */
+       { 0x000008CF, 0x0080 },   /* R2255  - DRC1RMIX Input 4 Volume */
+       { 0x00000900, 0x0000 },   /* R2304  - HPLP1MIX Input 1 Source */
+       { 0x00000901, 0x0080 },   /* R2305  - HPLP1MIX Input 1 Volume */
+       { 0x00000902, 0x0000 },   /* R2306  - HPLP1MIX Input 2 Source */
+       { 0x00000903, 0x0080 },   /* R2307  - HPLP1MIX Input 2 Volume */
+       { 0x00000904, 0x0000 },   /* R2308  - HPLP1MIX Input 3 Source */
+       { 0x00000905, 0x0080 },   /* R2309  - HPLP1MIX Input 3 Volume */
+       { 0x00000906, 0x0000 },   /* R2310  - HPLP1MIX Input 4 Source */
+       { 0x00000907, 0x0080 },   /* R2311  - HPLP1MIX Input 4 Volume */
+       { 0x00000908, 0x0000 },   /* R2312  - HPLP2MIX Input 1 Source */
+       { 0x00000909, 0x0080 },   /* R2313  - HPLP2MIX Input 1 Volume */
+       { 0x0000090A, 0x0000 },   /* R2314  - HPLP2MIX Input 2 Source */
+       { 0x0000090B, 0x0080 },   /* R2315  - HPLP2MIX Input 2 Volume */
+       { 0x0000090C, 0x0000 },   /* R2316  - HPLP2MIX Input 3 Source */
+       { 0x0000090D, 0x0080 },   /* R2317  - HPLP2MIX Input 3 Volume */
+       { 0x0000090E, 0x0000 },   /* R2318  - HPLP2MIX Input 4 Source */
+       { 0x0000090F, 0x0080 },   /* R2319  - HPLP2MIX Input 4 Volume */
+       { 0x00000910, 0x0000 },   /* R2320  - HPLP3MIX Input 1 Source */
+       { 0x00000911, 0x0080 },   /* R2321  - HPLP3MIX Input 1 Volume */
+       { 0x00000912, 0x0000 },   /* R2322  - HPLP3MIX Input 2 Source */
+       { 0x00000913, 0x0080 },   /* R2323  - HPLP3MIX Input 2 Volume */
+       { 0x00000914, 0x0000 },   /* R2324  - HPLP3MIX Input 3 Source */
+       { 0x00000915, 0x0080 },   /* R2325  - HPLP3MIX Input 3 Volume */
+       { 0x00000916, 0x0000 },   /* R2326  - HPLP3MIX Input 4 Source */
+       { 0x00000917, 0x0080 },   /* R2327  - HPLP3MIX Input 4 Volume */
+       { 0x00000918, 0x0000 },   /* R2328  - HPLP4MIX Input 1 Source */
+       { 0x00000919, 0x0080 },   /* R2329  - HPLP4MIX Input 1 Volume */
+       { 0x0000091A, 0x0000 },   /* R2330  - HPLP4MIX Input 2 Source */
+       { 0x0000091B, 0x0080 },   /* R2331  - HPLP4MIX Input 2 Volume */
+       { 0x0000091C, 0x0000 },   /* R2332  - HPLP4MIX Input 3 Source */
+       { 0x0000091D, 0x0080 },   /* R2333  - HPLP4MIX Input 3 Volume */
+       { 0x0000091E, 0x0000 },   /* R2334  - HPLP4MIX Input 4 Source */
+       { 0x0000091F, 0x0080 },   /* R2335  - HPLP4MIX Input 4 Volume */
+       { 0x00000940, 0x0000 },   /* R2368  - DSP1LMIX Input 1 Source */
+       { 0x00000941, 0x0080 },   /* R2369  - DSP1LMIX Input 1 Volume */
+       { 0x00000942, 0x0000 },   /* R2370  - DSP1LMIX Input 2 Source */
+       { 0x00000943, 0x0080 },   /* R2371  - DSP1LMIX Input 2 Volume */
+       { 0x00000944, 0x0000 },   /* R2372  - DSP1LMIX Input 3 Source */
+       { 0x00000945, 0x0080 },   /* R2373  - DSP1LMIX Input 3 Volume */
+       { 0x00000946, 0x0000 },   /* R2374  - DSP1LMIX Input 4 Source */
+       { 0x00000947, 0x0080 },   /* R2375  - DSP1LMIX Input 4 Volume */
+       { 0x00000948, 0x0000 },   /* R2376  - DSP1RMIX Input 1 Source */
+       { 0x00000949, 0x0080 },   /* R2377  - DSP1RMIX Input 1 Volume */
+       { 0x0000094A, 0x0000 },   /* R2378  - DSP1RMIX Input 2 Source */
+       { 0x0000094B, 0x0080 },   /* R2379  - DSP1RMIX Input 2 Volume */
+       { 0x0000094C, 0x0000 },   /* R2380  - DSP1RMIX Input 3 Source */
+       { 0x0000094D, 0x0080 },   /* R2381  - DSP1RMIX Input 3 Volume */
+       { 0x0000094E, 0x0000 },   /* R2382  - DSP1RMIX Input 4 Source */
+       { 0x0000094F, 0x0080 },   /* R2383  - DSP1RMIX Input 4 Volume */
+       { 0x00000950, 0x0000 },   /* R2384  - DSP1AUX1MIX Input 1 Source */
+       { 0x00000958, 0x0000 },   /* R2392  - DSP1AUX2MIX Input 1 Source */
+       { 0x00000960, 0x0000 },   /* R2400  - DSP1AUX3MIX Input 1 Source */
+       { 0x00000968, 0x0000 },   /* R2408  - DSP1AUX4MIX Input 1 Source */
+       { 0x00000970, 0x0000 },   /* R2416  - DSP1AUX5MIX Input 1 Source */
+       { 0x00000978, 0x0000 },   /* R2424  - DSP1AUX6MIX Input 1 Source */
+       { 0x00000A80, 0x0000 },   /* R2688  - ASRC1LMIX Input 1 Source */
+       { 0x00000A88, 0x0000 },   /* R2696  - ASRC1RMIX Input 1 Source */
+       { 0x00000A90, 0x0000 },   /* R2704  - ASRC2LMIX Input 1 Source */
+       { 0x00000A98, 0x0000 },   /* R2712  - ASRC2RMIX Input 1 Source */
+       { 0x00000B00, 0x0000 },   /* R2816  - ISRC1DEC1MIX Input 1 Source */
+       { 0x00000B08, 0x0000 },   /* R2824  - ISRC1DEC2MIX Input 1 Source */
+       { 0x00000B20, 0x0000 },   /* R2848  - ISRC1INT1MIX Input 1 Source */
+       { 0x00000B28, 0x0000 },   /* R2856  - ISRC1INT2MIX Input 1 Source */
+       { 0x00000B40, 0x0000 },   /* R2880  - ISRC2DEC1MIX Input 1 Source */
+       { 0x00000B48, 0x0000 },   /* R2888  - ISRC2DEC2MIX Input 1 Source */
+       { 0x00000B60, 0x0000 },   /* R2912  - ISRC2INT1MIX Input 1 Source */
+       { 0x00000B68, 0x0000 },   /* R2920  - ISRC2INT2MIX Input 1 Source */
+       { 0x00000C00, 0xA101 },   /* R3072  - GPIO1 CTRL */
+       { 0x00000C01, 0xA101 },   /* R3073  - GPIO2 CTRL */
+       { 0x00000C02, 0xA101 },   /* R3074  - GPIO3 CTRL */
+       { 0x00000C03, 0xA101 },   /* R3075  - GPIO4 CTRL */
+       { 0x00000C04, 0xA101 },   /* R3076  - GPIO5 CTRL */
+       { 0x00000C0F, 0x0400 },   /* R3087  - IRQ CTRL 1 */
+       { 0x00000C10, 0x1000 },   /* R3088  - GPIO Debounce Config */
+       { 0x00000C20, 0x8002 },   /* R3104  - Misc Pad Ctrl 1 */
        { 0x00000C21, 0x0001 },   /* R3105  - Misc Pad Ctrl 2 */
-       { 0x00000C22, 0x0000 },   /* R3106  - Misc Pad Ctrl 3 */ 
-       { 0x00000C23, 0x0000 },   /* R3107  - Misc Pad Ctrl 4 */ 
-       { 0x00000C24, 0x0000 },   /* R3108  - Misc Pad Ctrl 5 */ 
-       { 0x00000C25, 0x0000 },   /* R3109  - Misc Pad Ctrl 6 */ 
-       { 0x00000D08, 0xFFFF },   /* R3336  - Interrupt Status 1 Mask */ 
-       { 0x00000D09, 0xFFFF },   /* R3337  - Interrupt Status 2 Mask */ 
-       { 0x00000D0A, 0xFFFF },   /* R3338  - Interrupt Status 3 Mask */ 
-       { 0x00000D0B, 0xFFFF },   /* R3339  - Interrupt Status 4 Mask */ 
-       { 0x00000D0C, 0xFEFF },   /* R3340  - Interrupt Status 5 Mask */ 
-       { 0x00000D0F, 0x0000 },   /* R3343  - Interrupt Control */ 
-       { 0x00000D18, 0xFFFF },   /* R3352  - IRQ2 Status 1 Mask */ 
-       { 0x00000D19, 0xFFFF },   /* R3353  - IRQ2 Status 2 Mask */ 
-       { 0x00000D1A, 0xFFFF },   /* R3354  - IRQ2 Status 3 Mask */ 
-       { 0x00000D1B, 0xFFFF },   /* R3355  - IRQ2 Status 4 Mask */ 
-       { 0x00000D1C, 0xFFFF },   /* R3356  - IRQ2 Status 5 Mask */ 
-       { 0x00000D1F, 0x0000 },   /* R3359  - IRQ2 Control */ 
+       { 0x00000C22, 0x0000 },   /* R3106  - Misc Pad Ctrl 3 */
+       { 0x00000C23, 0x0000 },   /* R3107  - Misc Pad Ctrl 4 */
+       { 0x00000C24, 0x0000 },   /* R3108  - Misc Pad Ctrl 5 */
+       { 0x00000C25, 0x0000 },   /* R3109  - Misc Pad Ctrl 6 */
+       { 0x00000D08, 0xFFFF },   /* R3336  - Interrupt Status 1 Mask */
+       { 0x00000D09, 0xFFFF },   /* R3337  - Interrupt Status 2 Mask */
+       { 0x00000D0A, 0xFFFF },   /* R3338  - Interrupt Status 3 Mask */
+       { 0x00000D0B, 0xFFFF },   /* R3339  - Interrupt Status 4 Mask */
+       { 0x00000D0C, 0xFEFF },   /* R3340  - Interrupt Status 5 Mask */
+       { 0x00000D0F, 0x0000 },   /* R3343  - Interrupt Control */
+       { 0x00000D18, 0xFFFF },   /* R3352  - IRQ2 Status 1 Mask */
+       { 0x00000D19, 0xFFFF },   /* R3353  - IRQ2 Status 2 Mask */
+       { 0x00000D1A, 0xFFFF },   /* R3354  - IRQ2 Status 3 Mask */
+       { 0x00000D1B, 0xFFFF },   /* R3355  - IRQ2 Status 4 Mask */
+       { 0x00000D1C, 0xFFFF },   /* R3356  - IRQ2 Status 5 Mask */
+       { 0x00000D1F, 0x0000 },   /* R3359  - IRQ2 Control */
        { 0x00000D41, 0x0000 },   /* R3393  - ADSP2 IRQ0 */
-       { 0x00000D53, 0xFFFF },   /* R3411  - AOD IRQ Mask IRQ1 */ 
-       { 0x00000D54, 0xFFFF },   /* R3412  - AOD IRQ Mask IRQ2 */ 
-       { 0x00000D56, 0x0000 },   /* R3414  - Jack detect debounce */ 
-       { 0x00000E00, 0x0000 },   /* R3584  - FX_Ctrl1 */ 
-       { 0x00000E10, 0x6318 },   /* R3600  - EQ1_1 */ 
-       { 0x00000E11, 0x6300 },   /* R3601  - EQ1_2 */ 
-       { 0x00000E12, 0x0FC8 },   /* R3602  - EQ1_3 */ 
-       { 0x00000E13, 0x03FE },   /* R3603  - EQ1_4 */ 
-       { 0x00000E14, 0x00E0 },   /* R3604  - EQ1_5 */ 
-       { 0x00000E15, 0x1EC4 },   /* R3605  - EQ1_6 */ 
-       { 0x00000E16, 0xF136 },   /* R3606  - EQ1_7 */ 
-       { 0x00000E17, 0x0409 },   /* R3607  - EQ1_8 */ 
-       { 0x00000E18, 0x04CC },   /* R3608  - EQ1_9 */ 
-       { 0x00000E19, 0x1C9B },   /* R3609  - EQ1_10 */ 
-       { 0x00000E1A, 0xF337 },   /* R3610  - EQ1_11 */ 
-       { 0x00000E1B, 0x040B },   /* R3611  - EQ1_12 */ 
-       { 0x00000E1C, 0x0CBB },   /* R3612  - EQ1_13 */ 
-       { 0x00000E1D, 0x16F8 },   /* R3613  - EQ1_14 */ 
-       { 0x00000E1E, 0xF7D9 },   /* R3614  - EQ1_15 */ 
-       { 0x00000E1F, 0x040A },   /* R3615  - EQ1_16 */ 
-       { 0x00000E20, 0x1F14 },   /* R3616  - EQ1_17 */ 
-       { 0x00000E21, 0x058C },   /* R3617  - EQ1_18 */ 
-       { 0x00000E22, 0x0563 },   /* R3618  - EQ1_19 */ 
-       { 0x00000E23, 0x4000 },   /* R3619  - EQ1_20 */ 
-       { 0x00000E24, 0x0B75 },   /* R3620  - EQ1_21 */ 
-       { 0x00000E26, 0x6318 },   /* R3622  - EQ2_1 */ 
-       { 0x00000E27, 0x6300 },   /* R3623  - EQ2_2 */ 
-       { 0x00000E28, 0x0FC8 },   /* R3624  - EQ2_3 */ 
-       { 0x00000E29, 0x03FE },   /* R3625  - EQ2_4 */ 
-       { 0x00000E2A, 0x00E0 },   /* R3626  - EQ2_5 */ 
-       { 0x00000E2B, 0x1EC4 },   /* R3627  - EQ2_6 */ 
-       { 0x00000E2C, 0xF136 },   /* R3628  - EQ2_7 */ 
-       { 0x00000E2D, 0x0409 },   /* R3629  - EQ2_8 */ 
-       { 0x00000E2E, 0x04CC },   /* R3630  - EQ2_9 */ 
-       { 0x00000E2F, 0x1C9B },   /* R3631  - EQ2_10 */ 
-       { 0x00000E30, 0xF337 },   /* R3632  - EQ2_11 */ 
-       { 0x00000E31, 0x040B },   /* R3633  - EQ2_12 */ 
-       { 0x00000E32, 0x0CBB },   /* R3634  - EQ2_13 */ 
-       { 0x00000E33, 0x16F8 },   /* R3635  - EQ2_14 */ 
-       { 0x00000E34, 0xF7D9 },   /* R3636  - EQ2_15 */ 
-       { 0x00000E35, 0x040A },   /* R3637  - EQ2_16 */ 
-       { 0x00000E36, 0x1F14 },   /* R3638  - EQ2_17 */ 
-       { 0x00000E37, 0x058C },   /* R3639  - EQ2_18 */ 
-       { 0x00000E38, 0x0563 },   /* R3640  - EQ2_19 */ 
-       { 0x00000E39, 0x4000 },   /* R3641  - EQ2_20 */ 
-       { 0x00000E3A, 0x0B75 },   /* R3642  - EQ2_21 */ 
-       { 0x00000E3C, 0x6318 },   /* R3644  - EQ3_1 */ 
-       { 0x00000E3D, 0x6300 },   /* R3645  - EQ3_2 */ 
-       { 0x00000E3E, 0x0FC8 },   /* R3646  - EQ3_3 */ 
-       { 0x00000E3F, 0x03FE },   /* R3647  - EQ3_4 */ 
-       { 0x00000E40, 0x00E0 },   /* R3648  - EQ3_5 */ 
-       { 0x00000E41, 0x1EC4 },   /* R3649  - EQ3_6 */ 
-       { 0x00000E42, 0xF136 },   /* R3650  - EQ3_7 */ 
-       { 0x00000E43, 0x0409 },   /* R3651  - EQ3_8 */ 
-       { 0x00000E44, 0x04CC },   /* R3652  - EQ3_9 */ 
-       { 0x00000E45, 0x1C9B },   /* R3653  - EQ3_10 */ 
-       { 0x00000E46, 0xF337 },   /* R3654  - EQ3_11 */ 
-       { 0x00000E47, 0x040B },   /* R3655  - EQ3_12 */ 
-       { 0x00000E48, 0x0CBB },   /* R3656  - EQ3_13 */ 
-       { 0x00000E49, 0x16F8 },   /* R3657  - EQ3_14 */ 
-       { 0x00000E4A, 0xF7D9 },   /* R3658  - EQ3_15 */ 
-       { 0x00000E4B, 0x040A },   /* R3659  - EQ3_16 */ 
-       { 0x00000E4C, 0x1F14 },   /* R3660  - EQ3_17 */ 
-       { 0x00000E4D, 0x058C },   /* R3661  - EQ3_18 */ 
-       { 0x00000E4E, 0x0563 },   /* R3662  - EQ3_19 */ 
-       { 0x00000E4F, 0x4000 },   /* R3663  - EQ3_20 */ 
-       { 0x00000E50, 0x0B75 },   /* R3664  - EQ3_21 */ 
-       { 0x00000E52, 0x6318 },   /* R3666  - EQ4_1 */ 
-       { 0x00000E53, 0x6300 },   /* R3667  - EQ4_2 */ 
-       { 0x00000E54, 0x0FC8 },   /* R3668  - EQ4_3 */ 
-       { 0x00000E55, 0x03FE },   /* R3669  - EQ4_4 */ 
-       { 0x00000E56, 0x00E0 },   /* R3670  - EQ4_5 */ 
-       { 0x00000E57, 0x1EC4 },   /* R3671  - EQ4_6 */ 
-       { 0x00000E58, 0xF136 },   /* R3672  - EQ4_7 */ 
-       { 0x00000E59, 0x0409 },   /* R3673  - EQ4_8 */ 
-       { 0x00000E5A, 0x04CC },   /* R3674  - EQ4_9 */ 
-       { 0x00000E5B, 0x1C9B },   /* R3675  - EQ4_10 */ 
-       { 0x00000E5C, 0xF337 },   /* R3676  - EQ4_11 */ 
-       { 0x00000E5D, 0x040B },   /* R3677  - EQ4_12 */ 
-       { 0x00000E5E, 0x0CBB },   /* R3678  - EQ4_13 */ 
-       { 0x00000E5F, 0x16F8 },   /* R3679  - EQ4_14 */ 
-       { 0x00000E60, 0xF7D9 },   /* R3680  - EQ4_15 */ 
-       { 0x00000E61, 0x040A },   /* R3681  - EQ4_16 */ 
-       { 0x00000E62, 0x1F14 },   /* R3682  - EQ4_17 */ 
-       { 0x00000E63, 0x058C },   /* R3683  - EQ4_18 */ 
-       { 0x00000E64, 0x0563 },   /* R3684  - EQ4_19 */ 
-       { 0x00000E65, 0x4000 },   /* R3685  - EQ4_20 */ 
-       { 0x00000E66, 0x0B75 },   /* R3686  - EQ4_21 */ 
-       { 0x00000E80, 0x0018 },   /* R3712  - DRC1 ctrl1 */ 
-       { 0x00000E81, 0x0933 },   /* R3713  - DRC1 ctrl2 */ 
-       { 0x00000E82, 0x0018 },   /* R3714  - DRC1 ctrl3 */ 
-       { 0x00000E83, 0x0000 },   /* R3715  - DRC1 ctrl4 */ 
-       { 0x00000E84, 0x0000 },   /* R3716  - DRC1 ctrl5 */ 
-       { 0x00000EC0, 0x0000 },   /* R3776  - HPLPF1_1 */ 
-       { 0x00000EC1, 0x0000 },   /* R3777  - HPLPF1_2 */ 
-       { 0x00000EC4, 0x0000 },   /* R3780  - HPLPF2_1 */ 
-       { 0x00000EC5, 0x0000 },   /* R3781  - HPLPF2_2 */ 
-       { 0x00000EC8, 0x0000 },   /* R3784  - HPLPF3_1 */ 
-       { 0x00000EC9, 0x0000 },   /* R3785  - HPLPF3_2 */ 
-       { 0x00000ECC, 0x0000 },   /* R3788  - HPLPF4_1 */ 
-       { 0x00000ECD, 0x0000 },   /* R3789  - HPLPF4_2 */ 
-       { 0x00000EE0, 0x0000 },   /* R3808  - ASRC_ENABLE */ 
-       { 0x00000EE2, 0x0000 },   /* R3810  - ASRC_RATE1 */ 
+       { 0x00000D53, 0xFFFF },   /* R3411  - AOD IRQ Mask IRQ1 */
+       { 0x00000D54, 0xFFFF },   /* R3412  - AOD IRQ Mask IRQ2 */
+       { 0x00000D56, 0x0000 },   /* R3414  - Jack detect debounce */
+       { 0x00000E00, 0x0000 },   /* R3584  - FX_Ctrl1 */
+       { 0x00000E10, 0x6318 },   /* R3600  - EQ1_1 */
+       { 0x00000E11, 0x6300 },   /* R3601  - EQ1_2 */
+       { 0x00000E12, 0x0FC8 },   /* R3602  - EQ1_3 */
+       { 0x00000E13, 0x03FE },   /* R3603  - EQ1_4 */
+       { 0x00000E14, 0x00E0 },   /* R3604  - EQ1_5 */
+       { 0x00000E15, 0x1EC4 },   /* R3605  - EQ1_6 */
+       { 0x00000E16, 0xF136 },   /* R3606  - EQ1_7 */
+       { 0x00000E17, 0x0409 },   /* R3607  - EQ1_8 */
+       { 0x00000E18, 0x04CC },   /* R3608  - EQ1_9 */
+       { 0x00000E19, 0x1C9B },   /* R3609  - EQ1_10 */
+       { 0x00000E1A, 0xF337 },   /* R3610  - EQ1_11 */
+       { 0x00000E1B, 0x040B },   /* R3611  - EQ1_12 */
+       { 0x00000E1C, 0x0CBB },   /* R3612  - EQ1_13 */
+       { 0x00000E1D, 0x16F8 },   /* R3613  - EQ1_14 */
+       { 0x00000E1E, 0xF7D9 },   /* R3614  - EQ1_15 */
+       { 0x00000E1F, 0x040A },   /* R3615  - EQ1_16 */
+       { 0x00000E20, 0x1F14 },   /* R3616  - EQ1_17 */
+       { 0x00000E21, 0x058C },   /* R3617  - EQ1_18 */
+       { 0x00000E22, 0x0563 },   /* R3618  - EQ1_19 */
+       { 0x00000E23, 0x4000 },   /* R3619  - EQ1_20 */
+       { 0x00000E24, 0x0B75 },   /* R3620  - EQ1_21 */
+       { 0x00000E26, 0x6318 },   /* R3622  - EQ2_1 */
+       { 0x00000E27, 0x6300 },   /* R3623  - EQ2_2 */
+       { 0x00000E28, 0x0FC8 },   /* R3624  - EQ2_3 */
+       { 0x00000E29, 0x03FE },   /* R3625  - EQ2_4 */
+       { 0x00000E2A, 0x00E0 },   /* R3626  - EQ2_5 */
+       { 0x00000E2B, 0x1EC4 },   /* R3627  - EQ2_6 */
+       { 0x00000E2C, 0xF136 },   /* R3628  - EQ2_7 */
+       { 0x00000E2D, 0x0409 },   /* R3629  - EQ2_8 */
+       { 0x00000E2E, 0x04CC },   /* R3630  - EQ2_9 */
+       { 0x00000E2F, 0x1C9B },   /* R3631  - EQ2_10 */
+       { 0x00000E30, 0xF337 },   /* R3632  - EQ2_11 */
+       { 0x00000E31, 0x040B },   /* R3633  - EQ2_12 */
+       { 0x00000E32, 0x0CBB },   /* R3634  - EQ2_13 */
+       { 0x00000E33, 0x16F8 },   /* R3635  - EQ2_14 */
+       { 0x00000E34, 0xF7D9 },   /* R3636  - EQ2_15 */
+       { 0x00000E35, 0x040A },   /* R3637  - EQ2_16 */
+       { 0x00000E36, 0x1F14 },   /* R3638  - EQ2_17 */
+       { 0x00000E37, 0x058C },   /* R3639  - EQ2_18 */
+       { 0x00000E38, 0x0563 },   /* R3640  - EQ2_19 */
+       { 0x00000E39, 0x4000 },   /* R3641  - EQ2_20 */
+       { 0x00000E3A, 0x0B75 },   /* R3642  - EQ2_21 */
+       { 0x00000E3C, 0x6318 },   /* R3644  - EQ3_1 */
+       { 0x00000E3D, 0x6300 },   /* R3645  - EQ3_2 */
+       { 0x00000E3E, 0x0FC8 },   /* R3646  - EQ3_3 */
+       { 0x00000E3F, 0x03FE },   /* R3647  - EQ3_4 */
+       { 0x00000E40, 0x00E0 },   /* R3648  - EQ3_5 */
+       { 0x00000E41, 0x1EC4 },   /* R3649  - EQ3_6 */
+       { 0x00000E42, 0xF136 },   /* R3650  - EQ3_7 */
+       { 0x00000E43, 0x0409 },   /* R3651  - EQ3_8 */
+       { 0x00000E44, 0x04CC },   /* R3652  - EQ3_9 */
+       { 0x00000E45, 0x1C9B },   /* R3653  - EQ3_10 */
+       { 0x00000E46, 0xF337 },   /* R3654  - EQ3_11 */
+       { 0x00000E47, 0x040B },   /* R3655  - EQ3_12 */
+       { 0x00000E48, 0x0CBB },   /* R3656  - EQ3_13 */
+       { 0x00000E49, 0x16F8 },   /* R3657  - EQ3_14 */
+       { 0x00000E4A, 0xF7D9 },   /* R3658  - EQ3_15 */
+       { 0x00000E4B, 0x040A },   /* R3659  - EQ3_16 */
+       { 0x00000E4C, 0x1F14 },   /* R3660  - EQ3_17 */
+       { 0x00000E4D, 0x058C },   /* R3661  - EQ3_18 */
+       { 0x00000E4E, 0x0563 },   /* R3662  - EQ3_19 */
+       { 0x00000E4F, 0x4000 },   /* R3663  - EQ3_20 */
+       { 0x00000E50, 0x0B75 },   /* R3664  - EQ3_21 */
+       { 0x00000E52, 0x6318 },   /* R3666  - EQ4_1 */
+       { 0x00000E53, 0x6300 },   /* R3667  - EQ4_2 */
+       { 0x00000E54, 0x0FC8 },   /* R3668  - EQ4_3 */
+       { 0x00000E55, 0x03FE },   /* R3669  - EQ4_4 */
+       { 0x00000E56, 0x00E0 },   /* R3670  - EQ4_5 */
+       { 0x00000E57, 0x1EC4 },   /* R3671  - EQ4_6 */
+       { 0x00000E58, 0xF136 },   /* R3672  - EQ4_7 */
+       { 0x00000E59, 0x0409 },   /* R3673  - EQ4_8 */
+       { 0x00000E5A, 0x04CC },   /* R3674  - EQ4_9 */
+       { 0x00000E5B, 0x1C9B },   /* R3675  - EQ4_10 */
+       { 0x00000E5C, 0xF337 },   /* R3676  - EQ4_11 */
+       { 0x00000E5D, 0x040B },   /* R3677  - EQ4_12 */
+       { 0x00000E5E, 0x0CBB },   /* R3678  - EQ4_13 */
+       { 0x00000E5F, 0x16F8 },   /* R3679  - EQ4_14 */
+       { 0x00000E60, 0xF7D9 },   /* R3680  - EQ4_15 */
+       { 0x00000E61, 0x040A },   /* R3681  - EQ4_16 */
+       { 0x00000E62, 0x1F14 },   /* R3682  - EQ4_17 */
+       { 0x00000E63, 0x058C },   /* R3683  - EQ4_18 */
+       { 0x00000E64, 0x0563 },   /* R3684  - EQ4_19 */
+       { 0x00000E65, 0x4000 },   /* R3685  - EQ4_20 */
+       { 0x00000E66, 0x0B75 },   /* R3686  - EQ4_21 */
+       { 0x00000E80, 0x0018 },   /* R3712  - DRC1 ctrl1 */
+       { 0x00000E81, 0x0933 },   /* R3713  - DRC1 ctrl2 */
+       { 0x00000E82, 0x0018 },   /* R3714  - DRC1 ctrl3 */
+       { 0x00000E83, 0x0000 },   /* R3715  - DRC1 ctrl4 */
+       { 0x00000E84, 0x0000 },   /* R3716  - DRC1 ctrl5 */
+       { 0x00000EC0, 0x0000 },   /* R3776  - HPLPF1_1 */
+       { 0x00000EC1, 0x0000 },   /* R3777  - HPLPF1_2 */
+       { 0x00000EC4, 0x0000 },   /* R3780  - HPLPF2_1 */
+       { 0x00000EC5, 0x0000 },   /* R3781  - HPLPF2_2 */
+       { 0x00000EC8, 0x0000 },   /* R3784  - HPLPF3_1 */
+       { 0x00000EC9, 0x0000 },   /* R3785  - HPLPF3_2 */
+       { 0x00000ECC, 0x0000 },   /* R3788  - HPLPF4_1 */
+       { 0x00000ECD, 0x0000 },   /* R3789  - HPLPF4_2 */
+       { 0x00000EE0, 0x0000 },   /* R3808  - ASRC_ENABLE */
+       { 0x00000EE2, 0x0000 },   /* R3810  - ASRC_RATE1 */
        { 0x00000EE3, 0x4000 },   /* R3811  - ASRC_RATE2 */
-       { 0x00000EF0, 0x0000 },   /* R3824  - ISRC 1 CTRL 1 */ 
-       { 0x00000EF1, 0x0000 },   /* R3825  - ISRC 1 CTRL 2 */ 
-       { 0x00000EF2, 0x0000 },   /* R3826  - ISRC 1 CTRL 3 */ 
-       { 0x00000EF3, 0x0000 },   /* R3827  - ISRC 2 CTRL 1 */ 
-       { 0x00000EF4, 0x0000 },   /* R3828  - ISRC 2 CTRL 2 */ 
-       { 0x00000EF5, 0x0000 },   /* R3829  - ISRC 2 CTRL 3 */ 
-       { 0x00001100, 0x0010 },   /* R4352  - DSP1 Control 1 */ 
+       { 0x00000EF0, 0x0000 },   /* R3824  - ISRC 1 CTRL 1 */
+       { 0x00000EF1, 0x0000 },   /* R3825  - ISRC 1 CTRL 2 */
+       { 0x00000EF2, 0x0000 },   /* R3826  - ISRC 1 CTRL 3 */
+       { 0x00000EF3, 0x0000 },   /* R3827  - ISRC 2 CTRL 1 */
+       { 0x00000EF4, 0x0000 },   /* R3828  - ISRC 2 CTRL 2 */
+       { 0x00000EF5, 0x0000 },   /* R3829  - ISRC 2 CTRL 3 */
+       { 0x00001100, 0x0010 },   /* R4352  - DSP1 Control 1 */
 };
 
 static bool wm5102_readable_register(struct device *dev, unsigned int reg)
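
The table above is a regmap reg_default array: each entry pairs a register address with its power-on value, and the hunks only strip trailing whitespace without changing any value. A minimal sketch of how such a table plugs into a regmap configuration; the struct and field names are the stock regmap API, while the config name, the two sample entries, and the cache choice are illustrative assumptions:

	#include <linux/regmap.h>

	static const struct reg_default example_reg_default[] = {
		{ 0x00000C00, 0xA101 },	/* GPIO1 CTRL, value as in the table above */
		{ 0x00001100, 0x0010 },	/* DSP1 Control 1 */
	};

	static const struct regmap_config example_regmap = {
		.reg_bits         = 32,
		.val_bits         = 16,
		.max_register     = 0x1100,		/* assumed upper bound */
		.readable_reg     = wm5102_readable_register,
		.reg_defaults     = example_reg_default,
		.num_reg_defaults = ARRAY_SIZE(example_reg_default),
		.cache_type       = REGCACHE_RBTREE,	/* cache type is an assumption */
	};
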
index 8588dbad330119149ad112c0bf493e1c979d59a3..953d0790ffd566e967058f7eeb640ff51377cf45 100644 (file)
@@ -406,8 +406,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
                goto err;
        }
 
-       ret = regulator_bulk_enable(wm8994->num_supplies,
-                                   wm8994->supplies);
+       ret = regulator_bulk_enable(wm8994->num_supplies, wm8994->supplies);
        if (ret != 0) {
                dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
                goto err_regulator_free;
@@ -612,8 +611,7 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
 {
        pm_runtime_disable(wm8994->dev);
        wm8994_irq_exit(wm8994);
-       regulator_bulk_disable(wm8994->num_supplies,
-                              wm8994->supplies);
+       regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies);
        regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
        mfd_remove_devices(wm8994->dev);
 }
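
The two hunks above only rejoin argument lists that no longer need wrapping; the underlying pattern is the bulk regulator API, where one count/array pair flows through enable, disable, and free. A hedged lifecycle sketch (n and supplies stand in for the wm8994 fields; error handling elided):

	#include <linux/regulator/consumer.h>

	static int example_power_cycle(int n, struct regulator_bulk_data *supplies)
	{
		int ret;

		ret = regulator_bulk_enable(n, supplies);	/* enables all n at once */
		if (ret)
			return ret;
		/* ... device is powered here ... */
		regulator_bulk_disable(n, supplies);
		regulator_bulk_free(n, supplies);		/* releases the handles */
		return 0;
	}
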
index 543eadd230e55c8918c45c3ef3b6428cbb73ed97..1076b9d89df38e26bfb088fae3586a6c577fd70e 100644 (file)
@@ -496,8 +496,7 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
  * Returns enum mmc_blk_status after checking errors.
  */
 static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
-                                     struct mmc_request *mrq,
-                                     struct mmc_async_req *next_req)
+                                                     struct mmc_request *mrq)
 {
        struct mmc_command *cmd;
        struct mmc_context_info *context_info = &host->context_info;
@@ -507,7 +506,7 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
                wait_event_interruptible(context_info->wait,
                                (context_info->is_done_rcv ||
                                 context_info->is_new_req));
-               context_info->is_waiting_last_req = false;
+
                if (context_info->is_done_rcv) {
                        context_info->is_done_rcv = false;
                        cmd = mrq->cmd;
@@ -527,10 +526,9 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
                                __mmc_start_request(host, mrq);
                                continue; /* wait for done/new event again */
                        }
-               } else if (context_info->is_new_req) {
-                       if (!next_req)
-                               return MMC_BLK_NEW_REQUEST;
                }
+
+               return MMC_BLK_NEW_REQUEST;
        }
        mmc_retune_release(host);
        return status;
@@ -660,7 +658,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                mmc_pre_req(host, areq->mrq);
 
        if (host->areq) {
-               status = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
+               status = mmc_wait_for_data_req_done(host, host->areq->mrq);
                if (status == MMC_BLK_NEW_REQUEST) {
                        if (ret_stat)
                                *ret_stat = status;
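
The rework above removes the next_req parameter: mmc_wait_for_data_req_done() now reports MMC_BLK_NEW_REQUEST whenever it is woken by a newly queued request rather than by completion of the outstanding one, and the caller decides how to proceed. The wait pattern condensed (context_info fields as in the hunk; retune handling and command-error recovery elided):

	while (1) {
		wait_event_interruptible(context_info->wait,
					 context_info->is_done_rcv ||
					 context_info->is_new_req);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			break;	/* outstanding request finished; inspect its status */
		}
		/* woken by a new request from the block layer instead */
		return MMC_BLK_NEW_REQUEST;
	}
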
index deb90c2ff6b423e63352283c8dfdbb935bfb4e27..a614f37faf27e05e52d851353024384e5cbc3f41 100644 (file)
@@ -223,6 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
 static int mmc_read_ssr(struct mmc_card *card)
 {
        unsigned int au, es, et, eo;
+       u32 *raw_ssr;
        int i;
 
        if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -231,14 +232,21 @@ static int mmc_read_ssr(struct mmc_card *card)
                return 0;
        }
 
-       if (mmc_app_sd_status(card, card->raw_ssr)) {
+       raw_ssr = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);
+       if (!raw_ssr)
+               return -ENOMEM;
+
+       if (mmc_app_sd_status(card, raw_ssr)) {
                pr_warn("%s: problem reading SD Status register\n",
                        mmc_hostname(card->host));
+               kfree(raw_ssr);
                return 0;
        }
 
        for (i = 0; i < 16; i++)
-               card->raw_ssr[i] = be32_to_cpu(card->raw_ssr[i]);
+               card->raw_ssr[i] = be32_to_cpu(raw_ssr[i]);
+
+       kfree(raw_ssr);
 
        /*
         * UNSTUFF_BITS only works with four u32s so we have to offset the
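
The kmalloc'd raw_ssr above is a DMA bounce buffer: card->raw_ssr is embedded in struct mmc_card and is not guaranteed to satisfy DMA alignment constraints, whereas kmalloc() memory is. The pattern in miniature (read_via_dma() is a hypothetical stand-in for mmc_app_sd_status()):

	u32 *buf = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);	/* DMA-safe */
	if (!buf)
		return -ENOMEM;
	if (!read_via_dma(buf))	/* hypothetical DMA read into the bounce buffer */
		for (i = 0; i < 16; i++)
			card->raw_ssr[i] = be32_to_cpu(buf[i]);	/* swap into place */
	kfree(buf);
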
index 1501cfdac4734246aa6746ed5ee8d2678122d964..4b0ecb981842248b1cc4ea8edaf35dc4bac753e1 100644 (file)
@@ -262,6 +262,7 @@ disable_clk:
 }
 
 static const struct of_device_id sdhci_cdns_match[] = {
+       { .compatible = "socionext,uniphier-sd4hc" },
        { .compatible = "cdns,sd4hc" },
        { /* sentinel */ }
 };
index 111991e5b9a0e7ecf587eaf402a5ecd0d2906f55..23909804ffb840d3187f21f67180a634a769425e 100644 (file)
@@ -1576,6 +1576,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        unsigned long flags;
        u8 ctrl;
 
+       if (ios->power_mode == MMC_POWER_UNDEFINED)
+               return;
+
        spin_lock_irqsave(&host->lock, flags);
 
        if (host->flags & SDHCI_DEVICE_DEAD) {
@@ -2938,22 +2941,24 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
 
        sdhci_init(host, 0);
 
-       /* Force clock and power re-program */
-       host->pwr = 0;
-       host->clock = 0;
-       mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
-       mmc->ops->set_ios(mmc, &mmc->ios);
+       if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
+               /* Force clock and power re-program */
+               host->pwr = 0;
+               host->clock = 0;
+               mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
+               mmc->ops->set_ios(mmc, &mmc->ios);
 
-       if ((host_flags & SDHCI_PV_ENABLED) &&
-               !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
-               spin_lock_irqsave(&host->lock, flags);
-               sdhci_enable_preset_value(host, true);
-               spin_unlock_irqrestore(&host->lock, flags);
-       }
+               if ((host_flags & SDHCI_PV_ENABLED) &&
+                   !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+                       spin_lock_irqsave(&host->lock, flags);
+                       sdhci_enable_preset_value(host, true);
+                       spin_unlock_irqrestore(&host->lock, flags);
+               }
 
-       if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
-           mmc->ops->hs400_enhanced_strobe)
-               mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+               if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+                   mmc->ops->hs400_enhanced_strobe)
+                       mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+       }
 
        spin_lock_irqsave(&host->lock, flags);
 
index 6e16e441f85e09596a437f78f5c7891f2c91988f..e4c28fed61d50866c5a7955d8de6ee458eae7834 100644 (file)
@@ -166,7 +166,6 @@ source "drivers/net/ethernet/seeq/Kconfig"
 source "drivers/net/ethernet/silan/Kconfig"
 source "drivers/net/ethernet/sis/Kconfig"
 source "drivers/net/ethernet/sfc/Kconfig"
-source "drivers/net/ethernet/sfc/falcon/Kconfig"
 source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
index 16e12c45904be6d4a24b858d5f4265a78adbc3d9..21f80f5744ba2f048b4d7e0a8a8862e45977a339 100644 (file)
@@ -1469,6 +1469,12 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
        p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
        p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
                                           p->agl_prt_ctl_size);
+       if (!p->mix || !p->agl || !p->agl_prt_ctl) {
+               dev_err(&pdev->dev, "failed to map I/O memory\n");
+               result = -ENOMEM;
+               goto err;
+       }
+
        spin_lock_init(&p->lock);
 
        skb_queue_head_init(&p->tx_list);
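
devm_ioremap() returns NULL on failure, and the u64 casts above would silently turn that into a zero "address", hence the added three-way check. Where a raw cast is not needed, a hedged alternative is the helper that folds mapping and validation into one step:

	void __iomem *base;

	base = devm_ioremap_resource(&pdev->dev, res);	/* map + validate */
	if (IS_ERR(base))
		return PTR_ERR(base);
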
index fba3b2ad382d61d31fa20210001361fc3bbc6d9c..a267173f59972f174947578935189b1e474dc94b 100644 (file)
@@ -76,6 +76,7 @@ enum {
        CPL_PASS_ESTABLISH    = 0x41,
        CPL_RX_DATA_DDP       = 0x42,
        CPL_PASS_ACCEPT_REQ   = 0x44,
+       CPL_RX_ISCSI_CMP      = 0x45,
        CPL_TRACE_PKT_T5      = 0x48,
        CPL_RX_ISCSI_DDP      = 0x49,
 
@@ -934,6 +935,18 @@ struct cpl_iscsi_data {
        __u8 status;
 };
 
+struct cpl_rx_iscsi_cmp {
+       union opcode_tid ot;
+       __be16 pdu_len_ddp;
+       __be16 len;
+       __be32 seq;
+       __be16 urg;
+       __u8 rsvd;
+       __u8 status;
+       __be32 ulp_crc;
+       __be32 ddpvld;
+};
+
 struct cpl_tx_data_iso {
        __be32 op_to_scsi;
        __u8   reserved1;
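
struct cpl_rx_iscsi_cmp above is the on-the-wire layout of the new CPL_RX_ISCSI_CMP (0x45) message, ending in the same DDP CRC/validity words as the existing CPL_RX_ISCSI_DDP. A hypothetical receive-side sketch; only the struct layout comes from the hunk, while the handler name and its dispatch are assumptions:

	static void do_rx_iscsi_cmp(struct sk_buff *skb)	/* hypothetical */
	{
		const struct cpl_rx_iscsi_cmp *cpl =
			(const struct cpl_rx_iscsi_cmp *)skb->data;
		u16 pdu_len = be16_to_cpu(cpl->pdu_len_ddp);
		u32 ddpvld = be32_to_cpu(cpl->ddpvld);

		/* ... check cpl->status and the ddpvld bits, then complete
		 * the matching iSCSI command using pdu_len ... */
	}
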
index 6cfa63a5e9b48b7f181f0d59916e17cf3f6b55f8..4c30c44b242e67e59d52f5cc00a230ba81ba68d4 100644 (file)
@@ -65,7 +65,7 @@
 /* Number of bytes of an RX frame that are copied to skb->data */
 #define BE_HDR_LEN             ((u16) 64)
 /* allocate extra space to allow tunneling decapsulation without head reallocation */
-#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)
+#define BE_RX_SKB_ALLOC_SIZE   256
 
 #define BE_MAX_JUMBO_FRAME_SIZE        9018
 #define BE_MIN_MTU             256
index 79b7c84b7869939425f93a2175ef8254a2d6bb03..dc0850b3b517b9b02e3cd9a42cf98425a55d0df3 100644 (file)
@@ -1,6 +1,6 @@
 config FSL_FMAN
        tristate "FMan support"
-       depends on FSL_SOC || COMPILE_TEST
+       depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
        select GENERIC_ALLOCATOR
        select PHYLIB
        default n
index dafd9e1baba2cb420ea70826408d0474488ca246..f60845f0c6cad060b193fecdf00dd3a93127fb9c 100644 (file)
@@ -1890,6 +1890,7 @@ static int fman_reset(struct fman *fman)
 
                goto _return;
        } else {
+#ifdef CONFIG_PPC
                struct device_node *guts_node;
                struct ccsr_guts __iomem *guts_regs;
                u32 devdisr2, reg;
@@ -1921,6 +1922,7 @@ static int fman_reset(struct fman *fman)
 
                /* Enable all MACs */
                iowrite32be(reg, &guts_regs->devdisr2);
+#endif
 
                /* Perform FMan reset */
                iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
@@ -1932,25 +1934,31 @@ static int fman_reset(struct fman *fman)
                } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
                         FPM_RSTC_FM_RESET) && --count);
                if (count == 0) {
+#ifdef CONFIG_PPC
                        iounmap(guts_regs);
                        of_node_put(guts_node);
+#endif
                        err = -EBUSY;
                        goto _return;
                }
+#ifdef CONFIG_PPC
 
                /* Restore devdisr2 value */
                iowrite32be(devdisr2, &guts_regs->devdisr2);
 
                iounmap(guts_regs);
                of_node_put(guts_node);
+#endif
 
                goto _return;
 
+#ifdef CONFIG_PPC
 guts_regs:
                of_node_put(guts_node);
 guts_node:
                dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
                        __func__);
+#endif
        }
 _return:
        return err;
@@ -2868,6 +2876,13 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
 
        fman->dev = &of_dev->dev;
 
+       err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
+       if (err) {
+               dev_err(&of_dev->dev, "%s: of_platform_populate() failed\n",
+                       __func__);
+               goto fman_free;
+       }
+
        return fman;
 
 fman_node_put:
index 69ca42ce5dd5034803c557df92c7a8911aedd35e..0b31f8502adae2e86c292fb99437ee943c0794bf 100644 (file)
@@ -594,6 +594,7 @@ static const u16 phy2speed[] = {
        [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
        [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
        [PHY_INTERFACE_MODE_RTBI]               = SPEED_1000,
+       [PHY_INTERFACE_MODE_QSGMII]             = SPEED_1000,
        [PHY_INTERFACE_MODE_XGMII]              = SPEED_10000
 };
 
index ee7e9ce2f5b34b9bd6e4f5434e56a2a8bf7899f9..418ca1f3774aabbd1575c934243ac938124b54ad 100644 (file)
@@ -1316,10 +1316,11 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id hix5hd2_of_match[] = {
-       { .compatible = "hisilicon,hisi-gemac-v1", .data = (void *)GEMAC_V1 },
-       { .compatible = "hisilicon,hisi-gemac-v2", .data = (void *)GEMAC_V2 },
-       { .compatible = "hisilicon,hix5hd2-gemac", .data = (void *)GEMAC_V1 },
-       { .compatible = "hisilicon,hi3798cv200-gemac", .data = (void *)GEMAC_V2 },
+       { .compatible = "hisilicon,hisi-gmac-v1", .data = (void *)GEMAC_V1 },
+       { .compatible = "hisilicon,hisi-gmac-v2", .data = (void *)GEMAC_V2 },
+       { .compatible = "hisilicon,hix5hd2-gmac", .data = (void *)GEMAC_V1 },
+       { .compatible = "hisilicon,hi3798cv200-gmac", .data = (void *)GEMAC_V2 },
+       { .compatible = "hisilicon,hi3516a-gmac", .data = (void *)GEMAC_V2 },
        {},
 };
 
@@ -1327,7 +1328,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
 
 static struct platform_driver hix5hd2_dev_driver = {
        .driver = {
-               .name = "hisi-gemac",
+               .name = "hisi-gmac",
                .of_match_table = hix5hd2_of_match,
        },
        .probe = hix5hd2_dev_probe,
@@ -1338,4 +1339,4 @@ module_platform_driver(hix5hd2_dev_driver);
 
 MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hisi-gemac");
+MODULE_ALIAS("platform:hisi-gmac");
index dabc5418efcc48287b1e30d95f8a05235755f119..cda04b3126bc688222eaebed0fbe5c59aa76f89b 100644 (file)
@@ -770,6 +770,17 @@ struct mvpp2_rx_desc {
        u32 reserved8;
 };
 
+struct mvpp2_txq_pcpu_buf {
+       /* Transmitted SKB */
+       struct sk_buff *skb;
+
+       /* Physical address of transmitted buffer */
+       dma_addr_t phys;
+
+       /* Size transmitted */
+       size_t size;
+};
+
 /* Per-CPU Tx queue control */
 struct mvpp2_txq_pcpu {
        int cpu;
@@ -785,11 +796,8 @@ struct mvpp2_txq_pcpu {
        /* Number of Tx DMA descriptors reserved for each CPU */
        int reserved_num;
 
-       /* Array of transmitted skb */
-       struct sk_buff **tx_skb;
-
-       /* Array of transmitted buffers' physical addresses */
-       dma_addr_t *tx_buffs;
+       /* Info about transmitted buffers */
+       struct mvpp2_txq_pcpu_buf *buffs;
 
        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;
@@ -979,10 +987,11 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
                              struct sk_buff *skb,
                              struct mvpp2_tx_desc *tx_desc)
 {
-       txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
-       if (skb)
-               txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
-                                                        tx_desc->buf_phys_addr;
+       struct mvpp2_txq_pcpu_buf *tx_buf =
+               txq_pcpu->buffs + txq_pcpu->txq_put_index;
+       tx_buf->skb = skb;
+       tx_buf->size = tx_desc->data_size;
+       tx_buf->phys = tx_desc->buf_phys_addr;
        txq_pcpu->txq_put_index++;
        if (txq_pcpu->txq_put_index == txq_pcpu->size)
                txq_pcpu->txq_put_index = 0;
@@ -4401,17 +4410,16 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
        int i;
 
        for (i = 0; i < num; i++) {
-               dma_addr_t buf_phys_addr =
-                                   txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
-               struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
+               struct mvpp2_txq_pcpu_buf *tx_buf =
+                       txq_pcpu->buffs + txq_pcpu->txq_get_index;
 
                mvpp2_txq_inc_get(txq_pcpu);
 
-               dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
-                                skb_headlen(skb), DMA_TO_DEVICE);
-               if (!skb)
+               dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+                                tx_buf->size, DMA_TO_DEVICE);
+               if (!tx_buf->skb)
                        continue;
-               dev_kfree_skb_any(skb);
+               dev_kfree_skb_any(tx_buf->skb);
        }
 }
 
@@ -4651,15 +4659,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
                txq_pcpu->size = txq->size;
-               txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
-                                          sizeof(*txq_pcpu->tx_skb),
-                                          GFP_KERNEL);
-               if (!txq_pcpu->tx_skb)
-                       goto error;
-
-               txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
-                                            sizeof(dma_addr_t), GFP_KERNEL);
-               if (!txq_pcpu->tx_buffs)
+               txq_pcpu->buffs = kmalloc(txq_pcpu->size *
+                                         sizeof(struct mvpp2_txq_pcpu_buf),
+                                         GFP_KERNEL);
+               if (!txq_pcpu->buffs)
                        goto error;
 
                txq_pcpu->count = 0;
@@ -4673,8 +4676,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 error:
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-               kfree(txq_pcpu->tx_skb);
-               kfree(txq_pcpu->tx_buffs);
+               kfree(txq_pcpu->buffs);
        }
 
        dma_free_coherent(port->dev->dev.parent,
@@ -4693,8 +4695,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-               kfree(txq_pcpu->tx_skb);
-               kfree(txq_pcpu->tx_buffs);
+               kfree(txq_pcpu->buffs);
        }
 
        if (txq->descs)
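
The mvpp2 rework folds the parallel tx_skb[] and tx_buffs[] arrays into a single array of struct mvpp2_txq_pcpu_buf, so the skb, its DMA address, and the mapped length travel together; the free path can then unmap with the recorded size instead of skb_headlen(), which is not the mapped length for fragments. A condensed sketch of the consolidated bookkeeping, following the names in the hunks:

    /* Sketch: one descriptor array instead of two parallel ones. */
    struct mvpp2_txq_pcpu_buf *buffs;

    buffs = kmalloc_array(size, sizeof(*buffs), GFP_KERNEL);
    if (!buffs)
            return -ENOMEM;

    buffs[i].skb  = skb;
    buffs[i].phys = dma_addr;
    buffs[i].size = mapped_len;     /* reused by dma_unmap_single() later */
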
index 3b026c151cf24f370137b4655b417d6e024d6dec..7431f633de3135f5ccee6a9f506892d5f13dff35 100644 (file)
@@ -75,7 +75,7 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
        struct rb_node *parent = NULL;
 
        while (*new) {
-               struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
+               struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
                int result = counter->id - this->id;
 
                parent = *new;
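
rb_entry() is the rbtree-specific spelling of container_of(); the substitution changes nothing at runtime, it just makes the tree walk self-documenting:

    #include <linux/rbtree.h>

    /* rb_entry(ptr, type, member) expands to container_of(ptr, type,
     * member); prefer it whenever ptr is a struct rb_node link.
     */
    struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
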
index 53126bf68ea937e7b2c6951787c25450ec0ebdb5..01d0efa9c5c7419b6e2fa99ed6b99562811bcbd5 100644 (file)
@@ -942,7 +942,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
        char rauht_pl[MLXSW_REG_RAUHT_LEN];
        struct net_device *dev;
        bool entry_connected;
-       u8 nud_state;
+       u8 nud_state, dead;
        bool updating;
        bool removing;
        bool adding;
@@ -953,10 +953,11 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
        dip = ntohl(*((__be32 *) n->primary_key));
        memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
        nud_state = n->nud_state;
+       dead = n->dead;
        dev = n->dev;
        read_unlock_bh(&n->lock);
 
-       entry_connected = nud_state & NUD_VALID;
+       entry_connected = nud_state & NUD_VALID && !dead;
        adding = (!neigh_entry->offloaded) && entry_connected;
        updating = neigh_entry->offloaded && entry_connected;
        removing = neigh_entry->offloaded && !entry_connected;
@@ -1351,7 +1352,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_neigh_entry *neigh_entry;
        struct net_device *dev = fib_nh->nh_dev;
        struct neighbour *n;
-       u8 nud_state;
+       u8 nud_state, dead;
 
        /* Take a reference of neigh here ensuring that neigh would
         * not be destructed before the nexthop entry is finished.
@@ -1383,8 +1384,9 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
        list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
+       dead = n->dead;
        read_unlock_bh(&n->lock);
-       __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
+       __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
 
        return 0;
 }
@@ -1394,6 +1396,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
 
+       __mlxsw_sp_nexthop_neigh_update(nh, true);
        list_del(&nh->neigh_list_node);
 
        /* If that is the last nexthop connected to that neigh, remove from
@@ -1452,6 +1455,8 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
                nh = &nh_grp->nexthops[i];
                mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
        }
+       mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+       WARN_ON_ONCE(nh_grp->adj_index_valid);
        kfree(nh_grp);
 }
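
Both mlxsw hunks read n->dead together with n->nud_state under the neighbour lock: a neighbour can still report a NUD_VALID state while it is already marked dead during teardown, so the two fields have to be sampled atomically. The pattern in sketch form:

    /* Sketch: snapshot both fields under one lock so they are coherent. */
    bool connected;
    u8 nud_state, dead;

    read_lock_bh(&n->lock);
    nud_state = n->nud_state;
    dead = n->dead;
    read_unlock_bh(&n->lock);

    connected = (nud_state & NUD_VALID) && !dead;
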
 
index 46f7be85f5a387ce96be02c66af18b21750b7210..2c032629c36929f20fe3a53866a045f202e487e5 100644 (file)
@@ -1,3 +1,20 @@
+#
+# Solarflare device configuration
+#
+
+config NET_VENDOR_SOLARFLARE
+       bool "Solarflare devices"
+       default y
+       ---help---
+         If you have a network (Ethernet) card belonging to this class, say Y.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Solarflare devices. If you say Y, you will be asked
+         for your specific card in the following questions.
+
+if NET_VENDOR_SOLARFLARE
+
 config SFC
        tristate "Solarflare SFC9000/SFC9100-family support"
        depends on PCI
@@ -44,3 +61,7 @@ config SFC_MCDI_LOGGING
          Driver-Interface) commands and responses, allowing debugging of
          driver/firmware interaction.  The tracing is actually enabled by
          a sysfs file 'mcdi_logging' under the PCI device.
+
+source "drivers/net/ethernet/sfc/falcon/Kconfig"
+
+endif # NET_VENDOR_SOLARFLARE
index 77ab0a85f0673d4b2f5748403ecc249d520a5f00..fa6e9704c07779b6bc70d635765044360ff6adab 100644 (file)
@@ -864,6 +864,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
        int ret;
        struct device *dev = &bsp_priv->pdev->dev;
 
+       ret = gmac_clk_enable(bsp_priv, true);
+       if (ret)
+               return ret;
+
        /*rmii or rgmii*/
        if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
                dev_info(dev, "init for RGMII\n");
@@ -880,10 +884,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
        if (ret)
                return ret;
 
-       ret = gmac_clk_enable(bsp_priv, true);
-       if (ret)
-               return ret;
-
        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);
 
index b21d03fe4f43ebba864ce3302eabc427e97acbb5..be3c91c7f211d94ad7386b77de73676933a46dcd 100644 (file)
@@ -539,7 +539,7 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
        mac->mii.reg_shift = 6;
        mac->mii.reg_mask = 0x000007C0;
        mac->mii.clk_csr_shift = 2;
-       mac->mii.clk_csr_mask = 0xF;
+       mac->mii.clk_csr_mask = GENMASK(5, 2);
 
        /* Get and dump the chip ID */
        *synopsys_id = stmmac_get_synopsys_id(hwid);
index a1d582f47b1ad19c07831fd9fa36ad8e358b3f0d..9dd2987e284dcb9f96fd3a48e98e606b26326d3a 100644 (file)
@@ -197,7 +197,7 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
        mac->mii.reg_shift = 6;
        mac->mii.reg_mask = 0x000007C0;
        mac->mii.clk_csr_shift = 2;
-       mac->mii.clk_csr_mask = 0xF;
+       mac->mii.clk_csr_mask = GENMASK(5, 2);
 
        /* Synopsys Id is not available on old chips */
        *synopsys_id = 0;
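
Both stmmac setup hunks turn the CSR clock mask from an unpositioned 0xF into GENMASK(5, 2), i.e. 0x3c, a mask already sitting at its register position; the mdio hunks below flip the operation order to match, shifting the value first and masking afterwards. A minimal sketch of the idiom, with illustrative names:

    #include <linux/bitops.h>       /* GENMASK() */

    /* GENMASK(h, l) builds a contiguous mask over bits h..l, so
     * GENMASK(5, 2) == 0x3c describes a 4-bit field at bit 2.
     */
    value |= (clk_csr << clk_csr_shift) & GENMASK(5, 2);
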
index a340fc8bd0debf305ff2cb2d6b72b1925ec8b86e..8816515e1bbbc5c4a1fd5a443a478f2061d27c50 100644 (file)
@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                 * descriptors for the same frame has to be set before, to
                 * avoid race condition.
                 */
-               wmb();
+               dma_wmb();
 
        p->des3 = cpu_to_le32(tdes3);
 }
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
                 * descriptors for the same frame has to be set before, to
                 * avoid race condition.
                 */
-               wmb();
+               dma_wmb();
 
        p->des3 = cpu_to_le32(tdes3);
 }
index ce97e522566a8910e05707870d5c3f511a20ee3b..f0d86321dfe22b0d4455436326c93a8af513764a 100644 (file)
@@ -350,7 +350,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                 * descriptors for the same frame has to be set before, to
                 * avoid race condition.
                 */
-               wmb();
+               dma_wmb();
 
        p->des0 = cpu_to_le32(tdes0);
 }
index 3e405785b81c84e551e499af4fe85cf0c0983f40..bb40382e205deffd9a3312099723b72f84da18a5 100644 (file)
@@ -2125,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
         * descriptor and then barrier is needed to make sure that
         * all is coherent before granting the DMA engine.
         */
-       smp_wmb();
+       dma_wmb();
 
        if (netif_msg_pktdata(priv)) {
                pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -2338,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                 * descriptor and then barrier is needed to make sure that
                 * all is coherent before granting the DMA engine.
                 */
-               smp_wmb();
+               dma_wmb();
        }
 
        netdev_sent_queue(dev, skb->len);
@@ -2443,14 +2443,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                        netif_dbg(priv, rx_status, priv->dev,
                                  "refill entry #%d\n", entry);
                }
-               wmb();
+               dma_wmb();
 
                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
                        priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
                else
                        priv->hw->desc->set_rx_owner(p);
 
-               wmb();
+               dma_wmb();
 
                entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
        }
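
The barrier conversions above all land on dma_wmb(): smp_wmb() only orders stores against other CPUs and degrades to a compiler barrier on UP kernels, while a descriptor's ownership bit must become visible to the DMA engine strictly after the rest of the descriptor; dma_wmb() guarantees exactly that, and on many architectures more cheaply than a full wmb(). Sketch of the publish pattern, field names illustrative:

    /* Sketch: make every descriptor field visible to the device
     * before transferring ownership.
     */
    desc->addr = cpu_to_le32(dma_addr);
    desc->len  = cpu_to_le32(len);
    dma_wmb();                              /* order CPU stores vs. DMA */
    desc->ctrl = cpu_to_le32(DESC_OWN);     /* device may consume it now */
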
index 23322fd9e3acfba03cfc3b328322040649d27913..fda01f770eff4239cafaf12e940ee23a07ecb43f 100644 (file)
@@ -81,8 +81,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
        value |= (phyaddr << priv->hw->mii.addr_shift)
                & priv->hw->mii.addr_mask;
        value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-       value |= (priv->clk_csr & priv->hw->mii.clk_csr_mask)
-               << priv->hw->mii.clk_csr_shift;
+       value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+               & priv->hw->mii.clk_csr_mask;
        if (priv->plat->has_gmac4)
                value |= MII_GMAC4_READ;
 
@@ -122,8 +122,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
                & priv->hw->mii.addr_mask;
        value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
 
-       value |= ((priv->clk_csr & priv->hw->mii.clk_csr_mask)
-               << priv->hw->mii.clk_csr_shift);
+       value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+               & priv->hw->mii.clk_csr_mask;
        if (priv->plat->has_gmac4)
                value |= MII_GMAC4_WRITE;
 
index c7e547e4f2b1f4e233134b4e1a540ba1924d6f08..7d9e36f66735cfd7da35f5df0f17c4d003dc4e0c 100644 (file)
@@ -94,6 +94,7 @@
 
 /* offset relative to base of XGBE_SS_REG_INDEX */
 #define XGBE10_SGMII_MODULE_OFFSET     0x100
+#define IS_SS_ID_XGBE(d)               ((d)->ss_version == XGBE_SS_VERSION_10)
 /* offset relative to base of XGBE_SM_REG_INDEX */
 #define XGBE10_HOST_PORT_OFFSET                0x34
 #define XGBE10_SLAVE_PORT_OFFSET       0x64
@@ -1746,6 +1747,17 @@ static void keystone_set_msglevel(struct net_device *ndev, u32 value)
        netcp->msg_enable = value;
 }
 
+static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
+{
+       struct gbe_intf *gbe_intf;
+
+       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       if (!gbe_intf)
+               gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+
+       return gbe_intf;
+}
+
 static void keystone_get_stat_strings(struct net_device *ndev,
                                      uint32_t stringset, uint8_t *data)
 {
@@ -1754,7 +1766,7 @@ static void keystone_get_stat_strings(struct net_device *ndev,
        struct gbe_priv *gbe_dev;
        int i;
 
-       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       gbe_intf = keystone_get_intf_data(netcp);
        if (!gbe_intf)
                return;
        gbe_dev = gbe_intf->gbe_dev;
@@ -1778,7 +1790,7 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
        struct gbe_intf *gbe_intf;
        struct gbe_priv *gbe_dev;
 
-       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       gbe_intf = keystone_get_intf_data(netcp);
        if (!gbe_intf)
                return -EINVAL;
        gbe_dev = gbe_intf->gbe_dev;
@@ -1896,7 +1908,7 @@ static void keystone_get_ethtool_stats(struct net_device *ndev,
        struct gbe_intf *gbe_intf;
        struct gbe_priv *gbe_dev;
 
-       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       gbe_intf = keystone_get_intf_data(netcp);
        if (!gbe_intf)
                return;
 
@@ -1920,7 +1932,7 @@ static int keystone_get_link_ksettings(struct net_device *ndev,
        if (!phy)
                return -EINVAL;
 
-       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       gbe_intf = keystone_get_intf_data(netcp);
        if (!gbe_intf)
                return -EINVAL;
 
@@ -1953,7 +1965,7 @@ static int keystone_set_link_ksettings(struct net_device *ndev,
        if (!phy)
                return -EINVAL;
 
-       gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+       gbe_intf = keystone_get_intf_data(netcp);
        if (!gbe_intf)
                return -EINVAL;
 
@@ -2311,7 +2323,7 @@ static void gbe_init_host_port(struct gbe_priv *priv)
        int bypass_en = 1;
 
        /* Host Tx Pri */
-       if (IS_SS_ID_NU(priv))
+       if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
                writel(HOST_TX_PRI_MAP_DEFAULT,
                       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
 
index e26398b5a7dcfec95bb2ca33cb521753a832eeab..d0a68bdd5f63b5934fd8dbf227929a1291fb848c 100644 (file)
@@ -1483,7 +1483,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
        r = queue->rx_curr_get ;
        while (queue->rx_used) {
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
-               DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
+               DB_RX("switch OWN bit of RxD 0x%p ",r,0,5) ;
                r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
                frag_count = 1 ;
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
@@ -1645,7 +1645,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
        DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
        if (frame_status & LAN_TX) {
                /* '*t' is already defined */
-               DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
+               DB_TX("LAN_TX: TxD = %p, virt = %p ",t,virt,3) ;
                t->txd_virt = virt ;
                t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
                t->txd_tbadr = cpu_to_le32(phys) ;
@@ -1819,7 +1819,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
        __le32  tbctrl;
 
        NDD_TRACE("THSB",mb,fc,0) ;
-       DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
+       DB_TX("smt_send_mbuf: mb = 0x%p, fc = 0x%x",mb,fc,4) ;
 
        mb->sm_off-- ;  /* set to fc */
        mb->sm_len++ ;  /* + fc */
@@ -1960,7 +1960,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
 
                        do {
                                DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
-                               DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
+                               DB_TX("check OWN/EOF bit of TxD 0x%p",t1,0,5) ;
                                tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
 
                                if (tbctrl & BMU_OWN || !queue->tx_used){
@@ -1988,7 +1988,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
                        }
                        else {
 #ifndef PASS_1ST_TXD_2_TX_COMP
-                               DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ;
+                               DB_TX("mac_drv_tx_comp for TxD 0x%p",t2,0,4) ;
                                mac_drv_tx_complete(smc,t2) ;
 #else
                                DB_TX("mac_drv_tx_comp for TxD 0x%x",
@@ -2052,7 +2052,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
                tx_used = queue->tx_used ;
                while (tx_used) {
                        DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
-                       DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
+                       DB_TX("switch OWN bit of TxD 0x%p ",t,0,5) ;
                        t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
                        DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
                        t = t->txd_next ;
index 441b4dc79450c1009151692565586c53060e5c83..52fa162a31e097af411cd20592a05fc501c5bbed 100644 (file)
@@ -284,7 +284,7 @@ void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local)
        SMbuf           *reply ;
 
        sm = smtod(mb,struct smt_header *) ;
-       DB_SMT("SMT: processing PMF frame at %x len %d\n",sm,mb->sm_len) ;
+       DB_SMT("SMT: processing PMF frame at %p len %d\n",sm,mb->sm_len) ;
 #ifdef DEBUG
        dump_smt(smc,sm,"PMF Received") ;
 #endif
index cd78b7cacc753150a7e63f1ecd33d59e75d12e5e..e80a08903fcf413ed27a11c04fc3eb8a8683cba6 100644 (file)
@@ -504,7 +504,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
 #endif
 
        smt_swap_para(sm,(int) mb->sm_len,1) ;
-       DB_SMT("SMT : received packet [%s] at 0x%x\n",
+       DB_SMT("SMT : received packet [%s] at 0x%p\n",
                smt_type_name[m_fc(mb) & 0xf],sm) ;
        DB_SMT("SMT : version %d, class %s\n",sm->smt_version,
                smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ;
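
The skfp debug-format fixes all apply one rule: pointers print with %p. Handing a pointer to %x feeds a 64-bit value into an unsigned int conversion, truncating the address and tripping -Wformat warnings:

    /* Sketch: %p for pointers; %x would truncate on 64-bit builds. */
    printk(KERN_DEBUG "TxD at %p, status 0x%x\n", txd, status);
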
index 9c06f8028f0c349629365cbdc82f63f63b55c098..92b08383cafa8b88e8d5b79ea3a5c0da9998770f 100644 (file)
@@ -1187,8 +1187,8 @@ static int genphy_config_advert(struct phy_device *phydev)
  */
 static int genphy_config_eee_advert(struct phy_device *phydev)
 {
-       u32 broken = phydev->eee_broken_modes;
-       u32 old_adv, adv;
+       int broken = phydev->eee_broken_modes;
+       int old_adv, adv;
 
        /* Nothing to disable */
        if (!broken)
@@ -1665,7 +1665,7 @@ static void of_set_phy_supported(struct phy_device *phydev)
 static void of_set_phy_eee_broken(struct phy_device *phydev)
 {
        struct device_node *node = phydev->mdio.dev.of_node;
-       u32 broken;
+       u32 broken = 0;
 
        if (!IS_ENABLED(CONFIG_OF_MDIO))
                return;
@@ -1673,8 +1673,20 @@ static void of_set_phy_eee_broken(struct phy_device *phydev)
        if (!node)
                return;
 
-       if (!of_property_read_u32(node, "eee-broken-modes", &broken))
-               phydev->eee_broken_modes = broken;
+       if (of_property_read_bool(node, "eee-broken-100tx"))
+               broken |= MDIO_EEE_100TX;
+       if (of_property_read_bool(node, "eee-broken-1000t"))
+               broken |= MDIO_EEE_1000T;
+       if (of_property_read_bool(node, "eee-broken-10gt"))
+               broken |= MDIO_EEE_10GT;
+       if (of_property_read_bool(node, "eee-broken-1000kx"))
+               broken |= MDIO_EEE_1000KX;
+       if (of_property_read_bool(node, "eee-broken-10gkx4"))
+               broken |= MDIO_EEE_10GKX4;
+       if (of_property_read_bool(node, "eee-broken-10gkr"))
+               broken |= MDIO_EEE_10GKR;
+
+       phydev->eee_broken_modes = broken;
 }
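
The PHY hunk retires the raw "eee-broken-modes" u32 in favour of one boolean device-tree property per EEE mode. of_property_read_bool() merely tests for the property's presence, so each named property independently sets its MDIO_EEE_* bit (constants from include/uapi/linux/mdio.h); a condensed sketch:

    /* Sketch: fold presence-only DT booleans into a bitmask. */
    u32 broken = 0;

    if (of_property_read_bool(node, "eee-broken-100tx"))
            broken |= MDIO_EEE_100TX;
    if (of_property_read_bool(node, "eee-broken-1000t"))
            broken |= MDIO_EEE_1000T;

    phydev->eee_broken_modes = broken;
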
 
 /**
index 08327e005cccf27fc18db64d234aaefd0dcd1a1f..5deeda61d6d3df0531c8fb1431186dc6d7125ecc 100644 (file)
@@ -333,9 +333,9 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 static void virtnet_xdp_xmit(struct virtnet_info *vi,
                             struct receive_queue *rq,
                             struct send_queue *sq,
-                            struct xdp_buff *xdp)
+                            struct xdp_buff *xdp,
+                            void *data)
 {
-       struct page *page = virt_to_head_page(xdp->data);
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        unsigned int num_sg, len;
        void *xdp_sent;
@@ -343,32 +343,46 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
 
        /* Free up any pending old buffers before queueing new ones. */
        while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-               struct page *sent_page = virt_to_head_page(xdp_sent);
+               if (vi->mergeable_rx_bufs) {
+                       struct page *sent_page = virt_to_head_page(xdp_sent);
 
-               if (vi->mergeable_rx_bufs)
                        put_page(sent_page);
-               else
-                       give_pages(rq, sent_page);
+               } else { /* small buffer */
+                       struct sk_buff *skb = xdp_sent;
+
+                       kfree_skb(skb);
+               }
        }
 
-       /* Zero header and leave csum up to XDP layers */
-       hdr = xdp->data;
-       memset(hdr, 0, vi->hdr_len);
+       if (vi->mergeable_rx_bufs) {
+               /* Zero header and leave csum up to XDP layers */
+               hdr = xdp->data;
+               memset(hdr, 0, vi->hdr_len);
+
+               num_sg = 1;
+               sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+       } else { /* small buffer */
+               struct sk_buff *skb = data;
+
+               /* Zero header and leave csum up to XDP layers */
+               hdr = skb_vnet_hdr(skb);
+               memset(hdr, 0, vi->hdr_len);
 
-       num_sg = 1;
-       sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+               num_sg = 2;
+               sg_init_table(sq->sg, 2);
+               sg_set_buf(sq->sg, hdr, vi->hdr_len);
+               skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+       }
        err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
-                                  xdp->data, GFP_ATOMIC);
+                                  data, GFP_ATOMIC);
        if (unlikely(err)) {
-               if (vi->mergeable_rx_bufs)
+               if (vi->mergeable_rx_bufs) {
+                       struct page *page = virt_to_head_page(xdp->data);
+
                        put_page(page);
-               else
-                       give_pages(rq, page);
+               } else /* small buffer */
+                       kfree_skb(data);
                return; // On error abort to avoid unnecessary kick
-       } else if (!vi->mergeable_rx_bufs) {
-               /* If not mergeable bufs must be big packets so cleanup pages */
-               give_pages(rq, (struct page *)page->private);
-               page->private = 0;
        }
 
        virtqueue_kick(sq->vq);
@@ -377,23 +391,26 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
 static u32 do_xdp_prog(struct virtnet_info *vi,
                       struct receive_queue *rq,
                       struct bpf_prog *xdp_prog,
-                      struct page *page, int offset, int len)
+                      void *data, int len)
 {
        int hdr_padded_len;
        struct xdp_buff xdp;
+       void *buf;
        unsigned int qp;
        u32 act;
-       u8 *buf;
-
-       buf = page_address(page) + offset;
 
-       if (vi->mergeable_rx_bufs)
+       if (vi->mergeable_rx_bufs) {
                hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-       else
-               hdr_padded_len = sizeof(struct padded_vnet_hdr);
+               xdp.data = data + hdr_padded_len;
+               xdp.data_end = xdp.data + (len - vi->hdr_len);
+               buf = data;
+       } else { /* small buffers */
+               struct sk_buff *skb = data;
 
-       xdp.data = buf + hdr_padded_len;
-       xdp.data_end = xdp.data + (len - vi->hdr_len);
+               xdp.data = skb->data;
+               xdp.data_end = xdp.data + len;
+               buf = skb->data;
+       }
 
        act = bpf_prog_run_xdp(xdp_prog, &xdp);
        switch (act) {
@@ -403,8 +420,8 @@ static u32 do_xdp_prog(struct virtnet_info *vi,
                qp = vi->curr_queue_pairs -
                        vi->xdp_queue_pairs +
                        smp_processor_id();
-               xdp.data = buf + (vi->mergeable_rx_bufs ? 0 : 4);
-               virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp);
+               xdp.data = buf;
+               virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
                return XDP_TX;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -414,26 +431,17 @@ static u32 do_xdp_prog(struct virtnet_info *vi,
        }
 }
 
-static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
+static struct sk_buff *receive_small(struct net_device *dev,
+                                    struct virtnet_info *vi,
+                                    struct receive_queue *rq,
+                                    void *buf, unsigned int len)
 {
        struct sk_buff * skb = buf;
+       struct bpf_prog *xdp_prog;
 
        len -= vi->hdr_len;
        skb_trim(skb, len);
 
-       return skb;
-}
-
-static struct sk_buff *receive_big(struct net_device *dev,
-                                  struct virtnet_info *vi,
-                                  struct receive_queue *rq,
-                                  void *buf,
-                                  unsigned int len)
-{
-       struct bpf_prog *xdp_prog;
-       struct page *page = buf;
-       struct sk_buff *skb;
-
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
@@ -442,7 +450,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
 
                if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
                        goto err_xdp;
-               act = do_xdp_prog(vi, rq, xdp_prog, page, 0, len);
+               act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
                switch (act) {
                case XDP_PASS:
                        break;
@@ -456,18 +464,33 @@ static struct sk_buff *receive_big(struct net_device *dev,
        }
        rcu_read_unlock();
 
-       skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+       return skb;
+
+err_xdp:
+       rcu_read_unlock();
+       dev->stats.rx_dropped++;
+       kfree_skb(skb);
+xdp_xmit:
+       return NULL;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+                                  struct virtnet_info *vi,
+                                  struct receive_queue *rq,
+                                  void *buf,
+                                  unsigned int len)
+{
+       struct page *page = buf;
+       struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+
        if (unlikely(!skb))
                goto err;
 
        return skb;
 
-err_xdp:
-       rcu_read_unlock();
 err:
        dev->stats.rx_dropped++;
        give_pages(rq, page);
-xdp_xmit:
        return NULL;
 }
 
@@ -483,7 +506,7 @@ xdp_xmit:
  * anymore.
  */
 static struct page *xdp_linearize_page(struct receive_queue *rq,
-                                      u16 num_buf,
+                                      u16 *num_buf,
                                       struct page *p,
                                       int offset,
                                       unsigned int *len)
@@ -497,7 +520,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
        memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
        page_off += *len;
 
-       while (--num_buf) {
+       while (--*num_buf) {
                unsigned int buflen;
                unsigned long ctx;
                void *buf;
@@ -507,19 +530,22 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
                if (unlikely(!ctx))
                        goto err_buf;
 
+               buf = mergeable_ctx_to_buf_address(ctx);
+               p = virt_to_head_page(buf);
+               off = buf - page_address(p);
+
                /* guard against a misconfigured or uncooperative backend that
                 * is sending packet larger than the MTU.
                 */
-               if ((page_off + buflen) > PAGE_SIZE)
+               if ((page_off + buflen) > PAGE_SIZE) {
+                       put_page(p);
                        goto err_buf;
-
-               buf = mergeable_ctx_to_buf_address(ctx);
-               p = virt_to_head_page(buf);
-               off = buf - page_address(p);
+               }
 
                memcpy(page_address(page) + page_off,
                       page_address(p) + off, buflen);
                page_off += buflen;
+               put_page(p);
        }
 
        *len = page_off;
@@ -552,16 +578,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                struct page *xdp_page;
                u32 act;
 
-               /* No known backend devices should send packets with
-                * more than a single buffer when XDP conditions are
-                * met. However it is not strictly illegal so the case
-                * is handled as an exception and a warning is thrown.
-                */
+               /* This happens when rx buffer size is underestimated */
                if (unlikely(num_buf > 1)) {
-                       bpf_warn_invalid_xdp_buffer();
-
                        /* linearize data for XDP */
-                       xdp_page = xdp_linearize_page(rq, num_buf,
+                       xdp_page = xdp_linearize_page(rq, &num_buf,
                                                      page, offset, &len);
                        if (!xdp_page)
                                goto err_xdp;
@@ -575,16 +595,25 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                 * the receive path after XDP is loaded. In practice I
                 * was not able to create this condition.
                 */
-               if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+               if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;
 
-               act = do_xdp_prog(vi, rq, xdp_prog, page, offset, len);
+               act = do_xdp_prog(vi, rq, xdp_prog,
+                                 page_address(xdp_page) + offset, len);
                switch (act) {
                case XDP_PASS:
-                       if (unlikely(xdp_page != page))
-                               __free_pages(xdp_page, 0);
+                       /* We can only create skb based on xdp_page. */
+                       if (unlikely(xdp_page != page)) {
+                               rcu_read_unlock();
+                               put_page(page);
+                               head_skb = page_to_skb(vi, rq, xdp_page,
+                                                      0, len, PAGE_SIZE);
+                               ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+                               return head_skb;
+                       }
                        break;
                case XDP_TX:
+                       ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
                        if (unlikely(xdp_page != page))
                                goto err_xdp;
                        rcu_read_unlock();
@@ -593,6 +622,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                default:
                        if (unlikely(xdp_page != page))
                                __free_pages(xdp_page, 0);
+                       ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
                        goto err_xdp;
                }
        }
@@ -704,7 +734,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
        else if (vi->big_packets)
                skb = receive_big(dev, vi, rq, buf, len);
        else
-               skb = receive_small(vi, buf, len);
+               skb = receive_small(dev, vi, rq, buf, len);
 
        if (unlikely(!skb))
                return;
@@ -1678,7 +1708,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
        int i, err;
 
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
-           virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6)) {
+           virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+           virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+           virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
                netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
                return -EOPNOTSUPP;
        }
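
One detail of the virtio_net rework worth spelling out: xdp_linearize_page() now takes num_buf by reference and put_page()s each source fragment as it is copied, so the caller neither reprocesses consumed fragments nor leaks their page references. A greatly simplified sketch (the real code resolves buffers through mergeable_ctx_to_buf_address()):

    /* Sketch: copy fragments into one page, dropping each source
     * reference and reporting consumed buffers back via *num_buf.
     */
    void *buf;
    struct page *p;
    unsigned int buflen;

    while (--*num_buf) {
            buf = virtqueue_get_buf(rq->vq, &buflen);
            if (!buf)
                    return NULL;            /* caller unwinds */
            p = virt_to_head_page(buf);
            if (page_off + buflen > PAGE_SIZE) {
                    put_page(p);
                    return NULL;
            }
            memcpy(page_address(page) + page_off, buf, buflen);
            page_off += buflen;
            put_page(p);                    /* fragment consumed */
    }
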
index c893314a191f6b473af17f593c65d8b63ed5dbc8..50d6ee6afe26fc362f843af5314c1644cf033e8f 100644 (file)
@@ -8271,7 +8271,7 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
                dma_unmap_single(ar->dev,
                                 ar->wmi.mem_chunks[i].paddr,
                                 ar->wmi.mem_chunks[i].len,
-                                DMA_TO_DEVICE);
+                                DMA_BIDIRECTIONAL);
                kfree(ar->wmi.mem_chunks[i].vaddr);
        }
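
The ath10k change makes the unmap direction DMA_BIDIRECTIONAL, reflecting that the firmware can write these WMI host memory chunks as well as read them. The DMA API requires the direction at unmap time to match the one used when mapping; CONFIG_DMA_API_DEBUG reports such mismatches. Sketch:

    /* Sketch: map and unmap directions must agree. */
    paddr = dma_map_single(dev, vaddr, len, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, paddr))
            return -ENOMEM;
    /* ... device reads and writes the buffer ... */
    dma_unmap_single(dev, paddr, len, DMA_BIDIRECTIONAL);
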
 
index 486afa98a5b8d1d73a56829f6d16ecb1316cb66e..4e2f3ac266c3750d069a2176b97dd1726b310f57 100644 (file)
@@ -2713,7 +2713,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                fifo_list = &txq->txq_fifo[txq->txq_tailidx];
                if (list_empty(fifo_list)) {
                        ath_txq_unlock(sc, txq);
-                       return;
+                       break;
                }
 
                bf = list_first_entry(fifo_list, struct ath_buf, list);
index ccae3bbe7db24deb3a5656c2ba9556ad6dbf7cfa..7ffc4aba5bab0683fe993d1d5eef9e8e3d8389d0 100644 (file)
@@ -6868,7 +6868,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
 
        err = brcmf_p2p_attach(cfg, p2pdev_forced);
        if (err) {
-               brcmf_err("P2P initilisation failed (%d)\n", err);
+               brcmf_err("P2P initialisation failed (%d)\n", err);
                goto wiphy_unreg_out;
        }
        err = brcmf_btcoex_attach(cfg);
@@ -6893,7 +6893,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
        err = brcmf_fweh_activate_events(ifp);
        if (err) {
                brcmf_err("FWEH activation failed (%d)\n", err);
-               goto wiphy_unreg_out;
+               goto detach;
        }
 
        /* Fill in some of the advertised nl80211 supported features */
@@ -6908,6 +6908,9 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
 
        return cfg;
 
+detach:
+       brcmf_btcoex_detach(cfg);
+       brcmf_p2p_detach(&cfg->p2p);
 wiphy_unreg_out:
        wiphy_unregister(cfg->wiphy);
 priv_out:
index f273cab0da1022647842caf27020b01d613f05ba..9a25e79a46cf2d256c70c905bf9d03ebbe19afe8 100644 (file)
@@ -137,6 +137,7 @@ static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
        pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
        pfn.wsec = cpu_to_le32(0);
        pfn.infra = cpu_to_le32(1);
+       pfn.flags = 0;
        if (active)
                pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
        pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
index 2caa4ad04dba4a6e66a02b74173ab1b016ca7af1..ded1493fee9c975742ede341b991b4446a48e3d7 100644 (file)
@@ -1829,7 +1829,8 @@ bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
        pskb = __skb_dequeue(&ring->queue);
-       dev_kfree_skb_irq(pskb);
+       if (pskb)
+               dev_kfree_skb_irq(pskb);
 
        /*this is wrong, fill_tx_cmddesc needs update*/
        pdesc = &ring->desc[0];
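
The rtlwifi fix accounts for __skb_dequeue() returning NULL on an empty queue; dev_kfree_skb_irq(), unlike kfree_skb(), does not tolerate a NULL argument, hence the explicit check:

    /* Sketch: guard the dequeue from a possibly-empty ring. */
    pskb = __skb_dequeue(&ring->queue);
    if (pskb)
            dev_kfree_skb_irq(pskb);
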
index 2fd7dc2e8fc4b742f505b7c54961f1be4dc927de..3d21a154dce79deceeff77cd16ef5c6bf2a71978 100644 (file)
@@ -50,7 +50,7 @@
 #define NVME_AQ_DEPTH          256
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
-               
+
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -1349,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev,
 {
        struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
 
-       return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
+       return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
                       ndev->cmbloc, ndev->cmbsz);
 }
 static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
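
The nvme sysfs fix swaps snprintf() for scnprintf(): snprintf() returns the length that would have been written, which can exceed PAGE_SIZE, while scnprintf() returns the bytes actually placed in the buffer (excluding the NUL), which is what a show() callback must report. Sketch:

    /* Sketch: sysfs show() must return bytes actually in the buffer. */
    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return scnprintf(buf, PAGE_SIZE, "value: %d\n", 42);
    }
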
index eb0f5b13841a505ea4157e949cf179d71d26175e..9aafbb03482d735fcc55bdfab3df83249900a024 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
+#include <linux/regmap.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
                           DCDC5, DCDC6, LDO1, LS3 };
 
-#define TPS65218_REGULATOR(_name, _id, _type, _ops, _n, _vr, _vm, _er, _em, \
-                          _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
+#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
+                          _em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
        {                                                       \
                .name                   = _name,                \
+               .of_match               = _of,                  \
                .id                     = _id,                  \
                .ops                    = &_ops,                \
                .n_voltages             = _n,                   \
@@ -54,14 +56,6 @@ enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
                .bypass_mask    = _sm,                          \
        }                                                       \
 
-#define TPS65218_INFO(_id, _nm, _min, _max)    \
-       [_id] = {                                       \
-               .id             = _id,                  \
-               .name           = _nm,                  \
-               .min_uV         = _min,                 \
-               .max_uV         = _max,                 \
-       }
-
 static const struct regulator_linear_range dcdc1_dcdc2_ranges[] = {
        REGULATOR_LINEAR_RANGE(850000, 0x0, 0x32, 10000),
        REGULATOR_LINEAR_RANGE(1375000, 0x33, 0x3f, 25000),
@@ -77,36 +71,6 @@ static const struct regulator_linear_range dcdc4_ranges[] = {
        REGULATOR_LINEAR_RANGE(1600000, 0x10, 0x34, 50000),
 };
 
-static struct tps_info tps65218_pmic_regs[] = {
-       TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000),
-       TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000),
-       TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000),
-       TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000),
-       TPS65218_INFO(DCDC5, "DCDC5", 1000000, 1000000),
-       TPS65218_INFO(DCDC6, "DCDC6", 1800000, 1800000),
-       TPS65218_INFO(LDO1, "LDO1", 900000, 3400000),
-       TPS65218_INFO(LS3, "LS3", -1, -1),
-};
-
-#define TPS65218_OF_MATCH(comp, label) \
-       { \
-               .compatible = comp, \
-               .data = &label, \
-       }
-
-static const struct of_device_id tps65218_of_match[] = {
-       TPS65218_OF_MATCH("ti,tps65218-dcdc1", tps65218_pmic_regs[DCDC1]),
-       TPS65218_OF_MATCH("ti,tps65218-dcdc2", tps65218_pmic_regs[DCDC2]),
-       TPS65218_OF_MATCH("ti,tps65218-dcdc3", tps65218_pmic_regs[DCDC3]),
-       TPS65218_OF_MATCH("ti,tps65218-dcdc4", tps65218_pmic_regs[DCDC4]),
-       TPS65218_OF_MATCH("ti,tps65218-dcdc5", tps65218_pmic_regs[DCDC5]),
-       TPS65218_OF_MATCH("ti,tps65218-dcdc6", tps65218_pmic_regs[DCDC6]),
-       TPS65218_OF_MATCH("ti,tps65218-ldo1", tps65218_pmic_regs[LDO1]),
-       TPS65218_OF_MATCH("ti,tps65218-ls3", tps65218_pmic_regs[LS3]),
-       { }
-};
-MODULE_DEVICE_TABLE(of, tps65218_of_match);
-
 static int tps65218_pmic_set_voltage_sel(struct regulator_dev *dev,
                                         unsigned selector)
 {
@@ -188,7 +152,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
        if (rid == TPS65218_DCDC_3 && tps->rev == TPS65218_REV_2_1)
                return 0;
 
-       if (!tps->info[rid]->strobe) {
+       if (!tps->strobes[rid]) {
                if (rid == TPS65218_DCDC_3)
                        tps->info[rid]->strobe = 3;
                else
@@ -197,8 +161,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
 
        return tps65218_set_bits(tps, dev->desc->bypass_reg,
                                 dev->desc->bypass_mask,
-                                tps->info[rid]->strobe,
-                                TPS65218_PROTECT_L1);
+                                tps->strobes[rid], TPS65218_PROTECT_L1);
 }
 
 /* Operations permitted on DCDC1, DCDC2 */
@@ -272,7 +235,7 @@ static int tps65218_pmic_get_current_limit(struct regulator_dev *dev)
        unsigned int index;
        struct tps65218 *tps = rdev_get_drvdata(dev);
 
-       retval = tps65218_reg_read(tps, dev->desc->csel_reg, &index);
+       retval = regmap_read(tps->regmap, dev->desc->csel_reg, &index);
        if (retval < 0)
                return retval;
 
@@ -300,104 +263,104 @@ static struct regulator_ops tps65218_dcdc56_pmic_ops = {
 };
 
 static const struct regulator_desc regulators[] = {
-       TPS65218_REGULATOR("DCDC1", TPS65218_DCDC_1, REGULATOR_VOLTAGE,
-                          tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC1,
+       TPS65218_REGULATOR("DCDC1", "regulator-dcdc1", TPS65218_DCDC_1,
+                          REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64,
+                          TPS65218_REG_CONTROL_DCDC1,
                           TPS65218_CONTROL_DCDC1_MASK, TPS65218_REG_ENABLE1,
                           TPS65218_ENABLE1_DC1_EN, 0, 0, dcdc1_dcdc2_ranges,
                           2, 4000, 0, TPS65218_REG_SEQ3,
                           TPS65218_SEQ3_DC1_SEQ_MASK),
-       TPS65218_REGULATOR("DCDC2", TPS65218_DCDC_2, REGULATOR_VOLTAGE,
-                          tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC2,
+       TPS65218_REGULATOR("DCDC2", "regulator-dcdc2", TPS65218_DCDC_2,
+                          REGULATOR_VOLTAGE, tps65218_dcdc12_ops, 64,
+                          TPS65218_REG_CONTROL_DCDC2,
                           TPS65218_CONTROL_DCDC2_MASK, TPS65218_REG_ENABLE1,
                           TPS65218_ENABLE1_DC2_EN, 0, 0, dcdc1_dcdc2_ranges,
                           2, 4000, 0, TPS65218_REG_SEQ3,
                           TPS65218_SEQ3_DC2_SEQ_MASK),
-       TPS65218_REGULATOR("DCDC3", TPS65218_DCDC_3, REGULATOR_VOLTAGE,
-                          tps65218_ldo1_dcdc34_ops, 64,
+       TPS65218_REGULATOR("DCDC3", "regulator-dcdc3", TPS65218_DCDC_3,
+                          REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
                           TPS65218_REG_CONTROL_DCDC3,
                           TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
                           TPS65218_ENABLE1_DC3_EN, 0, 0, ldo1_dcdc3_ranges, 2,
                           0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK),
-       TPS65218_REGULATOR("DCDC4", TPS65218_DCDC_4, REGULATOR_VOLTAGE,
-                          tps65218_ldo1_dcdc34_ops, 53,
+       TPS65218_REGULATOR("DCDC4", "regulator-dcdc4", TPS65218_DCDC_4,
+                          REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 53,
                           TPS65218_REG_CONTROL_DCDC4,
                           TPS65218_CONTROL_DCDC4_MASK, TPS65218_REG_ENABLE1,
                           TPS65218_ENABLE1_DC4_EN, 0, 0, dcdc4_ranges, 2,
                           0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK),
-       TPS65218_REGULATOR("DCDC5", TPS65218_DCDC_5, REGULATOR_VOLTAGE,
-                          tps65218_dcdc56_pmic_ops, 1, -1, -1,
-                          TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0, 0,
-                          NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
+       TPS65218_REGULATOR("DCDC5", "regulator-dcdc5", TPS65218_DCDC_5,
+                          REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
+                          -1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0,
+                          0, NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
                           TPS65218_SEQ5_DC5_SEQ_MASK),
-       TPS65218_REGULATOR("DCDC6", TPS65218_DCDC_6, REGULATOR_VOLTAGE,
-                          tps65218_dcdc56_pmic_ops, 1, -1, -1,
-                          TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0, 0,
-                          NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
+       TPS65218_REGULATOR("DCDC6", "regulator-dcdc6", TPS65218_DCDC_6,
+                          REGULATOR_VOLTAGE, tps65218_dcdc56_pmic_ops, 1, -1,
+                          -1, TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0,
+                          0, NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
                           TPS65218_SEQ5_DC6_SEQ_MASK),
-       TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, REGULATOR_VOLTAGE,
-                          tps65218_ldo1_dcdc34_ops, 64,
+       TPS65218_REGULATOR("LDO1", "regulator-ldo1", TPS65218_LDO_1,
+                          REGULATOR_VOLTAGE, tps65218_ldo1_dcdc34_ops, 64,
                           TPS65218_REG_CONTROL_LDO1,
                           TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
                           TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges,
                           2, 0, 0, TPS65218_REG_SEQ6,
                           TPS65218_SEQ6_LDO1_SEQ_MASK),
-       TPS65218_REGULATOR("LS3", TPS65218_LS_3, REGULATOR_CURRENT,
-                          tps65218_ls3_ops, 0, 0, 0, TPS65218_REG_ENABLE2,
-                          TPS65218_ENABLE2_LS3_EN, TPS65218_REG_CONFIG2,
-                          TPS65218_CONFIG2_LS3ILIM_MASK, NULL, 0, 0, 0, 0, 0),
+       TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3,
+                          REGULATOR_CURRENT, tps65218_ls3_ops, 0, 0, 0,
+                          TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN,
+                          TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK,
+                          NULL, 0, 0, 0, 0, 0),
 };
 
 static int tps65218_regulator_probe(struct platform_device *pdev)
 {
        struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
-       struct regulator_init_data *init_data;
-       const struct tps_info   *template;
        struct regulator_dev *rdev;
-       const struct of_device_id       *match;
        struct regulator_config config = { };
-       int id, ret;
+       int i, ret;
        unsigned int val;
 
-       match = of_match_device(tps65218_of_match, &pdev->dev);
-       if (!match)
-               return -ENODEV;
-
-       template = match->data;
-       id = template->id;
-       init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
-                                              &regulators[id]);
-
-       platform_set_drvdata(pdev, tps);
-
-       tps->info[id] = &tps65218_pmic_regs[id];
        config.dev = &pdev->dev;
-       config.init_data = init_data;
+       config.dev->of_node = tps->dev->of_node;
        config.driver_data = tps;
        config.regmap = tps->regmap;
-       config.of_node = pdev->dev.of_node;
 
-       rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
-       if (IS_ERR(rdev)) {
-               dev_err(tps->dev, "failed to register %s regulator\n",
-                       pdev->name);
-               return PTR_ERR(rdev);
-       }
+       /* Allocate memory for strobes */
+       tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
+                                   TPS65218_NUM_REGULATOR, GFP_KERNEL);
 
-       ret = tps65218_reg_read(tps, regulators[id].bypass_reg, &val);
-       if (ret)
-               return ret;
+       for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+               rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+                                              &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(tps->dev, "failed to register %s regulator\n",
+                               pdev->name);
+                       return PTR_ERR(rdev);
+               }
 
-       tps->info[id]->strobe = val & regulators[id].bypass_mask;
+               ret = regmap_read(tps->regmap, regulators[i].bypass_reg, &val);
+               if (ret)
+                       return ret;
+
+               tps->strobes[i] = val & regulators[i].bypass_mask;
+       }
 
        return 0;
 }
 
+static const struct platform_device_id tps65218_regulator_id_table[] = {
+       { "tps65218-regulator", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65218_regulator_id_table);
+
 static struct platform_driver tps65218_regulator_driver = {
        .driver = {
                .name = "tps65218-pmic",
-               .of_match_table = tps65218_of_match,
        },
        .probe = tps65218_regulator_probe,
+       .id_table = tps65218_regulator_id_table,
 };
 
 module_platform_driver(tps65218_regulator_driver);
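
The tps65218 conversion registers all regulators from a single probe, matching by the .of_match field in the descriptors instead of one platform device per regulator. One caveat in the hunk: the devm_kzalloc() of tps->strobes is used without a NULL check; sketched below the way the allocation would usually be written, using devm_kcalloc() for the array:

    /* Sketch: zeroed per-regulator strobe cache with the usual guard. */
    tps->strobes = devm_kcalloc(&pdev->dev, TPS65218_NUM_REGULATOR,
                                sizeof(*tps->strobes), GFP_KERNEL);
    if (!tps->strobes)
            return -ENOMEM;
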
index 581001989937ce1e0aaab11c26136d5e11b4fa4d..d5bf36ec8a751326062e47abe80ddd1f61a5f43b 100644 (file)
@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 
 
 /**
- * zfcp_dbf_rec_run - trace event related to running recovery
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
  * @tag: identifier for event
  * @erp: erp_action running
  */
-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
 {
        struct zfcp_dbf *dbf = erp->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
@@ -319,10 +320,20 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
        else
                rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
 
-       debug_event(dbf->rec, 1, rec, sizeof(*rec));
+       debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+/**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+       zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
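zfcp_dbf_rec_run() is now a thin wrapper pinning the old default level, while zfcp_dbf_rec_run_lvl() lets new callers demote noisy events. A hedged sketch of that wrapper shape; trace_level stands in for the s390 debug facility's per-buffer level filter:

#include <stdio.h>

/* stand-in for the debug facility's enabled verbosity */
static int trace_level = 1;

static void rec_run_lvl(int level, const char *tag)
{
	if (level <= trace_level)	/* debug_event() records iff level fits */
		printf("rec trace: %s (level %d)\n", tag, level);
}

static void rec_run(const char *tag)
{
	rec_run_lvl(1, tag);		/* old callers keep the old default */
}

int main(void)
{
	rec_run("erscf_1");		/* recorded at the default level */
	rec_run_lvl(4, "ertru_p");	/* dropped unless trace_level >= 4 */
	return 0;
}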
 /**
  * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
  * @tag: identifier for event
index 36d07584271d569d27ec2eeb3706235d6459e026..db186d44cfafb6036a7e452e68e2c8f078cff6d1 100644 (file)
@@ -2,7 +2,7 @@
  * zfcp device driver
  * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2015
+ * Copyright IBM Corp. 2008, 2016
  */
 
 #ifndef ZFCP_DBF_H
@@ -283,6 +283,30 @@ struct zfcp_dbf {
        struct zfcp_dbf_scsi            scsi_buf;
 };
 
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Returns true if the FCP response reports only a benign residual under count.
+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+       struct fsf_qtcb *qtcb = req->qtcb;
+       u32 fsf_stat = qtcb->header.fsf_status;
+       struct fcp_resp *fcp_rsp;
+       u8 rsp_flags, fr_status;
+
+       if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+               return false; /* not an FCP response */
+       fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+       rsp_flags = fcp_rsp->fr_flags;
+       fr_status = fcp_rsp->fr_status;
+       return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+               (rsp_flags == FCP_RESID_UNDER) &&
+               (fr_status == SAM_STAT_GOOD);
+}
+
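The predicate above demotes exactly one benign case from trace level 1 to level 5. A standalone model of the same check; FSF_FCP_RSP_AVAILABLE comes from this series, while the FCP_RESID_UNDER value is an assumption for the sketch:

#include <stdbool.h>
#include <stdio.h>

#define FSF_FCP_RSP_AVAILABLE	0x000000AF	/* from zfcp_fsf.h, this series */
#define FCP_RESID_UNDER		0x08		/* assumed fc_fcp.h flag value */
#define SAM_STAT_GOOD		0x00

/* only GOOD status with a benign residual under count is suppressed */
static bool resp_suppress(unsigned int fsf_stat, unsigned int rsp_flags,
			  unsigned int fr_status)
{
	return fsf_stat == FSF_FCP_RSP_AVAILABLE &&
	       rsp_flags == FCP_RESID_UNDER &&
	       fr_status == SAM_STAT_GOOD;
}

int main(void)
{
	/* benign: suppressed (prints 1) */
	printf("%d\n", resp_suppress(FSF_FCP_RSP_AVAILABLE,
				     FCP_RESID_UNDER, SAM_STAT_GOOD));
	/* anything else keeps full tracing (prints 0) */
	printf("%d\n", resp_suppress(FSF_FCP_RSP_AVAILABLE, 0, SAM_STAT_GOOD));
	return 0;
}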
 static inline
 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
                zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
 
        } else if (qtcb->header.fsf_status != FSF_GOOD) {
-               zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+               zfcp_dbf_hba_fsf_resp("fs_ferr",
+                                     zfcp_dbf_hba_fsf_resp_suppress(req)
+                                     ? 5 : 1, req);
 
        } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
                   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
        _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
 }
 
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+                                         struct zfcp_fsf_req *fsf_req)
+{
+       _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
 #endif /* ZFCP_DBF_H */
index a59d678125bd0e0ad0bd1ca74b0d42985abb25d8..7ccfce55903423f5e998fe3d93f6942a5879376f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
        }
 }
 
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+       unsigned long flags;
+       struct zfcp_adapter *adapter = port->adapter;
+       int port_status;
+       struct Scsi_Host *shost = adapter->scsi_host;
+       struct scsi_device *sdev;
+
+       write_lock_irqsave(&adapter->erp_lock, flags);
+       port_status = atomic_read(&port->status);
+       if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED)    == 0 ||
+           (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+                           ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+               /* new ERP of severity >= port triggered elsewhere meanwhile or
+                * local link down (adapter erp_failed but not clear unblock)
+                */
+               zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+               write_unlock_irqrestore(&adapter->erp_lock, flags);
+               return;
+       }
+       spin_lock(shost->host_lock);
+       __shost_for_each_device(sdev, shost) {
+               struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+               int lun_status;
+
+               if (zsdev->port != port)
+                       continue;
+               /* LUN under port of interest */
+               lun_status = atomic_read(&zsdev->status);
+               if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+                       continue; /* unblock rport despite failed LUNs */
+               /* LUN recovery not given up yet [maybe follow-up pending] */
+               if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+                   (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+                       /* LUN blocked:
+                        * not yet unblocked [LUN recovery pending]
+                        * or meanwhile blocked [new LUN recovery triggered]
+                        */
+                       zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+                       spin_unlock(shost->host_lock);
+                       write_unlock_irqrestore(&adapter->erp_lock, flags);
+                       return;
+               }
+       }
+       /* now the port has no children, or all children have completed
+        * recovery, and no ERP of severity >= port was triggered
+        * elsewhere in the meantime
+        */
+       zfcp_scsi_schedule_rport_register(port);
+       spin_unlock(shost->host_lock);
+       write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 {
        struct zfcp_adapter *adapter = act->adapter;
@@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
        case ZFCP_ERP_ACTION_REOPEN_LUN:
                if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
                        scsi_device_put(sdev);
+               zfcp_erp_try_rport_unblock(port);
                break;
 
        case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
                 */
                if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
                        if (result == ZFCP_ERP_SUCCEEDED)
-                               zfcp_scsi_schedule_rport_register(port);
+                               zfcp_erp_try_rport_unblock(port);
                /* fall through */
        case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
                put_device(&port->dev);
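zfcp_erp_try_rport_unblock() nests the Scsi_Host lock inside the adapter-wide erp_lock and has three exits that must unlock in reverse acquisition order. A userspace pthread model of that shape (not the kernel code; port_blocked and lun_blocked stand in for the status-flag checks):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t erp_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */

static bool port_blocked;	/* stand-in: port-level recovery pending */
static bool lun_blocked;	/* stand-in: some LUN still in recovery */

static void try_rport_unblock(void)
{
	pthread_mutex_lock(&erp_lock);
	if (port_blocked) {
		/* first early exit: drop only the lock taken so far */
		pthread_mutex_unlock(&erp_lock);
		return;
	}
	pthread_mutex_lock(&host_lock);
	if (lun_blocked) {
		/* second early exit: unlock in reverse acquisition order */
		pthread_mutex_unlock(&host_lock);
		pthread_mutex_unlock(&erp_lock);
		return;
	}
	puts("schedule rport register");	/* the actual unblock */
	pthread_mutex_unlock(&host_lock);
	pthread_mutex_unlock(&erp_lock);
}

int main(void)
{
	try_rport_unblock();
	return 0;
}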
index 968a0ab4b398c23329f5713b07091db1ab80d92d..9afdbc32b23f6386a7dce2b51e4cf67ad562d215 100644 (file)
@@ -3,7 +3,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #ifndef ZFCP_EXT_H
@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
                              struct zfcp_port *, struct scsi_device *, u8, u8);
 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+                                struct zfcp_erp_action *erp);
 extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
 extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
index be1c04b334c51f678d643e4c488173f8fd6be0ee..ea3c76ac0de14dc8ea9ad147d1f5b9ab80cc1172 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #ifndef FSF_H
@@ -78,6 +78,7 @@
 #define FSF_APP_TAG_CHECK_FAILURE              0x00000082
 #define FSF_REF_TAG_CHECK_FAILURE              0x00000083
 #define FSF_ADAPTER_STATUS_AVAILABLE           0x000000AD
+#define FSF_FCP_RSP_AVAILABLE                  0x000000AF
 #define FSF_UNKNOWN_COMMAND                    0x000000E2
 #define FSF_UNKNOWN_OP_SUBTYPE                  0x000000E3
 #define FSF_INVALID_COMMAND_OPTION              0x000000E5
index 7c2c6194dfca58e1eb2fe6466abf64fd928deb96..703fce59befef0be884449e00addad4a49981d61 100644 (file)
@@ -4,7 +4,7 @@
  * Data structure and helper functions for tracking pending FSF
  * requests.
  *
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2016
  */
 
 #ifndef ZFCP_REQLIST_H
@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
        spin_unlock_irqrestore(&rl->lock, flags);
 }
 
+/**
+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
+ * @rl: the request list that contains the target requests.
+ * @f: the function to apply to each request; the first parameter of the
+ *     function will be the target-request; the second parameter is the same
+ *     pointer as given with the argument @data.
+ * @data: freely chosen argument; passed through to @f as second parameter.
+ *
+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
+ * table (not a 'safe' variant, so don't modify the list).
+ *
+ * Holds @rl->lock over the entire request-iteration.
+ */
+static inline void
+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
+                          void (*f)(struct zfcp_fsf_req *, void *), void *data)
+{
+       struct zfcp_fsf_req *req;
+       unsigned long flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&rl->lock, flags);
+       for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+               list_for_each_entry(req, &rl->buckets[i], list)
+                       f(req, data);
+       spin_unlock_irqrestore(&rl->lock, flags);
+}
+
 #endif /* ZFCP_REQLIST_H */
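zfcp_reqlist_apply_for_all() is consumed later in this series by zfcp_scsi_forget_cmnds(). A hedged userspace model of the callback-plus-opaque-pointer pattern, with a flat array standing in for the hash buckets:

#include <stdio.h>

struct req {
	int id;
	void *data;	/* owning command; NULL once forgotten */
};

/* the kernel version holds rl->lock for the whole iteration */
static void apply_for_all(struct req *reqs, int n,
			  void (*f)(struct req *, void *), void *data)
{
	int i;

	for (i = 0; i < n; i++)
		f(&reqs[i], data);
}

/* callback in the zfcp_scsi_forget_cmnd() mould: match, then NULLify */
static void forget_cmnd(struct req *r, void *data)
{
	int *target = data;

	if (!r->data || r->id != *target)
		return;
	r->data = NULL;
	printf("forgot req %d\n", r->id);
}

int main(void)
{
	int dummy = 42, target = 2;
	struct req reqs[] = { { 1, &dummy }, { 2, &dummy }, { 3, &dummy } };

	apply_for_all(reqs, 3, forget_cmnd, &target);
	return 0;
}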
index 9069f98a18172e754c943010654de65e91c2fb7c..07ffdbb5107f732082e88c94b0362b845e7e17d6 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -88,9 +88,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
        }
 
        if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
-               /* This could be either
-                * open LUN pending: this is temporary, will result in
-                *      open LUN or ERP_FAILED, so retry command
+               /* This could be a
                 * call to rport_delete pending: mimic retry from
                 *      fc_remote_port_chkready until rport is BLOCKED
                 */
@@ -209,6 +207,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
        return retval;
 }
 
+struct zfcp_scsi_req_filter {
+       u8 tmf_scope;
+       u32 lun_handle;
+       u32 port_handle;
+};
+
+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
+{
+       struct zfcp_scsi_req_filter *filter =
+               (struct zfcp_scsi_req_filter *)data;
+
+       /* already aborted - prevent side-effects - or not a SCSI command */
+       if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+               return;
+
+       /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
+       if (old_req->qtcb->header.port_handle != filter->port_handle)
+               return;
+
+       if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
+           old_req->qtcb->header.lun_handle != filter->lun_handle)
+               return;
+
+       zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
+       old_req->data = NULL;
+}
+
+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
+{
+       struct zfcp_adapter *adapter = zsdev->port->adapter;
+       struct zfcp_scsi_req_filter filter = {
+               .tmf_scope = FCP_TMF_TGT_RESET,
+               .port_handle = zsdev->port->handle,
+       };
+       unsigned long flags;
+
+       if (tm_flags == FCP_TMF_LUN_RESET) {
+               filter.tmf_scope = FCP_TMF_LUN_RESET;
+               filter.lun_handle = zsdev->lun_handle;
+       }
+
+       /*
+        * abort_lock guards (struct zfcp_fsf_req *)->data against concurrent
+        * processing in the abort function and the normal command handler
+        */
+       write_lock_irqsave(&adapter->abort_lock, flags);
+       zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
+                                  &filter);
+       write_unlock_irqrestore(&adapter->abort_lock, flags);
+}
+
 static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 {
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
@@ -241,8 +290,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
        if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
                zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
                retval = FAILED;
-       } else
+       } else {
                zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+               zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+       }
 
        zfcp_fsf_req_free(fsf_req);
        return retval;
index a56a7b243e91fae96b05cae0118d96e9d284dd7b..316f87fe32997138911d064a84b2395a1001276f 100644 (file)
@@ -1,8 +1,8 @@
 /*
    3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
-   Modifications By: Tom Couch <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
+   Modifications By: Tom Couch
 
    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
    Copyright (C) 2010 LSI Corporation.
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 
    Note: This version of the driver does not contain a bundled firmware
          image.
index 0fdc83cfa0e1a28a42757ae52f434523238075b3..b6c208cc474f0e4ae5d37b0c7465c99129121cc7 100644 (file)
@@ -1,8 +1,8 @@
 /*
    3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
-   Modifications By: Tom Couch <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
+   Modifications By: Tom Couch
 
    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
    Copyright (C) 2010 LSI Corporation.
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 */
 
 #ifndef _3W_9XXX_H
index f8374850f714dd09c53aa1eb25a38bbcb7d0ee8d..970d8fa6bd53eb0135343483589727757cc21464 100644 (file)
@@ -1,7 +1,7 @@
 /*
    3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
 
    Copyright (C) 2009 LSI Corporation.
 
    LSI 3ware 9750 6Gb/s SAS/SATA-RAID
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 
    History
    -------
index fec6449c7595132f706439277cd396b4dc66c0a8..05e77d84c16d95254544750c4e297a35887fc5dc 100644 (file)
@@ -1,7 +1,7 @@
 /*
    3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
 
    Copyright (C) 2009 LSI Corporation.
 
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 */
 
 #ifndef _3W_SAS_H
index 25aba1613e2157f7a2e468007c202a16015b3f40..aa412ab0276523077cf09fda7ddbe086f41ec89b 100644 (file)
@@ -1,7 +1,7 @@
 /* 
    3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
    Modifications By: Joel Jacobson <linux@3ware.com>
                     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                      Brad Strand <linux@3ware.com>
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA 
 
    Bugs/Comments/Suggestions should be mailed to:                            
-   linuxraid@lsi.com
 
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
+
 
    History
    -------
index 6f65e663d3932108edaed6b75eba328d469f4fce..69e80c1ed1ca642806698177f0c345e2d600b2c5 100644 (file)
@@ -1,7 +1,7 @@
 /* 
    3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
    
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
    Modifications By: Joel Jacobson <linux@3ware.com>
                     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                      Brad Strand <linux@3ware.com>
@@ -45,7 +45,8 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA 
 
    Bugs/Comments/Suggestions should be mailed to:                            
-   linuxraid@lsi.com
+
+   aradford@gmail.com
    
    For more information, goto:
    http://www.lsi.com
index dfa93347c752479aeff9f4c307911add845a880e..a4f6b0d955159cde292b9c7f09811d5d542c3fa8 100644 (file)
@@ -1233,6 +1233,7 @@ config SCSI_QLOGICPTI
 
 source "drivers/scsi/qla2xxx/Kconfig"
 source "drivers/scsi/qla4xxx/Kconfig"
+source "drivers/scsi/qedi/Kconfig"
 
 config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
index a2d03957cbe2e85626199e854f6ad1e26f5128ee..736b77414a4baae3fe9520dc1eea00df591b4d7f 100644 (file)
@@ -131,6 +131,7 @@ obj-$(CONFIG_PS3_ROM)               += ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)  += libiscsi.o bnx2i/
+obj-$(CONFIG_QEDI)          += libiscsi.o qedi/
 obj-$(CONFIG_BE2ISCSI)         += libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_ESAS2R)      += esas2r/
 obj-$(CONFIG_SCSI_PMCRAID)     += pmcraid.o
index d849ffa378b1ef19f80af760d53ae29774007e7c..4f5ca794bb71507a90879af23f0f4ea998fea9ec 100644 (file)
@@ -97,9 +97,6 @@
  * and macros and include this file in your driver.
  *
  * These macros control options :
- * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
- * defined.
- *
  * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
  * for commands that return with a CHECK CONDITION status.
  *
  * NCR5380_dma_residual   - residual byte count
  *
  * The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID.  If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
+ * after setting the appropriate host specific fields and ID.
  */
 
 #ifndef NCR5380_io_delay
@@ -351,76 +346,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 }
 #endif
 
-
-static int probe_irq;
-
-/**
- * probe_intr  -       helper for IRQ autoprobe
- * @irq: interrupt number
- * @dev_id: unused
- * @regs: unused
- *
- * Set a flag to indicate the IRQ in question was received. This is
- * used by the IRQ probe code.
- */
-
-static irqreturn_t probe_intr(int irq, void *dev_id)
-{
-       probe_irq = irq;
-       return IRQ_HANDLED;
-}
-
-/**
- * NCR5380_probe_irq   -       find the IRQ of an NCR5380
- * @instance: NCR5380 controller
- * @possible: bitmask of ISA IRQ lines
- *
- * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
- * and then looking to see what interrupt actually turned up.
- */
-
-static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
-                                               int possible)
-{
-       struct NCR5380_hostdata *hostdata = shost_priv(instance);
-       unsigned long timeout;
-       int trying_irqs, i, mask;
-
-       for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
-               if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
-                       trying_irqs |= mask;
-
-       timeout = jiffies + msecs_to_jiffies(250);
-       probe_irq = NO_IRQ;
-
-       /*
-        * A interrupt is triggered whenever BSY = false, SEL = true
-        * and a bit set in the SELECT_ENABLE_REG is asserted on the
-        * SCSI bus.
-        *
-        * Note that the bus is only driven when the phase control signals
-        * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
-        * to zero.
-        */
-
-       NCR5380_write(TARGET_COMMAND_REG, 0);
-       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-       NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
-       NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
-
-       while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
-               schedule_timeout_uninterruptible(1);
-
-       NCR5380_write(SELECT_ENABLE_REG, 0);
-       NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-       for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
-               if (trying_irqs & mask)
-                       free_irq(i, NULL);
-
-       return probe_irq;
-}
-
 /**
  * NCR5380_info - report driver and host information
  * @instance: relevant scsi host instance
index 3c6ce5434449677026551e8d1acd5c447785bb8e..51a3567a6fb2a36a3eb44fbf94edf5536e333fe4 100644 (file)
 
 #define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
 
-/*
- * These are "special" values for the irq and dma_channel fields of the 
- * Scsi_Host structure
- */
-
-#define DMA_NONE       255
-#define IRQ_AUTO       254
-#define DMA_AUTO       254
-#define PORT_AUTO      0xffff  /* autoprobe io port for 53c400a */
-
 #ifndef NO_IRQ
 #define NO_IRQ         0
 #endif
@@ -290,7 +280,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
 #define NCR5380_dprint_phase(flg, arg) do {} while (0)
 #endif
 
-static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
 static int NCR5380_init(struct Scsi_Host *instance, int flags);
 static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
 static void NCR5380_exit(struct Scsi_Host *instance);
index e4f3e22fcbd9b3d4463374375e7ee32542f409d4..3ecbf20ca29f96b970cd4b14c38eb3fdbb3b1511 100644 (file)
@@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = {
        { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
        { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
        { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
-       { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
        { 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = {
        { aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
        { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
        { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
-       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
 };
 
 /**
index 9e6f647ff1c16828f48d5abed6ce248155106977..9a2fdc305cf2a9a0c7b4f4fabae4191bb9f12ad1 100644 (file)
@@ -189,7 +189,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
 {
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
-       int t4 = is_t4(lldi->adapter_type);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
@@ -232,7 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
-       } else {
+       } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req *req =
                                (struct cpl_t5_act_open_req *)skb->head;
                u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -260,12 +259,45 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
+       } else {
+               struct cpl_t6_act_open_req *req =
+                               (struct cpl_t6_act_open_req *)skb->head;
+               u32 isn = (prandom_u32() & ~7UL) - 1;
+
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+                                                           qid_atid));
+               req->local_port = csk->saddr.sin_port;
+               req->peer_port = csk->daddr.sin_port;
+               req->local_ip = csk->saddr.sin_addr.s_addr;
+               req->peer_ip = csk->daddr.sin_addr.s_addr;
+               req->opt0 = cpu_to_be64(opt0);
+               req->params = cpu_to_be64(FILTER_TUPLE_V(
+                               cxgb4_select_ntuple(
+                                       csk->cdev->ports[csk->port_id],
+                                       csk->l2t)));
+               req->rsvd = cpu_to_be32(isn);
+
+               opt2 |= T5_ISS_VALID;
+               opt2 |= RX_FC_DISABLE_F;
+               opt2 |= T5_OPT_2_VALID_F;
+
+               req->opt2 = cpu_to_be32(opt2);
+               req->rsvd2 = cpu_to_be32(0);
+               req->opt3 = cpu_to_be32(0);
+
+               log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+                         "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+                         csk, &req->local_ip, ntohs(req->local_port),
+                         &req->peer_ip, ntohs(req->peer_port),
+                         csk->atid, csk->rss_qid);
        }
 
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 
        pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
-                      (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+                      (&csk->saddr), (&csk->daddr),
+                      CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
                       csk->state, csk->flags, csk->atid, csk->rss_qid);
 
        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
@@ -276,7 +308,6 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                               struct l2t_entry *e)
 {
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
-       int t4 = is_t4(lldi->adapter_type);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
@@ -294,10 +325,9 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 
        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F |
-               RX_FC_DISABLE_F |
                RSS_QUEUE_V(csk->rss_qid);
 
-       if (t4) {
+       if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req6 *req =
                            (struct cpl_act_open_req6 *)skb->head;
 
@@ -322,7 +352,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                req->params = cpu_to_be32(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t));
-       } else {
+       } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req6 *req =
                                (struct cpl_t5_act_open_req6 *)skb->head;
 
@@ -345,12 +375,41 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t)));
+       } else {
+               struct cpl_t6_act_open_req6 *req =
+                               (struct cpl_t6_act_open_req6 *)skb->head;
+
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                                           qid_atid));
+               req->local_port = csk->saddr6.sin6_port;
+               req->peer_port = csk->daddr6.sin6_port;
+               req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+               req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+                                                                       8);
+               req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+               req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+                                                                       8);
+               req->opt0 = cpu_to_be64(opt0);
+
+               opt2 |= RX_FC_DISABLE_F;
+               opt2 |= T5_OPT_2_VALID_F;
+
+               req->opt2 = cpu_to_be32(opt2);
+
+               req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
+                                         csk->cdev->ports[csk->port_id],
+                                         csk->l2t)));
+
+               req->rsvd2 = cpu_to_be32(0);
+               req->opt3 = cpu_to_be32(0);
        }
 
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 
        pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
-               t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+               CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
+               csk->flags, csk->atid,
                &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
                csk->rss_qid);
@@ -742,7 +801,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
                       (&csk->saddr), (&csk->daddr),
                       atid, tid, csk, csk->state, csk->flags, rcv_isn);
 
-       module_put(THIS_MODULE);
+       module_put(cdev->owner);
 
        cxgbi_sock_get(csk);
        csk->tid = tid;
@@ -891,7 +950,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
        if (is_neg_adv(status))
                goto rel_skb;
 
-       module_put(THIS_MODULE);
+       module_put(cdev->owner);
 
        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
@@ -1173,6 +1232,101 @@ rel_skb:
        __kfree_skb(skb);
 }
 
+static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+       struct cxgbi_sock *csk;
+       struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
+       struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+       struct tid_info *t = lldi->tids;
+       struct sk_buff *lskb;
+       u32 tid = GET_TID(cpl);
+       u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
+
+       csk = lookup_tid(t, tid);
+       if (unlikely(!csk)) {
+               pr_err("can't find conn. for tid %u.\n", tid);
+               goto rel_skb;
+       }
+
+       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+                 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
+                 csk, csk->state, csk->flags, csk->tid, skb,
+                 skb->len, pdu_len_ddp);
+
+       spin_lock_bh(&csk->lock);
+
+       if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+               log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+                         "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+                         csk, csk->state, csk->flags, csk->tid);
+
+               if (csk->state != CTP_ABORTING)
+                       goto abort_conn;
+               else
+                       goto discard;
+       }
+
+       cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
+       cxgbi_skcb_flags(skb) = 0;
+
+       skb_reset_transport_header(skb);
+       __skb_pull(skb, sizeof(*cpl));
+       __pskb_trim(skb, ntohs(cpl->len));
+
+       if (!csk->skb_ulp_lhdr)
+               csk->skb_ulp_lhdr = skb;
+
+       lskb = csk->skb_ulp_lhdr;
+       cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+
+       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+                 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
+                 csk, csk->state, csk->flags, skb, lskb);
+
+       __skb_queue_tail(&csk->receive_queue, skb);
+       spin_unlock_bh(&csk->lock);
+       return;
+
+abort_conn:
+       send_abort_req(csk);
+discard:
+       spin_unlock_bh(&csk->lock);
+rel_skb:
+       __kfree_skb(skb);
+}
+
+static void
+cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
+                     struct sk_buff *skb, u32 ddpvld)
+{
+       if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
+               pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+                       csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
+       }
+
+       if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
+               pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+                       csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
+       }
+
+       if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
+               log_debug(1 << CXGBI_DBG_PDU_RX,
+                         "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
+                         csk, skb, ddpvld);
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
+       }
+
+       if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
+           !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
+               log_debug(1 << CXGBI_DBG_PDU_RX,
+                         "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
+                         csk, skb, ddpvld);
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
+       }
+}
+
 static void do_rx_data_ddp(struct cxgbi_device *cdev,
                                  struct sk_buff *skb)
 {
@@ -1182,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
-       unsigned int status = ntohl(rpl->ddpvld);
+       u32 ddpvld = be32_to_cpu(rpl->ddpvld);
 
        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
@@ -1192,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
-               csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
+               csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
 
        spin_lock_bh(&csk->lock);
 
@@ -1220,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
                pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
                        csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
 
-       if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
-               pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
-                       csk, lskb, status, cxgbi_skcb_flags(lskb));
-               cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
-       }
-       if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
-               pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
-                       csk, lskb, status, cxgbi_skcb_flags(lskb));
-               cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
-       }
-       if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
-               log_debug(1 << CXGBI_DBG_PDU_RX,
-                       "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
-                       csk, lskb, status);
-               cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
-       }
-       if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
-               !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
-               log_debug(1 << CXGBI_DBG_PDU_RX,
-                       "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
-                       csk, lskb, status);
-               cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
-       }
+       cxgb4i_process_ddpvld(csk, lskb, ddpvld);
+
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
                csk, lskb, cxgbi_skcb_flags(lskb));
@@ -1260,6 +1393,98 @@ rel_skb:
        __kfree_skb(skb);
 }
 
+static void
+do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+       struct cxgbi_sock *csk;
+       struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
+       struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+       struct tid_info *t = lldi->tids;
+       struct sk_buff *data_skb = NULL;
+       u32 tid = GET_TID(rpl);
+       u32 ddpvld = be32_to_cpu(rpl->ddpvld);
+       u32 seq = be32_to_cpu(rpl->seq);
+       u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
+
+       csk = lookup_tid(t, tid);
+       if (unlikely(!csk)) {
+               pr_err("can't find connection for tid %u.\n", tid);
+               goto rel_skb;
+       }
+
+       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+                 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
+                 "pdu_len_ddp %u, status %u.\n",
+                 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
+                 ntohs(rpl->len), pdu_len_ddp, rpl->status);
+
+       spin_lock_bh(&csk->lock);
+
+       if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+               log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+                         "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+                         csk, csk->state, csk->flags, csk->tid);
+
+               if (csk->state != CTP_ABORTING)
+                       goto abort_conn;
+               else
+                       goto discard;
+       }
+
+       cxgbi_skcb_tcp_seq(skb) = seq;
+       cxgbi_skcb_flags(skb) = 0;
+       cxgbi_skcb_rx_pdulen(skb) = 0;
+
+       skb_reset_transport_header(skb);
+       __skb_pull(skb, sizeof(*rpl));
+       __pskb_trim(skb, be16_to_cpu(rpl->len));
+
+       csk->rcv_nxt = seq + pdu_len_ddp;
+
+       if (csk->skb_ulp_lhdr) {
+               data_skb = skb_peek(&csk->receive_queue);
+               if (!data_skb ||
+                   !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
+                       pr_err("Error! freelist data not found 0x%p, tid %u\n",
+                              data_skb, tid);
+
+                       goto abort_conn;
+               }
+               __skb_unlink(data_skb, &csk->receive_queue);
+
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
+
+               __skb_queue_tail(&csk->receive_queue, skb);
+               __skb_queue_tail(&csk->receive_queue, data_skb);
+       } else {
+               __skb_queue_tail(&csk->receive_queue, skb);
+       }
+
+       csk->skb_ulp_lhdr = NULL;
+
+       cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
+       cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
+       cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
+       cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
+
+       cxgb4i_process_ddpvld(csk, skb, ddpvld);
+
+       log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
+                 csk, skb, cxgbi_skcb_flags(skb));
+
+       cxgbi_conn_pdu_ready(csk);
+       spin_unlock_bh(&csk->lock);
+
+       return;
+
+abort_conn:
+       send_abort_req(csk);
+discard:
+       spin_unlock_bh(&csk->lock);
+rel_skb:
+       __kfree_skb(skb);
+}
+
 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
 {
        struct cxgbi_sock *csk;
@@ -1382,7 +1607,6 @@ static int init_act_open(struct cxgbi_sock *csk)
        void *daddr;
        unsigned int step;
        unsigned int size, size6;
-       int t4 = is_t4(lldi->adapter_type);
        unsigned int linkspeed;
        unsigned int rcv_winf, snd_winf;
 
@@ -1428,12 +1652,15 @@ static int init_act_open(struct cxgbi_sock *csk)
                cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
 #endif
 
-       if (t4) {
+       if (is_t4(lldi->adapter_type)) {
                size = sizeof(struct cpl_act_open_req);
                size6 = sizeof(struct cpl_act_open_req6);
-       } else {
+       } else if (is_t5(lldi->adapter_type)) {
                size = sizeof(struct cpl_t5_act_open_req);
                size6 = sizeof(struct cpl_t5_act_open_req6);
+       } else {
+               size = sizeof(struct cpl_t6_act_open_req);
+               size6 = sizeof(struct cpl_t6_act_open_req6);
        }
 
        if (csk->csk_family == AF_INET)
@@ -1452,8 +1679,8 @@ static int init_act_open(struct cxgbi_sock *csk)
                csk->mtu = dst_mtu(csk->dst);
        cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
        csk->tx_chan = cxgb4_port_chan(ndev);
-       /* SMT two entries per row */
-       csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
+       csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
+                                        cxgb4_port_viid(ndev));
        step = lldi->ntxq / lldi->nchan;
        csk->txq_idx = cxgb4_port_idx(ndev) * step;
        step = lldi->nrxq / lldi->nchan;
@@ -1486,7 +1713,11 @@ static int init_act_open(struct cxgbi_sock *csk)
                       csk->mtu, csk->mss_idx, csk->smac_idx);
 
        /* must wait for either a act_open_rpl or act_open_establish */
-       try_module_get(THIS_MODULE);
+       if (!try_module_get(cdev->owner)) {
+               pr_err("%s, try_module_get failed.\n", ndev->name);
+               goto rel_resource;
+       }
+
        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        if (csk->csk_family == AF_INET)
                send_act_open_req(csk, skb, csk->l2t);
@@ -1521,10 +1752,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
        [CPL_FW4_ACK] = do_fw4_ack,
        [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
-       [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
+       [CPL_ISCSI_DATA] = do_rx_iscsi_data,
        [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
        [CPL_RX_DATA_DDP] = do_rx_data_ddp,
        [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+       [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
        [CPL_RX_DATA] = do_rx_data,
 };
 
@@ -1794,10 +2026,12 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
        cdev->nports = lldi->nports;
        cdev->mtus = lldi->mtus;
        cdev->nmtus = NMTUS;
-       cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
+       cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
+                                CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
        cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
        cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
        cdev->itp = &cxgb4i_iscsi_transport;
+       cdev->owner = THIS_MODULE;
 
        cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
                        << FW_VIID_PFN_S;
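Several hunks above replace try_module_get(THIS_MODULE)/module_put(THIS_MODULE) with the new cdev->owner field, so the module pinned across an in-flight active-open is the one that registered the device, not whichever module happens to run the code. A userspace counter model of the balanced get/put; struct module here is a stub, not the kernel's:

#include <assert.h>
#include <stdio.h>

struct module {
	int refcnt;	/* stub; the kernel uses a managed refcount */
};

static int try_module_get(struct module *m)
{
	m->refcnt++;
	return 1;	/* the real call can fail while unloading */
}

static void module_put(struct module *m)
{
	assert(m->refcnt-- > 0);
}

struct cdev {
	struct module *owner;	/* set once, in t4_uld_add() above */
};

static void act_open(struct cdev *c)
{
	try_module_get(c->owner);	/* pin for the in-flight open */
}

static void act_establish(struct cdev *c)
{
	module_put(c->owner);	/* dropped on establish, open-rpl or ARP failure */
}

int main(void)
{
	struct module owner = { 0 };
	struct cdev c = { &owner };

	act_open(&c);
	act_establish(&c);
	printf("refcnt=%d\n", owner.refcnt);	/* balanced: 0 */
	return 0;
}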
index 2ffe029ff2b6ff29fbaaada58d277d89d2643f31..9167bcd9fffe9b3a5fea9005671239a96551aace 100644 (file)
@@ -642,6 +642,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                        n->dev->name, ndev->name, mtu);
        }
 
+       if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+               pr_info("%s interface not up.\n", ndev->name);
+               err = -ENETDOWN;
+               goto rel_neigh;
+       }
+
        cdev = cxgbi_device_find_by_netdev(ndev, &port);
        if (!cdev) {
                pr_info("dst %pI4, %s, NOT cxgbi device.\n",
@@ -736,6 +742,12 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
        }
        ndev = n->dev;
 
+       if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+               pr_info("%s interface not up.\n", ndev->name);
+               err = -ENETDOWN;
+               goto rel_rt;
+       }
+
        if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
                pr_info("multi-cast route %pI6 port %u, dev %s.\n",
                        daddr6->sin6_addr.s6_addr,
@@ -896,6 +908,7 @@ EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 {
        struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+       struct module *owner = csk->cdev->owner;
 
        log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
                csk, (csk)->state, (csk)->flags, (csk)->tid);
@@ -906,6 +919,8 @@ void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
        __kfree_skb(skb);
+
+       module_put(owner);
 }
 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
 
@@ -1574,6 +1589,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
                return -EIO;
        }
 
+       if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
+           cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
+               /* If the completion flag is set and the data was placed
+                * directly into host memory, update task->exp_datasn to
+                * the DataSN in the completion iSCSI header: the T6
+                * adapter generates a completion only for the last PDU
+                * of a sequence.
+                */
+               itt_t itt = ((struct iscsi_data *)skb->data)->itt;
+               struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
+               u32 data_sn = be32_to_cpu(((struct iscsi_data *)
+                                                       skb->data)->datasn);
+               if (task && task->sc) {
+                       struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+                       tcp_task->exp_datasn = data_sn;
+               }
+       }
+
        return read_pdu_skb(conn, skb, 0, 0);
 }
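The skb_read_pdu_bhs() change handles T6 raising a single completion per DDP'd sequence: the expected DataSN has to fast-forward to the one carried in that completion. A small model of the update, with stub types (struct tcp_task is a stand-in for iscsi_tcp_task):

#include <stdint.h>
#include <stdio.h>

struct tcp_task {
	uint32_t exp_datasn;	/* stand-in for iscsi_tcp_task */
};

/* intermediate PDUs never reached the host, so jump the counter */
static void on_iscsi_completion(struct tcp_task *t, uint32_t hdr_datasn,
				int data_was_ddpd)
{
	if (data_was_ddpd)
		t->exp_datasn = hdr_datasn;
}

int main(void)
{
	struct tcp_task t = { .exp_datasn = 3 };

	on_iscsi_completion(&t, 9, 1);
	printf("exp_datasn=%u\n", (unsigned int)t.exp_datasn);	/* 9 */
	return 0;
}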
 
@@ -1627,15 +1661,15 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
                csk->rcv_wup, cdev->rx_credit_thres,
                csk->rcv_win);
 
+       if (!cdev->rx_credit_thres)
+               return;
+
        if (csk->state != CTP_ESTABLISHED)
                return;
 
        credits = csk->copied_seq - csk->rcv_wup;
        if (unlikely(!credits))
                return;
-       if (unlikely(cdev->rx_credit_thres == 0))
-               return;
-
        must_send = credits + 16384 >= csk->rcv_win;
        if (must_send || credits >= cdev->rx_credit_thres)
                csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
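csk_return_rx_credits() now treats rx_credit_thres == 0 (set for T6 in t4_uld_add() below) as "the driver never returns credits from here", and hoists that check ahead of the state test. A sketch of the decision logic using the constants from the hunk:

#include <stdbool.h>
#include <stdio.h>

static bool should_return_credits(unsigned int credits, unsigned int thres,
				  unsigned int rcv_win)
{
	if (!thres)	/* T6: zero now means credits are never returned */
		return false;
	if (!credits)
		return false;
	/* a nearly exhausted window forces a return regardless of threshold */
	return credits + 16384 >= rcv_win || credits >= thres;
}

int main(void)
{
	printf("%d\n", should_return_credits(4096, 0, 262144));	/* 0: T6 */
	printf("%d\n", should_return_credits(4096, 2048, 262144));	/* 1 */
	return 0;
}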
index e7802738f5d28e0b7e7fe4e3f6bf8075197b838f..95ba99044c3e9c2af6f0a8deb656b878c15cf656 100644 (file)
@@ -207,6 +207,7 @@ enum cxgbi_skcb_flags {
        SKCBF_RX_HDR,           /* received pdu header */
        SKCBF_RX_DATA,          /* received pdu payload */
        SKCBF_RX_STATUS,        /* received ddp status */
+       SKCBF_RX_ISCSI_COMPL,   /* received iscsi completion */
        SKCBF_RX_DATA_DDPD,     /* pdu payload ddp'd */
        SKCBF_RX_HCRC_ERR,      /* header digest error */
        SKCBF_RX_DCRC_ERR,      /* data digest error */
@@ -467,6 +468,7 @@ struct cxgbi_device {
        struct pci_dev *pdev;
        struct dentry *debugfs_root;
        struct iscsi_transport *itp;
+       struct module *owner;
 
        unsigned int pfvf;
        unsigned int rx_credit_thres;
index de5147a8c959aaa7cc070d55a9040e9f1ead37fd..6f9665d50d84bb485d3ba9cf99da05bae0f1c018 100644 (file)
@@ -37,7 +37,7 @@
 #define MAX_CARDS 8
 
 /* old-style parameters for compatibility */
-static int ncr_irq;
+static int ncr_irq = -1;
 static int ncr_addr;
 static int ncr_5380;
 static int ncr_53c400;
@@ -52,9 +52,9 @@ module_param(ncr_53c400a, int, 0);
 module_param(dtc_3181e, int, 0);
 module_param(hp_c2502, int, 0);
 
-static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
 module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])");
 
 static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
 module_param_array(base, int, NULL, 0);
@@ -67,6 +67,56 @@ MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC31
 MODULE_ALIAS("g_NCR5380_mmio");
 MODULE_LICENSE("GPL");
 
+static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
+{
+       struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+       /*
+        * An interrupt is triggered whenever BSY = false, SEL = true
+        * and a bit set in the SELECT_ENABLE_REG is asserted on the
+        * SCSI bus.
+        *
+        * Note that the bus is only driven when the phase control signals
+        * (I/O, C/D, and MSG) match those in the TCR.
+        */
+       NCR5380_write(TARGET_COMMAND_REG,
+                     PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+       NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+       NCR5380_write(INITIATOR_COMMAND_REG,
+                     ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
+
+       msleep(1);
+
+       NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+       NCR5380_write(SELECT_ENABLE_REG, 0);
+       NCR5380_write(TARGET_COMMAND_REG, 0);
+}
+
+/**
+ * g_NCR5380_probe_irq - find the IRQ of an NCR5380 or equivalent
+ * @instance: SCSI host instance
+ *
+ * Autoprobe for the IRQ line used by the card by triggering an IRQ
+ * and then looking to see what interrupt actually turned up.
+ */
+
+static int g_NCR5380_probe_irq(struct Scsi_Host *instance)
+{
+       struct NCR5380_hostdata *hostdata = shost_priv(instance);
+       int irq_mask, irq;
+
+       NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+       irq_mask = probe_irq_on();
+       g_NCR5380_trigger_irq(instance);
+       irq = probe_irq_off(irq_mask);
+       NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+       if (irq <= 0)
+               return NO_IRQ;
+       return irq;
+}
+
 /*
  * Configure I/O address of 53C400A or DTC436 by writing magic numbers
  * to ports 0x779 and 0x379.
@@ -81,14 +131,33 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
        outb(magic[3], 0x379);
        outb(magic[4], 0x379);
 
-       /* allowed IRQs for HP C2502 */
-       if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7)
-               irq = 0;
+       if (irq == 9)
+               irq = 2;
+
        if (idx >= 0 && idx <= 7)
                cfg = 0x80 | idx | (irq << 4);
        outb(cfg, 0x379);
 }
 
+static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id)
+{
+       return IRQ_HANDLED;
+}
+
+static int legacy_find_free_irq(int *irq_table)
+{
+       while (*irq_table != -1) {
+               if (!request_irq(*irq_table, legacy_empty_irq_handler,
+                                IRQF_PROBE_SHARED, "Test IRQ",
+                                (void *)irq_table)) {
+                       free_irq(*irq_table, (void *) irq_table);
+                       return *irq_table;
+               }
+               irq_table++;
+       }
+       return -1;
+}
+
 static unsigned int ncr_53c400a_ports[] = {
        0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
 };
@@ -101,6 +170,9 @@ static u8 ncr_53c400a_magic[] = {   /* 53C400A & DTC436 */
 static u8 hp_c2502_magic[] = { /* HP C2502 */
        0x0f, 0x22, 0xf0, 0x20, 0x80
 };
+static int hp_c2502_irqs[] = {
+       9, 5, 7, 3, 4, -1
+};
 
 static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
                        struct device *pdev, int base, int irq, int board)
@@ -248,6 +320,13 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
                }
        }
 
+       /* Check for vacant slot */
+       NCR5380_write(MODE_REG, 0);
+       if (NCR5380_read(MODE_REG) != 0) {
+               ret = -ENODEV;
+               goto out_unregister;
+       }
+
        ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
        if (ret)
                goto out_unregister;
@@ -262,31 +341,59 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 
        NCR5380_maybe_reset_bus(instance);
 
-       if (irq != IRQ_AUTO)
-               instance->irq = irq;
-       else
-               instance->irq = NCR5380_probe_irq(instance, 0xffff);
-
        /* Compatibility with documented NCR5380 kernel parameters */
-       if (instance->irq == 255)
-               instance->irq = NO_IRQ;
+       if (irq == 255 || irq == 0)
+               irq = NO_IRQ;
+       else if (irq == -1)
+               irq = IRQ_AUTO;
+
+       if (board == BOARD_HP_C2502) {
+               int *irq_table = hp_c2502_irqs;
+               int board_irq = -1;
+
+               switch (irq) {
+               case NO_IRQ:
+                       board_irq = 0;
+                       break;
+               case IRQ_AUTO:
+                       board_irq = legacy_find_free_irq(irq_table);
+                       break;
+               default:
+                       while (*irq_table != -1)
+                               if (*irq_table++ == irq)
+                                       board_irq = irq;
+               }
+
+               if (board_irq <= 0) {
+                       board_irq = 0;
+                       irq = NO_IRQ;
+               }
+
+               magic_configure(port_idx, board_irq, magic);
+       }
+
+       if (irq == IRQ_AUTO) {
+               instance->irq = g_NCR5380_probe_irq(instance);
+               if (instance->irq == NO_IRQ)
+                       shost_printk(KERN_INFO, instance, "no irq detected\n");
+       } else {
+               instance->irq = irq;
+               if (instance->irq == NO_IRQ)
+                       shost_printk(KERN_INFO, instance, "no irq provided\n");
+       }
 
        if (instance->irq != NO_IRQ) {
-               /* set IRQ for HP C2502 */
-               if (board == BOARD_HP_C2502)
-                       magic_configure(port_idx, instance->irq, magic);
                if (request_irq(instance->irq, generic_NCR5380_intr,
                                0, "NCR5380", instance)) {
-                       printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
+                       shost_printk(KERN_INFO, instance,
+                                    "irq %d denied\n", instance->irq);
                        instance->irq = NO_IRQ;
+               } else {
+                       shost_printk(KERN_INFO, instance,
+                                    "irq %d acquired\n", instance->irq);
                }
        }
 
-       if (instance->irq == NO_IRQ) {
-               printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
-               printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
-       }
-
        ret = scsi_add_host(instance, pdev);
        if (ret)
                goto out_free_irq;
@@ -597,7 +704,7 @@ static int __init generic_NCR5380_init(void)
        int ret = 0;
 
        /* compatibility with old-style parameters */
-       if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+       if (irq[0] == -1 && base[0] == 0 && card[0] == -1) {
                irq[0] = ncr_irq;
                base[0] = ncr_addr;
                if (ncr_5380)
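Together with the new -1 defaults, the compatibility block above yields three user-visible spellings of the irq= parameter: 0 or 255 mean no IRQ, -1 (now the default) means autoprobe, and anything else is taken literally. A sketch of that mapping, using the NO_IRQ/IRQ_AUTO values from this series:

#include <stdio.h>

#define NO_IRQ		0
#define IRQ_AUTO	254	/* from g_NCR5380.h in this series */

static int map_irq_param(int irq)
{
	if (irq == 255 || irq == 0)	/* documented NCR5380 spellings */
		return NO_IRQ;
	if (irq == -1)			/* new module-parameter default */
		return IRQ_AUTO;
	return irq;			/* explicit line used verbatim */
}

int main(void)
{
	printf("%d %d %d %d\n", map_irq_param(0), map_irq_param(255),
	       map_irq_param(-1), map_irq_param(5));	/* 0 0 254 5 */
	return 0;
}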
index 3ce5b65ccb00b618012ef0825390a6a098950cd6..81b22d989648b0107f6c0f3128cc25348a88e775 100644 (file)
@@ -51,4 +51,6 @@
 #define BOARD_DTC3181E 3
 #define BOARD_HP_C2502 4
 
+#define IRQ_AUTO       254
+
 #endif /* GENERIC_NCR5380_H */
index 691a0931695238cf07fcfc544dfe33740cb38a1c..cbc0c5fe5a60188515dab3dd99be83ce62732e1c 100644 (file)
@@ -1557,10 +1557,9 @@ static void hpsa_monitor_offline_device(struct ctlr_info *h,
 
        /* Device is not on the list, add it. */
        device = kmalloc(sizeof(*device), GFP_KERNEL);
-       if (!device) {
-               dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+       if (!device)
                return;
-       }
+
        memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
        spin_lock_irqsave(&h->offline_device_lock, flags);
        list_add_tail(&device->offline_list, &h->offline_device_list);
@@ -2142,17 +2141,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
 
        h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
                                GFP_KERNEL);
-       if (!h->cmd_sg_list) {
-               dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
+       if (!h->cmd_sg_list)
                return -ENOMEM;
-       }
+
        for (i = 0; i < h->nr_cmds; i++) {
                h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
                                                h->chainsize, GFP_KERNEL);
-               if (!h->cmd_sg_list[i]) {
-                       dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
+               if (!h->cmd_sg_list[i])
                        goto clean;
-               }
        }
        return 0;
 
@@ -3454,11 +3451,8 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
                struct bmic_sense_subsystem_info *ssi;
 
                ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
-               if (ssi == NULL) {
-                       dev_warn(&h->pdev->dev,
-                               "%s: out of memory\n", __func__);
+               if (!ssi)
                        return;
-               }
 
                rc = hpsa_bmic_sense_subsystem_information(h,
                                        scsi3addr, 0, ssi, sizeof(*ssi));
@@ -4335,8 +4329,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 
                currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
                if (!currentsd[i]) {
-                       dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
-                               __FILE__, __LINE__);
                        h->drv_req_rescan = 1;
                        goto out;
                }
@@ -8597,14 +8589,12 @@ static int hpsa_luns_changed(struct ctlr_info *h)
         */
 
        if (!h->lastlogicals)
-               goto out;
+               return rc;
 
        logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
-       if (!logdev) {
-               dev_warn(&h->pdev->dev,
-                       "Out of memory, can't track lun changes.\n");
-               goto out;
-       }
+       if (!logdev)
+               return rc;
+
        if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
                dev_warn(&h->pdev->dev,
                        "report luns failed, can't track lun changes.\n");
@@ -8998,11 +8988,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
                return;
 
        options = kzalloc(sizeof(*options), GFP_KERNEL);
-       if (!options) {
-               dev_err(&h->pdev->dev,
-                       "Error: failed to disable rld caching, during alloc.\n");
+       if (!options)
                return;
-       }
 
        c = cmd_alloc(h);
 
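
The hpsa hunks above all apply the same cleanup: per-call out-of-memory messages are dropped because a normal (non-__GFP_NOWARN) allocation failure already makes the allocator log a warning with a backtrace. A minimal sketch of the resulting idiom, with a hypothetical structure name:

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;	/* the allocator has already logged the failure */
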
index d9534ee6ef524fb1fab03680bdde2ed34a4202b7..50cd01165e355b092fb954d2399f42798d5e3e00 100644 (file)
@@ -95,6 +95,7 @@ static int fast_fail = 1;
 static int client_reserve = 1;
 static char partition_name[97] = "UNKNOWN";
 static unsigned int partition_number = -1;
+static LIST_HEAD(ibmvscsi_head);
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -232,6 +233,7 @@ static void ibmvscsi_task(void *data)
                while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = VIOSRP_CRQ_FREE;
+                       wmb();
                }
 
                vio_enable_interrupts(vdev);
@@ -240,6 +242,7 @@ static void ibmvscsi_task(void *data)
                        vio_disable_interrupts(vdev);
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = VIOSRP_CRQ_FREE;
+                       wmb();
                } else {
                        done = 1;
                }
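
The wmb() added after each VIOSRP_CRQ_FREE store pins down the ordering contract with the hypervisor: the CRQ slot must be visibly free before the driver does anything that could let the other side reuse it. Sketched:

	crq->valid = VIOSRP_CRQ_FREE;	/* hand the slot back */
	wmb();				/* publish before touching the next entry */
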
@@ -992,7 +995,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
        if (unlikely(rsp->opcode != SRP_RSP)) {
                if (printk_ratelimit())
                        dev_warn(evt_struct->hostdata->dev,
-                                "bad SRP RSP type %d\n", rsp->opcode);
+                                "bad SRP RSP type %#04x\n", rsp->opcode);
        }
        
        if (cmnd) {
@@ -2270,6 +2273,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        }
 
        dev_set_drvdata(&vdev->dev, hostdata);
+       list_add_tail(&hostdata->host_list, &ibmvscsi_head);
        return 0;
 
       add_srp_port_failed:
@@ -2291,6 +2295,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+       list_del(&hostdata->host_list);
        unmap_persist_bufs(hostdata);
        release_event_pool(&hostdata->pool, hostdata);
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
index e0f6c3aeb4eef35aa390afaa5d7cd6174d8f7a48..3a7875575616e3eec27443a0db7ef9882fcc2528 100644 (file)
@@ -90,6 +90,7 @@ struct event_pool {
 
 /* all driver data associated with a host adapter */
 struct ibmvscsi_host_data {
+       struct list_head host_list;
        atomic_t request_limit;
        int client_migrated;
        int reset_crq;
index c9fa3565c671e9f1b0f4e16e847c713cd6165078..2583e8b50b21c2cfa6ec3d10e624adb8141d5c9d 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/string.h>
+#include <linux/delay.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index 98b0ca79a5c5e2e04c526ca741c504377c75ca9e..65c6189885ab08f24212bce5f008c1973539a0f7 100644 (file)
@@ -26,6 +26,7 @@
 #ifndef __H_IBMVSCSI_TGT
 #define __H_IBMVSCSI_TGT
 
+#include <linux/interrupt.h>
 #include "libsrp.h"
 
 #define SYS_ID_NAME_LEN                64
index 8de0eda8cd006ac9ec0192256c56aec182bd08b9..394fe1338d0976a42f183e328dfaed02f540560f 100644 (file)
@@ -402,6 +402,9 @@ struct MPT3SAS_DEVICE {
        u8      block;
        u8      tlr_snoop_check;
        u8      ignore_delay_remove;
+       /* I/O priority command handling */
+       u8      ncq_prio_enable;
 };
 
 #define MPT3_CMD_NOT_USED      0x8000  /* free */
@@ -1458,4 +1461,7 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
        struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
        u16 smid);
 
+/* NCQ Prio Handling Check */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+
 #endif /* MPT3SAS_BASE_H_INCLUDED */
index 050bd788ad029818de9151eaafb3584387755c36..95f0f24bac05598e1c8246cb1078de9163a66e5d 100644 (file)
@@ -3325,8 +3325,6 @@ static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
 
 /*********** diagnostic trigger support *** END ****************************/
 
-
-
 /*****************************************/
 
 struct device_attribute *mpt3sas_host_attrs[] = {
@@ -3402,9 +3400,50 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
 
+/**
+ * _ctl_device_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: device attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute; only works with SATA devices.
+ */
+static ssize_t
+_ctl_device_ncq_prio_enable_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       sas_device_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+_ctl_device_ncq_prio_enable_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+       bool ncq_prio_enable = false;
+
+       if (kstrtobool(buf, &ncq_prio_enable))
+               return -EINVAL;
+
+       if (!scsih_ncq_prio_supp(sdev))
+               return -EINVAL;
+
+       sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+       return strlen(buf);
+}
+static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
+                  _ctl_device_ncq_prio_enable_show,
+                  _ctl_device_ncq_prio_enable_store);
+
 struct device_attribute *mpt3sas_dev_attrs[] = {
        &dev_attr_sas_address,
        &dev_attr_sas_device_handle,
+       &dev_attr_sas_ncq_prio_enable,
        NULL,
 };
 
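
Once registered, the flag is toggled per device from user space; a sketch assuming the usual scsi_device sysfs layout:

    echo 1 > /sys/class/scsi_device/<h:c:t:l>/device/sas_ncq_prio_enable

The store parses the value with kstrtobool() and fails with -EINVAL when scsih_ncq_prio_supp() reports that the (non-SATA) device lacks NCQ priority support.
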
index 5c8f75247d739489313613e2f18965fd63b80e86..b5c966e319d315474b94703b93ab0343013dd973 100644 (file)
@@ -4053,6 +4053,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        struct MPT3SAS_TARGET *sas_target_priv_data;
        struct _raid_device *raid_device;
+       struct request *rq = scmd->request;
+       int class;
        Mpi2SCSIIORequest_t *mpi_request;
        u32 mpi_control;
        u16 smid;
@@ -4115,7 +4117,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 
        /* set tags */
        mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
+       /* NCQ prio supported, make sure control indicates high priority */
+       if (sas_device_priv_data->ncq_prio_enable) {
+               class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+               if (class == IOPRIO_CLASS_RT)
+                       mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
+       }
        /* Make sure Device is not raid volume.
         * We do not expose raid functionality to upper layer for warpdrive.
         */
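
The CMDPRI bit is set only for requests whose I/O priority class is IOPRIO_CLASS_RT, so the feature is driven entirely by the submitter's ioprio. An illustrative user-space sketch (not part of this patch; the constants mirror include/uapi/linux/ioprio.h, and ionice -c 1 achieves the same effect):

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_CLASS_RT		1
	#define IOPRIO_WHO_PROCESS	1

	int main(void)
	{
		/* realtime class, priority data 0, for the calling process */
		int ioprio = IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT;

		if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio) < 0) {
			perror("ioprio_set");
			return 1;
		}
		/* I/O issued from here on carries the RT class checked above */
		return 0;
	}
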
@@ -9099,6 +9106,31 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
        return PCI_ERS_RESULT_RECOVERED;
 }
 
+/**
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
+ * @sdev: scsi device struct
+ *
+ * This is called when a user indicates they would like to enable
+ * ncq command priorities. This works only on SATA devices.
+ */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+{
+       unsigned char *buf;
+       bool ncq_prio_supp = false;
+
+       if (!scsi_device_supports_vpd(sdev))
+               return ncq_prio_supp;
+
+       buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
+       if (!buf)
+               return ncq_prio_supp;
+
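+       /* VPD page 0x89 (ATA Information) embeds the 512-byte IDENTIFY
+        * DEVICE data at byte offset 60, so byte 213 is the high byte of
+        * IDENTIFY word 76 (Serial ATA capabilities); bit 4 below is word
+        * 76 bit 12, the NCQ priority support flag.
+        */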
+       if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
+               ncq_prio_supp = (buf[213] >> 4) & 1;
+
+       kfree(buf);
+       return ncq_prio_supp;
+}
+
 /*
  * The pci device ids are defined in mpi/mpi2_cnfg.h.
  */
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
new file mode 100644 (file)
index 0000000..23ca8a2
--- /dev/null
@@ -0,0 +1,10 @@
+config QEDI
+       tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
+       depends on PCI && SCSI
+       depends on QED
+       select SCSI_ISCSI_ATTRS
+       select QED_LL2
+       select QED_ISCSI
+       ---help---
+       This driver supports iSCSI offload for the QLogic FastLinQ
+       41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
new file mode 100644 (file)
index 0000000..2b3e16b
--- /dev/null
@@ -0,0 +1,5 @@
+obj-$(CONFIG_QEDI) := qedi.o
+qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
+           qedi_dbg.o
+
+qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
new file mode 100644 (file)
index 0000000..5ca3e8c
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_H_
+#define _QEDI_H_
+
+#define __PREVENT_QED_HSI__
+
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_host.h>
+#include <linux/uio_driver.h>
+
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedi_dbg.h"
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qedi_version.h"
+
+#define QEDI_MODULE_NAME               "qedi"
+
+struct qedi_endpoint;
+
+/*
+ * PCI function probe defines
+ */
+#define QEDI_MODE_NORMAL       0
+#define QEDI_MODE_RECOVERY     1
+
+#define ISCSI_WQE_SET_PTU_INVALIDATE   1
+#define QEDI_MAX_ISCSI_TASK            4096
+#define QEDI_MAX_TASK_NUM              0x0FFF
+#define QEDI_MAX_ISCSI_CONNS_PER_HBA   1024
+#define QEDI_ISCSI_MAX_BDS_PER_CMD     256     /* Firmware max BDs is 256 */
+#define MAX_OUSTANDING_TASKS_PER_CON   1024
+
+#define QEDI_MAX_BD_LEN                0xffff
+#define QEDI_BD_SPLIT_SZ       0x1000
+#define QEDI_PAGE_SIZE         4096
+#define QEDI_FAST_SGE_COUNT    4
+/* MAX Length for cached SGL */
+#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
+
+#define MAX_NUM_MSIX_PF         8
+#define MIN_NUM_CPUS_MSIX(x)   min((x)->msix_count, num_online_cpus())
+
+#define QEDI_LOCAL_PORT_MIN     60000
+#define QEDI_LOCAL_PORT_MAX     61024
+#define QEDI_LOCAL_PORT_RANGE   (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
+#define QEDI_LOCAL_PORT_INVALID        0xffff
+#define TX_RX_RING             16
+#define RX_RING                        (TX_RX_RING - 1)
+#define LL2_SINGLE_BUF_SIZE    0x400
+#define QEDI_PAGE_ALIGN(addr)  ALIGN(addr, QEDI_PAGE_SIZE)
+#define QEDI_PAGE_MASK         (~((QEDI_PAGE_SIZE) - 1))
+
+#define QEDI_PATH_HANDLE       0xFE0000000UL
+
+struct qedi_uio_ctrl {
+       /* meta data */
+       u32 uio_hsi_version;
+
+       /* user writes */
+       u32 host_tx_prod;
+       u32 host_rx_cons;
+       u32 host_rx_bd_cons;
+       u32 host_tx_pkt_len;
+       u32 host_rx_cons_cnt;
+
+       /* driver writes */
+       u32 hw_tx_cons;
+       u32 hw_rx_prod;
+       u32 hw_rx_bd_prod;
+       u32 hw_rx_prod_cnt;
+
+       /* other */
+       u8 mac_addr[6];
+       u8 reserve[2];
+};
+
+struct qedi_rx_bd {
+       u32 rx_pkt_index;
+       u32 rx_pkt_len;
+       u16 vlan_id;
+};
+
+#define QEDI_RX_DESC_CNT       (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
+#define QEDI_MAX_RX_DESC_CNT   (QEDI_RX_DESC_CNT - 1)
+#define QEDI_NUM_RX_BD         (QEDI_RX_DESC_CNT * 1)
+#define QEDI_MAX_RX_BD         (QEDI_NUM_RX_BD - 1)
+
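+/* Advance an RX index, skipping the last descriptor slot of each
+ * page-sized group so only QEDI_MAX_RX_DESC_CNT entries per page are used.
+ */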
+#define QEDI_NEXT_RX_IDX(x)    ((((x) & (QEDI_MAX_RX_DESC_CNT)) ==     \
+                                 (QEDI_MAX_RX_DESC_CNT - 1)) ?         \
+                                (x) + 2 : (x) + 1)
+
+struct qedi_uio_dev {
+       struct uio_info         qedi_uinfo;
+       u32                     uio_dev;
+       struct list_head        list;
+
+       u32                     ll2_ring_size;
+       void                    *ll2_ring;
+
+       u32                     ll2_buf_size;
+       void                    *ll2_buf;
+
+       void                    *rx_pkt;
+       void                    *tx_pkt;
+
+       struct qedi_ctx         *qedi;
+       struct pci_dev          *pdev;
+       void                    *uctrl;
+};
+
+/* List to maintain the skb pointers */
+struct skb_work_list {
+       struct list_head list;
+       struct sk_buff *skb;
+       u16 vlan_id;
+};
+
+/* Queue sizes in number of elements */
+#define QEDI_SQ_SIZE           MAX_OUSTANDING_TASKS_PER_CON
+#define QEDI_CQ_SIZE           2048
+#define QEDI_CMDQ_SIZE         QEDI_MAX_ISCSI_TASK
+#define QEDI_PROTO_CQ_PROD_IDX 0
+
+struct qedi_glbl_q_params {
+       u64 hw_p_cq;    /* Completion queue PBL */
+       u64 hw_p_rq;    /* Request queue PBL */
+       u64 hw_p_cmdq;  /* Command queue PBL */
+};
+
+struct global_queue {
+       union iscsi_cqe *cq;
+       dma_addr_t cq_dma;
+       u32 cq_mem_size;
+       u32 cq_cons_idx; /* Completion queue consumer index */
+
+       void *cq_pbl;
+       dma_addr_t cq_pbl_dma;
+       u32 cq_pbl_size;
+};
+
+struct qedi_fastpath {
+       struct qed_sb_info      *sb_info;
+       u16                     sb_id;
+#define QEDI_NAME_SIZE         16
+       char                    name[QEDI_NAME_SIZE];
+       struct qedi_ctx         *qedi;
+};
+
+/* Used to pass fastpath information needed to process CQEs */
+struct qedi_io_work {
+       struct list_head list;
+       struct iscsi_cqe_solicited cqe;
+       u16     que_idx;
+};
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base:           queue base memory
+ * @cid_que:                queue memory pointer
+ * @cid_q_prod_idx:         produce index
+ * @cid_q_cons_idx:         consumer index
+ * @cid_q_max_idx:          max index. used to detect wrap around condition
+ * @cid_free_cnt:           queue size
+ * @conn_cid_tbl:           iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+       void *cid_que_base;
+       u32 *cid_que;
+       u32 cid_q_prod_idx;
+       u32 cid_q_cons_idx;
+       u32 cid_q_max_idx;
+       u32 cid_free_cnt;
+       struct qedi_conn **conn_cid_tbl;
+};
+
+struct qedi_portid_tbl {
+       spinlock_t      lock;   /* Port id lock */
+       u16             start;
+       u16             max;
+       u16             next;
+       unsigned long   *table;
+};
+
+struct qedi_itt_map {
+       __le32  itt;
+       struct qedi_cmd *p_cmd;
+};
+
+/* I/O tracing entry */
+#define QEDI_IO_TRACE_SIZE             2048
+struct qedi_io_log {
+#define QEDI_IO_TRACE_REQ              0
+#define QEDI_IO_TRACE_RSP              1
+       u8 direction;
+       u16 task_id;
+       u32 cid;
+       u32 port_id;    /* Remote port fabric ID */
+       int lun;
+       u8 op;          /* SCSI CDB */
+       u8 lba[4];
+       unsigned int bufflen;   /* SCSI buffer length */
+       unsigned int sg_count;  /* Number of SG elements */
+       u8 fast_sgs;            /* number of fast sgls */
+       u8 slow_sgs;            /* number of slow sgls */
+       u8 cached_sgs;          /* number of cached sgls */
+       int result;             /* Result passed back to mid-layer */
+       unsigned long jiffies;  /* Time stamp when I/O logged */
+       int refcount;           /* Reference count for task id */
+       unsigned int blk_req_cpu; /* CPU that the task is queued on by
+                                  * blk layer
+                                  */
+       unsigned int req_cpu;   /* CPU that the task is queued on */
+       unsigned int intr_cpu;  /* Interrupt CPU that the task is received on */
+       unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
+                                 * returned to blk layer
+                                 */
+       bool cached_sge;
+       bool slow_sge;
+       bool fast_sge;
+};
+
+/* Number of entries in BDQ */
+#define QEDI_BDQ_NUM           256
+#define QEDI_BDQ_BUF_SIZE      256
+
+/* DMA coherent buffers for BDQ */
+struct qedi_bdq_buf {
+       void *buf_addr;
+       dma_addr_t buf_dma;
+};
+
+/* Main port level struct */
+struct qedi_ctx {
+       struct qedi_dbg_ctx dbg_ctx;
+       struct Scsi_Host *shost;
+       struct pci_dev *pdev;
+       struct qed_dev *cdev;
+       struct qed_dev_iscsi_info dev_info;
+                 "rqe_opaque.lo [0x%x], rqe_opaque.hi [0x%x], idx [%d]\n",
+       struct qedi_glbl_q_params *p_cpuq;
+       struct global_queue **global_queues;
+       /* uio declaration */
+       struct qedi_uio_dev *udev;
+       struct list_head ll2_skb_list;
+       spinlock_t ll2_lock;    /* Light L2 lock */
+       spinlock_t hba_lock;    /* per port lock */
+       struct task_struct *ll2_recv_thread;
+       unsigned long flags;
+#define UIO_DEV_OPENED         1
+#define QEDI_IOTHREAD_WAKE     2
+#define QEDI_IN_RECOVERY       5
+#define QEDI_IN_OFFLINE                6
+
+       u8 mac[ETH_ALEN];
+       u32 src_ip[4];
+       u8 ip_type;
+
+       /* Physical address of above array */
+       dma_addr_t hw_p_cpuq;
+
+       struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
+       void *bdq_pbl;
+       dma_addr_t bdq_pbl_dma;
+       size_t bdq_pbl_mem_size;
+       void *bdq_pbl_list;
+       dma_addr_t bdq_pbl_list_dma;
+       u8 bdq_pbl_list_num_entries;
+       void __iomem *bdq_primary_prod;
+       void __iomem *bdq_secondary_prod;
+       u16 bdq_prod_idx;
+       u16 rq_num_entries;
+
+       u32 msix_count;
+       u32 max_sqes;
+       u8 num_queues;
+       u32 max_active_conns;
+
+       struct iscsi_cid_queue cid_que;
+       struct qedi_endpoint **ep_tbl;
+                 "pbl [0x%p] pbl->address hi [0x%x] lo [0x%x] idx [%d]\n",
+
+       /* Rx fast path intr context */
+       struct qed_sb_info      *sb_array;
+       struct qedi_fastpath    *fp_array;
+       struct qed_iscsi_tid    tasks;
+
+#define QEDI_LINK_DOWN         0
+#define QEDI_LINK_UP           1
+       atomic_t link_state;
+
+#define QEDI_RESERVE_TASK_ID   0
+#define MAX_ISCSI_TASK_ENTRIES 4096
+#define QEDI_INVALID_TASK_ID   (MAX_ISCSI_TASK_ENTRIES + 1)
+       unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
+       struct qedi_itt_map *itt_map;
+       u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
+       struct qed_pf_params pf_params;
+
+       struct workqueue_struct *tmf_thread;
+       struct workqueue_struct *offload_thread;
+
+       u16 ll2_mtu;
+
+       struct workqueue_struct *dpc_wq;
+
+       spinlock_t task_idx_lock;       /* To protect gbl context */
+       s32 last_tidx_alloc;
+       s32 last_tidx_clear;
+
+       struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
+       spinlock_t io_trace_lock;       /* protect trace log buf */
+       u16 io_trace_idx;
+       unsigned int intr_cpu;
+       u32 cached_sgls;
+       bool use_cached_sge;
+       u32 slow_sgls;
+       bool use_slow_sge;
+       u32 fast_sgls;
+       bool use_fast_sge;
+
+       atomic_t num_offloads;
+};
+
+struct qedi_work {
+       struct list_head list;
+       struct qedi_ctx *qedi;
+       union iscsi_cqe cqe;
+       u16     que_idx;
+       bool is_solicited;
+};
+
+struct qedi_percpu_s {
+       struct task_struct *iothread;
+       struct list_head work_list;
+       spinlock_t p_work_lock;         /* Per cpu worker lock */
+};
+
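+/* Resolve a firmware task id to its context buffer: task memory is
+ * allocated in blocks of num_tids_per_block entries of info->size bytes,
+ * so the id splits into a block index and an offset within the block.
+ */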
+static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
+{
+       return (info->blocks[tid / info->num_tids_per_block] +
+               (tid % info->num_tids_per_block) * info->size);
+}
+
+#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+
+#endif /* _QEDI_H_ */
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
new file mode 100644 (file)
index 0000000..2bdedb9
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi_dbg.h"
+#include <linux/vmalloc.h>
+
+void
+qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+            const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char nfunc[32];
+
+       /* bounded copy; memcpy could read past a short __func__ string */
+       strlcpy(nfunc, func, sizeof(nfunc));
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (likely(qedi) && likely(qedi->pdev))
+               pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+                      nfunc, line, qedi->host_no, &vaf);
+       else
+               pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+       va_end(va);
+}
+
+void
+qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+             const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char nfunc[32];
+
+       strlcpy(nfunc, func, sizeof(nfunc));
+
+       if (!(qedi_dbg_log & QEDI_LOG_WARN))
+               return;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (likely(qedi) && likely(qedi->pdev))
+               pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+                       nfunc, line, qedi->host_no, &vaf);
+       else
+               pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+       va_end(va);
+}
+
+void
+qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+               const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char nfunc[32];
+
+       strlcpy(nfunc, func, sizeof(nfunc));
+
+       if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
+               return;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (likely(qedi) && likely(qedi->pdev))
+               pr_notice("[%s]:[%s:%d]:%d: %pV",
+                         dev_name(&qedi->pdev->dev), nfunc, line,
+                         qedi->host_no, &vaf);
+       else
+               pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+       va_end(va);
+}
+
+void
+qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+             u32 level, const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char nfunc[32];
+
+       strlcpy(nfunc, func, sizeof(nfunc));
+
+       if (!(qedi_dbg_log & level))
+               return;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (likely(qedi) && likely(qedi->pdev))
+               pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+                       nfunc, line, qedi->host_no, &vaf);
+       else
+               pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+       va_end(va);
+}
+
+int
+qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+       int ret = 0;
+
+       for (; iter->name; iter++) {
+               ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
+                                           iter->attr);
+               if (ret)
+                       pr_err("Unable to create sysfs %s attr, err(%d).\n",
+                              iter->name, ret);
+       }
+       return ret;
+}
+
+void
+qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+       for (; iter->name; iter++)
+               sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
+}
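
All four helpers above format through a struct va_format; every one except qedi_dbg_err() is gated by a bit in the module-wide qedi_dbg_log mask. A hypothetical call site, assuming a struct qedi_ctx *qedi is in scope:

	/* printed only when the qedi_dbg_log mask has QEDI_LOG_CONN set */
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "cid=0x%x connection offloaded\n", cid);
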
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
new file mode 100644 (file)
index 0000000..c55572b
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_DBG_H_
+#define _QEDI_DBG_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <linux/fs.h>
+
+#define __PREVENT_QED_HSI__
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_if.h>
+
+extern uint qedi_dbg_log;
+
+/* Debug print level definitions */
+#define QEDI_LOG_DEFAULT       0x1             /* Set default logging mask */
+#define QEDI_LOG_INFO          0x2             /* Informational logs,
+                                                * MAC address, WWPN, WWNN
+                                                */
+#define QEDI_LOG_DISC          0x4             /* Init, discovery, rport */
+#define QEDI_LOG_LL2           0x8             /* LL2, VLAN logs */
+#define QEDI_LOG_CONN          0x10            /* Connection setup, cleanup */
+#define QEDI_LOG_EVT           0x20            /* Events, link, mtu */
+#define QEDI_LOG_TIMER         0x40            /* Timer events */
+#define QEDI_LOG_MP_REQ                0x80            /* Middle Path (MP) logs */
+#define QEDI_LOG_SCSI_TM       0x100           /* SCSI Aborts, Task Mgmt */
+#define QEDI_LOG_UNSOL         0x200           /* unsolicited event logs */
+#define QEDI_LOG_IO            0x400           /* scsi cmd, completion */
+#define QEDI_LOG_MQ            0x800           /* Multi Queue logs */
+#define QEDI_LOG_BSG           0x1000          /* BSG logs */
+#define QEDI_LOG_DEBUGFS       0x2000          /* debugFS logs */
+#define QEDI_LOG_LPORT         0x4000          /* lport logs */
+#define QEDI_LOG_ELS           0x8000          /* ELS logs */
+#define QEDI_LOG_NPIV          0x10000         /* NPIV logs */
+#define QEDI_LOG_SESS          0x20000         /* Connection setup, cleanup */
+#define QEDI_LOG_UIO           0x40000         /* iSCSI UIO logs */
+#define QEDI_LOG_TID           0x80000         /* FW TID context acquire,
+                                                * free
+                                                */
+#define QEDI_TRACK_TID         0x100000        /* Track TID state. To be
+                                                * enabled only at module load
+                                                * and not run-time.
+                                                */
+#define QEDI_TRACK_CMD_LIST    0x300000        /* Track active cmd list nodes,
+                                               * done with reference to TID,
+                                               * hence TRACK_TID also enabled.
+                                               */
+#define QEDI_LOG_NOTICE                0x40000000      /* Notice logs */
+#define QEDI_LOG_WARN          0x80000000      /* Warning logs */
+
+/* Debug context structure */
+struct qedi_dbg_ctx {
+       unsigned int host_no;
+       struct pci_dev *pdev;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *bdf_dentry;
+#endif
+};
+
+#define QEDI_ERR(qedi, fmt, ...)       \
+               qedi_dbg_err(qedi, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_WARN(qedi, fmt, ...)      \
+               qedi_dbg_warn(qedi, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_NOTICE(qedi, fmt, ...)    \
+               qedi_dbg_notice(qedi, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_INFO(qedi, level, fmt, ...)       \
+               qedi_dbg_info(qedi, __func__, __LINE__, level, fmt,     \
+                             ## __VA_ARGS__)
+
+void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+                 const char *fmt, ...);
+void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+                  const char *fmt, ...);
+void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+                    const char *fmt, ...);
+void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+                  u32 info, const char *fmt, ...);
+
+struct Scsi_Host;
+
+struct sysfs_bin_attrs {
+       char *name;
+       struct bin_attribute *attr;
+};
+
+int qedi_create_sysfs_attr(struct Scsi_Host *shost,
+                          struct sysfs_bin_attrs *iter);
+void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
+                           struct sysfs_bin_attrs *iter);
+
+#ifdef CONFIG_DEBUG_FS
+/* DebugFS related code */
+struct qedi_list_of_funcs {
+       char *oper_str;
+       ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi);
+};
+
+struct qedi_debugfs_ops {
+       char *name;
+       struct qedi_list_of_funcs *qedi_funcs;
+};
+
+#define qedi_dbg_fileops(drv, ops) \
+{ \
+       .owner  = THIS_MODULE, \
+       .open   = simple_open, \
+       .read   = drv##_dbg_##ops##_cmd_read, \
+       .write  = drv##_dbg_##ops##_cmd_write \
+}
+
+/* Used for debugfs sequential files */
+#define qedi_dbg_fileops_seq(drv, ops) \
+{ \
+       .owner = THIS_MODULE, \
+       .open = drv##_dbg_##ops##_open, \
+       .read = seq_read, \
+       .llseek = seq_lseek, \
+       .release = single_release, \
+}
+
+void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+                       struct qedi_debugfs_ops *dops,
+                       const struct file_operations *fops);
+void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi);
+void qedi_dbg_init(char *drv_name);
+void qedi_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _QEDI_DBG_H_ */
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
new file mode 100644 (file)
index 0000000..9559362
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_dbg.h"
+
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+int do_not_recover;
+static struct dentry *qedi_dbg_root;
+
+void
+qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+                  struct qedi_debugfs_ops *dops,
+                  const struct file_operations *fops)
+{
+       char host_dirname[32];
+       struct dentry *file_dentry = NULL;
+
+       sprintf(host_dirname, "host%u", qedi->host_no);
+       qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
+       if (!qedi->bdf_dentry)
+               return;
+
+       while (dops) {
+               if (!(dops->name))
+                       break;
+
+               file_dentry = debugfs_create_file(dops->name, 0600,
+                                                 qedi->bdf_dentry, qedi,
+                                                 fops);
+               if (!file_dentry) {
+                       QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
+                                 "Debugfs entry %s creation failed\n",
+                                 dops->name);
+                       debugfs_remove_recursive(qedi->bdf_dentry);
+                       return;
+               }
+               dops++;
+               fops++;
+       }
+}
+
+void
+qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi)
+{
+       debugfs_remove_recursive(qedi->bdf_dentry);
+       qedi->bdf_dentry = NULL;
+}
+
+void
+qedi_dbg_init(char *drv_name)
+{
+       qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
+       if (!qedi_dbg_root)
+               QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
+}
+
+void
+qedi_dbg_exit(void)
+{
+       debugfs_remove_recursive(qedi_dbg_root);
+       qedi_dbg_root = NULL;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
+{
+       if (!do_not_recover)
+               do_not_recover = 1;
+
+       QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+                 do_not_recover);
+       return 0;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
+{
+       if (do_not_recover)
+               do_not_recover = 0;
+
+       QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+                 do_not_recover);
+       return 0;
+}
+
+static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = {
+       { "enable", qedi_dbg_do_not_recover_enable },
+       { "disable", qedi_dbg_do_not_recover_disable },
+       { NULL, NULL }
+};
+
+struct qedi_debugfs_ops qedi_debugfs_ops[] = {
+       { "gbl_ctx", NULL },
+       { "do_not_recover", qedi_dbg_do_not_recover_ops},
+       { "io_trace", NULL },
+       { NULL, NULL }
+};
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer,
+                                 size_t count, loff_t *ppos)
+{
+       size_t cnt = 0;
+       struct qedi_dbg_ctx *qedi_dbg =
+                       (struct qedi_dbg_ctx *)filp->private_data;
+       struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops;
+
+       if (*ppos)
+               return 0;
+
+       while (lof) {
+               if (!(lof->oper_str))
+                       break;
+
+               if (!strncmp(lof->oper_str, buffer, strlen(lof->oper_str))) {
+                       cnt = lof->oper_func(qedi_dbg);
+                       break;
+               }
+
+               lof++;
+       }
+       return (count - cnt);
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+                                size_t count, loff_t *ppos)
+{
+       size_t cnt = 0;
+
+       if (*ppos)
+               return 0;
+
+       cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+       cnt = min_t(int, count, cnt - *ppos);
+       *ppos += cnt;
+       return cnt;
+}
+
+static int
+qedi_gbl_ctx_show(struct seq_file *s, void *unused)
+{
+       struct qedi_fastpath *fp = NULL;
+       struct qed_sb_info *sb_info = NULL;
+       struct status_block *sb = NULL;
+       struct global_queue *que = NULL;
+       int id;
+       u16 prod_idx;
+       struct qedi_ctx *qedi = s->private;
+       unsigned long flags;
+
+       seq_puts(s, " DUMP CQ CONTEXT:\n");
+
+       for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+               spin_lock_irqsave(&qedi->hba_lock, flags);
+               seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id);
+               fp = &qedi->fp_array[id];
+               sb_info = fp->sb_info;
+               sb = sb_info->sb_virt;
+               prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
+                           STATUS_BLOCK_PROD_INDEX_MASK);
+               seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
+               que = qedi->global_queues[fp->sb_id];
+               seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
+               seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id);
+               seq_puts(s, "=========== END ==================\n\n\n");
+               spin_unlock_irqrestore(&qedi->hba_lock, flags);
+       }
+       return 0;
+}
+
+static int
+qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file)
+{
+       struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+       struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+                                            dbg_ctx);
+
+       return single_open(file, qedi_gbl_ctx_show, qedi);
+}
+
+static int
+qedi_io_trace_show(struct seq_file *s, void *unused)
+{
+       int id, idx = 0;
+       struct qedi_ctx *qedi = s->private;
+       struct qedi_io_log *io_log;
+       unsigned long flags;
+
+       seq_puts(s, " DUMP IO LOGS:\n");
+       spin_lock_irqsave(&qedi->io_trace_lock, flags);
+       idx = qedi->io_trace_idx;
+       for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) {
+               io_log = &qedi->io_trace_buf[idx];
+               seq_printf(s, "iodir-%d:", io_log->direction);
+               seq_printf(s, "tid-0x%x:", io_log->task_id);
+               seq_printf(s, "cid-0x%x:", io_log->cid);
+               seq_printf(s, "lun-%d:", io_log->lun);
+               seq_printf(s, "op-0x%02x:", io_log->op);
+               seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
+                          io_log->lba[1], io_log->lba[2], io_log->lba[3]);
+               seq_printf(s, "buflen-%d:", io_log->bufflen);
+               seq_printf(s, "sgcnt-%d:", io_log->sg_count);
+               seq_printf(s, "res-0x%08x:", io_log->result);
+               seq_printf(s, "jif-%lu:", io_log->jiffies);
+               seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu);
+               seq_printf(s, "req_cpu-%d:", io_log->req_cpu);
+               seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu);
+               seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu);
+
+               idx++;
+               if (idx == QEDI_IO_TRACE_SIZE)
+                       idx = 0;
+       }
+       spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+       return 0;
+}
+
+static int
+qedi_dbg_io_trace_open(struct inode *inode, struct file *file)
+{
+       struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+       struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+                                            dbg_ctx);
+
+       return single_open(file, qedi_io_trace_show, qedi);
+}
+
+const struct file_operations qedi_dbg_fops[] = {
+       qedi_dbg_fileops_seq(qedi, gbl_ctx),
+       qedi_dbg_fileops(qedi, do_not_recover),
+       qedi_dbg_fileops_seq(qedi, io_trace),
+       { NULL, NULL },
+};
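
qedi_dbg_host_init() hangs these ops off debugfs; assuming the driver registers the directory name "qedi", the recovery override would be exercised as:

    echo enable > /sys/kernel/debug/qedi/host0/do_not_recover
    cat /sys/kernel/debug/qedi/host0/do_not_recover

gbl_ctx and io_trace are read-only seq_file views of the per-queue completion indices and the rolling I/O trace ring, respectively.
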
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
new file mode 100644 (file)
index 0000000..b1d3904
--- /dev/null
@@ -0,0 +1,2378 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/delay.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+                              struct iscsi_task *mtask);
+
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+       if (cmd->io_tbl.sge_valid && sc) {
+               cmd->io_tbl.sge_valid = 0;
+               scsi_dma_unmap(sc);
+       }
+}
+
+static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+                                    union iscsi_cqe *cqe,
+                                    struct iscsi_task *task,
+                                    struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_logout_rsp *resp_hdr;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_logout_response_hdr *cqe_logout_response;
+       struct qedi_cmd *cmd;
+
+       cmd = (struct qedi_cmd *)task->dd_data;
+       cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
+       spin_lock(&session->back_lock);
+       resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr->opcode = cqe_logout_response->opcode;
+       resp_hdr->flags = cqe_logout_response->flags;
+       resp_hdr->hlength = 0;
+
+       resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+       resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
+       resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
+       resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
+
+       resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
+       resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                 "Freeing tid=0x%x for cid=0x%x\n",
+                 cmd->task_id, qedi_conn->iscsi_conn_id);
+
+       if (likely(cmd->io_cmd_in_list)) {
+               cmd->io_cmd_in_list = false;
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       } else {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+                         cmd->task_id, qedi_conn->iscsi_conn_id,
+                         &cmd->io_cmd);
+       }
+
+       cmd->state = RESPONSE_RECEIVED;
+       qedi_clear_task_idx(qedi, cmd->task_id);
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+
+       spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_text_resp(struct qedi_ctx *qedi,
+                                  union iscsi_cqe *cqe,
+                                  struct iscsi_task *task,
+                                  struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_task_context *task_ctx;
+       struct iscsi_text_rsp *resp_hdr_ptr;
+       struct iscsi_text_response_hdr *cqe_text_response;
+       struct qedi_cmd *cmd;
+       int pld_len;
+       u32 *tmp;
+
+       cmd = (struct qedi_cmd *)task->dd_data;
+       task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+       cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
+       spin_lock(&session->back_lock);
+       resp_hdr_ptr =  (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr_ptr->opcode = cqe_text_response->opcode;
+       resp_hdr_ptr->flags = cqe_text_response->flags;
+       resp_hdr_ptr->hlength = 0;
+
+       hton24(resp_hdr_ptr->dlength,
+              (cqe_text_response->hdr_second_dword &
+               ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+       tmp = (u32 *)resp_hdr_ptr->dlength;
+
+       resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+                                     conn->session->age);
+       resp_hdr_ptr->ttt = cqe_text_response->ttt;
+       resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
+       resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
+       resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
+
+       pld_len = cqe_text_response->hdr_second_dword &
+                 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+       qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+       memset(task_ctx, '\0', sizeof(*task_ctx));
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                 "Freeing tid=0x%x for cid=0x%x\n",
+                 cmd->task_id, qedi_conn->iscsi_conn_id);
+
+       if (likely(cmd->io_cmd_in_list)) {
+               cmd->io_cmd_in_list = false;
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       } else {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+                         cmd->task_id, qedi_conn->iscsi_conn_id,
+                         &cmd->io_cmd);
+       }
+
+       cmd->state = RESPONSE_RECEIVED;
+       qedi_clear_task_idx(qedi, cmd->task_id);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+                            qedi_conn->gen_pdu.resp_buf,
+                            (qedi_conn->gen_pdu.resp_wr_ptr -
+                             qedi_conn->gen_pdu.resp_buf));
+       spin_unlock(&session->back_lock);
+}
+
+static void qedi_tmf_resp_work(struct work_struct *work)
+{
+       struct qedi_cmd *qedi_cmd =
+                               container_of(work, struct qedi_cmd, tmf_work);
+       struct qedi_conn *qedi_conn = qedi_cmd->conn;
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_tm_rsp *resp_hdr_ptr;
+       struct iscsi_cls_session *cls_sess;
+       int rval = 0;
+
+       set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+       resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+       cls_sess = iscsi_conn_to_session(qedi_conn->cls_conn);
+
+       iscsi_block_session(session->cls_session);
+       rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
+       if (rval) {
+               clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+               qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+               iscsi_unblock_session(session->cls_session);
+               return;
+       }
+
+       iscsi_unblock_session(session->cls_session);
+       qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+       spin_lock(&session->back_lock);
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+       spin_unlock(&session->back_lock);
+       kfree(resp_hdr_ptr);
+       clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
+static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+                                 union iscsi_cqe *cqe,
+                                 struct iscsi_task *task,
+                                 struct qedi_conn *qedi_conn)
+
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_tmf_response_hdr *cqe_tmp_response;
+       struct iscsi_tm_rsp *resp_hdr_ptr;
+       struct iscsi_tm *tmf_hdr;
+       struct qedi_cmd *qedi_cmd = NULL;
+       u32 *tmp;
+
+       cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
+
+       qedi_cmd = task->dd_data;
+       qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+       if (!qedi_cmd->tmf_resp_buf) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failed to allocate resp buf, cid=0x%x\n",
+                         qedi_conn->iscsi_conn_id);
+               return;
+       }
+
+       spin_lock(&session->back_lock);
+       resp_hdr_ptr =  (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+       memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
+
+       /* Fill up the header */
+       resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
+       resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
+       resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
+       resp_hdr_ptr->hlength = 0;
+
+       hton24(resp_hdr_ptr->dlength,
+              (cqe_tmp_response->hdr_second_dword &
+               ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+       tmp = (u32 *)resp_hdr_ptr->dlength;
+       resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+                                     conn->session->age);
+       resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
+       resp_hdr_ptr->exp_cmdsn  = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
+       resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
+
+       tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+
+       if (likely(qedi_cmd->io_cmd_in_list)) {
+               qedi_cmd->io_cmd_in_list = false;
+               list_del_init(&qedi_cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       }
+
+       if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+             ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+           ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+             ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+           ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+             ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+               INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
+               queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+               goto unblock_sess;
+       }
+
+       qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+       kfree(resp_hdr_ptr);
+
+unblock_sess:
+       spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_login_resp(struct qedi_ctx *qedi,
+                                   union iscsi_cqe *cqe,
+                                   struct iscsi_task *task,
+                                   struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_task_context *task_ctx;
+       struct iscsi_login_rsp *resp_hdr_ptr;
+       struct iscsi_login_response_hdr *cqe_login_response;
+       struct qedi_cmd *cmd;
+       int pld_len;
+       u32 *tmp;
+
+       cmd = (struct qedi_cmd *)task->dd_data;
+
+       cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
+       task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+       spin_lock(&session->back_lock);
+       resp_hdr_ptr =  (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
+       resp_hdr_ptr->opcode = cqe_login_response->opcode;
+       resp_hdr_ptr->flags = cqe_login_response->flags_attr;
+       resp_hdr_ptr->hlength = 0;
+
+       hton24(resp_hdr_ptr->dlength,
+              (cqe_login_response->hdr_second_dword &
+               ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+       tmp = (u32 *)resp_hdr_ptr->dlength;
+       resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+                                     conn->session->age);
+       resp_hdr_ptr->tsih = cqe_login_response->tsih;
+       resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
+       resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
+       resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
+       resp_hdr_ptr->status_class = cqe_login_response->status_class;
+       resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
+       pld_len = cqe_login_response->hdr_second_dword &
+                 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+       qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+       if (likely(cmd->io_cmd_in_list)) {
+               cmd->io_cmd_in_list = false;
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       }
+
+       memset(task_ctx, '\0', sizeof(*task_ctx));
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+                            qedi_conn->gen_pdu.resp_buf,
+                            (qedi_conn->gen_pdu.resp_wr_ptr -
+                            qedi_conn->gen_pdu.resp_buf));
+
+       spin_unlock(&session->back_lock);
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                 "Freeing tid=0x%x for cid=0x%x\n",
+                 cmd->task_id, qedi_conn->iscsi_conn_id);
+       cmd->state = RESPONSE_RECEIVED;
+       qedi_clear_task_idx(qedi, cmd->task_id);
+}
+
+static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
+                               struct iscsi_cqe_unsolicited *cqe,
+                               char *ptr, int len)
+{
+       u16 idx = 0;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
+                 len, qedi->bdq_prod_idx,
+                 (qedi->bdq_prod_idx % qedi->rq_num_entries));
+
+       /* Obtain buffer address from rqe_opaque */
+       idx = cqe->rqe_opaque.lo;
+       if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+                         idx);
+               return;
+       }
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
+                 cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
+       switch (cqe->unsol_cqe_type) {
+       case ISCSI_CQE_UNSOLICITED_SINGLE:
+       case ISCSI_CQE_UNSOLICITED_FIRST:
+               if (len)
+                       memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
+               break;
+       case ISCSI_CQE_UNSOLICITED_MIDDLE:
+       case ISCSI_CQE_UNSOLICITED_LAST:
+               break;
+       default:
+               break;
+       }
+}
+
+static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
+                               struct iscsi_cqe_unsolicited *cqe,
+                               int count)
+{
+       u16 tmp;
+       u16 idx = 0;
+       struct scsi_bd *pbl;
+
+       /* Obtain buffer address from rqe_opaque */
+       idx = cqe->rqe_opaque.lo;
+       if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+                         idx);
+               return;
+       }
+
+       pbl = (struct scsi_bd *)qedi->bdq_pbl;
+       pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
+       pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
+       pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
+                 pbl, pbl->address.hi, pbl->address.lo, idx);
+       pbl->opaque.hi = 0;
+       pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+
+       /* Increment producer to let f/w know we've handled the frame */
+       qedi->bdq_prod_idx += count;
+
+       writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+       tmp = readw(qedi->bdq_primary_prod);
+
+       writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+       tmp = readw(qedi->bdq_secondary_prod);
+}
+
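+/* Pull unsolicited PDU data out of the BDQ and return the buffers to
+ * firmware.
+ */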
+static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
+                                     struct iscsi_cqe_unsolicited *cqe,
+                                     u32 pdu_len, u32 num_bdqs,
+                                     char *bdq_data)
+{
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "num_bdqs [%d]\n", num_bdqs);
+
+       qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
+       qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
+}
+
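+/* Rebuild a NOP-IN PDU from the CQE and complete it to libiscsi; returns
+ * 1 for a target-initiated NOP-IN, 0 for a NOP-OUT response.
+ */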
+static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
+                                  union iscsi_cqe *cqe,
+                                  struct iscsi_task *task,
+                                  struct qedi_conn *qedi_conn, u16 que_idx)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_nop_in_hdr *cqe_nop_in;
+       struct iscsi_nopin *hdr;
+       struct qedi_cmd *cmd;
+       int tgt_async_nop = 0;
+       u32 lun[2];
+       u32 pdu_len, num_bdqs;
+       char bdq_data[QEDI_BDQ_BUF_SIZE];
+       unsigned long flags;
+
+       spin_lock_bh(&session->back_lock);
+       cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
+
+       pdu_len = cqe_nop_in->hdr_second_dword &
+                 ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
+       num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+       hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(hdr, 0, sizeof(struct iscsi_hdr));
+       hdr->opcode = cqe_nop_in->opcode;
+       hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
+       hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
+       hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
+
+       if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+               spin_lock_irqsave(&qedi->hba_lock, flags);
+               qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+                                         pdu_len, num_bdqs, bdq_data);
+               hdr->itt = RESERVED_ITT;
+               tgt_async_nop = 1;
+               spin_unlock_irqrestore(&qedi->hba_lock, flags);
+               goto done;
+       }
+
+       /* Response to one of our nop-outs */
+       if (task) {
+               cmd = task->dd_data;
+               hdr->flags = ISCSI_FLAG_CMD_FINAL;
+               hdr->itt = build_itt(cqe->cqe_solicited.itid,
+                                    conn->session->age);
+               lun[0] = 0xffffffff;
+               lun[1] = 0xffffffff;
+               memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                         "Freeing tid=0x%x for cid=0x%x\n",
+                         cmd->task_id, qedi_conn->iscsi_conn_id);
+               cmd->state = RESPONSE_RECEIVED;
+               spin_lock(&qedi_conn->list_lock);
+               if (likely(cmd->io_cmd_in_list)) {
+                       cmd->io_cmd_in_list = false;
+                       list_del_init(&cmd->io_cmd);
+                       qedi_conn->active_cmd_count--;
+               }
+
+               spin_unlock(&qedi_conn->list_lock);
+               qedi_clear_task_idx(qedi, cmd->task_id);
+       }
+
+done:
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
+
+       spin_unlock_bh(&session->back_lock);
+       return tgt_async_nop;
+}
+
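+/* Rebuild an async message PDU from the CQE and complete it to libiscsi. */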
+static void qedi_process_async_mesg(struct qedi_ctx *qedi,
+                                   union iscsi_cqe *cqe,
+                                   struct iscsi_task *task,
+                                   struct qedi_conn *qedi_conn,
+                                   u16 que_idx)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_async_msg_hdr *cqe_async_msg;
+       struct iscsi_async *resp_hdr;
+       u32 lun[2];
+       u32 pdu_len, num_bdqs;
+       char bdq_data[QEDI_BDQ_BUF_SIZE];
+       unsigned long flags;
+
+       spin_lock_bh(&session->back_lock);
+
+       cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
+       pdu_len = cqe_async_msg->hdr_second_dword &
+               ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
+       num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+       if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+               spin_lock_irqsave(&qedi->hba_lock, flags);
+               qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+                                         pdu_len, num_bdqs, bdq_data);
+               spin_unlock_irqrestore(&qedi->hba_lock, flags);
+       }
+
+       resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr->opcode = cqe_async_msg->opcode;
+       resp_hdr->flags = 0x80;
+
+       lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
+       lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
+       memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
+       resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
+       resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
+       resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
+
+       resp_hdr->async_event = cqe_async_msg->async_event;
+       resp_hdr->async_vcode = cqe_async_msg->async_vcode;
+
+       resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
+       resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
+       resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
+                            pdu_len);
+
+       spin_unlock_bh(&session->back_lock);
+}
+
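+/* Rebuild a reject PDU from the CQE and complete it to libiscsi. */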
+static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
+                                    union iscsi_cqe *cqe,
+                                    struct iscsi_task *task,
+                                    struct qedi_conn *qedi_conn,
+                                    u16 que_idx)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_reject_hdr *cqe_reject;
+       struct iscsi_reject *hdr;
+       u32 pld_len, num_bdqs;
+       unsigned long flags;
+
+       spin_lock_bh(&session->back_lock);
+       cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
+       pld_len = cqe_reject->hdr_second_dword &
+                 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
+       num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
+
+       if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+               spin_lock_irqsave(&qedi->hba_lock, flags);
+               qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+                                         pld_len, num_bdqs, conn->data);
+               spin_unlock_irqrestore(&qedi->hba_lock, flags);
+       }
+       hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(hdr, 0, sizeof(struct iscsi_hdr));
+       hdr->opcode = cqe_reject->opcode;
+       hdr->reason = cqe_reject->hdr_reason;
+       hdr->flags = cqe_reject->hdr_flags;
+       hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
+                             ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
+       hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
+       hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
+       hdr->ffffffff = cpu_to_be32(0xffffffff);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+                            conn->data, pld_len);
+       spin_unlock_bh(&session->back_lock);
+}
+
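+/* Complete a SCSI command: validate the midlayer command, rebuild the
+ * SCSI response PDU (sense data, underrun fixups) and hand it to
+ * __iscsi_complete_pdu().
+ */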
+static void qedi_scsi_completion(struct qedi_ctx *qedi,
+                                union iscsi_cqe *cqe,
+                                struct iscsi_task *task,
+                                struct iscsi_conn *conn)
+{
+       struct scsi_cmnd *sc_cmd;
+       struct qedi_cmd *cmd = task->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_scsi_rsp *hdr;
+       struct iscsi_data_in_hdr *cqe_data_in;
+       int datalen = 0;
+       struct qedi_conn *qedi_conn;
+       u32 iscsi_cid;
+       bool mark_cmd_node_deleted = false;
+       u8 cqe_err_bits = 0;
+
+       iscsi_cid = cqe->cqe_common.conn_id;
+       qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+       cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
+       cqe_err_bits =
+               cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+       spin_lock_bh(&session->back_lock);
+       /* get the scsi command */
+       sc_cmd = cmd->scsi_cmd;
+
+       if (!sc_cmd) {
+               QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
+               goto error;
+       }
+
+       if (!sc_cmd->SCp.ptr) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "SCp.ptr is NULL, returned in another context.\n");
+               goto error;
+       }
+
+       if (!sc_cmd->request) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "sc_cmd->request is NULL, sc_cmd=%p.\n",
+                         sc_cmd);
+               goto error;
+       }
+
+       if (!sc_cmd->request->special) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "request->special is NULL so request not valid, sc_cmd=%p.\n",
+                         sc_cmd);
+               goto error;
+       }
+
+       if (!sc_cmd->request->q) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "request->q is NULL so request is not valid, sc_cmd=%p.\n",
+                         sc_cmd);
+               goto error;
+       }
+
+       qedi_iscsi_unmap_sg_list(cmd);
+
+       hdr = (struct iscsi_scsi_rsp *)task->hdr;
+       hdr->opcode = cqe_data_in->opcode;
+       hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
+       hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+       hdr->response = cqe_data_in->reserved1;
+       hdr->cmd_status = cqe_data_in->status_rsvd;
+       hdr->flags = cqe_data_in->flags;
+       hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
+
+       if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
+               datalen = cqe_data_in->reserved2 &
+                         ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
+               memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
+       }
+
+       /* If f/w reports data underrun err then set residual to IO transfer
+        * length, set Underrun flag and clear Overrun flag explicitly
+        */
+       if (unlikely(cqe_err_bits &&
+                    GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
+                         hdr->itt, cqe_data_in->flags, cmd->task_id,
+                         qedi_conn->iscsi_conn_id, hdr->residual_count,
+                         scsi_bufflen(sc_cmd));
+               hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
+               hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+               hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
+       }
+
+       spin_lock(&qedi_conn->list_lock);
+       if (likely(cmd->io_cmd_in_list)) {
+               cmd->io_cmd_in_list = false;
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+               mark_cmd_node_deleted = true;
+       }
+       spin_unlock(&qedi_conn->list_lock);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                 "Freeing tid=0x%x for cid=0x%x\n",
+                 cmd->task_id, qedi_conn->iscsi_conn_id);
+       cmd->state = RESPONSE_RECEIVED;
+       if (qedi_io_tracing)
+               qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
+
+       qedi_clear_task_idx(qedi, cmd->task_id);
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+                            conn->data, datalen);
+error:
+       spin_unlock_bh(&session->back_lock);
+}
+
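+/* Dispatch a solicited middle-path completion based on the iSCSI opcode. */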
+static void qedi_mtask_completion(struct qedi_ctx *qedi,
+                                 union iscsi_cqe *cqe,
+                                 struct iscsi_task *task,
+                                 struct qedi_conn *conn, u16 que_idx)
+{
+       struct iscsi_conn *iscsi_conn;
+       u32 hdr_opcode;
+
+       hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+       iscsi_conn = conn->cls_conn->dd_data;
+
+       switch (hdr_opcode) {
+       case ISCSI_OPCODE_SCSI_RESPONSE:
+       case ISCSI_OPCODE_DATA_IN:
+               qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
+               break;
+       case ISCSI_OPCODE_LOGIN_RESPONSE:
+               qedi_process_login_resp(qedi, cqe, task, conn);
+               break;
+       case ISCSI_OPCODE_TMF_RESPONSE:
+               qedi_process_tmf_resp(qedi, cqe, task, conn);
+               break;
+       case ISCSI_OPCODE_TEXT_RESPONSE:
+               qedi_process_text_resp(qedi, cqe, task, conn);
+               break;
+       case ISCSI_OPCODE_LOGOUT_RESPONSE:
+               qedi_process_logout_resp(qedi, cqe, task, conn);
+               break;
+       case ISCSI_OPCODE_NOP_IN:
+               qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
+               break;
+       default:
+               QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
+       }
+}
+
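+/* A driver-initiated NOP-OUT completed locally: free the tid and drop
+ * the task reference.
+ */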
+static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
+                                         struct iscsi_cqe_solicited *cqe,
+                                         struct iscsi_task *task,
+                                         struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct qedi_cmd *cmd = task->dd_data;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
+                 "itid=0x%x, cmd task id=0x%x\n",
+                 cqe->itid, cmd->task_id);
+
+       cmd->state = RESPONSE_RECEIVED;
+       qedi_clear_task_idx(qedi, cmd->task_id);
+
+       spin_lock_bh(&session->back_lock);
+       __iscsi_put_task(task);
+       spin_unlock_bh(&session->back_lock);
+}
+
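+/* Handle a task-cleanup CQE: complete an outstanding ABORT_TASK TMF,
+ * account for a cleanup issued by qedi_cleanup_all_io(), or warn about
+ * an untracked response.
+ */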
+static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+                                         struct iscsi_cqe_solicited *cqe,
+                                         struct iscsi_task *task,
+                                         struct iscsi_conn *conn)
+{
+       struct qedi_work_map *work, *work_tmp;
+       u32 proto_itt = cqe->itid;
+       u32 ptmp_itt = 0;
+       itt_t protoitt = 0;
+       int found = 0;
+       struct qedi_cmd *qedi_cmd = NULL;
+       u32 rtid = 0;
+       u32 iscsi_cid;
+       struct qedi_conn *qedi_conn;
+       struct qedi_cmd *dbg_cmd;
+       struct iscsi_task *mtask;
+       struct iscsi_tm *tmf_hdr = NULL;
+
+       iscsi_cid = cqe->conn_id;
+       qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+       /* Based on this itt get the corresponding qedi_cmd */
+       spin_lock_bh(&qedi_conn->tmf_work_lock);
+       list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
+                                list) {
+               if (work->rtid == proto_itt) {
+                       /* We found the command */
+                       qedi_cmd = work->qedi_cmd;
+                       if (!qedi_cmd->list_tmf_work) {
+                               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                                         "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
+                                         proto_itt, qedi_conn->iscsi_conn_id);
+                               WARN_ON(1);
+                       }
+                       found = 1;
+                       mtask = qedi_cmd->task;
+                       tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+                       rtid = work->rtid;
+
+                       list_del_init(&work->list);
+                       kfree(work);
+                       qedi_cmd->list_tmf_work = NULL;
+               }
+       }
+       spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+       if (found) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                         "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+                         proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+               if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                   ISCSI_TM_FUNC_ABORT_TASK) {
+                       spin_lock_bh(&conn->session->back_lock);
+
+                       protoitt = build_itt(get_itt(tmf_hdr->rtt),
+                                            conn->session->age);
+                       task = iscsi_itt_to_task(conn, protoitt);
+
+                       spin_unlock_bh(&conn->session->back_lock);
+
+                       if (!task) {
+                               QEDI_NOTICE(&qedi->dbg_ctx,
+                                           "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+                                           get_itt(tmf_hdr->rtt),
+                                           qedi_conn->iscsi_conn_id);
+                               return;
+                       }
+
+                       dbg_cmd = task->dd_data;
+
+                       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                                 "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+                                 get_itt(tmf_hdr->rtt), get_itt(task->itt),
+                                 dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+
+                       if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
+                               qedi_cmd->state = CLEANUP_RECV;
+
+                       qedi_clear_task_idx(qedi_conn->qedi, rtid);
+
+                       spin_lock(&qedi_conn->list_lock);
+                       list_del_init(&dbg_cmd->io_cmd);
+                       qedi_conn->active_cmd_count--;
+                       spin_unlock(&qedi_conn->list_lock);
+                       qedi_cmd->state = CLEANUP_RECV;
+                       wake_up_interruptible(&qedi_conn->wait_queue);
+               }
+       } else if (qedi_conn->cmd_cleanup_req > 0) {
+               spin_lock_bh(&conn->session->back_lock);
+               qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+               protoitt = build_itt(ptmp_itt, conn->session->age);
+               task = iscsi_itt_to_task(conn, protoitt);
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                         "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
+                         cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
+                         qedi_conn->iscsi_conn_id);
+
+               spin_unlock_bh(&conn->session->back_lock);
+               if (!task) {
+                       QEDI_NOTICE(&qedi->dbg_ctx,
+                                   "task is null, itid=0x%x, cid=0x%x\n",
+                                   cqe->itid, qedi_conn->iscsi_conn_id);
+                       return;
+               }
+               qedi_conn->cmd_cleanup_cmpl++;
+               wake_up(&qedi_conn->wait_queue);
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                         "Freeing tid=0x%x for cid=0x%x\n",
+                         cqe->itid, qedi_conn->iscsi_conn_id);
+               qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
+
+       } else {
+               qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+               protoitt = build_itt(ptmp_itt, conn->session->age);
+               task = iscsi_itt_to_task(conn, protoitt);
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
+                        protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+               WARN_ON(1);
+       }
+}
+
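+/* Fast-path CQE handler: validate the CQE and its connection, then fan
+ * out to the solicited, unsolicited, dummy and task-cleanup paths.
+ */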
+void qedi_fp_process_cqes(struct qedi_work *work)
+{
+       struct qedi_ctx *qedi = work->qedi;
+       union iscsi_cqe *cqe = &work->cqe;
+       struct iscsi_task *task = NULL;
+       struct iscsi_nopout *nopout_hdr;
+       struct qedi_conn *q_conn;
+       struct iscsi_conn *conn;
+       struct qedi_cmd *qedi_cmd;
+       u32 comp_type;
+       u32 iscsi_cid;
+       u32 hdr_opcode;
+       u16 que_idx = work->que_idx;
+       u8 cqe_err_bits = 0;
+
+       comp_type = cqe->cqe_common.cqe_type;
+       hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+       cqe_err_bits =
+               cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
+                 cqe->cqe_common.conn_id, comp_type, hdr_opcode);
+
+       if (comp_type >= MAX_ISCSI_CQES_TYPE) {
+               QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
+               return;
+       }
+
+       iscsi_cid = cqe->cqe_common.conn_id;
+       q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+       if (!q_conn) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Session no longer exists for cid=0x%x!!\n",
+                         iscsi_cid);
+               return;
+       }
+
+       conn = q_conn->cls_conn->dd_data;
+
+       if (unlikely(cqe_err_bits &&
+                    GET_FIELD(cqe_err_bits,
+                              CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
+               iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+               return;
+       }
+
+       switch (comp_type) {
+       case ISCSI_CQE_TYPE_SOLICITED:
+       case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+               qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
+               task = qedi_cmd->task;
+               if (!task) {
+                       QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
+                       return;
+               }
+
+               /* Process NOPIN local completion */
+               nopout_hdr = (struct iscsi_nopout *)task->hdr;
+               if ((nopout_hdr->itt == RESERVED_ITT) &&
+                   (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
+                       qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
+                                                     task, q_conn);
+               } else {
+                       cqe->cqe_solicited.itid =
+                                              qedi_get_itt(cqe->cqe_solicited);
+                       /* Process other solicited responses */
+                       qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
+               }
+               break;
+       case ISCSI_CQE_TYPE_UNSOLICITED:
+               switch (hdr_opcode) {
+               case ISCSI_OPCODE_NOP_IN:
+                       qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
+                                               que_idx);
+                       break;
+               case ISCSI_OPCODE_ASYNC_MSG:
+                       qedi_process_async_mesg(qedi, cqe, task, q_conn,
+                                               que_idx);
+                       break;
+               case ISCSI_OPCODE_REJECT:
+                       qedi_process_reject_mesg(qedi, cqe, task, q_conn,
+                                                que_idx);
+                       break;
+               }
+               goto exit_fp_process;
+       case ISCSI_CQE_TYPE_DUMMY:
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
+               goto exit_fp_process;
+       case ISCSI_CQE_TYPE_TASK_CLEANUP:
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
+               qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
+                                             conn);
+               goto exit_fp_process;
+       default:
+               QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
+               break;
+       }
+
+exit_fp_process:
+       return;
+}
+
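+/* Fill the next SQ work-queue entry for this task (cleanup, middle-path
+ * or normal I/O) and advance the SQ producer.
+ */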
+static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
+                          u16 tid, u16 ptu_invalidate, int is_cleanup)
+{
+       struct iscsi_wqe *wqe;
+       struct iscsi_wqe_field *cont_field;
+       struct qedi_endpoint *ep;
+       struct scsi_cmnd *sc = task->sc;
+       struct iscsi_login_req *login_hdr;
+       struct qedi_cmd *cmd = task->dd_data;
+
+       login_hdr = (struct iscsi_login_req *)task->hdr;
+       ep = qedi_conn->ep;
+       wqe = &ep->sq[ep->sq_prod_idx];
+
+       memset(wqe, 0, sizeof(*wqe));
+
+       ep->sq_prod_idx++;
+       ep->fw_sq_prod_idx++;
+       if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+               ep->sq_prod_idx = 0;
+
+       if (is_cleanup) {
+               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_TASK_CLEANUP);
+               wqe->task_id = tid;
+               return;
+       }
+
+       if (ptu_invalidate) {
+               SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
+                         ISCSI_WQE_SET_PTU_INVALIDATE);
+       }
+
+       cont_field = &wqe->cont_prevtid_union.cont_field;
+
+       switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+       case ISCSI_OP_LOGIN:
+       case ISCSI_OP_TEXT:
+               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_MIDDLE_PATH);
+               SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+                         1);
+               cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
+               break;
+       case ISCSI_OP_LOGOUT:
+       case ISCSI_OP_NOOP_OUT:
+       case ISCSI_OP_SCSI_TMFUNC:
+               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_NORMAL);
+               break;
+       default:
+               if (!sc)
+                       break;
+
+               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_NORMAL);
+               cont_field->contlen_cdbsize_field =
+                               (sc->sc_data_direction == DMA_TO_DEVICE) ?
+                               scsi_bufflen(sc) : 0;
+               if (cmd->use_slowpath)
+                       SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
+               else
+                       SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+                                 (sc->sc_data_direction ==
+                                  DMA_TO_DEVICE) ?
+                                 min((u16)QEDI_FAST_SGE_COUNT,
+                                     (u16)cmd->io_tbl.sge_valid) : 0);
+               break;
+       }
+
+       wqe->task_id = tid;
+       /* Make sure SQ data is coherent */
+       wmb();
+}
+
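+/* Post the new SQ producer index to the connection's XCM doorbell. */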
+static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
+{
+       struct iscsi_db_data dbell = { 0 };
+
+       dbell.agg_flags = 0;
+
+       dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
+       dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
+       dbell.params |=
+                  DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
+
+       dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
+       writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+
+       /* Make sure fw write idx is coherent, and include both memory barriers
+        * as a failsafe as for some architectures the call is the same but on
+        * others they are two different assembly operations.
+        */
+       wmb();
+       mmiowb();
+       QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
+                 "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
+                 qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
+                 qedi_conn->iscsi_conn_id);
+}
+
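+/* Build and post the firmware task context for a login request. */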
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+                         struct iscsi_task *task)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_login_req *login_hdr;
+       struct iscsi_login_req_hdr *fw_login_req = NULL;
+       struct iscsi_cached_sge_ctx *cached_sge = NULL;
+       struct iscsi_sge *single_sge = NULL;
+       struct iscsi_sge *req_sge = NULL;
+       struct iscsi_sge *resp_sge = NULL;
+       struct qedi_cmd *qedi_cmd;
+       s16 ptu_invalidate = 0;
+       s16 tid = 0;
+
+       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       login_hdr = (struct iscsi_login_req *)task->hdr;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+       qedi_cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
+       fw_login_req->opcode = login_hdr->opcode;
+       fw_login_req->version_min = login_hdr->min_version;
+       fw_login_req->version_max = login_hdr->max_version;
+       fw_login_req->flags_attr = login_hdr->flags;
+       fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
+       fw_login_req->isid_d = *((u32 *)login_hdr->isid);
+       fw_login_req->tsih = login_hdr->tsih;
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+       fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+       fw_login_req->cid = qedi_conn->iscsi_conn_id;
+       fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+       fw_login_req->exp_stat_sn = 0;
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                               qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                               qedi->tid_reuse_count[tid]++;
+       cached_sge =
+              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+       cached_sge->sge.sge_len = req_sge->sge_len;
+       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+       cached_sge->sge.sge_addr.hi =
+                            (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+       /* Mstorm context */
+       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+       single_sge->sge_len = resp_sge->sge_len;
+
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SINGLE_SGE, 1);
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SLOW_IO, 0);
+       fw_task_ctx->mstorm_st_context.sgl_size = 1;
+       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+                                               ntoh24(login_hdr->dlength);
+       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+       fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       fw_task_ctx->ustorm_ag_context.exp_data_acked =
+                                                ntoh24(login_hdr->dlength);
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+       qedi_cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+       return 0;
+}
+
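+/* Build and post the firmware task context for a logout request. */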
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+                          struct iscsi_task *task)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_logout_req_hdr *fw_logout_req = NULL;
+       struct iscsi_task_context *fw_task_ctx = NULL;
+       struct iscsi_logout *logout_hdr = NULL;
+       struct qedi_cmd *qedi_cmd = NULL;
+       s16 tid = 0;
+       s16 ptu_invalidate = 0;
+
+       qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       logout_hdr = (struct iscsi_logout *)task->hdr;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+       qedi_cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
+       fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
+       fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+       fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+       fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+       fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                                 qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                               qedi->tid_reuse_count[tid]++;
+       fw_logout_req->cid = qedi_conn->iscsi_conn_id;
+       fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+       /* Mstorm context */
+       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+       fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                 ISCSI_REG1_NUM_FAST_SGES, 0);
+
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+       qedi_cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+
+       return 0;
+}
+
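+/* Request firmware cleanup of every active command on the connection
+ * (optionally restricted to one LUN for a LUN reset) and wait for the
+ * cleanups to complete, draining the device on timeout.
+ */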
+int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+                       struct iscsi_task *task, bool in_recovery)
+{
+       int rval;
+       struct iscsi_task *ctask;
+       struct qedi_cmd *cmd, *cmd_tmp;
+       struct iscsi_tm *tmf_hdr;
+       unsigned int lun = 0;
+       bool lun_reset = false;
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+
+       /* task is NULL when called from recovery; otherwise it is the
+        * valid task referenced by the TMF response.
+        */
+       if (task) {
+               tmf_hdr = (struct iscsi_tm *)task->hdr;
+
+               if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                       ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
+                       lun_reset = true;
+                       lun = scsilun_to_int(&tmf_hdr->lun);
+               }
+       }
+
+       qedi_conn->cmd_cleanup_req = 0;
+       qedi_conn->cmd_cleanup_cmpl = 0;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
+                 qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
+                 in_recovery, lun_reset);
+
+       if (lun_reset)
+               spin_lock_bh(&session->back_lock);
+
+       spin_lock(&qedi_conn->list_lock);
+
+       list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+                                io_cmd) {
+               ctask = cmd->task;
+               if (ctask == task)
+                       continue;
+
+               if (lun_reset) {
+                       if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
+                               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                                         "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
+                                         cmd->task_id, get_itt(ctask->itt),
+                                         cmd->scsi_cmd, cmd->scsi_cmd->device,
+                                         ctask->state, cmd->state,
+                                         qedi_conn->iscsi_conn_id);
+                               if (cmd->scsi_cmd->device->lun != lun)
+                                       continue;
+                       }
+               }
+               qedi_conn->cmd_cleanup_req++;
+               qedi_iscsi_cleanup_task(ctask, true);
+
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
+                         &cmd->io_cmd, qedi_conn->iscsi_conn_id);
+       }
+
+       spin_unlock(&qedi_conn->list_lock);
+
+       if (lun_reset)
+               spin_unlock_bh(&session->back_lock);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "cmd_cleanup_req=%d, cid=0x%x\n",
+                 qedi_conn->cmd_cleanup_req,
+                 qedi_conn->iscsi_conn_id);
+
+       rval  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+                                                ((qedi_conn->cmd_cleanup_req ==
+                                                qedi_conn->cmd_cleanup_cmpl) ||
+                                                qedi_conn->ep),
+                                                5 * HZ);
+       if (rval) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                         "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+                         qedi_conn->cmd_cleanup_req,
+                         qedi_conn->cmd_cleanup_cmpl,
+                         qedi_conn->iscsi_conn_id);
+
+               return 0;
+       }
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+                 qedi_conn->cmd_cleanup_req,
+                 qedi_conn->cmd_cleanup_cmpl,
+                 qedi_conn->iscsi_conn_id);
+
+       iscsi_host_for_each_session(qedi->shost,
+                                   qedi_mark_device_missing);
+       qedi_ops->common->drain(qedi->cdev);
+
+       /* Enable IOs for all other sessions except the current one. */
+       if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
+                                             (qedi_conn->cmd_cleanup_req ==
+                                              qedi_conn->cmd_cleanup_cmpl),
+                                             5 * HZ)) {
+               iscsi_host_for_each_session(qedi->shost,
+                                           qedi_mark_device_available);
+               return -1;
+       }
+
+       iscsi_host_for_each_session(qedi->shost,
+                                   qedi_mark_device_available);
+
+       return 0;
+}
+
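+/* Flush the connection's SQ in firmware and clean up outstanding I/O. */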
+void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+                 struct iscsi_task *task)
+{
+       struct qedi_endpoint *qedi_ep;
+       int rval;
+
+       qedi_ep = qedi_conn->ep;
+       qedi_conn->cmd_cleanup_req = 0;
+       qedi_conn->cmd_cleanup_cmpl = 0;
+
+       if (!qedi_ep) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Cannot proceed, ep already disconnected, cid=0x%x\n",
+                         qedi_conn->iscsi_conn_id);
+               return;
+       }
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
+                 qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
+
+       qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
+
+       rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
+       if (rval) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "fatal error, need hard reset, cid=0x%x\n",
+                        qedi_conn->iscsi_conn_id);
+               WARN_ON(1);
+       }
+}
+
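+/* Wait up to five seconds for the cleanup (or the original response) to
+ * arrive; mark the command CLEANUP_WAIT_FAILED on timeout.
+ */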
+static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
+                                        struct qedi_conn *qedi_conn,
+                                        struct iscsi_task *task,
+                                        struct qedi_cmd *qedi_cmd,
+                                        struct qedi_work_map *list_work)
+{
+       struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
+       int wait;
+
+       wait  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+                                                ((qedi_cmd->state ==
+                                                  CLEANUP_RECV) ||
+                                                ((qedi_cmd->type == TYPEIO) &&
+                                                 (cmd->state ==
+                                                  RESPONSE_RECEIVED))),
+                                                5 * HZ);
+       if (!wait) {
+               qedi_cmd->state = CLEANUP_WAIT_FAILED;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                         "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
+                         cmd->task_id, qedi_conn->iscsi_conn_id);
+
+               return -1;
+       }
+       return 0;
+}
+
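+/* Deferred ABORT_TASK handling: ask firmware to clean up the aborted
+ * task first, then send the TMF PDU itself once the cleanup completes.
+ */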
+static void qedi_tmf_work(struct work_struct *work)
+{
+       struct qedi_cmd *qedi_cmd =
+               container_of(work, struct qedi_cmd, tmf_work);
+       struct qedi_conn *qedi_conn = qedi_cmd->conn;
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct qedi_work_map *list_work = NULL;
+       struct iscsi_task *mtask;
+       struct qedi_cmd *cmd;
+       struct iscsi_task *ctask;
+       struct iscsi_tm *tmf_hdr;
+       s16 rval = 0;
+       s16 tid = 0;
+
+       mtask = qedi_cmd->task;
+       tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+       set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+       ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+       if (!ctask || !ctask->sc) {
+               QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
+               goto abort_ret;
+       }
+
+       cmd = (struct qedi_cmd *)ctask->dd_data;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
+                 get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
+                 qedi_conn->iscsi_conn_id);
+
+       if (do_not_recover) {
+               QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
+                        do_not_recover);
+               goto abort_ret;
+       }
+
+       list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+       if (!list_work) {
+               QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n");
+               goto abort_ret;
+       }
+
+       qedi_cmd->type = TYPEIO;
+       list_work->qedi_cmd = qedi_cmd;
+       list_work->rtid = cmd->task_id;
+       list_work->state = QEDI_WORK_SCHEDULED;
+       qedi_cmd->list_tmf_work = list_work;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
+                 list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
+                 tmf_hdr->flags);
+
+       spin_lock_bh(&qedi_conn->tmf_work_lock);
+       list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
+       spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+       qedi_iscsi_cleanup_task(ctask, false);
+
+       rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
+                                            list_work);
+       if (rval == -1) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "FW cleanup got escalated, cid=0x%x\n",
+                         qedi_conn->iscsi_conn_id);
+               goto ldel_exit;
+       }
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1) {
+               QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+                        qedi_conn->iscsi_conn_id);
+               goto ldel_exit;
+       }
+
+       qedi_cmd->task_id = tid;
+       qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+abort_ret:
+       clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+       return;
+
+ldel_exit:
+       spin_lock_bh(&qedi_conn->tmf_work_lock);
+       if (!qedi_cmd->list_tmf_work) {
+               list_del_init(&list_work->list);
+               qedi_cmd->list_tmf_work = NULL;
+               kfree(list_work);
+       }
+       spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+       spin_lock(&qedi_conn->list_lock);
+       list_del_init(&cmd->io_cmd);
+       qedi_conn->active_cmd_count--;
+       spin_unlock(&qedi_conn->list_lock);
+
+       clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
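+/* Build and post the firmware task context for a TMF request. */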
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+                              struct iscsi_task *mtask)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_tmf_request_hdr *fw_tmf_request;
+       struct iscsi_sge *single_sge;
+       struct qedi_cmd *qedi_cmd;
+       struct qedi_cmd *cmd;
+       struct iscsi_task *ctask;
+       struct iscsi_tm *tmf_hdr;
+       struct iscsi_sge *req_sge;
+       struct iscsi_sge *resp_sge;
+       u32 lun[2];
+       s16 tid = 0, ptu_invalidate = 0;
+
+       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+       tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+
+       tid = qedi_cmd->task_id;
+       qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+       fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
+       fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
+       fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+       memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+       fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
+       fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                               qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                               qedi->tid_reuse_count[tid]++;
+
+       if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+            ISCSI_TM_FUNC_ABORT_TASK) {
+               ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+               if (!ctask || !ctask->sc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not get reference task\n");
+                       return 0;
+               }
+               cmd = (struct qedi_cmd *)ctask->dd_data;
+               fw_tmf_request->rtt =
+                               qedi_set_itt(cmd->task_id,
+                                            get_itt(tmf_hdr->rtt));
+       } else {
+               fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+       }
+
+       fw_tmf_request->opcode = tmf_hdr->opcode;
+       fw_tmf_request->function = tmf_hdr->flags;
+       fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
+       fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
+
+       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+       single_sge->sge_len = resp_sge->sge_len;
+
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SINGLE_SGE, 1);
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SLOW_IO, 0);
+       fw_task_ctx->mstorm_st_context.sgl_size = 1;
+       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+       fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                 ISCSI_REG1_NUM_FAST_SGES, 0);
+
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
+                 tid, mtask->itt, qedi_conn->iscsi_conn_id);
+
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+       qedi_cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+       return 0;
+}
+
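+/* TMF entry point: ABORT_TASK is deferred to the TMF workqueue, while
+ * LUN and target resets are sent to the firmware immediately.
+ */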
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+                         struct iscsi_task *mtask)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_tm *tmf_hdr;
+       struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+       s16 tid = 0;
+
+       tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+       qedi_cmd->task = mtask;
+
+       /* If abort task then schedule the work and return */
+       if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+           ISCSI_TM_FUNC_ABORT_TASK) {
+               qedi_cmd->state = CLEANUP_WAIT;
+               INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+               queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+
+       } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                   ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+                  ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                   ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+                  ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                   ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+               tid = qedi_get_task_idx(qedi);
+               if (tid == -1) {
+                       QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+                                qedi_conn->iscsi_conn_id);
+                       return -1;
+               }
+               qedi_cmd->task_id = tid;
+
+               qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+       } else {
+               QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
+                        qedi_conn->iscsi_conn_id);
+               return -1;
+       }
+
+       return 0;
+}
+
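+/* Build and post the firmware task context for a text request. */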
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+                        struct iscsi_task *task)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_text_request_hdr *fw_text_request;
+       struct iscsi_cached_sge_ctx *cached_sge;
+       struct iscsi_sge *single_sge;
+       struct qedi_cmd *qedi_cmd;
+       /* For 6.5 hdr iscsi_hdr */
+       struct iscsi_text *text_hdr;
+       struct iscsi_sge *req_sge;
+       struct iscsi_sge *resp_sge;
+       s16 ptu_invalidate = 0;
+       s16 tid = 0;
+
+       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       text_hdr = (struct iscsi_text *)task->hdr;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+       qedi_cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_text_request =
+                       &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
+       fw_text_request->opcode = text_hdr->opcode;
+       fw_text_request->flags_attr = text_hdr->flags;
+
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+       fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
+       fw_text_request->ttt = text_hdr->ttt;
+       fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+       fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+       fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                                    qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                                  qedi->tid_reuse_count[tid]++;
+
+       cached_sge =
+              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+       cached_sge->sge.sge_len = req_sge->sge_len;
+       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+       cached_sge->sge.sge_addr.hi =
+                             (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+       /* Mstorm context */
+       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+       single_sge->sge_len = resp_sge->sge_len;
+
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SINGLE_SGE, 1);
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SLOW_IO, 0);
+       fw_task_ctx->mstorm_st_context.sgl_size = 1;
+       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_ag_context.exp_data_acked =
+                                                     ntoh24(text_hdr->dlength);
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+                                                     ntoh24(text_hdr->dlength);
+       fw_task_ctx->ustorm_st_context.exp_data_sn =
+                                             be32_to_cpu(text_hdr->exp_statsn);
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+       fw_task_ctx->ustorm_st_context.task_type = 0x2;    /* ISCSI_TASK_TYPE_MIDPATH */
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+       /*  Add command in active command list */
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+       qedi_cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+
+       return 0;
+}
+
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+                          struct iscsi_task *task,
+                          char *datap, int data_len, int unsol)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_nop_out_hdr *fw_nop_out;
+       struct qedi_cmd *qedi_cmd;
+       /* For 6.5 hdr iscsi_hdr */
+       struct iscsi_nopout *nopout_hdr;
+       struct iscsi_cached_sge_ctx *cached_sge;
+       struct iscsi_sge *single_sge;
+       struct iscsi_sge *req_sge;
+       struct iscsi_sge *resp_sge;
+       u32 lun[2];
+       s16 ptu_invalidate = 0;
+       s16 tid = 0;
+
+       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       nopout_hdr = (struct iscsi_nopout *)task->hdr;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1) {
+               QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+               return -ENOMEM;
+       }
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+       qedi_cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
+       SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+       SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+
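+       /*
+        * The 8-byte LUN is carried big-endian on the wire; fold each half
+        * into the firmware's two 32-bit LUN words.
+        */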
+       memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+       fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
+       fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+
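+       /*
+        * A TTT other than all-ones means this NOP-Out answers a target
+        * NOP-In; it completes locally and is not tracked on the active
+        * command list.
+        */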
+       if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
+               fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
+               fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
+               fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+               fw_task_ctx->ystorm_st_context.state.local_comp = 1;
+               SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                         USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+       } else {
+               fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
+               fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
+               fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+               spin_lock(&qedi_conn->list_lock);
+               list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+               qedi_cmd->io_cmd_in_list = true;
+               qedi_conn->active_cmd_count++;
+               spin_unlock(&qedi_conn->list_lock);
+       }
+
+       fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
+       fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+       fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
+
+       cached_sge =
+              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+       cached_sge->sge.sge_len = req_sge->sge_len;
+       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+       cached_sge->sge.sge_addr.hi =
+                       (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+       /* Mstorm context */
+       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+       single_sge->sge_len = resp_sge->sge_len;
+       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                               qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                               qedi->tid_reuse_count[tid]++;
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
+       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+       fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                 ISCSI_REG1_NUM_FAST_SGES, 0);
+
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+       return 0;
+}
+
+static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
+                        int bd_index)
+{
+       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+       int frag_size, sg_frags;
+
+       sg_frags = 0;
+
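+       /*
+        * Walk the region: the first fragment runs up to the next page
+        * boundary when addr is unaligned; after that, emit
+        * QEDI_BD_SPLIT_SZ-sized chunks, with the remainder in the last.
+        */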
+       while (sg_len) {
+               if (addr % QEDI_PAGE_SIZE)
+                       frag_size =
+                                  (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
+               else
+                       frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
+                                   (sg_len % QEDI_BD_SPLIT_SZ);
+
+               if (frag_size == 0)
+                       frag_size = QEDI_BD_SPLIT_SZ;
+
+               bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
+               bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
+               bd[bd_index + sg_frags].sge_len = (u16)frag_size;
+               QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
+                         "split sge %d: addr=%llx, len=%x",
+                         (bd_index + sg_frags), addr, frag_size);
+
+               addr += (u64)frag_size;
+               sg_frags++;
+               sg_len -= frag_size;
+       }
+       return sg_frags;
+}
+
+static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+       struct scatterlist *sg;
+       int byte_count = 0;
+       int bd_count = 0;
+       int sg_count;
+       int sg_len;
+       int sg_frags;
+       u64 addr, end_addr;
+       int i;
+
+       WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
+
+       sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
+                             scsi_sg_count(sc), sc->sc_data_direction);
+
+       /*
+        * Fast path: a single SGE no longer than MAX_SGLEN_FOR_CACHESGL
+        * (64K) is sent to the firmware as a cached SGL.
+        */
+       sg = scsi_sglist(sc);
+       if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
+               sg_len = sg_dma_len(sg);
+               addr = (u64)sg_dma_address(sg);
+
+               bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
+               bd[bd_count].sge_addr.hi = (addr >> 32);
+               bd[bd_count].sge_len = (u16)sg_len;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+                         "single-cached-sgl: bd_count:%d addr=%llx, len=%x",
+                         sg_count, addr, sg_len);
+
+               return ++bd_count;
+       }
+
+       scsi_for_each_sg(sc, sg, sg_count, i) {
+               sg_len = sg_dma_len(sg);
+               addr = (u64)sg_dma_address(sg);
+               end_addr = (addr + sg_len);
+
+               /*
+                * First sg element: take the slow path if its end
+                * address is not page-aligned.
+                */
+               if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
+                       cmd->use_slowpath = true;
+
+               /*
+                * Last sg element: take the slow path if its start
+                * address is not page-aligned.
+                */
+               else if ((i == (sg_count - 1)) &&
+                        (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
+                       cmd->use_slowpath = true;
+
+               /*
+                * Middle sg elements: take the slow path if either the
+                * start or the end address is not page-aligned.
+                */
+               else if ((i != 0) && (i != (sg_count - 1)) &&
+                        ((addr % QEDI_PAGE_SIZE) ||
+                        (end_addr % QEDI_PAGE_SIZE)))
+                       cmd->use_slowpath = true;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
+                         i, sg_len);
+
+               if (sg_len > QEDI_BD_SPLIT_SZ) {
+                       sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
+               } else {
+                       sg_frags = 1;
+                       bd[bd_count].sge_addr.lo = addr & 0xffffffff;
+                       bd[bd_count].sge_addr.hi = addr >> 32;
+                       bd[bd_count].sge_len = sg_len;
+               }
+               byte_count += sg_len;
+               bd_count += sg_frags;
+       }
+
+       if (byte_count != scsi_bufflen(sc))
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "byte_count = %d != scsi_bufflen = %d\n", byte_count,
+                        scsi_bufflen(sc));
+       else
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
+                         byte_count);
+
+       WARN_ON(byte_count != scsi_bufflen(sc));
+
+       return bd_count;
+}
+
+static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
+{
+       int bd_count;
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+       if (scsi_sg_count(sc)) {
+               bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
+               if (bd_count == 0)
+                       return;
+       } else {
+               struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+
+               bd[0].sge_addr.lo = 0;
+               bd[0].sge_addr.hi = 0;
+               bd[0].sge_len = 0;
+               bd_count = 0;
+       }
+       cmd->io_tbl.sge_valid = bd_count;
+}
+
+static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
+{
+       u32 dword;
+       int lpcnt;
+       u8 *srcp;
+
+       lpcnt = sc->cmd_len / sizeof(dword);
+       srcp = (u8 *)sc->cmnd;
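+       /* Copy the CDB into the firmware context as big-endian dwords. */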
+       while (lpcnt--) {
+               memcpy(&dword, (const void *)srcp, 4);
+               *dstp = cpu_to_be32(dword);
+               srcp += 4;
+               dstp++;
+       }
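+       /*
+        * Standard CDB lengths (6/10/12/16) leave at most a two-byte
+        * tail, which is folded into one final dword here.
+        */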
+       if (sc->cmd_len & 0x3) {
+               dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
+               *dstp = cpu_to_be32(dword);
+       }
+}
+
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+                  u16 tid, int8_t direction)
+{
+       struct qedi_io_log *io_log;
+       struct iscsi_conn *conn = task->conn;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct scsi_cmnd *sc_cmd = task->sc;
+       unsigned long flags;
+       u8 op;
+
+       spin_lock_irqsave(&qedi->io_trace_lock, flags);
+
+       io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
+       io_log->direction = direction;
+       io_log->task_id = tid;
+       io_log->cid = qedi_conn->iscsi_conn_id;
+       io_log->lun = sc_cmd->device->lun;
+       io_log->op = sc_cmd->cmnd[0];
+       op = sc_cmd->cmnd[0];
+       io_log->lba[0] = sc_cmd->cmnd[2];
+       io_log->lba[1] = sc_cmd->cmnd[3];
+       io_log->lba[2] = sc_cmd->cmnd[4];
+       io_log->lba[3] = sc_cmd->cmnd[5];
+       io_log->bufflen = scsi_bufflen(sc_cmd);
+       io_log->sg_count = scsi_sg_count(sc_cmd);
+       io_log->fast_sgs = qedi->fast_sgls;
+       io_log->cached_sgs = qedi->cached_sgls;
+       io_log->slow_sgs = qedi->slow_sgls;
+       io_log->cached_sge = qedi->use_cached_sge;
+       io_log->slow_sge = qedi->use_slow_sge;
+       io_log->fast_sge = qedi->use_fast_sge;
+       io_log->result = sc_cmd->result;
+       io_log->jiffies = jiffies;
+       io_log->blk_req_cpu = smp_processor_id();
+
+       if (direction == QEDI_IO_TRACE_REQ) {
+               /* For requests we only care about the submission CPU */
+               io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+               io_log->intr_cpu = 0;
+               io_log->blk_rsp_cpu = 0;
+       } else if (direction == QEDI_IO_TRACE_RSP) {
+               io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+               io_log->intr_cpu = qedi->intr_cpu;
+               io_log->blk_rsp_cpu = smp_processor_id();
+       }
+
+       qedi->io_trace_idx++;
+       if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
+               qedi->io_trace_idx = 0;
+
+       qedi->use_cached_sge = false;
+       qedi->use_slow_sge = false;
+       qedi->use_fast_sge = false;
+
+       spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+}
+
+int qedi_iscsi_send_ioreq(struct iscsi_task *task)
+{
+       struct iscsi_conn *conn = task->conn;
+       struct iscsi_session *session = conn->session;
+       struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+       struct qedi_ctx *qedi = iscsi_host_priv(shost);
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_cmd *cmd = task->dd_data;
+       struct scsi_cmnd *sc = task->sc;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_cached_sge_ctx *cached_sge;
+       struct iscsi_phys_sgl_ctx *phys_sgl;
+       struct iscsi_virt_sgl_ctx *virt_sgl;
+       struct ystorm_iscsi_task_st_ctx *yst_cxt;
+       struct mstorm_iscsi_task_st_ctx *mst_cxt;
+       struct iscsi_sgl *sgl_struct;
+       struct iscsi_sge *single_sge;
+       struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
+       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+       enum iscsi_task_type task_type;
+       struct iscsi_cmd_hdr *fw_cmd;
+       u32 lun[2];
+       u32 exp_data;
+       u16 cq_idx = smp_processor_id() % qedi->num_queues;
+       s16 ptu_invalidate = 0;
+       s16 tid = 0;
+       u8 num_fast_sgs;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
+
+       qedi_iscsi_map_sg_list(cmd);
+
+       int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+       cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
+       SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+
+       if (sc->sc_data_direction == DMA_TO_DEVICE) {
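+               /*
+                * Precompute how much WRITE data the target accepts before
+                * the first R2T: immediate data only when InitialR2T is
+                * enabled, otherwise up to FirstBurstLength of unsolicited
+                * data, capped at the buffer length.
+                */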
+               if (conn->session->initial_r2t_en) {
+                       exp_data = min((conn->session->imm_data_en *
+                                       conn->max_xmit_dlength),
+                                      conn->session->first_burst);
+                       exp_data = min(exp_data, scsi_bufflen(sc));
+                       fw_task_ctx->ustorm_ag_context.exp_data_acked =
+                                                         cpu_to_le32(exp_data);
+               } else {
+                       fw_task_ctx->ustorm_ag_context.exp_data_acked =
+                             min(conn->session->first_burst, scsi_bufflen(sc));
+               }
+
+               SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+               task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+       } else {
+               if (scsi_bufflen(sc))
+                       SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+               task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+       }
+
+       fw_cmd->lun.lo = be32_to_cpu(lun[0]);
+       fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+
+       qedi_update_itt_map(qedi, tid, task->itt, cmd);
+       fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
+       fw_cmd->expected_transfer_length = scsi_bufflen(sc);
+       fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+       fw_cmd->opcode = hdr->opcode;
+       qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
+
+       /* Mstorm context */
+       fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
+       fw_task_ctx->mstorm_st_context.sense_db.hi =
+                                       (u32)((u64)cmd->sense_buffer_dma >> 32);
+       fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
+       fw_task_ctx->mstorm_st_context.task_type = task_type;
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                                    qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                                  qedi->tid_reuse_count[tid]++;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
+       fw_task_ctx->ustorm_st_context.exp_data_sn =
+                                                  be32_to_cpu(hdr->exp_statsn);
+       fw_task_ctx->ustorm_st_context.task_type = task_type;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+       num_fast_sgs = (cmd->io_tbl.sge_valid ?
+                       min((u16)QEDI_FAST_SGE_COUNT,
+                           (u16)cmd->io_tbl.sge_valid) : 0);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                 ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
+
+       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
+                 cmd->io_tbl.sge_valid);
+
+       yst_cxt = &fw_task_ctx->ystorm_st_context;
+       mst_cxt = &fw_task_ctx->mstorm_st_context;
+       /* Tx path */
+       if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+               /* not considering superIO or FastIO */
+               if (cmd->io_tbl.sge_valid == 1) {
+                       cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
+                       cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
+                       cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
+                       cached_sge->sge.sge_len = bd[0].sge_len;
+                       qedi->cached_sgls++;
+               } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 1);
+                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                                 ISCSI_REG1_NUM_FAST_SGES, 0);
+                       phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
+                       phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+                       phys_sgl->sgl_base.hi =
+                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+                       phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
+                       qedi->slow_sgls++;
+               } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 0);
+                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                                 ISCSI_REG1_NUM_FAST_SGES,
+                                 min((u16)QEDI_FAST_SGE_COUNT,
+                                     (u16)cmd->io_tbl.sge_valid));
+                       virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
+                       virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+                       virt_sgl->sgl_base.hi =
+                                     (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+                       virt_sgl->sgl_initial_offset =
+                                (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+                       qedi->fast_sgls++;
+               }
+               fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+               fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+       } else {
+               /* Rx path */
+               if (cmd->io_tbl.sge_valid == 1) {
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 0);
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SINGLE_SGE, 1);
+                       single_sge = &mst_cxt->sgl_union.single_sge;
+                       single_sge->sge_addr.lo = bd[0].sge_addr.lo;
+                       single_sge->sge_addr.hi = bd[0].sge_addr.hi;
+                       single_sge->sge_len = bd[0].sge_len;
+                       qedi->cached_sgls++;
+               } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+                       sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+                       sgl_struct->sgl_addr.lo =
+                                               (u32)(cmd->io_tbl.sge_tbl_dma);
+                       sgl_struct->sgl_addr.hi =
+                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 1);
+                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                                 ISCSI_REG1_NUM_FAST_SGES, 0);
+                       sgl_struct->updated_sge_size = 0;
+                       sgl_struct->updated_sge_offset = 0;
+                       qedi->slow_sgls++;
+               } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+                       sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+                       sgl_struct->sgl_addr.lo =
+                                               (u32)(cmd->io_tbl.sge_tbl_dma);
+                       sgl_struct->sgl_addr.hi =
+                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+                       sgl_struct->byte_offset =
+                               (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 0);
+                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                                 ISCSI_REG1_NUM_FAST_SGES, 0);
+                       sgl_struct->updated_sge_size = 0;
+                       sgl_struct->updated_sge_offset = 0;
+                       qedi->fast_sgls++;
+               }
+               fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+               fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+       }
+
+       if (cmd->io_tbl.sge_valid == 1) {
+               /* Single-SGL */
+               qedi->use_cached_sge = true;
+       } else {
+               if (cmd->use_slowpath)
+                       qedi->use_slow_sge = true;
+               else
+                       qedi->use_fast_sge = true;
+       }
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+                 "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+                 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
+                 "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
+                 "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
+                 (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+                 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
+
+       /*  Add command in active command list */
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
+       cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+       if (qedi_io_tracing)
+               qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
+
+       return 0;
+}
+
+int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
+{
+       struct iscsi_conn *conn = task->conn;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_cmd *cmd = task->dd_data;
+       s16 ptu_invalidate = 0;
+
+       QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0x%x cid=0x%x\n",
+                 cmd->task_id, get_itt(task->itt), task->state,
+                 cmd->state, qedi_conn->iscsi_conn_id);
+
+       qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
+       qedi_ring_doorbell(qedi_conn);
+
+       return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
new file mode 100644 (file)
index 0000000..8e488de
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_GBL_H_
+#define _QEDI_GBL_H_
+
+#include "qedi_iscsi.h"
+
+extern uint qedi_io_tracing;
+extern int do_not_recover;
+extern struct scsi_host_template qedi_host_template;
+extern struct iscsi_transport qedi_iscsi_transport;
+extern const struct qed_iscsi_ops *qedi_ops;
+extern struct qedi_debugfs_ops qedi_debugfs_ops;
+extern const struct file_operations qedi_dbg_fops;
+extern struct device_attribute *qedi_shost_attrs[];
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+                         struct iscsi_task *task);
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+                          struct iscsi_task *task);
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+                         struct iscsi_task *mtask);
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+                        struct iscsi_task *task);
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+                          struct iscsi_task *task,
+                          char *datap, int data_len, int unsol);
+int qedi_iscsi_send_ioreq(struct iscsi_task *task);
+int qedi_get_task_idx(struct qedi_ctx *qedi);
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx);
+int qedi_iscsi_cleanup_task(struct iscsi_task *task,
+                           bool mark_cmd_node_deleted);
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+                        struct qedi_cmd *qedi_cmd);
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+                             struct async_data *data);
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+                             struct qedi_conn *qedi_conn);
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
+int qedi_recover_all_conns(struct qedi_ctx *qedi);
+void qedi_fp_process_cqes(struct qedi_work *work);
+int qedi_cleanup_all_io(struct qedi_ctx *qedi,
+                       struct qedi_conn *qedi_conn,
+                       struct iscsi_task *task, bool in_recovery);
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+                  u16 tid, int8_t direction);
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
+int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_clearsq(struct qedi_ctx *qedi,
+                 struct qedi_conn *qedi_conn,
+                 struct iscsi_task *task);
+
+#endif
diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h
new file mode 100644 (file)
index 0000000..8ca44c7
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef __QEDI_HSI__
+#define __QEDI_HSI__
+/*
+ * Add include to common target
+ */
+#include <linux/qed/common_hsi.h>
+
+/*
+ * Add include to common storage target
+ */
+#include <linux/qed/storage_common.h>
+
+/*
+ * Add include to common TCP target
+ */
+#include <linux/qed/tcp_common.h>
+
+/*
+ * Add include to common iSCSI target for both eCore and protocol driver
+ */
+#include <linux/qed/iscsi_common.h>
+
+/*
+ * iSCSI CMDQ element
+ */
+struct iscsi_cmdqe {
+       __le16 conn_id;
+       u8 invalid_command;
+       u8 cmd_hdr_type;
+       __le32 reserved1[2];
+       __le32 cmd_payload[13];
+};
+
+/*
+ * iSCSI CMD header type
+ */
+enum iscsi_cmd_hdr_type {
+       ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */,
+       ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */,
+       ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */,
+       MAX_ISCSI_CMD_HDR_TYPE
+};
+
+#endif /* __QEDI_HSI__ */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
new file mode 100644 (file)
index 0000000..d6a2054
--- /dev/null
@@ -0,0 +1,1624 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <scsi/scsi_tcq.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+int qedi_recover_all_conns(struct qedi_ctx *qedi)
+{
+       struct qedi_conn *qedi_conn;
+       int i;
+
+       for (i = 0; i < qedi->max_active_conns; i++) {
+               qedi_conn = qedi_get_conn_from_id(qedi, i);
+               if (!qedi_conn)
+                       continue;
+
+               qedi_start_conn_recovery(qedi, qedi_conn);
+       }
+
+       return SUCCESS;
+}
+
+static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
+{
+       struct Scsi_Host *shost = cmd->device->host;
+       struct qedi_ctx *qedi;
+
+       qedi = iscsi_host_priv(shost);
+
+       return qedi_recover_all_conns(qedi);
+}
+
+struct scsi_host_template qedi_host_template = {
+       .module = THIS_MODULE,
+       .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
+       .proc_name = QEDI_MODULE_NAME,
+       .queuecommand = iscsi_queuecommand,
+       .eh_abort_handler = iscsi_eh_abort,
+       .eh_device_reset_handler = iscsi_eh_device_reset,
+       .eh_target_reset_handler = iscsi_eh_recover_target,
+       .eh_host_reset_handler = qedi_eh_host_reset,
+       .target_alloc = iscsi_target_alloc,
+       .change_queue_depth = scsi_change_queue_depth,
+       .can_queue = QEDI_MAX_ISCSI_TASK,
+       .this_id = -1,
+       .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
+       .max_sectors = 0xffff,
+       .cmd_per_lun = 128,
+       .use_clustering = ENABLE_CLUSTERING,
+       .shost_attrs = qedi_shost_attrs,
+};
+
+static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
+                                          struct qedi_conn *qedi_conn)
+{
+       if (qedi_conn->gen_pdu.resp_bd_tbl) {
+               dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                                 qedi_conn->gen_pdu.resp_bd_tbl,
+                                 qedi_conn->gen_pdu.resp_bd_dma);
+               qedi_conn->gen_pdu.resp_bd_tbl = NULL;
+       }
+
+       if (qedi_conn->gen_pdu.req_bd_tbl) {
+               dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                                 qedi_conn->gen_pdu.req_bd_tbl,
+                                 qedi_conn->gen_pdu.req_bd_dma);
+               qedi_conn->gen_pdu.req_bd_tbl = NULL;
+       }
+
+       if (qedi_conn->gen_pdu.resp_buf) {
+               dma_free_coherent(&qedi->pdev->dev,
+                                 ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                 qedi_conn->gen_pdu.resp_buf,
+                                 qedi_conn->gen_pdu.resp_dma_addr);
+               qedi_conn->gen_pdu.resp_buf = NULL;
+       }
+
+       if (qedi_conn->gen_pdu.req_buf) {
+               dma_free_coherent(&qedi->pdev->dev,
+                                 ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                 qedi_conn->gen_pdu.req_buf,
+                                 qedi_conn->gen_pdu.req_dma_addr);
+               qedi_conn->gen_pdu.req_buf = NULL;
+       }
+}
+
+static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
+                                          struct qedi_conn *qedi_conn)
+{
+       qedi_conn->gen_pdu.req_buf =
+               dma_alloc_coherent(&qedi->pdev->dev,
+                                  ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                  &qedi_conn->gen_pdu.req_dma_addr,
+                                  GFP_KERNEL);
+       if (!qedi_conn->gen_pdu.req_buf)
+               goto login_req_buf_failure;
+
+       qedi_conn->gen_pdu.req_buf_size = 0;
+       qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf;
+
+       qedi_conn->gen_pdu.resp_buf =
+               dma_alloc_coherent(&qedi->pdev->dev,
+                                  ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                  &qedi_conn->gen_pdu.resp_dma_addr,
+                                  GFP_KERNEL);
+       if (!qedi_conn->gen_pdu.resp_buf)
+               goto login_resp_buf_failure;
+
+       qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+       qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf;
+
+       qedi_conn->gen_pdu.req_bd_tbl =
+               dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                                  &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+       if (!qedi_conn->gen_pdu.req_bd_tbl)
+               goto login_req_bd_tbl_failure;
+
+       qedi_conn->gen_pdu.resp_bd_tbl =
+               dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                                  &qedi_conn->gen_pdu.resp_bd_dma,
+                                  GFP_KERNEL);
+       if (!qedi_conn->gen_pdu.resp_bd_tbl)
+               goto login_resp_bd_tbl_failure;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS,
+                 "Allocation successful, cid=0x%x\n",
+                 qedi_conn->iscsi_conn_id);
+       return 0;
+
+login_resp_bd_tbl_failure:
+       dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                         qedi_conn->gen_pdu.req_bd_tbl,
+                         qedi_conn->gen_pdu.req_bd_dma);
+       qedi_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+       dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+                         qedi_conn->gen_pdu.resp_buf,
+                         qedi_conn->gen_pdu.resp_dma_addr);
+       qedi_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+       dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+                         qedi_conn->gen_pdu.req_buf,
+                         qedi_conn->gen_pdu.req_dma_addr);
+       qedi_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+       iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data,
+                         "login resource alloc failed!!\n");
+       return -ENOMEM;
+}
+
+static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
+                                 struct iscsi_session *session)
+{
+       int i;
+
+       for (i = 0; i < session->cmds_max; i++) {
+               struct iscsi_task *task = session->cmds[i];
+               struct qedi_cmd *cmd = task->dd_data;
+
+               if (cmd->io_tbl.sge_tbl)
+                       dma_free_coherent(&qedi->pdev->dev,
+                                         QEDI_ISCSI_MAX_BDS_PER_CMD *
+                                         sizeof(struct iscsi_sge),
+                                         cmd->io_tbl.sge_tbl,
+                                         cmd->io_tbl.sge_tbl_dma);
+
+               if (cmd->sense_buffer)
+                       dma_free_coherent(&qedi->pdev->dev,
+                                         SCSI_SENSE_BUFFERSIZE,
+                                         cmd->sense_buffer,
+                                         cmd->sense_buffer_dma);
+       }
+}
+
+static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
+                          struct qedi_cmd *cmd)
+{
+       struct qedi_io_bdt *io = &cmd->io_tbl;
+       struct iscsi_sge *sge;
+
+       io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
+                                        QEDI_ISCSI_MAX_BDS_PER_CMD *
+                                        sizeof(*sge),
+                                        &io->sge_tbl_dma, GFP_KERNEL);
+       if (!io->sge_tbl) {
+               iscsi_session_printk(KERN_ERR, session,
+                                    "Could not allocate BD table.\n");
+               return -ENOMEM;
+       }
+
+       io->sge_valid = 0;
+       return 0;
+}
+
+static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
+                              struct iscsi_session *session)
+{
+       int i;
+
+       for (i = 0; i < session->cmds_max; i++) {
+               struct iscsi_task *task = session->cmds[i];
+               struct qedi_cmd *cmd = task->dd_data;
+
+               task->hdr = &cmd->hdr;
+               task->hdr_max = sizeof(struct iscsi_hdr);
+
+               if (qedi_alloc_sget(qedi, session, cmd))
+                       goto free_sgets;
+
+               cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
+                                                      SCSI_SENSE_BUFFERSIZE,
+                                                      &cmd->sense_buffer_dma,
+                                                      GFP_KERNEL);
+               if (!cmd->sense_buffer)
+                       goto free_sgets;
+       }
+
+       return 0;
+
+free_sgets:
+       qedi_destroy_cmd_pool(qedi, session);
+       return -ENOMEM;
+}
+
+static struct iscsi_cls_session *
+qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max,
+                   u16 qdepth, uint32_t initial_cmdsn)
+{
+       struct Scsi_Host *shost;
+       struct iscsi_cls_session *cls_session;
+       struct qedi_ctx *qedi;
+       struct qedi_endpoint *qedi_ep;
+
+       if (!ep)
+               return NULL;
+
+       qedi_ep = ep->dd_data;
+       shost = qedi_ep->qedi->shost;
+       qedi = iscsi_host_priv(shost);
+
+       if (cmds_max > qedi->max_sqes)
+               cmds_max = qedi->max_sqes;
+       else if (cmds_max < QEDI_SQ_WQES_MIN)
+               cmds_max = QEDI_SQ_WQES_MIN;
+
+       cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost,
+                                         cmds_max, 0, sizeof(struct qedi_cmd),
+                                         initial_cmdsn, ISCSI_MAX_TARGET);
+       if (!cls_session) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failed to setup session for ep=%p\n", qedi_ep);
+               return NULL;
+       }
+
+       if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failed to setup cmd pool for ep=%p\n", qedi_ep);
+               goto session_teardown;
+       }
+
+       return cls_session;
+
+session_teardown:
+       iscsi_session_teardown(cls_session);
+       return NULL;
+}
+
+static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *session = cls_session->dd_data;
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct qedi_ctx *qedi = iscsi_host_priv(shost);
+
+       qedi_destroy_cmd_pool(qedi, session);
+       iscsi_session_teardown(cls_session);
+}
+
+static struct iscsi_cls_conn *
+qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct qedi_ctx *qedi = iscsi_host_priv(shost);
+       struct iscsi_cls_conn *cls_conn;
+       struct qedi_conn *qedi_conn;
+       struct iscsi_conn *conn;
+
+       cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn),
+                                   cid);
+       if (!cls_conn) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n",
+                        cid, cls_session);
+               return NULL;
+       }
+
+       conn = cls_conn->dd_data;
+       qedi_conn = conn->dd_data;
+       qedi_conn->cls_conn = cls_conn;
+       qedi_conn->qedi = qedi;
+       qedi_conn->ep = NULL;
+       qedi_conn->active_cmd_count = 0;
+       INIT_LIST_HEAD(&qedi_conn->active_cmd_list);
+       spin_lock_init(&qedi_conn->list_lock);
+
+       if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) {
+               iscsi_conn_printk(KERN_ALERT, conn,
+                                 "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n",
+                                  cid, cls_session);
+               goto free_conn;
+       }
+
+       return cls_conn;
+
+free_conn:
+       iscsi_conn_teardown(cls_conn);
+       return NULL;
+}
+
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
+{
+       iscsi_block_session(cls_session);
+}
+
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
+{
+       iscsi_unblock_session(cls_session);
+}
+
+static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
+                                      struct qedi_conn *qedi_conn)
+{
+       u32 iscsi_cid = qedi_conn->iscsi_conn_id;
+
+       if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) {
+               iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+                                 "conn bind - entry #%d not free\n",
+                                 iscsi_cid);
+               return -EBUSY;
+       }
+
+       qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn;
+       return 0;
+}
+
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid)
+{
+       if (!qedi->cid_que.conn_cid_tbl) {
+               QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n");
+               return NULL;
+
+       } else if (iscsi_cid >= qedi->max_active_conns) {
+               QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid);
+               return NULL;
+       }
+       return qedi->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+                         struct iscsi_cls_conn *cls_conn,
+                         u64 transport_fd, int is_leading)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct qedi_ctx *qedi = iscsi_host_priv(shost);
+       struct qedi_endpoint *qedi_ep;
+       struct iscsi_endpoint *ep;
+
+       ep = iscsi_lookup_endpoint(transport_fd);
+       if (!ep)
+               return -EINVAL;
+
+       qedi_ep = ep->dd_data;
+       if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+           (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
+               return -EINVAL;
+
+       if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+               return -EINVAL;
+
+       qedi_ep->conn = qedi_conn;
+       qedi_conn->ep = qedi_ep;
+       qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
+       qedi_conn->fw_cid = qedi_ep->fw_cid;
+       qedi_conn->cmd_cleanup_req = 0;
+       qedi_conn->cmd_cleanup_cmpl = 0;
+
+       if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
+               return -EINVAL;
+
+       spin_lock_init(&qedi_conn->tmf_work_lock);
+       INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
+       init_waitqueue_head(&qedi_conn->wait_queue);
+       return 0;
+}
+
+static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
+                                 struct qedi_conn *qedi_conn)
+{
+       struct qed_iscsi_params_update *conn_info;
+       struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn;
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qedi_endpoint *qedi_ep;
+       int rval;
+
+       qedi_ep = qedi_conn->ep;
+
+       conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+       if (!conn_info) {
+               QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n");
+               return -ENOMEM;
+       }
+
+       conn_info->update_flag = 0;
+
+       if (conn->hdrdgst_en)
+               SET_FIELD(conn_info->update_flag,
+                         ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true);
+       if (conn->datadgst_en)
+               SET_FIELD(conn_info->update_flag,
+                         ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true);
+       if (conn->session->initial_r2t_en)
+               SET_FIELD(conn_info->update_flag,
+                         ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
+                         true);
+       if (conn->session->imm_data_en)
+               SET_FIELD(conn_info->update_flag,
+                         ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA,
+                         true);
+
+       conn_info->max_seq_size = conn->session->max_burst;
+       conn_info->max_recv_pdu_length = conn->max_recv_dlength;
+       conn_info->max_send_pdu_length = conn->max_xmit_dlength;
+       conn_info->first_seq_length = conn->session->first_burst;
+       conn_info->exp_stat_sn = conn->exp_statsn;
+
+       rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle,
+                                    conn_info);
+       if (rval) {
+               rval = -ENXIO;
+               QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
+               goto update_conn_err;
+       }
+
+       kfree(conn_info);
+       rval = 0;
+
+update_conn_err:
+       return rval;
+}
+
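+/*
+ * Derive the TCP MSS from the path MTU by subtracting TCP/IP (and optional
+ * VLAN / TCP-timestamp) header overhead; e.g. a 1500-byte IPv4 PMTU with
+ * the usual 20-byte TCP and IP headers yields 1460 before VLAN/timestamp
+ * deductions.
+ */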
+static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
+{
+       u16 mss = 0;
+       u16 hdrs = TCP_HDR_LEN;
+
+       if (is_ipv6)
+               hdrs += IPV6_HDR_LEN;
+       else
+               hdrs += IPV4_HDR_LEN;
+
+       if (vlan_en)
+               hdrs += VLAN_LEN;
+
+       mss = pmtu - hdrs;
+
+       if (tcp_ts_en)
+               mss -= TCP_OPTION_LEN;
+
+       if (!mss)
+               mss = DEF_MSS;
+
+       return mss;
+}
+
+static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
+{
+       struct qedi_ctx *qedi = qedi_ep->qedi;
+       struct qed_iscsi_params_offload *conn_info;
+       int rval;
+       int i;
+
+       conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+       if (!conn_info) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failed to allocate memory ep=%p\n", qedi_ep);
+               return -ENOMEM;
+       }
+
+       ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac);
+       ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac);
+
+       conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]);
+       conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]);
+
+       if (qedi_ep->ip_type == TCP_IPV4) {
+               conn_info->ip_version = 0;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "After ntohl: src_addr=%pI4, dst_addr=%pI4\n",
+                         qedi_ep->src_addr, qedi_ep->dst_addr);
+       } else {
+               for (i = 1; i < 4; i++) {
+                       conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]);
+                       conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]);
+               }
+
+               conn_info->ip_version = 1;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "After ntohl: src_addr=%pI6, dst_addr=%pI6\n",
+                         qedi_ep->src_addr, qedi_ep->dst_addr);
+       }
+
+       conn_info->src.port = qedi_ep->src_port;
+       conn_info->dst.port = qedi_ep->dst_port;
+
+       conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE;
+       conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma;
+       conn_info->vlan_id = qedi_ep->vlan_id;
+
+       SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
+       SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1);
+       SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
+       SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
+
+       conn_info->default_cq = (qedi_ep->fw_cid % 8);
+
+       conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
+       conn_info->dup_ack_theshold = 3;
+       conn_info->rcv_wnd = 65535;
+       conn_info->cwnd = DEF_MAX_CWND;
+
+       conn_info->ss_thresh = 65535;
+       conn_info->srtt = 300;
+       conn_info->rtt_var = 150;
+       conn_info->flow_label = 0;
+       conn_info->ka_timeout = DEF_KA_TIMEOUT;
+       conn_info->ka_interval = DEF_KA_INTERVAL;
+       conn_info->max_rt_time = DEF_MAX_RT_TIME;
+       conn_info->ttl = DEF_TTL;
+       conn_info->tos_or_tc = DEF_TOS;
+       conn_info->remote_port = qedi_ep->dst_port;
+       conn_info->local_port = qedi_ep->src_port;
+
+       conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
+                                      (qedi_ep->ip_type == TCP_IPV6),
+                                      1, (qedi_ep->vlan_id != 0));
+
+       conn_info->rcv_wnd_scale = 4;
+       conn_info->ts_ticks_per_second = 1000;
+       conn_info->da_timeout_value = 200;
+       conn_info->ack_frequency = 2;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "Default cq index [%d], mss [%d]\n",
+                 conn_info->default_cq, conn_info->mss);
+
+       rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
+       if (rval)
+               QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
+                        rval, qedi_ep);
+
+       kfree(conn_info);
+       return rval;
+}
+
+static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_ctx *qedi;
+       int rval;
+
+       qedi = qedi_conn->qedi;
+
+       rval = qedi_iscsi_update_conn(qedi, qedi_conn);
+       if (rval) {
+               iscsi_conn_printk(KERN_ALERT, conn,
+                                 "conn_start: FW offload conn failed.\n");
+               rval = -EINVAL;
+               goto start_err;
+       }
+
+       clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+       qedi_conn->abrt_conn = 0;
+
+       rval = iscsi_conn_start(cls_conn);
+       if (rval) {
+               iscsi_conn_printk(KERN_ALERT, conn,
+                                 "iscsi_conn_start: FW offload conn failed!!\n");
+       }
+
+start_err:
+       return rval;
+}
+
+static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct Scsi_Host *shost;
+       struct qedi_ctx *qedi;
+
+       shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+       qedi = iscsi_host_priv(shost);
+
+       qedi_conn_free_login_resources(qedi, qedi_conn);
+       iscsi_conn_teardown(cls_conn);
+}
+
+static int qedi_ep_get_param(struct iscsi_endpoint *ep,
+                            enum iscsi_param param, char *buf)
+{
+       struct qedi_endpoint *qedi_ep = ep->dd_data;
+       int len;
+
+       if (!qedi_ep)
+               return -ENOTCONN;
+
+       switch (param) {
+       case ISCSI_PARAM_CONN_PORT:
+               len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
+               break;
+       case ISCSI_PARAM_CONN_ADDRESS:
+               if (qedi_ep->ip_type == TCP_IPV4)
+                       len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
+               else
+                       len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
+               break;
+       default:
+               return -ENOTCONN;
+       }
+
+       return len;
+}
+
+static int qedi_host_get_param(struct Scsi_Host *shost,
+                              enum iscsi_host_param param, char *buf)
+{
+       struct qedi_ctx *qedi;
+       int len;
+
+       qedi = iscsi_host_priv(shost);
+
+       switch (param) {
+       case ISCSI_HOST_PARAM_HWADDRESS:
+               len = sysfs_format_mac(buf, qedi->mac, 6);
+               break;
+       case ISCSI_HOST_PARAM_NETDEV_NAME:
+               len = sprintf(buf, "host%d\n", shost->host_no);
+               break;
+       case ISCSI_HOST_PARAM_IPADDRESS:
+               if (qedi->ip_type == TCP_IPV4)
+                       len = sprintf(buf, "%pI4\n", qedi->src_ip);
+               else
+                       len = sprintf(buf, "%pI6\n", qedi->src_ip);
+               break;
+       default:
+               return iscsi_host_get_param(shost, param, buf);
+       }
+
+       return len;
+}
+
+static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+                               struct iscsi_stats *stats)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qed_iscsi_stats iscsi_stats;
+       struct Scsi_Host *shost;
+       struct qedi_ctx *qedi;
+
+       shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+       qedi = iscsi_host_priv(shost);
+       qedi_ops->get_stats(qedi->cdev, &iscsi_stats);
+
+       conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
+       conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
+       conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
+       conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
+       conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;
+
+       stats->txdata_octets = conn->txdata_octets;
+       stats->rxdata_octets = conn->rxdata_octets;
+       stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+       stats->dataout_pdus = conn->dataout_pdus_cnt;
+       stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+       stats->datain_pdus = conn->datain_pdus_cnt;
+       stats->r2t_pdus = conn->r2t_pdus_cnt;
+       stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+       stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+       stats->digest_err = 0;
+       stats->timeout_err = 0;
+       strcpy(stats->custom[0].desc, "eh_abort_cnt");
+       stats->custom[0].value = conn->eh_abort_cnt;
+       stats->custom_length = 1;
+}
+
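+/* Set up the request and response buffer descriptors for a generic
+ * (login/NOP/text) PDU: each buffer's 64-bit DMA address is split into
+ * hi/lo 32-bit words as the firmware SGE layout expects.
+ */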
+static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
+{
+       struct iscsi_sge *bd_tbl;
+
+       bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+
+       bd_tbl->sge_addr.hi =
+               (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+       bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
+       bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
+                               qedi_conn->gen_pdu.req_buf;
+       bd_tbl->reserved0 = 0;
+       bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       bd_tbl->sge_addr.hi =
+                       (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+       bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
+       bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
+       bd_tbl->reserved0 = 0;
+}
+
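+/* Transmit a non-SCSI PDU: prepare the payload BDs, then dispatch to the
+ * opcode-specific firmware send routine.
+ */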
+static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
+{
+       struct qedi_cmd *cmd = task->dd_data;
+       struct qedi_conn *qedi_conn = cmd->conn;
+       char *buf;
+       int data_len;
+       int rc = 0;
+
+       qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
+       switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+       case ISCSI_OP_LOGIN:
+               qedi_send_iscsi_login(qedi_conn, task);
+               break;
+       case ISCSI_OP_NOOP_OUT:
+               data_len = qedi_conn->gen_pdu.req_buf_size;
+               buf = qedi_conn->gen_pdu.req_buf;
+               if (data_len)
+                       rc = qedi_send_iscsi_nopout(qedi_conn, task,
+                                                   buf, data_len, 1);
+               else
+                       rc = qedi_send_iscsi_nopout(qedi_conn, task,
+                                                   NULL, 0, 1);
+               break;
+       case ISCSI_OP_LOGOUT:
+               rc = qedi_send_iscsi_logout(qedi_conn, task);
+               break;
+       case ISCSI_OP_SCSI_TMFUNC:
+               rc = qedi_iscsi_abort_work(qedi_conn, task);
+               break;
+       case ISCSI_OP_TEXT:
+               rc = qedi_send_iscsi_text(qedi_conn, task);
+               break;
+       default:
+               iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+                                 "unsupported op 0x%x\n", task->hdr->opcode);
+       }
+
+       return rc;
+}
+
+static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_cmd *cmd = task->dd_data;
+
+       memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+       qedi_conn->gen_pdu.req_buf_size = task->data_count;
+
+       if (task->data_count) {
+               memcpy(qedi_conn->gen_pdu.req_buf, task->data,
+                      task->data_count);
+               qedi_conn->gen_pdu.req_wr_ptr =
+                       qedi_conn->gen_pdu.req_buf + task->data_count;
+       }
+
+       cmd->conn = qedi_conn;
+       cmd->scsi_cmd = NULL;
+       return qedi_iscsi_send_generic_request(task);
+}
+
+static int qedi_task_xmit(struct iscsi_task *task)
+{
+       struct iscsi_conn *conn = task->conn;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_cmd *cmd = task->dd_data;
+       struct scsi_cmnd *sc = task->sc;
+
+       cmd->state = 0;
+       cmd->task = NULL;
+       cmd->use_slowpath = false;
+       cmd->conn = qedi_conn;
+       cmd->task = task;
+       cmd->io_cmd_in_list = false;
+       INIT_LIST_HEAD(&cmd->io_cmd);
+
+       if (!sc)
+               return qedi_mtask_xmit(conn, task);
+
+       cmd->scsi_cmd = sc;
+       return qedi_iscsi_send_ioreq(task);
+}
+
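+/* First stage of connection setup: allocate an endpoint and its SQ,
+ * acquire a connection handle from qed, then post an ISCSI_KEVENT_PATH_REQ
+ * message so userspace (iscsiuio) can resolve the path. The firmware
+ * offload itself is kicked off later, from qedi_set_path().
+ */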
+static struct iscsi_endpoint *
+qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+               int non_blocking)
+{
+       struct qedi_ctx *qedi;
+       struct iscsi_endpoint *ep;
+       struct qedi_endpoint *qedi_ep;
+       struct sockaddr_in *addr;
+       struct sockaddr_in6 *addr6;
+       struct qed_dev *cdev = NULL;
+       struct qedi_uio_dev *udev = NULL;
+       struct iscsi_path path_req;
+       u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+       u32 iscsi_cid = QEDI_CID_RESERVED;
+       u16 len = 0;
+       char *buf = NULL;
+       int ret;
+
+       if (!shost) {
+               ret = -ENXIO;
+               QEDI_ERR(NULL, "shost is NULL\n");
+               return ERR_PTR(ret);
+       }
+
+       if (do_not_recover) {
+               ret = -ENOMEM;
+               return ERR_PTR(ret);
+       }
+
+       qedi = iscsi_host_priv(shost);
+       cdev = qedi->cdev;
+       udev = qedi->udev;
+
+       if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
+           test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+               ret = -ENOMEM;
+               return ERR_PTR(ret);
+       }
+
+       ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
+       if (!ep) {
+               QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
+               ret = -ENOMEM;
+               return ERR_PTR(ret);
+       }
+       qedi_ep = ep->dd_data;
+       memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+       qedi_ep->state = EP_STATE_IDLE;
+       qedi_ep->iscsi_cid = (u32)-1;
+       qedi_ep->qedi = qedi;
+
+       if (dst_addr->sa_family == AF_INET) {
+               addr = (struct sockaddr_in *)dst_addr;
+               memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
+                      sizeof(struct in_addr));
+               qedi_ep->dst_port = ntohs(addr->sin_port);
+               qedi_ep->ip_type = TCP_IPV4;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "dst_addr=%pI4, dst_port=%u\n",
+                         qedi_ep->dst_addr, qedi_ep->dst_port);
+       } else if (dst_addr->sa_family == AF_INET6) {
+               addr6 = (struct sockaddr_in6 *)dst_addr;
+               memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
+                      sizeof(struct in6_addr));
+               qedi_ep->dst_port = ntohs(addr6->sin6_port);
+               qedi_ep->ip_type = TCP_IPV6;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "dst_addr=%pI6, dst_port=%u\n",
+                         qedi_ep->dst_addr, qedi_ep->dst_port);
+       } else {
+               QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
+       }
+
+       if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+               QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+               ret = -ENXIO;
+               goto ep_conn_exit;
+       }
+
+       ret = qedi_alloc_sq(qedi, qedi_ep);
+       if (ret)
+               goto ep_conn_exit;
+
+       ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
+                                    &qedi_ep->fw_cid, &qedi_ep->p_doorbell);
+
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
+               ret = -ENXIO;
+               goto ep_free_sq;
+       }
+
+       iscsi_cid = qedi_ep->handle;
+       qedi_ep->iscsi_cid = iscsi_cid;
+
+       init_waitqueue_head(&qedi_ep->ofld_wait);
+       init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
+       qedi_ep->state = EP_STATE_OFLDCONN_START;
+       qedi->ep_tbl[iscsi_cid] = qedi_ep;
+
+       buf = (char *)&path_req;
+       len = sizeof(path_req);
+       memset(&path_req, 0, len);
+
+       msg_type = ISCSI_KEVENT_PATH_REQ;
+       path_req.handle = (u64)qedi_ep->iscsi_cid;
+       path_req.pmtu = qedi->ll2_mtu;
+       qedi_ep->pmtu = qedi->ll2_mtu;
+       if (qedi_ep->ip_type == TCP_IPV4) {
+               memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
+                      sizeof(struct in_addr));
+               path_req.ip_addr_len = 4;
+       } else {
+               memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
+                      sizeof(struct in6_addr));
+               path_req.ip_addr_len = 16;
+       }
+
+       ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
+                                len);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
+                        iscsi_cid, ret);
+               goto ep_rel_conn;
+       }
+
+       atomic_inc(&qedi->num_offloads);
+       return ep;
+
+ep_rel_conn:
+       qedi->ep_tbl[iscsi_cid] = NULL;
+       ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+       if (ret)
+               QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
+                         ret);
+ep_free_sq:
+       qedi_free_sq(qedi, qedi_ep);
+ep_conn_exit:
+       iscsi_destroy_endpoint(ep);
+       return ERR_PTR(ret);
+}
+
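+/* ep_poll() return convention: 1 - offload completed, 0 - still waiting,
+ * negative - offload failed or the wait was interrupted.
+ */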
+static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+       struct qedi_endpoint *qedi_ep;
+       int ret = 0;
+
+       if (do_not_recover)
+               return 1;
+
+       qedi_ep = ep->dd_data;
+       if (qedi_ep->state == EP_STATE_IDLE ||
+           qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+               return -1;
+
+       if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
+               ret = 1;
+
+       ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
+                                              QEDI_OFLD_WAIT_STATE(qedi_ep),
+                                              msecs_to_jiffies(timeout_ms));
+
+       if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+               ret = -1;
+
+       if (ret > 0)
+               return 1;
+       else if (!ret)
+               return 0;
+       else
+               return ret;
+}
+
+static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
+{
+       struct qedi_cmd *cmd, *cmd_tmp;
+
+       list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+                                io_cmd) {
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       }
+}
+
+static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+       struct qedi_endpoint *qedi_ep;
+       struct qedi_conn *qedi_conn = NULL;
+       struct iscsi_conn *conn = NULL;
+       struct qedi_ctx *qedi;
+       int ret = 0;
+       int wait_delay = 20 * HZ;
+       int abrt_conn = 0;
+       int count = 10;
+
+       qedi_ep = ep->dd_data;
+       qedi = qedi_ep->qedi;
+
+       flush_work(&qedi_ep->offload_work);
+
+       if (qedi_ep->conn) {
+               qedi_conn = qedi_ep->conn;
+               conn = qedi_conn->cls_conn->dd_data;
+               iscsi_suspend_queue(conn);
+               abrt_conn = qedi_conn->abrt_conn;
+
+               while (count--) {
+                       if (!test_bit(QEDI_CONN_FW_CLEANUP,
+                                     &qedi_conn->flags)) {
+                               break;
+                       }
+                       msleep(1000);
+               }
+
+               if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+                       if (do_not_recover) {
+                               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                                         "Do not recover cid=0x%x\n",
+                                         qedi_ep->iscsi_cid);
+                               goto ep_exit_recover;
+                       }
+                       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                                 "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
+                                 qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
+                       qedi_cleanup_active_cmd_list(qedi_conn);
+                       goto ep_release_conn;
+               }
+       }
+
+       if (do_not_recover)
+               goto ep_exit_recover;
+
+       switch (qedi_ep->state) {
+       case EP_STATE_OFLDCONN_START:
+               goto ep_release_conn;
+       case EP_STATE_OFLDCONN_FAILED:
+               break;
+       case EP_STATE_OFLDCONN_COMPL:
+               if (unlikely(!qedi_conn))
+                       break;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
+                         qedi_conn->active_cmd_count, abrt_conn,
+                         qedi_ep->state, qedi_ep->iscsi_cid,
+                         qedi_ep->conn);
+
+               if (!qedi_conn->active_cmd_count)
+                       abrt_conn = 0;
+               else
+                       abrt_conn = 1;
+
+               if (abrt_conn)
+                       qedi_clearsq(qedi, qedi_conn, NULL);
+               break;
+       default:
+               break;
+       }
+
+       qedi_ep->state = EP_STATE_DISCONN_START;
+       ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
+       if (ret) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "destroy_conn failed returned %d\n", ret);
+       } else {
+               ret = wait_event_interruptible_timeout(
+                                       qedi_ep->tcp_ofld_wait,
+                                       (qedi_ep->state !=
+                                        EP_STATE_DISCONN_START),
+                                       wait_delay);
+               if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
+                       QEDI_WARN(&qedi->dbg_ctx,
+                                 "Destroy conn timedout or interrupted, ret=%d, delay=%d, cid=0x%x\n",
+                                 ret, wait_delay, qedi_ep->iscsi_cid);
+               }
+       }
+
+ep_release_conn:
+       ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+       if (ret)
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "release_conn returned %d, cid=0x%x\n",
+                         ret, qedi_ep->iscsi_cid);
+ep_exit_recover:
+       qedi_ep->state = EP_STATE_IDLE;
+       qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
+       qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
+       qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
+       qedi_free_sq(qedi, qedi_ep);
+
+       if (qedi_conn)
+               qedi_conn->ep = NULL;
+
+       qedi_ep->conn = NULL;
+       qedi_ep->qedi = NULL;
+       atomic_dec(&qedi->num_offloads);
+
+       iscsi_destroy_endpoint(ep);
+}
+
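+/* Tx path for the UIO interface: copy the packet staged by userspace in
+ * the mmapped buffer into an skb and send it over the qed LL2 channel.
+ */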
+static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
+{
+       struct qed_dev *cdev = qedi->cdev;
+       struct qedi_uio_dev *udev;
+       struct qedi_uio_ctrl *uctrl;
+       struct sk_buff *skb;
+       u32 len;
+       int rc = 0;
+
+       udev = qedi->udev;
+       if (!udev) {
+               QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
+               return -EINVAL;
+       }
+
+       uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
+       if (!uctrl) {
+               QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
+               return -EINVAL;
+       }
+
+       len = uctrl->host_tx_pkt_len;
+       if (!len) {
+               QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
+               return -EINVAL;
+       }
+
+       skb = alloc_skb(len, GFP_ATOMIC);
+       if (!skb) {
+               QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
+               return -EINVAL;
+       }
+
+       skb_put(skb, len);
+       memcpy(skb->data, udev->tx_pkt, len);
+       skb->ip_summed = CHECKSUM_NONE;
+
+       if (vlanid)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+
+       rc = qedi_ops->ll2->start_xmit(cdev, skb);
+       if (rc) {
+               QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
+                        rc);
+               kfree_skb(skb);
+       }
+
+       uctrl->host_tx_pkt_len = 0;
+       uctrl->hw_tx_cons++;
+
+       return rc;
+}
+
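+/* Runs on the qedi offload workqueue: request the firmware offload of the
+ * connection and wait up to 20s for the CONNECT_COMPLETE async event.
+ */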
+static void qedi_offload_work(struct work_struct *work)
+{
+       struct qedi_endpoint *qedi_ep =
+               container_of(work, struct qedi_endpoint, offload_work);
+       struct qedi_ctx *qedi;
+       int wait_delay = 20 * HZ;
+       int ret;
+
+       qedi = qedi_ep->qedi;
+
+       ret = qedi_iscsi_offload_conn(qedi_ep);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+                        qedi_ep->iscsi_cid, qedi_ep, ret);
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               return;
+       }
+
+       ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+                                              (qedi_ep->state ==
+                                              EP_STATE_OFLDCONN_COMPL),
+                                              wait_delay);
+       if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+                        qedi_ep->iscsi_cid, qedi_ep);
+       }
+}
+
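+/* Invoked when userspace answers the PATH_REQ: record the resolved MAC
+ * addresses, VLAN and MTU, allocate a local TCP port and queue
+ * qedi_offload_work(). A QEDI_PATH_HANDLE request instead signals that a
+ * userspace packet is ready to transmit (qedi_data_avail()).
+ */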
+static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
+{
+       struct qedi_ctx *qedi;
+       struct qedi_endpoint *qedi_ep;
+       int ret = 0;
+       u32 iscsi_cid;
+       u16 port_id = 0;
+
+       if (!shost) {
+               ret = -ENXIO;
+               QEDI_ERR(NULL, "shost is NULL\n");
+               return ret;
+       }
+
+       if (strcmp(shost->hostt->proc_name, "qedi")) {
+               ret = -ENXIO;
+               QEDI_ERR(NULL, "shost %s is invalid\n",
+                        shost->hostt->proc_name);
+               return ret;
+       }
+
+       qedi = iscsi_host_priv(shost);
+       if (path_data->handle == QEDI_PATH_HANDLE) {
+               ret = qedi_data_avail(qedi, path_data->vlan_id);
+               goto set_path_exit;
+       }
+
+       iscsi_cid = (u32)path_data->handle;
+       qedi_ep = qedi->ep_tbl[iscsi_cid];
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+
+       if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
+               QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+               ret = -EIO;
+               goto set_path_exit;
+       }
+
+       ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
+       ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);
+
+       qedi_ep->vlan_id = path_data->vlan_id;
+       if (path_data->pmtu < DEF_PATH_MTU) {
+               qedi_ep->pmtu = qedi->ll2_mtu;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "MTU cannot be %u, using default MTU %u\n",
+                          path_data->pmtu, qedi_ep->pmtu);
+       }
+
+       if (path_data->pmtu != qedi->ll2_mtu) {
+               if (path_data->pmtu > JUMBO_MTU) {
+                       ret = -EINVAL;
+                       QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
+                       goto set_path_exit;
+               }
+
+               qedi_reset_host_mtu(qedi, path_data->pmtu);
+               qedi_ep->pmtu = qedi->ll2_mtu;
+       }
+
+       port_id = qedi_ep->src_port;
+       if (port_id >= QEDI_LOCAL_PORT_MIN &&
+           port_id < QEDI_LOCAL_PORT_MAX) {
+               if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
+                       port_id = 0;
+       } else {
+               port_id = 0;
+       }
+
+       if (!port_id) {
+               port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
+               if (port_id == QEDI_LOCAL_PORT_INVALID) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Failed to allocate port id for iscsi_cid=0x%x\n",
+                                iscsi_cid);
+                       ret = -ENOMEM;
+                       goto set_path_exit;
+               }
+       }
+
+       qedi_ep->src_port = port_id;
+
+       if (qedi_ep->ip_type == TCP_IPV4) {
+               memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
+                      sizeof(struct in_addr));
+               memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
+                      sizeof(struct in_addr));
+               qedi->ip_type = TCP_IPV4;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
+                         qedi_ep->src_addr, qedi_ep->src_port,
+                         qedi_ep->dst_addr, qedi_ep->dst_port);
+       } else {
+               memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
+                      sizeof(struct in6_addr));
+               memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
+                      sizeof(struct in6_addr));
+               qedi->ip_type = TCP_IPV6;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
+                         qedi_ep->src_addr, qedi_ep->src_port,
+                         qedi_ep->dst_addr, qedi_ep->dst_port);
+       }
+
+       INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
+       queue_work(qedi->offload_thread, &qedi_ep->offload_work);
+
+       ret = 0;
+
+set_path_exit:
+       return ret;
+}
+
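+/* Sysfs visibility of the iSCSI transport attributes: 0444 exposes an
+ * attribute read-only, 0 hides it.
+ */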
+static umode_t qedi_attr_is_visible(int param_type, int param)
+{
+       switch (param_type) {
+       case ISCSI_HOST_PARAM:
+               switch (param) {
+               case ISCSI_HOST_PARAM_NETDEV_NAME:
+               case ISCSI_HOST_PARAM_HWADDRESS:
+               case ISCSI_HOST_PARAM_IPADDRESS:
+                       return 0444;
+               default:
+                       return 0;
+               }
+       case ISCSI_PARAM:
+               switch (param) {
+               case ISCSI_PARAM_MAX_RECV_DLENGTH:
+               case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+               case ISCSI_PARAM_HDRDGST_EN:
+               case ISCSI_PARAM_DATADGST_EN:
+               case ISCSI_PARAM_CONN_ADDRESS:
+               case ISCSI_PARAM_CONN_PORT:
+               case ISCSI_PARAM_EXP_STATSN:
+               case ISCSI_PARAM_PERSISTENT_ADDRESS:
+               case ISCSI_PARAM_PERSISTENT_PORT:
+               case ISCSI_PARAM_PING_TMO:
+               case ISCSI_PARAM_RECV_TMO:
+               case ISCSI_PARAM_INITIAL_R2T_EN:
+               case ISCSI_PARAM_MAX_R2T:
+               case ISCSI_PARAM_IMM_DATA_EN:
+               case ISCSI_PARAM_FIRST_BURST:
+               case ISCSI_PARAM_MAX_BURST:
+               case ISCSI_PARAM_PDU_INORDER_EN:
+               case ISCSI_PARAM_DATASEQ_INORDER_EN:
+               case ISCSI_PARAM_ERL:
+               case ISCSI_PARAM_TARGET_NAME:
+               case ISCSI_PARAM_TPGT:
+               case ISCSI_PARAM_USERNAME:
+               case ISCSI_PARAM_PASSWORD:
+               case ISCSI_PARAM_USERNAME_IN:
+               case ISCSI_PARAM_PASSWORD_IN:
+               case ISCSI_PARAM_FAST_ABORT:
+               case ISCSI_PARAM_ABORT_TMO:
+               case ISCSI_PARAM_LU_RESET_TMO:
+               case ISCSI_PARAM_TGT_RESET_TMO:
+               case ISCSI_PARAM_IFACE_NAME:
+               case ISCSI_PARAM_INITIATOR_NAME:
+               case ISCSI_PARAM_BOOT_ROOT:
+               case ISCSI_PARAM_BOOT_NIC:
+               case ISCSI_PARAM_BOOT_TARGET:
+                       return 0444;
+               default:
+                       return 0;
+               }
+       }
+
+       return 0;
+}
+
+static void qedi_cleanup_task(struct iscsi_task *task)
+{
+       if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+               QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
+                         atomic_read(&task->refcount));
+               return;
+       }
+
+       qedi_iscsi_unmap_sg_list(task->dd_data);
+}
+
+struct iscsi_transport qedi_iscsi_transport = {
+       .owner = THIS_MODULE,
+       .name = QEDI_MODULE_NAME,
+       .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
+               CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
+       .create_session = qedi_session_create,
+       .destroy_session = qedi_session_destroy,
+       .create_conn = qedi_conn_create,
+       .bind_conn = qedi_conn_bind,
+       .start_conn = qedi_conn_start,
+       .stop_conn = iscsi_conn_stop,
+       .destroy_conn = qedi_conn_destroy,
+       .set_param = iscsi_set_param,
+       .get_ep_param = qedi_ep_get_param,
+       .get_conn_param = iscsi_conn_get_param,
+       .get_session_param = iscsi_session_get_param,
+       .get_host_param = qedi_host_get_param,
+       .send_pdu = iscsi_conn_send_pdu,
+       .get_stats = qedi_conn_get_stats,
+       .xmit_task = qedi_task_xmit,
+       .cleanup_task = qedi_cleanup_task,
+       .session_recovery_timedout = iscsi_session_recovery_timedout,
+       .ep_connect = qedi_ep_connect,
+       .ep_poll = qedi_ep_poll,
+       .ep_disconnect = qedi_ep_disconnect,
+       .set_path = qedi_set_path,
+       .attr_is_visible = qedi_attr_is_visible,
+};
+
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+                             struct qedi_conn *qedi_conn)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_conn *conn;
+
+       cls_conn = qedi_conn->cls_conn;
+       conn = cls_conn->dd_data;
+       cls_sess = iscsi_conn_to_session(cls_conn);
+
+       if (iscsi_is_session_online(cls_sess)) {
+               qedi_conn->abrt_conn = 1;
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failing connection, state=0x%x, cid=0x%x\n",
+                        conn->session->state, qedi_conn->iscsi_conn_id);
+               iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
+                                  ISCSI_ERR_CONN_FAILED);
+       }
+}
+
+static const struct {
+       enum iscsi_error_types error_code;
+       char *err_string;
+} qedi_iscsi_error[] = {
+       { ISCSI_STATUS_NONE,
+         "tcp_error none"
+       },
+       { ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+         "task cid mismatch"
+       },
+       { ISCSI_CONN_ERROR_TASK_NOT_VALID,
+         "invalid task"
+       },
+       { ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+         "rq ring full"
+       },
+       { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+         "cmdq ring full"
+       },
+       { ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+         "sge caching failed"
+       },
+       { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+         "hdr digest error"
+       },
+       { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+         "local cmpl error"
+       },
+       { ISCSI_CONN_ERROR_DATA_OVERRUN,
+         "invalid task"
+       },
+       { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+         "out of sge error"
+       },
+       { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+         "tcp seg ip options error"
+       },
+       { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+         "tcp ip fragment error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+         "AHS len protocol error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+         "itt out of range error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+         "data seg more than pdu size"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+         "invalid opcode"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+         "invalid opcode before update"
+       },
+       { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+         "unexpected opcode"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+         "r2t carries no data"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+         "data sn error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+         "data TTT error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+         "r2t TTT error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+         "buffer offset error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+         "buffer offset ooo"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+         "data seg len 0"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+         "data xer len error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+         "data xer len1 error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+         "data xer len2 error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+         "protocol lun error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+         "f bit zero error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+         "exp stat sn error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+         "dsl not zero error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+         "invalid dsl"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+         "data seg len too big"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+         "outstanding r2t count error"
+       },
+       { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+         "sense datalen error"
+       },
+};
+
+char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
+{
+       int i;
+       char *msg = NULL;
+
+       for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
+               if (qedi_iscsi_error[i].error_code == err_code) {
+                       msg = qedi_iscsi_error[i].err_string;
+                       break;
+               }
+       }
+       return msg;
+}
+
+void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+       struct qedi_conn *qedi_conn;
+       struct qedi_ctx *qedi;
+       char warn_notice[] = "iscsi_warning";
+       char error_notice[] = "iscsi_error";
+       char unknown_msg[] = "Unknown error";
+       char *message;
+       int need_recovery = 0;
+       u32 err_mask = 0;
+       char *msg;
+
+       if (!ep)
+               return;
+
+       qedi_conn = ep->conn;
+       if (!qedi_conn)
+               return;
+
+       qedi = ep->qedi;
+
+       QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
+                data->error_code);
+
+       if (err_mask) {
+               need_recovery = 0;
+               message = warn_notice;
+       } else {
+               need_recovery = 1;
+               message = error_notice;
+       }
+
+       msg = qedi_get_iscsi_error(data->error_code);
+       if (!msg) {
+               need_recovery = 0;
+               msg = unknown_msg;
+       }
+
+       iscsi_conn_printk(KERN_ALERT,
+                         qedi_conn->cls_conn->dd_data,
+                         "qedi: %s - %s\n", message, msg);
+
+       if (need_recovery)
+               qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+       struct qedi_conn *qedi_conn;
+
+       if (!ep)
+               return;
+
+       qedi_conn = ep->conn;
+       if (!qedi_conn)
+               return;
+
+       QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
+                data->error_code);
+
+       qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
new file mode 100644 (file)
index 0000000..d3c06bb
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_ISCSI_H_
+#define _QEDI_ISCSI_H_
+
+#include <linux/socket.h>
+#include <linux/completion.h>
+#include "qedi.h"
+
+#define ISCSI_MAX_SESS_PER_HBA 4096
+
+#define DEF_KA_TIMEOUT         7200000
+#define DEF_KA_INTERVAL                10000
+#define DEF_KA_MAX_PROBE_COUNT 10
+#define DEF_TOS                        0
+#define DEF_TTL                        0xfe
+#define DEF_SND_SEQ_SCALE      0
+#define DEF_RCV_BUF            0xffff
+#define DEF_SND_BUF            0xffff
+#define DEF_SEED               0
+#define DEF_MAX_RT_TIME                8000
+#define DEF_MAX_DA_COUNT        2
+#define DEF_SWS_TIMER          1000
+#define DEF_MAX_CWND           2
+#define DEF_PATH_MTU           1500
+#define DEF_MSS                        1460
+#define DEF_LL2_MTU            1560
+#define JUMBO_MTU              9000
+
+#define MIN_MTU         576 /* rfc 793 */
+#define IPV4_HDR_LEN    20
+#define IPV6_HDR_LEN    40
+#define TCP_HDR_LEN     20
+#define TCP_OPTION_LEN  12
+#define VLAN_LEN         4
+
+enum {
+       EP_STATE_IDLE                   = 0x0,
+       EP_STATE_ACQRCONN_START         = 0x1,
+       EP_STATE_ACQRCONN_COMPL         = 0x2,
+       EP_STATE_OFLDCONN_START         = 0x4,
+       EP_STATE_OFLDCONN_COMPL         = 0x8,
+       EP_STATE_DISCONN_START          = 0x10,
+       EP_STATE_DISCONN_COMPL          = 0x20,
+       EP_STATE_CLEANUP_START          = 0x40,
+       EP_STATE_CLEANUP_CMPL           = 0x80,
+       EP_STATE_TCP_FIN_RCVD           = 0x100,
+       EP_STATE_TCP_RST_RCVD           = 0x200,
+       EP_STATE_LOGOUT_SENT            = 0x400,
+       EP_STATE_LOGOUT_RESP_RCVD       = 0x800,
+       EP_STATE_CLEANUP_FAILED         = 0x1000,
+       EP_STATE_OFLDCONN_FAILED        = 0x2000,
+       EP_STATE_CONNECT_FAILED         = 0x4000,
+       EP_STATE_DISCONN_TIMEDOUT       = 0x8000,
+};
+
+struct qedi_conn;
+
+struct qedi_endpoint {
+       struct qedi_ctx *qedi;
+       u32 dst_addr[4];
+       u32 src_addr[4];
+       u16 src_port;
+       u16 dst_port;
+       u16 vlan_id;
+       u16 pmtu;
+       u8 src_mac[ETH_ALEN];
+       u8 dst_mac[ETH_ALEN];
+       u8 ip_type;
+       int state;
+       wait_queue_head_t ofld_wait;
+       wait_queue_head_t tcp_ofld_wait;
+       u32 iscsi_cid;
+       /* identifier of the connection from qed */
+       u32 handle;
+       u32 fw_cid;
+       void __iomem *p_doorbell;
+
+       /* Send queue management */
+       struct iscsi_wqe *sq;
+       dma_addr_t sq_dma;
+
+       u16 sq_prod_idx;
+       u16 fw_sq_prod_idx;
+       u16 sq_con_idx;
+       u32 sq_mem_size;
+
+       void *sq_pbl;
+       dma_addr_t sq_pbl_dma;
+       u32 sq_pbl_size;
+       struct qedi_conn *conn;
+       struct work_struct offload_work;
+};
+
+#define QEDI_SQ_WQES_MIN       16
+
+struct qedi_io_bdt {
+       struct iscsi_sge *sge_tbl;
+       dma_addr_t sge_tbl_dma;
+       u16 sge_valid;
+};
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf:            driver buffer used to stage payload associated with
+ *                      the login request
+ * @req_dma_addr:       dma address for iscsi login request payload buffer
+ * @req_buf_size:       actual login request payload length
+ * @req_wr_ptr:         pointer into login request buffer when next data is
+ *                      to be written
+ * @resp_hdr:           iscsi header where iscsi login response header is to
+ *                      be recreated
+ * @resp_buf:           buffer to stage login response payload
+ * @resp_dma_addr:      login response payload buffer dma address
+ * @resp_buf_size:      login response payload length
+ * @resp_wr_ptr:        pointer into login response buffer when next data is
+ *                      to be written
+ * @req_bd_tbl:         iscsi login request payload BD table
+ * @req_bd_dma:         login request BD table dma address
+ * @resp_bd_tbl:        iscsi login response payload BD table
+ * @resp_bd_dma:        login response BD table dma address
+ *
+ * This structure defines buffer info for generic PDUs such as iSCSI Login,
+ *      Logout and NOP.
+ */
+struct generic_pdu_resc {
+       char *req_buf;
+       dma_addr_t req_dma_addr;
+       u32 req_buf_size;
+       char *req_wr_ptr;
+       struct iscsi_hdr resp_hdr;
+       char *resp_buf;
+       dma_addr_t resp_dma_addr;
+       u32 resp_buf_size;
+       char *resp_wr_ptr;
+       char *req_bd_tbl;
+       dma_addr_t req_bd_dma;
+       char *resp_bd_tbl;
+       dma_addr_t resp_bd_dma;
+};
+
+struct qedi_conn {
+       struct iscsi_cls_conn *cls_conn;
+       struct qedi_ctx *qedi;
+       struct qedi_endpoint *ep;
+       struct list_head active_cmd_list;
+       spinlock_t list_lock;           /* internal conn lock */
+       u32 active_cmd_count;
+       u32 cmd_cleanup_req;
+       u32 cmd_cleanup_cmpl;
+
+       u32 iscsi_conn_id;
+       int itt;
+       int abrt_conn;
+#define QEDI_CID_RESERVED      0x5AFF
+       u32 fw_cid;
+       /*
+        * Buffer for login negotiation process
+        */
+       struct generic_pdu_resc gen_pdu;
+
+       struct list_head tmf_work_list;
+       wait_queue_head_t wait_queue;
+       spinlock_t tmf_work_lock;       /* tmf work lock */
+       unsigned long flags;
+#define QEDI_CONN_FW_CLEANUP   1
+};
+
+struct qedi_cmd {
+       struct list_head io_cmd;
+       bool io_cmd_in_list;
+       struct iscsi_hdr hdr;
+       struct qedi_conn *conn;
+       struct scsi_cmnd *scsi_cmd;
+       struct scatterlist *sg;
+       struct qedi_io_bdt io_tbl;
+       struct iscsi_task_context request;
+       unsigned char *sense_buffer;
+       dma_addr_t sense_buffer_dma;
+       u16 task_id;
+
+       /* field populated for tmf work queue */
+       struct iscsi_task *task;
+       struct work_struct tmf_work;
+       int state;
+#define CLEANUP_WAIT   1
+#define CLEANUP_RECV   2
+#define CLEANUP_WAIT_FAILED    3
+#define CLEANUP_NOT_REQUIRED   4
+#define LUN_RESET_RESPONSE_RECEIVED    5
+#define RESPONSE_RECEIVED      6
+
+       int type;
+#define TYPEIO         1
+#define TYPERESET      2
+
+       struct qedi_work_map *list_tmf_work;
+       /* slowpath management */
+       bool use_slowpath;
+
+       struct iscsi_tm_rsp *tmf_resp_buf;
+       struct qedi_work cqe_work;
+};
+
+struct qedi_work_map {
+       struct list_head list;
+       struct qedi_cmd *qedi_cmd;
+       int rtid;
+
+       int state;
+#define QEDI_WORK_QUEUED       1
+#define QEDI_WORK_SCHEDULED    2
+#define QEDI_WORK_EXIT         3
+
+       struct work_struct *ptr_tmf_work;
+};
+
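+/* ITT encoding: the low 16 bits carry the driver task id, the upper 16
+ * bits the iSCSI layer's ITT, which qedi_get_itt() recovers from a CQE.
+ */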
+#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
+#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
+
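+/* Offload wait condition for qedi_ep_poll(): true once the firmware has
+ * either completed or failed the connection offload.
+ */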
+#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \
+                               (q)->state == EP_STATE_OFLDCONN_COMPL)
+
+#endif /* _QEDI_ISCSI_H_ */
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
new file mode 100644 (file)
index 0000000..19ead8d
--- /dev/null
@@ -0,0 +1,2127 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <scsi/iscsi_if.h>
+#include <linux/inet.h>
+#include <net/arp.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+
+static uint qedi_fw_debug;
+module_param(qedi_fw_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
+
+uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
+module_param(qedi_dbg_log, uint, 0644);
+MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
+
+uint qedi_io_tracing;
+module_param(qedi_io_tracing, uint, 0644);
+MODULE_PARM_DESC(qedi_io_tracing,
+                " Enable logging of SCSI requests/completions into trace buffer. (default off).");
+
+const struct qed_iscsi_ops *qedi_ops;
+static struct scsi_transport_template *qedi_scsi_transport;
+static struct pci_driver qedi_pci_driver;
+static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
+static LIST_HEAD(qedi_udev_list);
+/* Static function declaration */
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
+static void qedi_free_global_queues(struct qedi_ctx *qedi);
+static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
+
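+/* Async event callback registered with qed: completion events wake the
+ * endpoint wait queues, error events are routed to the recovery handlers.
+ */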
+static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
+{
+       struct qedi_ctx *qedi;
+       struct qedi_endpoint *qedi_ep;
+       struct async_data *data;
+       int rval = 0;
+
+       if (!context || !fw_handle) {
+               QEDI_ERR(NULL, "Recv event with ctx NULL\n");
+               return -EINVAL;
+       }
+
+       qedi = (struct qedi_ctx *)context;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
+
+       data = (struct async_data *)fw_handle;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
+                  data->cid, data->itid, data->error_code,
+                  data->fw_debug_param);
+
+       qedi_ep = qedi->ep_tbl[data->cid];
+
+       if (!qedi_ep) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Cannot process event, ep already disconnected, cid=0x%x\n",
+                          data->cid);
+               WARN_ON(1);
+               return -ENODEV;
+       }
+
+       switch (fw_event_code) {
+       case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
+               if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+                       qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
+
+               wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+               break;
+       case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
+               qedi_ep->state = EP_STATE_DISCONN_COMPL;
+               wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+               break;
+       case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
+               qedi_process_iscsi_error(qedi_ep, data);
+               break;
+       case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
+       case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
+       case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
+       case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
+       case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
+       case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
+       case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
+               qedi_process_tcp_error(qedi_ep, data);
+               break;
+       default:
+               QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
+                        fw_event_code);
+       }
+
+       return rval;
+}
+
+static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+       struct qedi_uio_dev *udev = uinfo->priv;
+       struct qedi_ctx *qedi = udev->qedi;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (udev->uio_dev != -1)
+               return -EBUSY;
+
+       rtnl_lock();
+       udev->uio_dev = iminor(inode);
+       qedi_reset_uio_rings(udev);
+       set_bit(UIO_DEV_OPENED, &qedi->flags);
+       rtnl_unlock();
+
+       return 0;
+}
+
+static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+       struct qedi_uio_dev *udev = uinfo->priv;
+       struct qedi_ctx *qedi = udev->qedi;
+
+       udev->uio_dev = -1;
+       clear_bit(UIO_DEV_OPENED, &qedi->flags);
+       qedi_ll2_free_skbs(qedi);
+       return 0;
+}
+
+static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
+{
+       if (udev->ll2_ring) {
+               free_page((unsigned long)udev->ll2_ring);
+               udev->ll2_ring = NULL;
+       }
+
+       if (udev->ll2_buf) {
+               free_pages((unsigned long)udev->ll2_buf, 2);
+               udev->ll2_buf = NULL;
+       }
+}
+
+static void __qedi_free_uio(struct qedi_uio_dev *udev)
+{
+       uio_unregister_device(&udev->qedi_uinfo);
+
+       __qedi_free_uio_rings(udev);
+
+       pci_dev_put(udev->pdev);
+       kfree(udev->uctrl);
+       kfree(udev);
+}
+
+static void qedi_free_uio(struct qedi_uio_dev *udev)
+{
+       if (!udev)
+               return;
+
+       list_del_init(&udev->list);
+       __qedi_free_uio(udev);
+}
+
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
+{
+       struct qedi_ctx *qedi = NULL;
+       struct qedi_uio_ctrl *uctrl = NULL;
+
+       qedi = udev->qedi;
+       uctrl = udev->uctrl;
+
+       spin_lock_bh(&qedi->ll2_lock);
+       uctrl->host_rx_cons = 0;
+       uctrl->hw_rx_prod = 0;
+       uctrl->hw_rx_bd_prod = 0;
+       uctrl->host_rx_bd_cons = 0;
+
+       memset(udev->ll2_ring, 0, udev->ll2_ring_size);
+       memset(udev->ll2_buf, 0, udev->ll2_buf_size);
+       spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
+{
+       int rc = 0;
+
+       if (udev->ll2_ring || udev->ll2_buf)
+               return rc;
+
+       /* Allocate memory for the LL2 ring */
+       udev->ll2_ring_size = QEDI_PAGE_SIZE;
+       udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
+       if (!udev->ll2_ring) {
+               rc = -ENOMEM;
+               goto exit_alloc_ring;
+       }
+
+       /* Allocate memory for the Tx/Rx packet buffer */
+       udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
+       udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
+       udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
+                                                __GFP_ZERO, 2);
+       if (!udev->ll2_buf) {
+               rc = -ENOMEM;
+               goto exit_alloc_buf;
+       }
+       return rc;
+
+exit_alloc_buf:
+       free_page((unsigned long)udev->ll2_ring);
+       udev->ll2_ring = NULL;
+exit_alloc_ring:
+       return rc;
+}
+
+static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
+{
+       struct qedi_uio_dev *udev = NULL;
+       struct qedi_uio_ctrl *uctrl = NULL;
+       int rc = 0;
+
+       list_for_each_entry(udev, &qedi_udev_list, list) {
+               if (udev->pdev == qedi->pdev) {
+                       udev->qedi = qedi;
+                       if (__qedi_alloc_uio_rings(udev)) {
+                               udev->qedi = NULL;
+                               return -ENOMEM;
+                       }
+                       qedi->udev = udev;
+                       return 0;
+               }
+       }
+
+       udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+       if (!udev) {
+               rc = -ENOMEM;
+               goto err_udev;
+       }
+
+       uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
+       if (!uctrl) {
+               rc = -ENOMEM;
+               goto err_uctrl;
+       }
+
+       udev->uio_dev = -1;
+
+       udev->qedi = qedi;
+       udev->pdev = qedi->pdev;
+       udev->uctrl = uctrl;
+
+       rc = __qedi_alloc_uio_rings(udev);
+       if (rc)
+               goto err_uio_rings;
+
+       list_add(&udev->list, &qedi_udev_list);
+
+       pci_dev_get(udev->pdev);
+       qedi->udev = udev;
+
+       udev->tx_pkt = udev->ll2_buf;
+       udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
+       return 0;
+
+ err_uio_rings:
+       kfree(uctrl);
+ err_uctrl:
+       kfree(udev);
+ err_udev:
+       return -ENOMEM;
+}
+
+static int qedi_init_uio(struct qedi_ctx *qedi)
+{
+       struct qedi_uio_dev *udev = qedi->udev;
+       struct uio_info *uinfo;
+       int ret = 0;
+
+       if (!udev)
+               return -ENOMEM;
+
+       uinfo = &udev->qedi_uinfo;
+
+       uinfo->mem[0].addr = (unsigned long)udev->uctrl;
+       uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
+       uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
+       uinfo->mem[1].size = udev->ll2_ring_size;
+       uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
+       uinfo->mem[2].size = udev->ll2_buf_size;
+       uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->name = "qedi_uio";
+       uinfo->version = QEDI_MODULE_VERSION;
+       uinfo->irq = UIO_IRQ_CUSTOM;
+
+       uinfo->open = qedi_uio_open;
+       uinfo->release = qedi_uio_close;
+
+       if (udev->uio_dev == -1) {
+               if (!uinfo->priv) {
+                       uinfo->priv = udev;
+
+                       ret = uio_register_device(&udev->pdev->dev, uinfo);
+                       if (ret) {
+                               QEDI_ERR(&qedi->dbg_ctx,
+                                        "UIO registration failed\n");
+                       }
+               }
+       }
+
+       return ret;
+}
+
+static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
+                                 struct qed_sb_info *sb_info, u16 sb_id)
+{
+       struct status_block *sb_virt;
+       dma_addr_t sb_phys;
+       int ret;
+
+       sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
+                                    sizeof(struct status_block), &sb_phys,
+                                    GFP_KERNEL);
+       if (!sb_virt) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Status block allocation failed for id = %d.\n",
+                         sb_id);
+               return -ENOMEM;
+       }
+
+       ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
+                                      sb_id, QED_SB_TYPE_STORAGE);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Status block initialization failed for id = %d.\n",
+                         sb_id);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void qedi_free_sb(struct qedi_ctx *qedi)
+{
+       struct qed_sb_info *sb_info;
+       int id;
+
+       for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+               sb_info = &qedi->sb_array[id];
+               if (sb_info->sb_virt)
+                       dma_free_coherent(&qedi->pdev->dev,
+                                         sizeof(*sb_info->sb_virt),
+                                         (void *)sb_info->sb_virt,
+                                         sb_info->sb_phys);
+       }
+}
+
+static void qedi_free_fp(struct qedi_ctx *qedi)
+{
+       kfree(qedi->fp_array);
+       kfree(qedi->sb_array);
+}
+
+static void qedi_destroy_fp(struct qedi_ctx *qedi)
+{
+       qedi_free_sb(qedi);
+       qedi_free_fp(qedi);
+}
+
+static int qedi_alloc_fp(struct qedi_ctx *qedi)
+{
+       int ret = 0;
+
+       qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+                                sizeof(struct qedi_fastpath), GFP_KERNEL);
+       if (!qedi->fp_array) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "fastpath fp array allocation failed.\n");
+               return -ENOMEM;
+       }
+
+       qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+                                sizeof(struct qed_sb_info), GFP_KERNEL);
+       if (!qedi->sb_array) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "fastpath sb array allocation failed.\n");
+               ret = -ENOMEM;
+               goto free_fp;
+       }
+
+       return ret;
+
+free_fp:
+       qedi_free_fp(qedi);
+       return ret;
+}
+
+static void qedi_int_fp(struct qedi_ctx *qedi)
+{
+       struct qedi_fastpath *fp;
+       int id;
+
+       memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+              sizeof(*qedi->fp_array));
+       memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+              sizeof(*qedi->sb_array));
+
+       for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+               fp = &qedi->fp_array[id];
+               fp->sb_info = &qedi->sb_array[id];
+               fp->sb_id = id;
+               fp->qedi = qedi;
+               snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+                        "qedi", id);
+
+               /* fp_array[i] is used as the IRQ cookie, so initialize
+                * here any data that is needed in interrupt context.
+                */
+       }
+}
+
+static int qedi_prepare_fp(struct qedi_ctx *qedi)
+{
+       struct qedi_fastpath *fp;
+       int id, ret = 0;
+
+       ret = qedi_alloc_fp(qedi);
+       if (ret)
+               goto err;
+
+       qedi_int_fp(qedi);
+
+       for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+               fp = &qedi->fp_array[id];
+               ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
+               if (ret) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "SB allocation and initialization failed.\n");
+                       ret = -EIO;
+                       goto err_init;
+               }
+       }
+
+       return 0;
+
+err_init:
+       qedi_free_sb(qedi);
+       qedi_free_fp(qedi);
+err:
+       return ret;
+}
+
+static int qedi_setup_cid_que(struct qedi_ctx *qedi)
+{
+       int i;
+
+       qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
+                                                  sizeof(u32), GFP_KERNEL);
+       if (!qedi->cid_que.cid_que_base)
+               return -ENOMEM;
+
+       qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
+                                                  sizeof(struct qedi_conn *),
+                                                  GFP_KERNEL);
+       if (!qedi->cid_que.conn_cid_tbl) {
+               kfree(qedi->cid_que.cid_que_base);
+               qedi->cid_que.cid_que_base = NULL;
+               return -ENOMEM;
+       }
+
+       qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
+       qedi->cid_que.cid_q_prod_idx = 0;
+       qedi->cid_que.cid_q_cons_idx = 0;
+       qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
+       qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
+
+       for (i = 0; i < qedi->max_active_conns; i++) {
+               qedi->cid_que.cid_que[i] = i;
+               qedi->cid_que.conn_cid_tbl[i] = NULL;
+       }
+
+       return 0;
+}
+
+static void qedi_release_cid_que(struct qedi_ctx *qedi)
+{
+       kfree(qedi->cid_que.cid_que_base);
+       qedi->cid_que.cid_que_base = NULL;
+
+       kfree(qedi->cid_que.conn_cid_tbl);
+       qedi->cid_que.conn_cid_tbl = NULL;
+}
+
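+/* The local port id table is a bitmap with one bit per TCP port in the
+ * configured range.
+ */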
+static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
+                           u16 start_id, u16 next)
+{
+       id_tbl->start = start_id;
+       id_tbl->max = size;
+       id_tbl->next = next;
+       spin_lock_init(&id_tbl->lock);
+       id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+       if (!id_tbl->table)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
+{
+       kfree(id_tbl->table);
+       id_tbl->table = NULL;
+}
+
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+       int ret = -1;
+
+       id -= id_tbl->start;
+       if (id >= id_tbl->max)
+               return ret;
+
+       spin_lock(&id_tbl->lock);
+       if (!test_bit(id, id_tbl->table)) {
+               set_bit(id, id_tbl->table);
+               ret = 0;
+       }
+       spin_unlock(&id_tbl->lock);
+       return ret;
+}
+
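+/* Allocate the next free id, scanning from ->next and wrapping around to
+ * the start of the bitmap; the wrap mask below assumes the table size is
+ * a power of two.
+ */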
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
+{
+       u16 id;
+
+       spin_lock(&id_tbl->lock);
+       id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+       if (id >= id_tbl->max) {
+               id = QEDI_LOCAL_PORT_INVALID;
+               if (id_tbl->next != 0) {
+                       id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+                       if (id >= id_tbl->next)
+                               id = QEDI_LOCAL_PORT_INVALID;
+               }
+       }
+
+       if (id < id_tbl->max) {
+               set_bit(id, id_tbl->table);
+               id_tbl->next = (id + 1) & (id_tbl->max - 1);
+               id += id_tbl->start;
+       }
+
+       spin_unlock(&id_tbl->lock);
+
+       return id;
+}
+
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+       if (id == QEDI_LOCAL_PORT_INVALID)
+               return;
+
+       id -= id_tbl->start;
+       if (id >= id_tbl->max)
+               return;
+
+       clear_bit(id, id_tbl->table);
+}
+
+static void qedi_cm_free_mem(struct qedi_ctx *qedi)
+{
+       kfree(qedi->ep_tbl);
+       qedi->ep_tbl = NULL;
+       qedi_free_id_tbl(&qedi->lcl_port_tbl);
+}
+
+static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
+{
+       u16 port_id;
+
+       qedi->ep_tbl = kzalloc((qedi->max_active_conns *
+                               sizeof(struct qedi_endpoint *)), GFP_KERNEL);
+       if (!qedi->ep_tbl)
+               return -ENOMEM;
+       port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+       if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
+                            QEDI_LOCAL_PORT_MIN, port_id)) {
+               qedi_cm_free_mem(qedi);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
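+/*
+ * The qedi_ctx lives in the Scsi_Host private area, so its lifetime is
+ * tied to the host: iscsi_host_free() releases both together.
+ */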
+static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
+{
+       struct Scsi_Host *shost;
+       struct qedi_ctx *qedi = NULL;
+
+       shost = iscsi_host_alloc(&qedi_host_template,
+                                sizeof(struct qedi_ctx), 0);
+       if (!shost) {
+               QEDI_ERR(NULL, "Could not allocate shost\n");
+               goto exit_setup_shost;
+       }
+
+       shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+       shost->max_channel = 0;
+       shost->max_lun = ~0;
+       shost->max_cmd_len = 16;
+       shost->transportt = qedi_scsi_transport;
+
+       qedi = iscsi_host_priv(shost);
+       memset(qedi, 0, sizeof(*qedi));
+       qedi->shost = shost;
+       qedi->dbg_ctx.host_no = shost->host_no;
+       qedi->pdev = pdev;
+       qedi->dbg_ctx.pdev = pdev;
+       qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
+       qedi->max_sqes = QEDI_SQ_SIZE;
+
+       if (shost_use_blk_mq(shost))
+               shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+       pci_set_drvdata(pdev, qedi);
+
+exit_setup_shost:
+       return qedi;
+}
+
+static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
+{
+       struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+       struct qedi_uio_dev *udev;
+       struct qedi_uio_ctrl *uctrl;
+       struct skb_work_list *work;
+       u32 prod;
+
+       if (!qedi) {
+               QEDI_ERR(NULL, "qedi is NULL\n");
+               return -1;
+       }
+
+       if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
+                         "UIO DEV is not opened\n");
+               kfree_skb(skb);
+               return 0;
+       }
+
+       udev = qedi->udev;
+       uctrl = udev->uctrl;
+
+       work = kzalloc(sizeof(*work), GFP_ATOMIC);
+       if (!work) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Could not allocate work so dropping frame.\n");
+               kfree_skb(skb);
+               return 0;
+       }
+
+       INIT_LIST_HEAD(&work->list);
+       work->skb = skb;
+
+       if (skb_vlan_tag_present(skb))
+               work->vlan_id = skb_vlan_tag_get(skb);
+
+       if (work->vlan_id)
+               __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
+
+       spin_lock_bh(&qedi->ll2_lock);
+       list_add_tail(&work->list, &qedi->ll2_skb_list);
+
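+       /*
+        * Advance the producer only while the mmap'ed ring has room; if
+        * the ring is full the skb stays queued on ll2_skb_list and is
+        * drained on a later wakeup of the recv thread.
+        */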
+       ++uctrl->hw_rx_prod_cnt;
+       prod = (uctrl->hw_rx_prod + 1) % RX_RING;
+       if (prod != uctrl->host_rx_cons) {
+               uctrl->hw_rx_prod = prod;
+               spin_unlock_bh(&qedi->ll2_lock);
+               wake_up_process(qedi->ll2_recv_thread);
+               return 0;
+       }
+
+       spin_unlock_bh(&qedi->ll2_lock);
+       return 0;
+}
+
+/* Map this skb into the region iscsiuio has mmap'ed */
+static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
+                               u16 vlan_id)
+{
+       struct qedi_uio_dev *udev = NULL;
+       struct qedi_uio_ctrl *uctrl = NULL;
+       struct qedi_rx_bd rxbd;
+       struct qedi_rx_bd *p_rxbd;
+       u32 rx_bd_prod;
+       void *pkt;
+       int len = 0;
+
+       if (!qedi) {
+               QEDI_ERR(NULL, "qedi is NULL\n");
+               return -1;
+       }
+
+       udev = qedi->udev;
+       uctrl = udev->uctrl;
+       pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
+       len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
+       memcpy(pkt, skb->data, len);
+
+       memset(&rxbd, 0, sizeof(rxbd));
+       rxbd.rx_pkt_index = uctrl->hw_rx_prod;
+       rxbd.rx_pkt_len = len;
+       rxbd.vlan_id = vlan_id;
+
+       uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
+       rx_bd_prod = uctrl->hw_rx_bd_prod;
+       p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
+       p_rxbd += rx_bd_prod;
+
+       memcpy(p_rxbd, &rxbd, sizeof(rxbd));
+
+       /* notify the iscsiuio about new packet */
+       uio_event_notify(&udev->qedi_uinfo);
+
+       return 0;
+}
+
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
+{
+       struct skb_work_list *work, *work_tmp;
+
+       spin_lock_bh(&qedi->ll2_lock);
+       list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
+               list_del(&work->list);
+               if (work->skb)
+                       kfree_skb(work->skb);
+               kfree(work);
+       }
+       spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int qedi_ll2_recv_thread(void *arg)
+{
+       struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
+       struct skb_work_list *work, *work_tmp;
+
+       set_user_nice(current, -20);
+
+       while (!kthread_should_stop()) {
+               spin_lock_bh(&qedi->ll2_lock);
+               list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
+                                        list) {
+                       list_del(&work->list);
+                       qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
+                       kfree_skb(work->skb);
+                       kfree(work);
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_unlock_bh(&qedi->ll2_lock);
+               schedule();
+       }
+
+       __set_current_state(TASK_RUNNING);
+       return 0;
+}
+
+static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+       u8 num_sq_pages;
+       u32 log_page_size;
+       int rval = 0;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
+                 MIN_NUM_CPUS_MSIX(qedi));
+
+       num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
+
+       qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+       memset(&qedi->pf_params.iscsi_pf_params, 0,
+              sizeof(qedi->pf_params.iscsi_pf_params));
+
+       qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+                       qedi->num_queues * sizeof(struct qedi_glbl_q_params),
+                       &qedi->hw_p_cpuq);
+       if (!qedi->p_cpuq) {
+               QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent failed.\n");
+               rval = -1;
+               goto err_alloc_mem;
+       }
+
+       rval = qedi_alloc_global_queues(qedi);
+       if (rval) {
+               QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
+               rval = -1;
+               goto err_alloc_mem;
+       }
+
+       qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+       qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
+       qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
+       qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
+       qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
+       qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
+       qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
+       qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+
+       /* Compute log2(PAGE_SIZE) for the firmware's page-size field */
+       for (log_page_size = 0; log_page_size < 32; log_page_size++) {
+               if ((1 << log_page_size) == PAGE_SIZE)
+                       break;
+       }
+       qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
+
+       qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
+                                                          (u64)qedi->hw_p_cpuq;
+
+       /* RQ BDQ initializations.
+        * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
+        * rqe_log_size: 8 for 256B RQE
+        */
+       qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
+       /* BDQ address and size */
+       qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
+                                                       qedi->bdq_pbl_list_dma;
+       qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+                                               qedi->bdq_pbl_list_num_entries;
+       qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;
+
+       /* cq_num_entries: num_tasks + rq_num_entries */
+       qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;
+
+       qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
+       qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
+       qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
+
+err_alloc_mem:
+       return rval;
+}
+
+/* Free DMA coherent memory for array of queue pointers we pass to qed */
+static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+       size_t size = 0;
+
+       if (qedi->p_cpuq) {
+               size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
+               pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+                                   qedi->hw_p_cpuq);
+       }
+
+       qedi_free_global_queues(qedi);
+
+       kfree(qedi->global_queues);
+}
+
+static void qedi_link_update(void *dev, struct qed_link_output *link)
+{
+       struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+
+       if (link->link_up) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
+               atomic_set(&qedi->link_state, QEDI_LINK_UP);
+       } else {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Link Down event.\n");
+               atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+       }
+}
+
+static struct qed_iscsi_cb_ops qedi_cb_ops = {
+       {
+               .link_update =          qedi_link_update,
+       }
+};
+
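+/*
+ * Route a completion onto the per-CPU work list. Solicited CQEs reuse
+ * the cqe_work embedded in the owning command; unsolicited, dummy and
+ * cleanup CQEs get a transient qedi_work allocated with GFP_ATOMIC.
+ */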
+static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
+                         u16 que_idx, struct qedi_percpu_s *p)
+{
+       struct qedi_work *qedi_work;
+       struct qedi_conn *q_conn;
+       struct iscsi_conn *conn;
+       struct qedi_cmd *qedi_cmd;
+       u32 iscsi_cid;
+       int rc = 0;
+
+       iscsi_cid  = cqe->cqe_common.conn_id;
+       q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+       if (!q_conn) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Session no longer exists for cid=0x%x!!\n",
+                         iscsi_cid);
+               return -1;
+       }
+       conn = q_conn->cls_conn->dd_data;
+
+       switch (cqe->cqe_common.cqe_type) {
+       case ISCSI_CQE_TYPE_SOLICITED:
+       case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+               qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
+               if (!qedi_cmd) {
+                       rc = -1;
+                       break;
+               }
+               INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
+               qedi_cmd->cqe_work.qedi = qedi;
+               memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
+               qedi_cmd->cqe_work.que_idx = que_idx;
+               qedi_cmd->cqe_work.is_solicited = true;
+               list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
+               break;
+       case ISCSI_CQE_TYPE_UNSOLICITED:
+       case ISCSI_CQE_TYPE_DUMMY:
+       case ISCSI_CQE_TYPE_TASK_CLEANUP:
+               qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
+               if (!qedi_work) {
+                       rc = -1;
+                       break;
+               }
+               INIT_LIST_HEAD(&qedi_work->list);
+               qedi_work->qedi = qedi;
+               memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
+               qedi_work->que_idx = que_idx;
+               qedi_work->is_solicited = false;
+               list_add_tail(&qedi_work->list, &p->work_list);
+               break;
+       default:
+               rc = -1;
+               QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
+       }
+       return rc;
+}
+
+static bool qedi_process_completions(struct qedi_fastpath *fp)
+{
+       struct qedi_ctx *qedi = fp->qedi;
+       struct qed_sb_info *sb_info = fp->sb_info;
+       struct status_block *sb = sb_info->sb_virt;
+       struct qedi_percpu_s *p = NULL;
+       struct global_queue *que;
+       u16 prod_idx;
+       unsigned long flags;
+       union iscsi_cqe *cqe;
+       int cpu;
+       int ret;
+
+       /* Get the current firmware producer index */
+       prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+       if (prod_idx >= QEDI_CQ_SIZE)
+               prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+       que = qedi->global_queues[fp->sb_id];
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+                 "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
+                 que, prod_idx, que->cq_cons_idx, fp->sb_id);
+
+       qedi->intr_cpu = fp->sb_id;
+       cpu = smp_processor_id();
+       p = &per_cpu(qedi_percpu, cpu);
+
+       WARN_ON(!p->iothread);
+
+       spin_lock_irqsave(&p->p_work_lock, flags);
+       while (que->cq_cons_idx != prod_idx) {
+               cqe = &que->cq[que->cq_cons_idx];
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+                         "cqe=%p prod_idx=%d cons_idx=%d.\n",
+                         cqe, prod_idx, que->cq_cons_idx);
+
+               ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
+               /* Bail out on error: retrying the same CQE would spin forever */
+               if (ret)
+                       break;
+
+               que->cq_cons_idx++;
+               if (que->cq_cons_idx == QEDI_CQ_SIZE)
+                       que->cq_cons_idx = 0;
+       }
+       wake_up_process(p->iothread);
+       spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+       return true;
+}
+
+static bool qedi_fp_has_work(struct qedi_fastpath *fp)
+{
+       struct qedi_ctx *qedi = fp->qedi;
+       struct global_queue *que;
+       struct qed_sb_info *sb_info = fp->sb_info;
+       struct status_block *sb = sb_info->sb_virt;
+       u16 prod_idx;
+
+       barrier();
+
+       /* Get the current firmware producer index */
+       prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+       /* Get the pointer to the global CQ this completion is on */
+       que = qedi->global_queues[fp->sb_id];
+
+       /* prod idx wrap around uint16 */
+       if (prod_idx >= QEDI_CQ_SIZE)
+               prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+       return (que->cq_cons_idx != prod_idx);
+}
+
+/*
+ * MSI-X fastpath handler: disable the status block, drain completions,
+ * then re-check for new work before re-enabling the interrupt so a CQE
+ * arriving during the drain is not lost.
+ */
+static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
+{
+       struct qedi_fastpath *fp = dev_id;
+       struct qedi_ctx *qedi = fp->qedi;
+       bool wake_io_thread = true;
+
+       qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+
+process_again:
+       wake_io_thread = qedi_process_completions(fp);
+       if (wake_io_thread) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                         "process already running\n");
+       }
+
+       if (!qedi_fp_has_work(fp))
+               qed_sb_update_sb_idx(fp->sb_info);
+
+       /* Check for more work */
+       rmb();
+
+       if (!qedi_fp_has_work(fp))
+               qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+       else
+               goto process_again;
+
+       return IRQ_HANDLED;
+}
+
+/* simd handler for MSI/INTa */
+static void qedi_simd_int_handler(void *cookie)
+{
+       /* Cookie is qedi_ctx struct */
+       struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+
+       QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
+}
+
+#define QEDI_SIMD_HANDLER_NUM          0
+static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
+{
+       int i;
+
+       if (qedi->int_info.msix_cnt) {
+               for (i = 0; i < qedi->int_info.used_cnt; i++) {
+                       synchronize_irq(qedi->int_info.msix[i].vector);
+                       irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+                                             NULL);
+                       free_irq(qedi->int_info.msix[i].vector,
+                                &qedi->fp_array[i]);
+               }
+       } else {
+               qedi_ops->common->simd_handler_clean(qedi->cdev,
+                                                    QEDI_SIMD_HANDLER_NUM);
+       }
+
+       qedi->int_info.used_cnt = 0;
+       qedi_ops->common->set_fp_int(qedi->cdev, 0);
+}
+
+static int qedi_request_msix_irq(struct qedi_ctx *qedi)
+{
+       int i, rc, cpu;
+
+       cpu = cpumask_first(cpu_online_mask);
+       for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+               rc = request_irq(qedi->int_info.msix[i].vector,
+                                qedi_msix_handler, 0, "qedi",
+                                &qedi->fp_array[i]);
+
+               if (rc) {
+                       QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
+                       qedi_sync_free_irqs(qedi);
+                       return rc;
+               }
+               qedi->int_info.used_cnt++;
+               rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+                                          get_cpu_mask(cpu));
+               cpu = cpumask_next(cpu, cpu_online_mask);
+       }
+
+       return 0;
+}
+
+static int qedi_setup_int(struct qedi_ctx *qedi)
+{
+       int rc = 0;
+
+       rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+       /* A negative return means no fastpath vectors could be set up */
+       if (rc < 0)
+               goto exit_setup_int;
+
+       rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
+       if (rc)
+               goto exit_setup_int;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
+                  qedi->int_info.msix_cnt, num_online_cpus());
+
+       if (qedi->int_info.msix_cnt) {
+               rc = qedi_request_msix_irq(qedi);
+               goto exit_setup_int;
+       } else {
+               qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
+                                                     QEDI_SIMD_HANDLER_NUM,
+                                                     qedi_simd_int_handler);
+               qedi->int_info.used_cnt = 1;
+       }
+
+exit_setup_int:
+       return rc;
+}
+
+static void qedi_free_bdq(struct qedi_ctx *qedi)
+{
+       int i;
+
+       if (qedi->bdq_pbl_list)
+               dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
+                                 qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
+
+       if (qedi->bdq_pbl)
+               dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
+                                 qedi->bdq_pbl, qedi->bdq_pbl_dma);
+
+       for (i = 0; i < QEDI_BDQ_NUM; i++) {
+               if (qedi->bdq[i].buf_addr) {
+                       dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
+                                         qedi->bdq[i].buf_addr,
+                                         qedi->bdq[i].buf_dma);
+               }
+       }
+}
+
+static void qedi_free_global_queues(struct qedi_ctx *qedi)
+{
+       int i;
+       struct global_queue **gl = qedi->global_queues;
+
+       for (i = 0; i < qedi->num_queues; i++) {
+               if (!gl[i])
+                       continue;
+
+               if (gl[i]->cq)
+                       dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
+                                         gl[i]->cq, gl[i]->cq_dma);
+               if (gl[i]->cq_pbl)
+                       dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
+                                         gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
+
+               kfree(gl[i]);
+       }
+       qedi_free_bdq(qedi);
+}
+
+static int qedi_alloc_bdq(struct qedi_ctx *qedi)
+{
+       int i;
+       struct scsi_bd *pbl;
+       u64 *list;
+       dma_addr_t page;
+
+       /* Alloc dma memory for BDQ buffers */
+       for (i = 0; i < QEDI_BDQ_NUM; i++) {
+               qedi->bdq[i].buf_addr =
+                               dma_alloc_coherent(&qedi->pdev->dev,
+                                                  QEDI_BDQ_BUF_SIZE,
+                                                  &qedi->bdq[i].buf_dma,
+                                                  GFP_KERNEL);
+               if (!qedi->bdq[i].buf_addr) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not allocate BDQ buffer %d.\n", i);
+                       return -ENOMEM;
+               }
+       }
+
+       /* Alloc dma memory for BDQ page buffer list */
+       qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
+       qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
+       qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
+                 qedi->rq_num_entries);
+
+       qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+                                          qedi->bdq_pbl_mem_size,
+                                          &qedi->bdq_pbl_dma, GFP_KERNEL);
+       if (!qedi->bdq_pbl) {
+               QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * Populate BDQ PBL with physical and virtual address of individual
+        * BDQ buffers
+        */
+       pbl = (struct scsi_bd  *)qedi->bdq_pbl;
+       for (i = 0; i < QEDI_BDQ_NUM; i++) {
+               pbl->address.hi =
+                               cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
+               pbl->address.lo =
+                               cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
+                         pbl, pbl->address.hi, pbl->address.lo, i);
+               pbl->opaque.hi = 0;
+               pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+               pbl++;
+       }
+
+       /* Allocate list of PBL pages */
+       qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+                                               PAGE_SIZE,
+                                               &qedi->bdq_pbl_list_dma,
+                                               GFP_KERNEL);
+       if (!qedi->bdq_pbl_list) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Could not allocate list of PBL pages.\n");
+               return -ENOMEM;
+       }
+       memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
+
+       /*
+        * Now populate PBL list with pages that contain pointers to the
+        * individual buffers.
+        */
+       qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
+       list = (u64 *)qedi->bdq_pbl_list;
+       page = qedi->bdq_pbl_dma;
+       for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
+               /* Each list entry points at one page of the PBL itself */
+               *list = page;
+               list++;
+               page += PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
+{
+       u32 *list;
+       int i;
+       int status = 0, rc;
+       u32 *pbl;
+       dma_addr_t page;
+       int num_pages;
+
+       /*
+        * Number of global queues (CQ / RQ). This should
+        * be <= number of available MSIX vectors for the PF
+        */
+       if (!qedi->num_queues) {
+               QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
+               return -EINVAL;
+       }
+
+       /* Make sure we allocated the PBL that will contain the physical
+        * addresses of our queues
+        */
+       if (!qedi->p_cpuq) {
+               status = -EINVAL;
+               goto mem_alloc_failure;
+       }
+
+       qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
+                                      qedi->num_queues), GFP_KERNEL);
+       if (!qedi->global_queues) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Unable to allocate global queues array ptr memory\n");
+               return -ENOMEM;
+       }
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "qedi->global_queues=%p.\n", qedi->global_queues);
+
+       /* Allocate DMA coherent buffers for BDQ */
+       rc = qedi_alloc_bdq(qedi);
+       if (rc)
+               goto mem_alloc_failure;
+
+       /* Allocate a CQ and an associated PBL for each MSI-X
+        * vector.
+        */
+       for (i = 0; i < qedi->num_queues; i++) {
+               qedi->global_queues[i] =
+                                       kzalloc(sizeof(*qedi->global_queues[0]),
+                                               GFP_KERNEL);
+               if (!qedi->global_queues[i]) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Unable to allocate global queue %d.\n", i);
+                       status = -ENOMEM;
+                       goto mem_alloc_failure;
+               }
+
+               qedi->global_queues[i]->cq_mem_size =
+                   (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
+               qedi->global_queues[i]->cq_mem_size =
+                   (qedi->global_queues[i]->cq_mem_size +
+                   (QEDI_PAGE_SIZE - 1));
+
+               qedi->global_queues[i]->cq_pbl_size =
+                   (qedi->global_queues[i]->cq_mem_size /
+                   QEDI_PAGE_SIZE) * sizeof(void *);
+               qedi->global_queues[i]->cq_pbl_size =
+                   (qedi->global_queues[i]->cq_pbl_size +
+                   (QEDI_PAGE_SIZE - 1));
+
+               qedi->global_queues[i]->cq =
+                   dma_alloc_coherent(&qedi->pdev->dev,
+                                      qedi->global_queues[i]->cq_mem_size,
+                                      &qedi->global_queues[i]->cq_dma,
+                                      GFP_KERNEL);
+
+               if (!qedi->global_queues[i]->cq) {
+                       QEDI_WARN(&qedi->dbg_ctx,
+                                 "Could not allocate cq.\n");
+                       status = -ENOMEM;
+                       goto mem_alloc_failure;
+               }
+               memset(qedi->global_queues[i]->cq, 0,
+                      qedi->global_queues[i]->cq_mem_size);
+
+               qedi->global_queues[i]->cq_pbl =
+                   dma_alloc_coherent(&qedi->pdev->dev,
+                                      qedi->global_queues[i]->cq_pbl_size,
+                                      &qedi->global_queues[i]->cq_pbl_dma,
+                                      GFP_KERNEL);
+
+               if (!qedi->global_queues[i]->cq_pbl) {
+                       QEDI_WARN(&qedi->dbg_ctx,
+                                 "Could not allocate cq PBL.\n");
+                       status = -ENOMEM;
+                       goto mem_alloc_failure;
+               }
+               memset(qedi->global_queues[i]->cq_pbl, 0,
+                      qedi->global_queues[i]->cq_pbl_size);
+
+               /* Create PBL */
+               num_pages = qedi->global_queues[i]->cq_mem_size /
+                   QEDI_PAGE_SIZE;
+               page = qedi->global_queues[i]->cq_dma;
+               pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
+
+               while (num_pages--) {
+                       *pbl = (u32)page;
+                       pbl++;
+                       *pbl = (u32)((u64)page >> 32);
+                       pbl++;
+                       page += QEDI_PAGE_SIZE;
+               }
+       }
+
+       list = (u32 *)qedi->p_cpuq;
+
+       /*
+        * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
+        * CQ#1 PBL pointer, RQ#1 PBL pointer, etc.  Each PBL pointer points
+        * to the physical address which contains an array of pointers to the
+        * physical addresses of the specific queue pages.
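+        *
+        * For two queues the u32 array is, entry by entry:
+        *   CQ0_pbl_lo, CQ0_pbl_hi, 0, 0, CQ1_pbl_lo, CQ1_pbl_hi, 0, 0
+        * with the zero pairs standing in for the unused RQ PBL pointers.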
+        */
+       for (i = 0; i < qedi->num_queues; i++) {
+               *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
+               list++;
+               *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
+               list++;
+
+               *list = (u32)0;
+               list++;
+               *list = (u32)((u64)0 >> 32);
+               list++;
+       }
+
+       return 0;
+
+mem_alloc_failure:
+       qedi_free_global_queues(qedi);
+       return status;
+}
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+       int rval = 0;
+       u32 *pbl;
+       dma_addr_t page;
+       int num_pages;
+
+       if (!ep)
+               return -EIO;
+
+       /* Calculate appropriate queue and PBL sizes */
+       ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
+       ep->sq_mem_size += QEDI_PAGE_SIZE - 1;
+
+       ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
+       ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
+
+       ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+                                   &ep->sq_dma, GFP_KERNEL);
+       if (!ep->sq) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Could not allocate send queue.\n");
+               rval = -ENOMEM;
+               goto out;
+       }
+       memset(ep->sq, 0, ep->sq_mem_size);
+
+       ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+                                       &ep->sq_pbl_dma, GFP_KERNEL);
+       if (!ep->sq_pbl) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Could not allocate send queue PBL.\n");
+               rval = -ENOMEM;
+               goto out_free_sq;
+       }
+       memset(ep->sq_pbl, 0, ep->sq_pbl_size);
+
+       /* Create PBL */
+       num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
+       page = ep->sq_dma;
+       pbl = (u32 *)ep->sq_pbl;
+
+       while (num_pages--) {
+               *pbl = (u32)page;
+               pbl++;
+               *pbl = (u32)((u64)page >> 32);
+               pbl++;
+               page += QEDI_PAGE_SIZE;
+       }
+
+       return rval;
+
+out_free_sq:
+       dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+                         ep->sq_dma);
+out:
+       return rval;
+}
+
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+       if (ep->sq_pbl)
+               dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
+                                 ep->sq_pbl_dma);
+       if (ep->sq)
+               dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+                                 ep->sq_dma);
+}
+
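+/*
+ * Grab a free firmware task index from the shared bitmap. The scan is
+ * lockless, so a lost race on test_and_set_bit() simply restarts it.
+ */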
+int qedi_get_task_idx(struct qedi_ctx *qedi)
+{
+       s16 tmp_idx;
+
+again:
+       tmp_idx = find_first_zero_bit(qedi->task_idx_map,
+                                     MAX_ISCSI_TASK_ENTRIES);
+
+       if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
+               QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
+               tmp_idx = -1;
+               goto err_idx;
+       }
+
+       if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
+               goto again;
+
+err_idx:
+       return tmp_idx;
+}
+
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
+{
+       if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "FW task context, already cleared, tid=0x%x\n", idx);
+               WARN_ON(1);
+       }
+}
+
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+                        struct qedi_cmd *cmd)
+{
+       qedi->itt_map[tid].itt = proto_itt;
+       qedi->itt_map[tid].p_cmd = cmd;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
+                 qedi->itt_map[tid].itt);
+}
+
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
+{
+       u16 i;
+
+       for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
+               if (qedi->itt_map[i].itt == itt) {
+                       *tid = i;
+                       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                                 "Ref itt=0x%x, found at tid=0x%x\n",
+                                 itt, *tid);
+                       return;
+               }
+       }
+
+       WARN_ON(1);
+}
+
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
+{
+       *proto_itt = qedi->itt_map[tid].itt;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "Get itt map tid [0x%x with proto itt[0x%x]",
+                 tid, *proto_itt);
+}
+
+struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
+{
+       struct qedi_cmd *cmd = NULL;
+
+       if (tid >= MAX_ISCSI_TASK_ENTRIES)
+               return NULL;
+
+       cmd = qedi->itt_map[tid].p_cmd;
+       if (!cmd || cmd->task_id != tid)
+               return NULL;
+
+       qedi->itt_map[tid].p_cmd = NULL;
+
+       return cmd;
+}
+
+static int qedi_alloc_itt(struct qedi_ctx *qedi)
+{
+       qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
+                               sizeof(struct qedi_itt_map), GFP_KERNEL);
+       if (!qedi->itt_map) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Unable to allocate itt map array memory\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void qedi_free_itt(struct qedi_ctx *qedi)
+{
+       kfree(qedi->itt_map);
+}
+
+static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
+       .rx_cb = qedi_ll2_rx,
+       .tx_cb = NULL,
+};
+
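+/*
+ * Per-CPU I/O thread: splice pending work onto a private list under the
+ * lock, then process CQEs with the lock dropped so the IRQ path can keep
+ * queueing. Solicited work is embedded in its command and is not freed
+ * here.
+ */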
+static int qedi_percpu_io_thread(void *arg)
+{
+       struct qedi_percpu_s *p = arg;
+       struct qedi_work *work, *tmp;
+       unsigned long flags;
+       LIST_HEAD(work_list);
+
+       set_user_nice(current, -20);
+
+       while (!kthread_should_stop()) {
+               spin_lock_irqsave(&p->p_work_lock, flags);
+               while (!list_empty(&p->work_list)) {
+                       list_splice_init(&p->work_list, &work_list);
+                       spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+                       list_for_each_entry_safe(work, tmp, &work_list, list) {
+                               list_del_init(&work->list);
+                               qedi_fp_process_cqes(work);
+                               if (!work->is_solicited)
+                                       kfree(work);
+                       }
+                       cond_resched();
+                       spin_lock_irqsave(&p->p_work_lock, flags);
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_unlock_irqrestore(&p->p_work_lock, flags);
+               schedule();
+       }
+       __set_current_state(TASK_RUNNING);
+
+       return 0;
+}
+
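+/* Create the per-CPU I/O kthread and pin it to @cpu before first wakeup */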
+static void qedi_percpu_thread_create(unsigned int cpu)
+{
+       struct qedi_percpu_s *p;
+       struct task_struct *thread;
+
+       p = &per_cpu(qedi_percpu, cpu);
+
+       thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
+                                       cpu_to_node(cpu),
+                                       "qedi_thread/%d", cpu);
+       if (likely(!IS_ERR(thread))) {
+               kthread_bind(thread, cpu);
+               p->iothread = thread;
+               wake_up_process(thread);
+       }
+}
+
+static void qedi_percpu_thread_destroy(unsigned int cpu)
+{
+       struct qedi_percpu_s *p;
+       struct task_struct *thread;
+       struct qedi_work *work, *tmp;
+
+       p = &per_cpu(qedi_percpu, cpu);
+       spin_lock_bh(&p->p_work_lock);
+       thread = p->iothread;
+       p->iothread = NULL;
+
+       list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+               list_del_init(&work->list);
+               qedi_fp_process_cqes(work);
+               if (!work->is_solicited)
+                       kfree(work);
+       }
+
+       spin_unlock_bh(&p->p_work_lock);
+       if (thread)
+               kthread_stop(thread);
+}
+
+static int qedi_cpu_callback(struct notifier_block *nfb,
+                            unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               QEDI_ERR(NULL, "CPU %d online.\n", cpu);
+               qedi_percpu_thread_create(cpu);
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
+               qedi_percpu_thread_destroy(cpu);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block qedi_cpu_notifier = {
+       .notifier_call = qedi_cpu_callback,
+};
+
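+/*
+ * An MTU change requires a full LL2 restart: recover offloaded
+ * connections, stop LL2, then restart it sized for the new MTU plus
+ * IPv6 and TCP header room.
+ */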
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
+{
+       struct qed_ll2_params params;
+
+       qedi_recover_all_conns(qedi);
+
+       qedi_ops->ll2->stop(qedi->cdev);
+       qedi_ll2_free_skbs(qedi);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
+                 qedi->ll2_mtu, mtu);
+       memset(&params, 0, sizeof(params));
+       qedi->ll2_mtu = mtu;
+       params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
+       params.drop_ttl0_packets = 0;
+       params.rx_vlan_stripping = 1;
+       ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+       qedi_ops->ll2->start(qedi->cdev, &params);
+}
+
+static void __qedi_remove(struct pci_dev *pdev, int mode)
+{
+       struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+       if (qedi->tmf_thread) {
+               flush_workqueue(qedi->tmf_thread);
+               destroy_workqueue(qedi->tmf_thread);
+               qedi->tmf_thread = NULL;
+       }
+
+       if (qedi->offload_thread) {
+               flush_workqueue(qedi->offload_thread);
+               destroy_workqueue(qedi->offload_thread);
+               qedi->offload_thread = NULL;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+       if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
+               qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+       qedi_sync_free_irqs(qedi);
+
+       if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+               qedi_ops->stop(qedi->cdev);
+               qedi_ops->ll2->stop(qedi->cdev);
+       }
+
+       if (mode == QEDI_MODE_NORMAL)
+               qedi_free_iscsi_pf_param(qedi);
+
+       if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+               qedi_ops->common->slowpath_stop(qedi->cdev);
+               qedi_ops->common->remove(qedi->cdev);
+       }
+
+       qedi_destroy_fp(qedi);
+
+       if (mode == QEDI_MODE_NORMAL) {
+               qedi_release_cid_que(qedi);
+               qedi_cm_free_mem(qedi);
+               qedi_free_uio(qedi->udev);
+               qedi_free_itt(qedi);
+
+               iscsi_host_remove(qedi->shost);
+               iscsi_host_free(qedi->shost);
+
+               if (qedi->ll2_recv_thread) {
+                       kthread_stop(qedi->ll2_recv_thread);
+                       qedi->ll2_recv_thread = NULL;
+               }
+               qedi_ll2_free_skbs(qedi);
+       }
+}
+
+static int __qedi_probe(struct pci_dev *pdev, int mode)
+{
+       struct qedi_ctx *qedi;
+       struct qed_ll2_params params;
+       u32 dp_module = 0;
+       u8 dp_level = 0;
+       bool is_vf = false;
+       char host_buf[16];
+       struct qed_link_params link_params;
+       struct qed_slowpath_params sp_params;
+       struct qed_probe_params qed_params;
+       void *task_start, *task_end;
+       int rc;
+       u16 tmp;
+
+       if (mode != QEDI_MODE_RECOVERY) {
+               qedi = qedi_host_alloc(pdev);
+               if (!qedi) {
+                       rc = -ENOMEM;
+                       goto exit_probe;
+               }
+       } else {
+               qedi = pci_get_drvdata(pdev);
+       }
+
+       memset(&qed_params, 0, sizeof(qed_params));
+       qed_params.protocol = QED_PROTOCOL_ISCSI;
+       qed_params.dp_module = dp_module;
+       qed_params.dp_level = dp_level;
+       qed_params.is_vf = is_vf;
+       qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
+       if (!qedi->cdev) {
+               rc = -ENODEV;
+               QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
+               goto free_host;
+       }
+
+       qedi->msix_count = MAX_NUM_MSIX_PF;
+       atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+       if (mode != QEDI_MODE_RECOVERY) {
+               rc = qedi_set_iscsi_pf_param(qedi);
+               if (rc) {
+                       rc = -ENOMEM;
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Set iSCSI pf param fail\n");
+                       goto free_host;
+               }
+       }
+
+       qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+       rc = qedi_prepare_fp(qedi);
+       if (rc) {
+               QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
+               goto free_pf_params;
+       }
+
+       /* Start the Slowpath-process */
+       memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
+       sp_params.int_mode = QED_INT_MODE_MSIX;
+       sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
+       sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
+       sp_params.drv_rev = QEDI_DRIVER_REV_VER;
+       sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
+       strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
+       rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
+       if (rc) {
+               QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
+               goto stop_hw;
+       }
+
+       /* update_pf_params needs to be called before and after slowpath
+        * start
+        */
+       qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+       rc = qedi_setup_int(qedi);
+       if (rc)
+               goto stop_iscsi_func;
+
+       qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+       /* Learn information crucial for qedi to progress */
+       rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
+       if (rc)
+               goto stop_iscsi_func;
+
+       /* Record BDQ producer doorbell addresses */
+       qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
+       qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "BDQ primary_prod=%p secondary_prod=%p.\n",
+                 qedi->bdq_primary_prod,
+                 qedi->bdq_secondary_prod);
+
+       /*
+        * We need to write the number of BDs in the BDQ we've preallocated so
+        * the f/w will do a prefetch and we'll get an unsolicited CQE when a
+        * packet arrives.
+        */
+       qedi->bdq_prod_idx = QEDI_BDQ_NUM;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "Writing %d to primary and secondary BDQ doorbell registers.\n",
+                 qedi->bdq_prod_idx);
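+       /* The writes are posted; read back to flush the doorbells */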
+       writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+       tmp = readw(qedi->bdq_primary_prod);
+       writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+       tmp = readw(qedi->bdq_secondary_prod);
+
+       ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
+                 qedi->mac);
+
+       sprintf(host_buf, "host_%d", qedi->shost->host_no);
+       qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+
+       qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
+
+       memset(&params, 0, sizeof(params));
+       params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
+       qedi->ll2_mtu = DEF_PATH_MTU;
+       params.drop_ttl0_packets = 0;
+       params.rx_vlan_stripping = 1;
+       ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+
+       if (mode != QEDI_MODE_RECOVERY) {
+               /* set up rx path */
+               INIT_LIST_HEAD(&qedi->ll2_skb_list);
+               spin_lock_init(&qedi->ll2_lock);
+               /* start qedi context */
+               spin_lock_init(&qedi->hba_lock);
+               spin_lock_init(&qedi->task_idx_lock);
+       }
+       qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
+       qedi_ops->ll2->start(qedi->cdev, &params);
+
+       if (mode != QEDI_MODE_RECOVERY) {
+               qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
+                                                   (void *)qedi,
+                                                   "qedi_ll2_thread");
+       }
+
+       rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
+                            qedi, qedi_iscsi_event_cb);
+       if (rc) {
+               rc = -ENODEV;
+               QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
+               goto stop_slowpath;
+       }
+
+       task_start = qedi_get_task_mem(&qedi->tasks, 0);
+       task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "Task context start=%p, end=%p block_size=%u.\n",
+                  task_start, task_end, qedi->tasks.size);
+
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = true;
+       rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
+       if (rc) {
+               QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
+               atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
+                          &qedi_dbg_fops);
+#endif
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
+                 QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
+                 FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+
+       if (mode == QEDI_MODE_NORMAL) {
+               if (iscsi_host_add(qedi->shost, &pdev->dev)) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not add iscsi host\n");
+                       rc = -ENOMEM;
+                       goto remove_host;
+               }
+
+               /* Allocate uio buffers */
+               rc = qedi_alloc_uio_rings(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "UIO alloc ring failed err=%d\n", rc);
+                       goto remove_host;
+               }
+
+               rc = qedi_init_uio(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "UIO init failed, err=%d\n", rc);
+                       goto free_uio;
+               }
+
+               /* Set up the CID queue that maps CIDs to active connections */
+               rc = qedi_setup_cid_que(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not setup cid que\n");
+                       goto free_uio;
+               }
+
+               rc = qedi_cm_alloc_mem(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not alloc cm memory\n");
+                       goto free_cid_que;
+               }
+
+               rc = qedi_alloc_itt(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not alloc itt memory\n");
+                       goto free_cid_que;
+               }
+
+               sprintf(host_buf, "host_%d", qedi->shost->host_no);
+               qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+               if (!qedi->tmf_thread) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Unable to start tmf thread!\n");
+                       rc = -ENODEV;
+                       goto free_cid_que;
+               }
+
+               sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
+               qedi->offload_thread = create_workqueue(host_buf);
+               if (!qedi->offload_thread) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Unable to start offload thread!\n");
+                       rc = -ENODEV;
+                       goto free_cid_que;
+               }
+
+               /* F/w needs 1st task context memory entry for performance */
+               set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
+               atomic_set(&qedi->num_offloads, 0);
+       }
+
+       return 0;
+
+free_cid_que:
+       qedi_release_cid_que(qedi);
+free_uio:
+       qedi_free_uio(qedi->udev);
+remove_host:
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+       iscsi_host_remove(qedi->shost);
+stop_iscsi_func:
+       qedi_ops->stop(qedi->cdev);
+stop_slowpath:
+       qedi_ops->common->slowpath_stop(qedi->cdev);
+stop_hw:
+       qedi_ops->common->remove(qedi->cdev);
+free_pf_params:
+       qedi_free_iscsi_pf_param(qedi);
+free_host:
+       iscsi_host_free(qedi->shost);
+exit_probe:
+       return rc;
+}
+
+static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       return __qedi_probe(pdev, QEDI_MODE_NORMAL);
+}
+
+static void qedi_remove(struct pci_dev *pdev)
+{
+       __qedi_remove(pdev, QEDI_MODE_NORMAL);
+}
+
+static struct pci_device_id qedi_pci_tbl[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+       { 0 },
+};
+MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
+
+static struct pci_driver qedi_pci_driver = {
+       .name = QEDI_MODULE_NAME,
+       .id_table = qedi_pci_tbl,
+       .probe = qedi_probe,
+       .remove = qedi_remove,
+};
+
+static int __init qedi_init(void)
+{
+       int rc;
+       struct qedi_percpu_s *p;
+       unsigned int cpu = 0;
+
+       qedi_ops = qed_get_iscsi_ops();
+       if (!qedi_ops) {
+               QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
+               rc = -EINVAL;
+               goto exit_qedi_init_0;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_init("qedi");
+#endif
+
+       qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
+       if (!qedi_scsi_transport) {
+               QEDI_ERR(NULL, "Could not register qedi transport");
+               rc = -ENOMEM;
+               goto exit_qedi_init_1;
+       }
+
+       /* Per-CPU work lists must be initialized before the PCI driver
+        * registers, since probe can enable interrupts that queue work.
+        */
+       for_each_possible_cpu(cpu) {
+               p = &per_cpu(qedi_percpu, cpu);
+               INIT_LIST_HEAD(&p->work_list);
+               spin_lock_init(&p->p_work_lock);
+               p->iothread = NULL;
+       }
+
+       for_each_online_cpu(cpu)
+               qedi_percpu_thread_create(cpu);
+
+       register_hotcpu_notifier(&qedi_cpu_notifier);
+
+       rc = pci_register_driver(&qedi_pci_driver);
+       if (rc) {
+               QEDI_ERR(NULL, "Failed to register driver\n");
+               goto exit_qedi_init_2;
+       }
+
+       return 0;
+
+exit_qedi_init_2:
+       for_each_online_cpu(cpu)
+               qedi_percpu_thread_destroy(cpu);
+       unregister_hotcpu_notifier(&qedi_cpu_notifier);
+       iscsi_unregister_transport(&qedi_iscsi_transport);
+exit_qedi_init_1:
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_exit();
+#endif
+       qed_put_iscsi_ops();
+exit_qedi_init_0:
+       return rc;
+}
+
+static void __exit qedi_cleanup(void)
+{
+       unsigned int cpu = 0;
+
+       for_each_online_cpu(cpu)
+               qedi_percpu_thread_destroy(cpu);
+
+       pci_unregister_driver(&qedi_pci_driver);
+       unregister_hotcpu_notifier(&qedi_cpu_notifier);
+       iscsi_unregister_transport(&qedi_iscsi_transport);
+
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_exit();
+#endif
+       qed_put_iscsi_ops();
+}
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_VERSION(QEDI_MODULE_VERSION);
+module_init(qedi_init);
+module_exit(qedi_cleanup);
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
new file mode 100644 (file)
index 0000000..b10c48b
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+#include "qedi_dbg.h"
+
+static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+
+       return iscsi_host_priv(shost);
+}
+
+static ssize_t qedi_show_port_state(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+
+       if (atomic_read(&qedi->link_state) == QEDI_LINK_UP)
+               return sprintf(buf, "Online\n");
+       else
+               return sprintf(buf, "Linkdown\n");
+}
+
+static ssize_t qedi_show_speed(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+       struct qed_link_output if_link;
+
+       qedi_ops->common->get_link(qedi->cdev, &if_link);
+
+       return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
+}
+
+static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
+static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
+
+struct device_attribute *qedi_shost_attrs[] = {
+       &dev_attr_port_state,
+       &dev_attr_speed,
+       NULL
+};
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
new file mode 100644 (file)
index 0000000..9543a1b
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#define QEDI_MODULE_VERSION    "8.10.3.0"
+#define QEDI_DRIVER_MAJOR_VER          8
+#define QEDI_DRIVER_MINOR_VER          10
+#define QEDI_DRIVER_REV_VER            3
+#define QEDI_DRIVER_ENG_VER            0
index fe7469c901f76ac2d46006eeb208f3c9398d3ee1..47eb4d545d13c5f9b80149f162b04756108cd654 100644 (file)
@@ -1988,9 +1988,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        scsi_qla_host_t *vha = NULL;
        struct qla_hw_data *ha = base_vha->hw;
-       uint16_t options = 0;
        int     cnt;
        struct req_que *req = ha->req_q_map[0];
+       struct qla_qpair *qpair;
 
        ret = qla24xx_vport_create_req_sanity_check(fc_vport);
        if (ret) {
@@ -2075,15 +2075,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        qlt_vport_create(vha, ha);
        qla24xx_vport_disable(fc_vport, disable);
 
-       if (ha->flags.cpu_affinity_enabled) {
-               req = ha->req_q_map[1];
-               ql_dbg(ql_dbg_multiq, vha, 0xc000,
-                   "Request queue %p attached with "
-                   "VP[%d], cpu affinity =%d\n",
-                   req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
-               goto vport_queue;
-       } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+       if (!ql2xmqsupport || !ha->npiv_info)
                goto vport_queue;
+
        /* Create a request queue in QoS mode for the vport */
        for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
                if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
@@ -2095,20 +2089,20 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        }
 
        if (qos) {
-               ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
-                       qos);
-               if (!ret)
+               qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+               if (!qpair)
                        ql_log(ql_log_warn, vha, 0x7084,
-                           "Can't create request queue for VP[%d]\n",
+                           "Can't create qpair for VP[%d]\n",
                            vha->vp_idx);
                else {
                        ql_dbg(ql_dbg_multiq, vha, 0xc001,
-                           "Request Que:%d Q0s: %d) created for VP[%d]\n",
-                           ret, qos, vha->vp_idx);
+                           "Queue pair: %d Qos: %d) created for VP[%d]\n",
+                           qpair->id, qos, vha->vp_idx);
                        ql_dbg(ql_dbg_user, vha, 0x7085,
-                           "Request Que:%d Q0s: %d) created for VP[%d]\n",
-                           ret, qos, vha->vp_idx);
-                       req = ha->req_q_map[ret];
+                           "Queue pair: %d (Qos: %d) created for VP[%d]\n",
+                           qpair->id, qos, vha->vp_idx);
+                       req = qpair->req;
+                       vha->qpair = qpair;
                }
        }
 
@@ -2162,10 +2156,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
        clear_bit(vha->vp_idx, ha->vp_idx_map);
        mutex_unlock(&ha->vport_lock);
 
-       if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
-               if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
+       if (vha->qpair->vp_idx == vha->vp_idx) {
+               if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
                        ql_log(ql_log_warn, vha, 0x7087,
-                           "Queue delete failed.\n");
+                           "Queue Pair delete failed.\n");
        }
 
        ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
index 45af34ddc43297d06c8c7cf44425a1a7cee4cc7f..21d9fb7fc88796cbaa09fbfa160b9b20c17e2015 100644 (file)
@@ -11,7 +11,7 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0191       | 0x0146        |
+ * | Module Init and Probe        |       0x0193       | 0x0146        |
  * |                              |                    | 0x015b-0x0160 |
  * |                              |                    | 0x016e        |
  * | Mailbox commands             |       0x1199       | 0x1193        |
@@ -58,7 +58,7 @@
  * |                              |                    | 0xb13a,0xb142 |
  * |                              |                    | 0xb13c-0xb140 |
  * |                              |                    | 0xb149        |
- * | MultiQ                       |       0xc00c       |               |
+ * | MultiQ                       |       0xc010       |               |
  * | Misc                         |       0xd301       | 0xd031-0xd0ff |
  * |                              |                    | 0xd101-0xd1fe |
  * |                              |                    | 0xd214-0xd2fe |
index 5236e3f2a06a432740316cb74e1dea708c5fc439..f7df01b76714e09dc919cbb9660b66bed603d6bc 100644 (file)
@@ -401,6 +401,7 @@ typedef struct srb {
        uint16_t type;
        char *name;
        int iocbs;
+       struct qla_qpair *qpair;
        union {
                struct srb_iocb iocb_cmd;
                struct bsg_job *bsg_job;
@@ -2719,6 +2720,7 @@ struct isp_operations {
 
        int (*get_flash_version) (struct scsi_qla_host *, void *);
        int (*start_scsi) (srb_t *);
+       int (*start_scsi_mq) (srb_t *);
        int (*abort_isp) (struct scsi_qla_host *);
        int (*iospace_config)(struct qla_hw_data*);
        int (*initialize_adapter)(struct scsi_qla_host *);
@@ -2730,8 +2732,10 @@ struct isp_operations {
 #define QLA_MSIX_FW_MODE(m)    (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
 #define QLA_MSIX_FW_MODE_1(m)  (QLA_MSIX_FW_MODE(m) == 1)
 
-#define QLA_MSIX_DEFAULT       0x00
-#define QLA_MSIX_RSP_Q         0x01
+#define QLA_MSIX_DEFAULT               0x00
+#define QLA_MSIX_RSP_Q                 0x01
+#define QLA_ATIO_VECTOR                0x02
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q    0x03
 
 #define QLA_MIDX_DEFAULT       0
 #define QLA_MIDX_RSP_Q         1
@@ -2745,9 +2749,11 @@ struct scsi_qla_host;
 
 struct qla_msix_entry {
        int have_irq;
+       int in_use;
        uint32_t vector;
        uint16_t entry;
-       struct rsp_que *rsp;
+       char name[30];
+       void *handle;
        struct irq_affinity_notify irq_notify;
        int cpuid;
 };
@@ -2872,7 +2878,6 @@ struct rsp_que {
        struct qla_msix_entry *msix;
        struct req_que *req;
        srb_t *status_srb; /* status continuation entry */
-       struct work_struct q_work;
 
        dma_addr_t  dma_fx00;
        response_t *ring_fx00;
@@ -2909,6 +2914,37 @@ struct req_que {
        uint8_t req_pkt[REQUEST_ENTRY_SIZE];
 };
 
+/* Queue pair data structure */
+struct qla_qpair {
+       spinlock_t qp_lock;
+       atomic_t ref_count;
+       /* distill these fields down to 'online=0/1'
+        * ha->flags.eeh_busy
+        * ha->flags.pci_channel_io_perm_failure
+        * base_vha->loop_state
+        */
+       uint32_t online:1;
+       /* move vha->flags.difdix_supported here */
+       uint32_t difdix_supported:1;
+       uint32_t delete_in_progress:1;
+
+       uint16_t id;                    /* qp number used with FW */
+       uint16_t num_active_cmd;        /* cmds down at firmware */
+       cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */
+       uint16_t vp_idx;                /* vport ID */
+
+       mempool_t *srb_mempool;
+
+       /* TODO (future rework): move the queues here instead of pointers */
+       struct req_que *req;
+       struct rsp_que *rsp;
+       struct atio_que *atio;
+       struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
+       struct qla_hw_data *hw;
+       struct work_struct q_work;
+       struct list_head qp_list_elem; /* vha->qp_list */
+};
+
 /* Place holder for FW buffer parameters */
 struct qlfc_fw {
        void *fw_buf;
@@ -3004,7 +3040,6 @@ struct qla_hw_data {
                uint32_t        chip_reset_done         :1;
                uint32_t        running_gold_fw         :1;
                uint32_t        eeh_busy                :1;
-               uint32_t        cpu_affinity_enabled    :1;
                uint32_t        disable_msix_handshake  :1;
                uint32_t        fcp_prio_enabled        :1;
                uint32_t        isp82xx_fw_hung:1;
@@ -3061,10 +3096,15 @@ struct qla_hw_data {
        uint8_t         mqenable;
        struct req_que **req_q_map;
        struct rsp_que **rsp_q_map;
+       struct qla_qpair **queue_pair_map;
        unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
        unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+       unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+               / sizeof(unsigned long)];
        uint8_t         max_req_queues;
        uint8_t         max_rsp_queues;
+       uint8_t         max_qpairs;
+       struct qla_qpair *base_qpair;
        struct qla_npiv_entry *npiv_info;
        uint16_t        nvram_npiv_size;
 
@@ -3328,6 +3368,7 @@ struct qla_hw_data {
 
        struct mutex vport_lock;        /* Virtual port synchronization */
        spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+       struct mutex mq_lock;        /* multi-queue synchronization */
        struct completion mbx_cmd_comp; /* Serialize mbx access */
        struct completion mbx_intr_comp;  /* Used for completion notification */
        struct completion dcbx_comp;    /* For set port config notification */
@@ -3608,6 +3649,7 @@ typedef struct scsi_qla_host {
 
                uint32_t        fw_tgt_reported:1;
                uint32_t        bbcr_enable:1;
+               uint32_t        qpairs_available:1;
        } flags;
 
        atomic_t        loop_state;
@@ -3646,6 +3688,7 @@ typedef struct scsi_qla_host {
 #define FX00_TARGET_SCAN       24
 #define FX00_CRITEMP_RECOVERY  25
 #define FX00_HOST_INFO_RESEND  26
+#define QPAIR_ONLINE_CHECK_NEEDED      27
 
        unsigned long   pci_flags;
 #define PFLG_DISCONNECTED      0       /* PCI device removed */
@@ -3704,10 +3747,13 @@ typedef struct scsi_qla_host {
        /* List of pending PLOGI acks, protected by hw lock */
        struct list_head        plogi_ack_list;
 
+       struct list_head        qp_list;
+
        uint32_t        vp_abort_cnt;
 
        struct fc_vport *fc_vport;      /* holds fc_vport * for each vport */
        uint16_t        vp_idx;         /* vport ID */
+       struct qla_qpair *qpair;        /* base qpair */
 
        unsigned long           vp_flags;
 #define VP_IDX_ACQUIRED                0       /* bit no 0 */
@@ -3763,6 +3809,23 @@ struct qla_tgt_vp_map {
        scsi_qla_host_t *vha;
 };
 
+struct qla2_sgx {
+       dma_addr_t              dma_addr;       /* OUT */
+       uint32_t                dma_len;        /* OUT */
+
+       uint32_t                tot_bytes;      /* IN */
+       struct scatterlist      *cur_sg;        /* IN */
+
+       /* for bookkeeping, bzero on initial invocation */
+       uint32_t                bytes_consumed;
+       uint32_t                num_bytes;
+       uint32_t                tot_partial;
+
+       /* for debugging */
+       uint32_t                num_sg;
+       srb_t                   *sp;
+};
+
 /*
  * Macros to help code, maintain, etc.
  */
@@ -3775,21 +3838,34 @@ struct qla_tgt_vp_map {
                (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
                         test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
 
-#define QLA_VHA_MARK_BUSY(__vha, __bail) do {               \
-       atomic_inc(&__vha->vref_count);                      \
-       mb();                                                \
-       if (__vha->flags.delete_progress) {                  \
-               atomic_dec(&__vha->vref_count);              \
-               __bail = 1;                                  \
-       } else {                                             \
-               __bail = 0;                                  \
-       }                                                    \
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do {          \
+       atomic_inc(&__vha->vref_count);                 \
+       mb();                                           \
+       if (__vha->flags.delete_progress) {             \
+               atomic_dec(&__vha->vref_count);         \
+               __bail = 1;                             \
+       } else {                                        \
+               __bail = 0;                             \
+       }                                               \
 } while (0)
 
-#define QLA_VHA_MARK_NOT_BUSY(__vha) do {                   \
-       atomic_dec(&__vha->vref_count);                      \
+#define QLA_VHA_MARK_NOT_BUSY(__vha)                   \
+       atomic_dec(&__vha->vref_count);
+
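+/* Pin a queue pair against deletion: __bail is set (and the reference
+ * dropped again) when a delete is already in progress.
+ */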
+#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do {      \
+       atomic_inc(&__qpair->ref_count);                \
+       mb();                                           \
+       if (__qpair->delete_in_progress) {              \
+               atomic_dec(&__qpair->ref_count);        \
+               __bail = 1;                             \
+       } else {                                        \
+               __bail = 0;                             \
+       }                                               \
 } while (0)
 
+#define QLA_QPAIR_MARK_NOT_BUSY(__qpair)               \
+       atomic_dec(&__qpair->ref_count);
+
 /*
  * qla2x00 local function return status codes
  */
index c51d9f3359e3286f272a15a8442c8b7a2fcc1a13..afa0116a163b12b5f8663a52bee5cc2ec846541d 100644 (file)
@@ -91,12 +91,17 @@ extern int
 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
 extern int qla2x00_init_rings(scsi_qla_host_t *);
 extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+       int, int);
+extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
 
 /*
  * Global Data in qla_os.c source file.
  */
 extern char qla2x00_version_str[];
 
+extern struct kmem_cache *srb_cachep;
+
 extern int ql2xlogintimeout;
 extern int qlport_down_retry;
 extern int ql2xplogiabsentdevice;
@@ -105,8 +110,7 @@ extern int ql2xfdmienable;
 extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
 extern int ql2xiidmaenable;
-extern int ql2xmaxqueues;
-extern int ql2xmultique_tag;
+extern int ql2xmqsupport;
 extern int ql2xfwloadbin;
 extern int ql2xetsenable;
 extern int ql2xshiftctondsd;
@@ -172,6 +176,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla2x00_sp_compl(void *, void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *, void *);
+extern void qla2xxx_qpair_sp_compl(void *, void *, int);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -220,6 +227,8 @@ extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
 extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
+       uint16_t, struct req_que *);
 extern int qla2x00_start_scsi(srb_t *sp);
 extern int qla24xx_start_scsi(srb_t *sp);
 int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
@@ -227,6 +236,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
 extern int qla2x00_start_sp(srb_t *);
 extern int qla24xx_dif_start_scsi(srb_t *);
 extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern int qla2xxx_dif_start_scsi_mq(srb_t *);
 extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
@@ -237,7 +247,10 @@ extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
        uint32_t *, uint16_t, struct qla_tgt_cmd *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
        uint32_t *, uint16_t, struct qla_tgt_cmd *);
-
+extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
+extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
+       struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -468,6 +481,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
 extern void
 qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
        uint32_t);
+extern irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id);
 
 /*
  * Global Function Prototypes in qla_sup.c source file.
@@ -603,15 +618,18 @@ extern int qla2x00_dfs_setup(scsi_qla_host_t *);
 extern int qla2x00_dfs_remove(scsi_qla_host_t *);
 
 /* Global function prototypes for multi-q */
-extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
+       struct qla_msix_entry *, int);
 extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
 extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
        uint16_t, int, uint8_t);
 extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
-       uint16_t, int);
+       uint16_t, struct qla_qpair *);
+
 extern void qla2x00_init_response_q_entries(struct rsp_que *);
 extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_delete_queues(struct scsi_qla_host *);
 extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
 extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
index 5b09296b46a3058f9c938990274d158b508af884..632d5f30386ab0ae529036c292f3c1c8e64162ca 100644 (file)
@@ -1769,8 +1769,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
        if (req->outstanding_cmds)
                return QLA_SUCCESS;
 
-       if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
-           (ql2xmultique_tag || ql2xmaxqueues > 1)))
+       if (!IS_FWI2_CAPABLE(ha))
                req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
        else {
                if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
@@ -4248,10 +4247,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
        struct req_que *req;
        struct rsp_que *rsp;
 
-       if (vha->hw->flags.cpu_affinity_enabled)
-               req = vha->hw->req_q_map[0];
-       else
-               req = vha->req;
+       req = vha->req;
        rsp = req->rsp;
 
        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -6040,10 +6036,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
                return -EINVAL;
 
        rval = qla2x00_fw_ready(base_vha);
-       if (ha->flags.cpu_affinity_enabled)
-               req = ha->req_q_map[0];
+       if (vha->qpair)
+               req = vha->qpair->req;
        else
-               req = vha->req;
+               req = ha->req_q_map[0];
        rsp = req->rsp;
 
        if (rval == QLA_SUCCESS) {
@@ -6725,3 +6721,162 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
 
        return ret;
 }
+
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx)
+{
+       int rsp_id = 0;
+       int  req_id = 0;
+       int i;
+       struct qla_hw_data *ha = vha->hw;
+       uint16_t qpair_id = 0;
+       struct qla_qpair *qpair = NULL;
+       struct qla_msix_entry *msix;
+
+       if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
+               ql_log(ql_log_warn, vha, 0x0181,
+                   "FW/Driver is not multi-queue capable.\n");
+               return NULL;
+       }
+
+       if (ql2xmqsupport) {
+               qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+               if (!qpair) {
+                       ql_log(ql_log_warn, vha, 0x0182,
+                           "Failed to allocate memory for queue pair.\n");
+                       return NULL;
+               }
+
+               qpair->hw = vha->hw;
+
+               /* Assign an available queue pair id */
+               mutex_lock(&ha->mq_lock);
+               qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
+               if (qpair_id >= ha->max_qpairs) {
+                       mutex_unlock(&ha->mq_lock);
+                       ql_log(ql_log_warn, vha, 0x0183,
+                           "No resources to create additional queue pair.\n");
+                       goto fail_qid_map;
+               }
+               set_bit(qpair_id, ha->qpair_qid_map);
+               ha->queue_pair_map[qpair_id] = qpair;
+               qpair->id = qpair_id;
+               qpair->vp_idx = vp_idx;
+
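+               /* Claim the first unused MSI-X vector; the IRQ itself is
+                * requested later, when the qpair's response queue is
+                * created.
+                */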
+               for (i = 0; i < ha->msix_count; i++) {
+                       msix = &ha->msix_entries[i];
+                       if (msix->in_use)
+                               continue;
+                       qpair->msix = msix;
+                       ql_dbg(ql_dbg_multiq, vha, 0xc00f,
+                           "Vector %x selected for qpair\n", msix->vector);
+                       break;
+               }
+               if (!qpair->msix) {
+                       ql_log(ql_log_warn, vha, 0x0184,
+                           "Out of MSI-X vectors!\n");
+                       goto fail_msix;
+               }
+
+               qpair->msix->in_use = 1;
+               list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+
+               mutex_unlock(&ha->mq_lock);
+
+               /* Create response queue first */
+               rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
+               if (!rsp_id) {
+                       ql_log(ql_log_warn, vha, 0x0185,
+                           "Failed to create response queue.\n");
+                       goto fail_rsp;
+               }
+
+               qpair->rsp = ha->rsp_q_map[rsp_id];
+
+               /* Create request queue */
+               req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
+               if (!req_id) {
+                       ql_log(ql_log_warn, vha, 0x0186,
+                           "Failed to create request queue.\n");
+                       goto fail_req;
+               }
+
+               qpair->req = ha->req_q_map[req_id];
+               qpair->rsp->req = qpair->req;
+
+               if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+                       if (ha->fw_attributes & BIT_4)
+                               qpair->difdix_supported = 1;
+               }
+
+               qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+               if (!qpair->srb_mempool) {
+                       ql_log(ql_log_warn, vha, 0x0191,
+                           "Failed to create srb mempool for qpair %d\n",
+                           qpair->id);
+                       goto fail_mempool;
+               }
+
+               /* Mark as online */
+               qpair->online = 1;
+
+               if (!vha->flags.qpairs_available)
+                       vha->flags.qpairs_available = 1;
+
+               ql_dbg(ql_dbg_multiq, vha, 0xc00d,
+                   "Request/Response queue pair created, id %d\n",
+                   qpair->id);
+               ql_dbg(ql_dbg_init, vha, 0x0187,
+                   "Request/Response queue pair created, id %d\n",
+                   qpair->id);
+       }
+       return qpair;
+
+fail_mempool:
+fail_req:
+       qla25xx_delete_rsp_que(vha, qpair->rsp);
+fail_rsp:
+       mutex_lock(&ha->mq_lock);
+       qpair->msix->in_use = 0;
+       list_del(&qpair->qp_list_elem);
+       if (list_empty(&vha->qp_list))
+               vha->flags.qpairs_available = 0;
+fail_msix:
+       ha->queue_pair_map[qpair_id] = NULL;
+       clear_bit(qpair_id, ha->qpair_qid_map);
+       mutex_unlock(&ha->mq_lock);
+fail_qid_map:
+       kfree(qpair);
+       return NULL;
+}
+
+int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+{
+       int ret;
+       struct qla_hw_data *ha = qpair->hw;
+
+       qpair->delete_in_progress = 1;
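+       /* delete_in_progress fences off new users (see
+        * QLA_QPAIR_MARK_BUSY); wait for existing references to drain.
+        */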
+       while (atomic_read(&qpair->ref_count))
+               msleep(500);
+
+       ret = qla25xx_delete_req_que(vha, qpair->req);
+       if (ret != QLA_SUCCESS)
+               goto fail;
+       ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
+       if (ret != QLA_SUCCESS)
+               goto fail;
+
+       mutex_lock(&ha->mq_lock);
+       ha->queue_pair_map[qpair->id] = NULL;
+       clear_bit(qpair->id, ha->qpair_qid_map);
+       list_del(&qpair->qp_list_elem);
+       if (list_empty(&vha->qp_list))
+               vha->flags.qpairs_available = 0;
+       mempool_destroy(qpair->srb_mempool);
+       kfree(qpair);
+       mutex_unlock(&ha->mq_lock);
+
+       return QLA_SUCCESS;
+fail:
+       return ret;
+}
index edc48f3b8230cd60b9b3df25bbf87394f5ad6676..44e404583c86fca78d50be48c12129c2f89ad695 100644 (file)
@@ -215,6 +215,36 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
            test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
 }
 
+static inline srb_t *
+qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+{
+       srb_t *sp = NULL;
+       uint8_t bail;
+
+       QLA_QPAIR_MARK_BUSY(qpair, bail);
+       if (unlikely(bail))
+               return NULL;
+
+       sp = mempool_alloc(qpair->srb_mempool, flag);
+       if (!sp)
+               goto done;
+
+       memset(sp, 0, sizeof(*sp));
+       sp->fcport = fcport;
+       sp->iocbs = 1;
+done:
+       if (!sp)
+               QLA_QPAIR_MARK_NOT_BUSY(qpair);
+       return sp;
+}
+
+static inline void
+qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+{
+       mempool_free(sp, qpair->srb_mempool);
+       QLA_QPAIR_MARK_NOT_BUSY(qpair);
+}
+
 static inline srb_t *
 qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
 {
index 221ad89078938d37227bd385f55a5a5b74675c0b..58e49a3e1de8bcc30b448a889ab8f4b0144fd982 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <scsi/scsi_tcq.h>
 
-static void qla25xx_set_que(srb_t *, struct rsp_que **);
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
  * @cmd: SCSI command
@@ -143,7 +142,7 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
        return (cont_pkt);
 }
 
-static inline int
+inline int
 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
 {
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -693,10 +692,11 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
  * @sp: SRB command to process
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
+ * @req: pointer to request queue
  */
-static inline void
+inline void
 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
-    uint16_t tot_dsds)
+       uint16_t tot_dsds, struct req_que *req)
 {
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
@@ -745,7 +745,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -845,24 +845,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
        }
 }
 
-struct qla2_sgx {
-       dma_addr_t              dma_addr;       /* OUT */
-       uint32_t                dma_len;        /* OUT */
-
-       uint32_t                tot_bytes;      /* IN */
-       struct scatterlist      *cur_sg;        /* IN */
-
-       /* for book keeping, bzero on initial invocation */
-       uint32_t                bytes_consumed;
-       uint32_t                num_bytes;
-       uint32_t                tot_partial;
-
-       /* for debugging */
-       uint32_t                num_sg;
-       srb_t                   *sp;
-};
-
-static int
+int
 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
 {
@@ -1207,7 +1190,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
  */
-static inline int
+inline int
 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
 {
@@ -1436,8 +1419,8 @@ qla24xx_start_scsi(srb_t *sp)
        struct qla_hw_data *ha = vha->hw;
 
        /* Setup device pointers. */
-       qla25xx_set_que(sp, &rsp);
        req = vha->req;
+       rsp = req->rsp;
 
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;
@@ -1523,12 +1506,10 @@ qla24xx_start_scsi(srb_t *sp)
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
        /* Build IOCB segments */
-       qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+       qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
 
        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
-       /* Specify response queue number where completion should happen */
-       cmd_pkt->entry_status = (uint8_t) rsp->id;
        wmb();
        /* Adjust ring index. */
        req->ring_index++;
@@ -1597,9 +1578,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
        }
 
        /* Setup device pointers. */
-
-       qla25xx_set_que(sp, &rsp);
        req = vha->req;
+       rsp = req->rsp;
 
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;
@@ -1764,18 +1744,365 @@ queuing_error:
        return QLA_FUNCTION_FAILED;
 }
 
-
-static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+/**
+ * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla2xxx_start_scsi_mq(srb_t *sp)
 {
+       int             nseg;
+       unsigned long   flags;
+       uint32_t        *clr_ptr;
+       uint32_t        index;
+       uint32_t        handle;
+       struct cmd_type_7 *cmd_pkt;
+       uint16_t        cnt;
+       uint16_t        req_cnt;
+       uint16_t        tot_dsds;
+       struct req_que *req = NULL;
+       struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-       struct qla_hw_data *ha = sp->fcport->vha->hw;
-       int affinity = cmd->request->cpu;
+       struct scsi_qla_host *vha = sp->fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_qpair *qpair = sp->qpair;
+
+       /* Setup qpair pointers */
+       rsp = qpair->rsp;
+       req = qpair->req;
+
+       /* So we know we haven't pci_map'ed anything yet */
+       tot_dsds = 0;
+
+       /* Send marker if required */
+       if (vha->marker_needed != 0) {
+               if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+                   QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+               vha->marker_needed = 0;
+       }
+
+       /* Acquire qpair specific lock */
+       spin_lock_irqsave(&qpair->qp_lock, flags);
+
+       /* Check for room in outstanding command list. */
+       handle = req->current_outstanding_cmd;
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
+               handle++;
+               if (handle == req->num_outstanding_cmds)
+                       handle = 1;
+               if (!req->outstanding_cmds[handle])
+                       break;
+       }
+       if (index == req->num_outstanding_cmds)
+               goto queuing_error;
+
+       /* Map the sg table so we have an accurate count of sg entries needed */
+       if (scsi_sg_count(cmd)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+                   scsi_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+       } else
+               nseg = 0;
+
+       tot_dsds = nseg;
+       req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
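+       /* Refresh the free-entry count from the hardware out pointer
+        * (or its shadow copy) before deciding whether the IOCB fits.
+        */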
+       if (req->cnt < (req_cnt + 2)) {
+               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+                   RD_REG_DWORD_RELAXED(req->req_q_out);
+               if (req->ring_index < cnt)
+                       req->cnt = cnt - req->ring_index;
+               else
+                       req->cnt = req->length -
+                               (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+       }
+
+       /* Build command packet. */
+       req->current_outstanding_cmd = handle;
+       req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
+       cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+       req->cnt -= req_cnt;
+
+       cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+       cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+       /* Zero out remaining portion of packet. */
+       /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
+       clr_ptr = (uint32_t *)cmd_pkt + 2;
+       memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+       cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+       /* Set NPORT-ID and LUN number*/
+       cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+       cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+       cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+       cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+       cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+       int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+       host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+       cmd_pkt->task = TSK_SIMPLE;
+
+       /* Load SCSI command packet. */
+       memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+       host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+       cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+       /* Build IOCB segments */
+       qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+       /* Set total data segment count. */
+       cmd_pkt->entry_count = (uint8_t)req_cnt;
+       wmb();
+       /* Adjust ring index. */
+       req->ring_index++;
+       if (req->ring_index == req->length) {
+               req->ring_index = 0;
+               req->ring_ptr = req->ring;
+       } else
+               req->ring_ptr++;
+
+       sp->flags |= SRB_DMA_VALID;
+
+       /* Set chip new ring index. */
+       WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (vha->flags.process_response_queue &&
+               rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+               qla24xx_process_response_queue(vha, rsp);
+
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
+       return QLA_SUCCESS;
+
+queuing_error:
+       if (tot_dsds)
+               scsi_dma_unmap(cmd);
+
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+       return QLA_FUNCTION_FAILED;
+}
+
+/**
+ * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2xxx_dif_start_scsi_mq(srb_t *sp)
+{
+       int                     nseg;
+       unsigned long           flags;
+       uint32_t                *clr_ptr;
+       uint32_t                index;
+       uint32_t                handle;
+       uint16_t                cnt;
+       uint16_t                req_cnt = 0;
+       uint16_t                tot_dsds;
+       uint16_t                tot_prot_dsds;
+       uint16_t                fw_prot_opts = 0;
+       struct req_que          *req = NULL;
+       struct rsp_que          *rsp = NULL;
+       struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
+       struct scsi_qla_host    *vha = sp->fcport->vha;
+       struct qla_hw_data      *ha = vha->hw;
+       struct cmd_type_crc_2   *cmd_pkt;
+       uint32_t                status = 0;
+       struct qla_qpair        *qpair = sp->qpair;
+
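+/* set once ring space has been claimed, so queuing_error can return it */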
+#define QDSS_GOT_Q_SPACE       BIT_0
+
+       /* Check for host side state */
+       if (!qpair->online) {
+               cmd->result = DID_NO_CONNECT << 16;
+               return QLA_INTERFACE_ERROR;
+       }
+
+       if (!qpair->difdix_supported &&
+               scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+               cmd->result = DID_NO_CONNECT << 16;
+               return QLA_INTERFACE_ERROR;
+       }
+
+       /* Only process protection or >16 cdb in this routine */
+       if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+               if (cmd->cmd_len <= 16)
+                       return qla2xxx_start_scsi_mq(sp);
+       }
+
+       /* Setup qpair pointers */
+       rsp = qpair->rsp;
+       req = qpair->req;
+
+       /* So we know we haven't pci_map'ed anything yet */
+       tot_dsds = 0;
+
+       /* Send marker if required */
+       if (vha->marker_needed != 0) {
+               if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+                   QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+               vha->marker_needed = 0;
+       }
+
+       /* Acquire ring specific lock */
+       spin_lock_irqsave(&qpair->qp_lock, flags);
+
+       /* Check for room in outstanding command list. */
+       handle = req->current_outstanding_cmd;
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
+               handle++;
+               if (handle == req->num_outstanding_cmds)
+                       handle = 1;
+               if (!req->outstanding_cmds[handle])
+                       break;
+       }
+
+       if (index == req->num_outstanding_cmds)
+               goto queuing_error;
+
+       /* Compute number of required data segments */
+       /* Map the sg table so we have an accurate count of sg entries needed */
+       if (scsi_sg_count(cmd)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+                   scsi_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+               else
+                       sp->flags |= SRB_DMA_VALID;
+
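+               /* When the HBA inserts or strips protection data, DSDs
+                * must cover sector-sized blocks, so re-count the
+                * segments block by block.
+                */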
+               if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+                   (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+                       struct qla2_sgx sgx;
+                       uint32_t        partial;
+
+                       memset(&sgx, 0, sizeof(struct qla2_sgx));
+                       sgx.tot_bytes = scsi_bufflen(cmd);
+                       sgx.cur_sg = scsi_sglist(cmd);
+                       sgx.sp = sp;
+
+                       nseg = 0;
+                       while (qla24xx_get_one_block_sg(
+                           cmd->device->sector_size, &sgx, &partial))
+                               nseg++;
+               }
+       } else
+               nseg = 0;
+
+       /* number of required data segments */
+       tot_dsds = nseg;
+
+       /* Compute number of required protection segments */
+       if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+                   scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+               else
+                       sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+               if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+                   (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+                       nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+               }
+       } else {
+               nseg = 0;
+       }
+
+       req_cnt = 1;
+       /* Total Data and protection sg segment(s) */
+       tot_prot_dsds = nseg;
+       tot_dsds += nseg;
+       if (req->cnt < (req_cnt + 2)) {
+               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+                   RD_REG_DWORD_RELAXED(req->req_q_out);
+               if (req->ring_index < cnt)
+                       req->cnt = cnt - req->ring_index;
+               else
+                       req->cnt = req->length -
+                               (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+       }
+
+       status |= QDSS_GOT_Q_SPACE;
+
+       /* Build header part of command packet (excluding the OPCODE). */
+       req->current_outstanding_cmd = handle;
+       req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
+       cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+       req->cnt -= req_cnt;
+
+       /* Fill-in common area */
+       cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+       cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+       clr_ptr = (uint32_t *)cmd_pkt + 2;
+       memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+       /* Set NPORT-ID and LUN number*/
+       cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+       cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+       cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+       cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
 
-       if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
-               affinity < ha->max_rsp_queues - 1)
-               *rsp = ha->rsp_q_map[affinity + 1];
-        else
-               *rsp = ha->rsp_q_map[0];
+       int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+       host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+       /* Total Data and protection segment(s) */
+       cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+       /* Build IOCB segments and adjust for data protection segments */
+       if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+           req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+               QLA_SUCCESS)
+               goto queuing_error;
+
+       cmd_pkt->entry_count = (uint8_t)req_cnt;
+       cmd_pkt->timeout = cpu_to_le16(0);
+       wmb();
+
+       /* Adjust ring index. */
+       req->ring_index++;
+       if (req->ring_index == req->length) {
+               req->ring_index = 0;
+               req->ring_ptr = req->ring;
+       } else
+               req->ring_ptr++;
+
+       /* Set chip new ring index. */
+       WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (vha->flags.process_response_queue &&
+           rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+               qla24xx_process_response_queue(vha, rsp);
+
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+       return QLA_SUCCESS;
+
+queuing_error:
+       if (status & QDSS_GOT_Q_SPACE) {
+               req->outstanding_cmds[handle] = NULL;
+               req->cnt += req_cnt;
+       }
+       /* Cleanup will be performed by the caller (queuecommand) */
+
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
+       return QLA_FUNCTION_FAILED;
 }
 
 /* Generic Control-SRB manipulation functions. */
@@ -2664,7 +2991,7 @@ sufficient_dsds:
                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
                /* Build IOCB segments */
-               qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+               qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
 
                /* Set total data segment count. */
                cmd_pkt->entry_count = (uint8_t)req_cnt;
index 19f18485a854ff1dbe8a4db1105fca4b75009931..5093ca9b02ec52c8e70674f88205941cc0967d9f 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_bsg_fc.h>
 #include <scsi/scsi_eh.h>
@@ -2870,41 +2871,6 @@ out:
        return IRQ_HANDLED;
 }
 
-static irqreturn_t
-qla25xx_msix_rsp_q(int irq, void *dev_id)
-{
-       struct qla_hw_data *ha;
-       scsi_qla_host_t *vha;
-       struct rsp_que *rsp;
-       struct device_reg_24xx __iomem *reg;
-       unsigned long flags;
-       uint32_t hccr = 0;
-
-       rsp = (struct rsp_que *) dev_id;
-       if (!rsp) {
-               ql_log(ql_log_info, NULL, 0x505b,
-                   "%s: NULL response queue pointer.\n", __func__);
-               return IRQ_NONE;
-       }
-       ha = rsp->hw;
-       vha = pci_get_drvdata(ha->pdev);
-
-       /* Clear the interrupt, if enabled, for this response queue */
-       if (!ha->flags.disable_msix_handshake) {
-               reg = &ha->iobase->isp24;
-               spin_lock_irqsave(&ha->hardware_lock, flags);
-               WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
-               hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       }
-       if (qla2x00_check_reg32_for_disconnect(vha, hccr))
-               goto out;
-       queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
-
-out:
-       return IRQ_HANDLED;
-}
-
 static irqreturn_t
 qla24xx_msix_default(int irq, void *dev_id)
 {
@@ -3001,6 +2967,35 @@ qla24xx_msix_default(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id)
+{
+       struct qla_hw_data *ha;
+       struct qla_qpair *qpair;
+       struct device_reg_24xx __iomem *reg;
+       unsigned long flags;
+
+       qpair = dev_id;
+       if (!qpair) {
+               ql_log(ql_log_info, NULL, 0x505b,
+                   "%s: NULL response queue pointer.\n", __func__);
+               return IRQ_NONE;
+       }
+       ha = qpair->hw;
+
+       /* Clear the interrupt, if enabled, for this response queue */
+       if (unlikely(!ha->flags.disable_msix_handshake)) {
+               reg = &ha->iobase->isp24;
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       }
+
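+       /* Actual response processing is deferred to the per-qpair work item. */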
+       queue_work(ha->wq, &qpair->q_work);
+
+       return IRQ_HANDLED;
+}
+
 /* Interrupt handling helpers. */
 
 struct qla_init_msix_entry {
@@ -3008,69 +3003,28 @@ struct qla_init_msix_entry {
        irq_handler_t handler;
 };
 
-static struct qla_init_msix_entry msix_entries[3] = {
+static struct qla_init_msix_entry msix_entries[] = {
        { "qla2xxx (default)", qla24xx_msix_default },
        { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
-       { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
+       { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+       { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
 };
 
-static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+static struct qla_init_msix_entry qla82xx_msix_entries[] = {
        { "qla2xxx (default)", qla82xx_msix_default },
        { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
 };
 
-static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
-       { "qla2xxx (default)", qla24xx_msix_default },
-       { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
-       { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
-};
-
-static void
-qla24xx_disable_msix(struct qla_hw_data *ha)
-{
-       int i;
-       struct qla_msix_entry *qentry;
-       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-
-       for (i = 0; i < ha->msix_count; i++) {
-               qentry = &ha->msix_entries[i];
-               if (qentry->have_irq) {
-                       /* un-register irq cpu affinity notification */
-                       irq_set_affinity_notifier(qentry->vector, NULL);
-                       free_irq(qentry->vector, qentry->rsp);
-               }
-       }
-       pci_disable_msix(ha->pdev);
-       kfree(ha->msix_entries);
-       ha->msix_entries = NULL;
-       ha->flags.msix_enabled = 0;
-       ql_dbg(ql_dbg_init, vha, 0x0042,
-           "Disabled the MSI.\n");
-}
-
 static int
 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 #define MIN_MSIX_COUNT 2
-#define ATIO_VECTOR    2
        int i, ret;
-       struct msix_entry *entries;
        struct qla_msix_entry *qentry;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
-       entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
-                       GFP_KERNEL);
-       if (!entries) {
-               ql_log(ql_log_warn, vha, 0x00bc,
-                   "Failed to allocate memory for msix_entry.\n");
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < ha->msix_count; i++)
-               entries[i].entry = i;
-
-       ret = pci_enable_msix_range(ha->pdev,
-                                   entries, MIN_MSIX_COUNT, ha->msix_count);
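+       /* Let the PCI core allocate the vectors and spread them across
+        * CPUs (PCI_IRQ_AFFINITY); this replaces the open-coded
+        * msix_entry array used previously.
+        */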
+       ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
+                                   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (ret < 0) {
                ql_log(ql_log_fatal, vha, 0x00c7,
                    "MSI-X: Failed to enable support, "
@@ -3080,10 +3034,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
        } else if (ret < ha->msix_count) {
                ql_log(ql_log_warn, vha, 0x00c6,
                    "MSI-X: Failed to enable support "
-                   "-- %d/%d\n Retry with %d vectors.\n",
-                   ha->msix_count, ret, ret);
+                   "with %d vectors, using %d vectors.\n",
+                   ha->msix_count, ret);
                ha->msix_count = ret;
-               ha->max_rsp_queues = ha->msix_count - 1;
+               /* Recalculate queue values */
+               if (ha->mqiobase && ql2xmqsupport) {
+                       ha->max_req_queues = ha->msix_count - 1;
+
+                       /* ATIOQ needs 1 vector. That's 1 less QPair */
+                       if (QLA_TGT_MODE_ENABLED())
+                               ha->max_req_queues--;
+
+                       ha->max_rsp_queues = ha->max_req_queues;
+
+                       ha->max_qpairs = ha->max_req_queues - 1;
+                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+                           "Adjusted max number of queue pairs: %d.\n", ha->max_qpairs);
+               }
        }
        ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
                                ha->msix_count, GFP_KERNEL);
@@ -3097,20 +3064,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 
        for (i = 0; i < ha->msix_count; i++) {
                qentry = &ha->msix_entries[i];
-               qentry->vector = entries[i].vector;
-               qentry->entry = entries[i].entry;
+               qentry->vector = pci_irq_vector(ha->pdev, i);
+               qentry->entry = i;
                qentry->have_irq = 0;
-               qentry->rsp = NULL;
+               qentry->in_use = 0;
+               qentry->handle = NULL;
                qentry->irq_notify.notify  = qla_irq_affinity_notify;
                qentry->irq_notify.release = qla_irq_affinity_release;
                qentry->cpuid = -1;
        }
 
        /* Enable MSI-X vectors for the base queue */
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
                qentry = &ha->msix_entries[i];
-               qentry->rsp = rsp;
+               qentry->handle = rsp;
                rsp->msix = qentry;
+               scnprintf(qentry->name, sizeof(qentry->name), "%s",
+                   msix_entries[i].name);
                if (IS_P3P_TYPE(ha))
                        ret = request_irq(qentry->vector,
                                qla82xx_msix_entries[i].handler,
@@ -3122,6 +3092,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                if (ret)
                        goto msix_register_fail;
                qentry->have_irq = 1;
+               qentry->in_use = 1;
 
                /* Register for CPU affinity notification. */
                irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
@@ -3141,12 +3112,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
         * queue.
         */
        if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
-               qentry = &ha->msix_entries[ATIO_VECTOR];
-               qentry->rsp = rsp;
+               qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
                rsp->msix = qentry;
+               qentry->handle = rsp;
+               scnprintf(qentry->name, sizeof(qentry->name), "%s",
+                   msix_entries[QLA_ATIO_VECTOR].name);
+               qentry->in_use = 1;
                ret = request_irq(qentry->vector,
-                       qla83xx_msix_entries[ATIO_VECTOR].handler,
-                       0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+                       msix_entries[QLA_ATIO_VECTOR].handler,
+                       0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
                qentry->have_irq = 1;
        }
 
@@ -3155,7 +3129,7 @@ msix_register_fail:
                ql_log(ql_log_fatal, vha, 0x00cb,
                    "MSI-X: unable to register handler -- %x/%d.\n",
                    qentry->vector, ret);
-               qla24xx_disable_msix(ha);
+               qla2x00_free_irqs(vha);
                ha->mqenable = 0;
                goto msix_out;
        }
@@ -3163,11 +3137,13 @@ msix_register_fail:
        /* Enable MSI-X vector for response queue update for queue 0 */
        if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                if (ha->msixbase && ha->mqiobase &&
-                   (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+                   (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+                    ql2xmqsupport))
                        ha->mqenable = 1;
        } else
-               if (ha->mqiobase
-                   && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+               if (ha->mqiobase &&
+                   (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+                    ql2xmqsupport))
                        ha->mqenable = 1;
        ql_dbg(ql_dbg_multiq, vha, 0xc005,
            "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
@@ -3177,7 +3153,6 @@ msix_register_fail:
            ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
 
 msix_out:
-       kfree(entries);
        return ret;
 }
 
@@ -3230,7 +3205,7 @@ skip_msix:
            !IS_QLA27XX(ha))
                goto skip_msi;
 
-       ret = pci_enable_msi(ha->pdev);
+       ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
        if (!ret) {
                ql_dbg(ql_dbg_init, vha, 0x0038,
                    "MSI: Enabled.\n");
@@ -3275,6 +3250,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
        struct rsp_que *rsp;
+       struct qla_msix_entry *qentry;
+       int i;
 
        /*
         * We need to check that ha->rsp_q_map is valid in case we are called
@@ -3284,25 +3261,36 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
                return;
        rsp = ha->rsp_q_map[0];
 
-       if (ha->flags.msix_enabled)
-               qla24xx_disable_msix(ha);
-       else if (ha->flags.msi_enabled) {
-               free_irq(ha->pdev->irq, rsp);
-               pci_disable_msi(ha->pdev);
-       } else
-               free_irq(ha->pdev->irq, rsp);
-}
+       if (ha->flags.msix_enabled) {
+               for (i = 0; i < ha->msix_count; i++) {
+                       qentry = &ha->msix_entries[i];
+                       if (qentry->have_irq) {
+                               irq_set_affinity_notifier(qentry->vector, NULL);
+                               free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+                       }
+               }
+               kfree(ha->msix_entries);
+               ha->msix_entries = NULL;
+               ha->flags.msix_enabled = 0;
+               ql_dbg(ql_dbg_init, vha, 0x0042,
+                       "Disabled MSI-X.\n");
+       } else {
+               free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+       }
 
+       pci_free_irq_vectors(ha->pdev);
+}
 
-int qla25xx_request_irq(struct rsp_que *rsp)
+int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
+       struct qla_msix_entry *msix, int vector_type)
 {
-       struct qla_hw_data *ha = rsp->hw;
-       struct qla_init_msix_entry *intr = &msix_entries[2];
-       struct qla_msix_entry *msix = rsp->msix;
+       struct qla_init_msix_entry *intr = &msix_entries[vector_type];
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        int ret;
 
-       ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+       scnprintf(msix->name, sizeof(msix->name),
+           "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
+       ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
        if (ret) {
                ql_log(ql_log_fatal, vha, 0x00e6,
                    "MSI-X: Unable to register handler -- %x/%d.\n",
@@ -3310,7 +3298,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
                return ret;
        }
        msix->have_irq = 1;
-       msix->rsp = rsp;
+       msix->handle = qpair;
        return ret;
 }
 
@@ -3323,11 +3311,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
                container_of(notify, struct qla_msix_entry, irq_notify);
        struct qla_hw_data *ha;
        struct scsi_qla_host *base_vha;
+       struct rsp_que *rsp = e->handle;
 
        /* user is recommended to set mask to just 1 cpu */
        e->cpuid = cpumask_first(mask);
 
-       ha = e->rsp->hw;
+       ha = rsp->hw;
        base_vha = pci_get_drvdata(ha->pdev);
 
        ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3351,9 +3340,10 @@ static void qla_irq_affinity_release(struct kref *ref)
                container_of(ref, struct irq_affinity_notify, kref);
        struct qla_msix_entry *e =
                container_of(notify, struct qla_msix_entry, irq_notify);
-       struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+       struct rsp_que *rsp = e->handle;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
 
        ql_dbg(ql_dbg_init, base_vha, 0xffff,
-           "%s: host%ld: vector %d cpu %d \n", __func__,
+           "%s: host%ld: vector %d cpu %d\n", __func__,
            base_vha->host_no, e->vector, e->cpuid);
 }
index 23698c9986998a0a7279e6e256d4c377f6fceab9..2819ceb96041e5b97b234f115c9b35d4b4251ffe 100644 (file)
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
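+/* Mailbox commands that must remain usable while an ISP abort is
+ * pending; any other command is failed early with a timeout status.
+ */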
+static struct rom_cmd {
+       uint16_t cmd;
+} rom_cmds[] = {
+       { MBC_LOAD_RAM },
+       { MBC_EXECUTE_FIRMWARE },
+       { MBC_READ_RAM_WORD },
+       { MBC_MAILBOX_REGISTER_TEST },
+       { MBC_VERIFY_CHECKSUM },
+       { MBC_GET_FIRMWARE_VERSION },
+       { MBC_LOAD_RISC_RAM },
+       { MBC_DUMP_RISC_RAM },
+       { MBC_LOAD_RISC_RAM_EXTENDED },
+       { MBC_DUMP_RISC_RAM_EXTENDED },
+       { MBC_WRITE_RAM_WORD_EXTENDED },
+       { MBC_READ_RAM_EXTENDED },
+       { MBC_GET_RESOURCE_COUNTS },
+       { MBC_SET_FIRMWARE_OPTION },
+       { MBC_MID_INITIALIZE_FIRMWARE },
+       { MBC_GET_FIRMWARE_STATE },
+       { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
+       { MBC_GET_RETRY_COUNT },
+       { MBC_TRACE_CONTROL },
+};
+
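+/* Return 1 if @cmd is a ROM-level mailbox command that must still be
+ * allowed through while an ISP abort is in progress, 0 otherwise. */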
+static int is_rom_cmd(uint16_t cmd)
+{
+       int i;
+       struct rom_cmd *wc;
+
+       for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
+               wc = rom_cmds + i;
+               if (wc->cmd == cmd)
+                       return 1;
+       }
+
+       return 0;
+}
 
 /*
  * qla2x00_mailbox_command
@@ -92,6 +129,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                return QLA_FUNCTION_TIMEOUT;
        }
 
+       /* If an ISP abort is active, fail all but ROM commands with a timeout */
+       if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+           test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+           test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+           !is_rom_cmd(mcp->mb[0])) {
+               ql_log(ql_log_info, vha, 0x1005,
+                   "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
+                   mcp->mb[0]);
+               return QLA_FUNCTION_TIMEOUT;
+       }
+
        /*
         * Wait for active mailbox commands to finish by waiting at most tov
         * seconds. This is to serialize actual issuing of mailbox cmds during
@@ -178,6 +226,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+               wait_time = jiffies;
                if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
                    mcp->tov * HZ)) {
                        ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -186,6 +235,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                }
+               if (time_after(jiffies, wait_time + 5 * HZ))
+                       ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+                           command, jiffies_to_msecs(jiffies - wait_time));
        } else {
                ql_dbg(ql_dbg_mbx, vha, 0x1011,
                    "Cmd=%x Polling Mode.\n", command);
@@ -1194,12 +1246,17 @@ qla2x00_abort_command(srb_t *sp)
        fc_port_t       *fcport = sp->fcport;
        scsi_qla_host_t *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = vha->req;
+       struct req_que *req;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
            "Entered %s.\n", __func__);
 
+       if (vha->flags.qpairs_available && sp->qpair)
+               req = sp->qpair->req;
+       else
+               req = vha->req;
+
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
                if (req->outstanding_cmds[handle] == sp)
@@ -2152,10 +2209,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
            "Entered %s.\n", __func__);
 
-       if (ha->flags.cpu_affinity_enabled)
-               req = ha->req_q_map[0];
+       if (vha->vp_idx && vha->qpair)
+               req = vha->qpair->req;
        else
-               req = vha->req;
+               req = ha->req_q_map[0];
 
        lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
        if (lg == NULL) {
@@ -2435,10 +2492,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        }
        memset(lg, 0, sizeof(struct logio_entry_24xx));
 
-       if (ql2xmaxqueues > 1)
-               req = ha->req_q_map[0];
-       else
-               req = vha->req;
+       req = vha->req;
        lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        lg->entry_count = 1;
        lg->handle = MAKE_HANDLE(req->id, lg->handle);
@@ -2904,6 +2958,9 @@ qla24xx_abort_command(srb_t *sp)
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
            "Entered %s.\n", __func__);
 
+       if (vha->flags.qpairs_available && sp->qpair)
+               req = sp->qpair->req;
+
        if (ql2xasynctmfenable)
                return qla24xx_async_abort_command(sp);
 
@@ -2984,6 +3041,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
+       struct qla_qpair *qpair;
 
        vha = fcport->vha;
        ha = vha->hw;
@@ -2992,10 +3050,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
            "Entered %s.\n", __func__);
 
-       if (ha->flags.cpu_affinity_enabled)
-               rsp = ha->rsp_q_map[tag + 1];
-       else
+       if (vha->vp_idx && vha->qpair) {
+               /* NPIV port */
+               qpair = vha->qpair;
+               rsp = qpair->rsp;
+               req = qpair->req;
+       } else {
                rsp = req->rsp;
+       }
+
        tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
        if (tsk == NULL) {
                ql_log(ql_log_warn, vha, 0x1093,
index cf7ba52bae665fa482b8535f92e5fcb1dbe25244..c6d6f0d912ff75ffaf9b9d810f81af735e39549b 100644 (file)
@@ -540,9 +540,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        uint16_t que_id = rsp->id;
 
        if (rsp->msix && rsp->msix->have_irq) {
-               free_irq(rsp->msix->vector, rsp);
+               free_irq(rsp->msix->vector, rsp->msix->handle);
                rsp->msix->have_irq = 0;
-               rsp->msix->rsp = NULL;
+               rsp->msix->in_use = 0;
+               rsp->msix->handle = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
                sizeof(response_t), rsp->ring, rsp->dma);
@@ -573,7 +574,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
        return ret;
 }
 
-static int
+int
 qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 {
        int ret = -1;
@@ -596,34 +597,42 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;
+       struct qla_qpair *qpair, *tqpair;
 
-       /* Delete request queues */
-       for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
-               req = ha->req_q_map[cnt];
-               if (req && test_bit(cnt, ha->req_qid_map)) {
-                       ret = qla25xx_delete_req_que(vha, req);
-                       if (ret != QLA_SUCCESS) {
-                               ql_log(ql_log_warn, vha, 0x00ea,
-                                   "Couldn't delete req que %d.\n",
-                                   req->id);
-                               return ret;
+       if (ql2xmqsupport) {
+               list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
+                   qp_list_elem)
+                       qla2xxx_delete_qpair(vha, qpair);
+       } else {
+               /* Delete request queues */
+               for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+                       req = ha->req_q_map[cnt];
+                       if (req && test_bit(cnt, ha->req_qid_map)) {
+                               ret = qla25xx_delete_req_que(vha, req);
+                               if (ret != QLA_SUCCESS) {
+                                       ql_log(ql_log_warn, vha, 0x00ea,
+                                           "Couldn't delete req que %d.\n",
+                                           req->id);
+                                       return ret;
+                               }
                        }
                }
-       }
 
-       /* Delete response queues */
-       for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
-               rsp = ha->rsp_q_map[cnt];
-               if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
-                       ret = qla25xx_delete_rsp_que(vha, rsp);
-                       if (ret != QLA_SUCCESS) {
-                               ql_log(ql_log_warn, vha, 0x00eb,
-                                   "Couldn't delete rsp que %d.\n",
-                                   rsp->id);
-                               return ret;
+               /* Delete response queues */
+               for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+                       rsp = ha->rsp_q_map[cnt];
+                       if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+                               ret = qla25xx_delete_rsp_que(vha, rsp);
+                               if (ret != QLA_SUCCESS) {
+                                       ql_log(ql_log_warn, vha, 0x00eb,
+                                           "Couldn't delete rsp que %d.\n",
+                                           rsp->id);
+                                       return ret;
+                               }
                        }
                }
        }
+
        return ret;
 }
 
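The qpair branch above needs the _safe iterator because qla2xxx_delete_qpair() unlinks (and presumably frees) the entry it is handed; tqpair caches the next node before the body runs:

        /* illustrative only: plain list_for_each_entry() would step
         * through freed memory here */
        list_for_each_entry_safe(qpair, tqpair, &vha->qp_list, qp_list_elem)
                qla2xxx_delete_qpair(vha, qpair);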
@@ -659,10 +668,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        if (ret != QLA_SUCCESS)
                goto que_failed;
 
-       mutex_lock(&ha->vport_lock);
+       mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
-               mutex_unlock(&ha->vport_lock);
+               mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
@@ -708,7 +717,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        req->out_ptr = (void *)(req->ring + req->length);
-       mutex_unlock(&ha->vport_lock);
+       mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
@@ -724,9 +733,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00df,
                    "%s failed.\n", __func__);
-               mutex_lock(&ha->vport_lock);
+               mutex_lock(&ha->mq_lock);
                clear_bit(que_id, ha->req_qid_map);
-               mutex_unlock(&ha->vport_lock);
+               mutex_unlock(&ha->mq_lock);
                goto que_failed;
        }
 
@@ -741,20 +750,20 @@ failed:
 static void qla_do_work(struct work_struct *work)
 {
        unsigned long flags;
-       struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+       struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
        struct scsi_qla_host *vha;
-       struct qla_hw_data *ha = rsp->hw;
+       struct qla_hw_data *ha = qpair->hw;
 
-       spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+       spin_lock_irqsave(&qpair->qp_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
-       qla24xx_process_response_queue(vha, rsp);
-       spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
+       qla24xx_process_response_queue(vha, qpair->rsp);
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
 }
 
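qla_do_work() recovers its queue pair from the embedded work_struct via container_of(); the INIT_WORK() at the end of qla25xx_create_rsp_que() below wires the handler up. The producer side is not in this hunk, but would follow the standard shape:

        /* sketch: schedule deferred response processing for a qpair */
        if (qpair->hw->wq)
                queue_work(qpair->hw->wq, &qpair->q_work);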
 /* create response queue */
 int
 qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
-       uint8_t vp_idx, uint16_t rid, int req)
+       uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
 {
        int ret = 0;
        struct rsp_que *rsp = NULL;
@@ -779,28 +788,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
                goto que_failed;
        }
 
-       mutex_lock(&ha->vport_lock);
+       mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
-               mutex_unlock(&ha->vport_lock);
+               mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);
 
-       if (ha->flags.msix_enabled)
-               rsp->msix = &ha->msix_entries[que_id + 1];
-       else
-               ql_log(ql_log_warn, base_vha, 0x00e3,
-                   "MSIX not enabled.\n");
+       rsp->msix = qpair->msix;
 
        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
-           "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+           "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
@@ -812,23 +817,27 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;
 
+       /* Set option to indicate response queue creation */
+       options |= BIT_1;
+
        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        rsp->in_ptr = (void *)(rsp->ring + rsp->length);
-       mutex_unlock(&ha->vport_lock);
+       mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
-           "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+           "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
-           "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+           "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
 
-       ret = qla25xx_request_irq(rsp);
+       ret = qla25xx_request_irq(ha, qpair, qpair->msix,
+           QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
        if (ret)
                goto que_failed;
 
@@ -836,19 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00e7,
                    "%s failed.\n", __func__);
-               mutex_lock(&ha->vport_lock);
+               mutex_lock(&ha->mq_lock);
                clear_bit(que_id, ha->rsp_qid_map);
-               mutex_unlock(&ha->vport_lock);
+               mutex_unlock(&ha->mq_lock);
                goto que_failed;
        }
-       if (req >= 0)
-               rsp->req = ha->req_q_map[req];
-       else
-               rsp->req = NULL;
+       rsp->req = NULL;
 
        qla2x00_init_response_q_entries(rsp);
-       if (rsp->hw->wq)
-               INIT_WORK(&rsp->q_work, qla_do_work);
+       if (qpair->hw->wq)
+               INIT_WORK(&qpair->q_work, qla_do_work);
        return rsp->id;
 
 que_failed:
index 56d6142852a553ed9ad8011cb4c18a84e8656e0d..8521cfe302e9e3e72c7aaf1a4753ca75f953b972 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -30,7 +31,7 @@ static int apidev_major;
 /*
  * SRB allocation cache
  */
-static struct kmem_cache *srb_cachep;
+struct kmem_cache *srb_cachep;
 
 /*
  * CT6 CTX allocation cache
@@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
                "Enables iIDMA settings "
                "Default is 1 - perform iIDMA. 0 - no iIDMA.");
 
-int ql2xmaxqueues = 1;
-module_param(ql2xmaxqueues, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmaxqueues,
-               "Enables MQ settings "
-               "Default is 1 for single queue. Set it to number "
-               "of queues in MQ mode.");
-
-int ql2xmultique_tag;
-module_param(ql2xmultique_tag, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmultique_tag,
-               "Enables CPU affinity settings for the driver "
-               "Default is 0 for no affinity of request and response IO. "
-               "Set it to 1 to turn on the cpu affinity.");
+int ql2xmqsupport = 1;
+module_param(ql2xmqsupport, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmqsupport,
+               "Enable on demand multiple queue pairs support "
+               "Default is 1 for supported. "
+               "Set it to 0 to turn off mq qpair support.");
 
 int ql2xfwloadbin;
 module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
@@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
 static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
 
 struct scsi_host_template qla2xxx_driver_template = {
        .module                 = THIS_MODULE,
@@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = {
        .scan_finished          = qla2xxx_scan_finished,
        .scan_start             = qla2xxx_scan_start,
        .change_queue_depth     = scsi_change_queue_depth,
+       .map_queues             = qla2xxx_map_queues,
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
@@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
        struct req_que **, struct rsp_que **);
 static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+       struct qla_qpair *qpair);
 
 /* -------------------------------------------------------------------------- */
 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
@@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
                    "Unable to allocate memory for response queue ptrs.\n");
                goto fail_rsp_map;
        }
+
+       if (ql2xmqsupport && ha->max_qpairs) {
+               ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
+                       GFP_KERNEL);
+               if (!ha->queue_pair_map) {
+                       ql_log(ql_log_fatal, vha, 0x0180,
+                           "Unable to allocate memory for queue pair ptrs.\n");
+                       goto fail_qpair_map;
+               }
+               ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+               if (ha->base_qpair == NULL) {
+                       ql_log(ql_log_warn, vha, 0x0182,
+                           "Failed to allocate base queue pair memory.\n");
+                       goto fail_base_qpair;
+               }
+               ha->base_qpair->req = req;
+               ha->base_qpair->rsp = rsp;
+       }
+
        /*
         * Make sure we record at least the request and response queue zero in
         * case we need to free them if part of the probe fails.
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
        set_bit(0, ha->req_qid_map);
        return 1;
 
+fail_base_qpair:
+       kfree(ha->queue_pair_map);
+fail_qpair_map:
+       kfree(ha->rsp_q_map);
+       ha->rsp_q_map = NULL;
 fail_rsp_map:
        kfree(ha->req_q_map);
        ha->req_q_map = NULL;
@@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
        struct req_que *req;
        struct rsp_que *rsp;
        int cnt;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
                if (!test_bit(cnt, ha->req_qid_map))
                        continue;
 
                req = ha->req_q_map[cnt];
+               clear_bit(cnt, ha->req_qid_map);
+               ha->req_q_map[cnt] = NULL;
+
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_req_que(ha, req);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        kfree(ha->req_q_map);
        ha->req_q_map = NULL;
 
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
                if (!test_bit(cnt, ha->rsp_qid_map))
                        continue;
 
                rsp = ha->rsp_q_map[cnt];
+               clear_bit(cnt, ha->rsp_qid_map);
+               ha->rsp_q_map[cnt] = NULL;
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_rsp_que(ha, rsp);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        }
-       kfree(ha->rsp_q_map);
-       ha->rsp_q_map = NULL;
-}
-
-static int qla25xx_setup_mode(struct scsi_qla_host *vha)
-{
-       uint16_t options = 0;
-       int ques, req, ret;
-       struct qla_hw_data *ha = vha->hw;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (!(ha->fw_attributes & BIT_6)) {
-               ql_log(ql_log_warn, vha, 0x00d8,
-                   "Firmware is not multi-queue capable.\n");
-               goto fail;
-       }
-       if (ql2xmultique_tag) {
-               /* create a request queue for IO */
-               options |= BIT_7;
-               req = qla25xx_create_req_que(ha, options, 0, 0, -1,
-                       QLA_DEFAULT_QUE_QOS);
-               if (!req) {
-                       ql_log(ql_log_warn, vha, 0x00e0,
-                           "Failed to create request queue.\n");
-                       goto fail;
-               }
-               ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
-               vha->req = ha->req_q_map[req];
-               options |= BIT_1;
-               for (ques = 1; ques < ha->max_rsp_queues; ques++) {
-                       ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
-                       if (!ret) {
-                               ql_log(ql_log_warn, vha, 0x00e8,
-                                   "Failed to create response queue.\n");
-                               goto fail2;
-                       }
-               }
-               ha->flags.cpu_affinity_enabled = 1;
-               ql_dbg(ql_dbg_multiq, vha, 0xc007,
-                   "CPU affinity mode enabled, "
-                   "no. of response queues:%d no. of request queues:%d.\n",
-                   ha->max_rsp_queues, ha->max_req_queues);
-               ql_dbg(ql_dbg_init, vha, 0x00e9,
-                   "CPU affinity mode enabled, "
-                   "no. of response queues:%d no. of request queues:%d.\n",
-                   ha->max_rsp_queues, ha->max_req_queues);
-       }
-       return 0;
-fail2:
-       qla25xx_delete_queues(vha);
-       destroy_workqueue(ha->wq);
-       ha->wq = NULL;
-       vha->req = ha->req_q_map[0];
-fail:
-       ha->mqenable = 0;
-       kfree(ha->req_q_map);
        kfree(ha->rsp_q_map);
-       ha->max_req_queues = ha->max_rsp_queues = 1;
-       return 1;
+       ha->rsp_q_map = NULL;
 }
 
 static char *
@@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-static void
+void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
        struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
        cmd->scsi_done(cmd);
 }
 
+void
+qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct qla_hw_data *ha = sp->fcport->vha->hw;
+       void *ctx = GET_CMD_CTX_SP(sp);
+
+       if (sp->flags & SRB_DMA_VALID) {
+               scsi_dma_unmap(cmd);
+               sp->flags &= ~SRB_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+               dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+                   scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+               sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+               /* The list is guaranteed to have elements */
+               qla2x00_clean_dsd_pool(ha, sp, NULL);
+               sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+               dma_pool_free(ha->dl_dma_pool, ctx,
+                   ((struct crc_context *)ctx)->crc_ctx_dma);
+               sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+               struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+               dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+                   ctx1->fcp_cmnd_dma);
+               list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+               ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+               ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+               mempool_free(ctx1, ha->ctx_mempool);
+       }
+
+       CMD_SP(cmd) = NULL;
+       qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+       cmd->result = res;
+
+       if (atomic_read(&sp->ref_count) == 0) {
+               ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+                   "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+                   sp, GET_CMD_SP(sp));
+               if (ql2xextended_error_logging & ql_dbg_io)
+                       WARN_ON(atomic_read(&sp->ref_count) == 0);
+               return;
+       }
+       if (!atomic_dec_and_test(&sp->ref_count))
+               return;
+
+       qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+       cmd->scsi_done(cmd);
+}
+
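+/* Note on the idiom above: submission takes the initial reference
+ * (atomic_set(&sp->ref_count, 1)); every additional holder pairs an
+ * atomic_inc() with the atomic_dec_and_test() here, so DMA teardown
+ * and scsi_done() run exactly once, on the last put. */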
 /* If we are SP1 here, we need to still take and release the host_lock as SP1
  * does not have the changes necessary to avoid taking host->host_lock.
  */
@@ -706,12 +758,28 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        srb_t *sp;
        int rval;
+       struct qla_qpair *qpair = NULL;
+       uint32_t tag;
+       uint16_t hwq;
 
        if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
                cmd->result = DID_NO_CONNECT << 16;
                goto qc24_fail_command;
        }
 
+       if (ha->mqenable) {
+               if (shost_use_blk_mq(vha->host)) {
+                       tag = blk_mq_unique_tag(cmd->request);
+                       hwq = blk_mq_unique_tag_to_hwq(tag);
+                       qpair = ha->queue_pair_map[hwq];
+               } else if (vha->vp_idx && vha->qpair) {
+                       qpair = vha->qpair;
+               }
+
+               if (qpair)
+                       return qla2xxx_mqueuecommand(host, cmd, qpair);
+       }
+
        if (ha->flags.eeh_busy) {
                if (ha->flags.pci_channel_io_perm_failure) {
                        ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -808,6 +876,95 @@ qc24_fail_command:
        return 0;
 }
 
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+    struct qla_qpair *qpair)
+{
+       scsi_qla_host_t *vha = shost_priv(host);
+       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+       struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+       struct qla_hw_data *ha = vha->hw;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+       srb_t *sp;
+       int rval;
+
+       rval = fc_remote_port_chkready(rport);
+       if (rval) {
+               cmd->result = rval;
+               ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+                   "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+                   cmd, rval);
+               goto qc24_fail_command;
+       }
+
+       if (!fcport) {
+               cmd->result = DID_NO_CONNECT << 16;
+               goto qc24_fail_command;
+       }
+
+       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+               if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+                       atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+                       ql_dbg(ql_dbg_io, vha, 0x3077,
+                           "Returning DNC, fcport_state=%d loop_state=%d.\n",
+                           atomic_read(&fcport->state),
+                           atomic_read(&base_vha->loop_state));
+                       cmd->result = DID_NO_CONNECT << 16;
+                       goto qc24_fail_command;
+               }
+               goto qc24_target_busy;
+       }
+
+       /*
+        * Return target busy if we've received a non-zero retry_delay_timer
+        * in a FCP_RSP.
+        */
+       if (fcport->retry_delay_timestamp == 0) {
+               /* retry delay not set */
+       } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+               fcport->retry_delay_timestamp = 0;
+       else
+               goto qc24_target_busy;
+
+       sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+       if (!sp)
+               goto qc24_host_busy;
+
+       sp->u.scmd.cmd = cmd;
+       sp->type = SRB_SCSI_CMD;
+       atomic_set(&sp->ref_count, 1);
+       CMD_SP(cmd) = (void *)sp;
+       sp->free = qla2xxx_qpair_sp_free_dma;
+       sp->done = qla2xxx_qpair_sp_compl;
+       sp->qpair = qpair;
+
+       rval = ha->isp_ops->start_scsi_mq(sp);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+                   "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+               if (rval == QLA_INTERFACE_ERROR)
+                       goto qc24_fail_command;
+               goto qc24_host_busy_free_sp;
+       }
+
+       return 0;
+
+qc24_host_busy_free_sp:
+       qla2xxx_qpair_sp_free_dma(vha, sp);
+
+qc24_host_busy:
+       return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+       return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+       cmd->scsi_done(cmd);
+
+       return 0;
+}
+
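The hot-path steering in qla2xxx_queuecommand() above comes down to the blk-mq unique-tag helpers: blk_mq_unique_tag() encodes the hardware-queue index into the tag, blk_mq_unique_tag_to_hwq() extracts it, and queue_pair_map[] turns it into a qpair. In isolation:

        u32 tag = blk_mq_unique_tag(cmd->request);
        u16 hwq = blk_mq_unique_tag_to_hwq(tag);
        struct qla_qpair *qpair = ha->queue_pair_map[hwq];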
 /*
  * qla2x00_eh_wait_on_command
  *    Waits for the command to be returned by the Firmware for some
@@ -1601,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 {
        resource_size_t pio;
        uint16_t msix;
-       int cpus;
 
        if (pci_request_selected_regions(ha->pdev, ha->bars,
            QLA2XXX_DRIVER_NAME)) {
@@ -1658,9 +1814,7 @@ skip_pio:
 
        /* Determine queue resources */
        ha->max_req_queues = ha->max_rsp_queues = 1;
-       if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
-               (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
-               (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+       if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
                goto mqiobase_exit;
 
        ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1670,26 +1824,18 @@ skip_pio:
                    "MQIO Base=%p.\n", ha->mqiobase);
                /* Read MSIX vector size of the board */
                pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
-               ha->msix_count = msix;
+               ha->msix_count = msix + 1;
                /* Max queues are bounded by available msix vectors */
-               /* queue 0 uses two msix vectors */
-               if (ql2xmultique_tag) {
-                       cpus = num_online_cpus();
-                       ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                               (cpus + 1) : (ha->msix_count - 1);
-                       ha->max_req_queues = 2;
-               } else if (ql2xmaxqueues > 1) {
-                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                           QLA_MQ_SIZE : ql2xmaxqueues;
-                       ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-               }
+               /* MB interrupt uses 1 vector */
+               ha->max_req_queues = ha->msix_count - 1;
+               ha->max_rsp_queues = ha->max_req_queues;
+               /* The number of queue pairs is the max value minus the base queue pair */
+               ha->max_qpairs = ha->max_rsp_queues - 1;
+               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
+                   "Max no of queue pairs: %d.\n", ha->max_qpairs);
+
                ql_log_pci(ql_log_info, ha->pdev, 0x001a,
-                   "MSI-X vector count: %d.\n", msix);
+                   "MSI-X vector count: %d.\n", ha->msix_count);
        } else
                ql_log_pci(ql_log_info, ha->pdev, 0x001b,
                    "BAR 3 not enabled.\n");
@@ -1709,7 +1855,6 @@ static int
 qla83xx_iospace_config(struct qla_hw_data *ha)
 {
        uint16_t msix;
-       int cpus;
 
        if (pci_request_selected_regions(ha->pdev, ha->bars,
            QLA2XXX_DRIVER_NAME)) {
@@ -1761,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
                /* Read MSIX vector size of the board */
                pci_read_config_word(ha->pdev,
                    QLA_83XX_PCI_MSIX_CONTROL, &msix);
-               ha->msix_count = msix;
-               /* Max queues are bounded by available msix vectors */
-               /* queue 0 uses two msix vectors */
-               if (ql2xmultique_tag) {
-                       cpus = num_online_cpus();
-                       ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                               (cpus + 1) : (ha->msix_count - 1);
-                       ha->max_req_queues = 2;
-               } else if (ql2xmaxqueues > 1) {
-                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                                               QLA_MQ_SIZE : ql2xmaxqueues;
-                       ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
+               ha->msix_count = msix + 1;
+               /*
+                * By default the driver uses at least two MSI-X vectors
+                * (default & rspq).
+                */
+               if (ql2xmqsupport) {
+                       /* MB interrupt uses 1 vector */
+                       ha->max_req_queues = ha->msix_count - 1;
+                       ha->max_rsp_queues = ha->max_req_queues;
+
+                       /* ATIOQ needs 1 vector. That's 1 less QPair */
+                       if (QLA_TGT_MODE_ENABLED())
+                               ha->max_req_queues--;
+
+                       /*
+                        * The number of queue pairs is the max value minus
+                        * the base queue pair.
+                        */
+                       ha->max_qpairs = ha->max_req_queues - 1;
+                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+                           "Max no of queue pairs: %d.\n", ha->max_qpairs);
                }
                ql_log_pci(ql_log_info, ha->pdev, 0x011c,
-                   "MSI-X vector count: %d.\n", msix);
+                   "MSI-X vector count: %d.\n", ha->msix_count);
        } else
                ql_log_pci(ql_log_info, ha->pdev, 0x011e,
                    "BAR 1 not enabled.\n");
 
 mqiobase_exit:
        ha->msix_count = ha->max_rsp_queues + 1;
+       if (QLA_TGT_MODE_ENABLED())
+               ha->msix_count++;
 
        qlt_83xx_iospace_config(ha);
 
@@ -1831,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = {
        .write_optrom           = qla2x00_write_optrom_data,
        .get_flash_version      = qla2x00_get_flash_version,
        .start_scsi             = qla2x00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1869,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = {
        .write_optrom           = qla2x00_write_optrom_data,
        .get_flash_version      = qla2x00_get_flash_version,
        .start_scsi             = qla2x00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1907,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1945,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1983,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2021,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = {
        .write_optrom           = qla82xx_write_optrom_data,
        .get_flash_version      = qla82xx_get_flash_version,
        .start_scsi             = qla82xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla82xx_abort_isp,
        .iospace_config         = qla82xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2059,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = {
        .write_optrom           = qla8044_write_optrom_data,
        .get_flash_version      = qla82xx_get_flash_version,
        .start_scsi             = qla82xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla8044_abort_isp,
        .iospace_config         = qla82xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2097,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla83xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2135,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qlafx00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qlafx00_abort_isp,
        .iospace_config         = qlafx00_iospace_config,
        .initialize_adapter     = qlafx00_initialize_adapter,
@@ -2173,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla83xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2387,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        uint16_t req_length = 0, rsp_length = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
+       int i;
+
        bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
        sht = &qla2xxx_driver_template;
        if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2650,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            "Found an ISP%04X irq %d iobase 0x%p.\n",
            pdev->device, pdev->irq, ha->iobase);
        mutex_init(&ha->vport_lock);
+       mutex_init(&ha->mq_lock);
        init_completion(&ha->mbx_cmd_comp);
        complete(&ha->mbx_cmd_comp);
        init_completion(&ha->mbx_intr_comp);
@@ -2737,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->max_cmd_len, host->max_channel, host->max_lun,
            host->transportt, sht->vendor_id);
 
-que_init:
+       /* Set up the irqs */
+       ret = qla2x00_request_irqs(ha, rsp);
+       if (ret)
+               goto probe_init_failed;
+
        /* Alloc arrays of request and response ring ptrs */
        if (!qla2x00_alloc_queues(ha, req, rsp)) {
                ql_log(ql_log_fatal, base_vha, 0x003d,
@@ -2746,12 +2912,17 @@ que_init:
                goto probe_init_failed;
        }
 
-       qlt_probe_one_stage1(base_vha, ha);
+       if (ha->mqenable && shost_use_blk_mq(host)) {
+               /* number of hardware queues supported by blk/scsi-mq */
+               host->nr_hw_queues = ha->max_qpairs;
 
-       /* Set up the irqs */
-       ret = qla2x00_request_irqs(ha, rsp);
-       if (ret)
-               goto probe_init_failed;
+               ql_dbg(ql_dbg_init, base_vha, 0x0192,
+                       "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+       } else
+               ql_dbg(ql_dbg_init, base_vha, 0x0193,
+                       "blk/scsi-mq disabled.\n");
+
+       qlt_probe_one_stage1(base_vha, ha);
 
        pci_save_state(pdev);
 
@@ -2842,11 +3013,12 @@ que_init:
            host->can_queue, base_vha->req,
            base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
-       if (ha->mqenable) {
-               if (qla25xx_setup_mode(base_vha)) {
-                       ql_log(ql_log_warn, base_vha, 0x00ec,
-                           "Failed to create queues, falling back to single queue mode.\n");
-                       goto que_init;
+       if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+               ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+               /* Create start of day qpairs for Block MQ */
+               if (shost_use_blk_mq(host)) {
+                       for (i = 0; i < ha->max_qpairs; i++)
+                               qla2xxx_create_qpair(base_vha, 5, 0);
                }
        }
 
@@ -3115,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
 static void
 qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
 {
-       /* Flush the work queue and remove it */
-       if (ha->wq) {
-               flush_workqueue(ha->wq);
-               destroy_workqueue(ha->wq);
-               ha->wq = NULL;
-       }
-
        /* Cancel all work and destroy DPC workqueues */
        if (ha->dpc_lp_wq) {
                cancel_work_sync(&ha->idc_aen);
@@ -3317,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha)
                ha->isp_ops->disable_intrs(ha);
        }
 
+       qla2x00_free_fcports(vha);
+
        qla2x00_free_irqs(vha);
 
-       qla2x00_free_fcports(vha);
+       /* Flush the work queue and remove it */
+       if (ha->wq) {
+               flush_workqueue(ha->wq);
+               destroy_workqueue(ha->wq);
+               ha->wq = NULL;
+       }
+
 
        qla2x00_mem_free(ha);
 
@@ -4034,6 +4207,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
        INIT_LIST_HEAD(&vha->logo_list);
        INIT_LIST_HEAD(&vha->plogi_ack_list);
+       INIT_LIST_HEAD(&vha->qp_list);
 
        spin_lock_init(&vha->work_lock);
        spin_lock_init(&vha->cmd_list_lock);
@@ -5038,8 +5212,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 
        base_vha->flags.init_done = 0;
        qla25xx_delete_queues(base_vha);
-       qla2x00_free_irqs(base_vha);
        qla2x00_free_fcports(base_vha);
+       qla2x00_free_irqs(base_vha);
        qla2x00_mem_free(ha);
        qla82xx_md_free(base_vha);
        qla2x00_free_queues(ha);
@@ -5073,6 +5247,8 @@ qla2x00_do_dpc(void *data)
 {
        scsi_qla_host_t *base_vha;
        struct qla_hw_data *ha;
+       uint32_t online;
+       struct qla_qpair *qpair;
 
        ha = (struct qla_hw_data *)data;
        base_vha = pci_get_drvdata(ha->pdev);
@@ -5334,6 +5510,22 @@ intr_on_check:
                                ha->isp_ops->beacon_blink(base_vha);
                }
 
+               /* qpair online check */
+               if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+                   &base_vha->dpc_flags)) {
+                       if (ha->flags.eeh_busy ||
+                           ha->flags.pci_channel_io_perm_failure)
+                               online = 0;
+                       else
+                               online = 1;
+
+                       mutex_lock(&ha->mq_lock);
+                       list_for_each_entry(qpair, &base_vha->qp_list,
+                           qp_list_elem)
+                               qpair->online = online;
+                       mutex_unlock(&ha->mq_lock);
+               }
+
                if (!IS_QLAFX00(ha))
                        qla2x00_do_dpc_all_vps(base_vha);
 
@@ -5676,6 +5868,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
        switch (state) {
        case pci_channel_io_normal:
                ha->flags.eeh_busy = 0;
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                ha->flags.eeh_busy = 1;
@@ -5689,10 +5885,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                pci_disable_device(pdev);
                /* Return back all IOs */
                qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                ha->flags.pci_channel_io_perm_failure = 1;
                qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_DISCONNECT;
        }
        return PCI_ERS_RESULT_NEED_RESET;
@@ -5960,6 +6164,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
        qla83xx_wr_reg(vha, reg, data);
 }
 
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+       scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+       return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+}
+
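blk_mq_pci_map_queues() derives the hardware-queue-to-CPU map from the affinity masks the PCI core recorded when the vectors were allocated, so completions land on the submitting CPU. It only produces a useful spread if the vectors were allocated with affinity spreading; whether this patch sets that flag is not visible in this hunk, but the allocation would look like:

        /* hypothetical flags, shown for illustration */
        pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);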
 static const struct pci_error_handlers qla2xxx_err_handler = {
        .error_detected = qla2xxx_pci_error_detected,
        .mmio_enabled = qla2xxx_pci_mmio_enabled,
index 07349270535d19002a39fa12f968f22c3ebd8cb1..82dfe07b1d47f7e1f8ae3517191f15d190168834 100644 (file)
@@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
        struct request_queue *rq = sdev->request_queue;
        struct scsi_target *starget = sdev->sdev_target;
 
-       error = scsi_device_set_state(sdev, SDEV_RUNNING);
-       if (error)
-               return error;
-
        error = scsi_target_add(starget);
        if (error)
                return error;
index 070332eb41f33de2c765bedc724f50f404af62a9..dbe5b4b95df0d9d317dbdc2261914e4d8771991f 100644 (file)
@@ -581,6 +581,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
        sg_io_hdr_t *hp;
        unsigned char cmnd[SG_MAX_CDB_SIZE];
 
+       if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+               return -EINVAL;
+
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
index aa43bfea0d004270d79411c91f5ee5b413daba73..abe6173726614f627c085129f4e8aa9dc6ba1eda 100644 (file)
@@ -23,6 +23,7 @@
 #include "unipro.h"
 #include "ufs-qcom.h"
 #include "ufshci.h"
+#include "ufs_quirks.h"
 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN  \
        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
@@ -1031,6 +1032,34 @@ out:
        return ret;
 }
 
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+       int err;
+       u32 pa_vs_config_reg1;
+
+       err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+                            &pa_vs_config_reg1);
+       if (err)
+               goto out;
+
+       /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+       err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+                           (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+       return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+       int err = 0;
+
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+               err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+       return err;
+}
+
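ufs_qcom_quirk_host_pa_saveconfigtime() above is a read-modify-write of a vendor UniPro attribute through the DME accessors; the same shape works for any attribute (attr and mask here are placeholders):

        u32 val;
        int err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &val);
        if (!err)
                err = ufshcd_dme_set(hba, UIC_ARG_MIB(attr), val | mask);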
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1194,7 +1223,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
         */
        host->generic_phy = devm_phy_get(dev, "ufsphy");
 
-       if (IS_ERR(host->generic_phy)) {
+       if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+               /*
+                * The UFS driver may be probed before the phy driver has
+                * probed; in that case, return -EPROBE_DEFER so probing is
+                * retried once the phy is available.
+                */
+               err = -EPROBE_DEFER;
+               dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
+                       __func__, err);
+               goto out_variant_clear;
+       } else if (IS_ERR(host->generic_phy)) {
                err = PTR_ERR(host->generic_phy);
                dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
                goto out_variant_clear;
@@ -1432,7 +1470,8 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
        print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
 
-       ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+       /* clear bit 17 - UTP_DBG_RAMS_EN */
+       ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
 
        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
@@ -1609,6 +1648,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
        .hce_enable_notify      = ufs_qcom_hce_enable_notify,
        .link_startup_notify    = ufs_qcom_link_startup_notify,
        .pwr_change_notify      = ufs_qcom_pwr_change_notify,
+       .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
        .suspend                = ufs_qcom_suspend,
        .resume                 = ufs_qcom_resume,
        .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
index a19307a57ce248f5cb102c294408227b8ed603de..fe517cd7dac348b40b97c322e49e26976b7256cc 100644 (file)
@@ -142,6 +142,7 @@ enum ufs_qcom_phy_init_type {
         UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
 /* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1      0x9000
 #define DME_VS_CORE_CLK_CTRL   0xD002
 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
 #define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT               BIT(8)
index f7983058f3f716ed27505fbcef0d420eaf8a6163..08b799d4efcc68b99b7c43fa7af968dd3b543a2c 100644 (file)
@@ -134,29 +134,17 @@ struct ufs_dev_fix {
  */
 #define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE     (1 << 7)
 
+/*
+ * The maximum value of PA_SaveConfigTime is 250 (10 us), but this is not
+ * enough for some vendors: a gear switch from PWM to HS may fail even at
+ * that maximum. The host controller can issue a gear switch as part of
+ * error recovery, where no software delay helps, so PA_SaveConfigTime must
+ * be raised above 32 us per the vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME        (1 << 8)
 
 struct ufs_hba;
 void ufs_advertise_fixup_device(struct ufs_hba *hba);
 
-static struct ufs_dev_fix ufs_fixups[] = {
-       /* UFS cards deviations table */
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_NO_FASTAUTO),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
-               UFS_DEVICE_QUIRK_PA_TACTIVATE),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
-               UFS_DEVICE_QUIRK_PA_TACTIVATE),
-       UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-
-       END_FIX
-};
 #endif /* UFS_QUIRKS_H_ */
index ef8548c3a423d213dd04537753190b48f820cb83..a2c2817fc566911e59f019d91639260d2df2fa09 100644 (file)
@@ -185,6 +185,30 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
        return ufs_pm_lvl_states[lvl].link_state;
 }
 
+static struct ufs_dev_fix ufs_fixups[] = {
+       /* UFS cards deviations table */
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_NO_FASTAUTO),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+       UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+               UFS_DEVICE_QUIRK_PA_TACTIVATE),
+       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+               UFS_DEVICE_QUIRK_PA_TACTIVATE),
+       UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+       UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+       END_FIX
+};
+
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
@@ -288,10 +312,24 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
  */
 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 {
-       if (hba->ufs_version == UFSHCI_VERSION_10)
-               return INTERRUPT_MASK_ALL_VER_10;
-       else
-               return INTERRUPT_MASK_ALL_VER_11;
+       u32 intr_mask = 0;
+
+       switch (hba->ufs_version) {
+       case UFSHCI_VERSION_10:
+               intr_mask = INTERRUPT_MASK_ALL_VER_10;
+               break;
+       /* UFSHCI_VERSION_11 and UFSHCI_VERSION_20 share the v1.1 mask */
+       case UFSHCI_VERSION_11:
+       case UFSHCI_VERSION_20:
+               intr_mask = INTERRUPT_MASK_ALL_VER_11;
+               break;
+       /* UFSHCI_VERSION_21 and anything newer fall back to the v2.1 mask */
+       case UFSHCI_VERSION_21:
+       default:
+               intr_mask = INTERRUPT_MASK_ALL_VER_21;
+       }
+
+       return intr_mask;
 }
 
 /**
@@ -5199,6 +5237,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 
        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
                ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+       ufshcd_vops_apply_dev_quirks(hba);
 }
 
 /**
@@ -6667,6 +6707,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
 
+       if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+           (hba->ufs_version != UFSHCI_VERSION_11) &&
+           (hba->ufs_version != UFSHCI_VERSION_20) &&
+           (hba->ufs_version != UFSHCI_VERSION_21))
+               dev_err(hba->dev, "invalid UFS version 0x%x\n",
+                       hba->ufs_version);
+
        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);
 
index 7d9ff22acfeaf78c29c1ed4e3d95144d2fa18d2c..08cd26ed238270a3f6bfb797137f4962bfe04312 100644 (file)
@@ -266,7 +266,7 @@ struct ufs_pwr_mode_info {
  * @setup_task_mgmt: called before any task management request is issued
  *                  to set some things
  * @hibern8_notify: called around hibern8 enter/exit
- *                 to configure some things
+ * @apply_dev_quirks: called to apply device specific quirks
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
  * @dbg_register_dump: used to dump controller debug information
@@ -293,7 +293,8 @@ struct ufs_hba_variant_ops {
        void    (*setup_xfer_req)(struct ufs_hba *, int, bool);
        void    (*setup_task_mgmt)(struct ufs_hba *, int, u8);
        void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
-                                      enum ufs_notify_change_status);
+                                       enum ufs_notify_change_status);
+       int     (*apply_dev_quirks)(struct ufs_hba *);
        int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
        int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
        void    (*dbg_register_dump)(struct ufs_hba *hba);
@@ -839,6 +840,13 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
                return hba->vops->hibern8_notify(hba, cmd, status);
 }
 
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+       if (hba->vops && hba->vops->apply_dev_quirks)
+               return hba->vops->apply_dev_quirks(hba);
+       return 0;
+}
+
 static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
 {
        if (hba->vops && hba->vops->suspend)
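ufshcd_tune_unipro_params() (earlier hunk) now calls this wrapper at the end of device tuning, so a platform driver can react to quirk bits such as UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME from its own callback; the wrapper returns 0 when no variant ops are registered, so platforms without quirks need no changes. Below is a minimal sketch of a variant driver wiring up the hook; example_raise_pa_saveconfigtime() is a hypothetical stand-in for whatever vendor-specific attribute write the platform actually needs.

    /* Hypothetical variant-driver hook; the helper is a placeholder. */
    static int example_apply_dev_quirks(struct ufs_hba *hba)
    {
            int err = 0;

            if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
                    err = example_raise_pa_saveconfigtime(hba);

            return err;
    }

    static const struct ufs_hba_variant_ops example_vops = {
            .apply_dev_quirks       = example_apply_dev_quirks,
            /* remaining callbacks elided */
    };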
index 5d978867be57199beac282369eae799599a1ea9f..8c5190e2e1c928407e8aac111b67758fff9bc191 100644 (file)
@@ -72,6 +72,10 @@ enum {
        REG_UIC_COMMAND_ARG_1                   = 0x94,
        REG_UIC_COMMAND_ARG_2                   = 0x98,
        REG_UIC_COMMAND_ARG_3                   = 0x9C,
+       REG_UFS_CCAP                            = 0x100,
+       REG_UFS_CRYPTOCAP                       = 0x104,
+
+       UFSHCI_CRYPTO_REG_SPACE_SIZE            = 0x400,
 };
 
 /* Controller capability masks */
@@ -275,6 +279,9 @@ enum {
 
        /* Interrupt disable mask for UFSHCI v1.1 */
        INTERRUPT_MASK_ALL_VER_11       = 0x31FFF,
+
+       /* Interrupt disable mask for UFSHCI v2.1 */
+       INTERRUPT_MASK_ALL_VER_21       = 0x71FFF,
 };
 
 /*
index d02bf58aea6d85bfdfa0473b95ca94794135becc..8bcb9b71f764325d585f659fb1430b393b54214c 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <asm/unaligned.h>
+#include <net/tcp.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include "cxgbit.h"
index b7d747e92c7abf589e35154b25482a9dedb57118..da2c73a255dec194bba90826f6b3e95e9a264e32 100644 (file)
@@ -23,7 +23,9 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/idr.h>
+#include <linux/delay.h>
 #include <asm/unaligned.h>
+#include <net/ipv6.h>
 #include <scsi/scsi_proto.h>
 #include <scsi/iscsi_proto.h>
 #include <scsi/scsi_tcq.h>
index 4cf2c0f2ba2f981699499cce77726d20aeee9dc9..e0db2ceb0f87cb170a2ff1b12fcb5a8e7a407cb2 100644 (file)
@@ -1,6 +1,18 @@
 #ifndef ISCSI_TARGET_H
 #define ISCSI_TARGET_H
 
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_np;
+struct iscsi_portal_group;
+struct iscsi_session;
+struct iscsi_tpg_np;
+struct kref;
+struct sockaddr_storage;
+
 extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
 extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
 extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
index e116f0e845c08c4f7c91bd9d91c891c1fb9683b7..903b667f8e0136d1e1d919c1bc51d2a3c99bd7c3 100644 (file)
@@ -20,8 +20,8 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/err.h>
+#include <linux/random.h>
 #include <linux/scatterlist.h>
-
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
index d22f7b96a06ca98aa3bd83f669d92eb686cfcec9..1b91c13cc9657e5661c6ea254e799a068eedc4b7 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ISCSI_CHAP_H_
 #define _ISCSI_CHAP_H_
 
+#include <linux/types.h>
+
 #define CHAP_DIGEST_UNKNOWN    0
 #define CHAP_DIGEST_MD5                5
 #define CHAP_DIGEST_SHA                6
@@ -18,6 +20,9 @@
 #define CHAP_STAGE_CLIENT_NRIC 4
 #define CHAP_STAGE_SERVER_NR   5
 
+struct iscsi_node_auth;
+struct iscsi_conn;
+
 extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
                                int *, int *);
 
index 923c032f0b95f1efe2d1e12e90cb9086f3fd9346..bf40f03755ddc50697652ccde864d40df840fa0b 100644 (file)
 #include <linux/ctype.h>
 #include <linux/export.h>
 #include <linux/inet.h>
+#include <linux/module.h>
+#include <net/ipv6.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
-
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
@@ -100,8 +101,10 @@ static ssize_t lio_target_np_driver_store(struct config_item *item,
 
                tpg_np_new = iscsit_tpg_add_network_portal(tpg,
                                        &np->np_sockaddr, tpg_np, type);
-               if (IS_ERR(tpg_np_new))
+               if (IS_ERR(tpg_np_new)) {
+                       rc = PTR_ERR(tpg_np_new);
                        goto out;
+               }
        } else {
                tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
                if (tpg_np_new) {
index 647d4a5dca5281838b904ba67fd03d8a0ea80642..173ddd93c75726937dabfb33a11e85479437a207 100644 (file)
@@ -16,8 +16,8 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <linux/slab.h>
 #include <scsi/iscsi_proto.h>
-
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
index 646429ac5a02bf3055f87204e7591d09c530062e..16edeeeb7777b447cd93a4222e4e2cf54a10cfa1 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef ISCSI_TARGET_DATAIN_VALUES_H
 #define ISCSI_TARGET_DATAIN_VALUES_H
 
+struct iscsi_cmd;
+struct iscsi_datain;
+
 extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
 extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
 extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
index a0e2df9e809032034d80da9b398252365b626b92..06dbff5cd52069af9539d24305d0a2960bf5402f 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef ISCSI_TARGET_DEVICE_H
 #define ISCSI_TARGET_DEVICE_H
 
+struct iscsi_cmd;
+struct iscsi_session;
+
 extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
 extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
 
index a9e2f9497fb22a1734ae27393e63fa351533f2d8..60e69e2af6eda981efb74e4ac313fb0d031093bd 100644 (file)
@@ -1,6 +1,12 @@
 #ifndef ISCSI_TARGET_ERL0_H
 #define ISCSI_TARGET_ERL0_H
 
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_session;
+
 extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
 extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
 extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
index 9214c9dafa2be56082b792eaccd31ca87420b59e..fe9b7f1e44aca5c8bcda8677310351f03e507095 100644 (file)
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index 2a3ebf118a342fb33200810237afef1694c78536..54d36bd25beacdf4da8856b0b97ab5e237d4b2aa 100644 (file)
@@ -1,6 +1,16 @@
 #ifndef ISCSI_TARGET_ERL1_H
 #define ISCSI_TARGET_ERL1_H
 
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_datain_req;
+struct iscsi_ooo_cmdsn;
+struct iscsi_pdu;
+struct iscsi_session;
+
 extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
 extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
                        struct iscsi_cmd *, struct iscsi_datain_req *);
index e24f1c7c5862d4af2f0ae53efb3e981f153080db..faf9ae014b30443583555c1a49f089a3ad7d32c0 100644 (file)
@@ -17,6 +17,7 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <linux/slab.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index 63f2501f3fe08344ea000d3ae83e414fe4e361de..7965f1e865061ef0ec40ef63fe89d33a5e189379 100644 (file)
@@ -1,6 +1,13 @@
 #ifndef ISCSI_TARGET_ERL2_H
 #define ISCSI_TARGET_ERL2_H
 
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
 extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
 extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
 extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
index 15f79a2ca34ab6e17fd5fda6f68425b9af1809eb..450f51deb2a2ae18137ede36d4a3e8c188fd7352 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/string.h>
 #include <linux/kthread.h>
 #include <linux/idr.h>
+#include <linux/tcp.h>        /* TCP_NODELAY */
+#include <net/ipv6.h>         /* ipv6_addr_v4mapped() */
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index b597aa2c61a1c60d2794610796ac156c220e43fe..0e1fd6cedd54cb83ffc576654d5428d47e75efa9 100644 (file)
@@ -1,6 +1,13 @@
 #ifndef ISCSI_TARGET_LOGIN_H
 #define ISCSI_TARGET_LOGIN_H
 
+#include <linux/types.h>
+
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+struct sockaddr_storage;
+
 extern int iscsi_login_setup_crypto(struct iscsi_conn *);
 extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
 extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
index 89d34bd6d87f94519c26168741b74bcb6478061a..46388c9e08dad3e5de751d3280f8fc829683154d 100644 (file)
@@ -18,6 +18,8 @@
 
 #include <linux/ctype.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/sock.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index f021cbd330e51e0c61d85fc4f909e502073f09e5..53438bfca4c66bee15a05ca56ae4d89686d6a05d 100644 (file)
@@ -4,6 +4,10 @@
 #define DECIMAL         0
 #define HEX             1
 
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+
 extern void convert_null_to_semi(char *, int);
 extern int extract_param(const char *, const char *, unsigned int, char *,
                unsigned char *);
index 0c69a46a62ec7f9b082679e7a24f24fb0ffe321e..79cdf06ade48bf8d63336877b40af572778f574a 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef ISCSI_TARGET_NODEATTRIB_H
 #define ISCSI_TARGET_NODEATTRIB_H
 
+#include <linux/types.h>
+
+struct iscsi_node_acl;
+struct iscsi_portal_group;
+
 extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
                                              struct iscsi_portal_group *);
 extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
index 0efa80bb89628602598346c0647a958536acdf3b..e65bf78ceef3740fc1923c1b3ed446aa2996b82d 100644 (file)
@@ -17,7 +17,7 @@
  ******************************************************************************/
 
 #include <linux/slab.h>
-
+#include <linux/uio.h> /* struct kvec */
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
index a0751e3f0813429bd5c87c2ecab16780cfbfa1c7..9962ccf0ccd7d923d074923661a48e4b058e5400 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef ISCSI_PARAMETERS_H
 #define ISCSI_PARAMETERS_H
 
+#include <linux/types.h>
 #include <scsi/iscsi_proto.h>
 
 struct iscsi_extra_response {
@@ -23,6 +24,11 @@ struct iscsi_param {
        struct list_head p_list;
 } ____cacheline_aligned;
 
+struct iscsi_conn;
+struct iscsi_conn_ops;
+struct iscsi_param_list;
+struct iscsi_sess_ops;
+
 extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
 extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
 extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
index d5b153751a8d223c9edb21a01490748911a4756d..be1234362271b0b3f672c33a83b26f0407b630c8 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef ISCSI_SEQ_AND_PDU_LIST_H
 #define ISCSI_SEQ_AND_PDU_LIST_H
 
+#include <linux/types.h>
+#include <linux/cache.h>
+
 /* struct iscsi_pdu->status */
 #define DATAOUT_PDU_SENT                       1
 
@@ -78,6 +81,8 @@ struct iscsi_seq {
        u32             xfer_len;
 } ____cacheline_aligned;
 
+struct iscsi_cmd;
+
 extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
 extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
 extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
index 142e992cb097a63cb9a8efbd41c0a725ba719592..64cc5c07e47c2d301cfc66c14082a6a76458542f 100644 (file)
@@ -1,6 +1,12 @@
 #ifndef ISCSI_TARGET_TMR_H
 #define ISCSI_TARGET_TMR_H
 
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_tmr_req;
+
 extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
 extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
                        unsigned char *);
index 0814e5894a9616ffcc79fb0d0f086f718a971fd7..2e7e08dbda4807ed51c6e886b3399bc95199f3d5 100644 (file)
@@ -16,9 +16,9 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <linux/slab.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
@@ -260,7 +260,6 @@ err_out:
                iscsi_release_param_list(tpg->param_list);
                tpg->param_list = NULL;
        }
-       kfree(tpg);
        return -ENOMEM;
 }
 
index 2da211920c186215e740d1aaa47f781999d1fb71..ceba298511677a5cf5ca49d784258f6c111da7dd 100644 (file)
@@ -1,6 +1,15 @@
 #ifndef ISCSI_TARGET_TPG_H
 #define ISCSI_TARGET_TPG_H
 
+#include <linux/types.h>
+
+struct iscsi_np;
+struct iscsi_session;
+struct iscsi_tiqn;
+struct iscsi_tpg_np;
+struct se_node_acl;
+struct sockaddr_storage;
+
 extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
 extern int iscsit_load_discovery_tpg(void);
 extern void iscsit_release_discovery_tpg(void);
index 08217d62fb0d6860e40bcb9fa4b2947e710b3bd4..c4eb141c6435983ea3493159f48cf6c3919a9f7c 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/module.h>
 #include <target/iscsi/iscsi_transport.h>
 
 static LIST_HEAD(g_transport_list);
index 1f38177207e0806b18641766fdd6849ae1555a9b..b5a1b4ccba124d4dbf60fd528ec05d3a7d0dbf32 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/list.h>
 #include <linux/percpu_ida.h>
+#include <net/ipv6.h>         /* ipv6_addr_equal() */
 #include <scsi/scsi_tcq.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
index 995f1cb29d0e08268acf9f3547494d23498dc87b..8ff08856516aba68394fc07661ec71b635c8b6a2 100644 (file)
@@ -1,8 +1,16 @@
 #ifndef ISCSI_TARGET_UTIL_H
 #define ISCSI_TARGET_UTIL_H
 
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h>        /* itt_t */
+
 #define MARKER_SIZE    8
 
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
 extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
 extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
 extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
index 4346462094a1af4e4ce778abd41a0f1da1d0a559..a8a230b4e6b532866becda3b04d70d2fee7c93c2 100644 (file)
@@ -1,3 +1,7 @@
+#include <linux/types.h>
+#include <linux/device.h>
+#include <target/target_core_base.h> /* struct se_cmd */
+
 #define TCM_LOOP_VERSION               "v2.1-rc2"
 #define TL_WWN_ADDR_LEN                        256
 #define TL_TPGS_PER_HBA                        32
index 58bb6ed181853b49370d9fda31f73e7bd24c7fda..e5c3e5f827d0b8f163bbe9f1e78c2da7cfc2bae5 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/string.h>
 #include <linux/configfs.h>
 #include <linux/ctype.h>
+#include <linux/delay.h>
 #include <linux/firewire.h>
 #include <linux/firewire-constants.h>
 #include <scsi/scsi_proto.h>
@@ -928,7 +929,7 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
        struct sbp_target_request *req;
        int tag;
 
-       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0)
                return ERR_PTR(-ENOMEM);
 
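This hunk (and the identical one in the USB gadget target driver further down) switches percpu_ida_alloc() from a gfp mask to a task state: TASK_RUNNING asks for a non-blocking attempt, where a negative return means no tag was available, while a sleeping task state lets the allocator wait for a tag to be freed. A hedged sketch of the two calling styles:

    /* Non-blocking attempt, as in the hunk above */
    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
    if (tag < 0)
            return ERR_PTR(-ENOMEM);

    /* Blocking variant: sleep until a tag frees up or a signal arrives */
    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_INTERRUPTIBLE);
    if (tag < 0)
            return ERR_PTR(-EINTR);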
index 4c82bbe19003d083979fac3139ab91e3a15a01a0..f5e330099bfca713f4cb12bd2dc77826fdad1b3b 100644 (file)
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/configfs.h>
+#include <linux/delay.h>
 #include <linux/export.h>
+#include <linux/fcntl.h>
 #include <linux/file.h>
+#include <linux/fs.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
index 9b250f9b33bfb830ff194e3b0bf028f9e0ff02fb..c69c11baf07f03ab6dae23a52bace4e956b613a2 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef TARGET_CORE_ALUA_H
 #define TARGET_CORE_ALUA_H
 
+#include <target/target_core_base.h>
+
 /*
  * INQUIRY response data, TPGS Field
  *
index a35a347ec357ad48626cc4083eca16588c29eb8b..54b36c9835be3ae2127cb1f447321eba73b824ac 100644 (file)
@@ -144,12 +144,12 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
                return -EINVAL;
        }
        if (!S_ISDIR(file_inode(fp)->i_mode)) {
-               filp_close(fp, 0);
+               filp_close(fp, NULL);
                mutex_unlock(&g_tf_lock);
                pr_err("db_root: not a directory: %s\n", db_root_stage);
                return -EINVAL;
        }
-       filp_close(fp, 0);
+       filp_close(fp, NULL);
 
        strncpy(db_root, db_root_stage, read_bytes);
 
index 6b423485c5d6b4f6e8e54a332f1dd62eda0325d9..1ebd13ef7bd333c5cbc488f7543eda58e29a2123 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/kthread.h>
 #include <linux/in.h>
 #include <linux/export.h>
+#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
index d545993df18be9ede3253861e24c25726d9a8e27..87aa376a1a1ae9f9119369725199d7bd5ba22a1e 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/falloc.h>
+#include <linux/uio.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
index 068966fce3089527fb7f14dd7bd3aa0fafdd1041..526595a072de899c618487b4edc909ad0fd64d91 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef TARGET_CORE_FILE_H
 #define TARGET_CORE_FILE_H
 
+#include <target/target_core_base.h>
+
 #define FD_VERSION             "4.0"
 
 #define FD_MAX_DEV_NAME                256
index 01c2afd815008d6cfa9d6b6d5490e1c39394db89..718d3fcd3e7cd8d8cacd7057ff85119a836725ca 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef TARGET_CORE_IBLOCK_H
 #define TARGET_CORE_IBLOCK_H
 
+#include <linux/atomic.h>
+#include <target/target_core_base.h>
+
 #define IBLOCK_VERSION         "4.0"
 
 #define IBLOCK_MAX_CDBS                16
index e2c970a9d61c32c7a95d034f889992a7560fdb64..9ab7090f7c839c6900cb30ddf7db1b8be4bc78cf 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef TARGET_CORE_INTERNAL_H
 #define TARGET_CORE_INTERNAL_H
 
+#include <linux/configfs.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 #define TARGET_CORE_NAME_MAX_LEN       64
 #define TARGET_FABRIC_NAME_SIZE                32
 
index 47463c99c3181ed8e133b2d39ba9362d0196541a..d761025144f9dc178cc43d4803b4c79b0147815b 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/list.h>
 #include <linux/vmalloc.h>
 #include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
@@ -253,8 +255,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
 
        if ((cmd->t_task_cdb[1] & 0x01) &&
            (cmd->t_task_cdb[1] & 0x02)) {
-               pr_err("LongIO and Obselete Bits set, returning"
-                               " ILLEGAL_REQUEST\n");
+               pr_err("LongIO and Obsolete Bits set, returning ILLEGAL_REQUEST\n");
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
        /*
index e3d26e9126a01fa860693d4ecb6852b0c7de3d36..847bd470339c7ab1e1d498a6ddf37be37c526d57 100644 (file)
@@ -1,5 +1,9 @@
 #ifndef TARGET_CORE_PR_H
 #define TARGET_CORE_PR_H
+
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 /*
  * PERSISTENT_RESERVE_OUT service action codes
  *
index 6d2007e35df65919f1687341499b26076ba25ed1..8a02fa47c7e8e907952f740b490f145f083e8459 100644 (file)
 #define PS_TIMEOUT_DISK                (15*HZ)
 #define PS_TIMEOUT_OTHER       (500*HZ)
 
-#include <linux/device.h>
-#include <linux/kref.h>
-#include <linux/kobject.h>
+#include <linux/cache.h>             /* ___cacheline_aligned */
+#include <target/target_core_base.h> /* struct se_device */
 
+struct block_device;
 struct scsi_device;
+struct Scsi_Host;
 
 struct pscsi_plugin_task {
        unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
index 24b36fd785f19a03d4dcd4507890d4ef850f023f..ddc216c9f1f63dcdea780b5be5edbf34d9cc93d4 100644 (file)
@@ -26,7 +26,9 @@
 
 #include <linux/string.h>
 #include <linux/parser.h>
+#include <linux/highmem.h>
 #include <linux/timer.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <scsi/scsi_proto.h>
index cc46a6a89b38e863a3d7b4c2f13207d251539fd2..91fc1a34791d909a1d68d265321caeaa833657db 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef TARGET_CORE_RD_H
 #define TARGET_CORE_RD_H
 
+#include <linux/module.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 #define RD_HBA_VERSION         "v4.0"
 #define RD_MCP_VERSION         "4.0"
 
index 04f616b3ba0a848a80d4a70c084c1b45d406c168..4879e70e2eefb68ddc229effbe4a9822f369ce3f 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/ratelimit.h>
 #include <linux/crc-t10dif.h>
+#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi_proto.h>
 #include <scsi/scsi_tcq.h>
index bd6e78ba153d68bd37b784ba7ebd52290932906c..97402856a8f0e3be40ae8eee5b0f74e74fdb2f9d 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef TARGET_CORE_UA_H
 #define TARGET_CORE_UA_H
 
+#include <target/target_core_base.h>
+
 /*
  * From spc4r17, Table D.1: ASC and ASCQ Assignement
  */
index 2b3c8564ace8154548349c6a71872f0b1aceeadb..8041710b697298ec7073c4e5910849bd1a154703 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/uio_driver.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
+#include <linux/highmem.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -537,7 +538,7 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
-       int ret;
+       sense_reason_t ret;
 
        tcmu_cmd = tcmu_alloc_cmd(se_cmd);
        if (!tcmu_cmd)
@@ -685,8 +686,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
        target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
        cmd->se_cmd = NULL;
 
-       kmem_cache_free(tcmu_cmd_cache, cmd);
-
        return 0;
 }
 
index 094a1440eacb3dccdd9c35a678a2940c3e03216d..37d5caebffa6b593025a28b703a54a71e7d940d3 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/configfs.h>
+#include <linux/ratelimit.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
index 700a981c7b415264c40d70058cc3fc6c497b32ed..4d3d4dd060f28366ebd069abb603472ae0275d5b 100644 (file)
@@ -1,3 +1,5 @@
+#include <target/target_core_base.h>
+
 #define XCOPY_TARGET_DESC_LEN          32
 #define XCOPY_SEGMENT_DESC_LEN         28
 #define XCOPY_NAA_IEEE_REGEX_LEN       16
index e28209b99b59804de51663afe0a677c91745a827..11d27b93b41392aee08a4034c1e828927521defc 100644 (file)
@@ -17,6 +17,9 @@
 #ifndef __TCM_FC_H__
 #define __TCM_FC_H__
 
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 #define FT_VERSION "0.4"
 
 #define FT_NAMELEN 32          /* length of ASCII WWPNs including pad */
index 197f73386fac9ab45473de09db462ab5e4640d90..d2351139342f6200209078e769e04f5ea1eb2d1f 100644 (file)
@@ -1073,7 +1073,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
        struct usbg_cmd *cmd;
        int tag;
 
-       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0)
                return ERR_PTR(-ENOMEM);
 
index 8edf253484af981c0e0f6a9a931a71f85d721918..8c79e1a53af95ac12ca9a1f4e28b23e97c8d69f4 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1367,6 +1367,39 @@ out:
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
+{
+       struct kioctx *ioctx = NULL;
+       unsigned long ctx;
+       long ret;
+
+       ret = get_user(ctx, ctx32p);
+       if (unlikely(ret))
+               goto out;
+
+       ret = -EINVAL;
+       if (unlikely(ctx || nr_events == 0)) {
+               pr_debug("EINVAL: ctx %lu nr_events %u\n",
+                        ctx, nr_events);
+               goto out;
+       }
+
+       ioctx = ioctx_alloc(nr_events);
+       ret = PTR_ERR(ioctx);
+       if (!IS_ERR(ioctx)) {
+               /* truncating is ok because it's a user address */
+               ret = put_user((u32)ioctx->user_id, ctx32p);
+               if (ret)
+                       kill_ioctx(current->mm, ioctx, NULL);
+               percpu_ref_put(&ioctx->users);
+       }
+
+out:
+       return ret;
+}
+#endif
+
 /* sys_io_destroy:
  *     Destroy the aio_context specified.  May cancel any outstanding 
  *     AIOs and block on completion.  Will fail with -ENOSYS if not
@@ -1591,8 +1624,8 @@ out_put_req:
        return ret;
 }
 
-long do_io_submit(aio_context_t ctx_id, long nr,
-                 struct iocb __user *__user *iocbpp, bool compat)
+static long do_io_submit(aio_context_t ctx_id, long nr,
+                         struct iocb __user *__user *iocbpp, bool compat)
 {
        struct kioctx *ctx;
        long ret = 0;
@@ -1662,6 +1695,44 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
        return do_io_submit(ctx_id, nr, iocbpp, 0);
 }
 
+#ifdef CONFIG_COMPAT
+static inline long
+copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
+{
+       compat_uptr_t uptr;
+       int i;
+
+       for (i = 0; i < nr; ++i) {
+               if (get_user(uptr, ptr32 + i))
+                       return -EFAULT;
+               if (put_user(compat_ptr(uptr), ptr64 + i))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+#define MAX_AIO_SUBMITS        (PAGE_SIZE/sizeof(struct iocb *))
+
+COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
+                      int, nr, u32 __user *, iocb)
+{
+       struct iocb __user * __user *iocb64;
+       long ret;
+
+       if (unlikely(nr < 0))
+               return -EINVAL;
+
+       if (nr > MAX_AIO_SUBMITS)
+               nr = MAX_AIO_SUBMITS;
+
+       iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
+       ret = copy_iocb(nr, iocb, iocb64);
+       if (!ret)
+               ret = do_io_submit(ctx_id, nr, iocb64, 1);
+       return ret;
+}
+#endif
+
 /* lookup_kiocb
  *     Finds a given iocb for cancellation.
  */
@@ -1761,3 +1832,25 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
        }
        return ret;
 }
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
+                      compat_long_t, min_nr,
+                      compat_long_t, nr,
+                      struct io_event __user *, events,
+                      struct compat_timespec __user *, timeout)
+{
+       struct timespec t;
+       struct timespec __user *ut = NULL;
+
+       if (timeout) {
+               if (compat_get_timespec(&t, timeout))
+                       return -EFAULT;
+
+               ut = compat_alloc_user_space(sizeof(*ut));
+               if (copy_to_user(ut, &t, sizeof(t)))
+                       return -EFAULT;
+       }
+       return sys_io_getevents(ctx_id, min_nr, nr, events, ut);
+}
+#endif
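Moving these wrappers next to the native definitions lets compat io_setup() call ioctx_alloc() directly and compat io_submit() call the now-static do_io_submit(), so the set_fs(KERNEL_DS) round trip still visible in the removed fs/compat.c version (further down) is gone. For reference, a minimal user-space exercise of the three syscalls involved; glibc does not wrap the native AIO interface, so raw syscall(2) is used and error checking is trimmed:

    #include <linux/aio_abi.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <string.h>

    int main(void)
    {
            aio_context_t ctx = 0;
            struct iocb cb, *cbs[1] = { &cb };
            struct io_event ev;
            char buf[4096];
            int fd = open("/etc/hostname", O_RDONLY);

            syscall(SYS_io_setup, 128, &ctx);

            memset(&cb, 0, sizeof(cb));
            cb.aio_fildes = fd;
            cb.aio_lio_opcode = IOCB_CMD_PREAD;
            cb.aio_buf = (__u64)(unsigned long)buf;
            cb.aio_nbytes = sizeof(buf);

            syscall(SYS_io_submit, ctx, 1, cbs);             /* queue the read */
            syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL); /* reap it */
            syscall(SYS_io_destroy, ctx);
            return 0;
    }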
index c6bad51d8ec7b56d8027c019a98a656596911110..b914cfb03820aa7780e629dc047287df1ea56b08 100644 (file)
@@ -129,6 +129,7 @@ static inline befs_inode_addr
 blockno2iaddr(struct super_block *sb, befs_blocknr_t blockno)
 {
        befs_inode_addr iaddr;
+
        iaddr.allocation_group = blockno >> BEFS_SB(sb)->ag_shift;
        iaddr.start =
            blockno - (iaddr.allocation_group << BEFS_SB(sb)->ag_shift);
@@ -140,7 +141,7 @@ blockno2iaddr(struct super_block *sb, befs_blocknr_t blockno)
 static inline unsigned int
 befs_iaddrs_per_block(struct super_block *sb)
 {
-       return BEFS_SB(sb)->block_size / sizeof (befs_disk_inode_addr);
+       return BEFS_SB(sb)->block_size / sizeof(befs_disk_inode_addr);
 }
 
 #include "endian.h"
index eb557d9dc8be973ae82c89feb274275202080059..69c9d8cde95563b58dd31c5a5a7e41e9f02fb3c1 100644 (file)
@@ -55,12 +55,12 @@ enum super_flags {
 };
 
 #define BEFS_BYTEORDER_NATIVE 0x42494745
-#define BEFS_BYTEORDER_NATIVE_LE (__force fs32)cpu_to_le32(BEFS_BYTEORDER_NATIVE)
-#define BEFS_BYTEORDER_NATIVE_BE (__force fs32)cpu_to_be32(BEFS_BYTEORDER_NATIVE)
+#define BEFS_BYTEORDER_NATIVE_LE ((__force fs32)cpu_to_le32(BEFS_BYTEORDER_NATIVE))
+#define BEFS_BYTEORDER_NATIVE_BE ((__force fs32)cpu_to_be32(BEFS_BYTEORDER_NATIVE))
 
 #define BEFS_SUPER_MAGIC BEFS_SUPER_MAGIC1
-#define BEFS_SUPER_MAGIC1_LE (__force fs32)cpu_to_le32(BEFS_SUPER_MAGIC1)
-#define BEFS_SUPER_MAGIC1_BE (__force fs32)cpu_to_be32(BEFS_SUPER_MAGIC1)
+#define BEFS_SUPER_MAGIC1_LE ((__force fs32)cpu_to_le32(BEFS_SUPER_MAGIC1))
+#define BEFS_SUPER_MAGIC1_BE ((__force fs32)cpu_to_be32(BEFS_SUPER_MAGIC1))
 
 /*
  * Flags of inode
@@ -79,7 +79,7 @@ enum inode_flags {
        BEFS_INODE_WAS_WRITTEN = 0x00020000,
        BEFS_NO_TRANSACTION = 0x00040000,
 };
-/* 
+/*
  * On-Disk datastructures of BeFS
  */
 
@@ -139,7 +139,7 @@ typedef struct {
 
 } PACKED befs_super_block;
 
-/* 
+/*
  * Note: the indirect and dbl_indir block_runs may
  * be longer than one block!
  */
index 7e135ea73fddf65295a804502d7c3a57e906cffb..d509887c580ceedba9ab003cb390417aeab993e9 100644 (file)
@@ -12,8 +12,8 @@
  *
  * Dominic Giampaolo, author of "Practical File System
  * Design with the Be File System", for such a helpful book.
- * 
- * Marcus J. Ranum, author of the b+tree package in 
+ *
+ * Marcus J. Ranum, author of the b+tree package in
  * comp.sources.misc volume 10. This code is not copied from that
  * work, but it is partially based on it.
  *
  */
 
 /* Befs B+tree structure:
- * 
+ *
  * The first thing in the tree is the tree superblock. It tells you
  * all kinds of useful things about the tree, like where the rootnode
  * is located, and the size of the nodes (always 1024 with current version
  * of BeOS).
  *
  * The rest of the tree consists of a series of nodes. Nodes contain a header
- * (struct befs_btree_nodehead), the packed key data, an array of shorts 
+ * (struct befs_btree_nodehead), the packed key data, an array of shorts
  * containing the ending offsets for each of the keys, and an array of
- * befs_off_t values. In interior nodes, the keys are the ending keys for 
- * the childnode they point to, and the values are offsets into the 
- * datastream containing the tree. 
+ * befs_off_t values. In interior nodes, the keys are the ending keys for
+ * the childnode they point to, and the values are offsets into the
+ * datastream containing the tree.
  */
 
 /* Note:
- * 
- * The book states 2 confusing things about befs b+trees. First, 
+ *
+ * The book states 2 confusing things about befs b+trees. First,
  * it states that the overflow field of node headers is used by internal nodes
  * to point to another node that "effectively continues this one". Here is what
  * I believe that means. Each key in internal nodes points to another node that
- * contains key values less than itself. Inspection reveals that the last key 
- * in the internal node is not the last key in the index. Keys that are 
- * greater than the last key in the internal node go into the overflow node. 
+ * contains key values less than itself. Inspection reveals that the last key
+ * in the internal node is not the last key in the index. Keys that are
+ * greater than the last key in the internal node go into the overflow node.
  * I imagine there is a performance reason for this.
  *
- * Second, it states that the header of a btree node is sufficient to 
- * distinguish internal nodes from leaf nodes. Without saying exactly how. 
+ * Second, it states that the header of a btree node is sufficient to
+ * distinguish internal nodes from leaf nodes. Without saying exactly how.
  * After figuring out the first, it becomes obvious that internal nodes have
  * overflow nodes and leafnodes do not.
  */
 
-/* 
+/*
  * Currently, this code is only good for directory B+trees.
  * In order to be used for other BFS indexes, it needs to be extended to handle
  * duplicate keys and non-string keytypes (int32, int64, float, double).
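The overflow rule above is the whole trick to descending an interior node: when the search key compares greater than every key stored in the node, the descent continues through the overflow pointer instead of a regular child pointer. A sketch of that step, with find_key() standing in for the real befs_find_key() defined further down in this file:

    /* Illustrative only: find_key() is a stand-in for befs_find_key(). */
    static befs_off_t descend_one_level(struct befs_btree_node *node,
                                        const char *key, int keylen)
    {
            befs_off_t child;

            if (find_key(node, key, keylen, &child) == BEFS_BT_NOT_FOUND)
                    child = node->head.overflow;    /* key > last key here */

            return child;
    }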
@@ -237,8 +237,8 @@ befs_bt_read_node(struct super_block *sb, const befs_data_stream *ds,
  * with @key (usually the disk block number of an inode).
  *
  * On failure, returns BEFS_ERR or BEFS_BT_NOT_FOUND.
- * 
- * Algorithm: 
+ *
+ * Algorithm:
  *   Read the superblock and rootnode of the b+tree.
  *   Drill down through the interior nodes using befs_find_key().
  *   Once at the correct leaf node, use befs_find_key() again to get the
@@ -402,12 +402,12 @@ befs_find_key(struct super_block *sb, struct befs_btree_node *node,
  *
  * Here's how it works: Key_no is the index of the key/value pair to
  * return in keybuf/value.
- * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is 
+ * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is
  * the number of characters in the key (just a convenience).
  *
  * Algorithm:
  *   Get the first leafnode of the tree. See if the requested key is in that
- *   node. If not, follow the node->right link to the next leafnode. Repeat 
+ *   node. If not, follow the node->right link to the next leafnode. Repeat
  *   until the (key_no)th key is found or the tree is out of keys.
  */
 int
@@ -536,7 +536,7 @@ befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
  * @node_off: Pointer to offset of current node within datastream. Modified
  *             by the function.
  *
- * Helper function for btree traverse. Moves the current position to the 
+ * Helper function for btree traverse. Moves the current position to the
  * start of the first leaf node.
  *
  * Also checks for an empty tree. If there are no keys, returns BEFS_BT_EMPTY.
@@ -592,10 +592,10 @@ befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds,
 }
 
 /**
- * befs_leafnode - Determine if the btree node is a leaf node or an 
+ * befs_leafnode - Determine if the btree node is a leaf node or an
  * interior node
  * @node: Pointer to node structure to test
- * 
+ *
  * Return 1 if leaf, 0 if interior
  */
 static int
@@ -656,7 +656,7 @@ befs_bt_valarray(struct befs_btree_node *node)
  * @node: Pointer to the node structure to find the keydata array within
  *
  * Returns a pointer to the start of the keydata array
- * of the node pointed to by the node header 
+ * of the node pointed to by the node header
  */
 static char *
 befs_bt_keydata(struct befs_btree_node *node)
@@ -702,7 +702,7 @@ befs_bt_get_key(struct super_block *sb, struct befs_btree_node *node,
 
 /**
  * befs_compare_strings - compare two strings
- * @key1: pointer to the first key to be compared 
+ * @key1: pointer to the first key to be compared
  * @keylen1: length in bytes of key1
  * @key2: pointer to the second key to be compared
  * @keylen2: length in bytes of key2
index f2a8f637e9e07faf3faf7773737bdaea257ddf29..60c6c728e64e78168ad9c37d759948d6e82f596a 100644 (file)
@@ -1,13 +1,11 @@
 /*
  * btree.h
- * 
+ *
  */
 
-
 int befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
-                   const char *key, befs_off_t * value);
+                   const char *key, befs_off_t *value);
 
 int befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
                    loff_t key_no, size_t bufsize, char *keybuf,
-                   size_t * keysize, befs_off_t * value);
-
+                   size_t *keysize, befs_off_t *value);
index b4c7ba013c0d6e752296599d6b29597e5dd13512..720b3bc5c16a70bd664562bfe72abacffa8c7360 100644 (file)
@@ -84,13 +84,11 @@ befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
  *
  * Takes a file position and gives back a brun whose starting block
  * is block number fblock of the file.
- * 
+ *
  * Returns BEFS_OK or BEFS_ERR.
- * 
+ *
  * Calls specialized functions for each of the three possible
  * datastream regions.
- *
- * 2001-11-15 Will Dyson
  */
 int
 befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
@@ -120,7 +118,7 @@ befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
 
 /**
  * befs_read_lsymlink - read long symlink from datastream.
- * @sb: Filesystem superblock 
+ * @sb: Filesystem superblock
  * @ds: Datastream to read from
  * @buff: Buffer in which to place long symlink data
  * @len: Length of the long symlink in bytes
index 91ba8203d83f221278df23e8bb4ab5c3e9cb6d11..7ff9ff09ec6e70718b4b8bd398239922d1065f84 100644 (file)
@@ -5,10 +5,10 @@
 
 struct buffer_head *befs_read_datastream(struct super_block *sb,
                                         const befs_data_stream *ds,
-                                        befs_off_t pos, uint * off);
+                                        befs_off_t pos, uint *off);
 
 int befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
-                    befs_blocknr_t fblock, befs_block_run * run);
+                    befs_blocknr_t fblock, befs_block_run *run);
 
 size_t befs_read_lsymlink(struct super_block *sb, const befs_data_stream *data,
                          void *buff, befs_off_t len);
@@ -17,4 +17,3 @@ befs_blocknr_t befs_count_blocks(struct super_block *sb,
                          const befs_data_stream *ds);
 
 extern const befs_inode_addr BAD_IADDR;
-
index 85c13392e9e897dae2ee288f301c88c3067e8303..36656c86f50ec526d0a9f9c3b8ce76ec6098198a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  linux/fs/befs/debug.c
- * 
+ *
  * Copyright (C) 2001 Will Dyson (will_dyson at pobox.com)
  *
  * With help from the ntfs-tng driver by Anton Altparmakov
@@ -57,6 +57,7 @@ befs_debug(const struct super_block *sb, const char *fmt, ...)
 
        struct va_format vaf;
        va_list args;
+
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
@@ -67,7 +68,7 @@ befs_debug(const struct super_block *sb, const char *fmt, ...)
 }
 
 void
-befs_dump_inode(const struct super_block *sb, befs_inode * inode)
+befs_dump_inode(const struct super_block *sb, befs_inode *inode)
 {
 #ifdef CONFIG_BEFS_DEBUG
 
@@ -151,7 +152,7 @@ befs_dump_inode(const struct super_block *sb, befs_inode * inode)
  */
 
 void
-befs_dump_super_block(const struct super_block *sb, befs_super_block * sup)
+befs_dump_super_block(const struct super_block *sb, befs_super_block *sup)
 {
 #ifdef CONFIG_BEFS_DEBUG
 
@@ -202,7 +203,7 @@ befs_dump_super_block(const struct super_block *sb, befs_super_block * sup)
 #if 0
 /* unused */
 void
-befs_dump_small_data(const struct super_block *sb, befs_small_data * sd)
+befs_dump_small_data(const struct super_block *sb, befs_small_data *sd)
 {
 }
 
@@ -221,7 +222,8 @@ befs_dump_run(const struct super_block *sb, befs_disk_block_run run)
 #endif  /*  0  */
 
 void
-befs_dump_index_entry(const struct super_block *sb, befs_disk_btree_super * super)
+befs_dump_index_entry(const struct super_block *sb,
+                     befs_disk_btree_super *super)
 {
 #ifdef CONFIG_BEFS_DEBUG
 
@@ -242,7 +244,7 @@ befs_dump_index_entry(const struct super_block *sb, befs_disk_btree_super * supe
 }
 
 void
-befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead * node)
+befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead *node)
 {
 #ifdef CONFIG_BEFS_DEBUG
 
index fa4b718de597394cf4b50be1fb877937d8f5935d..5367a6470a69fb59c0db37e321ff9d9c343d5519 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * inode.c
- * 
+ *
  * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
  */
 
 #include "inode.h"
 
 /*
-      Validates the correctness of the befs inode
-      Returns BEFS_OK if the inode should be used, otherwise
-      returns BEFS_BAD_INODE
-*/
+ * Validates the correctness of the befs inode
+ * Returns BEFS_OK if the inode should be used, otherwise
+ * returns BEFS_BAD_INODE
+ */
 int
-befs_check_inode(struct super_block *sb, befs_inode * raw_inode,
+befs_check_inode(struct super_block *sb, befs_inode *raw_inode,
                 befs_blocknr_t inode)
 {
        u32 magic1 = fs32_to_cpu(sb, raw_inode->magic1);
index 9dc7fd9b7570d63321c86b3088e5a975f7e8c419..2219e412f49bee9255241464701290c6d006fa58 100644 (file)
@@ -1,8 +1,7 @@
 /*
  * inode.h
- * 
+ *
  */
 
-int befs_check_inode(struct super_block *sb, befs_inode * raw_inode,
+int befs_check_inode(struct super_block *sb, befs_inode *raw_inode,
                     befs_blocknr_t inode);
-
index b4a558126ee1724b0d3bd833f68a1c201833af33..227cb86e07fe3a99afc45c1790a87671641eb5af 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com
  *
- * Based on portions of file.c and inode.c 
+ * Based on portions of file.c and inode.c
  * by Makoto Kato (m_kato@ga2.so-net.ne.jp)
  *
  * Many thanks to Dominic Giampaolo, author of Practical File System
@@ -19,8 +19,7 @@
 /*
  * Converts befs notion of disk addr to a disk offset and uses
  * linux kernel function sb_bread() to get the buffer containing
- * the offset. -Will Dyson
- *
+ * the offset.
  */
 
 struct buffer_head *
@@ -55,7 +54,7 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
        befs_debug(sb, "<--- %s", __func__);
        return bh;
 
-      error:
+error:
        befs_debug(sb, "<--- %s ERROR", __func__);
        return NULL;
 }
index 78d7bc6e60dee4d51fb1ac6325cca8b3929bd9c2..9b3e1967cb313f100a7ae96d9559f4ae6cea902d 100644 (file)
@@ -4,4 +4,3 @@
 
 struct buffer_head *befs_bread_iaddr(struct super_block *sb,
                                     befs_inode_addr iaddr);
-
index 647a276eba5654593739aafa9a6984ca967d8adf..19407165f4aad9719ef5f339fabc31bb2c22ef9e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/parser.h>
 #include <linux/namei.h>
 #include <linux/sched.h>
+#include <linux/exportfs.h>
 
 #include "befs.h"
 #include "btree.h"
@@ -37,7 +38,8 @@ static int befs_readdir(struct file *, struct dir_context *);
 static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 static int befs_readpage(struct file *file, struct page *page);
 static sector_t befs_bmap(struct address_space *mapping, sector_t block);
-static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int);
+static struct dentry *befs_lookup(struct inode *, struct dentry *,
+                                 unsigned int);
 static struct inode *befs_iget(struct super_block *, unsigned long);
 static struct inode *befs_alloc_inode(struct super_block *sb);
 static void befs_destroy_inode(struct inode *inode);
@@ -51,6 +53,10 @@ static void befs_put_super(struct super_block *);
 static int befs_remount(struct super_block *, int *, char *);
 static int befs_statfs(struct dentry *, struct kstatfs *);
 static int parse_options(char *, struct befs_mount_options *);
+static struct dentry *befs_fh_to_dentry(struct super_block *sb,
+                               struct fid *fid, int fh_len, int fh_type);
+static struct dentry *befs_fh_to_parent(struct super_block *sb,
+                               struct fid *fid, int fh_len, int fh_type);
 
 static const struct super_operations befs_sops = {
        .alloc_inode    = befs_alloc_inode,     /* allocate a new inode */
@@ -83,9 +89,14 @@ static const struct address_space_operations befs_symlink_aops = {
        .readpage       = befs_symlink_readpage,
 };
 
-/* 
+static const struct export_operations befs_export_operations = {
+       .fh_to_dentry   = befs_fh_to_dentry,
+       .fh_to_parent   = befs_fh_to_parent,
+};
+
+/*
  * Called by generic_file_read() to read a page of data
- * 
+ *
  * In turn, simply calls a generic block read function and
  * passes it the address of befs_get_block, for mapping file
  * positions to disk blocks.
@@ -102,15 +113,13 @@ befs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, befs_get_block);
 }
 
-/* 
- * Generic function to map a file position (block) to a 
+/*
+ * Generic function to map a file position (block) to a
  * disk offset (passed back in bh_result).
  *
  * Used by many higher level functions.
  *
  * Calls befs_fblock2brun() in datastream.c to do the real work.
- *
- * -WD 10-26-01
  */
 
 static int
@@ -269,15 +278,15 @@ befs_alloc_inode(struct super_block *sb)
        struct befs_inode_info *bi;
 
        bi = kmem_cache_alloc(befs_inode_cachep, GFP_KERNEL);
-        if (!bi)
-                return NULL;
-        return &bi->vfs_inode;
+       if (!bi)
+               return NULL;
+       return &bi->vfs_inode;
 }
 
 static void befs_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
-        kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
+       kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
 }
 
 static void befs_destroy_inode(struct inode *inode)
@@ -287,7 +296,7 @@ static void befs_destroy_inode(struct inode *inode)
 
 static void init_once(void *foo)
 {
-        struct befs_inode_info *bi = (struct befs_inode_info *) foo;
+       struct befs_inode_info *bi = (struct befs_inode_info *) foo;
 
        inode_init_once(&bi->vfs_inode);
 }
@@ -338,7 +347,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
        /*
         * Set uid and gid.  Since current BeOS is a single-user OS, these
         * can be overridden with the "uid" and "gid" mount options.
-        */   
+        */
 
        inode->i_uid = befs_sb->mount_opts.use_uid ?
                befs_sb->mount_opts.uid :
@@ -353,14 +362,14 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
         * BEFS's time is 64 bits, but current VFS is 32 bits...
         * BEFS doesn't have an access time, nor an inode change time; VFS
         * doesn't have a creation time.
-        * Also, the lower 16 bits of the last_modified_time and 
+        * Also, the lower 16 bits of the last_modified_time and
         * create_time are just a counter to help ensure uniqueness
         * for indexing purposes. (PFD, page 54)
         */
 
        inode->i_mtime.tv_sec =
            fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
-       inode->i_mtime.tv_nsec = 0;   /* lower 16 bits are not a time */        
+       inode->i_mtime.tv_nsec = 0;   /* lower 16 bits are not a time */
        inode->i_ctime = inode->i_mtime;
        inode->i_atime = inode->i_mtime;
 
@@ -414,10 +423,10 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
        unlock_new_inode(inode);
        return inode;
 
-      unacquire_bh:
+unacquire_bh:
        brelse(bh);
 
-      unacquire_none:
+unacquire_none:
        iget_failed(inode);
        befs_debug(sb, "<--- %s - Bad inode", __func__);
        return ERR_PTR(-EIO);
@@ -442,7 +451,7 @@ befs_init_inodecache(void)
 }
 
 /* Called at fs teardown.
- * 
+ *
  * Taken from NFS implementation by Al Viro.
  */
 static void
@@ -491,13 +500,10 @@ fail:
 }
 
 /*
- * UTF-8 to NLS charset  convert routine
- * 
+ * UTF-8 to NLS charset convert routine
  *
- * Changed 8/10/01 by Will Dyson. Now use uni2char() / char2uni() rather than
- * the nls tables directly
+ * Uses uni2char() / char2uni() rather than the nls tables directly
  */
-
 static int
 befs_utf2nls(struct super_block *sb, const char *in,
             int in_len, char **out, int *out_len)
@@ -521,9 +527,8 @@ befs_utf2nls(struct super_block *sb, const char *in,
        }
 
        *out = result = kmalloc(maxlen, GFP_NOFS);
-       if (!*out) {
+       if (!*out)
                return -ENOMEM;
-       }
 
        for (i = o = 0; i < in_len; i += utflen, o += unilen) {
 
@@ -546,7 +551,7 @@ befs_utf2nls(struct super_block *sb, const char *in,
 
        return o;
 
-      conv_err:
+conv_err:
        befs_error(sb, "Name using character set %s contains a character that "
                   "cannot be converted to unicode.", nls->charset);
        befs_debug(sb, "<--- %s", __func__);
@@ -561,18 +566,18 @@ befs_utf2nls(struct super_block *sb, const char *in,
  * @in_len: Length of input string in bytes
  * @out: The output string in UTF-8 format
  * @out_len: Length of the output buffer
- * 
+ *
  * Converts input string @in, which is in the format of the loaded NLS map,
  * into a utf8 string.
- * 
+ *
  * The destination string @out is allocated by this function and the caller is
  * responsible for freeing it with kfree()
- * 
+ *
  * On return, *@out_len is the length of @out in bytes.
  *
  * On success, the return value is the number of utf8 characters written to
  * the output buffer @out.
- *  
+ *
  * On Failure, a negative number corresponding to the error code is returned.
  */
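In short: on success the callee allocates *out and the caller owns it. A hedged usage sketch, where sb, nlsname and nlslen are assumed to come from the surrounding context:

    char *utf8name;
    int utf8len;
    int ret = befs_nls2utf(sb, nlsname, nlslen, &utf8name, &utf8len);

    if (ret < 0)
            return ret;             /* -EILSEQ or -ENOMEM */
    /* ... use utf8len bytes of utf8name ... */
    kfree(utf8name);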
 
@@ -585,9 +590,11 @@ befs_nls2utf(struct super_block *sb, const char *in,
        wchar_t uni;
        int unilen, utflen;
        char *result;
-       /* There're nls characters that will translate to 3-chars-wide UTF-8
-        * characters, a additional byte is needed to save the final \0
-        * in special cases */
+       /*
+        * There are nls characters that will translate to 3-chars-wide UTF-8
+        * characters, an additional byte is needed to save the final \0
+        * in special cases
+        */
        int maxlen = (3 * in_len) + 1;
 
        befs_debug(sb, "---> %s\n", __func__);
@@ -624,14 +631,41 @@ befs_nls2utf(struct super_block *sb, const char *in,
 
        return i;
 
-      conv_err:
-       befs_error(sb, "Name using charecter set %s contains a charecter that "
+conv_err:
+       befs_error(sb, "Name using character set %s contains a character that "
                   "cannot be converted to unicode.", nls->charset);
        befs_debug(sb, "<--- %s", __func__);
        kfree(result);
        return -EILSEQ;
 }
 
+static struct inode *befs_nfs_get_inode(struct super_block *sb, uint64_t ino,
+                                        uint32_t generation)
+{
+       /* No need to handle i_generation */
+       return befs_iget(sb, ino);
+}
+
+/*
+ * Map a NFS file handle to a corresponding dentry
+ */
+static struct dentry *befs_fh_to_dentry(struct super_block *sb,
+                               struct fid *fid, int fh_len, int fh_type)
+{
+       return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+                                   befs_nfs_get_inode);
+}
+
+/*
+ * Find the parent for a file specified by NFS handle
+ */
+static struct dentry *befs_fh_to_parent(struct super_block *sb,
+                               struct fid *fid, int fh_len, int fh_type)
+{
+       return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+                                   befs_nfs_get_inode);
+}
+
 enum {
        Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
 };
@@ -666,6 +700,7 @@ parse_options(char *options, struct befs_mount_options *opts)
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
+
                if (!*p)
                        continue;
 
@@ -721,7 +756,7 @@ parse_options(char *options, struct befs_mount_options *opts)
 }
 
 /* This function has the responsibility of getting the
- * filesystem ready for unmounting. 
+ * filesystem ready for unmounting.
  * Basically, we free everything that we allocated in
  * befs_read_inode
  */
@@ -782,8 +817,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
         * Linux 2.4.10 and later refuse to read blocks smaller than
         * the logical block size for the device. But we also need to read at
         * least 1k to get the second 512 bytes of the volume.
-        * -WD 10-26-01
-        */ 
+        */
        blocksize = sb_min_blocksize(sb, 1024);
        if (!blocksize) {
                if (!silent)
@@ -791,7 +825,8 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
                goto unacquire_priv_sbp;
        }
 
-       if (!(bh = sb_bread(sb, sb_block))) {
+       bh = sb_bread(sb, sb_block);
+       if (!bh) {
                if (!silent)
                        befs_error(sb, "unable to read superblock");
                goto unacquire_priv_sbp;
@@ -816,7 +851,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
 
        brelse(bh);
 
-       if( befs_sb->num_blocks > ~((sector_t)0) ) {
+       if (befs_sb->num_blocks > ~((sector_t)0)) {
                if (!silent)
                        befs_error(sb, "blocks count: %llu is larger than the host can use",
                                        befs_sb->num_blocks);
@@ -831,6 +866,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
        /* Set real blocksize of fs */
        sb_set_blocksize(sb, (ulong) befs_sb->block_size);
        sb->s_op = &befs_sops;
+       sb->s_export_op = &befs_export_operations;
        root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
        if (IS_ERR(root)) {
                ret = PTR_ERR(root);
@@ -861,16 +897,16 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        return 0;
-/*****************/
-      unacquire_bh:
+
+unacquire_bh:
        brelse(bh);
 
-      unacquire_priv_sbp:
+unacquire_priv_sbp:
        kfree(befs_sb->mount_opts.iocharset);
        kfree(sb->s_fs_info);
        sb->s_fs_info = NULL;
 
-      unacquire_none:
+unacquire_none:
        return ret;
 }
 
@@ -919,7 +955,7 @@ static struct file_system_type befs_fs_type = {
        .name           = "befs",
        .mount          = befs_mount,
        .kill_sb        = kill_block_super,
-       .fs_flags       = FS_REQUIRES_DEV,      
+       .fs_flags       = FS_REQUIRES_DEV,
 };
 MODULE_ALIAS_FS("befs");
 
@@ -956,9 +992,9 @@ exit_befs_fs(void)
 }
 
 /*
-Macros that typecheck the init and exit functions,
-ensures that they are called at init and cleanup,
-and eliminates warnings about unused functions.
-*/
+ * Macros that typecheck the init and exit functions,
+ * ensures that they are called at init and cleanup,
+ * and eliminates warnings about unused functions.
+ */
 module_init(init_befs_fs)
 module_exit(exit_befs_fs)
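
The export hooks above are only half of the wiring: the befs_export_operations table that the fill_super hunk installs is not itself visible in this excerpt. A sketch of its likely shape using the generic exportfs helpers follows; befs_fh_to_dentry and its use of generic_fh_to_dentry are assumptions inferred from the fh_to_parent helper shown.

static struct dentry *befs_fh_to_dentry(struct super_block *sb,
				struct fid *fid, int fh_len, int fh_type)
{
	/* Decode the opaque file handle into an inode, then a dentry. */
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    befs_nfs_get_inode);
}

/* Assumed shape of the table installed via sb->s_export_op above. */
static const struct export_operations befs_export_operations = {
	.fh_to_dentry	= befs_fh_to_dentry,
	.fh_to_parent	= befs_fh_to_parent,
};
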
index dc4556376a2206ac43431bddb3815b60615bcb42..ec1df30a7e9ab859e25ca0ea384f0681fc24adcc 100644 (file)
@@ -2,7 +2,5 @@
  * super.h
  */
 
-int befs_load_sb(struct super_block *sb, befs_super_block * disk_sb);
-
+int befs_load_sb(struct super_block *sb, befs_super_block *disk_sb);
 int befs_check_sb(struct super_block *sb);
-
index 543b48c29ac3157eec076bf78b15190d3cd38316..3f4908c286988769cc20ae63aba326f4321616e5 100644 (file)
@@ -487,45 +487,6 @@ COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
        return compat_sys_fcntl64(fd, cmd, arg);
 }
 
-COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
-{
-       long ret;
-       aio_context_t ctx64;
-
-       mm_segment_t oldfs = get_fs();
-       if (unlikely(get_user(ctx64, ctx32p)))
-               return -EFAULT;
-
-       set_fs(KERNEL_DS);
-       /* The __user pointer cast is valid because of the set_fs() */
-       ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
-       set_fs(oldfs);
-       /* truncating is ok because it's a user address */
-       if (!ret)
-               ret = put_user((u32) ctx64, ctx32p);
-       return ret;
-}
-
-COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
-                      compat_long_t, min_nr,
-                      compat_long_t, nr,
-                      struct io_event __user *, events,
-                      struct compat_timespec __user *, timeout)
-{
-       struct timespec t;
-       struct timespec __user *ut = NULL;
-
-       if (timeout) {
-               if (compat_get_timespec(&t, timeout))
-                       return -EFAULT;
-
-               ut = compat_alloc_user_space(sizeof(*ut));
-               if (copy_to_user(ut, &t, sizeof(t)) )
-                       return -EFAULT;
-       } 
-       return sys_io_getevents(ctx_id, min_nr, nr, events, ut);
-}
-
 /* A write operation does a read from user space and vice versa */
 #define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
 
@@ -602,42 +563,6 @@ out:
        return ret;
 }
 
-static inline long
-copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
-{
-       compat_uptr_t uptr;
-       int i;
-
-       for (i = 0; i < nr; ++i) {
-               if (get_user(uptr, ptr32 + i))
-                       return -EFAULT;
-               if (put_user(compat_ptr(uptr), ptr64 + i))
-                       return -EFAULT;
-       }
-       return 0;
-}
-
-#define MAX_AIO_SUBMITS        (PAGE_SIZE/sizeof(struct iocb *))
-
-COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
-                      int, nr, u32 __user *, iocb)
-{
-       struct iocb __user * __user *iocb64; 
-       long ret;
-
-       if (unlikely(nr < 0))
-               return -EINVAL;
-
-       if (nr > MAX_AIO_SUBMITS)
-               nr = MAX_AIO_SUBMITS;
-       
-       iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
-       ret = copy_iocb(nr, iocb, iocb64);
-       if (!ret)
-               ret = do_io_submit(ctx_id, nr, iocb64, 1);
-       return ret;
-}
-
 struct compat_ncp_mount_data {
        compat_int_t version;
        compat_uint_t ncp_fd;
index 8112eacf10f3afe7243385e46e37625b4ea3d03e..eadbf5069c388ab5431a271a39e5814cbb50b0d0 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -19,7 +19,7 @@
  * current->executable is only used by the procfs.  This allows a dispatch
  * table to check for several different types of binary formats.  We keep
  * trying until we recognize the file or we run out of supported binary
- * formats. 
+ * formats.
  */
 
 #include <linux/slab.h>
@@ -1268,6 +1268,13 @@ int flush_old_exec(struct linux_binprm * bprm)
        flush_thread();
        current->personality &= ~bprm->per_clear;
 
+       /*
+        * We have to apply CLOEXEC before we change whether the process is
+        * dumpable (in setup_new_exec) to avoid a race with a process in userspace
+        * trying to access the should-be-closed file descriptors of a process
+        * undergoing exec(2).
+        */
+       do_close_on_exec(current->files);
        return 0;
 
 out:
@@ -1330,7 +1337,6 @@ void setup_new_exec(struct linux_binprm * bprm)
           group */
        current->self_exec_id++;
        flush_signal_handlers(current, 0);
-       do_close_on_exec(current->files);
 }
 EXPORT_SYMBOL(setup_new_exec);
 
index e173afe9266109f4e7b948433d4d422e90bacc72..0093ea2512a85809e16605088074a8335513e81c 100644 (file)
@@ -1478,6 +1478,10 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
                inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
        else
                ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
+       if (i_size_read(inode) < 0) {
+               ret = -EFSCORRUPTED;
+               goto bad_inode;
+       }
        ei->i_dtime = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
        ei->i_state = 0;
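
The new check rejects inodes whose assembled 64-bit size is negative: i_size is built from two little-endian 32-bit halves a few lines earlier, so a crafted i_size_high with the top bit set yields a negative loff_t. A standalone illustration (not ext2 code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0, hi = 0x80000000u;	/* crafted on-disk halves */
	int64_t size = (int64_t)(((uint64_t)hi << 32) | lo);

	/* Prints a negative value; ext2_iget() now fails such an inode
	 * with -EFSCORRUPTED instead of propagating the bogus size. */
	printf("%lld\n", (long long)size);
	return 0;
}
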
index f7e28f8ea04d2a629ae7c5ead8b84ca71913cc8a..b5b1259e064f8d9661110ba1f1f73d1a0ff19d51 100644 (file)
@@ -96,10 +96,6 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
        return &mountpoint_hashtable[tmp & mp_hash_mask];
 }
 
-/*
- * allocation is serialized by namespace_sem, but we need the spinlock to
- * serialize with freeing.
- */
 static int mnt_alloc_id(struct mount *mnt)
 {
        int res;
@@ -1034,6 +1030,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                if (IS_MNT_SLAVE(old))
                        list_add(&mnt->mnt_slave, &old->mnt_slave);
                mnt->mnt_master = old->mnt_master;
+       } else {
+               CLEAR_MNT_SHARED(mnt);
        }
        if (flag & CL_MAKE_SHARED)
                set_mnt_shared(mnt);
@@ -1828,9 +1826,7 @@ struct vfsmount *clone_private_mount(const struct path *path)
        if (IS_MNT_UNBINDABLE(old_mnt))
                return ERR_PTR(-EINVAL);
 
-       down_read(&namespace_sem);
        new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
-       up_read(&namespace_sem);
        if (IS_ERR(new_mnt))
                return ERR_CAST(new_mnt);
 
index cb22a9f9ae7e3694db1532a683ae34d4d313e787..fad81041f5ab6a60e23e53f45ae429bbde7a9470 100644 (file)
@@ -1273,8 +1273,8 @@ out_error:
  */
 static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
 {
-       int error;
        struct inode *inode = d_inode(dentry);
+       int error = 0;
 
        /*
         * I believe we can only get a negative dentry here in the case of a
@@ -1293,7 +1293,8 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
                return 0;
        }
 
-       error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+       if (nfs_mapping_need_revalidate_inode(inode))
+               error = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
        dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
                        __func__, inode->i_ino, error ? "invalid" : "valid");
        return !error;
@@ -2285,8 +2286,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
                if (cache == NULL)
                        goto out;
                /* Found an entry, is our attribute cache valid? */
-               if (!nfs_attribute_cache_expired(inode) &&
-                   !(nfsi->cache_validity & NFS_INO_INVALID_ATTR))
+               if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
                        break;
                err = -ECHILD;
                if (!may_block)
@@ -2334,12 +2334,12 @@ static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred,
                cache = NULL;
        if (cache == NULL)
                goto out;
-       err = nfs_revalidate_inode_rcu(NFS_SERVER(inode), inode);
-       if (err)
+       if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
                goto out;
        res->jiffies = cache->jiffies;
        res->cred = cache->cred;
        res->mask = cache->mask;
+       err = 0;
 out:
        rcu_read_unlock();
        return err;
@@ -2491,12 +2491,13 @@ EXPORT_SYMBOL_GPL(nfs_may_open);
 static int nfs_execute_ok(struct inode *inode, int mask)
 {
        struct nfs_server *server = NFS_SERVER(inode);
-       int ret;
+       int ret = 0;
 
-       if (mask & MAY_NOT_BLOCK)
-               ret = nfs_revalidate_inode_rcu(server, inode);
-       else
-               ret = nfs_revalidate_inode(server, inode);
+       if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) {
+               if (mask & MAY_NOT_BLOCK)
+                       return -ECHILD;
+               ret = __nfs_revalidate_inode(server, inode);
+       }
        if (ret == 0 && !execute_ok(inode))
                ret = -EACCES;
        return ret;
index 55208b9b3c110b1bf9ded64a90a93c196c0fa160..157cb43ce9dbef4bd1c190ae54da4a3ec07e780e 100644 (file)
@@ -101,21 +101,11 @@ EXPORT_SYMBOL_GPL(nfs_file_release);
 static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
 {
        struct nfs_server *server = NFS_SERVER(inode);
-       struct nfs_inode *nfsi = NFS_I(inode);
-       const unsigned long force_reval = NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
-       unsigned long cache_validity = nfsi->cache_validity;
-
-       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) &&
-           (cache_validity & force_reval) != force_reval)
-               goto out_noreval;
 
        if (filp->f_flags & O_DIRECT)
                goto force_reval;
-       if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
-               goto force_reval;
-       if (nfs_attribute_timeout(inode))
+       if (nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE))
                goto force_reval;
-out_noreval:
        return 0;
 force_reval:
        return __nfs_revalidate_inode(server, inode);
index a5589b791439af1c6f426cb95e2a0670edd2a30b..f956ca20a8a3595e36e6cae0e913dc90a47b1e22 100644 (file)
@@ -282,7 +282,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
                             s->nfs_client->cl_minorversion);
 
 out_test_devid:
-       if (filelayout_test_devid_unavailable(devid))
+       if (ret->ds_clp == NULL ||
+           filelayout_test_devid_unavailable(devid))
                ret = NULL;
 out:
        return ret;
index 9e111d07f66747b200051955d9c21997187d4002..45962fe5098c6ff9e87a1a23158409841ad02033 100644 (file)
@@ -1126,7 +1126,8 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
        case -EPIPE:
                dprintk("%s DS connection error %d\n", __func__,
                        task->tk_status);
-               nfs4_mark_deviceid_unavailable(devid);
+               nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+                               &devid->deviceid);
                rpc_wake_up(&tbl->slot_tbl_waitq);
                /* fall through */
        default:
@@ -1175,7 +1176,8 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
        default:
                dprintk("%s DS connection error %d\n", __func__,
                        task->tk_status);
-               nfs4_mark_deviceid_unavailable(devid);
+               nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+                               &devid->deviceid);
        }
        /* FIXME: Need to prevent infinite looping here. */
        return -NFS4ERR_RESET_TO_PNFS;
index 3cc39d1c1206512b4b58b189f7bd39be20a4611a..e5a6f248697b369003e89ed526608d7cd2a296eb 100644 (file)
@@ -177,7 +177,7 @@ out_err:
 static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
                struct nfs4_deviceid_node *devid)
 {
-       nfs4_mark_deviceid_unavailable(devid);
+       nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid);
        if (!ff_layout_has_available_ds(lseg))
                pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
                                lseg);
index 5864146e05e6ad36d616593b10aeddeb8398eece..011e4f8c1e015d72f6ec7b485f27555200168457 100644 (file)
@@ -160,6 +160,43 @@ int nfs_sync_mapping(struct address_space *mapping)
        return ret;
 }
 
+static int nfs_attribute_timeout(struct inode *inode)
+{
+       struct nfs_inode *nfsi = NFS_I(inode);
+
+       return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
+}
+
+static bool nfs_check_cache_invalid_delegated(struct inode *inode, unsigned long flags)
+{
+       unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+
+       /* Special case for the pagecache or access cache */
+       if (flags == NFS_INO_REVAL_PAGECACHE &&
+           !(cache_validity & NFS_INO_REVAL_FORCED))
+               return false;
+       return (cache_validity & flags) != 0;
+}
+
+static bool nfs_check_cache_invalid_not_delegated(struct inode *inode, unsigned long flags)
+{
+       unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+
+       if ((cache_validity & flags) != 0)
+               return true;
+       if (nfs_attribute_timeout(inode))
+               return true;
+       return false;
+}
+
+bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
+{
+       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+               return nfs_check_cache_invalid_delegated(inode, flags);
+
+       return nfs_check_cache_invalid_not_delegated(inode, flags);
+}
+
 static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
@@ -795,6 +832,8 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
        if (!is_sync)
                return;
        inode = d_inode(ctx->dentry);
+       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+               return;
        nfsi = NFS_I(inode);
        if (inode->i_mapping->nrpages == 0)
                return;
@@ -1044,13 +1083,6 @@ out:
        return status;
 }
 
-int nfs_attribute_timeout(struct inode *inode)
-{
-       struct nfs_inode *nfsi = NFS_I(inode);
-
-       return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
-}
-
 int nfs_attribute_cache_expired(struct inode *inode)
 {
        if (nfs_have_delegated_attributes(inode))
@@ -1073,15 +1105,6 @@ int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
 }
 EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
 
-int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode)
-{
-       if (!(NFS_I(inode)->cache_validity &
-                       (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
-                       && !nfs_attribute_cache_expired(inode))
-               return NFS_STALE(inode) ? -ESTALE : 0;
-       return -ECHILD;
-}
-
 static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
@@ -1114,17 +1137,8 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
 
 bool nfs_mapping_need_revalidate_inode(struct inode *inode)
 {
-       unsigned long cache_validity = NFS_I(inode)->cache_validity;
-
-       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
-               const unsigned long force_reval =
-                       NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
-               return (cache_validity & force_reval) == force_reval;
-       }
-
-       return (cache_validity & NFS_INO_REVAL_PAGECACHE)
-               || nfs_attribute_timeout(inode)
-               || NFS_STALE(inode);
+       return nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE) ||
+               NFS_STALE(inode);
 }
 
 int nfs_revalidate_mapping_rcu(struct inode *inode)
@@ -1536,13 +1550,6 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr
 {
        unsigned long invalid = NFS_INO_INVALID_ATTR;
 
-       /*
-        * Don't revalidate the pagecache if we hold a delegation, but do
-        * force an attribute update
-        */
-       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
-               invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
-
        if (S_ISDIR(inode->i_mode))
                invalid |= NFS_INO_INVALID_DATA;
        nfs_set_cache_invalid(inode, invalid);
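
Pulling the pieces of the new predicate together, the behaviour it encodes can be summarized as follows (a paraphrase of the helpers above, not additional code):

/*
 * nfs_check_cache_invalid(inode, flags) returns true when:
 *
 *   read delegation held:  (cache_validity & flags) != 0, except that
 *                          flags == NFS_INO_REVAL_PAGECACHE additionally
 *                          requires NFS_INO_REVAL_FORCED to be set;
 *   no delegation:         (cache_validity & flags) != 0, or the
 *                          attribute cache has timed out.
 */
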
index 6b79c2ca9b9a5eed783117d43b12006f6260f107..09ca5095c04e427c881785170aefe7fdf58e7621 100644 (file)
@@ -381,6 +381,7 @@ extern int nfs_drop_inode(struct inode *);
 extern void nfs_clear_inode(struct inode *);
 extern void nfs_evict_inode(struct inode *);
 void nfs_zap_acl_cache(struct inode *inode);
+extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
 extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
 extern int nfs_wait_atomic_killable(atomic_t *p);
 
index d33242c8d95d58a5366a4a57283005702852c29b..6dcbc5defb7a8dd670b63995eb553c379e47a0d4 100644 (file)
@@ -1089,8 +1089,15 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
 
        spin_lock(&dir->i_lock);
        nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-       if (!cinfo->atomic || cinfo->before != dir->i_version)
+       if (cinfo->atomic && cinfo->before == dir->i_version) {
+               nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
+               nfsi->attrtimeo_timestamp = jiffies;
+       } else {
                nfs_force_lookup_revalidate(dir);
+               if (cinfo->before != dir->i_version)
+                       nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
+                               NFS_INO_INVALID_ACL;
+       }
        dir->i_version = cinfo->after;
        nfsi->attr_gencount = nfs_inc_attr_generation_counter();
        nfs_fscache_invalidate(dir);
@@ -3115,6 +3122,16 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
                        res_stateid = &calldata->res.stateid;
                        renew_lease(server, calldata->timestamp);
                        break;
+               case -NFS4ERR_ACCESS:
+                       if (calldata->arg.bitmask != NULL) {
+                               calldata->arg.bitmask = NULL;
+                               calldata->res.fattr = NULL;
+                               task->tk_status = 0;
+                               rpc_restart_call_prepare(task);
+                               goto out_release;
+
+                       }
+                       break;
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_EXPIRED:
@@ -3140,7 +3157,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
                        res_stateid, calldata->arg.fmode);
 out_release:
        nfs_release_seqid(calldata->arg.seqid);
-       nfs_refresh_inode(calldata->inode, calldata->res.fattr);
+       nfs_refresh_inode(calldata->inode, &calldata->fattr);
        dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
 }
 
@@ -3193,9 +3210,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                goto out_wait;
        }
 
-       if (calldata->arg.fmode == 0) {
+       if (calldata->arg.fmode == 0)
                task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 
+       if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
                /* Close-to-open cache consistency revalidation */
                if (!nfs4_have_delegation(inode, FMODE_READ))
                        calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
@@ -3207,7 +3225,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                nfs4_map_atomic_open_share(NFS_SERVER(inode),
                                calldata->arg.fmode, 0);
 
-       nfs_fattr_init(calldata->res.fattr);
+       if (calldata->res.fattr == NULL)
+               calldata->arg.bitmask = NULL;
+       else if (calldata->arg.bitmask == NULL)
+               calldata->res.fattr = NULL;
        calldata->timestamp = jiffies;
        if (nfs4_setup_sequence(NFS_SERVER(inode),
                                &calldata->arg.seq_args,
@@ -3274,6 +3295,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
        calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
        if (IS_ERR(calldata->arg.seqid))
                goto out_free_calldata;
+       nfs_fattr_init(&calldata->fattr);
        calldata->arg.fmode = 0;
        calldata->lr.arg.ld_private = &calldata->lr.ld_private;
        calldata->res.fattr = &calldata->fattr;
@@ -5673,6 +5695,14 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        case -NFS4ERR_STALE_STATEID:
                task->tk_status = 0;
                break;
+       case -NFS4ERR_ACCESS:
+               if (data->args.bitmask) {
+                       data->args.bitmask = NULL;
+                       data->res.fattr = NULL;
+                       task->tk_status = 0;
+                       rpc_restart_call_prepare(task);
+                       return;
+               }
        default:
                if (nfs4_async_handle_error(task, data->res.server,
                                            NULL, NULL) == -EAGAIN) {
@@ -5692,6 +5722,7 @@ static void nfs4_delegreturn_release(void *calldata)
                if (data->lr.roc)
                        pnfs_roc_release(&data->lr.arg, &data->lr.res,
                                        data->res.lr_ret);
+               nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
                nfs_iput_and_deactive(inode);
        }
        kfree(calldata);
@@ -5780,10 +5811,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
        if (status != 0)
                goto out;
        status = data->rpc_status;
-       if (status == 0)
-               nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
-       else
-               nfs_refresh_inode(inode, &data->fattr);
 out:
        rpc_put_task(task);
        return status;
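
Two invariants connect the CLOSE and DELEGRETURN hunks above; paraphrased:

/*
 * 1. arg.bitmask and res.fattr must be NULL together: the encoder
 *    emits GETATTR only when the bitmask is set, and the decoder
 *    parses attributes only when fattr is set (see the paired checks
 *    in nfs4_close_prepare() above and the xdr enc/dec hunks below).
 *
 * 2. On -NFS4ERR_ACCESS from the piggy-backed GETATTR, both
 *    completion handlers clear that pair, zero tk_status and
 *    rpc_restart_call_prepare() the task, retrying the compound
 *    without attributes.
 */
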
index 95baf7d340f04117ef4a123ac3da5972b07693a1..1d152f4470cd6f6b0bcc6f73fd572e331b18778b 100644 (file)
@@ -494,21 +494,18 @@ nfs4_alloc_state_owner(struct nfs_server *server,
 }
 
 static void
-nfs4_drop_state_owner(struct nfs4_state_owner *sp)
-{
-       struct rb_node *rb_node = &sp->so_server_node;
-
-       if (!RB_EMPTY_NODE(rb_node)) {
-               struct nfs_server *server = sp->so_server;
-               struct nfs_client *clp = server->nfs_client;
-
-               spin_lock(&clp->cl_lock);
-               if (!RB_EMPTY_NODE(rb_node)) {
-                       rb_erase(rb_node, &server->state_owners);
-                       RB_CLEAR_NODE(rb_node);
-               }
-               spin_unlock(&clp->cl_lock);
-       }
+nfs4_reset_state_owner(struct nfs4_state_owner *sp)
+{
+       /* This state_owner is no longer usable, but must
+        * remain in place so that state recovery can find it
+        * and the opens associated with it.
+        * It may also be used for a new 'open' request to
+        * return a delegation to the server.
+        * So update the 'create_time' so that it looks like
+        * a new state_owner.  This will cause the server to
+        * request an OPEN_CONFIRM to start a new sequence.
+        */
+       sp->so_seqid.create_time = ktime_get();
 }
 
 static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
@@ -797,21 +794,33 @@ void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
 
 /*
  * Search the state->lock_states for an existing lock_owner
- * that is compatible with current->files
+ * that is compatible with either of the given owners.
+ * If the second is non-zero, then the first refers to a Posix-lock
+ * owner (current->files) and the second refers to a flock/OFD
+ * owner (struct file*).  In that case, prefer a match for the first
+ * owner.
+ * If both sorts of locks are held on the same file, we cannot know
+ * which stateid was intended to be used, so a "correct" choice cannot
+ * be made.  Failing that, a "consistent" choice is preferable.  The
+ * consistent choice we make is to prefer the first owner, that of a
+ * Posix lock.
  */
 static struct nfs4_lock_state *
 __nfs4_find_lock_state(struct nfs4_state *state,
                       fl_owner_t fl_owner, fl_owner_t fl_owner2)
 {
-       struct nfs4_lock_state *pos;
+       struct nfs4_lock_state *pos, *ret = NULL;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
-               if (pos->ls_owner != fl_owner &&
-                   pos->ls_owner != fl_owner2)
-                       continue;
-               atomic_inc(&pos->ls_count);
-               return pos;
+               if (pos->ls_owner == fl_owner) {
+                       ret = pos;
+                       break;
+               }
+               if (pos->ls_owner == fl_owner2)
+                       ret = pos;
        }
-       return NULL;
+       if (ret)
+               atomic_inc(&ret->ls_count);
+       return ret;
 }
 
 /*
@@ -1101,7 +1110,7 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
 
        sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
        if (status == -NFS4ERR_BAD_SEQID)
-               nfs4_drop_state_owner(sp);
+               nfs4_reset_state_owner(sp);
        if (!nfs4_has_session(sp->so_server->nfs_client))
                nfs_increment_seqid(status, seqid);
 }
index 1af6268a7d8c4c71779d3b64f7ce087b431df723..e9255cb453e664c385c9a94969f69bc3514024b1 100644 (file)
@@ -502,11 +502,13 @@ static int nfs4_stat_to_errno(int);
                                (compound_encode_hdr_maxsz + \
                                 encode_sequence_maxsz + \
                                 encode_putfh_maxsz + \
+                                encode_layoutreturn_maxsz + \
                                 encode_open_downgrade_maxsz)
 #define NFS4_dec_open_downgrade_sz \
                                (compound_decode_hdr_maxsz + \
                                 decode_sequence_maxsz + \
                                 decode_putfh_maxsz + \
+                                decode_layoutreturn_maxsz + \
                                 decode_open_downgrade_maxsz)
 #define NFS4_enc_close_sz      (compound_encode_hdr_maxsz + \
                                 encode_sequence_maxsz + \
@@ -2277,9 +2279,9 @@ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_putfh(xdr, args->fh, &hdr);
        if (args->lr_args)
                encode_layoutreturn(xdr, args->lr_args, &hdr);
-       encode_close(xdr, args, &hdr);
        if (args->bitmask != NULL)
                encode_getfattr(xdr, args->bitmask, &hdr);
+       encode_close(xdr, args, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2356,6 +2358,8 @@ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
        encode_compound_hdr(xdr, req, &hdr);
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->fh, &hdr);
+       if (args->lr_args)
+               encode_layoutreturn(xdr, args->lr_args, &hdr);
        encode_open_downgrade(xdr, args, &hdr);
        encode_nops(&hdr);
 }
@@ -2701,7 +2705,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
        encode_putfh(xdr, args->fhandle, &hdr);
        if (args->lr_args)
                encode_layoutreturn(xdr, args->lr_args, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
+       if (args->bitmask)
+               encode_getfattr(xdr, args->bitmask, &hdr);
        encode_delegreturn(xdr, args->stateid, &hdr);
        encode_nops(&hdr);
 }
@@ -6151,6 +6156,12 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
        status = decode_putfh(xdr);
        if (status)
                goto out;
+       if (res->lr_res) {
+               status = decode_layoutreturn(xdr, res->lr_res);
+               res->lr_ret = status;
+               if (status)
+                       goto out;
+       }
        status = decode_open_downgrade(xdr, res);
 out:
        return status;
@@ -6484,16 +6495,12 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
                if (status)
                        goto out;
        }
+       if (res->fattr != NULL) {
+               status = decode_getfattr(xdr, res->fattr, res->server);
+               if (status != 0)
+                       goto out;
+       }
        status = decode_close(xdr, res);
-       if (status != 0)
-               goto out;
-       /*
-        * Note: Server may do delete on close for this file
-        *      in which case the getattr call will fail with
-        *      an ESTALE error. Shouldn't be a problem,
-        *      though, since fattr->valid will remain unset.
-        */
-       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6966,9 +6973,11 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
                if (status)
                        goto out;
        }
-       status = decode_getfattr(xdr, res->fattr, res->server);
-       if (status != 0)
-               goto out;
+       if (res->fattr) {
+               status = decode_getfattr(xdr, res->fattr, res->server);
+               if (status != 0)
+                       goto out;
+       }
        status = decode_delegreturn(xdr);
 out:
        return status;
index 896df7bdf85f6c5a92b1c206cb799afea454c1e9..59554f3adf2948a10dd945b5f8441c236f53e9f2 100644 (file)
@@ -1251,6 +1251,7 @@ bool pnfs_roc(struct inode *ino,
        nfs4_stateid stateid;
        enum pnfs_iomode iomode = 0;
        bool layoutreturn = false, roc = false;
+       bool skip_read = false;
 
        if (!nfs_have_layout(ino))
                return false;
@@ -1270,18 +1271,27 @@ retry:
        }
 
        /* no roc if we hold a delegation */
-       if (nfs4_check_delegation(ino, FMODE_READ))
-               goto out_noroc;
+       if (nfs4_check_delegation(ino, FMODE_READ)) {
+               if (nfs4_check_delegation(ino, FMODE_WRITE))
+                       goto out_noroc;
+               skip_read = true;
+       }
 
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
+               if (state == NULL)
+                       continue;
                /* Don't return layout if there is open file state */
-               if (state != NULL && state->state != 0)
+               if (state->state & FMODE_WRITE)
                        goto out_noroc;
+               if (state->state & FMODE_READ)
+                       skip_read = true;
        }
 
 
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
+               if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
+                       continue;
                /* If we are sending layoutreturn, invalidate all valid lsegs */
                if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
                        continue;
index 741077deef3b5544dd71ae9635facb8ac1a228a9..a3645249f7ecfa4cbdae8434bb87b47ffd02dbb1 100644 (file)
@@ -150,12 +150,10 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
  */
 void fsnotify_unmount_inodes(struct super_block *sb)
 {
-       struct inode *inode, *next_i, *need_iput = NULL;
+       struct inode *inode, *iput_inode = NULL;
 
        spin_lock(&sb->s_inode_list_lock);
-       list_for_each_entry_safe(inode, next_i, &sb->s_inodes, i_sb_list) {
-               struct inode *need_iput_tmp;
-
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                /*
                 * We cannot __iget() an inode in state I_FREEING,
                 * I_WILL_FREE, or I_NEW which is fine because by that point
@@ -178,49 +176,24 @@ void fsnotify_unmount_inodes(struct super_block *sb)
                        continue;
                }
 
-               need_iput_tmp = need_iput;
-               need_iput = NULL;
-
-               /* In case fsnotify_inode_delete() drops a reference. */
-               if (inode != need_iput_tmp)
-                       __iget(inode);
-               else
-                       need_iput_tmp = NULL;
+               __iget(inode);
                spin_unlock(&inode->i_lock);
-
-               /* In case the dropping of a reference would nuke next_i. */
-               while (&next_i->i_sb_list != &sb->s_inodes) {
-                       spin_lock(&next_i->i_lock);
-                       if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
-                                               atomic_read(&next_i->i_count)) {
-                               __iget(next_i);
-                               need_iput = next_i;
-                               spin_unlock(&next_i->i_lock);
-                               break;
-                       }
-                       spin_unlock(&next_i->i_lock);
-                       next_i = list_next_entry(next_i, i_sb_list);
-               }
-
-               /*
-                * We can safely drop s_inode_list_lock here because either
-                * we actually hold references on both inode and next_i or
-                * end of list.  Also no new inodes will be added since the
-                * umount has begun.
-                */
                spin_unlock(&sb->s_inode_list_lock);
 
-               if (need_iput_tmp)
-                       iput(need_iput_tmp);
+               if (iput_inode)
+                       iput(iput_inode);
 
                /* for each watch, send FS_UNMOUNT and then remove it */
                fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
 
                fsnotify_inode_delete(inode);
 
-               iput(inode);
+               iput_inode = inode;
 
                spin_lock(&sb->s_inode_list_lock);
        }
        spin_unlock(&sb->s_inode_list_lock);
+
+       if (iput_inode)
+               iput(iput_inode);
 }
index 87e577a49b0d567550e09912c179a719a7ee692c..cec495a921e32cbbd4ff5bd5d2944bbf5c93f09b 100644 (file)
@@ -634,7 +634,15 @@ static void qsync_work_fn(struct work_struct *work)
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;
 
-       dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
+       /*
+        * We have to be careful here not to deadlock on s_umount as umount
+        * disabling quotas may be in progress and it waits for this work to
+        * complete. If trylock fails, we'll do the sync next time...
+        */
+       if (down_read_trylock(&sb->s_umount)) {
+               dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
+               up_read(&sb->s_umount);
+       }
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));
 }
index 8a54fd8a4fa57a76f7e0389ec8d875108c59e3a1..32c5a40c1257ecda129944a1abf4cdac83a2e8a1 100644 (file)
@@ -454,7 +454,7 @@ out:
 /* Sync changes in local quota file into global quota file and
  * reinitialize local quota file.
  * The function expects the local quota file to be already locked and
- * dqonoff_mutex locked. */
+ * s_umount locked in shared mode. */
 static int ocfs2_recover_local_quota_file(struct inode *lqinode,
                                          int type,
                                          struct ocfs2_quota_recovery *rec)
@@ -597,7 +597,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
        printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
               "slot %u\n", osb->dev_str, slot_num);
 
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+       down_read(&sb->s_umount);
        for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
                if (list_empty(&(rec->r_list[type])))
                        continue;
@@ -674,7 +674,7 @@ out_put:
                        break;
        }
 out:
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+       up_read(&sb->s_umount);
        kfree(rec);
        return status;
 }
@@ -840,7 +840,10 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
        }
        ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
 
-       /* dqonoff_mutex protects us against racing with recovery thread... */
+       /*
+        * s_umount held in exclusive mode protects us against racing with
+        * recovery thread...
+        */
        if (oinfo->dqi_rec) {
                ocfs2_free_quota_recovery(oinfo->dqi_rec);
                mark_clean = 0;
index d171d2c53f7f8928762acb9731d202063328967b..f8933cb53d682aaf4c7e22efb75a43d4c1c688ac 100644 (file)
@@ -4834,7 +4834,7 @@ int ocfs2_reflink_remap_range(struct file *file_in,
 
        ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
                        &len, is_dedupe);
-       if (ret || len == 0)
+       if (ret <= 0)
                goto out_unlock;
 
        /* Lock out changes to the allocation maps and remap. */
index c894d945b084d71d7672f588d561acccf143be43..a24e42f953418b1d675481ed826d47106c1730f4 100644 (file)
@@ -985,7 +985,6 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
        for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
                if (!sb_has_quota_loaded(sb, type))
                        continue;
-               /* Cancel periodic syncing before we grab dqonoff_mutex */
                oinfo = sb_dqinfo(sb, type)->dqi_priv;
                cancel_delayed_work_sync(&oinfo->dqi_sync_work);
                inode = igrab(sb->s_dquot.files[type]);
index 234a9ac49958ed978f67aad68d81fb75cf5717ce..06a793f4ae38739c3bb70b3c5a35d5adf6b8e3c1 100644 (file)
@@ -67,49 +67,47 @@ int get_dominating_id(struct mount *mnt, const struct path *root)
 
 static int do_make_slave(struct mount *mnt)
 {
-       struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
-       struct mount *slave_mnt;
+       struct mount *master, *slave_mnt;
 
-       /*
-        * slave 'mnt' to a peer mount that has the
-        * same root dentry. If none is available then
-        * slave it to anything that is available.
-        */
-       while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
-              peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;
-
-       if (peer_mnt == mnt) {
-               peer_mnt = next_peer(mnt);
-               if (peer_mnt == mnt)
-                       peer_mnt = NULL;
-       }
-       if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
-           list_empty(&mnt->mnt_share))
-               mnt_release_group_id(mnt);
-
-       list_del_init(&mnt->mnt_share);
-       mnt->mnt_group_id = 0;
-
-       if (peer_mnt)
-               master = peer_mnt;
-
-       if (master) {
-               list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
-                       slave_mnt->mnt_master = master;
-               list_move(&mnt->mnt_slave, &master->mnt_slave_list);
-               list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
-               INIT_LIST_HEAD(&mnt->mnt_slave_list);
+       if (list_empty(&mnt->mnt_share)) {
+               if (IS_MNT_SHARED(mnt)) {
+                       mnt_release_group_id(mnt);
+                       CLEAR_MNT_SHARED(mnt);
+               }
+               master = mnt->mnt_master;
+               if (!master) {
+                       struct list_head *p = &mnt->mnt_slave_list;
+                       while (!list_empty(p)) {
+                               slave_mnt = list_first_entry(p,
+                                               struct mount, mnt_slave);
+                               list_del_init(&slave_mnt->mnt_slave);
+                               slave_mnt->mnt_master = NULL;
+                       }
+                       return 0;
+               }
        } else {
-               struct list_head *p = &mnt->mnt_slave_list;
-               while (!list_empty(p)) {
-                        slave_mnt = list_first_entry(p,
-                                       struct mount, mnt_slave);
-                       list_del_init(&slave_mnt->mnt_slave);
-                       slave_mnt->mnt_master = NULL;
+               struct mount *m;
+               /*
+                * slave 'mnt' to a peer mount that has the
+                * same root dentry. If none is available then
+                * slave it to anything that is available.
+                */
+               for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) {
+                       if (m->mnt.mnt_root == mnt->mnt.mnt_root) {
+                               master = m;
+                               break;
+                       }
                }
+               list_del_init(&mnt->mnt_share);
+               mnt->mnt_group_id = 0;
+               CLEAR_MNT_SHARED(mnt);
        }
+       list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
+               slave_mnt->mnt_master = master;
+       list_move(&mnt->mnt_slave, &master->mnt_slave_list);
+       list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
+       INIT_LIST_HEAD(&mnt->mnt_slave_list);
        mnt->mnt_master = master;
-       CLEAR_MNT_SHARED(mnt);
        return 0;
 }
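
As a reading aid, the control flow of the restructured function, paraphrased from the hunk above:

/*
 * do_make_slave(mnt):
 *   mnt has no peers (mnt_share empty):
 *     drop the group id and MNT_SHARED flag if set;
 *     if there is no master either, detach all of mnt's slaves
 *     (their mnt_master becomes NULL) and return;
 *     otherwise keep master = mnt->mnt_master;
 *   mnt has peers:
 *     choose as master a peer sharing mnt's root dentry, falling
 *     back to the first peer, then leave the peer group;
 *   common tail:
 *     repoint mnt's slaves at the chosen master and move mnt,
 *     together with its spliced slave list, under that master.
 */
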
 
index 8738a0d62c095021fe39208f8ec6cf4a18beab70..406fed92362a3da805b7f1834268acd2e996d46f 100644 (file)
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
- * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
+ *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex
  */
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
@@ -572,7 +571,8 @@ int dquot_scan_active(struct super_block *sb,
        struct dquot *dquot, *old_dquot = NULL;
        int ret = 0;
 
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+       WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
+
        spin_lock(&dq_list_lock);
        list_for_each_entry(dquot, &inuse_list, dq_inuse) {
                if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
@@ -603,7 +603,6 @@ int dquot_scan_active(struct super_block *sb,
        spin_unlock(&dq_list_lock);
 out:
        dqput(old_dquot);
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        return ret;
 }
 EXPORT_SYMBOL(dquot_scan_active);
@@ -617,7 +616,8 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
        int cnt;
        int err, ret = 0;
 
-       mutex_lock(&dqopt->dqonoff_mutex);
+       WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
+
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
@@ -653,7 +653,6 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
                    && info_dirty(&dqopt->info[cnt]))
                        sb->dq_op->write_info(sb, cnt);
        dqstats_inc(DQST_SYNCS);
-       mutex_unlock(&dqopt->dqonoff_mutex);
 
        return ret;
 }
@@ -683,7 +682,6 @@ int dquot_quota_sync(struct super_block *sb, int type)
         * Now when everything is written we can discard the pagecache so
         * that userspace sees the changes.
         */
-       mutex_lock(&dqopt->dqonoff_mutex);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
@@ -693,7 +691,6 @@ int dquot_quota_sync(struct super_block *sb, int type)
                truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
                inode_unlock(dqopt->files[cnt]);
        }
-       mutex_unlock(&dqopt->dqonoff_mutex);
 
        return 0;
 }
@@ -935,7 +932,7 @@ static int dqinit_needed(struct inode *inode, int type)
        return 0;
 }
 
-/* This routine is guarded by dqonoff_mutex mutex */
+/* This routine is guarded by s_umount semaphore */
 static void add_dquot_ref(struct super_block *sb, int type)
 {
        struct inode *inode, *old_inode = NULL;
@@ -2050,21 +2047,13 @@ int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
        struct quota_info *dqopt = sb_dqopt(sb);
        int err;
 
-       mutex_lock(&dqopt->dqonoff_mutex);
-       if (!sb_has_quota_active(sb, qid->type)) {
-               err = -ESRCH;
-               goto out;
-       }
-       if (!dqopt->ops[qid->type]->get_next_id) {
-               err = -ENOSYS;
-               goto out;
-       }
+       if (!sb_has_quota_active(sb, qid->type))
+               return -ESRCH;
+       if (!dqopt->ops[qid->type]->get_next_id)
+               return -ENOSYS;
        mutex_lock(&dqopt->dqio_mutex);
        err = dqopt->ops[qid->type]->get_next_id(sb, qid);
        mutex_unlock(&dqopt->dqio_mutex);
-out:
-       mutex_unlock(&dqopt->dqonoff_mutex);
-
        return err;
 }
 EXPORT_SYMBOL(dquot_get_next_id);
@@ -2107,6 +2096,10 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
        struct quota_info *dqopt = sb_dqopt(sb);
        struct inode *toputinode[MAXQUOTAS];
 
+       /* s_umount should be held in exclusive mode */
+       if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+               up_read(&sb->s_umount);
+
        /* Cannot turn off usage accounting without turning off limits, or
         * suspend quotas and simultaneously turn quotas off. */
        if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
@@ -2114,18 +2107,14 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
            DQUOT_USAGE_ENABLED)))
                return -EINVAL;
 
-       /* We need to serialize quota_off() for device */
-       mutex_lock(&dqopt->dqonoff_mutex);
-
        /*
         * Skip everything if there's nothing to do. We have to do this because
         * sometimes we are called when fill_super() failed and calling
         * sync_fs() in such cases does no good.
         */
-       if (!sb_any_quota_loaded(sb)) {
-               mutex_unlock(&dqopt->dqonoff_mutex);
+       if (!sb_any_quota_loaded(sb))
                return 0;
-       }
+
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                toputinode[cnt] = NULL;
                if (type != -1 && cnt != type)
@@ -2179,7 +2168,6 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
                dqopt->info[cnt].dqi_bgrace = 0;
                dqopt->ops[cnt] = NULL;
        }
-       mutex_unlock(&dqopt->dqonoff_mutex);
 
        /* Skip syncing and setting flags if quota files are hidden */
        if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
@@ -2196,20 +2184,14 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
         * must also discard the blockdev buffers so that we see the
         * changes done by userspace on the next quotaon() */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
-               if (toputinode[cnt]) {
-                       mutex_lock(&dqopt->dqonoff_mutex);
-                       /* If quota was reenabled in the meantime, we have
-                        * nothing to do */
-                       if (!sb_has_quota_loaded(sb, cnt)) {
-                               inode_lock(toputinode[cnt]);
-                               toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
+               /* This can happen when suspending quotas on remount-ro... */
+               if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
+                       inode_lock(toputinode[cnt]);
+                       toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
                                  S_NOATIME | S_NOQUOTA);
-                               truncate_inode_pages(&toputinode[cnt]->i_data,
-                                                    0);
-                               inode_unlock(toputinode[cnt]);
-                               mark_inode_dirty_sync(toputinode[cnt]);
-                       }
-                       mutex_unlock(&dqopt->dqonoff_mutex);
+                       truncate_inode_pages(&toputinode[cnt]->i_data, 0);
+                       inode_unlock(toputinode[cnt]);
+                       mark_inode_dirty_sync(toputinode[cnt]);
                }
        if (sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
@@ -2281,6 +2263,10 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
                error = -EINVAL;
                goto out_fmt;
        }
+       if (sb_has_quota_loaded(sb, type)) {
+               error = -EBUSY;
+               goto out_fmt;
+       }
 
        if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
                /* As we bypass the pagecache we must now flush all the
@@ -2292,11 +2278,6 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
                sync_filesystem(sb);
                invalidate_bdev(sb->s_bdev);
        }
-       mutex_lock(&dqopt->dqonoff_mutex);
-       if (sb_has_quota_loaded(sb, type)) {
-               error = -EBUSY;
-               goto out_lock;
-       }
 
        if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
                /* We don't want quota and atime on quota files (deadlocks
@@ -2317,7 +2298,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        error = -EIO;
        dqopt->files[type] = igrab(inode);
        if (!dqopt->files[type])
-               goto out_lock;
+               goto out_file_flags;
        error = -EINVAL;
        if (!fmt->qf_ops->check_quota_file(sb, type))
                goto out_file_init;
@@ -2340,14 +2321,13 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
        spin_unlock(&dq_state_lock);
 
        add_dquot_ref(sb, type);
-       mutex_unlock(&dqopt->dqonoff_mutex);
 
        return 0;
 
 out_file_init:
        dqopt->files[type] = NULL;
        iput(inode);
-out_lock:
+out_file_flags:
        if (oldflags != -1) {
                inode_lock(inode);
                /* Set the flags back (in the case of accidental quotaon()
@@ -2356,7 +2336,6 @@ out_lock:
                inode->i_flags |= oldflags;
                inode_unlock(inode);
        }
-       mutex_unlock(&dqopt->dqonoff_mutex);
 out_fmt:
        put_quota_format(fmt);
 
@@ -2371,15 +2350,16 @@ int dquot_resume(struct super_block *sb, int type)
        int ret = 0, cnt;
        unsigned int flags;
 
+       /* s_umount should be held in exclusive mode */
+       if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+               up_read(&sb->s_umount);
+
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                if (type != -1 && cnt != type)
                        continue;
-
-               mutex_lock(&dqopt->dqonoff_mutex);
-               if (!sb_has_quota_suspended(sb, cnt)) {
-                       mutex_unlock(&dqopt->dqonoff_mutex);
+               if (!sb_has_quota_suspended(sb, cnt))
                        continue;
-               }
+
                inode = dqopt->files[cnt];
                dqopt->files[cnt] = NULL;
                spin_lock(&dq_state_lock);
@@ -2388,7 +2368,6 @@ int dquot_resume(struct super_block *sb, int type)
                                                        cnt);
                dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
                spin_unlock(&dq_state_lock);
-               mutex_unlock(&dqopt->dqonoff_mutex);
 
                flags = dquot_generic_flag(flags, cnt);
                ret = vfs_load_quota_inode(inode, cnt,
@@ -2424,42 +2403,30 @@ EXPORT_SYMBOL(dquot_quota_on);
 int dquot_enable(struct inode *inode, int type, int format_id,
                 unsigned int flags)
 {
-       int ret = 0;
        struct super_block *sb = inode->i_sb;
-       struct quota_info *dqopt = sb_dqopt(sb);
 
        /* Just unsuspend quotas? */
        BUG_ON(flags & DQUOT_SUSPENDED);
+       /* s_umount should be held in exclusive mode */
+       if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+               up_read(&sb->s_umount);
 
        if (!flags)
                return 0;
        /* Just updating flags needed? */
        if (sb_has_quota_loaded(sb, type)) {
-               mutex_lock(&dqopt->dqonoff_mutex);
-               /* Now do a reliable test... */
-               if (!sb_has_quota_loaded(sb, type)) {
-                       mutex_unlock(&dqopt->dqonoff_mutex);
-                       goto load_quota;
-               }
                if (flags & DQUOT_USAGE_ENABLED &&
-                   sb_has_quota_usage_enabled(sb, type)) {
-                       ret = -EBUSY;
-                       goto out_lock;
-               }
+                   sb_has_quota_usage_enabled(sb, type))
+                       return -EBUSY;
                if (flags & DQUOT_LIMITS_ENABLED &&
-                   sb_has_quota_limits_enabled(sb, type)) {
-                       ret = -EBUSY;
-                       goto out_lock;
-               }
+                   sb_has_quota_limits_enabled(sb, type))
+                       return -EBUSY;
                spin_lock(&dq_state_lock);
                sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
                spin_unlock(&dq_state_lock);
-out_lock:
-               mutex_unlock(&dqopt->dqonoff_mutex);
-               return ret;
+               return 0;
        }
 
-load_quota:
        return vfs_load_quota_inode(inode, type, format_id, flags);
 }
 EXPORT_SYMBOL(dquot_enable);
@@ -2751,7 +2718,6 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state)
        struct quota_info *dqopt = sb_dqopt(sb);
        int type;
   
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
        memset(state, 0, sizeof(*state));
        for (type = 0; type < MAXQUOTAS; type++) {
                if (!sb_has_quota_active(sb, type))
@@ -2773,7 +2739,6 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state)
                tstate->nextents = 1;   /* We don't know... */
                spin_unlock(&dq_data_lock);
        }
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        return 0;
 }
 EXPORT_SYMBOL(dquot_get_state);
@@ -2787,18 +2752,13 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
        if ((ii->i_fieldmask & QC_WARNS_MASK) ||
            (ii->i_fieldmask & QC_RT_SPC_TIMER))
                return -EINVAL;
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
-       if (!sb_has_quota_active(sb, type)) {
-               err = -ESRCH;
-               goto out;
-       }
+       if (!sb_has_quota_active(sb, type))
+               return -ESRCH;
        mi = sb_dqopt(sb)->info + type;
        if (ii->i_fieldmask & QC_FLAGS) {
                if ((ii->i_flags & QCI_ROOT_SQUASH &&
-                    mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) {
-                       err = -EINVAL;
-                       goto out;
-               }
+                    mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
+                       return -EINVAL;
        }
        spin_lock(&dq_data_lock);
        if (ii->i_fieldmask & QC_SPC_TIMER)
@@ -2815,8 +2775,6 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
        mark_info_dirty(sb, type);
        /* Force write to disk */
        sb->dq_op->write_info(sb, type);
-out:
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        return err;
 }
 EXPORT_SYMBOL(dquot_set_dqinfo);
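
dquot_disable(), dquot_resume() and dquot_enable() above all open-code the same assertion. Factored into a standalone helper it would read as below; the helper name is hypothetical, the kernel here simply repeats the two lines:

#include <linux/rwsem.h>

/*
 * Warn unless the caller holds 'sem' for write: a successful
 * read-trylock proves no writer holds it, so undo the probe and warn.
 * (One-sided: contention from another writer can mask a violation.)
 */
static inline void assert_rwsem_held_exclusive(struct rw_semaphore *sem)
{
	if (WARN_ON_ONCE(down_read_trylock(sem)))
		up_read(sem);
}
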
index 5acd0c4769afa878f4435205cccb68897b81b035..07e08c7d05cae23d92ab891cb384e5dda90509df 100644 (file)
@@ -104,13 +104,9 @@ static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
 {
        __u32 fmt;
 
-       mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
-       if (!sb_has_quota_active(sb, type)) {
-               mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+       if (!sb_has_quota_active(sb, type))
                return -ESRCH;
-       }
        fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
-       mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
        if (copy_to_user(addr, &fmt, sizeof(fmt)))
                return -EFAULT;
        return 0;
@@ -789,9 +785,14 @@ static int quotactl_cmd_write(int cmd)
        }
        return 1;
 }
-
 #endif /* CONFIG_BLOCK */
 
+/* Return true if quotactl command is manipulating quota on/off state */
+static bool quotactl_cmd_onoff(int cmd)
+{
+       return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
+}
+
 /*
  * look up a superblock on which quota ops will be performed
  * - use the name of a block device to find the superblock thereon
@@ -809,7 +810,9 @@ static struct super_block *quotactl_block(const char __user *special, int cmd)
        putname(tmp);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);
-       if (quotactl_cmd_write(cmd))
+       if (quotactl_cmd_onoff(cmd))
+               sb = get_super_exclusive_thawed(bdev);
+       else if (quotactl_cmd_write(cmd))
                sb = get_super_thawed(bdev);
        else
                sb = get_super(bdev);
@@ -872,7 +875,10 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
 
        ret = do_quotactl(sb, type, cmds, id, addr, pathp);
 
-       drop_super(sb);
+       if (!quotactl_cmd_onoff(cmds))
+               drop_super(sb);
+       else
+               drop_super_exclusive(sb);
 out:
        if (pathp && !IS_ERR(pathp))
                path_put(pathp);
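
The quotactl() changes above only affect in-kernel locking, not the syscall ABI. A hedged userspace check that the read-only path still behaves as before (/dev/sda1 is a placeholder for a device with quota enabled):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/quota.h>

    int main(void)
    {
            unsigned int fmt;

            /* Read-only command: the kernel takes s_umount shared via
             * get_super_thawed(); only Q_QUOTAON/Q_QUOTAOFF use the new
             * exclusive variant. */
            if (quotactl(QCMD(Q_GETFMT, USRQUOTA), "/dev/sda1", 0,
                         (caddr_t)&fmt))
                    perror("Q_GETFMT");
            else
                    printf("quota format id: %#x\n", fmt);
            return 0;
    }
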
index da6de12b5c46d4a56e15e892f527153682ff6e30..7537b6b6b5a2dc80caa8774f8534ccc5e6b4fee2 100644 (file)
@@ -1669,6 +1669,9 @@ static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write)
  * Check that the two inodes are eligible for cloning, the ranges make
  * sense, and then flush all dirty data.  Caller must ensure that the
  * inodes have been locked against any other modifications.
+ *
+ * Returns: 0 for "nothing to clone", 1 for "something to clone", or
+ * the usual negative error code.
  */
 int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                               struct inode *inode_out, loff_t pos_out,
@@ -1695,17 +1698,15 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
 
        /* Are we going all the way to the end? */
        isize = i_size_read(inode_in);
-       if (isize == 0) {
-               *len = 0;
+       if (isize == 0)
                return 0;
-       }
 
        /* Zero length dedupe exits immediately; reflink goes to EOF. */
        if (*len == 0) {
-               if (is_dedupe) {
-                       *len = 0;
+               if (is_dedupe || pos_in == isize)
                        return 0;
-               }
+               if (pos_in > isize)
+                       return -EINVAL;
                *len = isize - pos_in;
        }
 
@@ -1769,7 +1770,7 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                        return -EBADE;
        }
 
-       return 0;
+       return 1;
 }
 EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
 
@@ -1955,6 +1956,9 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
                goto out;
        ret = 0;
 
+       if (off + len > i_size_read(src))
+               return -EINVAL;
+
        /* pre-format output fields to sane values */
        for (i = 0; i < count; i++) {
                same->info[i].bytes_deduped = 0ULL;
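
vfs_clone_file_prep_inodes() now returns a tri-state instead of 0/negative plus a separate *len check. A sketch of the new caller convention, mirroring the xfs_reflink_remap_range() hunk further down (the identifiers are the function's own parameters):

    ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
                                     &len, is_dedupe);
    if (ret <= 0)           /* negative error, or 0 == nothing to clone */
            goto out_unlock;
    /* ret == 1: ranges validated and len fixed up, safe to remap */
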
index 368bfb92b115c0e99ce4c654f6fdecb6ec5a2763..a11f271800ef990987b85bc1df1614b2d549650a 100644 (file)
@@ -190,6 +190,13 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
         */
        m->version = file->f_version;
 
+       /*
+        * if request is to read from zero offset, reset iterator to first
+        * record as it might have been already advanced by previous requests
+        */
+       if (*ppos == 0)
+               m->index = 0;
+
        /* Don't assume *ppos is where we left it */
        if (unlikely(*ppos != m->read_pos)) {
                while ((err = traverse(m, *ppos)) == -EAGAIN)
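
The seq_read() fix above makes reads at offset zero deterministic. A small userspace demonstration, assuming any seq_file-backed proc file such as /proc/mounts:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[128];
            ssize_t n;
            int fd = open("/proc/mounts", O_RDONLY);

            if (fd < 0)
                    return 1;
            read(fd, buf, sizeof(buf));          /* advances the iterator */
            n = pread(fd, buf, sizeof(buf), 0);  /* restarts at record 0 */
            if (n > 0)
                    fwrite(buf, 1, (size_t)n, stdout);
            close(fd);
            return 0;
    }
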
index 8ed7c9d8c0fbaf7ec81de0afa9ac3edafe8932aa..873d83104e79aed14a24c417f211e38ae4038122 100644 (file)
@@ -1087,7 +1087,13 @@ EXPORT_SYMBOL(do_splice_direct);
 
 static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
 {
-       while (pipe->nrbufs == pipe->buffers) {
+       for (;;) {
+               if (unlikely(!pipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       return -EPIPE;
+               }
+               if (pipe->nrbufs != pipe->buffers)
+                       return 0;
                if (flags & SPLICE_F_NONBLOCK)
                        return -EAGAIN;
                if (signal_pending(current))
@@ -1096,7 +1102,6 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }
-       return 0;
 }
 
 static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
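
With the wait_for_space() rework above, splicing into a pipe that has lost all its readers fails with SIGPIPE/-EPIPE instead of proceeding. A hedged userspace demonstration (the source file is arbitrary; any spliceable fd works):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int p[2];
            int fd = open("/etc/hostname", O_RDONLY);

            signal(SIGPIPE, SIG_IGN);  /* observe EPIPE instead of dying */
            if (fd < 0 || pipe(p) < 0)
                    return 1;
            close(p[0]);               /* no readers left on the pipe */
            if (splice(fd, NULL, p[1], NULL, 1, 0) < 0)
                    perror("splice");  /* expected: Broken pipe */
            return 0;
    }
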
index c183835566c19c56fd30d6a1c22d380d10668411..1709ed029a2cae70c3d4a6cccccca760a0ad003f 100644 (file)
@@ -244,7 +244,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
        mutex_init(&s->s_vfs_rename_mutex);
        lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
        mutex_init(&s->s_dquot.dqio_mutex);
-       mutex_init(&s->s_dquot.dqonoff_mutex);
        s->s_maxbytes = MAX_NON_LFS;
        s->s_op = &default_op;
        s->s_time_gran = 1000000000;
@@ -558,6 +557,13 @@ void drop_super(struct super_block *sb)
 
 EXPORT_SYMBOL(drop_super);
 
+void drop_super_exclusive(struct super_block *sb)
+{
+       up_write(&sb->s_umount);
+       put_super(sb);
+}
+EXPORT_SYMBOL(drop_super_exclusive);
+
 /**
  *     iterate_supers - call function for all active superblocks
  *     @f: function to call
@@ -628,15 +634,7 @@ void iterate_supers_type(struct file_system_type *type,
 
 EXPORT_SYMBOL(iterate_supers_type);
 
-/**
- *     get_super - get the superblock of a device
- *     @bdev: device to get the superblock for
- *     
- *     Scans the superblock list and finds the superblock of the file system
- *     mounted on the device given. %NULL is returned if no match is found.
- */
-
-struct super_block *get_super(struct block_device *bdev)
+static struct super_block *__get_super(struct block_device *bdev, bool excl)
 {
        struct super_block *sb;
 
@@ -651,11 +649,17 @@ rescan:
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
-                       down_read(&sb->s_umount);
+                       if (!excl)
+                               down_read(&sb->s_umount);
+                       else
+                               down_write(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & MS_BORN))
                                return sb;
-                       up_read(&sb->s_umount);
+                       if (!excl)
+                               up_read(&sb->s_umount);
+                       else
+                               up_write(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
@@ -666,31 +670,66 @@ rescan:
        return NULL;
 }
 
-EXPORT_SYMBOL(get_super);
-
 /**
- *     get_super_thawed - get thawed superblock of a device
+ *     get_super - get the superblock of a device
  *     @bdev: device to get the superblock for
  *
  *     Scans the superblock list and finds the superblock of the file system
- *     mounted on the device. The superblock is returned once it is thawed
- *     (or immediately if it was not frozen). %NULL is returned if no match
- *     is found.
+ *     mounted on the device given. %NULL is returned if no match is found.
  */
-struct super_block *get_super_thawed(struct block_device *bdev)
+struct super_block *get_super(struct block_device *bdev)
+{
+       return __get_super(bdev, false);
+}
+EXPORT_SYMBOL(get_super);
+
+static struct super_block *__get_super_thawed(struct block_device *bdev,
+                                             bool excl)
 {
        while (1) {
-               struct super_block *s = get_super(bdev);
+               struct super_block *s = __get_super(bdev, excl);
                if (!s || s->s_writers.frozen == SB_UNFROZEN)
                        return s;
-               up_read(&s->s_umount);
+               if (!excl)
+                       up_read(&s->s_umount);
+               else
+                       up_write(&s->s_umount);
                wait_event(s->s_writers.wait_unfrozen,
                           s->s_writers.frozen == SB_UNFROZEN);
                put_super(s);
        }
 }
+
+/**
+ *     get_super_thawed - get thawed superblock of a device
+ *     @bdev: device to get the superblock for
+ *
+ *     Scans the superblock list and finds the superblock of the file system
+ *     mounted on the device. The superblock is returned once it is thawed
+ *     (or immediately if it was not frozen). %NULL is returned if no match
+ *     is found.
+ */
+struct super_block *get_super_thawed(struct block_device *bdev)
+{
+       return __get_super_thawed(bdev, false);
+}
 EXPORT_SYMBOL(get_super_thawed);
 
+/**
+ *     get_super_exclusive_thawed - get thawed superblock of a device
+ *     @bdev: device to get the superblock for
+ *
+ *     Scans the superblock list and finds the superblock of the file system
+ *     mounted on the device. The superblock is returned once it is thawed
+ *     (or immediately if it was not frozen) and s_umount semaphore is held
+ *     in exclusive mode. %NULL is returned if no match is found.
+ */
+struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
+{
+       return __get_super_thawed(bdev, true);
+}
+EXPORT_SYMBOL(get_super_exclusive_thawed);
+
 /**
  * get_active_super - get an active reference to the superblock of a device
  * @bdev: device to get the superblock for
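
get_super_exclusive_thawed() and drop_super_exclusive() pair the same way as the existing shared-mode helpers. A sketch of the intended usage, as in the quotactl_block() hunk above (bdev is assumed to be a valid struct block_device pointer):

    struct super_block *sb = get_super_exclusive_thawed(bdev);

    if (sb) {
            /* s_umount is held for write here: quota on/off cannot race
             * with umount, remount, or any other quotactl caller. */
            /* ... manipulate quota state ... */
            drop_super_exclusive(sb);
    }
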
index 45ceb94e89e42a362d633a35d2861e2808910e1a..1bc0bd6a9848cb14064bb09bb810daaac267a4d9 100644 (file)
@@ -1191,7 +1191,7 @@ out:
        return err;
 }
 
-void ufs_truncate_blocks(struct inode *inode)
+static void ufs_truncate_blocks(struct inode *inode)
 {
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
              S_ISLNK(inode->i_mode)))
index aca2d4bd4303b07b41a86d0bbe1e43bb4fe88308..07593a362cd03d0fdae120122d6d1c5aba8d809b 100644 (file)
@@ -1161,7 +1161,7 @@ xfs_reflink_remap_range(
 
        ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
                        &len, is_dedupe);
-       if (ret || len == 0)
+       if (ret <= 0)
                goto out_unlock;
 
        trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
index d7d0f495a34e975d7c045efab5dd029d5f393687..303315b9693fc999022b192f91b4681505f3c571 100644 (file)
@@ -13,6 +13,8 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
 }
 #endif
 
+extern bool acpi_permanent_mmap;
+
 void __iomem *__ref
 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
index 5c7356adc10b5f96f0fc39f430ae4741fbda8ea1..f5e10dd8e86b712a4c0e97a206d36ff88fabe02f 100644 (file)
@@ -513,10 +513,12 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                             acpi_get_table(acpi_string signature, u32 instance,
                                            struct acpi_table_header
                                            **out_table))
+ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table))
+
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-                            acpi_get_table_by_index(u32 table_index,
-                                                    struct acpi_table_header
-                                                    **out_table))
+                           acpi_get_table_by_index(u32 table_index,
+                                                   struct acpi_table_header
+                                                   **out_table))
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                             acpi_install_table_handler(acpi_table_handler
                                                        handler, void *context))
@@ -965,15 +967,6 @@ void acpi_terminate_debugger(void);
 /*
  * Divergences
  */
-ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-                           acpi_get_table_with_size(acpi_string signature,
-                                                    u32 instance,
-                                                    struct acpi_table_header
-                                                    **out_table,
-                                                    acpi_size *tbl_size))
-
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                            acpi_get_data_full(acpi_handle object,
                                               acpi_object_handler handler,
index c19700e2a2fe25d169a64180593438d9815c3f77..da5708caf8a12493de0e52376c27b7c0bffd1378 100644 (file)
@@ -371,6 +371,7 @@ struct acpi_table_desc {
        union acpi_name_union signature;
        acpi_owner_id owner_id;
        u8 flags;
+       u16 validation_count;
 };
 
 /* Masks for Flags field above */
index a5509d87230a4778de5566f8cfe7b705a1dd89f1..7dbb1141f546077ceec4c9fe39b077ae8e1ba379 100644 (file)
@@ -142,7 +142,6 @@ static inline void acpi_os_terminate_command_signals(void)
 /*
  * OSL interfaces added by Linux
  */
-void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
 
 #endif                         /* __KERNEL__ */
 
diff --git a/include/dt-bindings/net/mdio.h b/include/dt-bindings/net/mdio.h
deleted file mode 100644 (file)
index 99c6d90..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * This header provides generic constants for ethernet MDIO bindings
- */
-
-#ifndef _DT_BINDINGS_NET_MDIO_H
-#define _DT_BINDINGS_NET_MDIO_H
-
-/*
- * EEE capability Advertisement
- */
-
-#define MDIO_EEE_100TX         0x0002  /* 100TX EEE cap */
-#define MDIO_EEE_1000T         0x0004  /* 1000T EEE cap */
-#define MDIO_EEE_10GT          0x0008  /* 10GT EEE cap */
-#define MDIO_EEE_1000KX                0x0010  /* 1000KX EEE cap */
-#define MDIO_EEE_10GKX4                0x0020  /* 10G KX4 EEE cap */
-#define MDIO_EEE_10GKR         0x0040  /* 10G KR EEE cap */
-
-#endif
index 9eb42dbc5582ace99283629f0905861ac820c7d5..fdd0a343f45527ee2b5d4a787ad5388a6ce4e47a 100644 (file)
@@ -14,14 +14,9 @@ typedef int (kiocb_cancel_fn)(struct kiocb *);
 /* prototypes */
 #ifdef CONFIG_AIO
 extern void exit_aio(struct mm_struct *mm);
-extern long do_io_submit(aio_context_t ctx_id, long nr,
-                        struct iocb __user *__user *iocbpp, bool compat);
 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
 #else
 static inline void exit_aio(struct mm_struct *mm) { }
-static inline long do_io_submit(aio_context_t ctx_id, long nr,
-                               struct iocb __user * __user *iocbpp,
-                               bool compat) { return 0; }
 static inline void kiocb_set_cancel_fn(struct kiocb *req,
                                       kiocb_cancel_fn *cancel) { }
 #endif /* CONFIG_AIO */
index 286b2a2643833615633e82d7d9e667b66abbfaef..83695641bd5ec272551857c448cc9b4f354898b8 100644 (file)
@@ -288,7 +288,6 @@ enum blk_queue_state {
 struct blk_queue_tag {
        struct request **tag_index;     /* map of busy tags */
        unsigned long *tag_map;         /* bit map of free/busy tags */
-       int busy;                       /* current depth */
        int max_depth;                  /* what we will send to device */
        int real_max_depth;             /* what the array can hold */
        atomic_t refcnt;                /* map can be shared */
index a951fd10aaaad07cf7ab897615a4e3d84caf6a42..6a524bf6a06d112613075547c5b17a069da00077 100644 (file)
@@ -18,6 +18,7 @@ enum cache_type {
 
 /**
  * struct cacheinfo - represent a cache leaf node
+ * @id: This cache's id. It is unique among caches with the same (type, level).
  * @type: type of the cache - data, inst or unified
  * @level: represents the hierarchy in the multi-level cache
  * @coherency_line_size: size of each cache line usually representing
@@ -44,6 +45,7 @@ enum cache_type {
  * keeping, the remaining members form the core properties of the cache
  */
 struct cacheinfo {
+       unsigned int id;
        enum cache_type type;
        unsigned int level;
        unsigned int coherency_line_size;
@@ -61,6 +63,7 @@ struct cacheinfo {
 #define CACHE_WRITE_ALLOCATE   BIT(3)
 #define CACHE_ALLOCATE_POLICY_MASK     \
        (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+#define CACHE_ID               BIT(4)
 
        struct device_node *of_node;
        bool disable_sysfs;
index 9a30b921f7401487cb6238b9bcb6e9f096e9a39f..2319b8c108e87b9e87c11cc4c9aa314d24eb0364 100644 (file)
 #ifndef _CONFIGFS_H_
 #define _CONFIGFS_H_
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-
-#include <linux/atomic.h>
+#include <linux/stat.h>   /* S_IRUGO */
+#include <linux/types.h>  /* ssize_t */
+#include <linux/list.h>   /* struct list_head */
+#include <linux/kref.h>   /* struct kref */
+#include <linux/mutex.h>  /* struct mutex */
 
 #define CONFIGFS_ITEM_NAME_LEN 20
 
index e6e4146bf9ae5ee9def7da466f369127895713df..2ba074328894cea30d6273a574417d789fae271d 100644 (file)
@@ -2903,8 +2903,10 @@ extern void put_filesystem(struct file_system_type *fs);
 extern struct file_system_type *get_fs_type(const char *name);
 extern struct super_block *get_super(struct block_device *);
 extern struct super_block *get_super_thawed(struct block_device *);
+extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
 extern struct super_block *get_active_super(struct block_device *bdev);
 extern void drop_super(struct super_block *sb);
+extern void drop_super_exclusive(struct super_block *sb);
 extern void iterate_supers(void (*)(struct super_block *, void *), void *);
 extern void iterate_supers_type(struct file_system_type *,
                                void (*)(struct super_block *, void *), void *);
index 0eb7c2e7f0d633b577169fe46dca6767a0c9c415..7f6952f8d6aad7281f5403364bcfc13bdd30d36d 100644 (file)
@@ -11,6 +11,7 @@
 #define _LINUX_IMA_H
 
 #include <linux/fs.h>
+#include <linux/kexec.h>
 struct linux_binprm;
 
 #ifdef CONFIG_IMA
@@ -23,6 +24,10 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
                              enum kernel_read_file_id id);
 extern void ima_post_path_mknod(struct dentry *dentry);
 
+#ifdef CONFIG_IMA_KEXEC
+extern void ima_add_kexec_buffer(struct kimage *image);
+#endif
+
 #else
 static inline int ima_bprm_check(struct linux_binprm *bprm)
 {
@@ -62,6 +67,13 @@ static inline void ima_post_path_mknod(struct dentry *dentry)
 
 #endif /* CONFIG_IMA */
 
+#ifndef CONFIG_IMA_KEXEC
+struct kimage;
+
+static inline void ima_add_kexec_buffer(struct kimage *image)
+{}
+#endif
+
 #ifdef CONFIG_IMA_APPRAISE
 extern void ima_inode_post_setattr(struct dentry *dentry);
 extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
index fec597fb34cbb10e8b7b8a873c6c0e9395c60595..a4860bc9b73d4ffe927d087c24b70970084ff330 100644 (file)
@@ -115,6 +115,8 @@ enum {
 #define AXP806_CLDO2_V_CTRL            0x25
 #define AXP806_CLDO3_V_CTRL            0x26
 #define AXP806_VREF_TEMP_WARN_L                0xf3
+#define AXP806_BUS_ADDR_EXT            0xfe
+#define AXP806_REG_ADDR_EXT            0xff
 
 /* Interrupt */
 #define AXP152_IRQ1_EN                 0x40
@@ -226,6 +228,10 @@ enum {
 #define AXP20X_OCV_MAX                 0xf
 
 /* AXP22X specific registers */
+#define AXP22X_PMIC_ADC_H              0x56
+#define AXP22X_PMIC_ADC_L              0x57
+#define AXP22X_TS_ADC_H                        0x58
+#define AXP22X_TS_ADC_L                        0x59
 #define AXP22X_BATLOW_THRES1           0xe6
 
 /* AXP288 specific registers */
index 8e1cdbef3dad05e3d2665f2b12c9e9128ed75583..2c0127cb06c590f0ed3a13cdf5d890c477ebb6f5 100644 (file)
@@ -28,8 +28,6 @@
 #include <linux/mfd/core.h>
 #include <linux/platform_data/edma.h>
 
-#include <mach/hardware.h>
-
 struct regmap;
 
 /*
@@ -99,8 +97,6 @@ struct davinci_vcif {
        dma_addr_t dma_rx_addr;
 };
 
-struct davinci_vc;
-
 struct davinci_vc {
        /* Device data */
        struct device *dev;
index 6d435a3c06bcc6f7804181966c5e7e4f9a119caf..83701ef7d3c7bfbb1dc59f6e92a6f0664a7c48c5 100644 (file)
@@ -290,6 +290,7 @@ enum rk818_reg {
 #define SWITCH2_EN     BIT(6)
 #define SWITCH1_EN     BIT(5)
 #define DEV_OFF_RST    BIT(3)
+#define DEV_OFF                BIT(0)
 
 #define VB_LO_ACT              BIT(4)
 #define VB_LO_SEL_3500MV       (7 << 0)
index cadc6543909d96ef553bcde9935bfecaaca42a26..e5a6cdeb77dbcc5e8603199bb6021bdc17f8516c 100644 (file)
 #define RN5T618_DC3CTL2                        0x31
 #define RN5T618_DC4CTL                 0x32
 #define RN5T618_DC4CTL2                        0x33
+#define RN5T618_DC5CTL                 0x34
+#define RN5T618_DC5CTL2                        0x35
 #define RN5T618_DC1DAC                 0x36
 #define RN5T618_DC2DAC                 0x37
 #define RN5T618_DC3DAC                 0x38
 #define RN5T618_DC4DAC                 0x39
+#define RN5T618_DC5DAC                 0x3a
 #define RN5T618_DC1DAC_SLP             0x3b
 #define RN5T618_DC2DAC_SLP             0x3c
 #define RN5T618_DC3DAC_SLP             0x3d
 #define RN5T618_LDO3DAC                        0x4e
 #define RN5T618_LDO4DAC                        0x4f
 #define RN5T618_LDO5DAC                        0x50
+#define RN5T618_LDO6DAC                        0x51
+#define RN5T618_LDO7DAC                        0x52
+#define RN5T618_LDO8DAC                        0x53
+#define RN5T618_LDO9DAC                        0x54
+#define RN5T618_LDO10DAC               0x55
 #define RN5T618_LDORTCDAC              0x56
 #define RN5T618_LDORTC2DAC             0x57
 #define RN5T618_LDO1DAC_SLP            0x58
@@ -231,6 +239,7 @@ enum {
 enum {
        RN5T567 = 0,
        RN5T618,
+       RC5T619,
 };
 
 struct rn5t618 {
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
new file mode 100644 (file)
index 0000000..d7a29f2
--- /dev/null
@@ -0,0 +1,94 @@
+/* Header of ADC MFD core driver for sunxi platforms
+ *
+ * Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef __SUN4I_GPADC__H__
+#define __SUN4I_GPADC__H__
+
+#define SUN4I_GPADC_CTRL0                              0x00
+
+#define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY(x)             ((GENMASK(7, 0) & (x)) << 24)
+#define SUN4I_GPADC_CTRL0_ADC_FIRST_DLY_MODE           BIT(23)
+#define SUN4I_GPADC_CTRL0_ADC_CLK_SELECT               BIT(22)
+#define SUN4I_GPADC_CTRL0_ADC_CLK_DIVIDER(x)           ((GENMASK(1, 0) & (x)) << 20)
+#define SUN4I_GPADC_CTRL0_FS_DIV(x)                    ((GENMASK(3, 0) & (x)) << 16)
+#define SUN4I_GPADC_CTRL0_T_ACQ(x)                     (GENMASK(15, 0) & (x))
+
+#define SUN4I_GPADC_CTRL1                              0x04
+
+#define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE(x)                ((GENMASK(7, 0) & (x)) << 12)
+#define SUN4I_GPADC_CTRL1_STYLUS_UP_DEBOUNCE_EN                BIT(9)
+#define SUN4I_GPADC_CTRL1_TOUCH_PAN_CALI_EN            BIT(6)
+#define SUN4I_GPADC_CTRL1_TP_DUAL_EN                   BIT(5)
+#define SUN4I_GPADC_CTRL1_TP_MODE_EN                   BIT(4)
+#define SUN4I_GPADC_CTRL1_TP_ADC_SELECT                        BIT(3)
+#define SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(x)           (GENMASK(2, 0) & (x))
+
+/* TP_CTRL1 bits for sun6i SOCs */
+#define SUN6I_GPADC_CTRL1_TOUCH_PAN_CALI_EN            BIT(7)
+#define SUN6I_GPADC_CTRL1_TP_DUAL_EN                   BIT(6)
+#define SUN6I_GPADC_CTRL1_TP_MODE_EN                   BIT(5)
+#define SUN6I_GPADC_CTRL1_TP_ADC_SELECT                        BIT(4)
+#define SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(x)           (GENMASK(3, 0) & BIT(x))
+
+#define SUN4I_GPADC_CTRL2                              0x08
+
+#define SUN4I_GPADC_CTRL2_TP_SENSITIVE_ADJUST(x)       ((GENMASK(3, 0) & (x)) << 28)
+#define SUN4I_GPADC_CTRL2_TP_MODE_SELECT(x)            ((GENMASK(1, 0) & (x)) << 26)
+#define SUN4I_GPADC_CTRL2_PRE_MEA_EN                   BIT(24)
+#define SUN4I_GPADC_CTRL2_PRE_MEA_THRE_CNT(x)          (GENMASK(23, 0) & (x))
+
+#define SUN4I_GPADC_CTRL3                              0x0c
+
+#define SUN4I_GPADC_CTRL3_FILTER_EN                    BIT(2)
+#define SUN4I_GPADC_CTRL3_FILTER_TYPE(x)               (GENMASK(1, 0) & (x))
+
+#define SUN4I_GPADC_TPR                                        0x18
+
+#define SUN4I_GPADC_TPR_TEMP_ENABLE                    BIT(16)
+#define SUN4I_GPADC_TPR_TEMP_PERIOD(x)                 (GENMASK(15, 0) & (x))
+
+#define SUN4I_GPADC_INT_FIFOC                          0x10
+
+#define SUN4I_GPADC_INT_FIFOC_TEMP_IRQ_EN              BIT(18)
+#define SUN4I_GPADC_INT_FIFOC_TP_OVERRUN_IRQ_EN                BIT(17)
+#define SUN4I_GPADC_INT_FIFOC_TP_DATA_IRQ_EN           BIT(16)
+#define SUN4I_GPADC_INT_FIFOC_TP_DATA_XY_CHANGE                BIT(13)
+#define SUN4I_GPADC_INT_FIFOC_TP_FIFO_TRIG_LEVEL(x)    ((GENMASK(4, 0) & (x)) << 8)
+#define SUN4I_GPADC_INT_FIFOC_TP_DATA_DRQ_EN           BIT(7)
+#define SUN4I_GPADC_INT_FIFOC_TP_FIFO_FLUSH            BIT(4)
+#define SUN4I_GPADC_INT_FIFOC_TP_UP_IRQ_EN             BIT(1)
+#define SUN4I_GPADC_INT_FIFOC_TP_DOWN_IRQ_EN           BIT(0)
+
+#define SUN4I_GPADC_INT_FIFOS                          0x14
+
+#define SUN4I_GPADC_INT_FIFOS_TEMP_DATA_PENDING                BIT(18)
+#define SUN4I_GPADC_INT_FIFOS_FIFO_OVERRUN_PENDING     BIT(17)
+#define SUN4I_GPADC_INT_FIFOS_FIFO_DATA_PENDING                BIT(16)
+#define SUN4I_GPADC_INT_FIFOS_TP_IDLE_FLG              BIT(2)
+#define SUN4I_GPADC_INT_FIFOS_TP_UP_PENDING            BIT(1)
+#define SUN4I_GPADC_INT_FIFOS_TP_DOWN_PENDING          BIT(0)
+
+#define SUN4I_GPADC_CDAT                               0x1c
+#define SUN4I_GPADC_TEMP_DATA                          0x20
+#define SUN4I_GPADC_DATA                               0x24
+
+#define SUN4I_GPADC_IRQ_FIFO_DATA                      0
+#define SUN4I_GPADC_IRQ_TEMP_DATA                      1
+
+/* 10s delay before suspending the IP */
+#define SUN4I_GPADC_AUTOSUSPEND_DELAY                  10000
+
+struct sun4i_gpadc_dev {
+       struct device                   *dev;
+       struct regmap                   *regmap;
+       struct regmap_irq_chip_data     *regmap_irqc;
+       void __iomem                    *base;
+};
+
+#endif
index 3cbec4b2496a6a5cdbef0816aa92e157e8fcfd4a..eac285756b37a918c1bfb11845b2a160f559618f 100644 (file)
 #define TPS65217_PPATH_AC_CURRENT_MASK 0x0C
 #define TPS65217_PPATH_USB_CURRENT_MASK        0x03
 
-#define TPS65217_INT_RESERVEDM         BIT(7)
 #define TPS65217_INT_PBM               BIT(6)
 #define TPS65217_INT_ACM               BIT(5)
 #define TPS65217_INT_USBM              BIT(4)
 #define TPS65217_INT_PBI               BIT(2)
 #define TPS65217_INT_ACI               BIT(1)
 #define TPS65217_INT_USBI              BIT(0)
+#define TPS65217_INT_SHIFT             4
+#define TPS65217_INT_MASK              (TPS65217_INT_PBM | TPS65217_INT_ACM | \
+                                       TPS65217_INT_USBM)
 
 #define TPS65217_CHGCONFIG0_TREG       BIT(7)
 #define TPS65217_CHGCONFIG0_DPPM       BIT(6)
index d1db9527fab5897e320dffc6925017b8bc29207d..bccd2d68b1e306c741ef295f9fe73d2d9e830086 100644 (file)
@@ -282,10 +282,9 @@ struct tps65218 {
        struct regulator_desc desc[TPS65218_NUM_REGULATOR];
        struct tps_info *info[TPS65218_NUM_REGULATOR];
        struct regmap *regmap;
+       u8 *strobes;
 };
 
-int tps65218_reg_read(struct tps65218 *tps, unsigned int reg,
-                                       unsigned int *val);
 int tps65218_reg_write(struct tps65218 *tps, unsigned int reg,
                        unsigned int val, unsigned int level);
 int tps65218_set_bits(struct tps65218 *tps, unsigned int reg,
index 1a603701550e33e51dd2f4c78a5d4311abc0112a..b25d0297ba887a901da8dd5349ac777e908da533 100644 (file)
@@ -319,21 +319,7 @@ struct tps65912 {
        struct regmap_irq_chip_data *irq_data;
 };
 
-static const struct regmap_range tps65912_yes_ranges[] = {
-       regmap_reg_range(TPS65912_INT_STS, TPS65912_GPIO5),
-};
-
-static const struct regmap_access_table tps65912_volatile_table = {
-       .yes_ranges = tps65912_yes_ranges,
-       .n_yes_ranges = ARRAY_SIZE(tps65912_yes_ranges),
-};
-
-static const struct regmap_config tps65912_regmap_config = {
-       .reg_bits = 8,
-       .val_bits = 8,
-       .cache_type = REGCACHE_RBTREE,
-       .volatile_table = &tps65912_volatile_table,
-};
+extern const struct regmap_config tps65912_regmap_config;
 
 int tps65912_device_init(struct tps65912 *tps);
 int tps65912_device_exit(struct tps65912 *tps);
index cb631973839a7ff7dd10a8426de10d3a30393e9a..f1da8c8dd473869897c3363f9e299bd28086c8d5 100644 (file)
@@ -340,10 +340,8 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
 extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
 extern int nfs_permission(struct inode *, int);
 extern int nfs_open(struct inode *, struct file *);
-extern int nfs_attribute_timeout(struct inode *inode);
 extern int nfs_attribute_cache_expired(struct inode *inode);
 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
-extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
 extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
index 78a98821f9d0a98780e6129b6b1f4cc9c0fc4d80..3434eef2a5aad963d579670ed081b68db382239d 100644 (file)
@@ -520,7 +520,6 @@ static inline void quota_send_warning(struct kqid qid, dev_t dev,
 struct quota_info {
        unsigned int flags;                     /* Flags for diskquotas on this device */
        struct mutex dqio_mutex;                /* lock device while I/O in progress */
-       struct mutex dqonoff_mutex;             /* Serialize quotaon & quotaoff */
        struct inode *files[MAXQUOTAS];         /* inodes of quotafiles */
        struct mem_dqinfo info[MAXQUOTAS];      /* Information for each quota type */
        const struct quota_format_ops *ops[MAXQUOTAS];  /* Operations for each type */
index 57c9e0622a38dbd8a742015cd9742bcd52abf226..56375edf2ed22c7f32dd1d92c6b4fe59f3b42229 100644 (file)
@@ -77,8 +77,11 @@ extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
 
 #ifdef CONFIG_PRINTK
 
-#define WARN_ON_RATELIMIT(condition, state)                    \
-               WARN_ON((condition) && __ratelimit(state))
+#define WARN_ON_RATELIMIT(condition, state)    ({              \
+       bool __rtn_cond = !!(condition);                        \
+       WARN_ON(__rtn_cond && __ratelimit(state));              \
+       __rtn_cond;                                             \
+})
 
 #define WARN_RATELIMIT(condition, format, ...)                 \
 ({                                                             \
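
WARN_ON_RATELIMIT() now evaluates to the condition, like WARN_ON(), so it can sit directly in an if. A sketch assuming a ratelimit state rs and hypothetical len/buf_len values:

    static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

    if (WARN_ON_RATELIMIT(len > buf_len, &rs))
            return -EINVAL;  /* branch is still taken even when the
                                printout itself was ratelimited away */
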
index a440cf178191ee8b84c01068201abb5c846af031..4d1905245c7aa50df56acf0f77c77f3347c28c04 100644 (file)
@@ -1821,6 +1821,9 @@ struct task_struct {
        /* cg_list protected by css_set_lock and tsk->alloc_lock */
        struct list_head cg_list;
 #endif
+#ifdef CONFIG_INTEL_RDT_A
+       int closid;
+#endif
 #ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
index 931a47ba45718ad5c329b1085c7fac5319e7448f..1beab5532035dc2126405384d44457f183de2a90 100644 (file)
@@ -205,10 +205,12 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
 
        dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
        if (dev) {
-               ip4 = (struct in_device *)dev->ip_ptr;
-               if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
+               ip4 = in_dev_get(dev);
+               if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
                        ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
                                               (struct in6_addr *)gid);
+                       in_dev_put(ip4);
+               }
                dev_put(dev);
        }
 }
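
The iboe_addr_get_sgid() fix swaps a raw dev->ip_ptr dereference for proper refcounting. The general pattern it adopts (sketch; dev is a held struct net_device pointer):

    struct in_device *in_dev = in_dev_get(dev);  /* takes a reference */

    if (in_dev) {
            /* safe to walk in_dev->ifa_list while the ref is held */
            in_dev_put(in_dev);                  /* drop it when done */
    }
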
index 4ac24f5a3308a8c8bcec7c973433f1f714cda9b2..275581d483ddd90d97c550ee8bf44d705833ecf8 100644 (file)
@@ -1,12 +1,14 @@
 #ifndef ISCSI_TARGET_CORE_H
 #define ISCSI_TARGET_CORE_H
 
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
+#include <linux/dma-direction.h>     /* enum dma_data_direction */
+#include <linux/list.h>              /* struct list_head */
+#include <linux/socket.h>            /* struct sockaddr_storage */
+#include <linux/types.h>             /* u8 */
+#include <scsi/iscsi_proto.h>        /* itt_t */
+#include <target/target_core_base.h> /* struct se_cmd */
+
+struct sock;
 
 #define ISCSIT_VERSION                 "v4.1.0"
 #define ISCSI_MAX_DATASN_MISSING_COUNT 16
index e615bb485d0b3a79ea43e7494db956db17805ac0..c27dd471656dc2da4745516d47253b5d06285cf3 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef ISCSI_TARGET_STAT_H
 #define ISCSI_TARGET_STAT_H
 
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/socket.h>
+
 /*
  * For struct iscsi_tiqn->tiqn_wwn default groups
  */
index 40ac7cd801505db68a979a4fc4a22a6eb71019be..1277e9ba031818e22cd8720a4125f1c9ebf0cb12 100644 (file)
@@ -1,6 +1,6 @@
-#include <linux/module.h>
-#include <linux/list.h>
-#include "iscsi_target_core.h"
+#include "iscsi_target_core.h" /* struct iscsi_cmd */
+
+struct sockaddr_storage;
 
 struct iscsit_transport {
 #define ISCSIT_TRANSPORT_NAME  16
index f6f3bc52c1ac2e21611ba7be2a274c7cb442166d..b54b98dc2d4a77681dd3ecf883d75e062589ee8c 100644 (file)
@@ -1,8 +1,14 @@
 #ifndef TARGET_CORE_BACKEND_H
 #define TARGET_CORE_BACKEND_H
 
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 #define TRANSPORT_FLAG_PASSTHROUGH             1
 
+struct request_queue;
+struct scatterlist;
+
 struct target_backend_ops {
        char name[16];
        char inquiry_prod[16];
index 00558287936d9a0bcc2e386e8fa0b2bcb9af4f97..29e6858bb1648b636dcce48072f9fe43e5a8a884 100644 (file)
@@ -1,14 +1,10 @@
 #ifndef TARGET_CORE_BASE_H
 #define TARGET_CORE_BASE_H
 
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
-#include <linux/percpu_ida.h>
-#include <linux/t10-pi.h>
-#include <net/sock.h>
-#include <net/tcp.h>
+#include <linux/configfs.h>      /* struct config_group */
+#include <linux/dma-direction.h> /* enum dma_data_direction */
+#include <linux/percpu_ida.h>    /* struct percpu_ida */
+#include <linux/semaphore.h>     /* struct semaphore */
 
 #define TARGET_CORE_VERSION            "v5.0"
 
index 5cd6faa6e0d166ed07444cf5e3735e2626483205..358041bad1da0350b776d7ff174ab682b0bb82ef 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef TARGET_CORE_FABRIC_H
 #define TARGET_CORE_FABRIC_H
 
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 struct target_core_fabric_ops {
        struct module *module;
        const char *name;
index 9bd559472c9280a6317e336f3a62471fd0aafa49..e230af2e68558fa8ed1778b2c154686f8b1e2481 100644 (file)
@@ -57,6 +57,7 @@
 #define CGROUP_SUPER_MAGIC     0x27e0eb
 #define CGROUP2_SUPER_MAGIC    0x63677270
 
+#define RDTGROUP_SUPER_MAGIC   0x7655821
 
 #define STACK_END_MAGIC                0x57AC6E9D
 
index 215871bda3a20150e88e001f364ecfa9ae052e64..d416f3baf3924d8093cf275fb1d290bf9d3564e4 100644 (file)
@@ -1194,7 +1194,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
        /* Reserve the 1st slot for get_trampoline_vaddr() */
        set_bit(0, area->bitmap);
        atomic_set(&area->slot_count, 1);
-       copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
+       arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
 
        if (!xol_add_vma(mm, area))
                return area;
index cc2fa35ca480367fe96430ef23ec7b869974c0e7..85e5546cd791cc31261cd6deb8aea933bb41c008 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/kcov.h>
+#include <asm/setup.h>
 
 /*
  * kcov descriptor (one per opened debugfs file).
@@ -73,6 +74,11 @@ void notrace __sanitizer_cov_trace_pc(void)
        if (mode == KCOV_MODE_TRACE) {
                unsigned long *area;
                unsigned long pos;
+               unsigned long ip = _RET_IP_;
+
+#ifdef CONFIG_RANDOMIZE_BASE
+               ip -= kaslr_offset();
+#endif
 
                /*
                 * There is some code that runs in interrupts but for which
@@ -86,7 +92,7 @@ void notrace __sanitizer_cov_trace_pc(void)
                /* The first word is number of subsequent PCs. */
                pos = READ_ONCE(area[0]) + 1;
                if (likely(pos < t->kcov_size)) {
-                       area[pos] = _RET_IP_;
+                       area[pos] = ip;
                        WRITE_ONCE(area[0], pos);
                }
        }
index 0c2df7f737925b6d0bd5bf624ceb12e8a6d53d86..b56a558e406db6375bea4b07e5873a4c6f0b401e 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/fs.h>
+#include <linux/ima.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <linux/syscalls.h>
@@ -132,6 +133,9 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
                return ret;
        image->kernel_buf_len = size;
 
+       /* IMA needs to pass the measurement list to the next kernel. */
+       ima_add_kexec_buffer(image);
+
        /* Call arch image probe handlers */
        ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
                                            image->kernel_buf_len);
index 635482e60ca39f93e706f51dbccf7a19dfefe284..8acef8576ce9b211ea3ede4ccf63220466b40e22 100644 (file)
@@ -150,6 +150,9 @@ cond_syscall(sys_io_destroy);
 cond_syscall(sys_io_submit);
 cond_syscall(sys_io_cancel);
 cond_syscall(sys_io_getevents);
+cond_syscall(compat_sys_io_setup);
+cond_syscall(compat_sys_io_submit);
+cond_syscall(compat_sys_io_getevents);
 cond_syscall(sys_sysfs);
 cond_syscall(sys_syslog);
 cond_syscall(sys_process_vm_readv);
index 7446097f72bd8b590ea192b9fb6c2955461c1338..cb66a46488401b6a99091b489df029c3035a79dd 100644 (file)
@@ -26,7 +26,7 @@ config CONSOLE_LOGLEVEL_DEFAULT
          the kernel bootargs. loglevel=<x> continues to override whatever
          value is specified here as well.
 
-         Note: This does not affect the log level of un-prefixed prink()
+         Note: This does not affect the log level of un-prefixed printk()
          usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
          option.
 
index 228892dabba6f5579478988b84ef007fa04bd661..25f57230380104f419257ea43c1cd3d2e31d7e65 100644 (file)
 }
 
 #define iterate_all_kinds(i, n, v, I, B, K) {                  \
-       size_t skip = i->iov_offset;                            \
-       if (unlikely(i->type & ITER_BVEC)) {                    \
-               struct bio_vec v;                               \
-               struct bvec_iter __bi;                          \
-               iterate_bvec(i, n, v, __bi, skip, (B))          \
-       } else if (unlikely(i->type & ITER_KVEC)) {             \
-               const struct kvec *kvec;                        \
-               struct kvec v;                                  \
-               iterate_kvec(i, n, v, kvec, skip, (K))          \
-       } else {                                                \
-               const struct iovec *iov;                        \
-               struct iovec v;                                 \
-               iterate_iovec(i, n, v, iov, skip, (I))          \
+       if (likely(n)) {                                        \
+               size_t skip = i->iov_offset;                    \
+               if (unlikely(i->type & ITER_BVEC)) {            \
+                       struct bio_vec v;                       \
+                       struct bvec_iter __bi;                  \
+                       iterate_bvec(i, n, v, __bi, skip, (B))  \
+               } else if (unlikely(i->type & ITER_KVEC)) {     \
+                       const struct kvec *kvec;                \
+                       struct kvec v;                          \
+                       iterate_kvec(i, n, v, kvec, skip, (K))  \
+               } else {                                        \
+                       const struct iovec *iov;                \
+                       struct iovec v;                         \
+                       iterate_iovec(i, n, v, iov, skip, (I))  \
+               }                                               \
        }                                                       \
 }
 
@@ -576,7 +578,7 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
                WARN_ON(1);
                return false;
        }
-       if (unlikely(i->count < bytes))                         \
+       if (unlikely(i->count < bytes))
+       if (unlikely(i->count < bytes))
                return false;
 
        iterate_all_kinds(i, bytes, v, ({
@@ -620,7 +622,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
                WARN_ON(1);
                return false;
        }
-       if (unlikely(i->count < bytes))                         \
+       if (unlikely(i->count < bytes))
+       if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
@@ -837,11 +839,8 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
        unsigned long res = 0;
        size_t size = i->count;
 
-       if (!size)
-               return 0;
-
        if (unlikely(i->type & ITER_PIPE)) {
-               if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
+               if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
                return size;
        }
@@ -856,10 +855,8 @@ EXPORT_SYMBOL(iov_iter_alignment);
 
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 {
-        unsigned long res = 0;
+       unsigned long res = 0;
        size_t size = i->count;
-       if (!size)
-               return 0;
 
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
@@ -874,7 +871,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
                );
-               return res;
+       return res;
 }
 EXPORT_SYMBOL(iov_iter_gap_alignment);
 
@@ -908,6 +905,9 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
        size_t capacity;
        int idx;
 
+       if (!maxsize)
+               return 0;
+
        if (!sanity(i))
                return -EFAULT;
 
@@ -926,9 +926,6 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
        if (maxsize > i->count)
                maxsize = i->count;
 
-       if (!maxsize)
-               return 0;
-
        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        iterate_all_kinds(i, maxsize, v, ({
@@ -975,6 +972,9 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
        int idx;
        int npages;
 
+       if (!maxsize)
+               return 0;
+
        if (!sanity(i))
                return -EFAULT;
 
@@ -1006,9 +1006,6 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
        if (maxsize > i->count)
                maxsize = i->count;
 
-       if (!maxsize)
-               return 0;
-
        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages_alloc(i, pages, maxsize, start);
        iterate_all_kinds(i, maxsize, v, ({
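
The zero-size checks move from the exported entry points into the pipe helpers, and iterate_all_kinds() now short-circuits on n == 0, so empty iterators are uniformly safe. For instance (sketch, kernel context):

    /* With i->count == 0 or maxsize == 0 this returns 0 without ever
     * dereferencing the (possibly empty) iovec/bvec/kvec array. */
    ssize_t n = iov_iter_get_pages(i, pages, maxsize, maxpages, &start);
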
index 6c707bfe02fde002feae9ea2fab3fc9647c3baa0..a430131125815803ba25a7014917c6118087dd5c 100644 (file)
@@ -139,7 +139,20 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
                }
 
                if (end_index >= start_index) {
-                       unsigned long count = invalidate_mapping_pages(mapping,
+                       unsigned long count;
+
+                       /*
+                        * It's common to FADV_DONTNEED right after
+                        * the read or write that instantiates the
+                        * pages, in which case there will be some
+                        * sitting on the local LRU cache. Try to
+                        * avoid the expensive remote drain and the
+                        * second cache tree walk below by flushing
+                        * them out right away.
+                        */
+                       lru_add_drain();
+
+                       count = invalidate_mapping_pages(mapping,
                                                start_index, end_index);
 
                        /*
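
The lru_add_drain() addition targets the common write-then-FADV_DONTNEED pattern the comment describes. A hedged userspace illustration of that pattern (scratch.bin is a placeholder path):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096] = { 0 };
            int fd = open("scratch.bin", O_CREAT | O_WRONLY, 0600);

            if (fd < 0)
                    return 1;
            write(fd, buf, sizeof(buf));  /* instantiates page cache pages,
                                             some on the local LRU cache */
            fsync(fd);                    /* DONTNEED only drops clean pages */
            posix_fadvise(fd, 0, sizeof(buf), POSIX_FADV_DONTNEED);
            close(fd);
            return 0;
    }
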
index 782dd866366554e53dda3e6c69c807ec90bd0e08..7bb12e07ffef4273e156893cc150ea5c9d79e8bd 100644 (file)
@@ -100,6 +100,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
                neigh->parms->neigh_cleanup(neigh);
 
        __neigh_notify(neigh, RTM_DELNEIGH, 0);
+       call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
        neigh_release(neigh);
 }
 
index 6c9615c90f37a8bf53a423b489d1e67c8dbf66c4..618ab5079816edf2d449cf7d17c7a954185bc6ad 100644 (file)
@@ -958,7 +958,7 @@ static int __ip_append_data(struct sock *sk,
                csummode = CHECKSUM_PARTIAL;
 
        cork->length += length;
-       if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+       if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
            (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
index 8b13881ed0644c3c32e87718752f8613023c72eb..976073417b03cb1f3e550f897174c140bd49752b 100644 (file)
@@ -148,7 +148,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        __be16 *ports = (__be16 *)skb_transport_header(skb);
 
-       if (skb_transport_offset(skb) + 4 > skb->len)
+       if (skb_transport_offset(skb) + 4 > (int)skb->len)
                return;
 
        /* All current transport protocols have the port numbers in the
index fa5c037227cb2a503c88b0990932a888ca2e8957..9eabf490133a304b5940021e941368ea6653860d 100644 (file)
@@ -798,6 +798,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
        struct rtable *rt;
        struct flowi4 fl4;
        const struct iphdr *iph = (const struct iphdr *) skb->data;
+       struct net *net = dev_net(skb->dev);
        int oif = skb->dev->ifindex;
        u8 tos = RT_TOS(iph->tos);
        u8 prot = iph->protocol;
@@ -805,7 +806,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
 
        rt = (struct rtable *) dst;
 
-       __build_flow_key(sock_net(sk), &fl4, sk, iph, oif, tos, prot, mark, 0);
+       __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
        __ip_do_redirect(rt, skb, &fl4, true);
 }
 
index b45101f3d2bd2e0f0077305a061add4f7ea0de27..31a255b555ad86a3537c077862e3ea38f9b44284 100644 (file)
@@ -769,6 +769,7 @@ static void tcp_tasklet_func(unsigned long data)
                list_del(&tp->tsq_node);
 
                sk = (struct sock *)tp;
+               smp_mb__before_atomic();
                clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
 
                if (!sk->sk_lock.owned &&
index 0489e19258adeb86e456c167c77702d8b6efee84..1407426bc862dca082bce07d037d9da3304b1715 100644 (file)
@@ -701,7 +701,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
                struct sockaddr_in6 sin6;
                __be16 *ports = (__be16 *) skb_transport_header(skb);
 
-               if (skb_transport_offset(skb) + 4 <= skb->len) {
+               if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
                        /* All current transport protocols have the port numbers in the
                         * first four bytes of the transport header and this function is
                         * written with this assumption in mind.
index 291ebc260e70ef7fbb4310d22ed0c915a372d0b5..ea89073c824747f185beb7da0f4aab6b74832149 100644 (file)
@@ -591,7 +591,11 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
        }
 
        offset += skb_transport_offset(skb);
-       BUG_ON(skb_copy_bits(skb, offset, &csum, 2));
+       err = skb_copy_bits(skb, offset, &csum, 2);
+       if (err < 0) {
+               ip6_flush_pending_frames(sk);
+               goto out;
+       }
 
        /* in case cksum was not initialized */
        if (unlikely(csum))
index d19044f2b1f4d1216ec7a4f25eaabecbf99c6214..c87d359b9b37a07711c65e61a1cb882148f76ee0 100644 (file)
@@ -2195,6 +2195,7 @@ static int validate_set(const struct nlattr *a,
        case OVS_KEY_ATTR_ETHERNET:
                if (mac_proto != MAC_PROTO_ETHERNET)
                        return -EINVAL;
+               break;
 
        case OVS_KEY_ATTR_TUNNEL:
                if (masked)
index 4c93badeabf2236d3d90c933e6169174b3ba68d1..ea961144084fadb3ee98abd708ce5cd31eba42a5 100644 (file)
@@ -135,7 +135,7 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
        /* Release any MRs associated with this socket */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        while ((node = rb_first(&rs->rs_rdma_keys))) {
-               mr = container_of(node, struct rds_mr, r_rb_node);
+               mr = rb_entry(node, struct rds_mr, r_rb_node);
                if (mr->r_trans == rs->rs_transport)
                        mr->r_invalidate = 0;
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
index 7af712526f01eaa9ed77f33ff6030a2fe7253713..e3a58e02119877d237f9aa18c5f9d9fdfb44d0c9 100644 (file)
@@ -134,8 +134,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                        saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
                        daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
 
-                       metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, 0,
-                                                     dst_port, TUNNEL_KEY,
+                       metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, dst_port,
+                                                     0, TUNNEL_KEY,
                                                      key_id, 0);
                }
 
index 35ac28d0720cb89310cea548bed003957bb10a46..333f8e26843128b4bd03a1c6409a21064f7c56e5 100644 (file)
@@ -442,32 +442,32 @@ static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
        }
 }
 
-static void fl_set_key_flags(struct nlattr **tb,
-                            u32 *flags_key, u32 *flags_mask)
+static int fl_set_key_flags(struct nlattr **tb,
+                           u32 *flags_key, u32 *flags_mask)
 {
        u32 key, mask;
 
-       if (!tb[TCA_FLOWER_KEY_FLAGS])
-               return;
+       /* mask is mandatory for flags */
+       if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
+               return -EINVAL;
 
        key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
-
-       if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
-               mask = ~0;
-       else
-               mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
+       mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
 
        *flags_key  = 0;
        *flags_mask = 0;
 
        fl_set_key_flag(key, mask, flags_key, flags_mask,
                        TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
+
+       return 0;
 }
 
 static int fl_set_key(struct net *net, struct nlattr **tb,
                      struct fl_flow_key *key, struct fl_flow_key *mask)
 {
        __be16 ethertype;
+       int ret = 0;
 #ifdef CONFIG_NET_CLS_IND
        if (tb[TCA_FLOWER_INDEV]) {
                int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
@@ -614,9 +614,10 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
                       sizeof(key->enc_tp.dst));
 
-       fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
+       if (tb[TCA_FLOWER_KEY_FLAGS])
+               ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
 
-       return 0;
+       return ret;
 }
 
 static bool fl_mask_eq(struct fl_flow_mask *mask1,
index 86309a3156a580d2be0a1cffbc93b62a4e1b10bf..a4f738ac77283b19927aef0a0e6c9fc8dafcdd42 100644 (file)
@@ -136,7 +136,7 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
                struct fq_flow *aux;
 
                parent = *p;
-               aux = container_of(parent, struct fq_flow, rate_node);
+               aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
@@ -188,7 +188,7 @@ static void fq_gc(struct fq_sched_data *q,
        while (*p) {
                parent = *p;
 
-               f = container_of(parent, struct fq_flow, fq_node);
+               f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;
 
@@ -256,7 +256,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
        while (*p) {
                parent = *p;
 
-               f = container_of(parent, struct fq_flow, fq_node);
+               f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
@@ -424,7 +424,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 
        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
-               struct fq_flow *f = container_of(p, struct fq_flow, rate_node);
+               struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);
 
                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
@@ -563,7 +563,7 @@ static void fq_reset(struct Qdisc *sch)
        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
-                       f = container_of(p, struct fq_flow, fq_node);
+                       f = rb_entry(p, struct fq_flow, fq_node);
                        rb_erase(p, root);
 
                        fq_flow_purge(f);
@@ -593,7 +593,7 @@ static void fq_rehash(struct fq_sched_data *q,
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
-                       of = container_of(op, struct fq_flow, fq_node);
+                       of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
@@ -606,7 +606,7 @@ static void fq_rehash(struct fq_sched_data *q,
                        while (*np) {
                                parent = *np;
 
-                               nf = container_of(parent, struct fq_flow, fq_node);
+                               nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);
 
                                if (nf->sk > of->sk)
index 9f7b380cf0a3836aca117b6d9bde05e0fecea32b..b7e4097bfdab22dc0f43b314aadf3c001cc3b24f 100644 (file)
@@ -152,7 +152,7 @@ struct netem_skb_cb {
 
 static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
 {
-       return container_of(rb, struct sk_buff, rbnode);
+       return rb_entry(rb, struct sk_buff, rbnode);
 }
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
index 68428e1f71810fbe65b7f86c750c3ad61f0266ec..d3cc30c25c41091c2bf18022506dff4145d29944 100644 (file)
@@ -1471,7 +1471,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
         * threshold.  The idea is to recover slowly, but up
         * to the initial advertised window.
         */
-       if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
+       if (asoc->rwnd_press) {
                int change = min(asoc->pathmtu, asoc->rwnd_press);
                asoc->rwnd += change;
                asoc->rwnd_press -= change;
@@ -1539,7 +1539,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
                        asoc->rwnd = 0;
                }
        } else {
-               asoc->rwnd_over = len - asoc->rwnd;
+               asoc->rwnd_over += len - asoc->rwnd;
                asoc->rwnd = 0;
        }
 
index 401c60750b206c00f9fb14f6b635d15a4342ae0f..1ebc184a0e2355e348c6c6f575e0b20a1daf5dd9 100644 (file)
@@ -292,6 +292,8 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
                }
 
                af->from_addr_param(&addr, rawaddr, htons(port), 0);
+               if (sctp_bind_addr_state(bp, &addr) != -1)
+                       goto next;
                retval = sctp_add_bind_addr(bp, &addr, sizeof(addr),
                                            SCTP_ADDR_SRC, gfp);
                if (retval) {
@@ -300,6 +302,7 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
                        break;
                }
 
+next:
                len = ntohs(param->length);
                addrs_len -= len;
                raw_addr_list += len;
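The added check de-duplicates peer-supplied addresses: sctp_bind_addr_state() returns -1 when the address is not yet on the bind address list, so anything already present is skipped rather than added a second time. The same guard appears in sctp_copy_local_addr_list() below. In isolation:

    /* Hedged sketch of the de-duplication guard used in both hunks:
     * only add 'addr' when it is absent (state lookup returns -1).
     */
    if (sctp_bind_addr_state(bp, &addr) == -1)
            retval = sctp_add_bind_addr(bp, &addr, sizeof(addr),
                                        SCTP_ADDR_SRC, gfp);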
index 7b523e3f551f1761d891b606f293b2b721a87ca8..616a9428e0c4f3ba2b2cf910f339074f79488e62 100644 (file)
@@ -205,26 +205,30 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
        list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
                if (!addr->valid)
                        continue;
-               if (sctp_in_scope(net, &addr->a, scope)) {
-                       /* Now that the address is in scope, check to see if
-                        * the address type is really supported by the local
-                        * sock as well as the remote peer.
-                        */
-                       if ((((AF_INET == addr->a.sa.sa_family) &&
-                             (copy_flags & SCTP_ADDR4_PEERSUPP))) ||
-                           (((AF_INET6 == addr->a.sa.sa_family) &&
-                             (copy_flags & SCTP_ADDR6_ALLOWED) &&
-                             (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
-                               error = sctp_add_bind_addr(bp, &addr->a,
-                                                   sizeof(addr->a),
-                                                   SCTP_ADDR_SRC, GFP_ATOMIC);
-                               if (error)
-                                       goto end_copy;
-                       }
-               }
+               if (!sctp_in_scope(net, &addr->a, scope))
+                       continue;
+
+               /* Now that the address is in scope, check to see if
+                * the address type is really supported by the local
+                * sock as well as the remote peer.
+                */
+               if (addr->a.sa.sa_family == AF_INET &&
+                   !(copy_flags & SCTP_ADDR4_PEERSUPP))
+                       continue;
+               if (addr->a.sa.sa_family == AF_INET6 &&
+                   (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
+                    !(copy_flags & SCTP_ADDR6_PEERSUPP)))
+                       continue;
+
+               if (sctp_bind_addr_state(bp, &addr->a) != -1)
+                       continue;
+
+               error = sctp_add_bind_addr(bp, &addr->a, sizeof(addr->a),
+                                          SCTP_ADDR_SRC, GFP_ATOMIC);
+               if (error)
+                       break;
        }
 
-end_copy:
        rcu_read_unlock();
        return error;
 }
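Apart from the new duplicate check, the hunk above is a control-flow flattening: each disqualifying condition now exits the loop iteration with an early continue, so the add path no longer sits three levels deep. The shape, as a generic sketch (the predicate names are placeholders, not functions from the patch):

    list_for_each_entry_rcu(addr, head, list) {
            if (!addr->valid)
                    continue;
            if (!in_scope(addr))            /* placeholder predicate */
                    continue;
            if (already_bound(addr))        /* the new de-duplication check */
                    continue;

            error = add_addr(addr);         /* single, flat success path */
            if (error)
                    break;
    }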
index 13315ff1193c1c9a7ea4ce675f9c19c8ea08cef0..09e9d535bd7487d81574cf8572a41b6e697566fd 100644 (file)
@@ -35,40 +35,43 @@ hostprogs-y += tc_l2_redirect
 hostprogs-y += lwt_len_hist
 hostprogs-y += xdp_tx_iptunnel
 
-test_lru_dist-objs := test_lru_dist.o libbpf.o
-sock_example-objs := sock_example.o libbpf.o
-fds_example-objs := bpf_load.o libbpf.o fds_example.o
-sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
-sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
-sockex3-objs := bpf_load.o libbpf.o sockex3_user.o
-tracex1-objs := bpf_load.o libbpf.o tracex1_user.o
-tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
-tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
-tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
-tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
-tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
-test_probe_write_user-objs := bpf_load.o libbpf.o test_probe_write_user_user.o
-trace_output-objs := bpf_load.o libbpf.o trace_output_user.o
-lathist-objs := bpf_load.o libbpf.o lathist_user.o
-offwaketime-objs := bpf_load.o libbpf.o offwaketime_user.o
-spintest-objs := bpf_load.o libbpf.o spintest_user.o
-map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o
-test_overhead-objs := bpf_load.o libbpf.o test_overhead_user.o
-test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o
-test_cgrp2_attach-objs := libbpf.o test_cgrp2_attach.o
-test_cgrp2_attach2-objs := libbpf.o test_cgrp2_attach2.o cgroup_helpers.o
-test_cgrp2_sock-objs := libbpf.o test_cgrp2_sock.o
-test_cgrp2_sock2-objs := bpf_load.o libbpf.o test_cgrp2_sock2.o
-xdp1-objs := bpf_load.o libbpf.o xdp1_user.o
+# Libbpf dependencies
+LIBBPF := ../../tools/lib/bpf/bpf.o
+
+test_lru_dist-objs := test_lru_dist.o $(LIBBPF)
+sock_example-objs := sock_example.o $(LIBBPF)
+fds_example-objs := bpf_load.o $(LIBBPF) fds_example.o
+sockex1-objs := bpf_load.o $(LIBBPF) sockex1_user.o
+sockex2-objs := bpf_load.o $(LIBBPF) sockex2_user.o
+sockex3-objs := bpf_load.o $(LIBBPF) sockex3_user.o
+tracex1-objs := bpf_load.o $(LIBBPF) tracex1_user.o
+tracex2-objs := bpf_load.o $(LIBBPF) tracex2_user.o
+tracex3-objs := bpf_load.o $(LIBBPF) tracex3_user.o
+tracex4-objs := bpf_load.o $(LIBBPF) tracex4_user.o
+tracex5-objs := bpf_load.o $(LIBBPF) tracex5_user.o
+tracex6-objs := bpf_load.o $(LIBBPF) tracex6_user.o
+test_probe_write_user-objs := bpf_load.o $(LIBBPF) test_probe_write_user_user.o
+trace_output-objs := bpf_load.o $(LIBBPF) trace_output_user.o
+lathist-objs := bpf_load.o $(LIBBPF) lathist_user.o
+offwaketime-objs := bpf_load.o $(LIBBPF) offwaketime_user.o
+spintest-objs := bpf_load.o $(LIBBPF) spintest_user.o
+map_perf_test-objs := bpf_load.o $(LIBBPF) map_perf_test_user.o
+test_overhead-objs := bpf_load.o $(LIBBPF) test_overhead_user.o
+test_cgrp2_array_pin-objs := $(LIBBPF) test_cgrp2_array_pin.o
+test_cgrp2_attach-objs := $(LIBBPF) test_cgrp2_attach.o
+test_cgrp2_attach2-objs := $(LIBBPF) test_cgrp2_attach2.o cgroup_helpers.o
+test_cgrp2_sock-objs := $(LIBBPF) test_cgrp2_sock.o
+test_cgrp2_sock2-objs := bpf_load.o $(LIBBPF) test_cgrp2_sock2.o
+xdp1-objs := bpf_load.o $(LIBBPF) xdp1_user.o
 # reuse xdp1 source intentionally
-xdp2-objs := bpf_load.o libbpf.o xdp1_user.o
-test_current_task_under_cgroup-objs := bpf_load.o libbpf.o cgroup_helpers.o \
+xdp2-objs := bpf_load.o $(LIBBPF) xdp1_user.o
+test_current_task_under_cgroup-objs := bpf_load.o $(LIBBPF) cgroup_helpers.o \
                                       test_current_task_under_cgroup_user.o
-trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
-sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
-tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o
-lwt_len_hist-objs := bpf_load.o libbpf.o lwt_len_hist_user.o
-xdp_tx_iptunnel-objs := bpf_load.o libbpf.o xdp_tx_iptunnel_user.o
+trace_event-objs := bpf_load.o $(LIBBPF) trace_event_user.o
+sampleip-objs := bpf_load.o $(LIBBPF) sampleip_user.o
+tc_l2_redirect-objs := bpf_load.o $(LIBBPF) tc_l2_redirect_user.o
+lwt_len_hist-objs := bpf_load.o $(LIBBPF) lwt_len_hist_user.o
+xdp_tx_iptunnel-objs := bpf_load.o $(LIBBPF) xdp_tx_iptunnel_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -104,7 +107,10 @@ always += lwt_len_hist_kern.o
 always += xdp_tx_iptunnel_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
+HOSTCFLAGS += -I$(srctree)/tools/lib/
 HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
+HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
+HOSTCFLAGS += -I$(srctree)/tools/perf
 
 HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
 HOSTLOADLIBES_fds_example += -lelf
index a43eae3f05518765a7a7a8b38410753d2ef494d0..79f9a58f1872714ce6dd1047cb9d1dbd8842ad60 100644 (file)
@@ -1,8 +1,8 @@
 eBPF sample programs
 ====================
 
-This directory contains a mini eBPF library, test stubs, verifier
-test-suite and examples for using eBPF.
+This directory contains test stubs, a verifier test-suite and examples
+for using eBPF. The examples use libbpf from tools/lib/bpf.
 
 Build dependencies
 ==================
index e30b6de94f2ee393afc1f9feb23d0a461a46a7da..396e204888b34f342bd8f01afc4e0d1a3d52f823 100644 (file)
 #include <poll.h>
 #include <ctype.h>
 #include "libbpf.h"
-#include "bpf_helpers.h"
 #include "bpf_load.h"
+#include "perf-sys.h"
 
 #define DEBUGFS "/sys/kernel/debug/tracing/"
 
 static char license[128];
 static int kern_version;
 static bool processed_sec[128];
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
 int map_fd[MAX_MAPS];
 int prog_fd[MAX_PROGS];
 int event_fd[MAX_PROGS];
 int prog_cnt;
 int prog_array_fd = -1;
 
+struct bpf_map_def {
+       unsigned int type;
+       unsigned int key_size;
+       unsigned int value_size;
+       unsigned int max_entries;
+       unsigned int map_flags;
+};
+
 static int populate_prog_array(const char *event, int prog_fd)
 {
        int ind = atoi(event), err;
 
-       err = bpf_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
+       err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
        if (err < 0) {
                printf("failed to store prog_fd in prog_array\n");
                return -1;
@@ -58,6 +67,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
        bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
        bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
        bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
+       size_t insns_cnt = size / sizeof(struct bpf_insn);
        enum bpf_prog_type prog_type;
        char buf[256];
        int fd, efd, err, id;
@@ -87,9 +97,10 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
                return -1;
        }
 
-       fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
+       fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
+                             bpf_log_buf, BPF_LOG_BUF_SIZE);
        if (fd < 0) {
-               printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
+               printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
                return -1;
        }
 
@@ -169,7 +180,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
        id = atoi(buf);
        attr.config = id;
 
-       efd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
+       efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
        if (efd < 0) {
                printf("event %d fd %d err %s\n", id, efd, strerror(errno));
                return -1;
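The conversion above tracks the tools/lib/bpf API of the time: bpf_load_program() takes an instruction count rather than a byte size, and the caller owns the verifier log buffer, which is why insns_cnt and the global bpf_log_buf appear. A hedged, self-contained sketch (the accept-all filter is illustrative; the BPF_* instruction macros come from the samples' libbpf.h):

    #include <stdio.h>
    #include <linux/bpf.h>
    #include <bpf/bpf.h>        /* tools/lib/bpf wrappers */
    #include "libbpf.h"         /* samples' BPF_* instruction macros */

    static char log_buf[BPF_LOG_BUF_SIZE];

    static int load_accept_all(void)
    {
            struct bpf_insn insns[] = {
                    BPF_MOV64_IMM(BPF_REG_0, 1),    /* r0 = 1: accept */
                    BPF_EXIT_INSN(),
            };
            size_t insns_cnt = sizeof(insns) / sizeof(insns[0]);
            int fd;

            /* count is in instructions, not bytes; log buffer is caller-owned */
            fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
                                  insns_cnt, "GPL", 0 /* kern_version */,
                                  log_buf, BPF_LOG_BUF_SIZE);
            if (fd < 0)
                    fprintf(stderr, "verifier said:\n%s\n", log_buf);
            return fd;
    }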
index fb46a421ab411fefe2c23ed36af4cbfee27a3807..c827827299b3b77831650e762376b061b6c93021 100644 (file)
@@ -1,12 +1,15 @@
 #ifndef __BPF_LOAD_H
 #define __BPF_LOAD_H
 
+#include "libbpf.h"
+
 #define MAX_MAPS 32
 #define MAX_PROGS 32
 
 extern int map_fd[MAX_MAPS];
 extern int prog_fd[MAX_PROGS];
 extern int event_fd[MAX_PROGS];
+extern char bpf_log_buf[BPF_LOG_BUF_SIZE];
 extern int prog_cnt;
 
 /* parses elf file compiled by llvm .c->.o
index 625e797be6ef897f929adebffe9dcbf71cd59e03..e29bd52ff9e85c6dd6a2996bd213567e51e0328e 100644 (file)
@@ -14,6 +14,7 @@
 
 #include "bpf_load.h"
 #include "libbpf.h"
+#include "sock_example.h"
 
 #define BPF_F_PIN      (1 << 0)
 #define BPF_F_GET      (1 << 1)
@@ -49,17 +50,19 @@ static int bpf_map_create(void)
 
 static int bpf_prog_create(const char *object)
 {
-       static const struct bpf_insn insns[] = {
+       static struct bpf_insn insns[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_EXIT_INSN(),
        };
+       size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn);
 
        if (object) {
                assert(!load_bpf_file((char *)object));
                return prog_fd[0];
        } else {
-               return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER,
-                                    insns, sizeof(insns), "GPL", 0);
+               return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER,
+                                       insns, insns_cnt, "GPL", 0,
+                                       bpf_log_buf, BPF_LOG_BUF_SIZE);
        }
 }
 
@@ -83,12 +86,12 @@ static int bpf_do_map(const char *file, uint32_t flags, uint32_t key,
        }
 
        if ((flags & BPF_F_KEY_VAL) == BPF_F_KEY_VAL) {
-               ret = bpf_update_elem(fd, &key, &value, 0);
+               ret = bpf_map_update_elem(fd, &key, &value, 0);
                printf("bpf: fd:%d u->(%u:%u) ret:(%d,%s)\n", fd, key, value,
                       ret, strerror(errno));
                assert(ret == 0);
        } else if (flags & BPF_F_KEY) {
-               ret = bpf_lookup_elem(fd, &key, &value);
+               ret = bpf_map_lookup_elem(fd, &key, &value);
                printf("bpf: fd:%d l->(%u):%u ret:(%d,%s)\n", fd, key, value,
                       ret, strerror(errno));
                assert(ret == 0);
index 65da8c1576de00a555fcb7056907c6d4939cbd58..6477bad5b4e2b9e6ae8fe7558b18cd95f86197b5 100644 (file)
@@ -73,7 +73,7 @@ static void get_data(int fd)
        for (c = 0; c < MAX_CPU; c++) {
                for (i = 0; i < MAX_ENTRIES; i++) {
                        key = c * MAX_ENTRIES + i;
-                       bpf_lookup_elem(fd, &key, &value);
+                       bpf_map_lookup_elem(fd, &key, &value);
 
                        cpu_hist[c].data[i] = value;
                        if (value > cpu_hist[c].max)
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
deleted file mode 100644 (file)
index 9ce707b..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-/* eBPF mini library */
-#include <stdlib.h>
-#include <stdio.h>
-#include <linux/unistd.h>
-#include <unistd.h>
-#include <string.h>
-#include <linux/netlink.h>
-#include <linux/bpf.h>
-#include <errno.h>
-#include <net/ethernet.h>
-#include <net/if.h>
-#include <linux/if_packet.h>
-#include <arpa/inet.h>
-#include "libbpf.h"
-
-static __u64 ptr_to_u64(void *ptr)
-{
-       return (__u64) (unsigned long) ptr;
-}
-
-int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
-                  int max_entries, int map_flags)
-{
-       union bpf_attr attr = {
-               .map_type = map_type,
-               .key_size = key_size,
-               .value_size = value_size,
-               .max_entries = max_entries,
-               .map_flags = map_flags,
-       };
-
-       return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
-}
-
-int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags)
-{
-       union bpf_attr attr = {
-               .map_fd = fd,
-               .key = ptr_to_u64(key),
-               .value = ptr_to_u64(value),
-               .flags = flags,
-       };
-
-       return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
-}
-
-int bpf_lookup_elem(int fd, void *key, void *value)
-{
-       union bpf_attr attr = {
-               .map_fd = fd,
-               .key = ptr_to_u64(key),
-               .value = ptr_to_u64(value),
-       };
-
-       return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
-}
-
-int bpf_delete_elem(int fd, void *key)
-{
-       union bpf_attr attr = {
-               .map_fd = fd,
-               .key = ptr_to_u64(key),
-       };
-
-       return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
-}
-
-int bpf_get_next_key(int fd, void *key, void *next_key)
-{
-       union bpf_attr attr = {
-               .map_fd = fd,
-               .key = ptr_to_u64(key),
-               .next_key = ptr_to_u64(next_key),
-       };
-
-       return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
-}
-
-#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))
-
-char bpf_log_buf[LOG_BUF_SIZE];
-
-int bpf_prog_load(enum bpf_prog_type prog_type,
-                 const struct bpf_insn *insns, int prog_len,
-                 const char *license, int kern_version)
-{
-       union bpf_attr attr = {
-               .prog_type = prog_type,
-               .insns = ptr_to_u64((void *) insns),
-               .insn_cnt = prog_len / sizeof(struct bpf_insn),
-               .license = ptr_to_u64((void *) license),
-               .log_buf = ptr_to_u64(bpf_log_buf),
-               .log_size = LOG_BUF_SIZE,
-               .log_level = 1,
-       };
-
-       /* assign one field outside of struct init to make sure any
-        * padding is zero initialized
-        */
-       attr.kern_version = kern_version;
-
-       bpf_log_buf[0] = 0;
-
-       return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
-}
-
-int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
-{
-       union bpf_attr attr = {
-               .target_fd = target_fd,
-               .attach_bpf_fd = prog_fd,
-               .attach_type = type,
-       };
-
-       return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
-}
-
-int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
-{
-       union bpf_attr attr = {
-               .target_fd = target_fd,
-               .attach_type = type,
-       };
-
-       return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
-}
-
-int bpf_obj_pin(int fd, const char *pathname)
-{
-       union bpf_attr attr = {
-               .pathname       = ptr_to_u64((void *)pathname),
-               .bpf_fd         = fd,
-       };
-
-       return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
-}
-
-int bpf_obj_get(const char *pathname)
-{
-       union bpf_attr attr = {
-               .pathname       = ptr_to_u64((void *)pathname),
-       };
-
-       return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
-}
-
-int open_raw_sock(const char *name)
-{
-       struct sockaddr_ll sll;
-       int sock;
-
-       sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL));
-       if (sock < 0) {
-               printf("cannot create raw socket\n");
-               return -1;
-       }
-
-       memset(&sll, 0, sizeof(sll));
-       sll.sll_family = AF_PACKET;
-       sll.sll_ifindex = if_nametoindex(name);
-       sll.sll_protocol = htons(ETH_P_ALL);
-       if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
-               printf("bind to %s: %s\n", name, strerror(errno));
-               close(sock);
-               return -1;
-       }
-
-       return sock;
-}
-
-int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
-                   int group_fd, unsigned long flags)
-{
-       return syscall(__NR_perf_event_open, attr, pid, cpu,
-                      group_fd, flags);
-}
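Nothing functional disappears with this file: every helper it contained was a thin shim that filled a union bpf_attr and invoked the multiplexed bpf(2) syscall, and tools/lib/bpf ships the same shims under the bpf_map_*() and bpf_load_program() names used above. The common shape, as a sketch:

    /* Sketch of what both the deleted helpers and tools/lib/bpf do:
     * populate a union bpf_attr and invoke the bpf(2) syscall.
     */
    #include <linux/bpf.h>
    #include <linux/unistd.h>
    #include <string.h>
    #include <unistd.h>

    static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
    {
            return syscall(__NR_bpf, cmd, attr, size);
    }

    int map_lookup(int fd, const void *key, void *value)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr)); /* unused fields must be zero */
            attr.map_fd = fd;
            attr.key = (__u64)(unsigned long)key;
            attr.value = (__u64)(unsigned long)value;
            return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
    }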
index 94a901d86fc2ea4867a3993f87380e9894d3b1f5..3705fba453a005fb32f5dfb51dd5763f5f364ccf 100644 (file)
@@ -2,27 +2,9 @@
 #ifndef __LIBBPF_H
 #define __LIBBPF_H
 
-struct bpf_insn;
-
-int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
-                  int max_entries, int map_flags);
-int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
-int bpf_lookup_elem(int fd, void *key, void *value);
-int bpf_delete_elem(int fd, void *key);
-int bpf_get_next_key(int fd, void *key, void *next_key);
-
-int bpf_prog_load(enum bpf_prog_type prog_type,
-                 const struct bpf_insn *insns, int insn_len,
-                 const char *license, int kern_version);
-
-int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
-int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
+#include <bpf/bpf.h>
 
-int bpf_obj_pin(int fd, const char *pathname);
-int bpf_obj_get(const char *pathname);
-
-#define LOG_BUF_SIZE (256 * 1024)
-extern char bpf_log_buf[LOG_BUF_SIZE];
+struct bpf_insn;
 
 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
 
@@ -203,10 +185,4 @@ extern char bpf_log_buf[LOG_BUF_SIZE];
                .off   = 0,                                     \
                .imm   = 0 })
 
-/* create RAW socket and bind to interface 'name' */
-int open_raw_sock(const char *name);
-
-struct perf_event_attr;
-int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
-                   int group_fd, unsigned long flags);
 #endif
index 05d783fc5dafbc50393b796b8e0968bc293f5b07..ec8f3bbcbef3dc871177b55cac6f27e54dfaff6f 100644 (file)
@@ -14,6 +14,8 @@
 #define MAX_INDEX 64
 #define MAX_STARS 38
 
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
 static void stars(char *str, long val, long max, int width)
 {
        int i;
@@ -41,13 +43,13 @@ int main(int argc, char **argv)
                return -1;
        }
 
-       while (bpf_get_next_key(map_fd, &key, &next_key) == 0) {
+       while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
                if (next_key >= MAX_INDEX) {
                        fprintf(stderr, "Key %lu out of bounds\n", next_key);
                        continue;
                }
 
-               bpf_lookup_elem(map_fd, &next_key, values);
+               bpf_map_lookup_elem(map_fd, &next_key, values);
 
                sum = 0;
                for (i = 0; i < nr_cpus; i++)
index 6f002a9c24faa38a81ba9b233519b6370478a2b0..9cce2a66bd664c40668b3c8ca0dd12bd148bb21b 100644 (file)
@@ -49,14 +49,14 @@ static void print_stack(struct key_t *key, __u64 count)
        int i;
 
        printf("%s;", key->target);
-       if (bpf_lookup_elem(map_fd[3], &key->tret, ip) != 0) {
+       if (bpf_map_lookup_elem(map_fd[3], &key->tret, ip) != 0) {
                printf("---;");
        } else {
                for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
                        print_ksym(ip[i]);
        }
        printf("-;");
-       if (bpf_lookup_elem(map_fd[3], &key->wret, ip) != 0) {
+       if (bpf_map_lookup_elem(map_fd[3], &key->wret, ip) != 0) {
                printf("---;");
        } else {
                for (i = 0; i < PERF_MAX_STACK_DEPTH; i++)
@@ -77,8 +77,8 @@ static void print_stacks(int fd)
        struct key_t key = {}, next_key;
        __u64 value;
 
-       while (bpf_get_next_key(fd, &key, &next_key) == 0) {
-               bpf_lookup_elem(fd, &next_key, &value);
+       while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+               bpf_map_lookup_elem(fd, &next_key, &value);
                print_stack(&next_key, value);
                key = next_key;
        }
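The loop pattern in this and the following samples is the standard full-map walk: seed bpf_map_get_next_key() with a key that is not in the map so it returns the first real key, then advance the cursor until the call fails at the end. A hedged fragment (u64 keys and values assumed for brevity):

    __u64 key = (__u64)-1, next_key, value;

    while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
            if (bpf_map_lookup_elem(fd, &next_key, &value) == 0)
                    printf("%llu -> %llu\n", next_key, value);
            key = next_key;         /* advance the cursor */
    }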
index 260a6bdd6413ecf2e24b434ce5c30ca137d37ca2..be59d7dcbdde3664c573a78d6d27adee03234a16 100644 (file)
@@ -21,6 +21,7 @@
 #include <sys/ioctl.h>
 #include "libbpf.h"
 #include "bpf_load.h"
+#include "perf-sys.h"
 
 #define DEFAULT_FREQ   99
 #define DEFAULT_SECS   5
@@ -49,7 +50,7 @@ static int sampling_start(int *pmu_fd, int freq)
        };
 
        for (i = 0; i < nr_cpus; i++) {
-               pmu_fd[i] = perf_event_open(&pe_sample_attr, -1 /* pid */, i,
+               pmu_fd[i] = sys_perf_event_open(&pe_sample_attr, -1 /* pid */, i,
                                            -1 /* group_fd */, 0 /* flags */);
                if (pmu_fd[i] < 0) {
                        fprintf(stderr, "ERROR: Initializing perf sampling\n");
@@ -95,8 +96,8 @@ static void print_ip_map(int fd)
 
        /* fetch IPs and counts */
        key = 0, i = 0;
-       while (bpf_get_next_key(fd, &key, &next_key) == 0) {
-               bpf_lookup_elem(fd, &next_key, &value);
+       while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+               bpf_map_lookup_elem(fd, &next_key, &value);
                counts[i].ip = next_key;
                counts[i++].count = value;
                key = next_key;
index 28b60baa9fa82b4fbb0db4526aeddec65459bf96..6fc6e193ef1b12ecbb13f5c7729552306632708b 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/ip.h>
 #include <stddef.h>
 #include "libbpf.h"
+#include "sock_example.h"
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
 
 static int test_sock(void)
 {
@@ -54,9 +57,10 @@ static int test_sock(void)
                BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
                BPF_EXIT_INSN(),
        };
+       size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
 
-       prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog),
-                               "GPL", 0);
+       prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, insns_cnt,
+                                  "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE);
        if (prog_fd < 0) {
                printf("failed to load prog '%s'\n", strerror(errno));
                goto cleanup;
@@ -72,13 +76,13 @@ static int test_sock(void)
 
        for (i = 0; i < 10; i++) {
                key = IPPROTO_TCP;
-               assert(bpf_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
+               assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0);
 
                key = IPPROTO_UDP;
-               assert(bpf_lookup_elem(map_fd, &key, &udp_cnt) == 0);
+               assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0);
 
                key = IPPROTO_ICMP;
-               assert(bpf_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
+               assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0);
 
                printf("TCP %lld UDP %lld ICMP %lld packets\n",
                       tcp_cnt, udp_cnt, icmp_cnt);
diff --git a/samples/bpf/sock_example.h b/samples/bpf/sock_example.h
new file mode 100644 (file)
index 0000000..09f7fe7
--- /dev/null
@@ -0,0 +1,35 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <linux/if_packet.h>
+#include <arpa/inet.h>
+#include "libbpf.h"
+
+static inline int open_raw_sock(const char *name)
+{
+       struct sockaddr_ll sll;
+       int sock;
+
+       sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC, htons(ETH_P_ALL));
+       if (sock < 0) {
+               printf("cannot create raw socket\n");
+               return -1;
+       }
+
+       memset(&sll, 0, sizeof(sll));
+       sll.sll_family = AF_PACKET;
+       sll.sll_ifindex = if_nametoindex(name);
+       sll.sll_protocol = htons(ETH_P_ALL);
+       if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
+               printf("bind to %s: %s\n", name, strerror(errno));
+               close(sock);
+               return -1;
+       }
+
+       return sock;
+}
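open_raw_sock() only creates and binds the packet socket; the samples that include this header then attach their loaded filter with the SO_ATTACH_BPF socket option, which takes the program fd by value. A hedged usage sketch:

    #include <sys/socket.h>
    #include "sock_example.h"

    /* Hedged sketch: run an eBPF socket filter over every packet
     * seen on an interface (prog_fd from bpf_load_program()).
     */
    static int attach_filter(const char *ifname, int prog_fd)
    {
            int sock = open_raw_sock(ifname);

            if (sock < 0)
                    return -1;
            return setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF,
                              &prog_fd, sizeof(prog_fd));
    }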
index 678ce469355152650ee055136821f4ad8b50ed85..6cd2feb3e9b364aa4ec75c538fe954e581fbd84e 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/bpf.h>
 #include "libbpf.h"
 #include "bpf_load.h"
+#include "sock_example.h"
 #include <unistd.h>
 #include <arpa/inet.h>
 
@@ -32,13 +33,13 @@ int main(int ac, char **argv)
                int key;
 
                key = IPPROTO_TCP;
-               assert(bpf_lookup_elem(map_fd[0], &key, &tcp_cnt) == 0);
+               assert(bpf_map_lookup_elem(map_fd[0], &key, &tcp_cnt) == 0);
 
                key = IPPROTO_UDP;
-               assert(bpf_lookup_elem(map_fd[0], &key, &udp_cnt) == 0);
+               assert(bpf_map_lookup_elem(map_fd[0], &key, &udp_cnt) == 0);
 
                key = IPPROTO_ICMP;
-               assert(bpf_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0);
+               assert(bpf_map_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0);
 
                printf("TCP %lld UDP %lld ICMP %lld bytes\n",
                       tcp_cnt, udp_cnt, icmp_cnt);
index 8a4085c2d117acf2e1a47a8d85f6a21f7bbeff5e..0e0207c9084130c362b600f6b8cd453d2541c4dc 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/bpf.h>
 #include "libbpf.h"
 #include "bpf_load.h"
+#include "sock_example.h"
 #include <unistd.h>
 #include <arpa/inet.h>
 #include <sys/resource.h>
@@ -39,8 +40,8 @@ int main(int ac, char **argv)
                int key = 0, next_key;
                struct pair value;
 
-               while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
-                       bpf_lookup_elem(map_fd[0], &next_key, &value);
+               while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
+                       bpf_map_lookup_elem(map_fd[0], &next_key, &value);
                        printf("ip %s bytes %lld packets %lld\n",
                               inet_ntoa((struct in_addr){htonl(next_key)}),
                               value.bytes, value.packets);
index 3fcfd8c4b2a3c69da6a4719559cdd4edbbca0b30..b5524d417eb57acd808aa07f4e876191df734166 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/bpf.h>
 #include "libbpf.h"
 #include "bpf_load.h"
+#include "sock_example.h"
 #include <unistd.h>
 #include <arpa/inet.h>
 #include <sys/resource.h>
@@ -54,8 +55,8 @@ int main(int argc, char **argv)
 
                sleep(1);
                printf("IP     src.port -> dst.port               bytes      packets\n");
-               while (bpf_get_next_key(map_fd[2], &key, &next_key) == 0) {
-                       bpf_lookup_elem(map_fd[2], &next_key, &value);
+               while (bpf_map_get_next_key(map_fd[2], &key, &next_key) == 0) {
+                       bpf_map_lookup_elem(map_fd[2], &next_key, &value);
                        printf("%s.%05d -> %s.%05d %12lld %12lld\n",
                               inet_ntoa((struct in_addr){htonl(next_key.src)}),
                               next_key.port16[0],
index 311ede532230646808a44c74b61316e21d70649a..80676c25fa5017abd8c91b51f1757c7e53e5df93 100644 (file)
@@ -31,8 +31,8 @@ int main(int ac, char **argv)
        for (i = 0; i < 5; i++) {
                key = 0;
                printf("kprobing funcs:");
-               while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
-                       bpf_lookup_elem(map_fd[0], &next_key, &value);
+               while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
+                       bpf_map_lookup_elem(map_fd[0], &next_key, &value);
                        assert(next_key == value);
                        sym = ksym_search(value);
                        printf(" %s", sym->name);
@@ -41,8 +41,8 @@ int main(int ac, char **argv)
                if (key)
                        printf("\n");
                key = 0;
-               while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0)
-                       bpf_delete_elem(map_fd[0], &next_key);
+               while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0)
+                       bpf_map_delete_elem(map_fd[0], &next_key);
                sleep(1);
        }
 
index 4013c5337b912dd38fed05c15a944f1748d50487..28995a77656073721d5ea5c78eb33f9aceb66a2f 100644 (file)
@@ -60,9 +60,9 @@ int main(int argc, char **argv)
        }
 
        /* bpf_tunnel_key.remote_ipv4 expects host byte orders */
-       ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0);
+       ret = bpf_map_update_elem(array_fd, &array_key, &ifindex, 0);
        if (ret) {
-               perror("bpf_update_elem");
+               perror("bpf_map_update_elem");
                goto out;
        }
 
index 70e86f7be69dae90dfcc99aa95c3cca866092640..8a1b8b5d8def4717b64fbac2b58209bd3aeacab6 100644 (file)
@@ -85,9 +85,9 @@ int main(int argc, char **argv)
                }
        }
 
-       ret = bpf_update_elem(array_fd, &array_key, &cg2_fd, 0);
+       ret = bpf_map_update_elem(array_fd, &array_key, &cg2_fd, 0);
        if (ret) {
-               perror("bpf_update_elem");
+               perror("bpf_map_update_elem");
                goto out;
        }
 
index a19484c45b79f0d600be81eadec23120e583e214..504058631ffccef319f9bc7f6a3349d6f854bdbd 100644 (file)
@@ -36,6 +36,8 @@ enum {
        MAP_KEY_BYTES,
 };
 
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
 static int prog_load(int map_fd, int verdict)
 {
        struct bpf_insn prog[] = {
@@ -66,9 +68,11 @@ static int prog_load(int map_fd, int verdict)
                BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
                BPF_EXIT_INSN(),
        };
+       size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
 
-       return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB,
-                            prog, sizeof(prog), "GPL", 0);
+       return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+                               prog, insns_cnt, "GPL", 0,
+                               bpf_log_buf, BPF_LOG_BUF_SIZE);
 }
 
 static int usage(const char *argv0)
@@ -108,10 +112,10 @@ static int attach_filter(int cg_fd, int type, int verdict)
        }
        while (1) {
                key = MAP_KEY_PACKETS;
-               assert(bpf_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
+               assert(bpf_map_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
 
                key = MAP_KEY_BYTES;
-               assert(bpf_lookup_elem(map_fd, &key, &byte_cnt) == 0);
+               assert(bpf_map_lookup_elem(map_fd, &key, &byte_cnt) == 0);
 
                printf("cgroup received %lld packets, %lld bytes\n",
                       pkt_cnt, byte_cnt);
index ddfac42ed4df65636b9a1c2b1e405d04c343f7b7..6e69be37f87f700dfd6a2c13a8d6310164b0cdd9 100644 (file)
@@ -32,6 +32,8 @@
 #define BAR            "/foo/bar/"
 #define PING_CMD       "ping -c1 -w1 127.0.0.1"
 
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
 static int prog_load(int verdict)
 {
        int ret;
@@ -39,9 +41,11 @@ static int prog_load(int verdict)
                BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
                BPF_EXIT_INSN(),
        };
+       size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
 
-       ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB,
-                            prog, sizeof(prog), "GPL", 0);
+       ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
+                              prog, insns_cnt, "GPL", 0,
+                              bpf_log_buf, BPF_LOG_BUF_SIZE);
 
        if (ret < 0) {
                log_err("Loading program");
index d467b3c1c55c385ff1a1c20efe044072b6c7a539..0791b949cbe418e49fa29ab1efb52007e29d67d8 100644 (file)
@@ -23,6 +23,8 @@
 
 #include "libbpf.h"
 
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
 static int prog_load(int idx)
 {
        struct bpf_insn prog[] = {
@@ -33,9 +35,10 @@ static int prog_load(int idx)
                BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */
                BPF_EXIT_INSN(),
        };
+       size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
 
-       return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, prog, sizeof(prog),
-                            "GPL", 0);
+       return bpf_load_program(BPF_PROG_TYPE_CGROUP_SOCK, prog, insns_cnt,
+                               "GPL", 0, bpf_log_buf, BPF_LOG_BUF_SIZE);
 }
 
 static int usage(const char *argv0)
index 95aaaa84613039a063dd33192e6e115187b38a06..65b5fb51c1dbc5b34dcf4dc01e6d6e16eb43b265 100644 (file)
@@ -36,7 +36,7 @@ int main(int argc, char **argv)
        if (!cg2)
                goto err;
 
-       if (bpf_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
+       if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
                log_err("Adding target cgroup to map");
                goto err;
        }
@@ -50,7 +50,7 @@ int main(int argc, char **argv)
         */
 
        sync();
-       bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
+       bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid);
 
        if (local_pid != remote_pid) {
                fprintf(stderr,
@@ -64,10 +64,10 @@ int main(int argc, char **argv)
                goto err;
 
        remote_pid = 0;
-       bpf_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);
+       bpf_map_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);
 
        sync();
-       bpf_lookup_elem(map_fd[1], &idx, &remote_pid);
+       bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid);
 
        if (local_pid == remote_pid) {
                fprintf(stderr, "BPF cgroup negative test did not work\n");
index 316230a0ed2306271288d21c25970d505e9d0507..d96dc88d3b04067135fe80c95628fca37a29c022 100644 (file)
@@ -134,7 +134,7 @@ static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
        int seen = 0;
 
        lru->total++;
-       if (!bpf_lookup_elem(lru->map_fd, &key, &node)) {
+       if (!bpf_map_lookup_elem(lru->map_fd, &key, &node)) {
                if (node) {
                        list_move(&node->list, &lru->list);
                        return 1;
@@ -151,7 +151,7 @@ static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
                node = list_last_entry(&lru->list,
                                       struct pfect_lru_node,
                                       list);
-               bpf_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
+               bpf_map_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
        }
 
        node->key = key;
@@ -159,10 +159,10 @@ static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
 
        lru->nr_misses++;
        if (seen) {
-               assert(!bpf_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
+               assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
        } else {
                lru->nr_unique++;
-               assert(!bpf_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
        }
 
        return seen;
@@ -285,11 +285,11 @@ static void do_test_lru_dist(int task, void *data)
 
                pfect_lru_lookup_or_insert(&pfect_lru, key);
 
-               if (!bpf_lookup_elem(lru_map_fd, &key, &value))
+               if (!bpf_map_lookup_elem(lru_map_fd, &key, &value))
                        continue;
 
-               if (bpf_update_elem(lru_map_fd, &key, &value, BPF_NOEXIST)) {
-                       printf("bpf_update_elem(lru_map_fd, %llu): errno:%d\n",
+               if (bpf_map_update_elem(lru_map_fd, &key, &value, BPF_NOEXIST)) {
+                       printf("bpf_map_update_elem(lru_map_fd, %llu): errno:%d\n",
                               key, errno);
                        assert(0);
                }
@@ -358,19 +358,19 @@ static void test_lru_loss0(int map_type, int map_flags)
        for (key = 1; key <= 1000; key++) {
                int start_key, end_key;
 
-               assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
+               assert(bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
 
                start_key = 101;
                end_key = min(key, 900);
 
                while (start_key <= end_key) {
-                       bpf_lookup_elem(map_fd, &start_key, value);
+                       bpf_map_lookup_elem(map_fd, &start_key, value);
                        start_key++;
                }
        }
 
        for (key = 1; key <= 1000; key++) {
-               if (bpf_lookup_elem(map_fd, &key, value)) {
+               if (bpf_map_lookup_elem(map_fd, &key, value)) {
                        if (key <= 100)
                                old_unused_losses++;
                        else if (key <= 900)
@@ -408,10 +408,10 @@ static void test_lru_loss1(int map_type, int map_flags)
        value[0] = 1234;
 
        for (key = 1; key <= 1000; key++)
-               assert(!bpf_update_elem(map_fd, &key, value, BPF_NOEXIST));
+               assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
 
        for (key = 1; key <= 1000; key++) {
-               if (bpf_lookup_elem(map_fd, &key, value))
+               if (bpf_map_lookup_elem(map_fd, &key, value))
                        nr_losses++;
        }
 
@@ -436,7 +436,7 @@ static void do_test_parallel_lru_loss(int task, void *data)
        next_ins_key = stable_base;
        value[0] = 1234;
        for (i = 0; i < nr_stable_elems; i++) {
-               assert(bpf_update_elem(map_fd, &next_ins_key, value,
+               assert(bpf_map_update_elem(map_fd, &next_ins_key, value,
                                       BPF_NOEXIST) == 0);
                next_ins_key++;
        }
@@ -448,9 +448,9 @@ static void do_test_parallel_lru_loss(int task, void *data)
 
                if (rn % 10) {
                        key = rn % nr_stable_elems + stable_base;
-                       bpf_lookup_elem(map_fd, &key, value);
+                       bpf_map_lookup_elem(map_fd, &key, value);
                } else {
-                       bpf_update_elem(map_fd, &next_ins_key, value,
+                       bpf_map_update_elem(map_fd, &next_ins_key, value,
                                        BPF_NOEXIST);
                        next_ins_key++;
                }
@@ -458,7 +458,7 @@ static void do_test_parallel_lru_loss(int task, void *data)
 
        key = stable_base;
        for (i = 0; i < nr_stable_elems; i++) {
-               if (bpf_lookup_elem(map_fd, &key, value))
+               if (bpf_map_lookup_elem(map_fd, &key, value))
                        nr_losses++;
                key++;
        }
index a44bf347bedd30a8f9e90a40d78fb3c1446ac062..b5bf178a6ecc60eb7fa99b28abbe75dca8b06288 100644 (file)
@@ -50,7 +50,7 @@ int main(int ac, char **argv)
        mapped_addr_in->sin_port = htons(5555);
        mapped_addr_in->sin_addr.s_addr = inet_addr("255.255.255.255");
 
-       assert(!bpf_update_elem(map_fd[0], &mapped_addr, &serv_addr, BPF_ANY));
+       assert(!bpf_map_update_elem(map_fd[0], &mapped_addr, &serv_addr, BPF_ANY));
 
        assert(listen(serverfd, 5) == 0);
 
index 9a130d31ecf258b601d20e52367443a18f02c11c..0c5561d193a487f2257ccece359530b97b06a170 100644 (file)
@@ -20,6 +20,7 @@
 #include <sys/resource.h>
 #include "libbpf.h"
 #include "bpf_load.h"
+#include "perf-sys.h"
 
 #define SAMPLE_FREQ 50
 
@@ -61,14 +62,14 @@ static void print_stack(struct key_t *key, __u64 count)
        int i;
 
        printf("%3lld %s;", count, key->comm);
-       if (bpf_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
+       if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
                printf("---;");
        } else {
                for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
                        print_ksym(ip[i]);
        }
        printf("-;");
-       if (bpf_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
+       if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
                printf("---;");
        } else {
                for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
@@ -98,10 +99,10 @@ static void print_stacks(void)
        int fd = map_fd[0], stack_map = map_fd[1];
 
        sys_read_seen = sys_write_seen = false;
-       while (bpf_get_next_key(fd, &key, &next_key) == 0) {
-               bpf_lookup_elem(fd, &next_key, &value);
+       while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+               bpf_map_lookup_elem(fd, &next_key, &value);
                print_stack(&next_key, value);
-               bpf_delete_elem(fd, &next_key);
+               bpf_map_delete_elem(fd, &next_key);
                key = next_key;
        }
 
@@ -111,8 +112,8 @@ static void print_stacks(void)
        }
 
        /* clear stack map */
-       while (bpf_get_next_key(stack_map, &stackid, &next_id) == 0) {
-               bpf_delete_elem(stack_map, &next_id);
+       while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
+               bpf_map_delete_elem(stack_map, &next_id);
                stackid = next_id;
        }
 }
@@ -125,9 +126,9 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
 
        /* open perf_event on all cpus */
        for (i = 0; i < nr_cpus; i++) {
-               pmu_fd[i] = perf_event_open(attr, -1, i, -1, 0);
+               pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
                if (pmu_fd[i] < 0) {
-                       printf("perf_event_open failed\n");
+                       printf("sys_perf_event_open failed\n");
                        goto all_cpu_err;
                }
                assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
@@ -146,9 +147,9 @@ static void test_perf_event_task(struct perf_event_attr *attr)
        int pmu_fd;
 
        /* open task bound event */
-       pmu_fd = perf_event_open(attr, 0, -1, -1, 0);
+       pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
        if (pmu_fd < 0) {
-               printf("perf_event_open failed\n");
+               printf("sys_perf_event_open failed\n");
                return;
        }
        assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
index 661a7d052f2cbb7a6a0f20acf4c71d34d49feb05..f4fa6af22def9fa9c01df1525c7a61b3aecb8cdd 100644 (file)
@@ -21,6 +21,7 @@
 #include <signal.h>
 #include "libbpf.h"
 #include "bpf_load.h"
+#include "perf-sys.h"
 
 static int pmu_fd;
 
@@ -61,7 +62,7 @@ struct perf_event_sample {
        char data[];
 };
 
-void perf_event_read(print_fn fn)
+static void perf_event_read(print_fn fn)
 {
        __u64 data_tail = header->data_tail;
        __u64 data_head = header->data_head;
@@ -159,10 +160,10 @@ static void test_bpf_perf_event(void)
        };
        int key = 0;
 
-       pmu_fd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
+       pmu_fd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
 
        assert(pmu_fd >= 0);
-       assert(bpf_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
+       assert(bpf_map_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
        ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
 }
 
index 3e225e331f664847736242fc5fd1c7abce9c696b..ded9804c503418c9ed46b1ffa677455720e33a7b 100644 (file)
@@ -48,12 +48,12 @@ static void print_hist_for_pid(int fd, void *task)
        long max_value = 0;
        int i, ind;
 
-       while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+       while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
                if (memcmp(&next_key, task, SIZE)) {
                        key = next_key;
                        continue;
                }
-               bpf_lookup_elem(fd, &next_key, values);
+               bpf_map_lookup_elem(fd, &next_key, values);
                value = 0;
                for (i = 0; i < nr_cpus; i++)
                        value += values[i];
@@ -83,7 +83,7 @@ static void print_hist(int fd)
        int task_cnt = 0;
        int i;
 
-       while (bpf_get_next_key(fd, &key, &next_key) == 0) {
+       while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
                int found = 0;
 
                for (i = 0; i < task_cnt; i++)
@@ -136,8 +136,8 @@ int main(int ac, char **argv)
 
        for (i = 0; i < 5; i++) {
                key = 0;
-               while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
-                       bpf_lookup_elem(map_fd[0], &next_key, &value);
+               while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
+                       bpf_map_lookup_elem(map_fd[0], &next_key, &value);
                        printf("location 0x%lx count %ld\n", next_key, value);
                        key = next_key;
                }
index d0851cb4fa8d2c967662d9bcdc33b3b859eff9cc..8f7d199d59459cf16520b62a0ef12dcb7e6f6c04 100644 (file)
@@ -28,7 +28,7 @@ static void clear_stats(int fd)
 
        memset(values, 0, sizeof(values));
        for (key = 0; key < SLOTS; key++)
-               bpf_update_elem(fd, &key, values, BPF_ANY);
+               bpf_map_update_elem(fd, &key, values, BPF_ANY);
 }
 
 const char *color[] = {
@@ -89,7 +89,7 @@ static void print_hist(int fd)
        int i;
 
        for (key = 0; key < SLOTS; key++) {
-               bpf_lookup_elem(fd, &key, values);
+               bpf_map_lookup_elem(fd, &key, values);
                value = 0;
                for (i = 0; i < nr_cpus; i++)
                        value += values[i];
index bc4a3bdea6ed4cbceed4478899702b8ee04dd8eb..03449f773cb1f4da6b1e42c272cc1c83da36074f 100644 (file)
@@ -37,8 +37,8 @@ static void print_old_objects(int fd)
        key = write(1, "\e[1;1H\e[2J", 12); /* clear screen */
 
        key = -1;
-       while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
-               bpf_lookup_elem(map_fd[0], &next_key, &v);
+       while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
+               bpf_map_lookup_elem(map_fd[0], &next_key, &v);
                key = next_key;
                if (val - v.val < 1000000000ll)
                        /* object was allocated more then 1 sec ago */
index 8ea4976cfcf1201a52f0ce4f4f27f06454c1f61f..ca7874ed77f4326db742366641c982612e9261b0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/bpf.h>
 #include "libbpf.h"
 #include "bpf_load.h"
+#include "perf-sys.h"
 
 #define SAMPLE_PERIOD  0x7fffffffffffffffULL
 
@@ -30,13 +31,13 @@ static void test_bpf_perf_event(void)
        };
 
        for (i = 0; i < nr_cpus; i++) {
-               pmu_fd[i] = perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
+               pmu_fd[i] = sys_perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
                if (pmu_fd[i] < 0) {
                        printf("event syscall failed\n");
                        goto exit;
                }
 
-               bpf_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
+               bpf_map_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
                ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
        }
 
index 5f040a0d771291f7493226e9a4cdc312732fad1d..d2be65d1fd86ed199909bc6c68327f7d31e6b35a 100644 (file)
@@ -43,7 +43,7 @@ static void poll_stats(int interval)
                for (key = 0; key < nr_keys; key++) {
                        __u64 sum = 0;
 
-                       assert(bpf_lookup_elem(map_fd[0], &key, values) == 0);
+                       assert(bpf_map_lookup_elem(map_fd[0], &key, values) == 0);
                        for (i = 0; i < nr_cpus; i++)
                                sum += (values[i] - prev[key][i]);
                        if (sum)
index 7a71f5c746848fe29df7076c3aa7acb48101c544..70e192fc61aa4b9a317a1e8b11df0d36034cc877 100644 (file)
@@ -51,7 +51,7 @@ static void poll_stats(unsigned int kill_after_s)
                for (proto = 0; proto < nr_protos; proto++) {
                        __u64 sum = 0;
 
-                       assert(bpf_lookup_elem(map_fd[0], &proto, values) == 0);
+                       assert(bpf_map_lookup_elem(map_fd[0], &proto, values) == 0);
                        for (i = 0; i < nr_cpus; i++)
                                sum += (values[i] - prev[proto][i]);
 
@@ -237,8 +237,8 @@ int main(int argc, char **argv)
 
        while (min_port <= max_port) {
                vip.dport = htons(min_port++);
-               if (bpf_update_elem(map_fd[1], &vip, &tnl, BPF_NOEXIST)) {
-                       perror("bpf_update_elem(&vip2tnl)");
+               if (bpf_map_update_elem(map_fd[1], &vip, &tnl, BPF_NOEXIST)) {
+                       perror("bpf_map_update_elem(&vip2tnl)");
                        return 1;
                }
        }
index 1d1ac51359e3abe9ff1e32ebaba24de04f2909e9..6fc2b8789a0bf677f2ca874dab6eaec96ef4ca3b 100644 (file)
@@ -1,4 +1,6 @@
 hostprogs-y    := genheaders
-HOST_EXTRACFLAGS += -Isecurity/selinux/include
+HOST_EXTRACFLAGS += \
+       -I$(srctree)/include/uapi -I$(srctree)/include \
+       -I$(srctree)/security/selinux/include
 
 always         := $(hostprogs-y)
index 539855ff31f977f32a1afbc16b35c9bfc2df6387..f4dd41f900d5ce8a672479139938feb8b96105f9 100644 (file)
@@ -1,3 +1,7 @@
+
+/* NOTE: we really do want to use the kernel headers here */
+#define __EXPORTED_HEADERS__
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
index dba7eff69a00962e99ca2e34ff4a4f47da59f58b..d6a83cafe59f46d35df53048005be42a1fca6c90 100644 (file)
@@ -1,5 +1,7 @@
 hostprogs-y    := mdp
-HOST_EXTRACFLAGS += -Isecurity/selinux/include
+HOST_EXTRACFLAGS += \
+       -I$(srctree)/include/uapi -I$(srctree)/include \
+       -I$(srctree)/security/selinux/include
 
 always         := $(hostprogs-y)
 clean-files    := policy.* file_contexts
index e10beb11b696e4f6d289e3c74a7dddf970b1b66b..c29fa4a6228d6f59f9346721d4569cb15002b3c6 100644 (file)
  * Authors: Serge E. Hallyn <serue@us.ibm.com>
  */
 
+
+/* NOTE: we really do want to use the kernel headers here */
+#define __EXPORTED_HEADERS__
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
index 5487827fa86c7f1b236521dca9d36c1805bfc1f8..370eb2f4dd379f7cf0708f17306f5a98c721443b 100644 (file)
@@ -27,6 +27,18 @@ config IMA
          to learn more about IMA.
          If unsure, say N.
 
+config IMA_KEXEC
+       bool "Enable carrying the IMA measurement list across a soft boot"
+       depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
+       default n
+       help
+          TPM PCRs are only reset on a hard reboot.  In order to validate
+          a TPM's quote after a soft boot, the IMA measurement list of the
+          running kernel must be saved and restored on boot.
+
+          Depending on the IMA policy, the measurement list can grow to
+          be very large.
+
 config IMA_MEASURE_PCR_IDX
        int
        depends on IMA
index 9aeaedad1e2b9f69e89fa19750490b9ad5cdcc0b..29f198bde02b9ff12fb38b18dfda191b9c3c1e91 100644 (file)
@@ -8,4 +8,5 @@ obj-$(CONFIG_IMA) += ima.o
 ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
         ima_policy.o ima_template.o ima_template_lib.o
 ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
+ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
 obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
index db25f54a04fe5bd28cdd90dbb82d794bca5e9262..5e6180a4da7d2a9d583d4b81bbcfb76a1d9e558a 100644 (file)
 
 #include "../integrity.h"
 
+#ifdef CONFIG_HAVE_IMA_KEXEC
+#include <asm/ima.h>
+#endif
+
 enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
                     IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
 enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
@@ -81,6 +85,7 @@ struct ima_template_field {
 
 /* IMA template descriptor definition */
 struct ima_template_desc {
+       struct list_head list;
        char *name;
        char *fmt;
        int num_fields;
@@ -102,6 +107,27 @@ struct ima_queue_entry {
 };
 extern struct list_head ima_measurements;      /* list of all measurements */
 
+/* Some details preceding the binary serialized measurement list */
+struct ima_kexec_hdr {
+       u16 version;
+       u16 _reserved0;
+       u32 _reserved1;
+       u64 buffer_size;
+       u64 count;
+};
+
+#ifdef CONFIG_HAVE_IMA_KEXEC
+void ima_load_kexec_buffer(void);
+#else
+static inline void ima_load_kexec_buffer(void) {}
+#endif /* CONFIG_HAVE_IMA_KEXEC */
+
+/*
+ * The default binary_runtime_measurements list format is defined as the
+ * platform native format.  The canonical format is defined as little-endian.
+ */
+extern bool ima_canonical_fmt;
+
 /* Internal IMA function definitions */
 int ima_init(void);
 int ima_fs_init(void);
@@ -122,7 +148,12 @@ int ima_init_crypto(void);
 void ima_putc(struct seq_file *m, void *data, int datalen);
 void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
 struct ima_template_desc *ima_template_desc_current(void);
+int ima_restore_measurement_entry(struct ima_template_entry *entry);
+int ima_restore_measurement_list(loff_t bufsize, void *buf);
+int ima_measurements_show(struct seq_file *m, void *v);
+unsigned long ima_get_binary_runtime_size(void);
 int ima_init_template(void);
+void ima_init_template_list(void);
 
 /*
  * used to protect h_table and sha_table
index 38f2ed830dd6fb23c216b014e39405318705d8ee..802d5d20f36fe46ecb787163f4dafd532741c78b 100644 (file)
@@ -477,11 +477,13 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
                u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
                u8 *data_to_hash = field_data[i].data;
                u32 datalen = field_data[i].len;
+               u32 datalen_to_hash =
+                   !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);
 
                if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
                        rc = crypto_shash_update(shash,
-                                               (const u8 *) &field_data[i].len,
-                                               sizeof(field_data[i].len));
+                                               (const u8 *) &datalen_to_hash,
+                                               sizeof(datalen_to_hash));
                        if (rc)
                                break;
                } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
index 3df46906492dec72a6381aa2a6aa67500e6d7883..ca303e5d2b9403d858c9df8f5512c80040b0f3f7 100644 (file)
 
 static DEFINE_MUTEX(ima_write_mutex);
 
+bool ima_canonical_fmt;
+static int __init default_canonical_fmt_setup(char *str)
+{
+#ifdef __BIG_ENDIAN
+       ima_canonical_fmt = 1;
+#endif
+       return 1;
+}
+__setup("ima_canonical_fmt", default_canonical_fmt_setup);
+
 static int valid_policy = 1;
 #define TMPBUFLEN 12
 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
@@ -116,13 +126,13 @@ void ima_putc(struct seq_file *m, void *data, int datalen)
  *       [eventdata length]
  *       eventdata[n]=template specific data
  */
-static int ima_measurements_show(struct seq_file *m, void *v)
+int ima_measurements_show(struct seq_file *m, void *v)
 {
        /* the list never shrinks, so we don't need a lock here */
        struct ima_queue_entry *qe = v;
        struct ima_template_entry *e;
        char *template_name;
-       int namelen;
+       u32 pcr, namelen, template_data_len; /* temporary fields */
        bool is_ima_template = false;
        int i;
 
@@ -139,25 +149,29 @@ static int ima_measurements_show(struct seq_file *m, void *v)
         * PCR used defaults to the same (config option) in
         * little-endian format, unless set in policy
         */
-       ima_putc(m, &e->pcr, sizeof(e->pcr));
+       pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr);
+       ima_putc(m, &pcr, sizeof(e->pcr));
 
        /* 2nd: template digest */
        ima_putc(m, e->digest, TPM_DIGEST_SIZE);
 
        /* 3rd: template name size */
-       namelen = strlen(template_name);
+       namelen = !ima_canonical_fmt ? strlen(template_name) :
+               cpu_to_le32(strlen(template_name));
        ima_putc(m, &namelen, sizeof(namelen));
 
        /* 4th:  template name */
-       ima_putc(m, template_name, namelen);
+       ima_putc(m, template_name, strlen(template_name));
 
        /* 5th:  template length (except for 'ima' template) */
        if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
                is_ima_template = true;
 
-       if (!is_ima_template)
-               ima_putc(m, &e->template_data_len,
-                        sizeof(e->template_data_len));
+       if (!is_ima_template) {
+               template_data_len = !ima_canonical_fmt ? e->template_data_len :
+                       cpu_to_le32(e->template_data_len);
+               ima_putc(m, &template_data_len, sizeof(e->template_data_len));
+       }
 
        /* 6th:  template specific data */
        for (i = 0; i < e->template_desc->num_fields; i++) {
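
The repeated "!ima_canonical_fmt ? x : cpu_to_le32(x)" pattern above only changes the bytes on big-endian hosts, where cpu_to_le32() swaps; on little-endian hosts it is the identity. A standalone sketch of the idea, using a compiler builtin in place of the kernel's byte-order macros:

	#include <stdint.h>

	static int canonical_fmt;	/* mirrors ima_canonical_fmt */

	/* Identity on little-endian hosts, byte swap on big-endian ones. */
	static uint32_t to_le32(uint32_t v)
	{
		const union { uint16_t u; uint8_t b; } probe = { .u = 1 };

		return probe.b ? v : __builtin_bswap32(v);
	}

	static uint32_t emit_u32(uint32_t v)
	{
		return canonical_fmt ? to_le32(v) : v;
	}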
index 2ac1f41db5c05677007d09aac73c1fa1aa19ff03..2967d497a665c9f627af9ecf77f1554a97f5addc 100644 (file)
@@ -129,6 +129,8 @@ int __init ima_init(void)
        if (rc != 0)
                return rc;
 
+       ima_load_kexec_buffer();
+
        rc = ima_add_boot_aggregate();  /* boot aggregate must be first entry */
        if (rc != 0)
                return rc;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
new file mode 100644 (file)
index 0000000..e473eee
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
+ * Mimi Zohar <zohar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/kexec.h>
+#include "ima.h"
+
+#ifdef CONFIG_IMA_KEXEC
+static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+                                    unsigned long segment_size)
+{
+       struct ima_queue_entry *qe;
+       struct seq_file file;
+       struct ima_kexec_hdr khdr;
+       int ret = 0;
+
+       /* segment size can't change between kexec load and execute */
+       file.buf = vmalloc(segment_size);
+       if (!file.buf) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       file.size = segment_size;
+       file.read_pos = 0;
+       file.count = sizeof(khdr);      /* reserved space */
+
+       memset(&khdr, 0, sizeof(khdr));
+       khdr.version = 1;
+       list_for_each_entry_rcu(qe, &ima_measurements, later) {
+               if (file.count < file.size) {
+                       khdr.count++;
+                       ima_measurements_show(&file, qe);
+               } else {
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+
+       if (ret < 0)
+               goto out;
+
+       /*
+        * fill in reserved space with some buffer details
+        * (e.g. version, buffer size, number of measurements)
+        */
+       khdr.buffer_size = file.count;
+       if (ima_canonical_fmt) {
+               khdr.version = cpu_to_le16(khdr.version);
+               khdr.count = cpu_to_le64(khdr.count);
+               khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
+       }
+       memcpy(file.buf, &khdr, sizeof(khdr));
+
+       print_hex_dump(KERN_DEBUG, "ima dump: ", DUMP_PREFIX_NONE,
+                       16, 1, file.buf,
+                       file.count < 100 ? file.count : 100, true);
+
+       *buffer_size = file.count;
+       *buffer = file.buf;
+out:
+       if (ret == -EINVAL)
+               vfree(file.buf);
+       return ret;
+}
+
+/*
+ * Called during kexec_file_load so that IMA can add a segment to the kexec
+ * image for the measurement list for the next kernel.
+ *
+ * This function assumes that kexec_mutex is held.
+ */
+void ima_add_kexec_buffer(struct kimage *image)
+{
+       struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
+                                 .buf_min = 0, .buf_max = ULONG_MAX,
+                                 .top_down = true };
+       unsigned long binary_runtime_size;
+
+       /* use more understandable variable names than those defined in kbuf */
+       void *kexec_buffer = NULL;
+       size_t kexec_buffer_size;
+       size_t kexec_segment_size;
+       int ret;
+
+       /*
+        * Reserve an extra half page of memory for additional measurements
+        * added during the kexec load.
+        */
+       binary_runtime_size = ima_get_binary_runtime_size();
+       if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
+               kexec_segment_size = ULONG_MAX;
+       else
+               kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
+                                          PAGE_SIZE / 2, PAGE_SIZE);
+       if ((kexec_segment_size == ULONG_MAX) ||
+           ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) {
+               pr_err("Binary measurement list too large.\n");
+               return;
+       }
+
+       ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
+                                 kexec_segment_size);
+       if (!kexec_buffer) {
+               pr_err("Not enough memory for the kexec measurement buffer.\n");
+               return;
+       }
+
+       kbuf.buffer = kexec_buffer;
+       kbuf.bufsz = kexec_buffer_size;
+       kbuf.memsz = kexec_segment_size;
+       ret = kexec_add_buffer(&kbuf);
+       if (ret) {
+               pr_err("Error passing over kexec measurement buffer.\n");
+               return;
+       }
+
+       ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size);
+       if (ret) {
+               pr_err("Error passing over kexec measurement buffer.\n");
+               return;
+       }
+
+       pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+                kbuf.mem);
+}
+#endif /* CONFIG_IMA_KEXEC */
+
+/*
+ * Restore the measurement list from the previous kernel.
+ */
+void ima_load_kexec_buffer(void)
+{
+       void *kexec_buffer = NULL;
+       size_t kexec_buffer_size = 0;
+       int rc;
+
+       rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
+       switch (rc) {
+       case 0:
+               rc = ima_restore_measurement_list(kexec_buffer_size,
+                                                 kexec_buffer);
+               if (rc != 0)
+                       pr_err("Failed to restore the measurement list: %d\n",
+                               rc);
+
+               ima_free_kexec_buffer();
+               break;
+       case -ENOTSUPP:
+               pr_debug("Restoring the measurement list not supported\n");
+               break;
+       case -ENOENT:
+               pr_debug("No measurement list to restore\n");
+               break;
+       default:
+               pr_debug("Error restoring the measurement list: %d\n", rc);
+       }
+}
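
For a feel of the sizing in ima_add_kexec_buffer(): with 4 KiB pages and a 10,000-byte measurement list, ALIGN(10000 + 2048, 4096) reserves 12,288 bytes (three pages), leaving roughly half a page of headroom for measurements taken between kexec load and execute. A standalone sketch of that arithmetic, with ALIGN written out as the kernel defines it:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	/* Round x up to the (power-of-two) boundary a, as in the kernel. */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long binary_runtime_size = 10000;	/* example */
		unsigned long segment = ALIGN(binary_runtime_size + PAGE_SIZE / 2,
					      PAGE_SIZE);

		printf("%lu bytes, %lu pages\n", segment, segment / PAGE_SIZE);
		return 0;		/* prints: 12288 bytes, 3 pages */
	}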
index 423d111b3b9475ef43092b77a744ed6fe65599ab..50818c60538b8e0e764de72c842cfb608abb305a 100644 (file)
@@ -418,6 +418,7 @@ static int __init init_ima(void)
 {
        int error;
 
+       ima_init_template_list();
        hash_setup(CONFIG_IMA_DEFAULT_HASH);
        error = ima_init();
        if (!error) {
index 32f6ac0f96dfb0ed3c8d296c817e97946834c28e..d9aa5ab712044a4d7e0a3a11ecfb86547198e3af 100644 (file)
 #define AUDIT_CAUSE_LEN_MAX 32
 
 LIST_HEAD(ima_measurements);   /* list of all measurements */
+#ifdef CONFIG_IMA_KEXEC
+static unsigned long binary_runtime_size;
+#else
+static unsigned long binary_runtime_size = ULONG_MAX;
+#endif
 
 /* key: inode (before secure-hashing a file) */
 struct ima_h_table ima_htable = {
@@ -64,12 +69,32 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
        return ret;
 }
 
+/*
+ * Calculate the memory required for serializing a single
+ * binary_runtime_measurement list entry, which contains a
+ * couple of variable-length fields (e.g. template name and data).
+ */
+static int get_binary_runtime_size(struct ima_template_entry *entry)
+{
+       int size = 0;
+
+       size += sizeof(u32);    /* pcr */
+       size += sizeof(entry->digest);
+       size += sizeof(int);    /* template name size field */
+       size += strlen(entry->template_desc->name) + 1;
+       size += sizeof(entry->template_data_len);
+       size += entry->template_data_len;
+       return size;
+}
+
 /* ima_add_template_entry helper function:
- * - Add template entry to measurement list and hash table.
+ * - Add template entry to the measurement list and hash table, for
+ *   all entries except those carried across kexec.
  *
  * (Called with ima_extend_list_mutex held.)
  */
-static int ima_add_digest_entry(struct ima_template_entry *entry)
+static int ima_add_digest_entry(struct ima_template_entry *entry,
+                               bool update_htable)
 {
        struct ima_queue_entry *qe;
        unsigned int key;
@@ -85,11 +110,34 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
        list_add_tail_rcu(&qe->later, &ima_measurements);
 
        atomic_long_inc(&ima_htable.len);
-       key = ima_hash_key(entry->digest);
-       hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+       if (update_htable) {
+               key = ima_hash_key(entry->digest);
+               hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+       }
+
+       if (binary_runtime_size != ULONG_MAX) {
+               int size;
+
+               size = get_binary_runtime_size(entry);
+               binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
+                    binary_runtime_size + size : ULONG_MAX;
+       }
        return 0;
 }
 
+/*
+ * Return the amount of memory required for serializing the
+ * entire binary_runtime_measurement list, including the ima_kexec_hdr
+ * structure.
+ */
+unsigned long ima_get_binary_runtime_size(void)
+{
+       if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
+               return ULONG_MAX;
+       else
+               return binary_runtime_size + sizeof(struct ima_kexec_hdr);
+}
+
 static int ima_pcr_extend(const u8 *hash, int pcr)
 {
        int result = 0;
@@ -103,8 +151,13 @@ static int ima_pcr_extend(const u8 *hash, int pcr)
        return result;
 }
 
-/* Add template entry to the measurement list and hash table,
- * and extend the pcr.
+/*
+ * Add template entry to the measurement list and hash table, and
+ * extend the pcr.
+ *
+ * On systems which support carrying the IMA measurement list across
+ * kexec, maintain the total memory size required for serializing the
+ * binary_runtime_measurements.
  */
 int ima_add_template_entry(struct ima_template_entry *entry, int violation,
                           const char *op, struct inode *inode,
@@ -126,7 +179,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
                }
        }
 
-       result = ima_add_digest_entry(entry);
+       result = ima_add_digest_entry(entry, 1);
        if (result < 0) {
                audit_cause = "ENOMEM";
                audit_info = 0;
@@ -149,3 +202,13 @@ out:
                            op, audit_cause, result, audit_info);
        return result;
 }
+
+int ima_restore_measurement_entry(struct ima_template_entry *entry)
+{
+       int result = 0;
+
+       mutex_lock(&ima_extend_list_mutex);
+       result = ima_add_digest_entry(entry, 0);
+       mutex_unlock(&ima_extend_list_mutex);
+       return result;
+}
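
To make get_binary_runtime_size() concrete: an "ima-ng" entry with 56 bytes of template data accounts for 4 (pcr) + 20 (digest) + 4 (name-length field) + 7 ("ima-ng" plus NUL) + 4 (template_data_len) + 56 = 95 bytes. The +1 for the NUL slightly over-reserves, since the dumped name itself is not NUL-terminated; over-reserving is harmless here. A sketch of the same accounting:

	#include <stdio.h>
	#include <string.h>

	#define TPM_DIGEST_SIZE 20	/* SHA-1 sized TPM 1.2 digest */

	/* Mirrors get_binary_runtime_size() for one serialized entry. */
	static unsigned long entry_size(const char *name, unsigned long data_len)
	{
		return sizeof(unsigned int)	/* pcr */
		       + TPM_DIGEST_SIZE	/* digest */
		       + sizeof(int)		/* template name size field */
		       + strlen(name) + 1	/* template name */
		       + sizeof(unsigned int)	/* template_data_len */
		       + data_len;
	}

	int main(void)
	{
		printf("%lu\n", entry_size("ima-ng", 56));	/* 95 */
		return 0;
	}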
index febd12ed9b55ab4d6009176c355655c646272437..cebb37c63629fc89465cd020f2efbd550442cfc7 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/rculist.h>
 #include "ima.h"
 #include "ima_template_lib.h"
 
-static struct ima_template_desc defined_templates[] = {
+static struct ima_template_desc builtin_templates[] = {
        {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
        {.name = "ima-ng", .fmt = "d-ng|n-ng"},
        {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
        {.name = "", .fmt = ""},        /* placeholder for a custom format */
 };
 
+static LIST_HEAD(defined_templates);
+static DEFINE_SPINLOCK(template_list);
+
 static struct ima_template_field supported_fields[] = {
        {.field_id = "d", .field_init = ima_eventdigest_init,
         .field_show = ima_show_template_digest},
@@ -37,6 +41,7 @@ static struct ima_template_field supported_fields[] = {
        {.field_id = "sig", .field_init = ima_eventsig_init,
         .field_show = ima_show_template_sig},
 };
+#define MAX_TEMPLATE_NAME_LEN 15
 
 static struct ima_template_desc *ima_template;
 static struct ima_template_desc *lookup_template_desc(const char *name);
@@ -52,6 +57,8 @@ static int __init ima_template_setup(char *str)
        if (ima_template)
                return 1;
 
+       ima_init_template_list();
+
        /*
         * Verify that a template with the supplied name exists.
         * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
@@ -80,7 +87,7 @@ __setup("ima_template=", ima_template_setup);
 
 static int __init ima_template_fmt_setup(char *str)
 {
-       int num_templates = ARRAY_SIZE(defined_templates);
+       int num_templates = ARRAY_SIZE(builtin_templates);
 
        if (ima_template)
                return 1;
@@ -91,22 +98,28 @@ static int __init ima_template_fmt_setup(char *str)
                return 1;
        }
 
-       defined_templates[num_templates - 1].fmt = str;
-       ima_template = defined_templates + num_templates - 1;
+       builtin_templates[num_templates - 1].fmt = str;
+       ima_template = builtin_templates + num_templates - 1;
+
        return 1;
 }
 __setup("ima_template_fmt=", ima_template_fmt_setup);
 
 static struct ima_template_desc *lookup_template_desc(const char *name)
 {
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
-               if (strcmp(defined_templates[i].name, name) == 0)
-                       return defined_templates + i;
+       struct ima_template_desc *template_desc;
+       int found = 0;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(template_desc, &defined_templates, list) {
+               if ((strcmp(template_desc->name, name) == 0) ||
+                   (strcmp(template_desc->fmt, name) == 0)) {
+                       found = 1;
+                       break;
+               }
        }
-
-       return NULL;
+       rcu_read_unlock();
+       return found ? template_desc : NULL;
 }
 
 static struct ima_template_field *lookup_template_field(const char *field_id)
@@ -142,9 +155,14 @@ static int template_desc_init_fields(const char *template_fmt,
 {
        const char *template_fmt_ptr;
        struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
-       int template_num_fields = template_fmt_size(template_fmt);
+       int template_num_fields;
        int i, len;
 
+       if (num_fields && *num_fields > 0) /* already initialized? */
+               return 0;
+
+       template_num_fields = template_fmt_size(template_fmt);
+
        if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
                pr_err("format string '%s' contains too many fields\n",
                       template_fmt);
@@ -182,11 +200,28 @@ static int template_desc_init_fields(const char *template_fmt,
        return 0;
 }
 
+void ima_init_template_list(void)
+{
+       int i;
+
+       if (!list_empty(&defined_templates))
+               return;
+
+       spin_lock(&template_list);
+       for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) {
+               list_add_tail_rcu(&builtin_templates[i].list,
+                                 &defined_templates);
+       }
+       spin_unlock(&template_list);
+}
+
 struct ima_template_desc *ima_template_desc_current(void)
 {
-       if (!ima_template)
+       if (!ima_template) {
+               ima_init_template_list();
                ima_template =
                    lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+       }
        return ima_template;
 }
 
@@ -205,3 +240,239 @@ int __init ima_init_template(void)
 
        return result;
 }
+
+static struct ima_template_desc *restore_template_fmt(char *template_name)
+{
+       struct ima_template_desc *template_desc = NULL;
+       int ret;
+
+       ret = template_desc_init_fields(template_name, NULL, NULL);
+       if (ret < 0) {
+               pr_err("initializing the template \"%s\" failed\n",
+                       template_name);
+               goto out;
+       }
+
+       template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL);
+       if (!template_desc)
+               goto out;
+
+       template_desc->name = "";
+       template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
+       if (!template_desc->fmt)
+               goto out;
+
+       spin_lock(&template_list);
+       list_add_tail_rcu(&template_desc->list, &defined_templates);
+       spin_unlock(&template_list);
+out:
+       return template_desc;
+}
+
+static int ima_restore_template_data(struct ima_template_desc *template_desc,
+                                    void *template_data,
+                                    int template_data_size,
+                                    struct ima_template_entry **entry)
+{
+       struct binary_field_data {
+               u32 len;
+               u8 data[0];
+       } __packed;
+
+       struct binary_field_data *field_data;
+       int offset = 0;
+       int ret = 0;
+       int i;
+
+       *entry = kzalloc(sizeof(**entry) +
+                   template_desc->num_fields * sizeof(struct ima_field_data),
+                   GFP_NOFS);
+       if (!*entry)
+               return -ENOMEM;
+
+       (*entry)->template_desc = template_desc;
+       for (i = 0; i < template_desc->num_fields; i++) {
+               field_data = template_data + offset;
+
+               /* Each field of the template data is prefixed with a length. */
+               if (offset > (template_data_size - sizeof(*field_data))) {
+                       pr_err("Restoring the template field failed\n");
+                       ret = -EINVAL;
+                       break;
+               }
+               offset += sizeof(*field_data);
+
+               if (ima_canonical_fmt)
+                       field_data->len = le32_to_cpu(field_data->len);
+
+               if (offset > (template_data_size - field_data->len)) {
+                       pr_err("Restoring the template field data failed\n");
+                       ret = -EINVAL;
+                       break;
+               }
+               offset += field_data->len;
+
+               (*entry)->template_data[i].len = field_data->len;
+               (*entry)->template_data_len += sizeof(field_data->len);
+
+               (*entry)->template_data[i].data =
+                       kzalloc(field_data->len + 1, GFP_KERNEL);
+               if (!(*entry)->template_data[i].data) {
+                       ret = -ENOMEM;
+                       break;
+               }
+               memcpy((*entry)->template_data[i].data, field_data->data,
+                       field_data->len);
+               (*entry)->template_data_len += field_data->len;
+       }
+
+       if (ret < 0) {
+               ima_free_template_entry(*entry);
+               *entry = NULL;
+       }
+
+       return ret;
+}
+
+/* Restore the serialized binary measurement list without extending PCRs. */
+int ima_restore_measurement_list(loff_t size, void *buf)
+{
+       struct binary_hdr_v1 {
+               u32 pcr;
+               u8 digest[TPM_DIGEST_SIZE];
+               u32 template_name_len;
+               char template_name[0];
+       } __packed;
+       char template_name[MAX_TEMPLATE_NAME_LEN];
+
+       struct binary_data_v1 {
+               u32 template_data_size;
+               char template_data[0];
+       } __packed;
+
+       struct ima_kexec_hdr *khdr = buf;
+       struct binary_hdr_v1 *hdr_v1;
+       struct binary_data_v1 *data_v1;
+
+       void *bufp = buf + sizeof(*khdr);
+       void *bufendp;
+       struct ima_template_entry *entry;
+       struct ima_template_desc *template_desc;
+       unsigned long count = 0;
+       int ret = 0;
+
+       if (!buf || size < sizeof(*khdr))
+               return 0;
+
+       if (ima_canonical_fmt) {
+               khdr->version = le16_to_cpu(khdr->version);
+               khdr->count = le64_to_cpu(khdr->count);
+               khdr->buffer_size = le64_to_cpu(khdr->buffer_size);
+       }
+
+       if (khdr->version != 1) {
+               pr_err("attempting to restore an incompatible measurement list\n");
+               return -EINVAL;
+       }
+
+       if (khdr->count > ULONG_MAX - 1) {
+               pr_err("attempting to restore too many measurements\n");
+               return -EINVAL;
+       }
+
+       /*
+        * ima kexec buffer prefix: version, buffer size, count
+        * v1 format: pcr, digest, template-name-len, template-name,
+        *            template-data-size, template-data
+        */
+       bufendp = buf + khdr->buffer_size;
+       while ((bufp < bufendp) && (count++ < khdr->count)) {
+               hdr_v1 = bufp;
+               if (bufp > (bufendp - sizeof(*hdr_v1))) {
+                       pr_err("attempting to restore partial measurement\n");
+                       ret = -EINVAL;
+                       break;
+               }
+               bufp += sizeof(*hdr_v1);
+
+               if (ima_canonical_fmt)
+                       hdr_v1->template_name_len =
+                           le32_to_cpu(hdr_v1->template_name_len);
+
+               if ((hdr_v1->template_name_len >= MAX_TEMPLATE_NAME_LEN) ||
+                   (bufp > (bufendp - hdr_v1->template_name_len))) {
+                       pr_err("attempting to restore a template name that is too long\n");
+                       ret = -EINVAL;
+                       break;
+               }
+               data_v1 = bufp += (u_int8_t)hdr_v1->template_name_len;
+
+               /* template name is not null terminated */
+               memcpy(template_name, hdr_v1->template_name,
+                      hdr_v1->template_name_len);
+               template_name[hdr_v1->template_name_len] = 0;
+
+               if (strcmp(template_name, "ima") == 0) {
+                       pr_err("attempting to restore an unsupported template \"%s\"\n",
+                              template_name);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               template_desc = lookup_template_desc(template_name);
+               if (!template_desc) {
+                       template_desc = restore_template_fmt(template_name);
+                       if (!template_desc)
+                               break;
+               }
+
+               /*
+                * Only the running system's template format is initialized
+                * on boot.  As needed, initialize the other template formats.
+                */
+               ret = template_desc_init_fields(template_desc->fmt,
+                                               &(template_desc->fields),
+                                               &(template_desc->num_fields));
+               if (ret < 0) {
+                       pr_err("attempting to restore the template fmt \"%s\" failed\n",
+                              template_desc->fmt);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (bufp > (bufendp - sizeof(data_v1->template_data_size))) {
+                       pr_err("restoring the template data size failed\n");
+                       ret = -EINVAL;
+                       break;
+               }
+               bufp += (u_int8_t) sizeof(data_v1->template_data_size);
+
+               if (ima_canonical_fmt)
+                       data_v1->template_data_size =
+                           le32_to_cpu(data_v1->template_data_size);
+
+               if (bufp > (bufendp - data_v1->template_data_size)) {
+                       pr_err("restoring the template data failed\n");
+                       ret = -EINVAL;
+                       break;
+               }
+               bufp += data_v1->template_data_size;
+
+               ret = ima_restore_template_data(template_desc,
+                                               data_v1->template_data,
+                                               data_v1->template_data_size,
+                                               &entry);
+               if (ret < 0)
+                       break;
+
+               memcpy(entry->digest, hdr_v1->digest, TPM_DIGEST_SIZE);
+               entry->pcr =
+                   !ima_canonical_fmt ? hdr_v1->pcr : le32_to_cpu(hdr_v1->pcr);
+               ret = ima_restore_measurement_entry(entry);
+               if (ret < 0)
+                       break;
+       }
+       return ret;
+}
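
Putting the restore path's structs together, each serialized v1 record laid down by ima_measurements_show() and walked back here has this shape (a summary, not new code):

	/*
	 * One v1 record in the kexec buffer:
	 *
	 *   u32  pcr
	 *   u8   digest[TPM_DIGEST_SIZE]
	 *   u32  template_name_len
	 *   char template_name[template_name_len]   (not NUL-terminated)
	 *   u32  template_data_size
	 *   u8   template_data[template_data_size]  (itself a sequence of
	 *                                            u32-length-prefixed fields)
	 *
	 * All integer fields are little-endian when ima_canonical_fmt is
	 * set, native-endian otherwise.
	 */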
index f9bae04ba1762cd3e91728e4506719739b541e7d..f9ba37b3928dce36bb940e7aa4620848aca1f15b 100644 (file)
@@ -103,8 +103,11 @@ static void ima_show_template_data_binary(struct seq_file *m,
        u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
            strlen(field_data->data) : field_data->len;
 
-       if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
-               ima_putc(m, &len, sizeof(len));
+       if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) {
+               u32 field_len = !ima_canonical_fmt ? len : cpu_to_le32(len);
+
+               ima_putc(m, &field_len, sizeof(field_len));
+       }
 
        if (!len)
                return;
index e2d4ad3a4b4c5e01a063747a4286e1b51c05f697..13ae49b0baa091f3ca9202fd51e3a20e46d9b6f4 100644 (file)
@@ -1,3 +1,5 @@
+#include <linux/capability.h>
+
 #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
     "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append"
 
index a2cdf3370afe75f5f5029f5fd78a4a32fa9cc1f7..15d1d5c63c3c40faa5f62316370ea9d8e711fa75 100644 (file)
@@ -384,9 +384,6 @@ static void snd_complete_urb(struct urb *urb)
        if (unlikely(atomic_read(&ep->chip->shutdown)))
                goto exit_clear;
 
-       if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
-               goto exit_clear;
-
        if (usb_pipeout(ep->pipe)) {
                retire_outbound_urb(ep, ctx);
                /* can be stopped during retire callback */
@@ -537,11 +534,6 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
                        alive, ep->ep_num);
        clear_bit(EP_FLAG_STOPPING, &ep->flags);
 
-       ep->data_subs = NULL;
-       ep->sync_slave = NULL;
-       ep->retire_data_urb = NULL;
-       ep->prepare_data_urb = NULL;
-
        return 0;
 }
 
@@ -1028,6 +1020,10 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
 
        if (--ep->use_count == 0) {
                deactivate_urbs(ep, false);
+               ep->data_subs = NULL;
+               ep->sync_slave = NULL;
+               ep->retire_data_urb = NULL;
+               ep->prepare_data_urb = NULL;
                set_bit(EP_FLAG_STOPPING, &ep->flags);
        }
 }
index 9e5fc168c8a3d8cb3d8ef424eed6d67a74f2e8ac..0eb0e87dbe9f511672102f2123129328288a9159 100644 (file)
@@ -73,6 +73,8 @@ enum bpf_cmd {
        BPF_PROG_LOAD,
        BPF_OBJ_PIN,
        BPF_OBJ_GET,
+       BPF_PROG_ATTACH,
+       BPF_PROG_DETACH,
 };
 
 enum bpf_map_type {
@@ -85,6 +87,8 @@ enum bpf_map_type {
        BPF_MAP_TYPE_PERCPU_ARRAY,
        BPF_MAP_TYPE_STACK_TRACE,
        BPF_MAP_TYPE_CGROUP_ARRAY,
+       BPF_MAP_TYPE_LRU_HASH,
+       BPF_MAP_TYPE_LRU_PERCPU_HASH,
 };
 
 enum bpf_prog_type {
@@ -95,8 +99,23 @@ enum bpf_prog_type {
        BPF_PROG_TYPE_SCHED_ACT,
        BPF_PROG_TYPE_TRACEPOINT,
        BPF_PROG_TYPE_XDP,
+       BPF_PROG_TYPE_PERF_EVENT,
+       BPF_PROG_TYPE_CGROUP_SKB,
+       BPF_PROG_TYPE_CGROUP_SOCK,
+       BPF_PROG_TYPE_LWT_IN,
+       BPF_PROG_TYPE_LWT_OUT,
+       BPF_PROG_TYPE_LWT_XMIT,
 };
 
+enum bpf_attach_type {
+       BPF_CGROUP_INET_INGRESS,
+       BPF_CGROUP_INET_EGRESS,
+       BPF_CGROUP_INET_SOCK_CREATE,
+       __MAX_BPF_ATTACH_TYPE
+};
+
+#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+
 #define BPF_PSEUDO_MAP_FD      1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -105,6 +124,13 @@ enum bpf_prog_type {
 #define BPF_EXIST      2 /* update existing element */
 
 #define BPF_F_NO_PREALLOC      (1U << 0)
+/* Instead of having one common LRU list in the
+ * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
+ * which can scale and perform better.
+ * Note, the LRU nodes (including free nodes) cannot be moved
+ * across different LRU lists.
+ */
+#define BPF_F_NO_COMMON_LRU    (1U << 1)
 
 union bpf_attr {
        struct { /* anonymous struct used by BPF_MAP_CREATE command */
@@ -140,243 +166,327 @@ union bpf_attr {
                __aligned_u64   pathname;
                __u32           bpf_fd;
        };
+
+       struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+               __u32           target_fd;      /* container object to attach to */
+               __u32           attach_bpf_fd;  /* eBPF program to attach */
+               __u32           attach_type;
+       };
 } __attribute__((aligned(8)));
 
+/* BPF helper function descriptions:
+ *
+ * void *bpf_map_lookup_elem(&map, &key)
+ *     Return: Map value or NULL
+ *
+ * int bpf_map_update_elem(&map, &key, &value, flags)
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_map_delete_elem(&map, &key)
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_probe_read(void *dst, int size, void *src)
+ *     Return: 0 on success or negative error
+ *
+ * u64 bpf_ktime_get_ns(void)
+ *     Return: current ktime
+ *
+ * int bpf_trace_printk(const char *fmt, int fmt_size, ...)
+ *     Return: length of buffer written or negative error
+ *
+ * u32 bpf_prandom_u32(void)
+ *     Return: random value
+ *
+ * u32 bpf_raw_smp_processor_id(void)
+ *     Return: SMP processor ID
+ *
+ * int bpf_skb_store_bytes(skb, offset, from, len, flags)
+ *     store bytes into packet
+ *     @skb: pointer to skb
+ *     @offset: offset within packet from skb->mac_header
+ *     @from: pointer where to copy bytes from
+ *     @len: number of bytes to store into packet
+ *     @flags: bit 0 - if true, recompute skb->csum
+ *             other bits - reserved
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_l3_csum_replace(skb, offset, from, to, flags)
+ *     recompute IP checksum
+ *     @skb: pointer to skb
+ *     @offset: offset within packet where IP checksum is located
+ *     @from: old value of header field
+ *     @to: new value of header field
+ *     @flags: bits 0-3 - size of header field
+ *             other bits - reserved
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_l4_csum_replace(skb, offset, from, to, flags)
+ *     recompute TCP/UDP checksum
+ *     @skb: pointer to skb
+ *     @offset: offset within packet where TCP/UDP checksum is located
+ *     @from: old value of header field
+ *     @to: new value of header field
+ *     @flags: bits 0-3 - size of header field
+ *             bit 4 - is pseudo header
+ *             other bits - reserved
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_tail_call(ctx, prog_array_map, index)
+ *     jump into another BPF program
+ *     @ctx: context pointer passed to next program
+ *     @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
+ *     @index: index inside array that selects specific program to run
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_clone_redirect(skb, ifindex, flags)
+ *     redirect to another netdev
+ *     @skb: pointer to skb
+ *     @ifindex: ifindex of the net device
+ *     @flags: bit 0 - if set, redirect to ingress instead of egress
+ *             other bits - reserved
+ *     Return: 0 on success or negative error
+ *
+ * u64 bpf_get_current_pid_tgid(void)
+ *     Return: current->tgid << 32 | current->pid
+ *
+ * u64 bpf_get_current_uid_gid(void)
+ *     Return: current_gid << 32 | current_uid
+ *
+ * int bpf_get_current_comm(char *buf, int size_of_buf)
+ *     stores current->comm into buf
+ *     Return: 0 on success or negative error
+ *
+ * u32 bpf_get_cgroup_classid(skb)
+ *     retrieve a proc's classid
+ *     @skb: pointer to skb
+ *     Return: classid if != 0
+ *
+ * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci)
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_skb_vlan_pop(skb)
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_skb_get_tunnel_key(skb, key, size, flags)
+ * int bpf_skb_set_tunnel_key(skb, key, size, flags)
+ *     retrieve or populate tunnel metadata
+ *     @skb: pointer to skb
+ *     @key: pointer to 'struct bpf_tunnel_key'
+ *     @size: size of 'struct bpf_tunnel_key'
+ *     @flags: room for future extensions
+ *     Return: 0 on success or negative error
+ *
+ * u64 bpf_perf_event_read(&map, index)
+ *     Return: Number of events read or error code
+ *
+ * int bpf_redirect(ifindex, flags)
+ *     redirect to another netdev
+ *     @ifindex: ifindex of the net device
+ *     @flags: bit 0 - if set, redirect to ingress instead of egress
+ *             other bits - reserved
+ *     Return: TC_ACT_REDIRECT
+ *
+ * u32 bpf_get_route_realm(skb)
+ *     retrieve a dst's tclassid
+ *     @skb: pointer to skb
+ *     Return: realm if != 0
+ *
+ * int bpf_perf_event_output(ctx, map, index, data, size)
+ *     output perf raw sample
+ *     @ctx: struct pt_regs*
+ *     @map: pointer to perf_event_array map
+ *     @index: index of event in the map
+ *     @data: data on stack to be output as raw data
+ *     @size: size of data
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_get_stackid(ctx, map, flags)
+ *     walk user or kernel stack and return id
+ *     @ctx: struct pt_regs*
+ *     @map: pointer to stack_trace map
+ *     @flags: bits 0-7 - number of stack frames to skip
+ *             bit 8 - collect user stack instead of kernel
+ *             bit 9 - compare stacks by hash only
+ *             bit 10 - if two different stacks hash into the same stackid
+ *                      discard old
+ *             other bits - reserved
+ *     Return: >= 0 stackid on success or negative error
+ *
+ * s64 bpf_csum_diff(from, from_size, to, to_size, seed)
+ *     calculate csum diff
+ *     @from: raw from buffer
+ *     @from_size: length of from buffer
+ *     @to: raw to buffer
+ *     @to_size: length of to buffer
+ *     @seed: optional seed
+ *     Return: csum result or negative error code
+ *
+ * int bpf_skb_get_tunnel_opt(skb, opt, size)
+ *     retrieve tunnel options metadata
+ *     @skb: pointer to skb
+ *     @opt: pointer to raw tunnel option data
+ *     @size: size of @opt
+ *     Return: option size
+ *
+ * int bpf_skb_set_tunnel_opt(skb, opt, size)
+ *     populate tunnel options metadata
+ *     @skb: pointer to skb
+ *     @opt: pointer to raw tunnel option data
+ *     @size: size of @opt
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_skb_change_proto(skb, proto, flags)
+ *     Change protocol of the skb. Currently supported is v4 -> v6,
+ *     v6 -> v4 transitions. The helper will also resize the skb. eBPF
+ *     program is expected to fill the new headers via skb_store_bytes
+ *     and lX_csum_replace.
+ *     @skb: pointer to skb
+ *     @proto: new skb->protocol type
+ *     @flags: reserved
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_skb_change_type(skb, type)
+ *     Change packet type of skb.
+ *     @skb: pointer to skb
+ *     @type: new skb->pkt_type type
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_skb_under_cgroup(skb, map, index)
+ *     Check cgroup2 membership of skb
+ *     @skb: pointer to skb
+ *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ *     @index: index of the cgroup in the bpf_map
+ *     Return:
+ *       == 0 skb failed the cgroup2 descendant test
+ *       == 1 skb succeeded the cgroup2 descendant test
+ *        < 0 error
+ *
+ * u32 bpf_get_hash_recalc(skb)
+ *     Retrieve and possibly recalculate skb->hash.
+ *     @skb: pointer to skb
+ *     Return: hash
+ *
+ * u64 bpf_get_current_task(void)
+ *     Returns current task_struct
+ *     Return: current
+ *
+ * int bpf_probe_write_user(void *dst, void *src, int len)
+ *     safely attempt to write to a location
+ *     @dst: destination address in userspace
+ *     @src: source address on stack
+ *     @len: number of bytes to copy
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_current_task_under_cgroup(map, index)
+ *     Check cgroup2 membership of current task
+ *     @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
+ *     @index: index of the cgroup in the bpf_map
+ *     Return:
+ *       == 0 current failed the cgroup2 descendant test
+ *       == 1 current succeeded the cgroup2 descendant test
+ *        < 0 error
+ *
+ * int bpf_skb_change_tail(skb, len, flags)
+ *     The helper will resize the skb to the given new size, to be used e.g.
+ *     with control messages.
+ *     @skb: pointer to skb
+ *     @len: new skb length
+ *     @flags: reserved
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_skb_pull_data(skb, len)
+ *     The helper will pull in non-linear data in case the skb is non-linear
+ *     and not all of len are part of the linear section. Only needed for
+ *     read/write with direct packet access.
+ *     @skb: pointer to skb
+ *     @len: len to make read/writeable
+ *     Return: 0 on success or negative error
+ *
+ * s64 bpf_csum_update(skb, csum)
+ *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ *     @skb: pointer to skb
+ *     @csum: csum to add
+ *     Return: csum on success or negative error
+ *
+ * void bpf_set_hash_invalid(skb)
+ *     Invalidate current skb->hash.
+ *     @skb: pointer to skb
+ *
+ * int bpf_get_numa_node_id()
+ *     Return: ID of the current NUMA node.
+ *
+ * int bpf_skb_change_head(skb, len, flags)
+ *     Grows headroom of skb and adjusts MAC header offset accordingly.
+ *     Will extend/reallocate as required automatically.
+ *     May change skb data pointer and will thus invalidate any check
+ *     performed for direct packet access.
+ *     @skb: pointer to skb
+ *     @len: length of header to be pushed in front
+ *     @flags: Flags (unused for now)
+ *     Return: 0 on success or negative error
+ *
+ * int bpf_xdp_adjust_head(xdp_md, delta)
+ *     Adjust the xdp_md.data by delta
+ *     @xdp_md: pointer to xdp_md
+ *     @delta: A positive/negative integer to be added to xdp_md.data
+ *     Return: 0 on success or negative on error
+ */
+#define __BPF_FUNC_MAPPER(FN)          \
+       FN(unspec),                     \
+       FN(map_lookup_elem),            \
+       FN(map_update_elem),            \
+       FN(map_delete_elem),            \
+       FN(probe_read),                 \
+       FN(ktime_get_ns),               \
+       FN(trace_printk),               \
+       FN(get_prandom_u32),            \
+       FN(get_smp_processor_id),       \
+       FN(skb_store_bytes),            \
+       FN(l3_csum_replace),            \
+       FN(l4_csum_replace),            \
+       FN(tail_call),                  \
+       FN(clone_redirect),             \
+       FN(get_current_pid_tgid),       \
+       FN(get_current_uid_gid),        \
+       FN(get_current_comm),           \
+       FN(get_cgroup_classid),         \
+       FN(skb_vlan_push),              \
+       FN(skb_vlan_pop),               \
+       FN(skb_get_tunnel_key),         \
+       FN(skb_set_tunnel_key),         \
+       FN(perf_event_read),            \
+       FN(redirect),                   \
+       FN(get_route_realm),            \
+       FN(perf_event_output),          \
+       FN(skb_load_bytes),             \
+       FN(get_stackid),                \
+       FN(csum_diff),                  \
+       FN(skb_get_tunnel_opt),         \
+       FN(skb_set_tunnel_opt),         \
+       FN(skb_change_proto),           \
+       FN(skb_change_type),            \
+       FN(skb_under_cgroup),           \
+       FN(get_hash_recalc),            \
+       FN(get_current_task),           \
+       FN(probe_write_user),           \
+       FN(current_task_under_cgroup),  \
+       FN(skb_change_tail),            \
+       FN(skb_pull_data),              \
+       FN(csum_update),                \
+       FN(set_hash_invalid),           \
+       FN(get_numa_node_id),           \
+       FN(skb_change_head),            \
+       FN(xdp_adjust_head),
+
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
  */
+#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
 enum bpf_func_id {
-       BPF_FUNC_unspec,
-       BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
-       BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
-       BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
-       BPF_FUNC_probe_read,      /* int bpf_probe_read(void *dst, int size, void *src) */
-       BPF_FUNC_ktime_get_ns,    /* u64 bpf_ktime_get_ns(void) */
-       BPF_FUNC_trace_printk,    /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
-       BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
-       BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
-
-       /**
-        * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
-        * @skb: pointer to skb
-        * @offset: offset within packet from skb->mac_header
-        * @from: pointer where to copy bytes from
-        * @len: number of bytes to store into packet
-        * @flags: bit 0 - if true, recompute skb->csum
-        *         other bits - reserved
-        * Return: 0 on success
-        */
-       BPF_FUNC_skb_store_bytes,
-
-       /**
-        * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
-        * @skb: pointer to skb
-        * @offset: offset within packet where IP checksum is located
-        * @from: old value of header field
-        * @to: new value of header field
-        * @flags: bits 0-3 - size of header field
-        *         other bits - reserved
-        * Return: 0 on success
-        */
-       BPF_FUNC_l3_csum_replace,
-
-       /**
-        * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
-        * @skb: pointer to skb
-        * @offset: offset within packet where TCP/UDP checksum is located
-        * @from: old value of header field
-        * @to: new value of header field
-        * @flags: bits 0-3 - size of header field
-        *         bit 4 - is pseudo header
-        *         other bits - reserved
-        * Return: 0 on success
-        */
-       BPF_FUNC_l4_csum_replace,
-
-       /**
-        * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
-        * @ctx: context pointer passed to next program
-        * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
-        * @index: index inside array that selects specific program to run
-        * Return: 0 on success
-        */
-       BPF_FUNC_tail_call,
-
-       /**
-        * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
-        * @skb: pointer to skb
-        * @ifindex: ifindex of the net device
-        * @flags: bit 0 - if set, redirect to ingress instead of egress
-        *         other bits - reserved
-        * Return: 0 on success
-        */
-       BPF_FUNC_clone_redirect,
-
-       /**
-        * u64 bpf_get_current_pid_tgid(void)
-        * Return: current->tgid << 32 | current->pid
-        */
-       BPF_FUNC_get_current_pid_tgid,
-
-       /**
-        * u64 bpf_get_current_uid_gid(void)
-        * Return: current_gid << 32 | current_uid
-        */
-       BPF_FUNC_get_current_uid_gid,
-
-       /**
-        * bpf_get_current_comm(char *buf, int size_of_buf)
-        * stores current->comm into buf
-        * Return: 0 on success
-        */
-       BPF_FUNC_get_current_comm,
-
-       /**
-        * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
-        * @skb: pointer to skb
-        * Return: classid if != 0
-        */
-       BPF_FUNC_get_cgroup_classid,
-       BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
-       BPF_FUNC_skb_vlan_pop,  /* bpf_skb_vlan_pop(skb) */
-
-       /**
-        * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
-        * retrieve or populate tunnel metadata
-        * @skb: pointer to skb
-        * @key: pointer to 'struct bpf_tunnel_key'
-        * @size: size of 'struct bpf_tunnel_key'
-        * @flags: room for future extensions
-        * Retrun: 0 on success
-        */
-       BPF_FUNC_skb_get_tunnel_key,
-       BPF_FUNC_skb_set_tunnel_key,
-       BPF_FUNC_perf_event_read,       /* u64 bpf_perf_event_read(&map, index) */
-       /**
-        * bpf_redirect(ifindex, flags) - redirect to another netdev
-        * @ifindex: ifindex of the net device
-        * @flags: bit 0 - if set, redirect to ingress instead of egress
-        *         other bits - reserved
-        * Return: TC_ACT_REDIRECT
-        */
-       BPF_FUNC_redirect,
-
-       /**
-        * bpf_get_route_realm(skb) - retrieve a dst's tclassid
-        * @skb: pointer to skb
-        * Return: realm if != 0
-        */
-       BPF_FUNC_get_route_realm,
-
-       /**
-        * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample
-        * @ctx: struct pt_regs*
-        * @map: pointer to perf_event_array map
-        * @index: index of event in the map
-        * @data: data on stack to be output as raw data
-        * @size: size of data
-        * Return: 0 on success
-        */
-       BPF_FUNC_perf_event_output,
-       BPF_FUNC_skb_load_bytes,
-
-       /**
-        * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id
-        * @ctx: struct pt_regs*
-        * @map: pointer to stack_trace map
-        * @flags: bits 0-7 - numer of stack frames to skip
-        *         bit 8 - collect user stack instead of kernel
-        *         bit 9 - compare stacks by hash only
-        *         bit 10 - if two different stacks hash into the same stackid
-        *                  discard old
-        *         other bits - reserved
-        * Return: >= 0 stackid on success or negative error
-        */
-       BPF_FUNC_get_stackid,
-
-       /**
-        * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff
-        * @from: raw from buffer
-        * @from_size: length of from buffer
-        * @to: raw to buffer
-        * @to_size: length of to buffer
-        * @seed: optional seed
-        * Return: csum result
-        */
-       BPF_FUNC_csum_diff,
-
-       /**
-        * bpf_skb_[gs]et_tunnel_opt(skb, opt, size)
-        * retrieve or populate tunnel options metadata
-        * @skb: pointer to skb
-        * @opt: pointer to raw tunnel option data
-        * @size: size of @opt
-        * Return: 0 on success for set, option size for get
-        */
-       BPF_FUNC_skb_get_tunnel_opt,
-       BPF_FUNC_skb_set_tunnel_opt,
-
-       /**
-        * bpf_skb_change_proto(skb, proto, flags)
-        * Change protocol of the skb. Currently supported is
-        * v4 -> v6, v6 -> v4 transitions. The helper will also
-        * resize the skb. eBPF program is expected to fill the
-        * new headers via skb_store_bytes and lX_csum_replace.
-        * @skb: pointer to skb
-        * @proto: new skb->protocol type
-        * @flags: reserved
-        * Return: 0 on success or negative error
-        */
-       BPF_FUNC_skb_change_proto,
-
-       /**
-        * bpf_skb_change_type(skb, type)
-        * Change packet type of skb.
-        * @skb: pointer to skb
-        * @type: new skb->pkt_type type
-        * Return: 0 on success or negative error
-        */
-       BPF_FUNC_skb_change_type,
-
-       /**
-        * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb
-        * @skb: pointer to skb
-        * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type
-        * @index: index of the cgroup in the bpf_map
-        * Return:
-        *   == 0 skb failed the cgroup2 descendant test
-        *   == 1 skb succeeded the cgroup2 descendant test
-        *    < 0 error
-        */
-       BPF_FUNC_skb_under_cgroup,
-
-       /**
-        * bpf_get_hash_recalc(skb)
-        * Retrieve and possibly recalculate skb->hash.
-        * @skb: pointer to skb
-        * Return: hash
-        */
-       BPF_FUNC_get_hash_recalc,
-
-       /**
-        * u64 bpf_get_current_task(void)
-        * Returns current task_struct
-        * Return: current
-        */
-       BPF_FUNC_get_current_task,
-
-       /**
-        * bpf_probe_write_user(void *dst, void *src, int len)
-        * safely attempt to write to a location
-        * @dst: destination address in userspace
-        * @src: source address on stack
-        * @len: number of bytes to copy
-        * Return: 0 on success or negative error
-        */
-       BPF_FUNC_probe_write_user,
-
+       __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
        __BPF_FUNC_MAX_ID,
 };
+#undef __BPF_ENUM_FN
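
The X-macro keeps the helper list in a single place; other per-helper tables can be generated from the same list. A hypothetical second expansion (not in this patch) building a helper-name string table:

	#define __BPF_STR_FN(x) [BPF_FUNC_ ## x] = #x
	static const char * const bpf_func_names[] = {
		__BPF_FUNC_MAPPER(__BPF_STR_FN)
	};
	#undef __BPF_STR_FN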
 
 /* All flags used by eBPF helper functions, placed here. */
 
@@ -450,6 +560,31 @@ struct bpf_tunnel_key {
        __u32 tunnel_label;
 };
 
+/* Generic BPF return codes which all BPF program types may support.
+ * The values are binary compatible with their TC_ACT_* counter-part to
+ * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
+ * programs.
+ *
+ * XDP is handled separately, see XDP_*.
+ */
+enum bpf_ret_code {
+       BPF_OK = 0,
+       /* 1 reserved */
+       BPF_DROP = 2,
+       /* 3-6 reserved */
+       BPF_REDIRECT = 7,
+       /* >127 are reserved for prog type specific return codes */
+};
+
+struct bpf_sock {
+       __u32 bound_dev_if;
+       __u32 family;
+       __u32 type;
+       __u32 protocol;
+};
+
+#define XDP_PACKET_HEADROOM 256
+
 /* User return codes for XDP prog type.
  * A valid XDP program must return one of these defined values. All other
  * return codes are reserved for future use. Unknown return codes will result
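
As a toy illustration of the generic return codes, the body of an LWT-style program might look like this (restricted C as compiled by LLVM's BPF backend; context struct from the same UAPI header, section attributes omitted):

	#include <linux/bpf.h>

	/* Drop anything too short to hold an IPv4 header, else continue. */
	int lwt_in_prog(struct __sk_buff *skb)
	{
		if (skb->len < 20)
			return BPF_DROP;
		return BPF_OK;
	}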
index 8143536b462a54a9d633e455615085b8a842aa01..3ddb58a36d3c2534ce6207581a4fee1730c6e6a0 100644 (file)
@@ -54,7 +54,7 @@ static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 }
 
 int bpf_create_map(enum bpf_map_type map_type, int key_size,
-                  int value_size, int max_entries)
+                  int value_size, int max_entries, __u32 map_flags)
 {
        union bpf_attr attr;
 
@@ -64,13 +64,14 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size,
        attr.key_size = key_size;
        attr.value_size = value_size;
        attr.max_entries = max_entries;
+       attr.map_flags = map_flags;
 
        return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 }
 
 int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
                     size_t insns_cnt, char *license,
-                    u32 kern_version, char *log_buf, size_t log_buf_sz)
+                    __u32 kern_version, char *log_buf, size_t log_buf_sz)
 {
        int fd;
        union bpf_attr attr;
@@ -98,7 +99,7 @@ int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
 }
 
 int bpf_map_update_elem(int fd, void *key, void *value,
-                       u64 flags)
+                       __u64 flags)
 {
        union bpf_attr attr;
 
@@ -166,3 +167,26 @@ int bpf_obj_get(const char *pathname)
 
        return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
 }
+
+int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
+{
+       union bpf_attr attr;
+
+       bzero(&attr, sizeof(attr));
+       attr.target_fd     = target_fd;
+       attr.attach_bpf_fd = prog_fd;
+       attr.attach_type   = type;
+
+       return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
+}
+
+int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
+{
+       union bpf_attr attr;
+
+       bzero(&attr, sizeof(attr));
+       attr.target_fd   = target_fd;
+       attr.attach_type = type;
+
+       return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
+}
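
A minimal usage sketch for the new wrappers, assuming an already-loaded BPF_PROG_TYPE_CGROUP_SKB program fd and a hypothetical cgroup v2 path; error handling trimmed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include "bpf.h"

	static int attach_egress(int prog_fd)
	{
		int cg_fd, err;

		cg_fd = open("/sys/fs/cgroup/test", O_RDONLY);	/* hypothetical */
		if (cg_fd < 0)
			return -1;

		err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS);
		if (err)
			fprintf(stderr, "attach failed: %d\n", err);
		close(cg_fd);
		return err;
	}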
index 253c3dbb06b420a39558512f89b80ce81a719287..a2f9853dd88259d810e6506ec5ac48d863bc074b 100644 (file)
 #include <linux/bpf.h>
 
 int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
-                  int max_entries);
+                  int max_entries, __u32 map_flags);
 
 /* Recommend log buffer size */
 #define BPF_LOG_BUF_SIZE 65536
 int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
                     size_t insns_cnt, char *license,
-                    u32 kern_version, char *log_buf,
+                    __u32 kern_version, char *log_buf,
                     size_t log_buf_sz);
 
 int bpf_map_update_elem(int fd, void *key, void *value,
-                       u64 flags);
+                       __u64 flags);
 
 int bpf_map_lookup_elem(int fd, void *key, void *value);
 int bpf_map_delete_elem(int fd, void *key);
 int bpf_map_get_next_key(int fd, void *key, void *next_key);
 int bpf_obj_pin(int fd, const char *pathname);
 int bpf_obj_get(const char *pathname);
+int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
+int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
+
 
 #endif
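
With the new map_flags argument, LRU behavior can be requested at creation time; a sketch with arbitrary key/value sizes:

	#include <linux/bpf.h>
	#include "bpf.h"

	static int create_lru_map(void)
	{
		/* Per-CPU LRU lists scale better than one common list,
		 * at the cost of nodes not migrating between CPUs.
		 */
		return bpf_create_map(BPF_MAP_TYPE_LRU_HASH,
				      sizeof(__u32),	/* key_size */
				      sizeof(__u64),	/* value_size */
				      4096,		/* max_entries */
				      BPF_F_NO_COMMON_LRU);
	}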
index 2e974593f3e8dc2b14b47d61b9354b31f7c8c0c5..84e6b35da4bd7da84022a1d914f765c5f7d8e3c6 100644 (file)
@@ -854,7 +854,8 @@ bpf_object__create_maps(struct bpf_object *obj)
                *pfd = bpf_create_map(def->type,
                                      def->key_size,
                                      def->value_size,
-                                     def->max_entries);
+                                     def->max_entries,
+                                     0);
                if (*pfd < 0) {
                        size_t j;
                        int err = *pfd;
index 7775b1eb2bee6bd092fe854b63b12150aa50699f..76173969ab80375c468e2722ec5e2773dab96a7c 100644 (file)
@@ -132,6 +132,10 @@ OPTIONS for 'perf sched timehist'
 --migrations::
        Show migration events.
 
+-I::
+--idle-hist::
+       Show idle-related events only.
+
 --time::
        Only analyze samples within given time window: <start>,<stop>. Times
        have the format seconds.microseconds. If start is not given (i.e., time
index e5af38eede17f5ec09aaaabf9ad963b6524e7cb1..8fc24824705e0bfecd696bfc50a53ec4924cf9d7 100644 (file)
@@ -201,6 +201,7 @@ goals := $(filter-out all sub-make, $(MAKECMDGOALS))
 $(goals) all: sub-make
 
 sub-make: fixdep
+       @./check-headers.sh
        $(Q)$(MAKE) FIXDEP=1 -f Makefile.perf $(goals)
 
 else # force_fixdep
@@ -404,99 +405,6 @@ export JEVENTS
 build := -f $(srctree)/tools/build/Makefile.build dir=. obj
 
 $(PERF_IN): prepare FORCE
-       @(test -f ../../include/uapi/linux/perf_event.h && ( \
-        (diff -B ../include/uapi/linux/perf_event.h ../../include/uapi/linux/perf_event.h >/dev/null) \
-        || echo "Warning: tools/include/uapi/linux/perf_event.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/linux/hash.h && ( \
-        (diff -B ../include/linux/hash.h ../../include/linux/hash.h >/dev/null) \
-        || echo "Warning: tools/include/linux/hash.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/uapi/linux/hw_breakpoint.h && ( \
-        (diff -B ../include/uapi/linux/hw_breakpoint.h ../../include/uapi/linux/hw_breakpoint.h >/dev/null) \
-        || echo "Warning: tools/include/uapi/linux/hw_breakpoint.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/include/asm/disabled-features.h && ( \
-        (diff -B ../arch/x86/include/asm/disabled-features.h ../../arch/x86/include/asm/disabled-features.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/disabled-features.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/include/asm/required-features.h && ( \
-        (diff -B ../arch/x86/include/asm/required-features.h ../../arch/x86/include/asm/required-features.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/required-features.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/include/asm/cpufeatures.h && ( \
-        (diff -B ../arch/x86/include/asm/cpufeatures.h ../../arch/x86/include/asm/cpufeatures.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/lib/memcpy_64.S && ( \
-        (diff -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \
-        || echo "Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/lib/memset_64.S && ( \
-        (diff -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \
-        || echo "Warning: tools/arch/x86/lib/memset_64.S differs from kernel" >&2 )) || true
-       @(test -f ../../arch/arm/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/arm/include/uapi/asm/perf_regs.h ../../arch/arm/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/arm/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/arm64/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/arm64/include/uapi/asm/perf_regs.h ../../arch/arm64/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/arm64/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/powerpc/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/powerpc/include/uapi/asm/perf_regs.h ../../arch/powerpc/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/powerpc/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/include/uapi/asm/perf_regs.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/perf_regs.h ../../arch/x86/include/uapi/asm/perf_regs.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/kvm.h ../../arch/x86/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/include/uapi/asm/kvm_perf.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/kvm_perf.h ../../arch/x86/include/uapi/asm/kvm_perf.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/include/uapi/asm/svm.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/svm.h ../../arch/x86/include/uapi/asm/svm.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/svm.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/x86/include/uapi/asm/vmx.h && ( \
-        (diff -B ../arch/x86/include/uapi/asm/vmx.h ../../arch/x86/include/uapi/asm/vmx.h >/dev/null) \
-        || echo "Warning: tools/arch/x86/include/uapi/asm/vmx.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/powerpc/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/powerpc/include/uapi/asm/kvm.h ../../arch/powerpc/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/powerpc/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/s390/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/kvm.h ../../arch/s390/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/s390/include/uapi/asm/kvm_perf.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/kvm_perf.h ../../arch/s390/include/uapi/asm/kvm_perf.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/s390/include/uapi/asm/sie.h && ( \
-        (diff -B ../arch/s390/include/uapi/asm/sie.h ../../arch/s390/include/uapi/asm/sie.h >/dev/null) \
-        || echo "Warning: tools/arch/s390/include/uapi/asm/sie.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/arm/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/arm/include/uapi/asm/kvm.h ../../arch/arm/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/arm/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-       @(test -f ../../arch/arm64/include/uapi/asm/kvm.h && ( \
-        (diff -B ../arch/arm64/include/uapi/asm/kvm.h ../../arch/arm64/include/uapi/asm/kvm.h >/dev/null) \
-        || echo "Warning: tools/arch/arm64/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/asm-generic/bitops/arch_hweight.h && ( \
-        (diff -B ../include/asm-generic/bitops/arch_hweight.h ../../include/asm-generic/bitops/arch_hweight.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/arch_hweight.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/asm-generic/bitops/const_hweight.h && ( \
-        (diff -B ../include/asm-generic/bitops/const_hweight.h ../../include/asm-generic/bitops/const_hweight.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/const_hweight.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/asm-generic/bitops/__fls.h && ( \
-        (diff -B ../include/asm-generic/bitops/__fls.h ../../include/asm-generic/bitops/__fls.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/__fls.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/asm-generic/bitops/fls.h && ( \
-        (diff -B ../include/asm-generic/bitops/fls.h ../../include/asm-generic/bitops/fls.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/fls.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/asm-generic/bitops/fls64.h && ( \
-        (diff -B ../include/asm-generic/bitops/fls64.h ../../include/asm-generic/bitops/fls64.h >/dev/null) \
-        || echo "Warning: tools/include/asm-generic/bitops/fls64.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/linux/coresight-pmu.h && ( \
-       (diff -B ../include/linux/coresight-pmu.h ../../include/linux/coresight-pmu.h >/dev/null) \
-       || echo "Warning: tools/include/linux/coresight-pmu.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/uapi/asm-generic/mman-common.h && ( \
-       (diff -B ../include/uapi/asm-generic/mman-common.h ../../include/uapi/asm-generic/mman-common.h >/dev/null) \
-       || echo "Warning: tools/include/uapi/asm-generic/mman-common.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/uapi/asm-generic/mman.h && ( \
-       (diff -B -I "^#include <\(uapi/\)*asm-generic/mman-common.h>$$" ../include/uapi/asm-generic/mman.h ../../include/uapi/asm-generic/mman.h >/dev/null) \
-       || echo "Warning: tools/include/uapi/asm-generic/mman.h differs from kernel" >&2 )) || true
-       @(test -f ../../include/uapi/linux/mman.h && ( \
-       (diff -B -I "^#include <\(uapi/\)*asm/mman.h>$$" ../include/uapi/linux/mman.h ../../include/uapi/linux/mman.h >/dev/null) \
-       || echo "Warning: tools/include/uapi/linux/mman.h differs from kernel" >&2 )) || true
        $(Q)$(MAKE) $(build)=perf
 
 $(JEVENTS_IN): FORCE
index 465012b320ee75a594465d4b76a2648811f7cb59..6d9d6c40a9164442ce2d0c8a9cdd8831cd4a6297 100644 (file)
@@ -48,7 +48,7 @@ static const struct option options[] = {
 };
 
 static const char * const bench_futex_lock_pi_usage[] = {
-       "perf bench futex requeue <options>",
+       "perf bench futex lock-pi <options>",
        NULL
 };
 
index 4b419631753d4882c9038e888113a76cc9f3b124..f8ca7a4ebabcf2f9b805283ddf4df82804316bee 100644 (file)
@@ -208,7 +208,7 @@ static void compute_stats(struct c2c_hist_entry *c2c_he,
 static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                union perf_event *event,
                                struct perf_sample *sample,
-                               struct perf_evsel *evsel __maybe_unused,
+                               struct perf_evsel *evsel,
                                struct machine *machine)
 {
        struct c2c_hists *c2c_hists = &c2c.hists;
@@ -379,7 +379,7 @@ static int symbol_width(struct hists *hists, struct sort_entry *se)
 
 static int c2c_width(struct perf_hpp_fmt *fmt,
                     struct perf_hpp *hpp __maybe_unused,
-                    struct hists *hists __maybe_unused)
+                    struct hists *hists)
 {
        struct c2c_fmt *c2c_fmt;
        struct c2c_dimension *dim;
@@ -1127,7 +1127,7 @@ MEAN_ENTRY(mean_lcl_entry,  lcl_hitm);
 MEAN_ENTRY(mean_load_entry, load);
 
 static int
-cpucnt_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
+cpucnt_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
             struct hist_entry *he)
 {
        struct c2c_hist_entry *c2c_he;
@@ -1141,7 +1141,7 @@ cpucnt_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
 }
 
 static int
-cl_idx_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
+cl_idx_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
             struct hist_entry *he)
 {
        struct c2c_hist_entry *c2c_he;
@@ -1155,7 +1155,7 @@ cl_idx_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
 }
 
 static int
-cl_idx_empty_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
+cl_idx_empty_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                   struct hist_entry *he)
 {
        int width = c2c_width(fmt, hpp, he->hists);
@@ -1779,7 +1779,6 @@ static int c2c_hists__init(struct c2c_hists *hists,
        return hpp_list__parse(&hists->list, NULL, sort);
 }
 
-__maybe_unused
 static int c2c_hists__reinit(struct c2c_hists *c2c_hists,
                             const char *output,
                             const char *sort)
@@ -2658,7 +2657,7 @@ out:
        return err;
 }
 
-static int parse_record_events(const struct option *opt __maybe_unused,
+static int parse_record_events(const struct option *opt,
                               const char *str, int unset __maybe_unused)
 {
        bool *event_set = (bool *) opt->value;
index d1ce29be560e5e7dad2a2faca9a68c2246510a11..cd7bc4d104e27e878e1ed694bf6be1d9b89d0a9d 100644 (file)
@@ -70,8 +70,8 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
        OPT_UINTEGER(0, "ldlat", &perf_mem_events__loads_ldlat, "mem-loads latency"),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose (show counter open errors, etc)"),
-       OPT_BOOLEAN('U', "--all-user", &all_user, "collect only user level data"),
-       OPT_BOOLEAN('K', "--all-kernel", &all_kernel, "collect only kernel level data"),
+       OPT_BOOLEAN('U', "all-user", &all_user, "collect only user level data"),
+       OPT_BOOLEAN('K', "all-kernel", &all_kernel, "collect only kernel level data"),
        OPT_END()
        };
 
index fa26865364b64808e39407e5a3a6e5c9aa277d64..74d6a035133a96a7303287e130abe73af3fc4622 100644 (file)
@@ -1687,6 +1687,9 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
                goto out;
        }
 
+       /* Enable ignoring missing threads when the -u option is specified. */
+       rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
+
        err = -ENOMEM;
        if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
                usage_with_options(record_usage, record_options);
index d2afbe4a240dbc5c90168ff15a60405fa324bf8f..06cc759a459758472fc73925222b3eadd28aeee0 100644 (file)
@@ -648,7 +648,7 @@ report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
 }
 
 static int
-parse_branch_mode(const struct option *opt __maybe_unused,
+parse_branch_mode(const struct option *opt,
                  const char *str __maybe_unused, int unset)
 {
        int *branch_mode = opt->value;
index 1a3f1be93372fe3ef3ddb4ffd7cf427c9b8392cd..d53e706a6f1706d2d69caa93132521f55d89d072 100644 (file)
@@ -200,6 +200,7 @@ struct perf_sched {
        /* options for timehist command */
        bool            summary;
        bool            summary_only;
+       bool            idle_hist;
        bool            show_callchain;
        unsigned int    max_stack;
        bool            show_cpu_visual;
@@ -230,6 +231,15 @@ struct evsel_runtime {
        u32 ncpu;       /* highest cpu slot allocated */
 };
 
+/* per cpu idle time data */
+struct idle_thread_runtime {
+       struct thread_runtime   tr;
+       struct thread           *last_thread;
+       struct rb_root          sorted_root;
+       struct callchain_root   callchain;
+       struct callchain_cursor cursor;
+};
+
 /* track idle times per cpu */
 static struct thread **idle_threads;
 static int idle_max_cpu;
@@ -1765,7 +1775,7 @@ static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu)
        return r->last_time[cpu];
 }
 
-static int comm_width = 20;
+static int comm_width = 30;
 
 static char *timehist_get_commstr(struct thread *thread)
 {
@@ -1807,7 +1817,7 @@ static void timehist_header(struct perf_sched *sched)
                printf(" ");
        }
 
-       printf(" %-20s  %9s  %9s  %9s",
+       printf(" %-*s  %9s  %9s  %9s", comm_width,
                "task name", "wait time", "sch delay", "run time");
 
        printf("\n");
@@ -1820,7 +1830,8 @@ static void timehist_header(struct perf_sched *sched)
        if (sched->show_cpu_visual)
                printf(" %*s ", ncpus, "");
 
-       printf(" %-20s  %9s  %9s  %9s\n", "[tid/pid]", "(msec)", "(msec)", "(msec)");
+       printf(" %-*s  %9s  %9s  %9s\n", comm_width,
+              "[tid/pid]", "(msec)", "(msec)", "(msec)");
 
        /*
         * separator
@@ -1830,7 +1841,7 @@ static void timehist_header(struct perf_sched *sched)
        if (sched->show_cpu_visual)
                printf(" %.*s ", ncpus, graph_dotted_line);
 
-       printf(" %.20s  %.9s  %.9s  %.9s",
+       printf(" %.*s  %.9s  %.9s  %.9s", comm_width,
                graph_dotted_line, graph_dotted_line, graph_dotted_line,
                graph_dotted_line);
 
@@ -1939,39 +1950,40 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
        r->total_run_time += r->dt_run;
 }
 
-static bool is_idle_sample(struct perf_sched *sched,
-                          struct perf_sample *sample,
-                          struct perf_evsel *evsel,
-                          struct machine *machine)
+static bool is_idle_sample(struct perf_sample *sample,
+                          struct perf_evsel *evsel)
 {
-       struct thread *thread;
-       struct callchain_cursor *cursor = &callchain_cursor;
-
        /* pid 0 == swapper == idle task */
-       if (sample->pid == 0)
-               return true;
+       if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
+               return perf_evsel__intval(evsel, sample, "prev_pid") == 0;
 
-       if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0) {
-               if (perf_evsel__intval(evsel, sample, "prev_pid") == 0)
-                       return true;
-       }
+       return sample->pid == 0;
+}
+
+static void save_task_callchain(struct perf_sched *sched,
+                               struct perf_sample *sample,
+                               struct perf_evsel *evsel,
+                               struct machine *machine)
+{
+       struct callchain_cursor *cursor = &callchain_cursor;
+       struct thread *thread;
 
        /* want main thread for process - has maps */
        thread = machine__findnew_thread(machine, sample->pid, sample->pid);
        if (thread == NULL) {
                pr_debug("Failed to get thread for pid %d.\n", sample->pid);
-               return false;
+               return;
        }
 
        if (!symbol_conf.use_callchain || sample->callchain == NULL)
-               return false;
+               return;
 
        if (thread__resolve_callchain(thread, cursor, evsel, sample,
                                      NULL, NULL, sched->max_stack + 2) != 0) {
                if (verbose)
                        error("Failed to resolve callchain. Skipping\n");
 
-               return false;
+               return;
        }
 
        callchain_cursor_commit(cursor);
@@ -1994,8 +2006,24 @@ static bool is_idle_sample(struct perf_sched *sched,
 
                callchain_cursor_advance(cursor);
        }
+}
+
+static int init_idle_thread(struct thread *thread)
+{
+       struct idle_thread_runtime *itr;
+
+       thread__set_comm(thread, idle_comm, 0);
+
+       itr = zalloc(sizeof(*itr));
+       if (itr == NULL)
+               return -ENOMEM;
 
-       return false;
+       init_stats(&itr->tr.run_stats);
+       callchain_init(&itr->callchain);
+       callchain_cursor_reset(&itr->cursor);
+       thread__set_priv(thread, itr);
+
+       return 0;
 }
 
 /*
@@ -2004,7 +2032,7 @@ static bool is_idle_sample(struct perf_sched *sched,
  */
 static int init_idle_threads(int ncpu)
 {
-       int i;
+       int i, ret;
 
        idle_threads = zalloc(ncpu * sizeof(struct thread *));
        if (!idle_threads)
@@ -2018,7 +2046,9 @@ static int init_idle_threads(int ncpu)
                if (idle_threads[i] == NULL)
                        return -ENOMEM;
 
-               thread__set_comm(idle_threads[i], idle_comm, 0);
+               ret = init_idle_thread(idle_threads[i]);
+               if (ret < 0)
+                       return ret;
        }
 
        return 0;
@@ -2065,14 +2095,23 @@ static struct thread *get_idle_thread(int cpu)
                idle_threads[cpu] = thread__new(0, 0);
 
                if (idle_threads[cpu]) {
-                       idle_threads[cpu]->tid = 0;
-                       thread__set_comm(idle_threads[cpu], idle_comm, 0);
+                       if (init_idle_thread(idle_threads[cpu]) < 0)
+                               return NULL;
                }
        }
 
        return idle_threads[cpu];
 }
 
+static void save_idle_callchain(struct idle_thread_runtime *itr,
+                               struct perf_sample *sample)
+{
+       if (!symbol_conf.use_callchain || sample->callchain == NULL)
+               return;
+
+       callchain_cursor__copy(&itr->cursor, &callchain_cursor);
+}
+
 /*
  * handle runtime stats saved per thread
  */
@@ -2111,7 +2150,7 @@ static struct thread *timehist_get_thread(struct perf_sched *sched,
 {
        struct thread *thread;
 
-       if (is_idle_sample(sched, sample, evsel, machine)) {
+       if (is_idle_sample(sample, evsel)) {
                thread = get_idle_thread(sample->cpu);
                if (thread == NULL)
                        pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
@@ -2124,13 +2163,37 @@ static struct thread *timehist_get_thread(struct perf_sched *sched,
                        pr_debug("Failed to get thread for tid %d. skipping sample.\n",
                                 sample->tid);
                }
+
+               save_task_callchain(sched, sample, evsel, machine);
+               if (sched->idle_hist) {
+                       struct thread *idle;
+                       struct idle_thread_runtime *itr;
+
+                       idle = get_idle_thread(sample->cpu);
+                       if (idle == NULL) {
+                               pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
+                               return NULL;
+                       }
+
+                       itr = thread__priv(idle);
+                       if (itr == NULL)
+                               return NULL;
+
+                       itr->last_thread = thread;
+
+                       /* copy the task callchain when entering idle */
+                       if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
+                               save_idle_callchain(itr, sample);
+               }
        }
 
        return thread;
 }
 
 static bool timehist_skip_sample(struct perf_sched *sched,
-                                struct thread *thread)
+                                struct thread *thread,
+                                struct perf_evsel *evsel,
+                                struct perf_sample *sample)
 {
        bool rc = false;
 
@@ -2139,10 +2202,19 @@ static bool timehist_skip_sample(struct perf_sched *sched,
                sched->skipped_samples++;
        }
 
+       if (sched->idle_hist) {
+               if (strcmp(perf_evsel__name(evsel), "sched:sched_switch"))
+                       rc = true;
+               else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 &&
+                        perf_evsel__intval(evsel, sample, "next_pid") != 0)
+                       rc = true;
+       }
+
        return rc;
 }
 
 static void timehist_print_wakeup_event(struct perf_sched *sched,
+                                       struct perf_evsel *evsel,
                                        struct perf_sample *sample,
                                        struct machine *machine,
                                        struct thread *awakened)
@@ -2155,8 +2227,8 @@ static void timehist_print_wakeup_event(struct perf_sched *sched,
                return;
 
        /* show wakeup unless both awakee and awaker are filtered */
-       if (timehist_skip_sample(sched, thread) &&
-           timehist_skip_sample(sched, awakened)) {
+       if (timehist_skip_sample(sched, thread, evsel, sample) &&
+           timehist_skip_sample(sched, awakened, evsel, sample)) {
                return;
        }
 
@@ -2201,7 +2273,7 @@ static int timehist_sched_wakeup_event(struct perf_tool *tool,
        /* show wakeups if requested */
        if (sched->show_wakeups &&
            !perf_time__skip_sample(&sched->ptime, sample->time))
-               timehist_print_wakeup_event(sched, sample, machine, thread);
+               timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
 
        return 0;
 }
@@ -2228,8 +2300,8 @@ static void timehist_print_migration_event(struct perf_sched *sched,
        if (thread == NULL)
                return;
 
-       if (timehist_skip_sample(sched, thread) &&
-           timehist_skip_sample(sched, migrated)) {
+       if (timehist_skip_sample(sched, thread, evsel, sample) &&
+           timehist_skip_sample(sched, migrated, evsel, sample)) {
                return;
        }
 
@@ -2314,7 +2386,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
                goto out;
        }
 
-       if (timehist_skip_sample(sched, thread))
+       if (timehist_skip_sample(sched, thread, evsel, sample))
                goto out;
 
        tr = thread__get_runtime(thread);
@@ -2333,7 +2405,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
        if (ptime->start && ptime->start > t)
                goto out;
 
-       if (ptime->start > tprev)
+       if (tprev && ptime->start > tprev)
                tprev = ptime->start;
 
        /*
@@ -2350,7 +2422,39 @@ static int timehist_sched_change_event(struct perf_tool *tool,
                        t = ptime->end;
        }
 
-       timehist_update_runtime_stats(tr, t, tprev);
+       if (!sched->idle_hist || thread->tid == 0) {
+               timehist_update_runtime_stats(tr, t, tprev);
+
+               if (sched->idle_hist) {
+                       struct idle_thread_runtime *itr = (void *)tr;
+                       struct thread_runtime *last_tr;
+
+                       BUG_ON(thread->tid != 0);
+
+                       if (itr->last_thread == NULL)
+                               goto out;
+
+                       /* add current idle time as last thread's runtime */
+                       last_tr = thread__get_runtime(itr->last_thread);
+                       if (last_tr == NULL)
+                               goto out;
+
+                       timehist_update_runtime_stats(last_tr, t, tprev);
+                       /*
+                        * Remove the delta time of the last thread as it is
+                        * not updated and would otherwise show an invalid
+                        * value next time.  We only care about total run
+                        * time and run stats.
+                        */
+                       last_tr->dt_run = 0;
+                       last_tr->dt_wait = 0;
+                       last_tr->dt_delay = 0;
+
+                       if (itr->cursor.nr)
+                               callchain_append(&itr->callchain, &itr->cursor, t - tprev);
+
+                       itr->last_thread = NULL;
+               }
+       }
 
        if (!sched->summary_only)
                timehist_print_sample(sched, sample, &al, thread, t);
@@ -2457,6 +2561,60 @@ static int show_deadthread_runtime(struct thread *t, void *priv)
        return __show_thread_runtime(t, priv);
 }
 
+static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
+{
+       const char *sep = " <- ";
+       struct callchain_list *chain;
+       size_t ret = 0;
+       char bf[1024];
+       bool first;
+
+       if (node == NULL)
+               return 0;
+
+       ret = callchain__fprintf_folded(fp, node->parent);
+       first = (ret == 0);
+
+       list_for_each_entry(chain, &node->val, list) {
+               if (chain->ip >= PERF_CONTEXT_MAX)
+                       continue;
+               if (chain->ms.sym && chain->ms.sym->ignore)
+                       continue;
+               ret += fprintf(fp, "%s%s", first ? "" : sep,
+                              callchain_list__sym_name(chain, bf, sizeof(bf),
+                                                       false));
+               first = false;
+       }
+
+       return ret;
+}
+
+static size_t timehist_print_idlehist_callchain(struct rb_root *root)
+{
+       size_t ret = 0;
+       FILE *fp = stdout;
+       struct callchain_node *chain;
+       struct rb_node *rb_node = rb_first(root);
+
+       printf("  %16s  %8s  %s\n", "Idle time (msec)", "Count", "Callchains");
+       printf("  %.16s  %.8s  %.50s\n", graph_dotted_line, graph_dotted_line,
+              graph_dotted_line);
+
+       while (rb_node) {
+               chain = rb_entry(rb_node, struct callchain_node, rb_node);
+               rb_node = rb_next(rb_node);
+
+               ret += fprintf(fp, "  ");
+               print_sched_time(chain->hit, 12);
+               ret += 16;  /* print_sched_time() prints its 2nd arg + 4 chars */
+               ret += fprintf(fp, " %8d  ", chain->count);
+               ret += callchain__fprintf_folded(fp, chain);
+               ret += fprintf(fp, "\n");
+       }
+
+       return ret;
+}
+
 static void timehist_print_summary(struct perf_sched *sched,
                                   struct perf_session *session)
 {
@@ -2469,12 +2627,15 @@ static void timehist_print_summary(struct perf_sched *sched,
 
        memset(&totals, 0, sizeof(totals));
 
-       if (comm_width < 30)
-               comm_width = 30;
-
-       printf("\nRuntime summary\n");
-       printf("%*s  parent   sched-in  ", comm_width, "comm");
-       printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
+       if (sched->idle_hist) {
+               printf("\nIdle-time summary\n");
+               printf("%*s  parent  sched-out  ", comm_width, "comm");
+               printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
+       } else {
+               printf("\nRuntime summary\n");
+               printf("%*s  parent   sched-in  ", comm_width, "comm");
+               printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
+       }
        printf("%*s            (count)  ", comm_width, "");
        printf("     (msec)     (msec)      (msec)      (msec)       %%\n");
        printf("%.117s\n", graph_dotted_line);
@@ -2490,7 +2651,7 @@ static void timehist_print_summary(struct perf_sched *sched,
                printf("<no terminated tasks>\n");
 
        /* CPU idle stats not tracked when samples were skipped */
-       if (sched->skipped_samples)
+       if (sched->skipped_samples && !sched->idle_hist)
                return;
 
        printf("\nIdle stats:\n");
@@ -2509,6 +2670,35 @@ static void timehist_print_summary(struct perf_sched *sched,
                        printf("    CPU %2d idle entire time window\n", i);
        }
 
+       if (sched->idle_hist && symbol_conf.use_callchain) {
+               callchain_param.mode  = CHAIN_FOLDED;
+               callchain_param.value = CCVAL_PERIOD;
+
+               callchain_register_param(&callchain_param);
+
+               printf("\nIdle stats by callchain:\n");
+               for (i = 0; i < idle_max_cpu; ++i) {
+                       struct idle_thread_runtime *itr;
+
+                       t = idle_threads[i];
+                       if (!t)
+                               continue;
+
+                       itr = thread__priv(t);
+                       if (itr == NULL)
+                               continue;
+
+                       callchain_param.sort(&itr->sorted_root, &itr->callchain,
+                                            0, &callchain_param);
+
+                       printf("  CPU %2d:", i);
+                       print_sched_time(itr->tr.total_run_time, 6);
+                       printf(" msec\n");
+                       timehist_print_idlehist_callchain(&itr->sorted_root);
+                       printf("\n");
+               }
+       }
+
        printf("\n"
               "    Total number of unique tasks: %" PRIu64 "\n"
               "Total number of context switches: %" PRIu64 "\n"
@@ -3036,6 +3226,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
        OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
        OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
+       OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
        OPT_STRING(0, "time", &sched.time_str, "str",
                   "Time span for analysis (start,stop)"),
        OPT_PARENT(sched_options)
index 688dea7cb08f7337bc6c620f98aa3e996ce4d581..a02f2e9656284fa5d5c4a4b9d45f55c28e167e89 100644 (file)
@@ -2195,7 +2195,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
 }
 
 static
-int process_stat_config_event(struct perf_tool *tool __maybe_unused,
+int process_stat_config_event(struct perf_tool *tool,
                              union perf_event *event,
                              struct perf_session *session __maybe_unused)
 {
@@ -2238,7 +2238,7 @@ static int set_maps(struct perf_stat *st)
 }
 
 static
-int process_thread_map_event(struct perf_tool *tool __maybe_unused,
+int process_thread_map_event(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_session *session __maybe_unused)
 {
@@ -2257,7 +2257,7 @@ int process_thread_map_event(struct perf_tool *tool __maybe_unused,
 }
 
 static
-int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
+int process_cpu_map_event(struct perf_tool *tool,
                          union perf_event *event,
                          struct perf_session *session __maybe_unused)
 {
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
new file mode 100755 (executable)
index 0000000..c747bfd
--- /dev/null
@@ -0,0 +1,59 @@
+#!/bin/sh
+
+HEADERS='
+include/uapi/linux/perf_event.h
+include/linux/hash.h
+include/uapi/linux/hw_breakpoint.h
+arch/x86/include/asm/disabled-features.h
+arch/x86/include/asm/required-features.h
+arch/x86/include/asm/cpufeatures.h
+arch/arm/include/uapi/asm/perf_regs.h
+arch/arm64/include/uapi/asm/perf_regs.h
+arch/powerpc/include/uapi/asm/perf_regs.h
+arch/x86/include/uapi/asm/perf_regs.h
+arch/x86/include/uapi/asm/kvm.h
+arch/x86/include/uapi/asm/kvm_perf.h
+arch/x86/include/uapi/asm/svm.h
+arch/x86/include/uapi/asm/vmx.h
+arch/powerpc/include/uapi/asm/kvm.h
+arch/s390/include/uapi/asm/kvm.h
+arch/s390/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/sie.h
+arch/arm/include/uapi/asm/kvm.h
+arch/arm64/include/uapi/asm/kvm.h
+include/asm-generic/bitops/arch_hweight.h
+include/asm-generic/bitops/const_hweight.h
+include/asm-generic/bitops/__fls.h
+include/asm-generic/bitops/fls.h
+include/asm-generic/bitops/fls64.h
+include/linux/coresight-pmu.h
+include/uapi/asm-generic/mman-common.h
+'
+
+check () {
+  file=$1
+  opts=
+
+  shift
+  while [ -n "$*" ]; do
+    opts="$opts \"$1\""
+    shift
+  done
+
+  cmd="diff $opts ../$file ../../$file > /dev/null"
+
+  test -f ../../$file &&
+  eval $cmd || echo "Warning: $file differs from kernel" >&2
+}
+
+# simple diff check
+for i in $HEADERS; do
+  check $i -B
+done
+
+# diff with extra ignore lines
+check arch/x86/lib/memcpy_64.S        -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
+check arch/x86/lib/memset_64.S        -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"
+check include/uapi/asm-generic/mman.h -B -I "^#include <\(uapi/\)*asm-generic/mman-common.h>"
+check include/uapi/linux/mman.h       -B -I "^#include <\(uapi/\)*asm/mman.h>"
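
For illustration: the check() helper quotes every option and evals the
resulting command, so the memcpy_64.S line above expands to roughly:

	diff "-B" "-I" "^EXPORT_SYMBOL" "-I" "^#include <asm/export.h>" \
	     ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S > /dev/null

Compared with the old inline Makefile rules, each header is listed once and
the per-file diff options are data rather than repeated shell.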
index 9a0236a4cf95fb7226af43e33b88e351359b8a2b..1c27d947c2fe55c9f0d0390695e6b586d839ec0b 100644 (file)
@@ -55,6 +55,7 @@ struct record_opts {
        bool         all_user;
        bool         tail_synthesize;
        bool         overwrite;
+       bool         ignore_missing_thread;
        unsigned int freq;
        unsigned int mmap_pages;
        unsigned int auxtrace_mmap_pages;
index 23605202d4a1acf2aaf26882f67b6dd6fbf3ee91..a77dcc0d24e3f356c9da230cd84a35eddde99852 100644 (file)
@@ -185,6 +185,10 @@ static struct test generic_tests[] = {
                .desc = "Synthesize thread map",
                .func = test__thread_map_synthesize,
        },
+       {
+               .desc = "Remove thread map",
+               .func = test__thread_map_remove,
+       },
        {
                .desc = "Synthesize cpu map",
                .func = test__cpu_map_synthesize,
index 0d7b251305afcffd355727c3792696b82192d7f3..a512f0c8ff5b50160b0206c602769df89185c521 100644 (file)
@@ -80,6 +80,7 @@ const char *test__bpf_subtest_get_desc(int subtest);
 int test__bpf_subtest_get_nr(void);
 int test_session_topology(int subtest);
 int test__thread_map_synthesize(int subtest);
+int test__thread_map_remove(int subtest);
 int test__cpu_map_synthesize(int subtest);
 int test__synthesize_stat_config(int subtest);
 int test__synthesize_stat(int subtest);
index cee2a2cdc93353fc18915889bd32b9a43cad9c30..a4a4b4625ac3d8864531b781c747945b53f1f849 100644 (file)
@@ -1,3 +1,4 @@
+#include <stdlib.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <sys/prctl.h>
@@ -93,3 +94,46 @@ int test__thread_map_synthesize(int subtest __maybe_unused)
 
        return 0;
 }
+
+int test__thread_map_remove(int subtest __maybe_unused)
+{
+       struct thread_map *threads;
+       char *str;
+       int i;
+
+       TEST_ASSERT_VAL("failed to allocate map string",
+                       asprintf(&str, "%d,%d", getpid(), getppid()) >= 0);
+
+       threads = thread_map__new_str(str, NULL, 0);
+
+       TEST_ASSERT_VAL("failed to allocate thread_map",
+                       threads);
+
+       if (verbose)
+               thread_map__fprintf(threads, stderr);
+
+       TEST_ASSERT_VAL("failed to remove thread",
+                       !thread_map__remove(threads, 0));
+
+       TEST_ASSERT_VAL("thread_map count != 1", threads->nr == 1);
+
+       if (verbose)
+               thread_map__fprintf(threads, stderr);
+
+       TEST_ASSERT_VAL("failed to remove thread",
+                       !thread_map__remove(threads, 0));
+
+       TEST_ASSERT_VAL("thread_map count != 0", threads->nr == 0);
+
+       if (verbose)
+               thread_map__fprintf(threads, stderr);
+
+       TEST_ASSERT_VAL("failed to not remove thread",
+                       thread_map__remove(threads, 0));
+
+       for (i = 0; i < threads->nr; i++)
+               free(threads->map[i].comm);
+
+       free(threads);
+       return 0;
+}
index fd710ab33684e65e6b62acc62ac51b5e12d02b35..af1cfde6b97b12962045d9bc66126c217335ad21 100644 (file)
@@ -42,7 +42,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
 
        P_MMAP_FLAG(SHARED);
        P_MMAP_FLAG(PRIVATE);
+#ifdef MAP_32BIT
        P_MMAP_FLAG(32BIT);
+#endif
        P_MMAP_FLAG(ANONYMOUS);
        P_MMAP_FLAG(DENYWRITE);
        P_MMAP_FLAG(EXECUTABLE);
index ec7a30fad14918bcdf8f01b6c90970da8f43a676..ba36aac340bc7d6531eaeaace7ee40ee38851806 100644 (file)
@@ -215,7 +215,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
                        ui_browser__set_color(browser, color);
                if (dl->ins.ops && dl->ins.ops->scnprintf) {
                        if (ins__is_jump(&dl->ins)) {
-                               bool fwd = dl->ops.target.offset > (u64)dl->offset;
+                               bool fwd = dl->ops.target.offset > dl->offset;
 
                                ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
                                                                    SLSMG_UARROW_CHAR);
@@ -245,7 +245,8 @@ static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sy
 {
        if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins)
            || !disasm_line__has_offset(dl)
-           || dl->ops.target.offset >= symbol__size(sym))
+           || dl->ops.target.offset < 0
+           || dl->ops.target.offset >= (s64)symbol__size(sym))
                return false;
 
        return true;
index ea7e0de4b9c1445ae56f1ff04cd6d0e9b97cc5d2..06cc04e5806a2692fffabbc2c038b82380dfcafd 100644 (file)
@@ -223,13 +223,19 @@ bool ins__is_call(const struct ins *ins)
 static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
 {
        const char *s = strchr(ops->raw, '+');
+       const char *c = strchr(ops->raw, ',');
 
-       ops->target.addr = strtoull(ops->raw, NULL, 16);
+       if (c++ != NULL)
+               ops->target.addr = strtoull(c, NULL, 16);
+       else
+               ops->target.addr = strtoull(ops->raw, NULL, 16);
 
-       if (s++ != NULL)
+       if (s++ != NULL) {
                ops->target.offset = strtoull(s, NULL, 16);
-       else
-               ops->target.offset = UINT64_MAX;
+               ops->target.offset_avail = true;
+       } else {
+               ops->target.offset_avail = false;
+       }
 
        return 0;
 }
@@ -237,7 +243,7 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
-       if (!ops->target.addr)
+       if (!ops->target.addr || ops->target.offset < 0)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
        return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
@@ -641,7 +647,8 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 
        pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
 
-       if (addr < sym->start || addr >= sym->end) {
+       if ((addr < sym->start || addr >= sym->end) &&
+           (addr != sym->end || sym->start != sym->end)) {
                pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
                       __func__, __LINE__, sym->name, sym->start, addr, sym->end);
                return -ERANGE;
@@ -1205,9 +1212,11 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
        if (dl == NULL)
                return -1;
 
-       if (dl->ops.target.offset == UINT64_MAX)
+       if (!disasm_line__has_offset(dl)) {
                dl->ops.target.offset = dl->ops.target.addr -
                                        map__rip_2objdump(map, sym->start);
+               dl->ops.target.offset_avail = true;
+       }
 
        /* kcore has no symbols, so add the call target name */
        if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
index 87e4cadc5d2758ce4603428560c9f8548e4fc851..09776b5af991d3334fd35c1b9218999bdfda38da 100644 (file)
@@ -24,7 +24,8 @@ struct ins_operands {
                char    *raw;
                char    *name;
                u64     addr;
-               u64     offset;
+               s64     offset;
+               bool    offset_avail;
        } target;
        union {
                struct {
@@ -68,7 +69,7 @@ struct disasm_line {
 
 static inline bool disasm_line__has_offset(const struct disasm_line *dl)
 {
-       return dl->ops.target.offset != UINT64_MAX;
+       return dl->ops.target.offset_avail;
 }
 
 void disasm_line__free(struct disasm_line *dl);
index b2365a63db45c16f62c13093f55187f78ffd9c12..04e536ae4d88423e3c05a98fd1bd97ff64379b14 100644 (file)
@@ -990,6 +990,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
         * it overloads any global configuration.
         */
        apply_config_terms(evsel, opts);
+
+       evsel->ignore_missing_thread = opts->ignore_missing_thread;
 }
 
 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -1419,6 +1421,33 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
        return fprintf(fp, "  %-32s %s\n", name, val);
 }
 
+static bool ignore_missing_thread(struct perf_evsel *evsel,
+                                 struct thread_map *threads,
+                                 int thread, int err)
+{
+       if (!evsel->ignore_missing_thread)
+               return false;
+
+       /* The system wide setup does not work with threads. */
+       if (evsel->system_wide)
+               return false;
+
+       /* -ESRCH is the perf event syscall errno for pids that do not exist. */
+       if (err != -ESRCH)
+               return false;
+
+       /* If there's only one thread, let it fail. */
+       if (threads->nr == 1)
+               return false;
+
+       if (thread_map__remove(threads, thread))
+               return false;
+
+       pr_warning("WARNING: Ignored open failure for pid %d\n",
+                  thread_map__pid(threads, thread));
+       return true;
+}
+
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads)
 {
@@ -1474,7 +1503,7 @@ retry_sample_id:
        for (cpu = 0; cpu < cpus->nr; cpu++) {
 
                for (thread = 0; thread < nthreads; thread++) {
-                       int group_fd;
+                       int fd, group_fd;
 
                        if (!evsel->cgrp && !evsel->system_wide)
                                pid = thread_map__pid(threads, thread);
@@ -1484,21 +1513,37 @@ retry_open:
                        pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
                                  pid, cpus->map[cpu], group_fd, flags);
 
-                       FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
-                                                                    pid,
-                                                                    cpus->map[cpu],
-                                                                    group_fd, flags);
-                       if (FD(evsel, cpu, thread) < 0) {
+                       fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
+                                                group_fd, flags);
+
+                       FD(evsel, cpu, thread) = fd;
+
+                       if (fd < 0) {
                                err = -errno;
+
+                               if (ignore_missing_thread(evsel, threads, thread, err)) {
+                                       /*
+                                        * We just removed 1 thread, so take a step
+                                        * back on thread index and lower the upper
+                                        * nthreads limit.
+                                        */
+                                       nthreads--;
+                                       thread--;
+
+                                        /* ... and pretend like nothing has happened. */
+                                       err = 0;
+                                       continue;
+                               }
+
                                pr_debug2("\nsys_perf_event_open failed, error %d\n",
                                          err);
                                goto try_fallback;
                        }
 
-                       pr_debug2(" = %d\n", FD(evsel, cpu, thread));
+                       pr_debug2(" = %d\n", fd);
 
                        if (evsel->bpf_fd >= 0) {
-                               int evt_fd = FD(evsel, cpu, thread);
+                               int evt_fd = fd;
                                int bpf_fd = evsel->bpf_fd;
 
                                err = ioctl(evt_fd,
index 6abb89cd27f95e1c11cdd25786b0cade3a804fe6..06ef6f29efa12a6c1300eca641e9446a0a4d17f8 100644 (file)
@@ -120,6 +120,7 @@ struct perf_evsel {
        bool                    tracking;
        bool                    per_pkg;
        bool                    precise_max;
+       bool                    ignore_missing_thread;
        /* parse modifier helper */
        int                     exclude_GH;
        int                     nr_members;
index df2482b2ba45c47883a254db2ff4d6551aeea83b..dc93940de351540b89af3dfef487572e84d1f3f8 100644 (file)
@@ -1459,7 +1459,8 @@ int dso__load(struct dso *dso, struct map *map)
         * Read the build id if possible. This is required for
         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
         */
-       if (is_regular_file(dso->long_name) &&
+       if (!dso->has_build_id &&
+           is_regular_file(dso->long_name) &&
            filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
                dso__set_build_id(dso, build_id);
 
index 40585f5b7027d1d1eb7c0d78e754c1a7963a74f9..f9eab200fd757ee0f6824cad0b2bbe9316afbec1 100644 (file)
@@ -448,3 +448,25 @@ bool thread_map__has(struct thread_map *threads, pid_t pid)
 
        return false;
 }
+
+int thread_map__remove(struct thread_map *threads, int idx)
+{
+       int i;
+
+       if (threads->nr < 1)
+               return -EINVAL;
+
+       if (idx >= threads->nr)
+               return -EINVAL;
+
+       /*
+        * Free the 'idx' entry and shift the following entries down by one.
+        */
+       free(threads->map[idx].comm);
+
+       for (i = idx; i < threads->nr - 1; i++)
+               threads->map[i] = threads->map[i + 1];
+
+       threads->nr--;
+       return 0;
+}
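
A minimal sketch of a caller dropping a vanished thread, mirroring the
perf_evsel open path above; the drop_thread() helper is hypothetical:

	#include <stdio.h>
	#include "thread_map.h"

	static int drop_thread(struct thread_map *threads, int idx)
	{
		if (idx < 0 || idx >= threads->nr)
			return -1;

		fprintf(stderr, "dropping pid %d\n",
			thread_map__pid(threads, idx));

		/* returns 0 on success, -EINVAL on a bad index */
		return thread_map__remove(threads, idx);
	}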
index bd3b971588da57ce14951df6d0a0870a6def1355..ea0ef08c6303413b6ececf2b93ec905914e558aa 100644 (file)
@@ -58,4 +58,5 @@ static inline char *thread_map__comm(struct thread_map *map, int thread)
 
 void thread_map__read_comms(struct thread_map *threads);
 bool thread_map__has(struct thread_map *threads, pid_t pid);
+int thread_map__remove(struct thread_map *threads, int idx);
 #endif /* __PERF_THREAD_MAP_H */