Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 23 Dec 2016 19:23:25 +0000 (11:23 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 23 Dec 2016 19:23:25 +0000 (11:23 -0800)
Pull networking fixes from David Miller:

 1) We have to be careful not to place a checksum after the end of a
    rawv6 packet. Fix from Dave Jones with help from Hannes Frederic
    Sowa.

 2) Missing memory barriers in tcp_tasklet_func() lead to crashes, from
    Eric Dumazet.

 3) Several bug fixes for the new XDP support in virtio_net, from Jason
    Wang.

 4) Increase headroom in RX skbs in be2net driver to accommodate
    encapsulations such as geneve. From Kalesh A P.

 5) Fix SKB frag unmapping on TX in mvpp2, from Thomas Petazzoni.

 6) Pre-pulling UDP headers created a regression in RECVORIGDSTADDR
    socket option support, from Willem de Bruijn.

 7) UID-based routing added a potential OOPS in ip_do_redirect() when
    we see an SKB without a socket attached. We only need the socket
    for its network namespace, which we can get from skb->dev instead.
    Fix from Lorenzo Colitti.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits)
  sctp: fix recovering from 0 win with small data chunks
  sctp: do not lose window information if in rwnd_over
  virtio-net: XDP support for small buffers
  virtio-net: remove big packet XDP codes
  virtio-net: forbid XDP when VIRTIO_NET_F_GUEST_UFO is supported
  virtio-net: make rx buf size estimation work for XDP
  virtio-net: unbreak csumed packets for XDP_PASS
  virtio-net: correctly handle XDP_PASS for linearized packets
  virtio-net: fix page miscount during XDP linearizing
  virtio-net: correctly xmit linearized page on XDP_TX
  virtio-net: remove the warning before XDP linearizing
  mlxsw: spectrum_router: Correctly remove nexthop groups
  mlxsw: spectrum_router: Don't reflect dead neighs
  neigh: Send netevent after marking neigh as dead
  ipv6: handle -EFAULT from skb_copy_bits
  inet: fix IP(V6)_RECVORIGDSTADDR for udp sockets
  net/sched: cls_flower: Mandate mask when matching on flags
  net/sched: act_tunnel_key: Fix setting UDP dst port in metadata under IPv6
  stmmac: CSR clock configuration fix
  net: ipv4: Don't crash if passing a null sk to ip_do_redirect.
  ...

284 files changed:
CREDITS
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/devicetree/bindings/mmc/sdhci-cadence.txt
Documentation/features/io/sg-chain/arch-support.txt
Documentation/scsi/g_NCR5380.txt
Documentation/x86/intel_rdt_ui.txt [new file with mode: 0644]
MAINTAINERS
arch/arc/Kconfig
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/entry-compact.S
arch/arc/kernel/intc-arcv2.c
arch/arc/mm/cache.c
arch/arm64/include/asm/acpi.h
arch/arm64/kernel/acpi.c
arch/parisc/Kconfig
arch/parisc/include/asm/elf.h
arch/parisc/include/asm/pdcpat.h
arch/parisc/include/asm/processor.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/firmware.c
arch/parisc/kernel/inventory.c
arch/parisc/kernel/perf.c
arch/parisc/kernel/process.c
arch/parisc/kernel/processor.c
arch/parisc/kernel/sys_parisc.c
arch/parisc/kernel/time.c
arch/x86/Kconfig
arch/x86/events/intel/cqm.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/intel_rdt.h [new file with mode: 0644]
arch/x86/include/asm/intel_rdt_common.h [new file with mode: 0644]
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/intel_rdt.c [new file with mode: 0644]
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c [new file with mode: 0644]
arch/x86/kernel/cpu/intel_rdt_schemata.c [new file with mode: 0644]
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
block/bsg.c
block/ioctl.c
block/scsi_ioctl.c
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/bus.c
drivers/acpi/nfit/core.c
drivers/acpi/osl.c
drivers/acpi/processor_core.c
drivers/acpi/scan.c
drivers/acpi/spcr.c
drivers/acpi/tables.c
drivers/base/cacheinfo.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/s3c64xx-cpufreq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_puda.c
drivers/infiniband/hw/i40iw/i40iw_type.h
drivers/infiniband/hw/i40iw/i40iw_ucontext.h
drivers/infiniband/hw/i40iw/i40iw_uk.c
drivers/infiniband/hw/i40iw/i40iw_user.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/i40iw/i40iw_verbs.h
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/dmar.c
drivers/mailbox/pcc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/mmc/core/core.c
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/nvme/host/pci.c
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_dbf.h
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fsf.h
drivers/s390/scsi/zfcp_reqlist.h
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/3w-9xxx.c
drivers/scsi/3w-9xxx.h
drivers/scsi/3w-sas.c
drivers/scsi/3w-sas.h
drivers/scsi/3w-xxxx.c
drivers/scsi/3w-xxxx.h
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR5380.c
drivers/scsi/NCR5380.h
drivers/scsi/aacraid/linit.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/g_NCR5380.c
drivers/scsi/g_NCR5380.h
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi/ibmvscsi.h
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qedi/Kconfig [new file with mode: 0644]
drivers/scsi/qedi/Makefile [new file with mode: 0644]
drivers/scsi/qedi/qedi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_dbg.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_dbg.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_debugfs.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_fw.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_gbl.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_hsi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_iscsi.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_iscsi.h [new file with mode: 0644]
drivers/scsi/qedi/qedi_main.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_sysfs.c [new file with mode: 0644]
drivers/scsi/qedi/qedi_version.h [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sg.c
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs-qcom.h
drivers/scsi/ufs/ufs_quirks.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/target/iscsi/cxgbit/cxgbit_target.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target.h
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_auth.h
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_datain_values.c
drivers/target/iscsi/iscsi_target_datain_values.h
drivers/target/iscsi/iscsi_target_device.h
drivers/target/iscsi/iscsi_target_erl0.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl1.h
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_erl2.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_nego.h
drivers/target/iscsi/iscsi_target_nodeattrib.h
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_parameters.h
drivers/target/iscsi/iscsi_target_seq_pdu_list.h
drivers/target/iscsi/iscsi_target_tmr.h
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_tpg.h
drivers/target/iscsi/iscsi_target_transport.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/loopback/tcm_loop.h
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_alua.h
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/target/target_core_iblock.h
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pr.h
drivers/target/target_core_pscsi.h
drivers/target/target_core_rd.c
drivers/target/target_core_rd.h
drivers/target/target_core_sbc.c
drivers/target/target_core_ua.h
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/target/target_core_xcopy.h
drivers/target/tcm_fc/tcm_fc.h
drivers/usb/gadget/function/f_tcm.c
fs/aio.c
fs/befs/befs.h
fs/befs/befs_fs_types.h
fs/befs/btree.c
fs/befs/btree.h
fs/befs/datastream.c
fs/befs/datastream.h
fs/befs/debug.c
fs/befs/inode.c
fs/befs/inode.h
fs/befs/io.c
fs/befs/io.h
fs/befs/linuxvfs.c
fs/befs/super.h
fs/compat.c
fs/exec.c
fs/namespace.c
fs/nfs/dir.c
fs/nfs/file.c
fs/nfs/filelayout/filelayoutdev.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.c
fs/ocfs2/refcounttree.c
fs/pnode.c
fs/read_write.c
fs/seq_file.c
fs/splice.c
fs/ufs/inode.c
fs/xfs/xfs_reflink.c
include/acpi/acpi_io.h
include/acpi/acpixf.h
include/acpi/actbl.h
include/acpi/platform/aclinuxex.h
include/linux/aio.h
include/linux/blkdev.h
include/linux/cacheinfo.h
include/linux/configfs.h
include/linux/nfs_fs.h
include/linux/sched.h
include/rdma/ib_addr.h
include/target/iscsi/iscsi_target_core.h
include/target/iscsi/iscsi_target_stat.h
include/target/iscsi/iscsi_transport.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/uapi/linux/magic.h
kernel/sys_ni.c
lib/iov_iter.c
scripts/selinux/genheaders/Makefile
scripts/selinux/genheaders/genheaders.c
scripts/selinux/mdp/Makefile
scripts/selinux/mdp/mdp.c
security/selinux/include/classmap.h
sound/usb/endpoint.c

diff --git a/CREDITS b/CREDITS
index 10a9eee807b6315540ed15062c811b8b6a7315c5..c58560701d13158f535046c09fd4b825922ced94 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3949,8 +3949,6 @@ E: gwingerde@gmail.com
 D: Ralink rt2x00 WLAN driver
 D: Minix V2 file-system
 D: Misc fixes
-S: Geessinkweg 177
-S: 7544 TX Enschede
 S: The Netherlands
 
 N: Lars Wirzenius
index 49874173705507f72fac2f5f55da46fc34c3cc52..2a4a423d08e0d3ed8bce7cc0c9bcd36bb30bb19d 100644 (file)
@@ -272,6 +272,22 @@ Description:       Parameters for the CPU cache attributes
                                     the modified cache line is written to main
                                     memory only when it is replaced
 
+
+What:          /sys/devices/system/cpu/cpu*/cache/index*/id
+Date:          September 2016
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:   Cache id
+
+               The id provides a unique number for a specific instance of
+               a cache of a particular type. E.g. there may be a level
+               3 unified cache on each socket in a server and we may
+               assign them ids 0, 1, 2, ...
+
+               Note that the id value can be non-contiguous. E.g. level 1
+               caches typically exist per core, but there may not be a
+               power of two cores on a socket, so these caches may be
+               numbered 0, 1, 2, 3, 4, 5, 8, 9, 10, ...
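+
+               For example (output illustrative; the index and the
+               value depend on the topology):
+
+               # cat /sys/devices/system/cpu/cpu0/cache/index3/id
+               0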
+
 What:          /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
                /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/turbo_stat
                /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/sub_turbo_stat
index 750374fc9d945d39f4426e22eed96532e3ba3e9f..c0f37cb41a9b47516f29cafa573d7886d103a2a7 100644 (file)
@@ -1,7 +1,9 @@
 * Cadence SD/SDIO/eMMC Host Controller
 
 Required properties:
-- compatible: should be "cdns,sd4hc".
+- compatible: should be one of the following:
+    "cdns,sd4hc"               - generic default for the IP
+    "socionext,uniphier-sd4hc" - for Socionext UniPhier SoCs
 - reg: offset and length of the register set for the device.
 - interrupts: a single interrupt specifier.
 - clocks: phandle to the input clock.
@@ -19,7 +21,7 @@ if supported.  See mmc.txt for details.
 
 Example:
        emmc: sdhci@5a000000 {
-               compatible = "cdns,sd4hc";
+               compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
                reg = <0x5a000000 0x400>;
                interrupts = <0 78 4>;
                clocks = <&clk 4>;
index b9b675539b9df20ed3a754a01a4f5819d12bf7bb..6ca98f9911bbb31693ff342039d5c84772e37d91 100644 (file)
@@ -7,7 +7,7 @@
     |         arch |status|
     -----------------------
     |       alpha: | TODO |
-    |         arc: | TODO |
+    |         arc: |  ok  |
     |         arm: |  ok  |
     |       arm64: |  ok  |
     |       avr32: | TODO |
index e2c187947e588d6146c464852e07735966f95145..37b1967a00a9f48e1d6cc870747ce3ae0ead07d6 100644 (file)
@@ -6,17 +6,15 @@ NCR53c400 extensions (c) 1994,1995,1996 Kevin Lentin
 This file documents the NCR53c400 extensions by Kevin Lentin and some
 enhancements to the NCR5380 core.
 
-This driver supports both NCR5380 and NCR53c400 cards in port or memory
-mapped modes. Currently this driver can only support one of those mapping
-modes at a time but it does support both of these chips at the same time.
-The next release of this driver will support port & memory mapped cards at
-the same time. It should be able to handle multiple different cards in the
-same machine.
+This driver supports NCR5380 and NCR53c400 and compatible cards in port or
+memory mapped modes.
 
-The drivers/scsi/Makefile has an override in it for the most common
-NCR53c400 card, the Trantor T130B in its default configuration:
-       Port: 0x350
-       IRQ : 5
+Use of an interrupt is recommended, if supported by the board, as this will
+allow targets to disconnect and thereby improve SCSI bus utilization.
+
+If the irq parameter is 254 or is omitted entirely, the driver will probe
+for the correct IRQ line automatically. If the irq parameter is 0 or 255
+then no IRQ will be used.
 
 The NCR53c400 does not support DMA but it does have Pseudo-DMA which is
 supported by the driver.
@@ -47,22 +45,24 @@ These old-style parameters can support only one card:
   dtc_3181e=1  to set up for a Domex Technology Corp 3181E board
   hp_c2502=1   to set up for a Hewlett Packard C2502 board
 
-e.g.
-OLD: modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
-NEW: modprobe g_NCR5380 irq=5 base=0x350 card=0
-  for a port mapped NCR5380 board or
-
-OLD: modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
-NEW: modprobe g_NCR5380 irq=255 base=0xc8000 card=1
-  for a memory mapped NCR53C400 board with interrupts disabled or
+E.g. Trantor T130B in its default configuration:
+modprobe g_NCR5380 irq=5 base=0x350 card=1
+or alternatively, using the old syntax,
+modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_53c400=1
 
-NEW: modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
-  for two cards: DTC3181 (in non-PnP mode) at 0x240 with no IRQ
-             and HP C2502 at 0x300 with IRQ 7
+E.g. a port mapped NCR5380 board, driver to probe for IRQ:
+modprobe g_NCR5380 base=0x350 card=0
+or alternatively,
+modprobe g_NCR5380 ncr_addr=0x350 ncr_5380=1
 
-(255 should be specified for no or DMA interrupt, 254 to autoprobe for an 
-     IRQ line if overridden on the command line.)
+E.g. a memory mapped NCR53C400 board with no IRQ:
+modprobe g_NCR5380 irq=255 base=0xc8000 card=1
+or alternatively,
+modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
 
+E.g. two cards, DTC3181 (in non-PnP mode) at 0x240 with no IRQ
+and HP C2502 at 0x300 with IRQ 7:
+modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
 
 Kevin Lentin
 K.Lentin@cs.monash.edu.au
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
new file mode 100644 (file)
index 0000000..d918d26
--- /dev/null
@@ -0,0 +1,214 @@
+User Interface for Resource Allocation in Intel Resource Director Technology
+
+Copyright (C) 2016 Intel Corporation
+
+Fenghua Yu <fenghua.yu@intel.com>
+Tony Luck <tony.luck@intel.com>
+
+This feature is enabled by the CONFIG_INTEL_RDT_A Kconfig option; its
+presence is indicated by the X86 /proc/cpuinfo flag bits "rdt", "cat_l3"
+and "cdp_l3".
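+
+Whether the CPU advertises these bits can be checked before mounting
+(an optional sanity check; output abbreviated and illustrative):
+
+ # grep -o 'rdt\|cat_l3\|cdp_l3' /proc/cpuinfo | sort -u
+ cat_l3
+ cdp_l3
+ rdt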
+
+To use the feature, mount the file system:
+
+ # mount -t resctrl resctrl [-o cdp] /sys/fs/resctrl
+
+mount options are:
+
+"cdp": Enable code/data prioritization in L3 cache allocations.
+
+
+Info directory
+--------------
+
+The 'info' directory contains information about the enabled
+resources. Each resource has its own subdirectory. The subdirectory
+names reflect the resource names. Each subdirectory contains the
+following files:
+
+"num_closids":  The number of CLOSIDs which are valid for this
+               resource. The kernel uses the smallest number of
+               CLOSIDs of all enabled resources as limit.
+
+"cbm_mask":     The bitmask which is valid for this resource. This
+               mask is equivalent to 100%.
+
+"min_cbm_bits": The minimum number of consecutive bits which must be
+               set when writing a mask.
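+
+For illustration, reading these files might look like this (the values
+are hypothetical and vary by CPU model):
+
+ # cat /sys/fs/resctrl/info/L3/num_closids
+ 16
+ # cat /sys/fs/resctrl/info/L3/cbm_mask
+ fffff
+ # cat /sys/fs/resctrl/info/L3/min_cbm_bits
+ 1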
+
+
+Resource groups
+---------------
+Resource groups are represented as directories in the resctrl file
+system. The default group is the root directory. Other groups may be
+created as desired by the system administrator using the "mkdir(1)"
+command, and removed using "rmdir(1)".
+
+There are three files associated with each group:
+
+"tasks": A list of tasks that belongs to this group. Tasks can be
+       added to a group by writing the task ID to the "tasks" file
+       (which will automatically remove them from the previous
+       group to which they belonged). New tasks created by fork(2)
+       and clone(2) are added to the same group as their parent.
+       If a pid is not in any sub partition, it is in root partition
+       (i.e. default partition).
+
+"cpus": A bitmask of logical CPUs assigned to this group. Writing
+       a new mask can add/remove CPUs from this group. Added CPUs
+       are removed from their previous group. Removed ones are
+       given to the default (root) group. You cannot remove CPUs
+       from the default group.
+
+"schemata": A list of all the resources available to this group.
+       Each resource has its own line and format - see below for
+       details.
+
+When a task is running, the following rules define which resources
+are available to it:
+
+1) If the task is a member of a non-default group, then the schemata
+for that group is used.
+
+2) Else if the task belongs to the default group, but is running on a
+CPU that is assigned to some specific group, then the schemata for
+the CPU's group is used.
+
+3) Otherwise the schemata for the default group is used.
+
+
+Schemata files - general concepts
+---------------------------------
+Each line in the file describes one resource. The line starts with
+the name of the resource, followed by specific values to be applied
+in each of the instances of that resource on the system.
+
+Cache IDs
+---------
+On current generation systems there is one L3 cache per socket and L2
+caches are generally just shared by the hyperthreads on a core, but this
+isn't an architectural requirement. We could have multiple separate L3
+caches on a socket, or multiple cores could share an L2 cache. So instead
+of using "socket" or "core" to define the set of logical CPUs sharing
+a resource we use a "Cache ID". At a given cache level this will be a
+unique number across the whole system (but it isn't guaranteed to be a
+contiguous sequence; there may be gaps). To find the ID for each logical
+CPU look in /sys/devices/system/cpu/cpu*/cache/index*/id
+
+Cache Bit Masks (CBM)
+---------------------
+For cache resources we describe the portion of the cache that is available
+for allocation using a bitmask. The maximum value of the mask is defined
+by each cpu model (and may be different for different cache levels). It
+is found using CPUID, but is also provided in the "info" directory of
+the resctrl file system in "info/{resource}/cbm_mask". X86 hardware
+requires that these masks have all the '1' bits in a contiguous block. So
+0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9
+and 0xA are not.  On a system with a 20-bit mask each bit represents 5%
+of the capacity of the cache. You could partition the cache into four
+equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
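+
+For instance, a mask with a hole in it (such as 0x5) is expected to be
+rejected when written; a hypothetical session:
+
+ # echo "L3:0=5;1=fffff" > p0/schemata
+ -bash: echo: write error: Invalid argument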
+
+
+L3 details (code and data prioritization disabled)
+--------------------------------------------------
+With CDP disabled the L3 schemata format is:
+
+       L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+L3 details (CDP enabled via mount option to resctrl)
+----------------------------------------------------
+When CDP is enabled L3 control is split into two separate resources
+so you can specify independent masks for code and data like this:
+
+       L3data:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+       L3code:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+L2 details
+----------
+L2 cache does not support code and data prioritization, so the
+schemata format is always:
+
+       L2:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
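+
+For example, with CDP enabled a freshly created group's schemata file
+might read (illustrative values for a 20-bit mask):
+
+ # cat p0/schemata
+ L3data:0=fffff;1=fffff
+ L3code:0=fffff;1=fffff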
+
+Example 1
+---------
+On a two-socket machine (one L3 cache per socket) with just four bits
+for cache bit masks:
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+# mkdir p0 p1
+# echo "L3:0=3;1=c" > /sys/fs/resctrl/p0/schemata
+# echo "L3:0=3;1=3" > /sys/fs/resctrl/p1/schemata
+
+The default resource group is unmodified, so we have access to all parts
+of all caches (its schemata file reads "L3:0=f;1=f").
+
+Tasks that are under the control of group "p0" may only allocate from the
+"lower" 50% on cache ID 0, and the "upper" 50% of cache ID 1.
+Tasks in group "p1" use the "lower" 50% of cache on both sockets.
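+
+(With a 4-bit mask, 0x3 covers bit positions 0-1, the "lower" half, and
+0xc covers bit positions 2-3, the "upper" half.)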
+
+Example 2
+---------
+Again two sockets, but this time with a more realistic 20-bit mask.
+
+Two real-time tasks, pid=1234 running on processor 0 and pid=5678 running
+on processor 1 of socket 0, on a two-socket, dual-core machine. To avoid
+noisy neighbors, each of the two real-time tasks exclusively occupies one
+quarter of the L3 cache on socket 0.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+
+First we reset the schemata for the default group so that the "upper"
+50% of the L3 cache on socket 0 cannot be used by ordinary tasks:
+
+# echo "L3:0=3ff;1=fffff" > schemata
+
+Next we make a resource group for our first real time task and give
+it access to the "top" 25% of the cache on socket 0.
+
+# mkdir p0
+# echo "L3:0=f8000;1=fffff" > p0/schemata
+
+Finally we move our first real time task into this resource group. We
+also use taskset(1) to ensure the task always runs on a dedicated CPU
+on socket 0. Most uses of resource groups will also constrain which
+processors tasks run on.
+
+# echo 1234 > p0/tasks
+# taskset -cp 1 1234
+
+Ditto for the second real time task (with the remaining 25% of cache):
+
+# mkdir p1
+# echo "L3:0=7c00;1=fffff" > p1/schemata
+# echo 5678 > p1/tasks
+# taskset -cp 2 5678
+
+Example 3
+---------
+
+A single socket system which has real-time tasks running on cores 4-7
+and a non real-time workload assigned to cores 0-3. The real-time tasks
+share text and data, so a per-task association is not required; due to
+interaction with the kernel it is desired that the kernel on these cores
+shares L3 with the tasks.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+
+First we reset the schemata for the default group so that the "upper"
+50% of the L3 cache on socket 0 cannot be used by ordinary tasks:
+
+# echo "L3:0=3ff" > schemata
+
+Next we make a resource group for our real time cores and give
+it access to the "top" 50% of the cache on socket 0.
+
+# mkdir p0
+# echo "L3:0=ffc00;" > p0/schemata
+
+Finally we move cores 4-7 over to the new group and make sure that the
+kernel and the tasks running there get 50% of the cache.
+
+# echo f0 > p0/cpus
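+
+As a hypothetical check, reading the cpus file back should show the mask
+covering cores 4-7:
+
+ # cat p0/cpus
+ f0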
index f6eb97b35e0fd58fe353cefce963ce7ff162491f..979126a9a150028d26b3531fd5715833dbedc0b4 100644 (file)
@@ -143,7 +143,7 @@ S:  Maintained
 F:     drivers/net/ethernet/3com/typhoon*
 
 3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
-M:     Adam Radford <linuxraid@lsi.com>
+M:     Adam Radford <aradford@gmail.com>
 L:     linux-scsi@vger.kernel.org
 W:     http://www.lsi.com
 S:     Supported
@@ -1747,7 +1747,7 @@ F:        drivers/staging/media/platform/s5p-cec/
 
 ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
 M:     Andrzej Pietrasiewicz <andrzej.p@samsung.com>
-M:     Jacek Anaszewski <j.anaszewski@samsung.com>
+M:     Jacek Anaszewski <jacek.anaszewski@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org
 L:     linux-media@vger.kernel.org
 S:     Maintained
@@ -7229,7 +7229,7 @@ F:        drivers/scsi/53c700*
 
 LED SUBSYSTEM
 M:     Richard Purdie <rpurdie@rpsys.net>
-M:     Jacek Anaszewski <j.anaszewski@samsung.com>
+M:     Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M:     Pavel Machek <pavel@ucw.cz>
 L:     linux-leds@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
@@ -10136,6 +10136,12 @@ F:     drivers/net/ethernet/qlogic/qed/
 F:     include/linux/qed/
 F:     drivers/net/ethernet/qlogic/qede/
 
+QLOGIC QL41xxx ISCSI DRIVER
+M:     QLogic-Storage-Upstream@cavium.com
+L:     linux-scsi@vger.kernel.org
+S:     Supported
+F:     drivers/scsi/qedi/
+
 QNX4 FILESYSTEM
 M:     Anders Larsen <al@alarsen.net>
 W:     http://www.alarsen.net/linux/qnx4fs/
@@ -10327,6 +10333,14 @@ L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/sw/rdmavt
 
+RDT - RESOURCE ALLOCATION
+M:     Fenghua Yu <fenghua.yu@intel.com>
+L:     linux-kernel@vger.kernel.org
+S:     Supported
+F:     arch/x86/kernel/cpu/intel_rdt*
+F:     arch/x86/include/asm/intel_rdt*
+F:     Documentation/x86/intel_rdt*
+
 READ-COPY UPDATE (RCU)
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 M:     Josh Triplett <josh@joshtriplett.org>
index ab12723d39a01d8751807728a43c7b08cdc6a814..c75d29077e4a654276219883629444deec89c955 100644 (file)
@@ -9,6 +9,7 @@
 config ARC
        def_bool y
        select ARC_TIMERS
+       select ARCH_HAS_SG_CHAIN
        select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
index da41a54ea2d747011b6c20dcc0b1738e2a2e8062..f659942744de0c474962b118292100eeed7397ce 100644 (file)
@@ -244,7 +244,7 @@ struct cpuinfo_arc_mmu {
 };
 
 struct cpuinfo_arc_cache {
-       unsigned int sz_k:14, line_len:8, assoc:4, ver:4, alias:1, vipt:1;
+       unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4;
 };
 
 struct cpuinfo_arc_bpu {
index a093adbdb017580f6da74abb551ea02e14e04da5..fc662f49c55ac91916af7cbd830b1c978f827ffe 100644 (file)
@@ -85,6 +85,10 @@ void flush_anon_page(struct vm_area_struct *vma,
  */
 #define PG_dc_clean    PG_arch_1
 
+#define CACHE_COLORS_NUM       4
+#define CACHE_COLORS_MSK       (CACHE_COLORS_NUM - 1)
+#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
+
 /*
  * Simple wrapper over config option
  * Bootup code ensures that hardware matches kernel configuration
@@ -94,8 +98,6 @@ static inline int cache_is_vipt_aliasing(void)
        return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
 }
 
-#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
-
 /*
  * checks if two addresses (after page aligning) index into same cache set
  */
index e880dfa3fcd370c0f7ae8f9010a79df9ab8c3399..a64c447b0337804568c69cf2e15998deabdd9b40 100644 (file)
 #define AUX_IRQ_ACT_BIT_U      31
 
 /*
- * User space should be interruptable even by lowest prio interrupt
- * Safe even if actual interrupt priorities is fewer or even one
+ * Hardware supports 16 priorities (0 highest, 15 lowest)
+ * Linux by default runs at 1, priority 0 reserved for NMI style interrupts
  */
-#define ARCV2_IRQ_DEF_PRIO     15
+#define ARCV2_IRQ_DEF_PRIO     1
 
 /* seed value for status register */
 #define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | STATUS_AD_MASK | \
index 7a1c124ff021d53377d24ff5d1bb9cccbb2831de..0b6388a5f0b828323c03b53f0903a35bdc25b0cb 100644 (file)
@@ -67,12 +67,23 @@ ENTRY(handle_interrupt)
 
        INTERRUPT_PROLOGUE  irq
 
-       clri            ; To make status32.IE agree with CPU internal state
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-       TRACE_ASM_IRQ_DISABLE
-#endif
-
+       # irq control APIs local_irq_save/restore/disable/enable fiddle with
+       # global interrupt enable bits in STATUS32 (.IE for 1 prio, .E[] for 2 prio)
+       # However a taken interrupt doesn't clear these bits. Thus irqs_disabled()
+       # query in hard ISR path would return false (since .IE is set) which would
+       # trip genirq interrupt handling asserts.
+       #
+       # So do a "soft" disable of interrupts here.
+       #
+       # Note this disable is only for consistent book-keeping as further interrupts
+       # will be disabled anyway even w/o this. Hardware tracks active interrupts
+       # separately in AUX_IRQ_ACTIVE.active and will not take new interrupts
+       # unless this one returns (or higher prio becomes pending in 2-prio scheme)
+
+       IRQ_DISABLE
+
+       ; icause is banked: one per priority level
+       ; so a higher prio interrupt taken here won't clobber prev prio icause
        lr  r0, [ICAUSE]
        mov   blink, ret_from_exception
 
@@ -171,6 +182,7 @@ END(EV_TLBProtV)
 ; All 2 entry points to here already disable interrupts
 
 .Lrestore_regs:
+restore_regs:
 
       # Interrupts are actually disabled from this point on, but will get
        # reenabled after we return from interrupt/exception.
index 98812c1248dfaf85b28ed023287ae78f27286073..9211707634dcf57e1aa0ac13fa4a6bbc79b99eeb 100644 (file)
@@ -259,7 +259,7 @@ ENTRY(EV_TLBProtV)
 
        EXCEPTION_PROLOGUE
 
-       lr  r2, [ecr]
+       mov r2, r9      ; ECR set into r9 already
        lr  r0, [efa]   ; Faulting Data address (not part of pt_regs saved above)
 
        ; Exception auto-disables further Intr/exceptions.
index 62b59409a5d97eb8b95276a3f17b1e430b98899b..994dca7014db645b32cfb22753cb25bae4c46566 100644 (file)
@@ -14,8 +14,6 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
-static int irq_prio;
-
 /*
  * Early Hardware specific Interrupt setup
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,7 +22,7 @@ static int irq_prio;
  */
 void arc_init_IRQ(void)
 {
-       unsigned int tmp;
+       unsigned int tmp, irq_prio;
 
        struct irq_build {
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -67,12 +65,12 @@ void arc_init_IRQ(void)
 
        irq_prio = irq_bcr.prio;        /* Encoded as N-1 for N levels */
        pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
-               irq_prio + 1, irq_prio,
+               irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
                irq_bcr.firq ? " FIRQ (not used)":"");
 
        /* setup status32, don't enable intr yet as kernel doesn't want */
        tmp = read_aux_reg(0xa);
-       tmp |= STATUS_AD_MASK | (irq_prio << 1);
+       tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
        tmp &= ~STATUS_IE_MASK;
        asm volatile("kflag %0  \n"::"r"(tmp));
 }
@@ -93,7 +91,7 @@ void arcv2_irq_enable(struct irq_data *data)
 {
        /* set default priority */
        write_aux_reg(AUX_IRQ_SELECT, data->irq);
-       write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
+       write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
        /*
         * hw auto enables (linux unmask) all by default
index 50d71695cd4ecbeefd64f28e9f44265195b01d15..ec86ac0e33213b889cd6100e10e95fda8f3c31e4 100644 (file)
@@ -40,7 +40,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
        struct cpuinfo_arc_cache *p;
 
 #define PR_CACHE(p, cfg, str)                                          \
-       if (!(p)->ver)                                                  \
+       if (!(p)->line_len)                                             \
                n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
        else                                                            \
                n += scnprintf(buf + n, len - n,                        \
@@ -54,7 +54,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
        PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
 
        p = &cpuinfo_arc700[c].slc;
-       if (p->ver)
+       if (p->line_len)
                n += scnprintf(buf + n, len - n,
                               "SLC\t\t: %uK, %uB Line%s\n",
                               p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
@@ -104,7 +104,6 @@ static void read_decode_cache_bcr_arcv2(int cpu)
        READ_BCR(ARC_REG_SLC_BCR, sbcr);
        if (sbcr.ver) {
                READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
-               p_slc->ver = sbcr.ver;
                p_slc->sz_k = 128 << slc_cfg.sz;
                l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
        }
@@ -152,7 +151,6 @@ void read_decode_cache_bcr(void)
 
        p_ic->line_len = 8 << ibcr.line_len;
        p_ic->sz_k = 1 << (ibcr.sz - 1);
-       p_ic->ver = ibcr.ver;
        p_ic->vipt = 1;
        p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
 
@@ -176,7 +174,6 @@ dc_chk:
 
        p_dc->line_len = 16 << dbcr.line_len;
        p_dc->sz_k = 1 << (dbcr.sz - 1);
-       p_dc->ver = dbcr.ver;
 
 slc_chk:
        if (is_isa_arcv2())
@@ -945,17 +942,13 @@ void arc_cache_init(void)
        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
 
-               if (!ic->ver)
+               if (!ic->line_len)
                        panic("cache support enabled but non-existent cache\n");
 
                if (ic->line_len != L1_CACHE_BYTES)
                        panic("ICache line [%d] != kernel Config [%d]",
                              ic->line_len, L1_CACHE_BYTES);
 
-               if (ic->ver != CONFIG_ARC_MMU_VER)
-                       panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
-                             ic->ver, CONFIG_ARC_MMU_VER);
-
                /*
                 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
                 * pair to provide vaddr/paddr respectively, just as in MMU v3
@@ -969,7 +962,7 @@ void arc_cache_init(void)
        if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
                struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
 
-               if (!dc->ver)
+               if (!dc->line_len)
                        panic("cache support enabled but non-existent cache\n");
 
                if (dc->line_len != L1_CACHE_BYTES)
@@ -979,11 +972,16 @@ void arc_cache_init(void)
                /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
                if (is_isa_arcompact()) {
                        int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-
-                       if (dc->alias && !handled)
-                               panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-                       else if (!dc->alias && handled)
+                       int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
+
+                       if (dc->alias) {
+                               if (!handled)
+                                       panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+                               if (CACHE_COLORS_NUM != num_colors)
+                                       panic("CACHE_COLORS_NUM not optimized for config\n");
+                       } else if (!dc->alias && handled) {
                                panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+                       }
                }
        }
 
index d0de0e032bc2c689f849ee679754f7b18d9451c3..c1976c0adca73025ec4526fc40be077101f8ef62 100644 (file)
@@ -29,7 +29,7 @@
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
-/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
+/* ACPI table mapping after acpi_permanent_mmap is set */
 static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
                                            acpi_size size)
 {
index 252a6d9c1da5d7868fffd23ec6ed93a95944c91d..64d9cbd61678233dc24648f11bb5b242df3ee244 100644 (file)
@@ -132,14 +132,13 @@ static int __init acpi_fadt_sanity_check(void)
        struct acpi_table_header *table;
        struct acpi_table_fadt *fadt;
        acpi_status status;
-       acpi_size tbl_size;
        int ret = 0;
 
        /*
         * FADT is required on arm64; retrieve it to check its presence
         * and carry out revision and ACPI HW reduced compliancy tests
         */
-       status = acpi_get_table_with_size(ACPI_SIG_FADT, 0, &table, &tbl_size);
+       status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
        if (ACPI_FAILURE(status)) {
                const char *msg = acpi_format_exception(status);
 
@@ -170,10 +169,10 @@ static int __init acpi_fadt_sanity_check(void)
 
 out:
        /*
-        * acpi_get_table_with_size() creates FADT table mapping that
+        * acpi_get_table() creates FADT table mapping that
         * should be released after parsing and before resuming boot
         */
-       early_acpi_os_unmap_memory(table, tbl_size);
+       acpi_put_table(table);
        return ret;
 }
 
index a14b865870131a052c995fed98cc9e3ed7a3cd2d..3a71f38cdc0553eeb8c026b2b12d3b412e2f2657 100644 (file)
@@ -7,6 +7,7 @@ config PARISC
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_SYSCALL_TRACEPOINTS
        select ARCH_WANT_FRAME_POINTERS
+       select ARCH_HAS_ELF_RANDOMIZE
        select RTC_CLASS
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
index 78c9fd32c5546b6ec91d591e853d89a761383c92..a6b2a421571edfb5f981e1558d75525a23a59404 100644 (file)
@@ -348,9 +348,10 @@ struct pt_regs;    /* forward declaration... */
 
 #define ELF_HWCAP      0
 
-#define STACK_RND_MASK (is_32bit_task() ? \
-                               0x7ff >> (PAGE_SHIFT - 12) : \
-                               0x3ffff >> (PAGE_SHIFT - 12))
+/* Masks for stack and mmap randomization */
+#define BRK_RND_MASK   (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK  (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
+#define STACK_RND_MASK MMAP_RND_MASK
 
 struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *);
index 47539f11795835e8397bb77602d8e82d99e14a3f..e1d289092705f00431408f90ca6bd48e47c8cba4 100644 (file)
@@ -289,7 +289,7 @@ extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
 extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr);
 extern int pdc_pat_cell_num_to_loc(void *, unsigned long);
 
-extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
+extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa);
 
 extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset);
 
index ca40741378be76c70ecf718a7240e0b96c316530..a3661ee6b060c1d258ab740e5468cfffb665f8d5 100644 (file)
@@ -93,9 +93,7 @@ struct system_cpuinfo_parisc {
 /* Per CPU data structure - ie varies per CPU.  */
 struct cpuinfo_parisc {
        unsigned long it_value;     /* Interval Timer at last timer Intr */
-       unsigned long it_delta;     /* Interval delta (tic_10ms / HZ * 100) */
        unsigned long irq_count;    /* number of IRQ's since boot */
-       unsigned long irq_max_cr16; /* longest time to handle a single IRQ */
        unsigned long cpuid;        /* aka slot_number or set to NO_PROC_ID */
        unsigned long hpa;          /* Host Physical address */
        unsigned long txn_addr;     /* MMIO addr of EIR or id_eid */
@@ -103,8 +101,6 @@ struct cpuinfo_parisc {
        unsigned long pending_ipi;  /* bitmap of type ipi_message_type */
 #endif
        unsigned long bh_count;     /* number of times bh was invoked */
-       unsigned long prof_counter; /* per CPU profiling support */
-       unsigned long prof_multiplier;  /* per CPU profiling support */
        unsigned long fp_rev;
        unsigned long fp_model;
        unsigned int state;
index 4fcff2dcc9c304decddf7b7643e90f7803968531..ad4cb1613c57a56552e32d795d5d83fdffd99672 100644 (file)
@@ -878,6 +878,9 @@ ENTRY_CFI(syscall_exit_rfi)
        STREG   %r19,PT_SR7(%r16)
 
 intr_return:
+       /* NOTE: Need to enable interrupts in case we schedule. */
+       ssm     PSW_SM_I, %r0
+
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
@@ -904,11 +907,6 @@ intr_check_sig:
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
-       /* NOTE: We need to enable interrupts if we have to deliver
-        * signals. We used to do this earlier but it caused kernel
-        * stack overflows. */
-       ssm     PSW_SM_I, %r0
-
        copy    %r0, %r25                       /* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
@@ -960,10 +958,6 @@ intr_do_resched:
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
 
-       /* NOTE: We need to enable interrupts if we schedule.  We used
-        * to do this earlier but it caused kernel stack overflows. */
-       ssm     PSW_SM_I, %r0
-
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
 #endif
index e5d71905cad567cc03e22ffdeb7f7295d635b12b..9d797ae4fa22248665a13e0ff29d72532e9e4bdb 100644 (file)
@@ -1258,7 +1258,7 @@ int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long
  *
  * Retrieve the cpu number for the cpu at the specified HPA.
  */
-int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa)
+int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa)
 {
        int retval;
        unsigned long flags;
index c05d1876d27c4975453194686976c6cb0147531d..c9789d9c73b40478bb5ff931fd2c3afb30a4840f 100644 (file)
@@ -216,9 +216,9 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
        register_parisc_device(dev);    /* advertise device */
 
 #ifdef DEBUG_PAT
-       pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
        /* dump what we see so far... */
        switch (PAT_GET_ENTITY(dev->mod_info)) {
+               pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
                unsigned long i;
 
        case PAT_ENTITY_PROC:
@@ -259,9 +259,9 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
                                pa_pdc_cell->mod[4 + i * 3]);   /* finish (ie end) */
                        printk(KERN_DEBUG 
                                "  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 
-                               i, io_pdc_cell->mod[2 + i * 3], /* type */
-                               io_pdc_cell->mod[3 + i * 3],    /* start */
-                               io_pdc_cell->mod[4 + i * 3]);   /* finish (ie end) */
+                               i, io_pdc_cell.mod[2 + i * 3],  /* type */
+                               io_pdc_cell.mod[3 + i * 3],     /* start */
+                               io_pdc_cell.mod[4 + i * 3]);    /* finish (ie end) */
                }
                printk(KERN_DEBUG "\n");
                break;
index 518f4f5f1f43ec6b2dcaceb9b2f5c9536097d59f..6eabce62463bbbcf29031143c46d7b75c841b6fa 100644 (file)
@@ -301,7 +301,6 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
 static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
        loff_t *ppos)
 {
-       int err;
        size_t image_size;
        uint32_t image_type;
        uint32_t interface_type;
@@ -320,8 +319,8 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
        if (count != sizeof(uint32_t))
                return -EIO;
 
-       if ((err = copy_from_user(&image_type, buf, sizeof(uint32_t))) != 0) 
-               return err;
+       if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
+               return -EFAULT;
 
        /* Get the interface type and test type */
        interface_type = (image_type >> 16) & 0xffff;
index 40639439d8b35c7cec7c60d978ae6aa5b070b22f..ea6603ee8d24981abe93c5bc79477e626e54254a 100644 (file)
@@ -276,11 +276,7 @@ void *dereference_function_descriptor(void *ptr)
 
 static inline unsigned long brk_rnd(void)
 {
-       /* 8MB for 32bit, 1GB for 64bit */
-       if (is_32bit_task())
-               return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-       else
-               return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+       return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
 }
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
index 0c2a94a0f7518b8082ecda3f0307ead9534ea972..85de47f4eb594564bc9639ff76a099b5ff9aa8c7 100644 (file)
@@ -78,11 +78,6 @@ DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 static void
 init_percpu_prof(unsigned long cpunum)
 {
-       struct cpuinfo_parisc *p;
-
-       p = &per_cpu(cpu_data, cpunum);
-       p->prof_counter = 1;
-       p->prof_multiplier = 1;
 }
 
 
@@ -99,6 +94,7 @@ static int processor_probe(struct parisc_device *dev)
        unsigned long txn_addr;
        unsigned long cpuid;
        struct cpuinfo_parisc *p;
+       struct pdc_pat_cpu_num cpu_info __maybe_unused;
 
 #ifdef CONFIG_SMP
        if (num_online_cpus() >= nr_cpu_ids) {
@@ -123,10 +119,6 @@ static int processor_probe(struct parisc_device *dev)
                ulong status;
                unsigned long bytecnt;
                pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
-#undef USE_PAT_CPUID
-#ifdef USE_PAT_CPUID
-               struct pdc_pat_cpu_num cpu_info;
-#endif
 
                pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
                if (!pa_pdc_cell)
@@ -145,22 +137,27 @@ static int processor_probe(struct parisc_device *dev)
 
                kfree(pa_pdc_cell);
 
+               /* get the cpu number */
+               status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
+               BUG_ON(PDC_OK != status);
+
+               pr_info("Logical CPU #%lu is physical cpu #%lu at location "
+                       "0x%lx with hpa %pa\n",
+                       cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
+                       &dev->hpa.start);
+
+#undef USE_PAT_CPUID
 #ifdef USE_PAT_CPUID
 /* We need contiguous numbers for cpuid. Firmware's notion
  * of cpuid is for physical CPUs and we just don't care yet.
  * We'll care when we need to query PAT PDC about a CPU *after*
  * boot time (ie shutdown a CPU from an OS perspective).
  */
-               /* get the cpu number */
-               status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
-
-               BUG_ON(PDC_OK != status);
-
                if (cpu_info.cpu_num >= NR_CPUS) {
-                       printk(KERN_WARNING "IGNORING CPU at 0x%x,"
+                       printk(KERN_WARNING "IGNORING CPU at %pa,"
                                " cpu_slot_id > NR_CPUS"
                                " (%ld > %d)\n",
-                               dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
+                               &dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
                        /* Ignore CPU since it will only crash */
                        boot_cpu_data.cpu_count--;
                        return 1;
index 0a393a04e89182cba498fa64774dd32177860eb7..a81e177cac7be0c7d1d0f86f0a871d350fea5fdc 100644 (file)
@@ -225,19 +225,17 @@ static unsigned long mmap_rnd(void)
 {
        unsigned long rnd = 0;
 
-       /*
-       *  8 bits of randomness in 32bit mmaps, 20 address space bits
-       * 28 bits of randomness in 64bit mmaps, 40 address space bits
-       */
-       if (current->flags & PF_RANDOMIZE) {
-               if (is_32bit_task())
-                       rnd = get_random_int() % (1<<8);
-               else
-                       rnd = get_random_int() % (1<<28);
-       }
+       if (current->flags & PF_RANDOMIZE)
+               rnd = get_random_int() & MMAP_RND_MASK;
+
        return rnd << PAGE_SHIFT;
 }
 
+unsigned long arch_mmap_rnd(void)
+{
+       return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
+}
+
 static unsigned long mmap_legacy_base(void)
 {
        return TASK_UNMAPPED_BASE + mmap_rnd();
index 325f30d82b6434368425d652402fabf66fd4f8ee..4215f5596c8b6291516a9d39190e77df1b2bf488 100644 (file)
@@ -59,10 +59,9 @@ static unsigned long clocktick __read_mostly;        /* timer cycles per tick */
  */
 irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 {
-       unsigned long now, now2;
+       unsigned long now;
        unsigned long next_tick;
-       unsigned long cycles_elapsed, ticks_elapsed = 1;
-       unsigned long cycles_remainder;
+       unsigned long ticks_elapsed = 0;
        unsigned int cpu = smp_processor_id();
        struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 
@@ -71,102 +70,49 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 
        profile_tick(CPU_PROFILING);
 
-       /* Initialize next_tick to the expected tick time. */
+       /* Initialize next_tick to the old expected tick time. */
        next_tick = cpuinfo->it_value;
 
-       /* Get current cycle counter (Control Register 16). */
-       now = mfctl(16);
-
-       cycles_elapsed = now - next_tick;
-
-       if ((cycles_elapsed >> 6) < cpt) {
-               /* use "cheap" math (add/subtract) instead
-                * of the more expensive div/mul method
-                */
-               cycles_remainder = cycles_elapsed;
-               while (cycles_remainder > cpt) {
-                       cycles_remainder -= cpt;
-                       ticks_elapsed++;
-               }
-       } else {
-               /* TODO: Reduce this to one fdiv op */
-               cycles_remainder = cycles_elapsed % cpt;
-               ticks_elapsed += cycles_elapsed / cpt;
-       }
-
-       /* convert from "division remainder" to "remainder of clock tick" */
-       cycles_remainder = cpt - cycles_remainder;
-
-       /* Determine when (in CR16 cycles) next IT interrupt will fire.
-        * We want IT to fire modulo clocktick even if we miss/skip some.
-        * But those interrupts don't in fact get delivered that regularly.
-        */
-       next_tick = now + cycles_remainder;
+       /* Calculate how many ticks have elapsed. */
+       do {
+               ++ticks_elapsed;
+               next_tick += cpt;
+               now = mfctl(16);
+       } while (next_tick - now > cpt);
 
+       /* Store (in CR16 cycles) up to when we are accounting right now. */
        cpuinfo->it_value = next_tick;
 
-       /* Program the IT when to deliver the next interrupt.
-        * Only bottom 32-bits of next_tick are writable in CR16!
-        */
-       mtctl(next_tick, 16);
+       /* Go do system house keeping. */
+       if (cpu == 0)
+               xtime_update(ticks_elapsed);
+
+       update_process_times(user_mode(get_irq_regs()));
 
-       /* Skip one clocktick on purpose if we missed next_tick.
+       /* Skip clockticks on purpose if we know we would miss those.
         * The new CR16 must be "later" than current CR16 otherwise
         * itimer would not fire until CR16 wrapped - e.g 4 seconds
         * later on a 1Ghz processor. We'll account for the missed
-        * tick on the next timer interrupt.
+        * ticks on the next timer interrupt.
+        * We want IT to fire modulo clocktick even if we miss/skip some.
+        * But those interrupts don't in fact get delivered that regularly.
         *
         * "next_tick - now" will always give the difference regardless
         * if one or the other wrapped. If "now" is "bigger" we'll end up
         * with a very large unsigned number.
         */
-       now2 = mfctl(16);
-       if (next_tick - now2 > cpt)
-               mtctl(next_tick+cpt, 16);
+       while (next_tick - mfctl(16) > cpt)
+               next_tick += cpt;
 
-#if 1
-/*
- * GGG: DEBUG code for how many cycles programming CR16 used.
- */
-       if (unlikely(now2 - now > 0x3000))      /* 12K cycles */
-               printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
-                       " cyc %lX rem %lX "
-                       " next/now %lX/%lX\n",
-                       cpu, now2 - now, cycles_elapsed, cycles_remainder,
-                       next_tick, now );
-#endif
-
-       /* Can we differentiate between "early CR16" (aka Scenario 1) and
-        * "long delay" (aka Scenario 3)? I don't think so.
-        *
-        * Timer_interrupt will be delivered at least a few hundred cycles
-        * after the IT fires. But it's arbitrary how much time passes
-        * before we call it "late". I've picked one second.
-        *
-        * It's important NO printk's are between reading CR16 and
-        * setting up the next value. May introduce huge variance.
-        */
-       if (unlikely(ticks_elapsed > HZ)) {
-               /* Scenario 3: very long delay?  bad in any case */
-               printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
-                       " cycles %lX rem %lX "
-                       " next/now %lX/%lX\n",
-                       cpu,
-                       cycles_elapsed, cycles_remainder,
-                       next_tick, now );
-       }
-
-       /* Done mucking with unreliable delivery of interrupts.
-        * Go do system house keeping.
+       /* Program the IT when to deliver the next interrupt.
+        * Only bottom 32-bits of next_tick are writable in CR16!
+        * Timer interrupt will be delivered at least a few hundred cycles
+        * after the IT fires, so if we are too close (<= 500 cycles) to the
+        * next cycle, simply skip it.
         */
-
-       if (!--cpuinfo->prof_counter) {
-               cpuinfo->prof_counter = cpuinfo->prof_multiplier;
-               update_process_times(user_mode(get_irq_regs()));
-       }
-
-       if (cpu == 0)
-               xtime_update(ticks_elapsed);
+       if (next_tick - mfctl(16) <= 500)
+               next_tick += cpt;
+       mtctl(next_tick, 16);
 
        return IRQ_HANDLED;
 }
index 64024c9995314a9488bb83261baa1d6f7a9b5cae..e487493bbd47f0f84caed99f6c952bf6b3062413 100644 (file)
@@ -412,6 +412,19 @@ config GOLDFISH
        def_bool y
        depends on X86_GOLDFISH
 
+config INTEL_RDT_A
+       bool "Intel Resource Director Technology Allocation support"
+       default n
+       depends on X86 && CPU_SUP_INTEL
+       select KERNFS
+       help
+         Select to enable resource allocation which is a sub-feature of
+         Intel Resource Director Technology(RDT). More information about
+         RDT can be found in the Intel x86 Architecture Software
+         Developer Manual.
+
+         Say N if unsure.
+
 if X86_32
 config X86_EXTENDED_PLATFORM
        bool "Support for extended (non-PC) x86 platforms"
index 8f82b02934fa701a451ee5e7d8f76193eaefc2fa..0c45cc8e64ba77f6988cff5a0e5443dfe0449435 100644 (file)
@@ -7,9 +7,9 @@
 #include <linux/perf_event.h>
 #include <linux/slab.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel_rdt_common.h>
 #include "../perf_event.h"
 
-#define MSR_IA32_PQR_ASSOC     0x0c8f
 #define MSR_IA32_QM_CTR                0x0c8e
 #define MSR_IA32_QM_EVTSEL     0x0c8d
 
@@ -24,32 +24,13 @@ static unsigned int cqm_l3_scale; /* supposedly cacheline size */
 static bool cqm_enabled, mbm_enabled;
 unsigned int mbm_socket_max;
 
-/**
- * struct intel_pqr_state - State cache for the PQR MSR
- * @rmid:              The cached Resource Monitoring ID
- * @closid:            The cached Class Of Service ID
- * @rmid_usecnt:       The usage counter for rmid
- *
- * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
- * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
- * contains both parts, so we need to cache them.
- *
- * The cache also helps to avoid pointless updates if the value does
- * not change.
- */
-struct intel_pqr_state {
-       u32                     rmid;
-       u32                     closid;
-       int                     rmid_usecnt;
-};
-
 /*
  * The cached intel_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Both functions which modify the state
  * (intel_cqm_event_start and intel_cqm_event_stop) are called with
  * interrupts disabled, which is sufficient for the protection.
  */
-static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
 static struct hrtimer *mbm_timers;
 /**
  * struct sample - mbm event's (local or total) data
index 6ccbf1aaa7ce1f72021757593c99b555480f78da..eafee3161d1c0fa04cd82b12dcf15003a89d04b4 100644 (file)
 
 #define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3     ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2     ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3     ( 7*32+ 6) /* Code and Data Prioritization L3 */
 
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_RTM                ( 9*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_CQM                ( 9*32+12) /* Cache QoS Monitoring */
 #define X86_FEATURE_MPX                ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A      ( 9*32+15) /* Resource Director Technology Allocation */
 #define X86_FEATURE_AVX512F    ( 9*32+16) /* AVX-512 Foundation */
 #define X86_FEATURE_AVX512DQ   ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
 #define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
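These feature bits mirror CPUID enumeration. A user-space sketch of the lookups, assuming a toolchain whose <cpuid.h> provides __get_cpuid_count() (bit positions per the SDM: leaf 7 EBX[15] for RDT-A, leaf 0x10 EBX[1]/EBX[2] for L3/L2 CAT):

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID.(EAX=07H, ECX=0):EBX[15] is the bit that
		 * X86_FEATURE_RDT_A mirrors. */
		if (!__get_cpuid_count(0x07, 0, &eax, &ebx, &ecx, &edx))
			return 1;
		printf("rdt_a:  %u\n", (ebx >> 15) & 1);

		/* CPUID.(EAX=10H, ECX=0):EBX enumerates the allocation
		 * resources: bit 1 = L3 CAT, bit 2 = L2 CAT. */
		if (!__get_cpuid_count(0x10, 0, &eax, &ebx, &ecx, &edx))
			return 1;
		printf("cat_l3: %u\n", (ebx >> 1) & 1);
		printf("cat_l2: %u\n", (ebx >> 2) & 1);
		return 0;
	}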
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
new file mode 100644 (file)
index 0000000..95ce5c8
--- /dev/null
@@ -0,0 +1,224 @@
+#ifndef _ASM_X86_INTEL_RDT_H
+#define _ASM_X86_INTEL_RDT_H
+
+#ifdef CONFIG_INTEL_RDT_A
+
+#include <linux/kernfs.h>
+#include <linux/jump_label.h>
+
+#include <asm/intel_rdt_common.h>
+
+#define IA32_L3_QOS_CFG                0xc81
+#define IA32_L3_CBM_BASE       0xc90
+#define IA32_L2_CBM_BASE       0xd10
+
+#define L3_QOS_CDP_ENABLE      0x01ULL
+
+/**
+ * struct rdtgroup - store rdtgroup's data in resctrl file system.
+ * @kn:                                kernfs node
+ * @rdtgroup_list:             linked list for all rdtgroups
+ * @closid:                    closid for this rdtgroup
+ * @cpu_mask:                  CPUs assigned to this rdtgroup
+ * @flags:                     status bits
+ * @waitcount:                 how many cpus expect to find this
+ *                             group when they acquire rdtgroup_mutex
+ */
+struct rdtgroup {
+       struct kernfs_node      *kn;
+       struct list_head        rdtgroup_list;
+       int                     closid;
+       struct cpumask          cpu_mask;
+       int                     flags;
+       atomic_t                waitcount;
+};
+
+/* rdtgroup.flags */
+#define        RDT_DELETED             1
+
+/* List of all resource groups */
+extern struct list_head rdt_all_groups;
+
+int __init rdtgroup_init(void);
+
+/**
+ * struct rftype - describe each file in the resctrl file system
+ * @name: file name
+ * @mode: access mode
+ * @kf_ops: operations
+ * @seq_show: show content of the file
+ * @write: write to the file
+ */
+struct rftype {
+       char                    *name;
+       umode_t                 mode;
+       struct kernfs_ops       *kf_ops;
+
+       int (*seq_show)(struct kernfs_open_file *of,
+                       struct seq_file *sf, void *v);
+       /*
+        * write() is the generic write callback which maps directly to
+        * kernfs write operation and overrides all other operations.
+        * Maximum write size is determined by ->max_write_len.
+        */
+       ssize_t (*write)(struct kernfs_open_file *of,
+                        char *buf, size_t nbytes, loff_t off);
+};
+
+/**
+ * struct rdt_resource - attributes of an RDT resource
+ * @enabled:                   Is this feature enabled on this machine
+ * @capable:                   Is this feature available on this machine
+ * @name:                      Name to use in "schemata" file
+ * @num_closid:                        Number of CLOSIDs available
+ * @cbm_len:                   Number of bits in the cache bit mask
+ * @max_cbm:                   Largest Cache Bit Mask allowed
+ * @min_cbm_bits:              Minimum number of consecutive bits to be set
+ *                             in a cache bit mask
+ * @domains:                   All domains for this resource
+ * @num_domains:               Number of domains active
+ * @msr_base:                  Base MSR address for CBMs
+ * @tmp_cbms:                  Scratch space when updating schemata
+ * @num_tmp_cbms:              Number of CBMs in tmp_cbms
+ * @cache_level:               Which cache level defines scope of this domain
+ * @cbm_idx_multi:             Multiplier of CBM index
+ * @cbm_idx_offset:            Offset of CBM index. CBM index is computed by:
+ *                             closid * cbm_idx_multi + cbm_idx_offset
+ */
+struct rdt_resource {
+       bool                    enabled;
+       bool                    capable;
+       char                    *name;
+       int                     num_closid;
+       int                     cbm_len;
+       int                     min_cbm_bits;
+       u32                     max_cbm;
+       struct list_head        domains;
+       int                     num_domains;
+       int                     msr_base;
+       u32                     *tmp_cbms;
+       int                     num_tmp_cbms;
+       int                     cache_level;
+       int                     cbm_idx_multi;
+       int                     cbm_idx_offset;
+};
+
+/**
+ * struct rdt_domain - group of cpus sharing an RDT resource
+ * @list:      all instances of this resource
+ * @id:                unique id for this instance
+ * @cpu_mask:  which cpus share this resource
+ * @cbm:       array of cache bit masks (indexed by CLOSID)
+ */
+struct rdt_domain {
+       struct list_head        list;
+       int                     id;
+       struct cpumask          cpu_mask;
+       u32                     *cbm;
+};
+
+/**
+ * struct msr_param - set a range of MSRs from a domain
+ * @res:       The resource to use
+ * @low:       Beginning index from base MSR
+ * @high:      End index
+ */
+struct msr_param {
+       struct rdt_resource     *res;
+       int                     low;
+       int                     high;
+};
+
+extern struct mutex rdtgroup_mutex;
+
+extern struct rdt_resource rdt_resources_all[];
+extern struct rdtgroup rdtgroup_default;
+DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+
+
+enum {
+       RDT_RESOURCE_L3,
+       RDT_RESOURCE_L3DATA,
+       RDT_RESOURCE_L3CODE,
+       RDT_RESOURCE_L2,
+
+       /* Must be the last */
+       RDT_NUM_RESOURCES,
+};
+
+#define for_each_capable_rdt_resource(r)                                     \
+       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+            r++)                                                             \
+               if (r->capable)
+
+#define for_each_enabled_rdt_resource(r)                                     \
+       for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+            r++)                                                             \
+               if (r->enabled)
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
+union cpuid_0x10_1_eax {
+       struct {
+               unsigned int cbm_len:5;
+       } split;
+       unsigned int full;
+};
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
+union cpuid_0x10_1_edx {
+       struct {
+               unsigned int cos_max:16;
+       } split;
+       unsigned int full;
+};
+
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+
+void rdt_cbm_update(void *arg);
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
+void rdtgroup_kn_unlock(struct kernfs_node *kn);
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+                               char *buf, size_t nbytes, loff_t off);
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+                          struct seq_file *s, void *v);
+
+/*
+ * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
+ *
+ * Following considerations are made so that this has minimal impact
+ * on scheduler hot path:
+ * - This will stay as no-op unless we are running on an Intel SKU
+ *   which supports resource control and we enable by mounting the
+ *   resctrl file system.
+ * - Caches the per cpu CLOSid values and does the MSR write only
+ *   when a task with a different CLOSid is scheduled in.
+ *
+ * Must be called with preemption disabled.
+ */
+static inline void intel_rdt_sched_in(void)
+{
+       if (static_branch_likely(&rdt_enable_key)) {
+               struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+               int closid;
+
+               /*
+                * If this task has a closid assigned, use it.
+                * Else use the closid assigned to this cpu.
+                */
+               closid = current->closid;
+               if (closid == 0)
+                       closid = this_cpu_read(cpu_closid);
+
+               if (closid != state->closid) {
+                       state->closid = closid;
+                       wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
+               }
+       }
+}
+
+#else
+
+static inline void intel_rdt_sched_in(void) {}
+
+#endif /* CONFIG_INTEL_RDT_A */
+#endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/include/asm/intel_rdt_common.h b/arch/x86/include/asm/intel_rdt_common.h
new file mode 100644 (file)
index 0000000..b31081b
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef _ASM_X86_INTEL_RDT_COMMON_H
+#define _ASM_X86_INTEL_RDT_COMMON_H
+
+#define MSR_IA32_PQR_ASSOC     0x0c8f
+
+/**
+ * struct intel_pqr_state - State cache for the PQR MSR
+ * @rmid:              The cached Resource Monitoring ID
+ * @closid:            The cached Class Of Service ID
+ * @rmid_usecnt:       The usage counter for rmid
+ *
+ * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
+ * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
+ * contains both parts, so we need to cache them.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
+ */
+struct intel_pqr_state {
+       u32                     rmid;
+       u32                     closid;
+       int                     rmid_usecnt;
+};
+
+DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+
+#endif /* _ASM_X86_INTEL_RDT_COMMON_H */
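The layout documented here (closid in the upper 32 bits, rmid in the lower bits) means the kernel's wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid) writes lo = rmid, hi = closid. A minimal stand-alone sketch of the packing, not part of the patch:

	#include <stdio.h>
	#include <stdint.h>

	/* Compose the 64-bit PQR_ASSOC value the way wrmsr(msr, lo, hi)
	 * does: closid in the high word, rmid in the low word. */
	static uint64_t pqr_assoc_val(uint32_t rmid, uint32_t closid)
	{
		return ((uint64_t)closid << 32) | rmid;
	}

	int main(void)
	{
		/* rmid 3, closid 2 -> 0x0000000200000003 */
		printf("0x%016llx\n",
		       (unsigned long long)pqr_assoc_val(3, 2));
		return 0;
	}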
index 33b63670bf09e6d34c1f194cfe3600d9ea16dcb4..52000010c62ebaaf60939186128d44af757d2226 100644 (file)
@@ -32,6 +32,8 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)         += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)     += transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)           += umc.o
 
+obj-$(CONFIG_INTEL_RDT_A)      += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_schemata.o
+
 obj-$(CONFIG_X86_MCE)                  += mcheck/
 obj-$(CONFIG_MTRR)                     += mtrr/
 obj-$(CONFIG_MICROCODE)                        += microcode/
index be633715650212059e8a67eb7a4974b188d5e076..0282b0df004a86023d00abceb53cbed24fe118f6 100644 (file)
@@ -153,6 +153,7 @@ struct _cpuid4_info_regs {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
+       unsigned int id;
        unsigned long size;
        struct amd_northbridge *nb;
 };
@@ -894,6 +895,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
 static void ci_leaf_init(struct cacheinfo *this_leaf,
                         struct _cpuid4_info_regs *base)
 {
+       this_leaf->id = base->id;
+       this_leaf->attributes = CACHE_ID;
        this_leaf->level = base->eax.split.level;
        this_leaf->type = cache_type_map[base->eax.split.type];
        this_leaf->coherency_line_size =
@@ -920,6 +923,22 @@ static int __init_cache_level(unsigned int cpu)
        return 0;
 }
 
+/*
+ * The max shared threads number comes from CPUID.4:EAX[25-14] with input
+ * ECX as cache index. Then right shift apicid by the number's order to get
+ * cache id for this cache node.
+ */
+static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
+{
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+       unsigned long num_threads_sharing;
+       int index_msb;
+
+       num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
+       index_msb = get_count_order(num_threads_sharing);
+       id4_regs->id = c->apicid >> index_msb;
+}
+
 static int __populate_cache_leaves(unsigned int cpu)
 {
        unsigned int idx, ret;
@@ -931,6 +950,7 @@ static int __populate_cache_leaves(unsigned int cpu)
                ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
                if (ret)
                        return ret;
+               get_cache_id(cpu, &id4_regs);
                ci_leaf_init(this_leaf++, &id4_regs);
                __cache_cpumap_setup(cpu, idx, &id4_regs);
        }
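The id computed by get_cache_id() is just the APIC id with the thread-sharing bits shifted out. A stand-alone sketch with made-up numbers, not part of the patch:

	#include <stdio.h>

	/* User-space analog of the kernel's get_count_order():
	 * smallest n such that 2^n >= x. */
	static int count_order(unsigned int x)
	{
		int n = 0;

		while ((1u << n) < x)
			n++;
		return n;
	}

	int main(void)
	{
		/* Hypothetical part: CPUID.4 EAX[25:14] reports 15, i.e.
		 * up to 16 threads share this cache leaf. */
		unsigned int num_threads_sharing = 1 + 15;
		int index_msb = count_order(num_threads_sharing);  /* 4 */
		unsigned int apicid = 37;	/* made-up APIC id */

		/* All 16 sibling APIC ids (32..47) collapse to cache id 2. */
		printf("cache id = %u\n", apicid >> index_msb);
		return 0;
	}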
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
new file mode 100644 (file)
index 0000000..5a533fe
--- /dev/null
@@ -0,0 +1,403 @@
+/*
+ * Resource Director Technology(RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ *    Fenghua Yu <fenghua.yu@intel.com>
+ *    Tony Luck <tony.luck@intel.com>
+ *    Vikas Shivappa <vikas.shivappa@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpuhotplug.h>
+
+#include <asm/intel-family.h>
+#include <asm/intel_rdt.h>
+
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+
+struct rdt_resource rdt_resources_all[] = {
+       {
+               .name           = "L3",
+               .domains        = domain_init(RDT_RESOURCE_L3),
+               .msr_base       = IA32_L3_CBM_BASE,
+               .min_cbm_bits   = 1,
+               .cache_level    = 3,
+               .cbm_idx_multi  = 1,
+               .cbm_idx_offset = 0
+       },
+       {
+               .name           = "L3DATA",
+               .domains        = domain_init(RDT_RESOURCE_L3DATA),
+               .msr_base       = IA32_L3_CBM_BASE,
+               .min_cbm_bits   = 1,
+               .cache_level    = 3,
+               .cbm_idx_multi  = 2,
+               .cbm_idx_offset = 0
+       },
+       {
+               .name           = "L3CODE",
+               .domains        = domain_init(RDT_RESOURCE_L3CODE),
+               .msr_base       = IA32_L3_CBM_BASE,
+               .min_cbm_bits   = 1,
+               .cache_level    = 3,
+               .cbm_idx_multi  = 2,
+               .cbm_idx_offset = 1
+       },
+       {
+               .name           = "L2",
+               .domains        = domain_init(RDT_RESOURCE_L2),
+               .msr_base       = IA32_L2_CBM_BASE,
+               .min_cbm_bits   = 1,
+               .cache_level    = 2,
+               .cbm_idx_multi  = 1,
+               .cbm_idx_offset = 0
+       },
+};
+
+static int cbm_idx(struct rdt_resource *r, int closid)
+{
+       return closid * r->cbm_idx_multi + r->cbm_idx_offset;
+}
+
+/*
+ * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
+ * as they do not have CPUID enumeration support for Cache allocation.
+ * The check for Vendor/Family/Model is not enough to guarantee that
+ * the MSRs won't #GP fault because only the following SKUs support
+ * CAT:
+ *     Intel(R) Xeon(R)  CPU E5-2658  v3  @  2.20GHz
+ *     Intel(R) Xeon(R)  CPU E5-2648L v3  @  1.80GHz
+ *     Intel(R) Xeon(R)  CPU E5-2628L v3  @  2.00GHz
+ *     Intel(R) Xeon(R)  CPU E5-2618L v3  @  2.30GHz
+ *     Intel(R) Xeon(R)  CPU E5-2608L v3  @  2.00GHz
+ *     Intel(R) Xeon(R)  CPU E5-2658A v3  @  2.20GHz
+ *
+ * Probe by trying to write the first of the L3 cache mask registers
+ * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
+ * is always 20 on hsw server parts. The minimum cache bitmask length
+ * allowed for HSW server is always 2 bits. Hardcode all of them.
+ */
+static inline bool cache_alloc_hsw_probe(void)
+{
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+           boot_cpu_data.x86 == 6 &&
+           boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
+               struct rdt_resource *r  = &rdt_resources_all[RDT_RESOURCE_L3];
+               u32 l, h, max_cbm = BIT_MASK(20) - 1;
+
+               if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+                       return false;
+               rdmsr(IA32_L3_CBM_BASE, l, h);
+
+               /* If any of the bits did not stick, the probe failed */
+               if (l != max_cbm)
+                       return false;
+
+               r->num_closid = 4;
+               r->cbm_len = 20;
+               r->max_cbm = max_cbm;
+               r->min_cbm_bits = 2;
+               r->capable = true;
+               r->enabled = true;
+
+               return true;
+       }
+
+       return false;
+}
+
+static void rdt_get_config(int idx, struct rdt_resource *r)
+{
+       union cpuid_0x10_1_eax eax;
+       union cpuid_0x10_1_edx edx;
+       u32 ebx, ecx;
+
+       cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
+       r->num_closid = edx.split.cos_max + 1;
+       r->cbm_len = eax.split.cbm_len + 1;
+       r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+       r->capable = true;
+       r->enabled = true;
+}
+
+static void rdt_get_cdp_l3_config(int type)
+{
+       struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+       struct rdt_resource *r = &rdt_resources_all[type];
+
+       r->num_closid = r_l3->num_closid / 2;
+       r->cbm_len = r_l3->cbm_len;
+       r->max_cbm = r_l3->max_cbm;
+       r->capable = true;
+       /*
+        * By default, CDP is disabled. CDP can be enabled by mount parameter
+        * "cdp" during resctrl file system mount time.
+        */
+       r->enabled = false;
+}
+
+static inline bool get_rdt_resources(void)
+{
+       bool ret = false;
+
+       if (cache_alloc_hsw_probe())
+               return true;
+
+       if (!boot_cpu_has(X86_FEATURE_RDT_A))
+               return false;
+
+       if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
+               rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+               if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
+                       rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
+                       rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
+               }
+               ret = true;
+       }
+       if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
+               /* CPUID 0x10.2 fields are the same format as 0x10.1 */
+               rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+               ret = true;
+       }
+
+       return ret;
+}
+
+static int get_cache_id(int cpu, int level)
+{
+       struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+       int i;
+
+       for (i = 0; i < ci->num_leaves; i++) {
+               if (ci->info_list[i].level == level)
+                       return ci->info_list[i].id;
+       }
+
+       return -1;
+}
+
+void rdt_cbm_update(void *arg)
+{
+       struct msr_param *m = (struct msr_param *)arg;
+       struct rdt_resource *r = m->res;
+       int i, cpu = smp_processor_id();
+       struct rdt_domain *d;
+
+       list_for_each_entry(d, &r->domains, list) {
+               /* Find the domain that contains this CPU */
+               if (cpumask_test_cpu(cpu, &d->cpu_mask))
+                       goto found;
+       }
+       pr_info_once("cpu %d not found in any domain for resource %s\n",
+                    cpu, r->name);
+
+       return;
+
+found:
+       for (i = m->low; i < m->high; i++) {
+               int idx = cbm_idx(r, i);
+
+               wrmsrl(r->msr_base + idx, d->cbm[i]);
+       }
+}
+
+/*
+ * rdt_find_domain - Find a domain in a resource that matches input resource id
+ *
+ * Search resource r's domain list to find the resource id. If the resource
+ * id is found in a domain, return the domain. Otherwise, if requested by
+ * the caller, set *pos to the list position where a domain with this id
+ * would be inserted. The domain list is sorted by id in ascending order.
+ */
+static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+                                         struct list_head **pos)
+{
+       struct rdt_domain *d;
+       struct list_head *l;
+
+       if (id < 0)
+               return ERR_PTR(id);
+
+       list_for_each(l, &r->domains) {
+               d = list_entry(l, struct rdt_domain, list);
+               /* When id is found, return its domain. */
+               if (id == d->id)
+                       return d;
+               /* Stop searching when finding id's position in sorted list. */
+               if (id < d->id)
+                       break;
+       }
+
+       if (pos)
+               *pos = l;
+
+       return NULL;
+}
+
+/*
+ * domain_add_cpu - Add a cpu to a resource's domain list.
+ *
+ * If an existing domain in the resource r's domain list matches the cpu's
+ * resource id, add the cpu in the domain.
+ *
+ * Otherwise, a new domain is allocated and inserted into the right position
+ * in the domain list sorted by id in ascending order.
+ *
+ * The order in the domain list is visible to users when we print entries
+ * in the schemata file and schemata input is validated to have the same order
+ * as this list.
+ */
+static void domain_add_cpu(int cpu, struct rdt_resource *r)
+{
+       int i, id = get_cache_id(cpu, r->cache_level);
+       struct list_head *add_pos = NULL;
+       struct rdt_domain *d;
+
+       d = rdt_find_domain(r, id, &add_pos);
+       if (IS_ERR(d)) {
+               pr_warn("Couldn't find cache id for cpu %d\n", cpu);
+               return;
+       }
+
+       if (d) {
+               cpumask_set_cpu(cpu, &d->cpu_mask);
+               return;
+       }
+
+       d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
+       if (!d)
+               return;
+
+       d->id = id;
+
+       d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
+       if (!d->cbm) {
+               kfree(d);
+               return;
+       }
+
+       for (i = 0; i < r->num_closid; i++) {
+               int idx = cbm_idx(r, i);
+
+               d->cbm[i] = r->max_cbm;
+               wrmsrl(r->msr_base + idx, d->cbm[i]);
+       }
+
+       cpumask_set_cpu(cpu, &d->cpu_mask);
+       list_add_tail(&d->list, add_pos);
+       r->num_domains++;
+}
+
+static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+{
+       int id = get_cache_id(cpu, r->cache_level);
+       struct rdt_domain *d;
+
+       d = rdt_find_domain(r, id, NULL);
+       if (IS_ERR_OR_NULL(d)) {
+               pr_warn("Couldn't find cache id for cpu %d\n", cpu);
+               return;
+       }
+
+       cpumask_clear_cpu(cpu, &d->cpu_mask);
+       if (cpumask_empty(&d->cpu_mask)) {
+               r->num_domains--;
+               kfree(d->cbm);
+               list_del(&d->list);
+               kfree(d);
+       }
+}
+
+static void clear_closid(int cpu)
+{
+       struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+
+       per_cpu(cpu_closid, cpu) = 0;
+       state->closid = 0;
+       wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
+}
+
+static int intel_rdt_online_cpu(unsigned int cpu)
+{
+       struct rdt_resource *r;
+
+       mutex_lock(&rdtgroup_mutex);
+       for_each_capable_rdt_resource(r)
+               domain_add_cpu(cpu, r);
+       /* The cpu is set in default rdtgroup after online. */
+       cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
+       clear_closid(cpu);
+       mutex_unlock(&rdtgroup_mutex);
+
+       return 0;
+}
+
+static int intel_rdt_offline_cpu(unsigned int cpu)
+{
+       struct rdtgroup *rdtgrp;
+       struct rdt_resource *r;
+
+       mutex_lock(&rdtgroup_mutex);
+       for_each_capable_rdt_resource(r)
+               domain_remove_cpu(cpu, r);
+       list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+               if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
+                       break;
+       }
+       clear_closid(cpu);
+       mutex_unlock(&rdtgroup_mutex);
+
+       return 0;
+}
+
+static int __init intel_rdt_late_init(void)
+{
+       struct rdt_resource *r;
+       int state, ret;
+
+       if (!get_rdt_resources())
+               return -ENODEV;
+
+       state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                                 "x86/rdt/cat:online:",
+                                 intel_rdt_online_cpu, intel_rdt_offline_cpu);
+       if (state < 0)
+               return state;
+
+       ret = rdtgroup_init();
+       if (ret) {
+               cpuhp_remove_state(state);
+               return ret;
+       }
+
+       for_each_capable_rdt_resource(r)
+               pr_info("Intel RDT %s allocation detected\n", r->name);
+
+       return 0;
+}
+
+late_initcall(intel_rdt_late_init);
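The cbm_idx() arithmetic above is what lets L3DATA and L3CODE share the L3 CBM MSR range once CDP is enabled: with multi = 2 they stride over even/odd registers. A small sketch of the resulting MSR map (base address from the defines in intel_rdt.h), not part of the patch:

	#include <stdio.h>

	#define IA32_L3_CBM_BASE	0xc90

	/* Same arithmetic as cbm_idx() in intel_rdt.c. */
	static int cbm_idx(int multi, int offset, int closid)
	{
		return closid * multi + offset;
	}

	int main(void)
	{
		int closid;

		/* L3DATA: multi = 2, offset = 0; L3CODE: multi = 2,
		 * offset = 1. Each CLOSID therefore owns an even/odd
		 * pair of CBM MSRs: 0xc90/0xc91, 0xc92/0xc93, ... */
		for (closid = 0; closid < 3; closid++)
			printf("closid %d: data 0x%x, code 0x%x\n", closid,
			       IA32_L3_CBM_BASE + cbm_idx(2, 0, closid),
			       IA32_L3_CBM_BASE + cbm_idx(2, 1, closid));
		return 0;
	}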
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
new file mode 100644 (file)
index 0000000..8af04af
--- /dev/null
@@ -0,0 +1,1115 @@
+/*
+ * User interface for Resource Allocation in Resource Director Technology (RDT)
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/task_work.h>
+
+#include <uapi/linux/magic.h>
+
+#include <asm/intel_rdt.h>
+#include <asm/intel_rdt_common.h>
+
+DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
+struct kernfs_root *rdt_root;
+struct rdtgroup rdtgroup_default;
+LIST_HEAD(rdt_all_groups);
+
+/* Kernel fs node for "info" directory under root */
+static struct kernfs_node *kn_info;
+
+/*
+ * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
+ * we can keep a bitmap of free CLOSIDs in a single integer.
+ *
+ * Using a global CLOSID across all resources has some advantages and
+ * some drawbacks:
+ * + We can simply set "current->closid" to assign a task to a resource
+ *   group.
+ * + Context switch code can avoid extra memory references deciding which
+ *   CLOSID to load into the PQR_ASSOC MSR
+ * - We give up some options in configuring resource groups across multi-socket
+ *   systems.
+ * - Our choices on how to configure each resource become progressively more
+ *   limited as the number of resources grows.
+ */
+static int closid_free_map;
+
+static void closid_init(void)
+{
+       struct rdt_resource *r;
+       int rdt_min_closid = 32;
+
+       /* Compute rdt_min_closid across all resources */
+       for_each_enabled_rdt_resource(r)
+               rdt_min_closid = min(rdt_min_closid, r->num_closid);
+
+       closid_free_map = BIT_MASK(rdt_min_closid) - 1;
+
+       /* CLOSID 0 is always reserved for the default group */
+       closid_free_map &= ~1;
+}
+
+int closid_alloc(void)
+{
+       int closid = ffs(closid_free_map);
+
+       if (closid == 0)
+               return -ENOSPC;
+       closid--;
+       closid_free_map &= ~(1 << closid);
+
+       return closid;
+}
+
+static void closid_free(int closid)
+{
+       closid_free_map |= 1 << closid;
+}
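/*
 * A stand-alone user-space sketch of the ffs()-based allocator above;
 * illustrative only, not part of the patch. With a hypothetical
 * rdt_min_closid of 4, CLOSIDs 1..3 are handed out and 0 stays reserved
 * for the default group.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int closid_free_map;

static int closid_alloc(void)
{
	int closid = ffs(closid_free_map);

	if (closid == 0)
		return -1;		/* map exhausted */
	closid--;
	closid_free_map &= ~(1 << closid);
	return closid;
}

int main(void)
{
	closid_free_map = (1 << 4) - 1;	/* closid_init() for 4 CLOSIDs */
	closid_free_map &= ~1;		/* CLOSID 0 is reserved */

	int a = closid_alloc();		/* 1 */
	int b = closid_alloc();		/* 2 */
	int c = closid_alloc();		/* 3 */
	int d = closid_alloc();		/* -1 */

	printf("%d %d %d %d\n", a, b, c, d);
	return 0;
}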
+
+/* set uid and gid of rdtgroup dirs and files to that of the creator */
+static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
+{
+       struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
+                               .ia_uid = current_fsuid(),
+                               .ia_gid = current_fsgid(), };
+
+       if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
+           gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
+               return 0;
+
+       return kernfs_setattr(kn, &iattr);
+}
+
+static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
+{
+       struct kernfs_node *kn;
+       int ret;
+
+       kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
+                                 0, rft->kf_ops, rft, NULL, NULL);
+       if (IS_ERR(kn))
+               return PTR_ERR(kn);
+
+       ret = rdtgroup_kn_set_ugid(kn);
+       if (ret) {
+               kernfs_remove(kn);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int rdtgroup_add_files(struct kernfs_node *kn, struct rftype *rfts,
+                             int len)
+{
+       struct rftype *rft;
+       int ret;
+
+       lockdep_assert_held(&rdtgroup_mutex);
+
+       for (rft = rfts; rft < rfts + len; rft++) {
+               ret = rdtgroup_add_file(kn, rft);
+               if (ret)
+                       goto error;
+       }
+
+       return 0;
+error:
+       pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
+       while (--rft >= rfts)
+               kernfs_remove_by_name(kn, rft->name);
+       return ret;
+}
+
+static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
+{
+       struct kernfs_open_file *of = m->private;
+       struct rftype *rft = of->kn->priv;
+
+       if (rft->seq_show)
+               return rft->seq_show(of, m, arg);
+       return 0;
+}
+
+static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
+                                  size_t nbytes, loff_t off)
+{
+       struct rftype *rft = of->kn->priv;
+
+       if (rft->write)
+               return rft->write(of, buf, nbytes, off);
+
+       return -EINVAL;
+}
+
+static struct kernfs_ops rdtgroup_kf_single_ops = {
+       .atomic_write_len       = PAGE_SIZE,
+       .write                  = rdtgroup_file_write,
+       .seq_show               = rdtgroup_seqfile_show,
+};
+
+static int rdtgroup_cpus_show(struct kernfs_open_file *of,
+                             struct seq_file *s, void *v)
+{
+       struct rdtgroup *rdtgrp;
+       int ret = 0;
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+       if (rdtgrp)
+               seq_printf(s, "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask));
+       else
+               ret = -ENOENT;
+       rdtgroup_kn_unlock(of->kn);
+
+       return ret;
+}
+
+/*
+ * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * because __switch_to() is executed with interrupts disabled. A local call
+ * from rdt_update_closid() is protected against __switch_to() because
+ * preemption is disabled.
+ */
+static void rdt_update_cpu_closid(void *closid)
+{
+       if (closid)
+               this_cpu_write(cpu_closid, *(int *)closid);
+       /*
+        * We cannot unconditionally write the MSR because the current
+        * executing task might have its own closid selected. Just reuse
+        * the context switch code.
+        */
+       intel_rdt_sched_in();
+}
+
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids must have been set up before calling this function.
+ *
+ * The per cpu closids are updated with the smp function call, when @closid
+ * is not NULL. If @closid is NULL then all affected percpu closids must
+ * have been set up before calling this function.
+ */
+static void
+rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
+{
+       int cpu = get_cpu();
+
+       if (cpumask_test_cpu(cpu, cpu_mask))
+               rdt_update_cpu_closid(closid);
+       smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
+       put_cpu();
+}
+
+static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
+                                  char *buf, size_t nbytes, loff_t off)
+{
+       cpumask_var_t tmpmask, newmask;
+       struct rdtgroup *rdtgrp, *r;
+       int ret;
+
+       if (!buf)
+               return -EINVAL;
+
+       if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+               return -ENOMEM;
+       if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
+               free_cpumask_var(tmpmask);
+               return -ENOMEM;
+       }
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               ret = -ENOENT;
+               goto unlock;
+       }
+
+       ret = cpumask_parse(buf, newmask);
+       if (ret)
+               goto unlock;
+
+       /* check that user didn't specify any offline cpus */
+       cpumask_andnot(tmpmask, newmask, cpu_online_mask);
+       if (cpumask_weight(tmpmask)) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       /* Check whether cpus are dropped from this group */
+       cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
+       if (cpumask_weight(tmpmask)) {
+               /* Can't drop from default group */
+               if (rdtgrp == &rdtgroup_default) {
+                       ret = -EINVAL;
+                       goto unlock;
+               }
+               /* Give any dropped cpus to rdtgroup_default */
+               cpumask_or(&rdtgroup_default.cpu_mask,
+                          &rdtgroup_default.cpu_mask, tmpmask);
+               rdt_update_closid(tmpmask, &rdtgroup_default.closid);
+       }
+
+       /*
+        * If we added cpus, remove them from previous group that owned them
+        * and update per-cpu closid
+        */
+       cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
+       if (cpumask_weight(tmpmask)) {
+               list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
+                       if (r == rdtgrp)
+                               continue;
+                       cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
+               }
+               rdt_update_closid(tmpmask, &rdtgrp->closid);
+       }
+
+       /* Done pushing/pulling - update this group with new mask */
+       cpumask_copy(&rdtgrp->cpu_mask, newmask);
+
+unlock:
+       rdtgroup_kn_unlock(of->kn);
+       free_cpumask_var(tmpmask);
+       free_cpumask_var(newmask);
+
+       return ret ?: nbytes;
+}
+
+struct task_move_callback {
+       struct callback_head    work;
+       struct rdtgroup         *rdtgrp;
+};
+
+static void move_myself(struct callback_head *head)
+{
+       struct task_move_callback *callback;
+       struct rdtgroup *rdtgrp;
+
+       callback = container_of(head, struct task_move_callback, work);
+       rdtgrp = callback->rdtgrp;
+
+       /*
+        * If resource group was deleted before this task work callback
+        * was invoked, then assign the task to root group and free the
+        * resource group.
+        */
+       if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+           (rdtgrp->flags & RDT_DELETED)) {
+               current->closid = 0;
+               kfree(rdtgrp);
+       }
+
+       preempt_disable();
+       /* update PQR_ASSOC MSR to make resource group go into effect */
+       intel_rdt_sched_in();
+       preempt_enable();
+
+       kfree(callback);
+}
+
+static int __rdtgroup_move_task(struct task_struct *tsk,
+                               struct rdtgroup *rdtgrp)
+{
+       struct task_move_callback *callback;
+       int ret;
+
+       callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+       if (!callback)
+               return -ENOMEM;
+       callback->work.func = move_myself;
+       callback->rdtgrp = rdtgrp;
+
+       /*
+        * Take a refcount, so rdtgrp cannot be freed before the
+        * callback has been invoked.
+        */
+       atomic_inc(&rdtgrp->waitcount);
+       ret = task_work_add(tsk, &callback->work, true);
+       if (ret) {
+               /*
+                * Task is exiting. Drop the refcount and free the callback.
+                * No need to check the refcount as the group cannot be
+                * deleted before the write function unlocks rdtgroup_mutex.
+                */
+               atomic_dec(&rdtgrp->waitcount);
+               kfree(callback);
+       } else {
+               tsk->closid = rdtgrp->closid;
+       }
+       return ret;
+}
+
+static int rdtgroup_task_write_permission(struct task_struct *task,
+                                         struct kernfs_open_file *of)
+{
+       const struct cred *tcred = get_task_cred(task);
+       const struct cred *cred = current_cred();
+       int ret = 0;
+
+       /*
+        * Even if we're attaching all tasks in the thread group, we only
+        * need to check permissions on one of them.
+        */
+       if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
+           !uid_eq(cred->euid, tcred->uid) &&
+           !uid_eq(cred->euid, tcred->suid))
+               ret = -EPERM;
+
+       put_cred(tcred);
+       return ret;
+}
+
+static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
+                             struct kernfs_open_file *of)
+{
+       struct task_struct *tsk;
+       int ret;
+
+       rcu_read_lock();
+       if (pid) {
+               tsk = find_task_by_vpid(pid);
+               if (!tsk) {
+                       rcu_read_unlock();
+                       return -ESRCH;
+               }
+       } else {
+               tsk = current;
+       }
+
+       get_task_struct(tsk);
+       rcu_read_unlock();
+
+       ret = rdtgroup_task_write_permission(tsk, of);
+       if (!ret)
+               ret = __rdtgroup_move_task(tsk, rdtgrp);
+
+       put_task_struct(tsk);
+       return ret;
+}
+
+static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
+                                   char *buf, size_t nbytes, loff_t off)
+{
+       struct rdtgroup *rdtgrp;
+       int ret = 0;
+       pid_t pid;
+
+       if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+               return -EINVAL;
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+       if (rdtgrp)
+               ret = rdtgroup_move_task(pid, rdtgrp, of);
+       else
+               ret = -ENOENT;
+
+       rdtgroup_kn_unlock(of->kn);
+
+       return ret ?: nbytes;
+}
+
+static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
+{
+       struct task_struct *p, *t;
+
+       rcu_read_lock();
+       for_each_process_thread(p, t) {
+               if (t->closid == r->closid)
+                       seq_printf(s, "%d\n", t->pid);
+       }
+       rcu_read_unlock();
+}
+
+static int rdtgroup_tasks_show(struct kernfs_open_file *of,
+                              struct seq_file *s, void *v)
+{
+       struct rdtgroup *rdtgrp;
+       int ret = 0;
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (rdtgrp)
+               show_rdt_tasks(rdtgrp, s);
+       else
+               ret = -ENOENT;
+       rdtgroup_kn_unlock(of->kn);
+
+       return ret;
+}
+
+/* Files in each rdtgroup */
+static struct rftype rdtgroup_base_files[] = {
+       {
+               .name           = "cpus",
+               .mode           = 0644,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .write          = rdtgroup_cpus_write,
+               .seq_show       = rdtgroup_cpus_show,
+       },
+       {
+               .name           = "tasks",
+               .mode           = 0644,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .write          = rdtgroup_tasks_write,
+               .seq_show       = rdtgroup_tasks_show,
+       },
+       {
+               .name           = "schemata",
+               .mode           = 0644,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .write          = rdtgroup_schemata_write,
+               .seq_show       = rdtgroup_schemata_show,
+       },
+};
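/*
 * A user-space sketch of how these three per-group files are used; the
 * mount step and paths follow Documentation/x86/intel_rdt_ui.txt added
 * in this merge. Illustrative only, not part of the patch; error
 * handling is minimal.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	FILE *f;

	/* Mount the control filesystem; pass "cdp" as the data argument
	 * to enable code/data prioritization, which is handled by
	 * parse_rdtgroupfs_options() below. */
	if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, NULL) &&
	    errno != EBUSY)
		perror("mount");

	/* Each mkdir under the root allocates a CLOSID (rdtgroup_mkdir()). */
	if (mkdir("/sys/fs/resctrl/grp0", 0755) && errno != EEXIST)
		perror("mkdir");

	/* Move the current task into the new group via its "tasks" file. */
	f = fopen("/sys/fs/resctrl/grp0/tasks", "w");
	if (f) {
		fprintf(f, "%d\n", (int)getpid());
		fclose(f);
	}
	return 0;
}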
+
+static int rdt_num_closids_show(struct kernfs_open_file *of,
+                               struct seq_file *seq, void *v)
+{
+       struct rdt_resource *r = of->kn->parent->priv;
+
+       seq_printf(seq, "%d\n", r->num_closid);
+
+       return 0;
+}
+
+static int rdt_cbm_mask_show(struct kernfs_open_file *of,
+                            struct seq_file *seq, void *v)
+{
+       struct rdt_resource *r = of->kn->parent->priv;
+
+       seq_printf(seq, "%x\n", r->max_cbm);
+
+       return 0;
+}
+
+static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
+                            struct seq_file *seq, void *v)
+{
+       struct rdt_resource *r = of->kn->parent->priv;
+
+       seq_printf(seq, "%d\n", r->min_cbm_bits);
+
+       return 0;
+}
+
+/* rdtgroup information files for one cache resource. */
+static struct rftype res_info_files[] = {
+       {
+               .name           = "num_closids",
+               .mode           = 0444,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .seq_show       = rdt_num_closids_show,
+       },
+       {
+               .name           = "cbm_mask",
+               .mode           = 0444,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .seq_show       = rdt_cbm_mask_show,
+       },
+       {
+               .name           = "min_cbm_bits",
+               .mode           = 0444,
+               .kf_ops         = &rdtgroup_kf_single_ops,
+               .seq_show       = rdt_min_cbm_bits_show,
+       },
+};
+
+static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
+{
+       struct kernfs_node *kn_subdir;
+       struct rdt_resource *r;
+       int ret;
+
+       /* create the directory */
+       kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
+       if (IS_ERR(kn_info))
+               return PTR_ERR(kn_info);
+       kernfs_get(kn_info);
+
+       for_each_enabled_rdt_resource(r) {
+               kn_subdir = kernfs_create_dir(kn_info, r->name,
+                                             kn_info->mode, r);
+               if (IS_ERR(kn_subdir)) {
+                       ret = PTR_ERR(kn_subdir);
+                       goto out_destroy;
+               }
+               kernfs_get(kn_subdir);
+               ret = rdtgroup_kn_set_ugid(kn_subdir);
+               if (ret)
+                       goto out_destroy;
+               ret = rdtgroup_add_files(kn_subdir, res_info_files,
+                                        ARRAY_SIZE(res_info_files));
+               if (ret)
+                       goto out_destroy;
+               kernfs_activate(kn_subdir);
+       }
+
+       /*
+        * This extra ref will be put in kernfs_remove() and guarantees
+        * that kn_info is always accessible.
+        */
+       kernfs_get(kn_info);
+
+       ret = rdtgroup_kn_set_ugid(kn_info);
+       if (ret)
+               goto out_destroy;
+
+       kernfs_activate(kn_info);
+
+       return 0;
+
+out_destroy:
+       kernfs_remove(kn_info);
+       return ret;
+}
+
+static void l3_qos_cfg_update(void *arg)
+{
+       bool *enable = arg;
+
+       wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+}
+
+static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
+{
+       cpumask_var_t cpu_mask;
+       struct rdt_domain *d;
+       int cpu;
+
+       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       list_for_each_entry(d, &r->domains, list) {
+               /* Pick one CPU from each domain instance to update MSR */
+               cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+       }
+       cpu = get_cpu();
+       /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
+       if (cpumask_test_cpu(cpu, cpu_mask))
+               l3_qos_cfg_update(&enable);
+       /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
+       smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
+       put_cpu();
+
+       free_cpumask_var(cpu_mask);
+
+       return 0;
+}
+
+static int cdp_enable(void)
+{
+       struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
+       struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
+       struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+       int ret;
+
+       if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
+               return -EINVAL;
+
+       ret = set_l3_qos_cfg(r_l3, true);
+       if (!ret) {
+               r_l3->enabled = false;
+               r_l3data->enabled = true;
+               r_l3code->enabled = true;
+       }
+       return ret;
+}
+
+static void cdp_disable(void)
+{
+       struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+       r->enabled = r->capable;
+
+       if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
+               rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
+               rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
+               set_l3_qos_cfg(r, false);
+       }
+}
+
+static int parse_rdtgroupfs_options(char *data)
+{
+       char *token, *o = data;
+       int ret = 0;
+
+       while ((token = strsep(&o, ",")) != NULL) {
+               if (!*token)
+                       return -EINVAL;
+
+               if (!strcmp(token, "cdp"))
+                       ret = cdp_enable();
+       }
+
+       return ret;
+}
+
+/*
+ * We don't allow rdtgroup directories to be created anywhere
+ * except the root directory. Thus when looking for the rdtgroup
+ * structure for a kernfs node we are either looking at a directory,
+ * in which case the rdtgroup structure is pointed at by the "priv"
+ * field, or at a file, in which case we need only look to the parent
+ * to find the rdtgroup.
+ */
+static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
+{
+       if (kernfs_type(kn) == KERNFS_DIR) {
+               /*
+                * All the resource directories use "kn->priv"
+                * to point to the "struct rdtgroup" for the
+                * resource. "info" and its subdirectories don't
+                * have rdtgroup structures, so return NULL here.
+                */
+               if (kn == kn_info || kn->parent == kn_info)
+                       return NULL;
+               else
+                       return kn->priv;
+       } else {
+               return kn->parent->priv;
+       }
+}
+
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
+{
+       struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
+
+       if (!rdtgrp)
+               return NULL;
+
+       atomic_inc(&rdtgrp->waitcount);
+       kernfs_break_active_protection(kn);
+
+       mutex_lock(&rdtgroup_mutex);
+
+       /* Was this group deleted while we waited? */
+       if (rdtgrp->flags & RDT_DELETED)
+               return NULL;
+
+       return rdtgrp;
+}
+
+void rdtgroup_kn_unlock(struct kernfs_node *kn)
+{
+       struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
+
+       if (!rdtgrp)
+               return;
+
+       mutex_unlock(&rdtgroup_mutex);
+
+       if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+           (rdtgrp->flags & RDT_DELETED)) {
+               kernfs_unbreak_active_protection(kn);
+               kernfs_put(kn);
+               kfree(rdtgrp);
+       } else {
+               kernfs_unbreak_active_protection(kn);
+       }
+}
+
+static struct dentry *rdt_mount(struct file_system_type *fs_type,
+                               int flags, const char *unused_dev_name,
+                               void *data)
+{
+       struct dentry *dentry;
+       int ret;
+
+       mutex_lock(&rdtgroup_mutex);
+       /*
+        * resctrl file system can only be mounted once.
+        */
+       if (static_branch_unlikely(&rdt_enable_key)) {
+               dentry = ERR_PTR(-EBUSY);
+               goto out;
+       }
+
+       ret = parse_rdtgroupfs_options(data);
+       if (ret) {
+               dentry = ERR_PTR(ret);
+               goto out_cdp;
+       }
+
+       closid_init();
+
+       ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
+       if (ret) {
+               dentry = ERR_PTR(ret);
+               goto out_cdp;
+       }
+
+       dentry = kernfs_mount(fs_type, flags, rdt_root,
+                             RDTGROUP_SUPER_MAGIC, NULL);
+       if (IS_ERR(dentry))
+               goto out_cdp;
+
+       static_branch_enable(&rdt_enable_key);
+       goto out;
+
+out_cdp:
+       cdp_disable();
+out:
+       mutex_unlock(&rdtgroup_mutex);
+
+       return dentry;
+}
+
+static int reset_all_cbms(struct rdt_resource *r)
+{
+       struct msr_param msr_param;
+       cpumask_var_t cpu_mask;
+       struct rdt_domain *d;
+       int i, cpu;
+
+       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       msr_param.res = r;
+       msr_param.low = 0;
+       msr_param.high = r->num_closid;
+
+       /*
+        * Disable resource control for this resource by setting all
+        * CBMs in all domains to the maximum mask value. Pick one CPU
+        * from each domain to update the MSRs below.
+        */
+       list_for_each_entry(d, &r->domains, list) {
+               cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+
+               for (i = 0; i < r->num_closid; i++)
+                       d->cbm[i] = r->max_cbm;
+       }
+       cpu = get_cpu();
+       /* Update CBM on this cpu if it's in cpu_mask. */
+       if (cpumask_test_cpu(cpu, cpu_mask))
+               rdt_cbm_update(&msr_param);
+       /* Update CBM on all other cpus in cpu_mask. */
+       smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+       put_cpu();
+
+       free_cpumask_var(cpu_mask);
+
+       return 0;
+}
+
+/*
+ * Move tasks from one group to the other. If @from is NULL, then all tasks
+ * in the system are moved unconditionally (used for teardown).
+ *
+ * If @mask is not NULL the cpus on which moved tasks are running are set
+ * in that mask so the update smp function call is restricted to affected
+ * cpus.
+ */
+static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+                                struct cpumask *mask)
+{
+       struct task_struct *p, *t;
+
+       read_lock(&tasklist_lock);
+       for_each_process_thread(p, t) {
+               if (!from || t->closid == from->closid) {
+                       t->closid = to->closid;
+#ifdef CONFIG_SMP
+                       /*
+                        * This is safe on x86 w/o barriers as the ordering
+                        * of writing to task_cpu() and t->on_cpu is
+                        * reverse to the reading here. The detection is
+                        * inaccurate as tasks might move or schedule
+                        * before the smp function call takes place. In
+                        * such a case the function call is pointless, but
+                        * there is no other side effect.
+                        */
+                       if (mask && t->on_cpu)
+                               cpumask_set_cpu(task_cpu(t), mask);
+#endif
+               }
+       }
+       read_unlock(&tasklist_lock);
+}
+
+/*
+ * Forcibly remove all of subdirectories under root.
+ */
+static void rmdir_all_sub(void)
+{
+       struct rdtgroup *rdtgrp, *tmp;
+
+       /* Move all tasks to the default resource group */
+       rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
+
+       list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
+               /* Remove each rdtgroup other than root */
+               if (rdtgrp == &rdtgroup_default)
+                       continue;
+
+               /*
+                * Give any CPUs back to the default group. We cannot copy
+                * cpu_online_mask because a CPU might have executed the
+                * offline callback already, but is still marked online.
+                */
+               cpumask_or(&rdtgroup_default.cpu_mask,
+                          &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+
+               kernfs_remove(rdtgrp->kn);
+               list_del(&rdtgrp->rdtgroup_list);
+               kfree(rdtgrp);
+       }
+       /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
+       get_online_cpus();
+       rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
+       put_online_cpus();
+
+       kernfs_remove(kn_info);
+}
+
+static void rdt_kill_sb(struct super_block *sb)
+{
+       struct rdt_resource *r;
+
+       mutex_lock(&rdtgroup_mutex);
+
+       /* Put everything back to default values. */
+       for_each_enabled_rdt_resource(r)
+               reset_all_cbms(r);
+       cdp_disable();
+       rmdir_all_sub();
+       static_branch_disable(&rdt_enable_key);
+       kernfs_kill_sb(sb);
+       mutex_unlock(&rdtgroup_mutex);
+}
+
+static struct file_system_type rdt_fs_type = {
+       .name    = "resctrl",
+       .mount   = rdt_mount,
+       .kill_sb = rdt_kill_sb,
+};
+
+static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+                         umode_t mode)
+{
+       struct rdtgroup *parent, *rdtgrp;
+       struct kernfs_node *kn;
+       int ret, closid;
+
+       /* Only allow mkdir in the root directory */
+       if (parent_kn != rdtgroup_default.kn)
+               return -EPERM;
+
+       /* Do not accept '\n' to avoid an unparsable situation. */
+       if (strchr(name, '\n'))
+               return -EINVAL;
+
+       parent = rdtgroup_kn_lock_live(parent_kn);
+       if (!parent) {
+               ret = -ENODEV;
+               goto out_unlock;
+       }
+
+       ret = closid_alloc();
+       if (ret < 0)
+               goto out_unlock;
+       closid = ret;
+
+       /* allocate the rdtgroup. */
+       rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
+       if (!rdtgrp) {
+               ret = -ENOSPC;
+               goto out_closid_free;
+       }
+       rdtgrp->closid = closid;
+       list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
+
+       /* kernfs creates the directory for rdtgrp */
+       kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
+       if (IS_ERR(kn)) {
+               ret = PTR_ERR(kn);
+               goto out_cancel_ref;
+       }
+       rdtgrp->kn = kn;
+
+       /*
+        * kernfs_remove() will drop the reference count on "kn" which
+        * will free it. But we still need it to stick around for the
+        * rdtgroup_kn_unlock(kn) call below. Take one extra reference
+        * here, which will be dropped inside rdtgroup_kn_unlock().
+        */
+       kernfs_get(kn);
+
+       ret = rdtgroup_kn_set_ugid(kn);
+       if (ret)
+               goto out_destroy;
+
+       ret = rdtgroup_add_files(kn, rdtgroup_base_files,
+                                ARRAY_SIZE(rdtgroup_base_files));
+       if (ret)
+               goto out_destroy;
+
+       kernfs_activate(kn);
+
+       ret = 0;
+       goto out_unlock;
+
+out_destroy:
+       kernfs_remove(rdtgrp->kn);
+out_cancel_ref:
+       list_del(&rdtgrp->rdtgroup_list);
+       kfree(rdtgrp);
+out_closid_free:
+       closid_free(closid);
+out_unlock:
+       rdtgroup_kn_unlock(parent_kn);
+       return ret;
+}
+
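+/*
+ * Sketch of the resulting interface (assuming a mounted resctrl fs):
+ *     mkdir /sys/fs/resctrl/p0
+ * allocates a fresh CLOSID and creates a group directory populated with
+ * rdtgroup_base_files; removing the directory takes the path below.
+ */
+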
+static int rdtgroup_rmdir(struct kernfs_node *kn)
+{
+       int ret, cpu, closid = rdtgroup_default.closid;
+       struct rdtgroup *rdtgrp;
+       cpumask_var_t tmpmask;
+
+       if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+               return -ENOMEM;
+
+       rdtgrp = rdtgroup_kn_lock_live(kn);
+       if (!rdtgrp) {
+               ret = -EPERM;
+               goto out;
+       }
+
+       /* Give any tasks back to the default group */
+       rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
+
+       /* Give any CPUs back to the default group */
+       cpumask_or(&rdtgroup_default.cpu_mask,
+                  &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+
+       /* Update per cpu closid of the moved CPUs first */
+       for_each_cpu(cpu, &rdtgrp->cpu_mask)
+               per_cpu(cpu_closid, cpu) = closid;
+       /*
+        * Update the MSR on the moved CPUs and on any CPU which has a
+        * moved task running on it.
+        */
+       cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
+       rdt_update_closid(tmpmask, NULL);
+
+       rdtgrp->flags = RDT_DELETED;
+       closid_free(rdtgrp->closid);
+       list_del(&rdtgrp->rdtgroup_list);
+
+       /*
+        * Take one extra reference on kn; it is dropped when rdtgrp is
+        * freed in rdtgroup_kn_unlock().
+        */
+       kernfs_get(kn);
+       kernfs_remove(rdtgrp->kn);
+       ret = 0;
+out:
+       rdtgroup_kn_unlock(kn);
+       free_cpumask_var(tmpmask);
+       return ret;
+}
+
+static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
+{
+       if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled)
+               seq_puts(seq, ",cdp");
+       return 0;
+}
+
+static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
+       .mkdir          = rdtgroup_mkdir,
+       .rmdir          = rdtgroup_rmdir,
+       .show_options   = rdtgroup_show_options,
+};
+
+static int __init rdtgroup_setup_root(void)
+{
+       int ret;
+
+       rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
+                                     KERNFS_ROOT_CREATE_DEACTIVATED,
+                                     &rdtgroup_default);
+       if (IS_ERR(rdt_root))
+               return PTR_ERR(rdt_root);
+
+       mutex_lock(&rdtgroup_mutex);
+
+       rdtgroup_default.closid = 0;
+       list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
+
+       ret = rdtgroup_add_files(rdt_root->kn, rdtgroup_base_files,
+                                ARRAY_SIZE(rdtgroup_base_files));
+       if (ret) {
+               kernfs_destroy_root(rdt_root);
+               goto out;
+       }
+
+       rdtgroup_default.kn = rdt_root->kn;
+       kernfs_activate(rdtgroup_default.kn);
+
+out:
+       mutex_unlock(&rdtgroup_mutex);
+
+       return ret;
+}
+
+/*
+ * rdtgroup_init - rdtgroup initialization
+ *
+ * Set up the resctrl filesystem: create the kernfs root and mount point,
+ * register the rdtgroup filesystem, and initialize the files under the
+ * root directory.
+ *
+ * Return: 0 on success or -errno
+ */
+int __init rdtgroup_init(void)
+{
+       int ret = 0;
+
+       ret = rdtgroup_setup_root();
+       if (ret)
+               return ret;
+
+       ret = sysfs_create_mount_point(fs_kobj, "resctrl");
+       if (ret)
+               goto cleanup_root;
+
+       ret = register_filesystem(&rdt_fs_type);
+       if (ret)
+               goto cleanup_mountpoint;
+
+       return 0;
+
+cleanup_mountpoint:
+       sysfs_remove_mount_point(fs_kobj, "resctrl");
+cleanup_root:
+       kernfs_destroy_root(rdt_root);
+
+       return ret;
+}
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
new file mode 100644 (file)
index 0000000..f369cb8
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * Resource Director Technology(RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ *    Fenghua Yu <fenghua.yu@intel.com>
+ *    Tony Luck <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel(R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/intel_rdt.h>
+
+/*
+ * Check whether a cache bit mask is valid. The SDM says:
+ *     Please note that all (and only) contiguous '1' combinations
+ *     are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
+ * Additionally Haswell requires at least two bits set.
+ */
+static bool cbm_validate(unsigned long var, struct rdt_resource *r)
+{
+       unsigned long first_bit, zero_bit;
+
+       if (var == 0 || var > r->max_cbm)
+               return false;
+
+       first_bit = find_first_bit(&var, r->cbm_len);
+       zero_bit = find_next_zero_bit(&var, r->cbm_len, first_bit);
+
+       if (find_next_bit(&var, r->cbm_len, zero_bit) < r->cbm_len)
+               return false;
+
+       if ((zero_bit - first_bit) < r->min_cbm_bits)
+               return false;
+       return true;
+}
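+
+/*
+ * Illustration (assuming cbm_len = 20, max_cbm = 0xfffff and
+ * min_cbm_bits = 1): 0x000ff and 0x0ff00 pass the checks above, while
+ * 0x0f0f0 is rejected because its set bits are not contiguous.
+ */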
+
+/*
+ * Read one cache bit mask (hex). Check that it is valid for the current
+ * resource type.
+ */
+static int parse_cbm(char *buf, struct rdt_resource *r)
+{
+       unsigned long data;
+       int ret;
+
+       ret = kstrtoul(buf, 16, &data);
+       if (ret)
+               return ret;
+       if (!cbm_validate(data, r))
+               return -EINVAL;
+       r->tmp_cbms[r->num_tmp_cbms++] = data;
+
+       return 0;
+}
+
+/*
+ * For each domain in this resource we expect to find a series of:
+ *     id=mask
+ * separated by ";". The "id" is in decimal, and the entries must appear
+ * in the same order as the resource's domain list.
+ */
+static int parse_line(char *line, struct rdt_resource *r)
+{
+       char *dom = NULL, *id;
+       struct rdt_domain *d;
+       unsigned long dom_id;
+
+       list_for_each_entry(d, &r->domains, list) {
+               dom = strsep(&line, ";");
+               if (!dom)
+                       return -EINVAL;
+               id = strsep(&dom, "=");
+               if (kstrtoul(id, 10, &dom_id) || dom_id != d->id)
+                       return -EINVAL;
+               if (parse_cbm(dom, r))
+                       return -EINVAL;
+       }
+
+       /* Any garbage at the end of the line? */
+       if (line && line[0])
+               return -EINVAL;
+       return 0;
+}
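+
+/*
+ * Example input for an L3 resource with two cache domains (ids 0 and 1),
+ * as passed in by the schemata write path with the "L3:" prefix already
+ * stripped:
+ *     0=fffff;1=3ff
+ */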
+
+static int update_domains(struct rdt_resource *r, int closid)
+{
+       struct msr_param msr_param;
+       cpumask_var_t cpu_mask;
+       struct rdt_domain *d;
+       int cpu, idx = 0;
+
+       if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       msr_param.low = closid;
+       msr_param.high = msr_param.low + 1;
+       msr_param.res = r;
+
+       list_for_each_entry(d, &r->domains, list) {
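+               /*
+                * One CPU per domain is sufficient: the CBM MSRs are
+                * shared by all CPUs in a cache domain.
+                */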
+               cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+               d->cbm[msr_param.low] = r->tmp_cbms[idx++];
+       }
+       cpu = get_cpu();
+       /* Update CBM on this cpu if it's in cpu_mask. */
+       if (cpumask_test_cpu(cpu, cpu_mask))
+               rdt_cbm_update(&msr_param);
+       /* Update CBM on other cpus. */
+       smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+       put_cpu();
+
+       free_cpumask_var(cpu_mask);
+
+       return 0;
+}
+
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+                               char *buf, size_t nbytes, loff_t off)
+{
+       struct rdtgroup *rdtgrp;
+       struct rdt_resource *r;
+       char *tok, *resname;
+       int closid, ret = 0;
+       u32 *l3_cbms = NULL;
+
+       /* Valid input requires a trailing newline */
+       if (nbytes == 0 || buf[nbytes - 1] != '\n')
+               return -EINVAL;
+       buf[nbytes - 1] = '\0';
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (!rdtgrp) {
+               rdtgroup_kn_unlock(of->kn);
+               return -ENOENT;
+       }
+
+       closid = rdtgrp->closid;
+
+       /* get scratch space to save all the masks while we validate input */
+       for_each_enabled_rdt_resource(r) {
+               r->tmp_cbms = kcalloc(r->num_domains, sizeof(*l3_cbms),
+                                     GFP_KERNEL);
+               if (!r->tmp_cbms) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               r->num_tmp_cbms = 0;
+       }
+
+       while ((tok = strsep(&buf, "\n")) != NULL) {
+               resname = strsep(&tok, ":");
+               if (!tok) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               for_each_enabled_rdt_resource(r) {
+                       if (!strcmp(resname, r->name) &&
+                           closid < r->num_closid) {
+                               ret = parse_line(tok, r);
+                               if (ret)
+                                       goto out;
+                               break;
+                       }
+               }
+               if (!r->name) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       /* Did the parser find all the masks we need? */
+       for_each_enabled_rdt_resource(r) {
+               if (r->num_tmp_cbms != r->num_domains) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       for_each_enabled_rdt_resource(r) {
+               ret = update_domains(r, closid);
+               if (ret)
+                       goto out;
+       }
+
+out:
+       rdtgroup_kn_unlock(of->kn);
+       for_each_enabled_rdt_resource(r) {
+               kfree(r->tmp_cbms);
+               r->tmp_cbms = NULL;
+       }
+       return ret ?: nbytes;
+}
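+
+/*
+ * Usage sketch (interface behaviour, not code in this file):
+ *     # echo "L3:0=3ff;1=3ff" > /sys/fs/resctrl/p0/schemata
+ * One line per enabled resource; every domain of that resource must be
+ * assigned a mask, and the whole write fails on the first invalid token.
+ */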
+
+static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+{
+       struct rdt_domain *dom;
+       bool sep = false;
+
+       seq_printf(s, "%s:", r->name);
+       list_for_each_entry(dom, &r->domains, list) {
+               if (sep)
+                       seq_puts(s, ";");
+               seq_printf(s, "%d=%x", dom->id, dom->cbm[closid]);
+               sep = true;
+       }
+       seq_puts(s, "\n");
+}
+
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+                          struct seq_file *s, void *v)
+{
+       struct rdtgroup *rdtgrp;
+       struct rdt_resource *r;
+       int closid, ret = 0;
+
+       rdtgrp = rdtgroup_kn_lock_live(of->kn);
+       if (rdtgrp) {
+               closid = rdtgrp->closid;
+               for_each_enabled_rdt_resource(r) {
+                       if (closid < r->num_closid)
+                               show_doms(s, r, closid);
+               }
+       } else {
+               ret = -ENOENT;
+       }
+       rdtgroup_kn_unlock(of->kn);
+       return ret;
+}
index d1316f9c8329846b0d3f7dede754fa0eef256bd3..d9794060fe225f72f34e78cdc74959aa1bf38934 100644 (file)
@@ -20,12 +20,15 @@ struct cpuid_bit {
 /* Please keep the leaf sorted by cpuid_bit.level for faster search. */
 static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_APERFMPERF,       CPUID_ECX,  0, 0x00000006, 0 },
-       { X86_FEATURE_EPB,              CPUID_ECX,  3, 0x00000006, 0 },
-       { X86_FEATURE_INTEL_PT,         CPUID_EBX, 25, 0x00000007, 0 },
+       { X86_FEATURE_EPB,              CPUID_ECX,  3, 0x00000006, 0 },
+       { X86_FEATURE_INTEL_PT,         CPUID_EBX, 25, 0x00000007, 0 },
        { X86_FEATURE_AVX512_4VNNIW,    CPUID_EDX,  2, 0x00000007, 0 },
        { X86_FEATURE_AVX512_4FMAPS,    CPUID_EDX,  3, 0x00000007, 0 },
-       { X86_FEATURE_HW_PSTATE,        CPUID_EDX,  7, 0x80000007, 0 },
-       { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
+       { X86_FEATURE_CAT_L3,           CPUID_EBX,  1, 0x00000010, 0 },
+       { X86_FEATURE_CAT_L2,           CPUID_EBX,  2, 0x00000010, 0 },
+       { X86_FEATURE_CDP_L3,           CPUID_ECX,  2, 0x00000010, 1 },
+       { X86_FEATURE_HW_PSTATE,        CPUID_EDX,  7, 0x80000007, 0 },
+       { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
        { X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
        { 0, 0, 0, 0, 0 }
 };
index d0d7441085942fc5efe3fda3dd0c0c8fe9fdef69..a0ac3e81518ad8f633c4a3c16e17cabea38950e5 100644 (file)
@@ -53,6 +53,7 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
+#include <asm/intel_rdt.h>
 
 void __show_regs(struct pt_regs *regs, int all)
 {
@@ -296,5 +297,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        this_cpu_write(current_task, next_p);
 
+       /* Load the Intel cache allocation PQR MSR. */
+       intel_rdt_sched_in();
+
        return prev_p;
 }
index a76b65e3e615e8d511f213b2ea2a4511aff516f4..a61e141b6891ed4a08437fee3413fedfbc65e429 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/switch_to.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/vdso.h>
+#include <asm/intel_rdt.h>
 
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
@@ -476,6 +477,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                        loadsegment(ss, __KERNEL_DS);
        }
 
+       /* Load the Intel cache allocation PQR MSR. */
+       intel_rdt_sched_in();
+
        return prev_p;
 }
 
index 8a05a404ae708503589bdf33643948c37d243e62..a57046de2f07f00eae78aaabe9b5e43c1ecc178f 100644 (file)
@@ -655,6 +655,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 
        dprintk("%s: write %Zd bytes\n", bd->name, count);
 
+       if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+               return -EINVAL;
+
        bsg_set_block(bd, file);
 
        bytes_written = 0;
index f856963204f4949f5197c1cb0c0a247ca12c95da..656c8c6ed206f876bbdbd0f240dedf9997a88cf0 100644 (file)
@@ -45,6 +45,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
                                    || pstart < 0 || plength < 0 || partno > 65535)
                                        return -EINVAL;
                        }
+                       /* check if partition is aligned to blocksize */
+                       if (p.start & (bdev_logical_block_size(bdev) - 1))
+                               return -EINVAL;
 
                        mutex_lock(&bdev->bd_mutex);
 
index 0774799942e06a8d890a5c88e40990cd53a15037..c6fee7437be44573ade684d064e161954485301a 100644 (file)
@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
        __set_bit(WRITE_16, filter->write_ok);
        __set_bit(WRITE_LONG, filter->write_ok);
        __set_bit(WRITE_LONG_2, filter->write_ok);
+       __set_bit(WRITE_SAME, filter->write_ok);
+       __set_bit(WRITE_SAME_16, filter->write_ok);
+       __set_bit(WRITE_SAME_32, filter->write_ok);
        __set_bit(ERASE, filter->write_ok);
        __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
        __set_bit(MODE_SELECT, filter->write_ok);
index 7dd527f8ca1d24b10915c865dc3f24e4a8e34b92..94be8a8e6c082cbe5b808b049187db0657ad5cb0 100644 (file)
@@ -166,6 +166,12 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
 
 acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
 
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+                 struct acpi_table_header **out_table);
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc);
+
 /*
  * tbxfload
  */
index 5fb838e592dc430c49b657ce6fba3dce1572676f..81473a4880ce219febb9024515cc92759362d426 100644 (file)
@@ -311,6 +311,8 @@ void acpi_tb_parse_fadt(void)
 {
        u32 length;
        struct acpi_table_header *table;
+       struct acpi_table_desc *fadt_desc;
+       acpi_status status;
 
        /*
         * The FADT has multiple versions with different lengths,
@@ -319,14 +321,12 @@ void acpi_tb_parse_fadt(void)
         * Get a local copy of the FADT and convert it to a common format
         * Map entire FADT, assumed to be smaller than one page.
         */
-       length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
-
-       table =
-           acpi_os_map_memory(acpi_gbl_root_table_list.
-                              tables[acpi_gbl_fadt_index].address, length);
-       if (!table) {
+       fadt_desc = &acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index];
+       status = acpi_tb_get_table(fadt_desc, &table);
+       if (ACPI_FAILURE(status)) {
                return;
        }
+       length = fadt_desc->length;
 
        /*
         * Validate the FADT checksum before we copy the table. Ignore
@@ -340,7 +340,7 @@ void acpi_tb_parse_fadt(void)
 
        /* All done with the real FADT, unmap it */
 
-       acpi_os_unmap_memory(table, length);
+       acpi_tb_put_table(fadt_desc);
 
        /* Obtain the DSDT and FACS tables via their addresses within the FADT */
 
index 51eb07cf989844842b6dce634a589d1e7440e154..86854e84680056e164c6adad591ad772e85a3e79 100644 (file)
@@ -381,3 +381,88 @@ next_table:
        acpi_os_unmap_memory(table, length);
        return_ACPI_STATUS(AE_OK);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_get_table
+ *
+ * PARAMETERS:  table_desc          - Table descriptor
+ *              out_table           - Where the pointer to the table is returned
+ *
+ * RETURN:      Status and pointer to the requested table
+ *
+ * DESCRIPTION: Increase a reference to a table descriptor and return the
+ *              validated table pointer.
+ *              If the table descriptor is an entry of the root table list,
+ *              this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_get_table(struct acpi_table_desc *table_desc,
+                 struct acpi_table_header **out_table)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_tb_get_table);
+
+       if (table_desc->validation_count == 0) {
+
+               /* Table needs to be "VALIDATED" */
+
+               status = acpi_tb_validate_table(table_desc);
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+       }
+
+       table_desc->validation_count++;
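+       /* Overflow check: the counter wrapped back to zero. */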
+       if (table_desc->validation_count == 0) {
+               ACPI_ERROR((AE_INFO,
+                           "Table %p, Validation count is zero after increment\n",
+                           table_desc));
+               table_desc->validation_count--;
+               return_ACPI_STATUS(AE_LIMIT);
+       }
+
+       *out_table = table_desc->pointer;
+       return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_put_table
+ *
+ * PARAMETERS:  table_desc          - Table descriptor
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Decrease a reference to a table descriptor and release the
+ *              validated table pointer if no references remain.
+ *              If the table descriptor is an entry of the root table list,
+ *              this API must be invoked with ACPI_MTX_TABLES acquired.
+ *
+ ******************************************************************************/
+
+void acpi_tb_put_table(struct acpi_table_desc *table_desc)
+{
+
+       ACPI_FUNCTION_TRACE(acpi_tb_put_table);
+
+       if (table_desc->validation_count == 0) {
+               ACPI_WARNING((AE_INFO,
+                             "Table %p, Validation count is zero before decrement\n",
+                             table_desc));
+               return_VOID;
+       }
+       table_desc->validation_count--;
+
+       if (table_desc->validation_count == 0) {
+
+               /* Table needs to be "INVALIDATED" */
+
+               acpi_tb_invalidate_table(table_desc);
+       }
+
+       return_VOID;
+}
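+
+/*
+ * Typical pairing at the public API level built on these helpers
+ * (a sketch, not code in this file):
+ *
+ *     struct acpi_table_header *table;
+ *
+ *     if (ACPI_SUCCESS(acpi_get_table(ACPI_SIG_MADT, 0, &table))) {
+ *             ... use table ...
+ *             acpi_put_table(table);
+ *     }
+ */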
index d5adb7ac468430c82c4c1d7a1d7c75be6be40c5e..7684707b254b93cf7b7c9aa90ef3346d50609ee7 100644 (file)
@@ -282,7 +282,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_table_with_size
+ * FUNCTION:    acpi_get_table
  *
  * PARAMETERS:  signature           - ACPI signature of needed table
  *              instance            - Which instance (for SSDTs)
@@ -292,16 +292,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_table_header)
  *
  * DESCRIPTION: Finds and verifies an ACPI table. Table must be in the
  *              RSDT/XSDT.
+ *              Note that an early stage acpi_get_table() call must be paired
+ *              with an early stage acpi_put_table() call; otherwise the table
+ *              pointer mapped by the early stage mapping implementation may be
+ *              erroneously unmapped by the late stage unmapping implementation
+ *              in an acpi_put_table() invoked during the late stage.
  *
  ******************************************************************************/
 acpi_status
-acpi_get_table_with_size(char *signature,
-              u32 instance, struct acpi_table_header **out_table,
-              acpi_size *tbl_size)
+acpi_get_table(char *signature,
+              u32 instance, struct acpi_table_header ** out_table)
 {
        u32 i;
        u32 j;
-       acpi_status status;
+       acpi_status status = AE_NOT_FOUND;
+       struct acpi_table_desc *table_desc;
 
        /* Parameter validation */
 
@@ -309,13 +314,22 @@ acpi_get_table_with_size(char *signature,
                return (AE_BAD_PARAMETER);
        }
 
+       /*
+        * Note that the following line is required by some OSPMs: they only
+        * check whether the returned table is NULL, rather than the returned
+        * status, to determine whether this function succeeded.
+        */
+       *out_table = NULL;
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
        /* Walk the root table list */
 
        for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
             i++) {
-               if (!ACPI_COMPARE_NAME
-                   (&(acpi_gbl_root_table_list.tables[i].signature),
-                    signature)) {
+               table_desc = &acpi_gbl_root_table_list.tables[i];
+
+               if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
                        continue;
                }
 
@@ -323,43 +337,65 @@ acpi_get_table_with_size(char *signature,
                        continue;
                }
 
-               status =
-                   acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]);
-               if (ACPI_SUCCESS(status)) {
-                       *out_table = acpi_gbl_root_table_list.tables[i].pointer;
-                       *tbl_size = acpi_gbl_root_table_list.tables[i].length;
-               }
-
-               if (!acpi_gbl_permanent_mmap) {
-                       acpi_gbl_root_table_list.tables[i].pointer = NULL;
-               }
-
-               return (status);
+               status = acpi_tb_get_table(table_desc, out_table);
+               break;
        }
 
-       return (AE_NOT_FOUND);
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+       return (status);
 }
 
-ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
+ACPI_EXPORT_SYMBOL(acpi_get_table)
 
-acpi_status
-acpi_get_table(char *signature,
-              u32 instance, struct acpi_table_header **out_table)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_put_table
+ *
+ * PARAMETERS:  table               - The pointer to the table
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Release a table returned by acpi_get_table() and its clones.
+ *              Note that this function is not safe to invoke after the
+ *              original table descriptor has been uninstalled. Currently
+ *              no OSPM is required to handle such situations.
+ *
+ ******************************************************************************/
+void acpi_put_table(struct acpi_table_header *table)
 {
-       acpi_size tbl_size;
+       u32 i;
+       struct acpi_table_desc *table_desc;
+
+       ACPI_FUNCTION_TRACE(acpi_put_table);
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+       /* Walk the root table list */
+
+       for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
+               table_desc = &acpi_gbl_root_table_list.tables[i];
 
-       return acpi_get_table_with_size(signature,
-                      instance, out_table, &tbl_size);
+               if (table_desc->pointer != table) {
+                       continue;
+               }
+
+               acpi_tb_put_table(table_desc);
+               break;
+       }
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+       return_VOID;
 }
 
-ACPI_EXPORT_SYMBOL(acpi_get_table)
+ACPI_EXPORT_SYMBOL(acpi_put_table)
 
 /*******************************************************************************
  *
  * FUNCTION:    acpi_get_table_by_index
  *
  * PARAMETERS:  table_index         - Table index
- *              table               - Where the pointer to the table is returned
+ *              out_table           - Where the pointer to the table is returned
  *
  * RETURN:      Status and pointer to the requested table
  *
@@ -368,7 +404,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
  *
  ******************************************************************************/
 acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table)
 {
        acpi_status status;
 
@@ -376,35 +412,33 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
 
        /* Parameter validation */
 
-       if (!table) {
+       if (!out_table) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
+       /*
+        * Note that the following line is required by some OSPMs: they only
+        * check whether the returned table is NULL, rather than the returned
+        * status, to determine whether this function succeeded.
+        */
+       *out_table = NULL;
+
        (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
        /* Validate index */
 
        if (table_index >= acpi_gbl_root_table_list.current_table_count) {
-               (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-               return_ACPI_STATUS(AE_BAD_PARAMETER);
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
        }
 
-       if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
-
-               /* Table is not mapped, map it */
+       status =
+           acpi_tb_get_table(&acpi_gbl_root_table_list.tables[table_index],
+                             out_table);
 
-               status =
-                   acpi_tb_validate_table(&acpi_gbl_root_table_list.
-                                          tables[table_index]);
-               if (ACPI_FAILURE(status)) {
-                       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-                       return_ACPI_STATUS(status);
-               }
-       }
-
-       *table = acpi_gbl_root_table_list.tables[table_index].pointer;
+unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-       return_ACPI_STATUS(AE_OK);
+       return_ACPI_STATUS(status);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
index 5cbefd7621f01174ff1c3ba3012d9c193dbc1e49..95855cb9d6fb772634e2dd6f683c62a1de819ffd 100644 (file)
@@ -974,7 +974,7 @@ void __init acpi_early_init(void)
        if (!acpi_strict)
                acpi_gbl_enable_interpreter_slack = TRUE;
 
-       acpi_gbl_permanent_mmap = 1;
+       acpi_permanent_mmap = true;
 
        /*
         * If the machine falls into the DMI check table,
index 312c4b4dc363fdbc4847d3db55c0cfbfbb80f5f4..2f82b8eba360e7f369338b7d7a340060d6519f4f 100644 (file)
@@ -2806,12 +2806,13 @@ static int acpi_nfit_add(struct acpi_device *adev)
        acpi_size sz;
        int rc = 0;
 
-       status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
+       status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
        if (ACPI_FAILURE(status)) {
                /* This is ok, we could have an nvdimm hotplugged later */
                dev_dbg(dev, "failed to find NFIT at startup\n");
                return 0;
        }
+       sz = tbl->length;
 
        acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
        if (!acpi_desc)
index 9a4c6abee63e0e86a6941d19665a409d3931b301..a404ff4d71511d0a9d56ce10a238682e9ffc6073 100644 (file)
@@ -76,6 +76,7 @@ static struct workqueue_struct *kacpi_notify_wq;
 static struct workqueue_struct *kacpi_hotplug_wq;
 static bool acpi_os_initialized;
 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
+bool acpi_permanent_mmap = false;
 
 /*
  * This list of permanent mappings is for memory that may be accessed from
@@ -306,7 +307,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
  * virtual address).  If not found, map it, add it to that list and return a
  * pointer to it.
  *
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_map_table() to get the job done.
  */
 void __iomem *__ref
@@ -322,7 +323,7 @@ acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
                return NULL;
        }
 
-       if (!acpi_gbl_permanent_mmap)
+       if (!acpi_permanent_mmap)
                return __acpi_map_table((unsigned long)phys, size);
 
        mutex_lock(&acpi_ioremap_lock);
@@ -392,7 +393,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
  * mappings, drop a reference to it and unmap it if there are no more active
  * references to it.
  *
- * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
+ * During early init (when acpi_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_unmap_table() to get the job done.  Since
  * __acpi_unmap_table() is an __init function, the __ref annotation is needed
  * here.
@@ -401,7 +402,7 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
 {
        struct acpi_ioremap *map;
 
-       if (!acpi_gbl_permanent_mmap) {
+       if (!acpi_permanent_mmap) {
                __acpi_unmap_table(virt, size);
                return;
        }
@@ -426,12 +427,6 @@ void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
-void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
-{
-       if (!acpi_gbl_permanent_mmap)
-               __acpi_unmap_table(virt, size);
-}
-
 int acpi_os_map_generic_address(struct acpi_generic_address *gas)
 {
        u64 addr;
index 5c78ee1860b0ad390671e8f1b1c624339f7414ed..611a5585a9024a728c71e60ada951b3a73936708 100644 (file)
@@ -154,18 +154,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
 phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
 {
        struct acpi_table_madt *madt = NULL;
-       acpi_size tbl_size;
        phys_cpuid_t rv;
 
-       acpi_get_table_with_size(ACPI_SIG_MADT, 0,
-                                (struct acpi_table_header **)&madt,
-                                &tbl_size);
+       acpi_get_table(ACPI_SIG_MADT, 0,
+                      (struct acpi_table_header **)&madt);
        if (!madt)
                return PHYS_CPUID_INVALID;
 
        rv = map_madt_entry(madt, 1, acpi_id, true);
 
-       early_acpi_os_unmap_memory(madt, tbl_size);
+       acpi_put_table((struct acpi_table_header *)madt);
 
        return rv;
 }
index 93b00cf4eb3922d541a43d200ce73d5d414f6c3f..45dec874ea55b820281457a3ae5de2fe1f12403c 100644 (file)
@@ -1120,9 +1120,6 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
                                  "support\n"));
                *cap |= ACPI_VIDEO_BACKLIGHT;
-               if (!acpi_has_method(handle, "_BQC"))
-                       printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
-                               "cannot determine initial brightness\n");
                /* We have backlight support, no need to scan further */
                return AE_CTRL_TERMINATE;
        }
index e8d7bc7d4da89b1d0ff8f86bbf5ebbb258a411e4..b8019c4c1d38908895b21a92a094074ca2bd5bd8 100644 (file)
@@ -33,7 +33,6 @@ int __init parse_spcr(bool earlycon)
 {
        static char opts[64];
        struct acpi_table_spcr *table;
-       acpi_size table_size;
        acpi_status status;
        char *uart;
        char *iotype;
@@ -43,9 +42,8 @@ int __init parse_spcr(bool earlycon)
        if (acpi_disabled)
                return -ENODEV;
 
-       status = acpi_get_table_with_size(ACPI_SIG_SPCR, 0,
-                                         (struct acpi_table_header **)&table,
-                                         &table_size);
+       status = acpi_get_table(ACPI_SIG_SPCR, 0,
+                               (struct acpi_table_header **)&table);
 
        if (ACPI_FAILURE(status))
                return -ENOENT;
@@ -106,6 +104,6 @@ int __init parse_spcr(bool earlycon)
        err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
 
 done:
-       early_acpi_os_unmap_memory((void __iomem *)table, table_size);
+       acpi_put_table((struct acpi_table_header *)table);
        return err;
 }
index cdd56c4657e05ff38ed8e11da59d0c37a91326d6..2604189d6cd156e5449c7dd071d63120ea000a0e 100644 (file)
@@ -333,7 +333,6 @@ acpi_table_parse_entries_array(char *id,
                         unsigned int max_entries)
 {
        struct acpi_table_header *table_header = NULL;
-       acpi_size tbl_size;
        int count;
        u32 instance = 0;
 
@@ -346,7 +345,7 @@ acpi_table_parse_entries_array(char *id,
        if (!strncmp(id, ACPI_SIG_MADT, 4))
                instance = acpi_apic_instance;
 
-       acpi_get_table_with_size(id, instance, &table_header, &tbl_size);
+       acpi_get_table(id, instance, &table_header);
        if (!table_header) {
                pr_warn("%4.4s not present\n", id);
                return -ENODEV;
@@ -355,7 +354,7 @@ acpi_table_parse_entries_array(char *id,
        count = acpi_parse_entries_array(id, table_size, table_header,
                        proc, proc_num, max_entries);
 
-       early_acpi_os_unmap_memory((char *)table_header, tbl_size);
+       acpi_put_table(table_header);
        return count;
 }
 
@@ -397,7 +396,6 @@ acpi_table_parse_madt(enum acpi_madt_type id,
 int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
 {
        struct acpi_table_header *table = NULL;
-       acpi_size tbl_size;
 
        if (acpi_disabled)
                return -ENODEV;
@@ -406,13 +404,13 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
                return -EINVAL;
 
        if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-               acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
+               acpi_get_table(id, acpi_apic_instance, &table);
        else
-               acpi_get_table_with_size(id, 0, &table, &tbl_size);
+               acpi_get_table(id, 0, &table);
 
        if (table) {
                handler(table);
-               early_acpi_os_unmap_memory(table, tbl_size);
+               acpi_put_table(table);
                return 0;
        } else
                return -ENODEV;
@@ -426,16 +424,15 @@ int __init acpi_table_parse(char *id, acpi_tbl_table_handler handler)
 static void __init check_multiple_madt(void)
 {
        struct acpi_table_header *table = NULL;
-       acpi_size tbl_size;
 
-       acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
+       acpi_get_table(ACPI_SIG_MADT, 2, &table);
        if (table) {
                pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
                        acpi_apic_instance);
                pr_warn("If \"acpi_apic_instance=%d\" works better, "
                        "notify linux-acpi@vger.kernel.org\n",
                        acpi_apic_instance ? 0 : 2);
-               early_acpi_os_unmap_memory(table, tbl_size);
+               acpi_put_table(table);
 
        } else
                acpi_apic_instance = 0;
index 1e3903d0d99441897cae42cd032dfff319b1115f..eb3af2739537a8def39259206e2345a2adc72c71 100644 (file)
@@ -363,6 +363,7 @@ static ssize_t file_name##_show(struct device *dev,         \
        return sprintf(buf, "%u\n", this_leaf->object);         \
 }
 
+show_one(id, id);
 show_one(level, level);
 show_one(coherency_line_size, coherency_line_size);
 show_one(number_of_sets, number_of_sets);
@@ -444,6 +445,7 @@ static ssize_t write_policy_show(struct device *dev,
        return n;
 }
 
+static DEVICE_ATTR_RO(id);
 static DEVICE_ATTR_RO(level);
 static DEVICE_ATTR_RO(type);
 static DEVICE_ATTR_RO(coherency_line_size);
@@ -457,6 +459,7 @@ static DEVICE_ATTR_RO(shared_cpu_list);
 static DEVICE_ATTR_RO(physical_line_partition);
 
 static struct attribute *cache_default_attrs[] = {
+       &dev_attr_id.attr,
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
@@ -480,6 +483,8 @@ cache_default_attrs_is_visible(struct kobject *kobj,
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;
 
+       if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+               return mode;
        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
index 3a98702b7445f747d9a710f16753d29e312b17a6..3a2ca0f79daf281c5940222f6b9da179b35f64f3 100644 (file)
@@ -930,7 +930,7 @@ static void __init acpi_cpufreq_boost_init(void)
 
 static void acpi_cpufreq_boost_exit(void)
 {
-       if (acpi_cpufreq_online >= 0)
+       if (acpi_cpufreq_online > 0)
                cpuhp_remove_state_nocalls(acpi_cpufreq_online);
 }
 
index 176e84cc3991994871d6eee9a1b8927a3bdf95fa..0cb9040eca49c3c84fb852f674f649f213bfd3e7 100644 (file)
@@ -107,7 +107,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 }
 
 #ifdef CONFIG_REGULATOR
-static void __init s3c64xx_cpufreq_config_regulator(void)
+static void s3c64xx_cpufreq_config_regulator(void)
 {
        int count, v, i, found;
        struct cpufreq_frequency_table *freq;
index 4f973a9c7b8714d55229b6f712522f50fcfb1fdf..8ec1967a850b86361b18535955fd161a0808f6d4 100644 (file)
@@ -305,8 +305,9 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
        GOP_VBIOS_CONTENT *vbios;
        VFCT_IMAGE_HEADER *vhdr;
 
-       if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+       if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
                return false;
+       tbl_size = hdr->length;
        if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
                DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
                goto out_unmap;
index e564442b6393f82520aad6e2f47f99e23f7c68bc..b4e4ec630e8cfd5d38d55b61199f3cc60269e0c2 100644 (file)
@@ -1944,9 +1944,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v6_0_lock_cursor(crtc, true);
 
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height ||
-           hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -1955,8 +1953,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v6_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_width = width;
-               amdgpu_crtc->cursor_height = height;
                amdgpu_crtc->cursor_hot_x = hot_x;
                amdgpu_crtc->cursor_hot_y = hot_y;
        }
index 6ce7fb42dbef68232a8325d720d54bf99b3c8783..584abe834a3ce4658de0e54f6aae416729ad868f 100644 (file)
@@ -2438,8 +2438,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v8_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_width = width;
-               amdgpu_crtc->cursor_height = height;
                amdgpu_crtc->cursor_hot_x = hot_x;
                amdgpu_crtc->cursor_hot_y = hot_y;
        }
index 558640aee15a178f9e79ba786bd67bfc12c7d197..b323f5ef64d217813683722ec4cd25282e9185ea 100644 (file)
@@ -411,244 +411,587 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
                break;
        }
 
-       if (adev->asic_type == CHIP_VERDE ||
-           adev->asic_type == CHIP_OLAND ||
-           adev->asic_type == CHIP_HAINAN) {
+       if (adev->asic_type == CHIP_VERDE) {
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
                        switch (reg_offset) {
                        case 0:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 1:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 2:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 3:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
                        case 4:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16));
                                break;
                        case 5:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
                                break;
                        case 6:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
                                break;
                        case 7:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
                        case 8:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
                                break;
                        case 9:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16));
                                break;
                        case 10:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 11:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 12:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 13:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16));
                                break;
                        case 14:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 15:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 16:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
                        case 17:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 18:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16));
+                               break;
+                       case 19:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
                                                 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
                                                 NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 20:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
                        case 21:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
                                break;
                        case 22:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 23:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 24:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 25:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 26:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 27:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 28:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 29:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 30:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       default:
+                               continue;
+                       }
+                       adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+                       WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+               }
+       } else if (adev->asic_type == CHIP_OLAND ||
+           adev->asic_type == CHIP_HAINAN) {
+               for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+                       switch (reg_offset) {
+                       case 0:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 1:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 2:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 3:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 4:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 5:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 6:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 7:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
+                               break;
+                       case 9:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 10:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 11:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 12:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 13:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 14:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 15:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 16:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
+                               break;
+                       case 17:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
                                                 NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 18:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P2));
+                               break;
+                       case 19:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 20:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 21:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
+                               break;
+                       case 22:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
                                break;
                        case 23:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
                                break;
                        case 24:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_8_BANK));
                                break;
                        case 25:
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
-                                                NUM_BANKS(ADDR_SURF_8_BANK) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 26:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 27:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 28:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 29:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 30:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P2) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
                                break;
                        default:
-                               gb_tile_moden = 0;
-                               break;
+                               continue;
                        }
                        adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
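In the hunks above, each gb_tile_moden value is a 32-bit register image assembled by OR-ing shifted bitfields (MICRO_TILE_MODE, ARRAY_MODE, PIPE_CONFIG, TILE_SPLIT, BANK_WIDTH, BANK_HEIGHT, MACRO_TILE_ASPECT, NUM_BANKS), cached in adev->gfx.config.tile_mode_array[reg_offset], and written to mmGB_TILE_MODE0 + reg_offset. One behavioral change worth noting: the old default arm wrote gb_tile_moden = 0 for unhandled offsets, while the new default uses continue, so unknown offsets are neither cached nor written. A minimal sketch of the packing pattern follows; the EX_* field shifts are hypothetical placeholders for illustration, not the real GFX6 GB_TILE_MODEn bit layout:

#include <stdint.h>

/* Hypothetical field shifts, for illustration only; the actual
 * GB_TILE_MODEn layout comes from the GFX6 register headers. */
#define EX_MICRO_TILE_MODE(x)	((uint32_t)(x) << 0)
#define EX_ARRAY_MODE(x)	((uint32_t)(x) << 2)
#define EX_PIPE_CONFIG(x)	((uint32_t)(x) << 6)
#define EX_NUM_BANKS(x)		((uint32_t)(x) << 20)

/* Mirrors the pattern used in the cases above: OR disjoint shifted
 * fields into one 32-bit image, which the driver would then cache
 * and write with WREG32(mmGB_TILE_MODE0 + reg_offset, ...). */
static uint32_t ex_pack_tile_mode(uint32_t micro, uint32_t array,
				  uint32_t pipe, uint32_t banks)
{
	return EX_MICRO_TILE_MODE(micro) |
	       EX_ARRAY_MODE(array) |
	       EX_PIPE_CONFIG(pipe) |
	       EX_NUM_BANKS(banks);
}

Because the fields occupy disjoint bit ranges, the order of the OR terms does not affect the resulting value; the reordering in this commit (MICRO_TILE_MODE first, NUM_BANKS last) only normalizes the source layout across cases, while the substantive changes are the per-case field values and the default-arm handling.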
@@ -656,239 +999,291 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
        } else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
                for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
                        switch (reg_offset) {
-                       case 0:  /* non-AA compressed depth or any compressed stencil */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 0:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 1:  /* 2xAA/4xAA compressed depth only */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 1:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 2:  /* 8xAA compressed depth only */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 2:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 3:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
-                       case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                       case 4:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
                                break;
-                       case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 5:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                       case 6:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                       case 7:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 8:  /* 1D and 1D Array Surfaces */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                       case 8:
+                               gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED));
                                break;
-                       case 9:  /* Displayable maps. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                       case 9:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
                                break;
-                       case 10:  /* Display 8bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                       case 10:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 11:  /* Display 16bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                       case 11:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 12:  /* Display 32bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                       case 12:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 13:  /* Thin. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                       case 13:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
                                break;
-                       case 14:  /* Thin 8 bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 14:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 15:  /* Thin 16 bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 15:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 16:  /* Thin 32 bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 16:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK));
                                break;
-                       case 17:  /* Thin 64 bpp. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 17:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(split_equal_to_row_size) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
                                                 NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
+                               break;
+                       case 18:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16));
+                               break;
+                       case 19:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_16_BANK) |
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
-                       case 21:  /* 8 bpp PRT. */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 20:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THICK) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
-                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
                                                 NUM_BANKS(ADDR_SURF_16_BANK) |
-                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                TILE_SPLIT(split_equal_to_row_size));
                                break;
-                       case 22:  /* 16 bpp PRT */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 21:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
+                               break;
+                       case 22:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_4_BANK));
                                break;
-                       case 23:  /* 32 bpp PRT */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 23:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 24:  /* 64 bpp PRT */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                       case 24:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
-                                                NUM_BANKS(ADDR_SURF_16_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       case 25:  /* 128 bpp PRT */
-                               gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
-                                                MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
-                                                PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+                       case 25:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
                                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
-                                                NUM_BANKS(ADDR_SURF_8_BANK) |
                                                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
-                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
-                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
-                       default:
-                               gb_tile_moden = 0;
+                       case 26:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 27:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 28:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
                                break;
+                       case 29:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       case 30:
+                               gb_tile_moden = (MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                                                ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                                                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                                                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                                                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+                                                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+                                                NUM_BANKS(ADDR_SURF_2_BANK));
+                               break;
+                       default:
+                               continue;
                        }
                        adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
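Note on the tile-mode hunks above: each field macro (ARRAY_MODE, PIPE_CONFIG, NUM_BANKS, ...) shifts its argument into its own bit range of the GB_TILE_MODE word, so reordering the OR terms is cosmetic; the functional changes are the altered NUM_BANKS/BANK_HEIGHT/MACRO_TILE_ASPECT values and the new cases 18-20 and 26-30. The reworked `default:` now executes `continue` instead of writing 0, so reserved indices are left untouched in both tile_mode_array and the GB_TILE_MODE registers. A minimal sketch of the composition, using only names that appear in the hunk itself:

	u32 gb_tile_moden;

	gb_tile_moden = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |	/* micro layout */
			ARRAY_MODE(ARRAY_2D_TILED_THIN1) |		/* macro tiling */
			PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |		/* pipe layout */
			NUM_BANKS(ADDR_SURF_16_BANK);			/* bank count */
	WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);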
index 6324f67bdb1fac705d79e80c4d85ce097ec7358f..d0ec00986f3826c32957ab432c677c53c24091c2 100644 (file)
@@ -3949,8 +3949,12 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
        temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
        data = mmRLC_SRM_INDEX_CNTL_DATA_0;
        for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
-               amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
-               amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+               if (unique_indices[i] != 0) {
+                       amdgpu_mm_wreg(adev, temp + i,
+                                       unique_indices[i] & 0x3FFFF, false);
+                       amdgpu_mm_wreg(adev, data + i,
+                                       unique_indices[i] >> 20, false);
+               }
        }
        kfree(register_list_format);
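The guard added above skips table slots that still hold 0, so unused entries no longer program bogus index/data pairs into the RLC SRM registers. Assuming unique_indices really is an array of int (which the sizeof arithmetic implies), the open-coded `sizeof(unique_indices) / sizeof(int)` is equivalent to the kernel's ARRAY_SIZE() macro, and the loop could equally be written as this sketch:

	for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
		if (unique_indices[i] == 0)
			continue;	/* unused slot, don't program it */
		amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
		amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
	}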
 
@@ -3966,20 +3970,17 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
 {
        uint32_t data;
 
-       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
-                             AMD_PG_SUPPORT_GFX_SMG |
-                             AMD_PG_SUPPORT_GFX_DMG)) {
-               WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
+       WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
 
-               data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
-               data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
-               data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
-               data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
-               WREG32(mmRLC_PG_DELAY, data);
+       data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
+       data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
+       data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
+       data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
+       WREG32(mmRLC_PG_DELAY, data);
+
+       WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
+       WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
 
-               WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
-               WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
-       }
 }
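With the pg_flags test dropped here, gfx_v8_0_init_power_gating() now programs the poll count and RLC_PG_DELAY values unconditionally; the gating decision moves to its caller (see gfx_v8_0_init_pg() below, which keys on the ASIC type instead). For reference, REG_SET_FIELD() builds a register value one named field at a time, roughly as in this sketch (not code from this patch; the in-tree macro spells the mask and shift via token pasting):

	#define REG_SET_FIELD(orig, reg, field, val)			\
		(((orig) & ~reg##__##field##_MASK) |			\
		 (((val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))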
 
 static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
@@ -3996,41 +3997,37 @@ static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
 
 static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
 {
-       WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 1 : 0);
+       WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
 }
 
 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
 {
-       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
-                             AMD_PG_SUPPORT_GFX_SMG |
-                             AMD_PG_SUPPORT_GFX_DMG |
-                             AMD_PG_SUPPORT_CP |
-                             AMD_PG_SUPPORT_GDS |
-                             AMD_PG_SUPPORT_RLC_SMU_HS)) {
+       if ((adev->asic_type == CHIP_CARRIZO) ||
+           (adev->asic_type == CHIP_STONEY)) {
                gfx_v8_0_init_csb(adev);
                gfx_v8_0_init_save_restore_list(adev);
                gfx_v8_0_enable_save_restore_machine(adev);
-
-               if ((adev->asic_type == CHIP_CARRIZO) ||
-                   (adev->asic_type == CHIP_STONEY)) {
-                       WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
-                       gfx_v8_0_init_power_gating(adev);
-                       WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
-                       if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
-                               cz_enable_sck_slow_down_on_power_up(adev, true);
-                               cz_enable_sck_slow_down_on_power_down(adev, true);
-                       } else {
-                               cz_enable_sck_slow_down_on_power_up(adev, false);
-                               cz_enable_sck_slow_down_on_power_down(adev, false);
-                       }
-                       if (adev->pg_flags & AMD_PG_SUPPORT_CP)
-                               cz_enable_cp_power_gating(adev, true);
-                       else
-                               cz_enable_cp_power_gating(adev, false);
-               } else if (adev->asic_type == CHIP_POLARIS11) {
-                       gfx_v8_0_init_power_gating(adev);
+               WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+               gfx_v8_0_init_power_gating(adev);
+               WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+               if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+                       cz_enable_sck_slow_down_on_power_up(adev, true);
+                       cz_enable_sck_slow_down_on_power_down(adev, true);
+               } else {
+                       cz_enable_sck_slow_down_on_power_up(adev, false);
+                       cz_enable_sck_slow_down_on_power_down(adev, false);
                }
+               if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+                       cz_enable_cp_power_gating(adev, true);
+               else
+                       cz_enable_cp_power_gating(adev, false);
+       } else if (adev->asic_type == CHIP_POLARIS11) {
+               gfx_v8_0_init_csb(adev);
+               gfx_v8_0_init_save_restore_list(adev);
+               gfx_v8_0_enable_save_restore_machine(adev);
+               gfx_v8_0_init_power_gating(adev);
        }
+
 }
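Two fixes land in this hunk. First, CP_PG_DISABLE reads as an active-low control: writing 0 enables CP power gating, so cz_enable_cp_power_gating() has its polarity inverted to `enable ? 0 : 1`. Second, gfx_v8_0_init_pg() now dispatches on adev->asic_type rather than on pg_flags, with Carrizo/Stoney taking the full save-restore plus power-gating path and Polaris11 a reduced one that skips the jump table, SCK slow-down, and CP gating setup.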
 
 static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
@@ -5339,14 +5336,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
 
-       if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
-               return 0;
-
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
-               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
-                       cz_update_gfx_cg_power_gating(adev, enable);
+
+               cz_update_gfx_cg_power_gating(adev, enable);
 
                if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
                        gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
@@ -5791,25 +5785,49 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
                                          enum amd_clockgating_state state)
 {
-       uint32_t msg_id, pp_state;
+       uint32_t msg_id, pp_state = 0;
+       uint32_t pp_support_state = 0;
        void *pp_handle = adev->powerplay.pp_handle;
 
-       if (state == AMD_CG_STATE_UNGATE)
-               pp_state = 0;
-       else
-               pp_state = PP_STATE_CG | PP_STATE_LS;
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_CG,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_CG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_MG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
+
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_MG,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
        return 0;
 }
@@ -5817,43 +5835,98 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
 static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
                                          enum amd_clockgating_state state)
 {
-       uint32_t msg_id, pp_state;
+
+       uint32_t msg_id, pp_state = 0;
+       uint32_t pp_support_state = 0;
        void *pp_handle = adev->powerplay.pp_handle;
 
-       if (state == AMD_CG_STATE_UNGATE)
-               pp_state = 0;
-       else
-               pp_state = PP_STATE_CG | PP_STATE_LS;
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_CG,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_CG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_3D,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_3D,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+       if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+                       pp_support_state = PP_STATE_SUPPORT_LS;
+                       pp_state = PP_STATE_LS;
+               }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_MG,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+               if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
+                       pp_state |= PP_STATE_CG;
+               }
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
-                       PP_BLOCK_GFX_RLC,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                       pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_MG,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+               pp_support_state = PP_STATE_SUPPORT_LS;
 
-       msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_LS;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+                               PP_BLOCK_GFX_RLC,
+                               pp_support_state,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+               pp_support_state = PP_STATE_SUPPORT_LS;
+
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_LS;
+               msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
                        PP_BLOCK_GFX_CP,
-                       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+                       pp_support_state,
                        pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
        return 0;
 }
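Both gfx_v8_0_tonga_update_gfx_clock_gating() and the Polaris variant above now follow the same per-block pattern: build pp_support_state from only the CG/LS flags the ASIC actually advertises, request the matching pp_state (or nothing when ungating), and send one SMU message per block. The repeated stanzas could be factored into a helper along these lines; this is a hypothetical sketch, not part of the patch, built from identifiers the hunks already use:

	static void gfx_v8_0_send_cg_msg(struct amdgpu_device *adev,
					 uint32_t pp_block,
					 uint32_t cg_flag, uint32_t ls_flag,
					 enum amd_clockgating_state state)
	{
		uint32_t pp_support_state = 0, pp_state = 0, msg_id;

		if (!(adev->cg_flags & (cg_flag | ls_flag)))
			return;		/* block not supported on this ASIC */

		if (adev->cg_flags & ls_flag) {
			pp_support_state |= PP_STATE_SUPPORT_LS;
			pp_state |= PP_STATE_LS;
		}
		if (adev->cg_flags & cg_flag) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;	/* report support, request nothing */

		msg_id = PP_CG_MSG_ID(PP_GROUP_GFX, pp_block,
				      pp_support_state, pp_state);
		amd_set_clockgating_by_smu(adev->powerplay.pp_handle, msg_id);
	}

A call site would then read, e.g., gfx_v8_0_send_cg_msg(adev, PP_BLOCK_GFX_CG, AMD_CG_SUPPORT_GFX_CGCG, AMD_CG_SUPPORT_GFX_CGLS, state).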
index 3ed8ad8725b9cf09c965876a482b0699e5704058..c46b0159007d976406d64557316f45e045d82191 100644 (file)
 
 static const u32 tahiti_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
        0x260e, 0x0002021c, 0x00020200,
        0x031e, 0x00000080, 0x00000000,
-       0x340c, 0x000300c0, 0x00800040,
-       0x360c, 0x000300c0, 0x00800040,
+       0x340c, 0x000000c0, 0x00800040,
+       0x360c, 0x000000c0, 0x00800040,
        0x16ec, 0x000000f0, 0x00000070,
        0x16f0, 0x00200000, 0x50100000,
        0x1c0c, 0x31000311, 0x00000011,
@@ -60,7 +61,7 @@ static const u32 tahiti_golden_registers[] =
        0x22c4, 0x0000ff0f, 0x00000000,
        0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x2a00126a,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x07ffffff, 0x03000000,
        0x23a2, 0x01ff1f3f, 0x00000000,
@@ -73,7 +74,11 @@ static const u32 tahiti_golden_registers[] =
        0x2234, 0xffffffff, 0x000fff40,
        0x2235, 0x0000001f, 0x00000010,
        0x0504, 0x20000000, 0x20fffed8,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
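The golden-register tables in this file are {offset, and_mask, or_mask} triples. That makes the mask change at offset 0x000c meaningful: widening the and_mask from 0x000000ff to 0xffffffff turns the entry from a read-modify-write into a full overwrite, assuming the usual amdgpu_program_register_sequence() semantics, sketched here:

	u32 i, reg, and_mask, or_mask, tmp;

	for (i = 0; i < array_size; i += 3) {
		reg      = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask  = registers[i + 2];
		if (and_mask == 0xffffffff) {
			tmp = or_mask;		/* full overwrite */
		} else {
			tmp = RREG32(reg);	/* read-modify-write */
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}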
 
 static const u32 tahiti_golden_registers2[] =
@@ -83,16 +88,18 @@ static const u32 tahiti_golden_registers2[] =
 
 static const u32 tahiti_golden_rlc_registers[] =
 {
+       0x263e, 0xffffffff, 0x12011003,
        0x3109, 0xffffffff, 0x00601005,
        0x311f, 0xffffffff, 0x10104040,
        0x3122, 0xffffffff, 0x0100000a,
        0x30c5, 0xffffffff, 0x00000800,
        0x30c3, 0xffffffff, 0x800000f4,
-       0x3d2a, 0xffffffff, 0x00000000
+       0x3d2a, 0x00000008, 0x00000000
 };
 
 static const u32 pitcairn_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
@@ -110,7 +117,7 @@ static const u32 pitcairn_golden_registers[] =
        0x22c4, 0x0000ff0f, 0x00000000,
        0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x2a00126a,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x07ffffff, 0x03000000,
        0x2418, 0x0000007f, 0x00000020,
@@ -119,11 +126,16 @@ static const u32 pitcairn_golden_registers[] =
        0x2b04, 0xffffffff, 0x00000000,
        0x2b03, 0xffffffff, 0x32761054,
        0x2235, 0x0000001f, 0x00000010,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
 
 static const u32 pitcairn_golden_rlc_registers[] =
 {
+       0x263e, 0xffffffff, 0x12011003,
        0x3109, 0xffffffff, 0x00601004,
        0x311f, 0xffffffff, 0x10102020,
        0x3122, 0xffffffff, 0x01000020,
@@ -133,133 +145,134 @@ static const u32 pitcairn_golden_rlc_registers[] =
 
 static const u32 verde_pg_init[] =
 {
-       0xd4f, 0xffffffff, 0x40000,
-       0xd4e, 0xffffffff, 0x200010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x7007,
-       0xd4e, 0xffffffff, 0x300010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x400000,
-       0xd4e, 0xffffffff, 0x100010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x120200,
-       0xd4e, 0xffffffff, 0x500010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x1e1e16,
-       0xd4e, 0xffffffff, 0x600010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x171f1e,
-       0xd4e, 0xffffffff, 0x700010ff,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4f, 0xffffffff, 0x0,
-       0xd4e, 0xffffffff, 0x9ff,
-       0xd40, 0xffffffff, 0x0,
-       0xd41, 0xffffffff, 0x10000800,
-       0xd41, 0xffffffff, 0xf,
-       0xd41, 0xffffffff, 0xf,
-       0xd40, 0xffffffff, 0x4,
-       0xd41, 0xffffffff, 0x1000051e,
-       0xd41, 0xffffffff, 0xffff,
-       0xd41, 0xffffffff, 0xffff,
-       0xd40, 0xffffffff, 0x8,
-       0xd41, 0xffffffff, 0x80500,
-       0xd40, 0xffffffff, 0x12,
-       0xd41, 0xffffffff, 0x9050c,
-       0xd40, 0xffffffff, 0x1d,
-       0xd41, 0xffffffff, 0xb052c,
-       0xd40, 0xffffffff, 0x2a,
-       0xd41, 0xffffffff, 0x1053e,
-       0xd40, 0xffffffff, 0x2d,
-       0xd41, 0xffffffff, 0x10546,
-       0xd40, 0xffffffff, 0x30,
-       0xd41, 0xffffffff, 0xa054e,
-       0xd40, 0xffffffff, 0x3c,
-       0xd41, 0xffffffff, 0x1055f,
-       0xd40, 0xffffffff, 0x3f,
-       0xd41, 0xffffffff, 0x10567,
-       0xd40, 0xffffffff, 0x42,
-       0xd41, 0xffffffff, 0x1056f,
-       0xd40, 0xffffffff, 0x45,
-       0xd41, 0xffffffff, 0x10572,
-       0xd40, 0xffffffff, 0x48,
-       0xd41, 0xffffffff, 0x20575,
-       0xd40, 0xffffffff, 0x4c,
-       0xd41, 0xffffffff, 0x190801,
-       0xd40, 0xffffffff, 0x67,
-       0xd41, 0xffffffff, 0x1082a,
-       0xd40, 0xffffffff, 0x6a,
-       0xd41, 0xffffffff, 0x1b082d,
-       0xd40, 0xffffffff, 0x87,
-       0xd41, 0xffffffff, 0x310851,
-       0xd40, 0xffffffff, 0xba,
-       0xd41, 0xffffffff, 0x891,
-       0xd40, 0xffffffff, 0xbc,
-       0xd41, 0xffffffff, 0x893,
-       0xd40, 0xffffffff, 0xbe,
-       0xd41, 0xffffffff, 0x20895,
-       0xd40, 0xffffffff, 0xc2,
-       0xd41, 0xffffffff, 0x20899,
-       0xd40, 0xffffffff, 0xc6,
-       0xd41, 0xffffffff, 0x2089d,
-       0xd40, 0xffffffff, 0xca,
-       0xd41, 0xffffffff, 0x8a1,
-       0xd40, 0xffffffff, 0xcc,
-       0xd41, 0xffffffff, 0x8a3,
-       0xd40, 0xffffffff, 0xce,
-       0xd41, 0xffffffff, 0x308a5,
-       0xd40, 0xffffffff, 0xd3,
-       0xd41, 0xffffffff, 0x6d08cd,
-       0xd40, 0xffffffff, 0x142,
-       0xd41, 0xffffffff, 0x2000095a,
-       0xd41, 0xffffffff, 0x1,
-       0xd40, 0xffffffff, 0x144,
-       0xd41, 0xffffffff, 0x301f095b,
-       0xd40, 0xffffffff, 0x165,
-       0xd41, 0xffffffff, 0xc094d,
-       0xd40, 0xffffffff, 0x173,
-       0xd41, 0xffffffff, 0xf096d,
-       0xd40, 0xffffffff, 0x184,
-       0xd41, 0xffffffff, 0x15097f,
-       0xd40, 0xffffffff, 0x19b,
-       0xd41, 0xffffffff, 0xc0998,
-       0xd40, 0xffffffff, 0x1a9,
-       0xd41, 0xffffffff, 0x409a7,
-       0xd40, 0xffffffff, 0x1af,
-       0xd41, 0xffffffff, 0xcdc,
-       0xd40, 0xffffffff, 0x1b1,
-       0xd41, 0xffffffff, 0x800,
-       0xd42, 0xffffffff, 0x6c9b2000,
-       0xd44, 0xfc00, 0x2000,
-       0xd51, 0xffffffff, 0xfc0,
-       0xa35, 0x00000100, 0x100
+       0x0d4f, 0xffffffff, 0x40000,
+       0x0d4e, 0xffffffff, 0x200010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x7007,
+       0x0d4e, 0xffffffff, 0x300010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x400000,
+       0x0d4e, 0xffffffff, 0x100010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x120200,
+       0x0d4e, 0xffffffff, 0x500010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x1e1e16,
+       0x0d4e, 0xffffffff, 0x600010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x171f1e,
+       0x0d4e, 0xffffffff, 0x700010ff,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4f, 0xffffffff, 0x0,
+       0x0d4e, 0xffffffff, 0x9ff,
+       0x0d40, 0xffffffff, 0x0,
+       0x0d41, 0xffffffff, 0x10000800,
+       0x0d41, 0xffffffff, 0xf,
+       0x0d41, 0xffffffff, 0xf,
+       0x0d40, 0xffffffff, 0x4,
+       0x0d41, 0xffffffff, 0x1000051e,
+       0x0d41, 0xffffffff, 0xffff,
+       0x0d41, 0xffffffff, 0xffff,
+       0x0d40, 0xffffffff, 0x8,
+       0x0d41, 0xffffffff, 0x80500,
+       0x0d40, 0xffffffff, 0x12,
+       0x0d41, 0xffffffff, 0x9050c,
+       0x0d40, 0xffffffff, 0x1d,
+       0x0d41, 0xffffffff, 0xb052c,
+       0x0d40, 0xffffffff, 0x2a,
+       0x0d41, 0xffffffff, 0x1053e,
+       0x0d40, 0xffffffff, 0x2d,
+       0x0d41, 0xffffffff, 0x10546,
+       0x0d40, 0xffffffff, 0x30,
+       0x0d41, 0xffffffff, 0xa054e,
+       0x0d40, 0xffffffff, 0x3c,
+       0x0d41, 0xffffffff, 0x1055f,
+       0x0d40, 0xffffffff, 0x3f,
+       0x0d41, 0xffffffff, 0x10567,
+       0x0d40, 0xffffffff, 0x42,
+       0x0d41, 0xffffffff, 0x1056f,
+       0x0d40, 0xffffffff, 0x45,
+       0x0d41, 0xffffffff, 0x10572,
+       0x0d40, 0xffffffff, 0x48,
+       0x0d41, 0xffffffff, 0x20575,
+       0x0d40, 0xffffffff, 0x4c,
+       0x0d41, 0xffffffff, 0x190801,
+       0x0d40, 0xffffffff, 0x67,
+       0x0d41, 0xffffffff, 0x1082a,
+       0x0d40, 0xffffffff, 0x6a,
+       0x0d41, 0xffffffff, 0x1b082d,
+       0x0d40, 0xffffffff, 0x87,
+       0x0d41, 0xffffffff, 0x310851,
+       0x0d40, 0xffffffff, 0xba,
+       0x0d41, 0xffffffff, 0x891,
+       0x0d40, 0xffffffff, 0xbc,
+       0x0d41, 0xffffffff, 0x893,
+       0x0d40, 0xffffffff, 0xbe,
+       0x0d41, 0xffffffff, 0x20895,
+       0x0d40, 0xffffffff, 0xc2,
+       0x0d41, 0xffffffff, 0x20899,
+       0x0d40, 0xffffffff, 0xc6,
+       0x0d41, 0xffffffff, 0x2089d,
+       0x0d40, 0xffffffff, 0xca,
+       0x0d41, 0xffffffff, 0x8a1,
+       0x0d40, 0xffffffff, 0xcc,
+       0x0d41, 0xffffffff, 0x8a3,
+       0x0d40, 0xffffffff, 0xce,
+       0x0d41, 0xffffffff, 0x308a5,
+       0x0d40, 0xffffffff, 0xd3,
+       0x0d41, 0xffffffff, 0x6d08cd,
+       0x0d40, 0xffffffff, 0x142,
+       0x0d41, 0xffffffff, 0x2000095a,
+       0x0d41, 0xffffffff, 0x1,
+       0x0d40, 0xffffffff, 0x144,
+       0x0d41, 0xffffffff, 0x301f095b,
+       0x0d40, 0xffffffff, 0x165,
+       0x0d41, 0xffffffff, 0xc094d,
+       0x0d40, 0xffffffff, 0x173,
+       0x0d41, 0xffffffff, 0xf096d,
+       0x0d40, 0xffffffff, 0x184,
+       0x0d41, 0xffffffff, 0x15097f,
+       0x0d40, 0xffffffff, 0x19b,
+       0x0d41, 0xffffffff, 0xc0998,
+       0x0d40, 0xffffffff, 0x1a9,
+       0x0d41, 0xffffffff, 0x409a7,
+       0x0d40, 0xffffffff, 0x1af,
+       0x0d41, 0xffffffff, 0xcdc,
+       0x0d40, 0xffffffff, 0x1b1,
+       0x0d41, 0xffffffff, 0x800,
+       0x0d42, 0xffffffff, 0x6c9b2000,
+       0x0d44, 0xfc00, 0x2000,
+       0x0d51, 0xffffffff, 0xfc0,
+       0x0a35, 0x00000100, 0x100
 };
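The verde_pg_init rewrite above is purely cosmetic: every offset is zero-padded to four hex digits (0xd4f becomes 0x0d4f and so on) with masks and values unchanged, matching the formatting of the other tables in this file. The same padding is applied to the mgcg_cgcg_init tables further down.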
 
 static const u32 verde_golden_rlc_registers[] =
 {
+       0x263e, 0xffffffff, 0x02010002,
        0x3109, 0xffffffff, 0x033f1005,
        0x311f, 0xffffffff, 0x10808020,
        0x3122, 0xffffffff, 0x00800008,
@@ -269,65 +282,45 @@ static const u32 verde_golden_rlc_registers[] =
 
 static const u32 verde_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
        0x260e, 0x0002021c, 0x00020200,
        0x031e, 0x00000080, 0x00000000,
        0x340c, 0x000300c0, 0x00800040,
-       0x340c, 0x000300c0, 0x00800040,
-       0x360c, 0x000300c0, 0x00800040,
        0x360c, 0x000300c0, 0x00800040,
        0x16ec, 0x000000f0, 0x00000070,
        0x16f0, 0x00200000, 0x50100000,
-
        0x1c0c, 0x31000311, 0x00000011,
        0x0ab9, 0x00073ffe, 0x000022a2,
-       0x0ab9, 0x00073ffe, 0x000022a2,
-       0x0ab9, 0x00073ffe, 0x000022a2,
-       0x0903, 0x000007ff, 0x00000000,
-       0x0903, 0x000007ff, 0x00000000,
        0x0903, 0x000007ff, 0x00000000,
        0x2285, 0xf000001f, 0x00000007,
-       0x2285, 0xf000001f, 0x00000007,
-       0x2285, 0xf000001f, 0x00000007,
-       0x2285, 0xffffffff, 0x00ffffff,
+       0x22c9, 0xffffffff, 0x00ffffff,
        0x22c4, 0x0000ff0f, 0x00000000,
-
        0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x0000124a,
-       0xa0d4, 0x3f3f3fff, 0x0000124a,
-       0xa0d4, 0x3f3f3fff, 0x0000124a,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x07ffffff, 0x03000000,
-       0x2440, 0x07ffffff, 0x03000000,
-       0x23a2, 0x01ff1f3f, 0x00000000,
-       0x23a3, 0x01ff1f3f, 0x00000000,
        0x23a2, 0x01ff1f3f, 0x00000000,
-       0x23a1, 0x01ff1f3f, 0x00000000,
-       0x23a1, 0x01ff1f3f, 0x00000000,
-
        0x23a1, 0x01ff1f3f, 0x00000000,
        0x2418, 0x0000007f, 0x00000020,
        0x2542, 0x00010000, 0x00010000,
-       0x2b01, 0x000003ff, 0x00000003,
-       0x2b05, 0x000003ff, 0x00000003,
        0x2b05, 0x000003ff, 0x00000003,
        0x2b04, 0xffffffff, 0x00000000,
-       0x2b04, 0xffffffff, 0x00000000,
-       0x2b04, 0xffffffff, 0x00000000,
-       0x2b03, 0xffffffff, 0x00001032,
-       0x2b03, 0xffffffff, 0x00001032,
        0x2b03, 0xffffffff, 0x00001032,
        0x2235, 0x0000001f, 0x00000010,
-       0x2235, 0x0000001f, 0x00000010,
-       0x2235, 0x0000001f, 0x00000010,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
 
 static const u32 oland_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
@@ -336,7 +329,7 @@ static const u32 oland_golden_registers[] =
        0x340c, 0x000300c0, 0x00800040,
        0x360c, 0x000300c0, 0x00800040,
        0x16ec, 0x000000f0, 0x00000070,
-       0x16f9, 0x00200000, 0x50100000,
+       0x16f0, 0x00200000, 0x50100000,
        0x1c0c, 0x31000311, 0x00000011,
        0x0ab9, 0x00073ffe, 0x000022a2,
        0x0903, 0x000007ff, 0x00000000,
@@ -345,7 +338,7 @@ static const u32 oland_golden_registers[] =
        0x22c4, 0x0000ff0f, 0x00000000,
        0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x00000082,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x07ffffff, 0x03000000,
        0x2418, 0x0000007f, 0x00000020,
@@ -354,11 +347,16 @@ static const u32 oland_golden_registers[] =
        0x2b04, 0xffffffff, 0x00000000,
        0x2b03, 0xffffffff, 0x00003210,
        0x2235, 0x0000001f, 0x00000010,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
 
 static const u32 oland_golden_rlc_registers[] =
 {
+       0x263e, 0xffffffff, 0x02010002,
        0x3109, 0xffffffff, 0x00601005,
        0x311f, 0xffffffff, 0x10104040,
        0x3122, 0xffffffff, 0x0100000a,
@@ -368,22 +366,27 @@ static const u32 oland_golden_rlc_registers[] =
 
 static const u32 hainan_golden_registers[] =
 {
+       0x17bc, 0x00000030, 0x00000011,
        0x2684, 0x00010000, 0x00018208,
        0x260c, 0xffffffff, 0x00000000,
        0x260d, 0xf00fffff, 0x00000400,
        0x260e, 0x0002021c, 0x00020200,
-       0x4595, 0xff000fff, 0x00000100,
+       0x031e, 0x00000080, 0x00000000,
+       0x3430, 0xff000fff, 0x00000100,
        0x340c, 0x000300c0, 0x00800040,
        0x3630, 0xff000fff, 0x00000100,
        0x360c, 0x000300c0, 0x00800040,
+       0x16ec, 0x000000f0, 0x00000070,
+       0x16f0, 0x00200000, 0x50100000,
+       0x1c0c, 0x31000311, 0x00000011,
        0x0ab9, 0x00073ffe, 0x000022a2,
        0x0903, 0x000007ff, 0x00000000,
        0x2285, 0xf000001f, 0x00000007,
        0x22c9, 0xffffffff, 0x00ffffff,
        0x22c4, 0x0000ff0f, 0x00000000,
-       0xa393, 0x07ffffff, 0x4e000000,
+       0xa293, 0x07ffffff, 0x4e000000,
        0xa0d4, 0x3f3f3fff, 0x00000000,
-       0x000c, 0x000000ff, 0x0040,
+       0x000c, 0xffffffff, 0x0040,
        0x000d, 0x00000040, 0x00004040,
        0x2440, 0x03e00000, 0x03600000,
        0x2418, 0x0000007f, 0x00000020,
@@ -392,12 +395,16 @@ static const u32 hainan_golden_registers[] =
        0x2b04, 0xffffffff, 0x00000000,
        0x2b03, 0xffffffff, 0x00003210,
        0x2235, 0x0000001f, 0x00000010,
-       0x0570, 0x000c0fc0, 0x000c0400
+       0x0570, 0x000c0fc0, 0x000c0400,
+       0x052c, 0x0fffffff, 0xffffffff,
+       0x052d, 0x0fffffff, 0x0fffffff,
+       0x052e, 0x0fffffff, 0x0fffffff,
+       0x052f, 0x0fffffff, 0x0fffffff
 };
 
 static const u32 hainan_golden_registers2[] =
 {
-       0x263e, 0xffffffff, 0x02010001
+       0x263e, 0xffffffff, 0x2011003
 };
 
 static const u32 tahiti_mgcg_cgcg_init[] =
@@ -513,18 +520,18 @@ static const u32 tahiti_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x40b, 0x00000101, 0x00000000,
-       0x82a, 0xffffffff, 0x00000104,
-       0x993, 0x000c0000, 0x000c0000,
-       0x992, 0x000c0000, 0x000c0000,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x040b, 0x00000101, 0x00000000,
+       0x082a, 0xffffffff, 0x00000104,
+       0x0993, 0x000c0000, 0x000c0000,
+       0x0992, 0x000c0000, 0x000c0000,
        0x1579, 0xff000fff, 0x00000100,
        0x157a, 0x00000001, 0x00000001,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -612,16 +619,16 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x40b, 0x00000101, 0x00000000,
-       0x82a, 0xffffffff, 0x00000104,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x040b, 0x00000101, 0x00000000,
+       0x082a, 0xffffffff, 0x00000104,
        0x1579, 0xff000fff, 0x00000100,
        0x157a, 0x00000001, 0x00000001,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -709,18 +716,18 @@ static const u32 verde_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x40b, 0x00000101, 0x00000000,
-       0x82a, 0xffffffff, 0x00000104,
-       0x993, 0x000c0000, 0x000c0000,
-       0x992, 0x000c0000, 0x000c0000,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x040b, 0x00000101, 0x00000000,
+       0x082a, 0xffffffff, 0x00000104,
+       0x0993, 0x000c0000, 0x000c0000,
+       0x0992, 0x000c0000, 0x000c0000,
        0x1579, 0xff000fff, 0x00000100,
        0x157a, 0x00000001, 0x00000001,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -788,18 +795,18 @@ static const u32 oland_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x40b, 0x00000101, 0x00000000,
-       0x82a, 0xffffffff, 0x00000104,
-       0x993, 0x000c0000, 0x000c0000,
-       0x992, 0x000c0000, 0x000c0000,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x040b, 0x00000101, 0x00000000,
+       0x082a, 0xffffffff, 0x00000104,
+       0x0993, 0x000c0000, 0x000c0000,
+       0x0992, 0x000c0000, 0x000c0000,
        0x1579, 0xff000fff, 0x00000100,
        0x157a, 0x00000001, 0x00000001,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -867,15 +874,15 @@ static const u32 hainan_mgcg_cgcg_init[] =
        0x21c2, 0xffffffff, 0x00900100,
        0x311e, 0xffffffff, 0x00000080,
        0x3101, 0xffffffff, 0x0020003f,
-       0xc, 0xffffffff, 0x0000001c,
-       0xd, 0x000f0000, 0x000f0000,
-       0x583, 0xffffffff, 0x00000100,
-       0x409, 0xffffffff, 0x00000100,
-       0x82a, 0xffffffff, 0x00000104,
-       0x993, 0x000c0000, 0x000c0000,
-       0x992, 0x000c0000, 0x000c0000,
-       0xbd4, 0x00000001, 0x00000001,
-       0xc33, 0xc0000fff, 0x00000104,
+       0x000c, 0xffffffff, 0x0000001c,
+       0x000d, 0x000f0000, 0x000f0000,
+       0x0583, 0xffffffff, 0x00000100,
+       0x0409, 0xffffffff, 0x00000100,
+       0x082a, 0xffffffff, 0x00000104,
+       0x0993, 0x000c0000, 0x000c0000,
+       0x0992, 0x000c0000, 0x000c0000,
+       0x0bd4, 0x00000001, 0x00000001,
+       0x0c33, 0xc0000fff, 0x00000104,
        0x3079, 0x00000001, 0x00000001,
        0x3430, 0xfffffff0, 0x00000100,
        0x3630, 0xfffffff0, 0x00000100
@@ -1179,6 +1186,8 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
                        adev->pg_flags = 0;
+               adev->external_rev_id = (adev->rev_id == 0) ? 1 :
+                                       (adev->rev_id == 1) ? 5 : 6;
                break;
        case CHIP_PITCAIRN:
                adev->cg_flags =
@@ -1198,6 +1207,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
                adev->pg_flags = 0;
+               adev->external_rev_id = adev->rev_id + 20;
                break;
 
        case CHIP_VERDE:
@@ -1219,7 +1229,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_MGCG;
                adev->pg_flags = 0;
                //???
-               adev->external_rev_id = adev->rev_id + 0x14;
+               adev->external_rev_id = adev->rev_id + 40;
                break;
        case CHIP_OLAND:
                adev->cg_flags =
@@ -1238,6 +1248,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
                adev->pg_flags = 0;
+               adev->external_rev_id = 60;
                break;
        case CHIP_HAINAN:
                adev->cg_flags =
@@ -1255,6 +1266,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
                adev->pg_flags = 0;
+               adev->external_rev_id = 70;
                break;
 
        default:
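The new external_rev_id assignments give each SI part a disjoint range: Tahiti maps rev 0/1/other to 1/5/6, Pitcairn starts at 20, Verde at 40 (replacing rev_id + 0x14, which is 20 decimal and would have collided with Pitcairn), Oland is fixed at 60 and Hainan at 70.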
index 9f771f4ffcb71bf1267ff3aae9c5853fb944e149..bf088d6d9bf1f96430d7afda1ec3fbeae6e82768 100644 (file)
@@ -932,18 +932,64 @@ static int vi_common_early_init(void *handle)
                adev->external_rev_id = adev->rev_id + 0x3c;
                break;
        case CHIP_TONGA:
-               adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
-               adev->pg_flags = AMD_PG_SUPPORT_UVD;
+               adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_ROM_MGCG |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_DRM_LS |
+                       AMD_CG_SUPPORT_UVD_MGCG;
+               adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x14;
                break;
        case CHIP_POLARIS11:
-               adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+               adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_3D_CGCG |
+                       AMD_CG_SUPPORT_GFX_3D_CGLS |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_BIF_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_ROM_MGCG |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_DRM_LS |
+                       AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_VCE_MGCG;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x5A;
                break;
        case CHIP_POLARIS10:
-               adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+               adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
+                       AMD_CG_SUPPORT_GFX_3D_CGCG |
+                       AMD_CG_SUPPORT_GFX_3D_CGLS |
+                       AMD_CG_SUPPORT_SDMA_MGCG |
+                       AMD_CG_SUPPORT_SDMA_LS |
+                       AMD_CG_SUPPORT_BIF_MGCG |
+                       AMD_CG_SUPPORT_BIF_LS |
+                       AMD_CG_SUPPORT_HDP_MGCG |
+                       AMD_CG_SUPPORT_HDP_LS |
+                       AMD_CG_SUPPORT_ROM_MGCG |
+                       AMD_CG_SUPPORT_MC_MGCG |
+                       AMD_CG_SUPPORT_MC_LS |
+                       AMD_CG_SUPPORT_DRM_LS |
+                       AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_VCE_MGCG;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x50;
@@ -971,6 +1017,7 @@ static int vi_common_early_init(void *handle)
                        adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
                                AMD_PG_SUPPORT_GFX_SMG |
                                AMD_PG_SUPPORT_GFX_PIPELINE |
+                               AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_UVD |
                                AMD_PG_SUPPORT_VCE;
                }
@@ -996,6 +1043,7 @@ static int vi_common_early_init(void *handle)
                adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
                        AMD_PG_SUPPORT_GFX_SMG |
                        AMD_PG_SUPPORT_GFX_PIPELINE |
+                       AMD_PG_SUPPORT_CP |
                        AMD_PG_SUPPORT_UVD |
                        AMD_PG_SUPPORT_VCE;
                adev->external_rev_id = adev->rev_id + 0x61;
@@ -1155,57 +1203,118 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
 static int vi_common_set_clockgating_state_by_smu(void *handle,
                                           enum amd_clockgating_state state)
 {
-       uint32_t msg_id, pp_state;
+       uint32_t msg_id, pp_state = 0;
+       uint32_t pp_support_state = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        void *pp_handle = adev->powerplay.pp_handle;
 
-       if (state == AMD_CG_STATE_UNGATE)
-               pp_state = 0;
-       else
-               pp_state = PP_STATE_CG | PP_STATE_LS;
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_MC,
-                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_SDMA,
-                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_HDP,
-                      PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_BIF,
-                      PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_BIF,
-                      PP_STATE_SUPPORT_CG,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_DRM,
-                      PP_STATE_SUPPORT_LS,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
-
-       msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
-                      PP_BLOCK_SYS_ROM,
-                      PP_STATE_SUPPORT_CG,
-                      pp_state);
-       amd_set_clockgating_by_smu(pp_handle, msg_id);
+       if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
+                       pp_support_state = AMD_CG_SUPPORT_MC_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
+                       pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_MC,
+                              pp_support_state,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
+                       pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
+                       pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_SDMA,
+                              pp_support_state,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
+               if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
+                       pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+                       pp_state = PP_STATE_LS;
+               }
+               if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
+                       pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+                       pp_state |= PP_STATE_CG;
+               }
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_HDP,
+                              pp_support_state,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
 
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_LS;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_BIF,
+                              PP_STATE_SUPPORT_LS,
+                               pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+       if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_CG;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_BIF,
+                              PP_STATE_SUPPORT_CG,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
+
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_LS;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_DRM,
+                              PP_STATE_SUPPORT_LS,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
+
+       if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
+
+               if (state == AMD_CG_STATE_UNGATE)
+                       pp_state = 0;
+               else
+                       pp_state = PP_STATE_CG;
+
+               msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+                              PP_BLOCK_SYS_ROM,
+                              PP_STATE_SUPPORT_CG,
+                              pp_state);
+               amd_set_clockgating_by_smu(pp_handle, msg_id);
+       }
        return 0;
 }
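
The rewritten vi_common_set_clockgating_state_by_smu() applies one pattern per IP block: collect the supported LS/CG bits into pp_support_state, derive the requested pp_state, force it to zero when ungating, then send a single PP_CG_MSG_ID to the SMU. A hedged sketch of that pattern as a helper, covering the MC/SDMA/HDP blocks that carry both an LS and a CG flag (vi_set_sys_block_cg() is a name invented here, not part of the patch):

	/* Sketch only: vi_set_sys_block_cg() is hypothetical, factored out of
	 * the repeated per-block logic above.
	 */
	static void vi_set_sys_block_cg(struct amdgpu_device *adev, void *pp_handle,
					enum amd_clockgating_state state,
					u32 block, u32 ls_flag, u32 cg_flag)
	{
		u32 pp_support_state = 0, pp_state = 0, msg_id;

		if (!(adev->cg_flags & (ls_flag | cg_flag)))
			return;				/* block not supported */
		if (adev->cg_flags & ls_flag) {
			pp_support_state |= ls_flag;	/* advertise LS support */
			pp_state |= PP_STATE_LS;
		}
		if (adev->cg_flags & cg_flag) {
			pp_support_state |= cg_flag;	/* advertise CG support */
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;			/* ungate clears everything */
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, block, pp_support_state, pp_state);
		amd_set_clockgating_by_smu(pp_handle, msg_id);
	}

With such a helper the three big branches above would reduce to calls like vi_set_sys_block_cg(adev, pp_handle, state, PP_BLOCK_SYS_MC, AMD_CG_SUPPORT_MC_LS, AMD_CG_SUPPORT_MC_MGCG).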
 
index d1986276dbbd71cc144a48ec52bb14b908095509..c02469ada9f131f417e2a4fdb1d0c6b1fac4eafa 100644 (file)
@@ -126,6 +126,10 @@ enum amd_vce_level {
 #define AMD_CG_SUPPORT_HDP_LS                  (1 << 15)
 #define AMD_CG_SUPPORT_HDP_MGCG                        (1 << 16)
 #define AMD_CG_SUPPORT_ROM_MGCG                        (1 << 17)
+#define AMD_CG_SUPPORT_DRM_LS                  (1 << 18)
+#define AMD_CG_SUPPORT_BIF_MGCG                        (1 << 19)
+#define AMD_CG_SUPPORT_GFX_3D_CGCG             (1 << 20)
+#define AMD_CG_SUPPORT_GFX_3D_CGLS             (1 << 21)
 
 /* PG flags */
 #define AMD_PG_SUPPORT_GFX_PG                  (1 << 0)
index 904beaa932d03f0b5ce7d89a13d16729582052e0..f75c6421db6239c9435ed39dc7d6244d13894920 100644 (file)
@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
        ast_write32(ast, 0x10000, 0xfc600309);
 
        do {
-               ;
+               if (pci_channel_offline(dev->pdev))
+                       return -EIO;
        } while (ast_read32(ast, 0x10000) != 0x01);
        data = ast_read32(ast, 0x10004);
 
@@ -428,7 +429,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
        ast_detect_chip(dev, &need_post);
 
        if (ast->chip != AST1180) {
-               ast_get_dram_info(dev);
+               ret = ast_get_dram_info(dev);
+               if (ret)
+                       goto out_free;
                ast->vram_size = ast_get_vram_info(dev);
                DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
        }
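
The unpatched loop could spin forever if the ready value never appears, e.g. after surprise removal. The fix escapes when the PCI channel is offline and lets ast_driver_load() propagate the error. A further hardening step, purely illustrative and not part of the patch, would also bound the poll by iteration count:

	/* Hypothetical variant, not in the patch: also bail out if a wedged
	 * device never becomes ready even though the channel stays online.
	 */
	int tries = 1000000;
	do {
		if (pci_channel_offline(dev->pdev))
			return -EIO;		/* device is gone */
		if (--tries == 0)
			return -ETIMEDOUT;	/* hardware never came ready */
	} while (ast_read32(ast, 0x10000) != 0x01);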
index 56002a52936dcfc4a7dd5539973ce316353d0b1c..243224aeabf82f111ab14c4d6995ce868c0d36c1 100644 (file)
@@ -3509,6 +3509,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+                     u32 reply_mask, u32 reply, int timeout_base_ms);
 
 /* intel_sideband.c */
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
index 412f3513f269b4a96217f814aa0d882a27a3e1e5..4a31b7a891ecaf3e2732c9f2d6ce62fa633419f6 100644 (file)
@@ -174,21 +174,35 @@ static struct sg_table *
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping = obj->base.filp->f_mapping;
-       char *vaddr = obj->phys_handle->vaddr;
+       drm_dma_handle_t *phys;
        struct sg_table *st;
        struct scatterlist *sg;
+       char *vaddr;
        int i;
 
        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return ERR_PTR(-EINVAL);
 
+       /* Always aligning to the object size allows a single allocation
+        * to handle all possible callers, and given typical object sizes,
+        * the alignment of the buddy allocation will naturally match.
+        */
+       phys = drm_pci_alloc(obj->base.dev,
+                            obj->base.size,
+                            roundup_pow_of_two(obj->base.size));
+       if (!phys)
+               return ERR_PTR(-ENOMEM);
+
+       vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;
 
                page = shmem_read_mapping_page(mapping, i);
-               if (IS_ERR(page))
-                       return ERR_CAST(page);
+               if (IS_ERR(page)) {
+                       st = ERR_CAST(page);
+                       goto err_phys;
+               }
 
                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
@@ -202,21 +216,29 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
-       if (st == NULL)
-               return ERR_PTR(-ENOMEM);
+       if (!st) {
+               st = ERR_PTR(-ENOMEM);
+               goto err_phys;
+       }
 
        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
-               return ERR_PTR(-ENOMEM);
+               st = ERR_PTR(-ENOMEM);
+               goto err_phys;
        }
 
        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;
 
-       sg_dma_address(sg) = obj->phys_handle->busaddr;
+       sg_dma_address(sg) = phys->busaddr;
        sg_dma_len(sg) = obj->base.size;
 
+       obj->phys_handle = phys;
+       return st;
+
+err_phys:
+       drm_pci_free(obj->base.dev, phys);
        return st;
 }
 
@@ -272,12 +294,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 
        sg_free_table(pages);
        kfree(pages);
+
+       drm_pci_free(obj->base.dev, obj->phys_handle);
 }
 
 static void
 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
 {
-       drm_pci_free(obj->base.dev, obj->phys_handle);
        i915_gem_object_unpin_pages(obj);
 }
 
@@ -538,15 +561,13 @@ int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
 {
-       drm_dma_handle_t *phys;
        int ret;
 
-       if (obj->phys_handle) {
-               if ((unsigned long)obj->phys_handle->vaddr & (align -1))
-                       return -EBUSY;
+       if (align > obj->base.size)
+               return -EINVAL;
 
+       if (obj->ops == &i915_gem_phys_ops)
                return 0;
-       }
 
        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;
@@ -562,12 +583,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
        if (obj->mm.pages)
                return -EBUSY;
 
-       /* create a new object */
-       phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
-       if (!phys)
-               return -ENOMEM;
-
-       obj->phys_handle = phys;
        obj->ops = &i915_gem_phys_ops;
 
        return i915_gem_object_pin_pages(obj);
@@ -2326,7 +2341,8 @@ static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       int page_count, i;
+       const unsigned long page_count = obj->base.size / PAGE_SIZE;
+       unsigned long i;
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
@@ -2352,7 +2368,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        if (st == NULL)
                return ERR_PTR(-ENOMEM);
 
-       page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
@@ -2411,8 +2427,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        i915_sg_trim(st);
 
        ret = i915_gem_gtt_prepare_pages(obj, st);
-       if (ret)
-               goto err_pages;
+       if (ret) {
+               /* DMA remapping failed? One possible cause is that
+                * it could not reserve enough large entries; asking
+                * for PAGE_SIZE chunks instead may be helpful.
+                */
+               if (max_segment > PAGE_SIZE) {
+                       for_each_sgt_page(page, sgt_iter, st)
+                               put_page(page);
+                       sg_free_table(st);
+
+                       max_segment = PAGE_SIZE;
+                       goto rebuild_st;
+               } else {
+                       dev_warn(&dev_priv->drm.pdev->dev,
+                                "Failed to DMA remap %lu pages\n",
+                                page_count);
+                       goto err_pages;
+               }
+       }
 
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);
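
Two ideas in this hunk are worth spelling out. First, the phys backing store is now allocated inside get_pages with the object size rounded up to a power of two as the alignment, so one allocation satisfies every caller alignment that attach_phys() used to receive. Second, rebuild_st retries a failed DMA remap with PAGE_SIZE segments, trading mapping efficiency for IOMMU reliability. A small self-contained model of the alignment rule (roundup_pow_of_two() re-implemented in plain C for illustration):

	#include <stdio.h>

	/* Plain C stand-in for the kernel's roundup_pow_of_two(), for
	 * illustration only.
	 */
	static unsigned long roundup_pow_of_two(unsigned long n)
	{
		unsigned long p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		/* A 24 KiB object gets a 32 KiB-aligned allocation, which covers
		 * any caller alignment up to the object size.
		 */
		printf("%lu\n", roundup_pow_of_two(24 * 1024));	/* prints 32768 */
		return 0;
	}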
index ebaa941c83afd7843a56287f2b857d8f8fd3a9fa..abc78bbfc1dcd0fbc6373222c3801929d1dfd2d4 100644 (file)
@@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                return -ENODEV;
 
        /* See the comment at the drm_mm_init() call for more about this check.
-        * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+        * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
         */
-       if (start < 4096 && (IS_GEN8(dev_priv) ||
-                            IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
+       if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
                start = 4096;
 
        mutex_lock(&dev_priv->mm.stolen_lock);
index 47590ab08d7ea65e7cc94594853117f78e04946b..3df8d3dd31cd0fdb8972b5f3d2ddb5aa1c61d556 100644 (file)
@@ -460,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
 static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
 
index 8405b5a367d7164a15df16ea5c453b472e965916..7e3545f65257c415fa7fab7a65e27a6858e80a09 100644 (file)
@@ -46,14 +46,20 @@ struct edp_power_seq {
        u16 t11_t12;
 } __packed;
 
-/* MIPI Sequence Block definitions */
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming, we use the proper names here to avoid confusion when
+ * reading the code.
+ */
 enum mipi_seq {
        MIPI_SEQ_END = 0,
-       MIPI_SEQ_ASSERT_RESET,
+       MIPI_SEQ_DEASSERT_RESET,        /* Spec says MipiAssertResetPin */
        MIPI_SEQ_INIT_OTP,
        MIPI_SEQ_DISPLAY_ON,
        MIPI_SEQ_DISPLAY_OFF,
-       MIPI_SEQ_DEASSERT_RESET,
+       MIPI_SEQ_ASSERT_RESET,          /* Spec says MipiDeassertResetPin */
        MIPI_SEQ_BACKLIGHT_ON,          /* sequence block v2+ */
        MIPI_SEQ_BACKLIGHT_OFF,         /* sequence block v2+ */
        MIPI_SEQ_TEAR_ON,               /* sequence block v2+ */
index cf5cff7b03b8528ce10590a1dd973bbfa2b87776..6daad86137606d700b6101ee0ffde7a7ee35a1cd 100644 (file)
@@ -6244,35 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
        dev_priv->cdclk_pll.vco = 0;
 }
 
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
-       int ret;
-       u32 val;
-
-       /* inform PCU we want to change CDCLK */
-       val = SKL_CDCLK_PREPARE_FOR_CHANGE;
-       mutex_lock(&dev_priv->rps.hw_lock);
-       ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-
-       return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
-       return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
        u32 freq_select, pcu_ack;
+       int ret;
 
        WARN_ON((cdclk == 24000) != (vco == 0));
 
        DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
 
-       if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
-               DRM_ERROR("failed to inform PCU about cdclk change\n");
+       mutex_lock(&dev_priv->rps.hw_lock);
+       ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               SKL_CDCLK_PREPARE_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE,
+                               SKL_CDCLK_READY_FOR_CHANGE, 3);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       if (ret) {
+               DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+                         ret);
                return;
        }
 
index 90283edcafba6786f79c509df64aa194e490bc3c..d9bc19be855e76b9bab0f1cee082bc170bccff26 100644 (file)
@@ -4014,8 +4014,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                return;
 
        /* FIXME: we need to synchronize this sort of stuff with hardware
-        * readout */
-       if (WARN_ON_ONCE(!intel_dp->lane_count))
+        * readout. Currently fast link training doesn't work on boot-up. */
+       if (!intel_dp->lane_count)
                return;
 
        /* if link training is requested we should perform it always */
index 0d8ff0034b88567369861dc9ee5aa8de6651b05b..47cd1b20fb3e958fc89c83981e29496ecdfbff49 100644 (file)
@@ -300,7 +300,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
        mutex_lock(&dev_priv->sb_lock);
        vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
        vlv_iosf_sb_write(dev_priv, port, cfg0,
-                         CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+                         CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+                         CHV_GPIO_GPIOTXSTATE(value));
        mutex_unlock(&dev_priv->sb_lock);
 }
 
@@ -376,11 +377,11 @@ static const fn_mipi_elem_exec exec_elem[] = {
  */
 
 static const char * const seq_name[] = {
-       [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+       [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
        [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
        [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
        [MIPI_SEQ_DISPLAY_OFF]  = "MIPI_SEQ_DISPLAY_OFF",
-       [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+       [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
        [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
        [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
        [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
index 0a09024d6ca3ebc502f0b99cef07ed91f0b4cd64..d4961fa20c73d0e2d390673889ae5fa82f04dd07 100644 (file)
@@ -1968,12 +1968,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
                          ret);
        }
 
-       ret = logical_ring_init(engine);
-       if (ret) {
-               lrc_destroy_wa_ctx_obj(engine);
-       }
-
-       return ret;
+       return logical_ring_init(engine);
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)
index d67974eb127a0f19caa005c1f5a69687ec88ae84..ae2c0bb4b2e8b384c6fc8858490143064ec4dec3 100644 (file)
@@ -2964,24 +2964,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
-       int ret;
-       uint32_t temp = GEN9_SAGV_DISABLE;
-
-       ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
-                                    &temp);
-       if (ret)
-               return ret;
-       else
-               return temp & GEN9_SAGV_IS_DISABLED;
-}
-
 int
 intel_disable_sagv(struct drm_i915_private *dev_priv)
 {
-       int ret, result;
+       int ret;
 
        if (!intel_has_sagv(dev_priv))
                return 0;
@@ -2993,25 +2979,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
        mutex_lock(&dev_priv->rps.hw_lock);
 
        /* bspec says to keep retrying for at least 1 ms */
-       ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+       ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+                               GEN9_SAGV_DISABLE,
+                               GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+                               1);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
-       if (ret == -ETIMEDOUT) {
-               DRM_ERROR("Request to disable SAGV timed out\n");
-               return -ETIMEDOUT;
-       }
-
        /*
         * Some skl systems, pre-release machines in particular,
         * don't actually have an SAGV.
         */
-       if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+       if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
                DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
                dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
                return 0;
-       } else if (result < 0) {
-               DRM_ERROR("Failed to disable the SAGV\n");
-               return result;
+       } else if (ret < 0) {
+               DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+               return ret;
        }
 
        dev_priv->sagv_status = I915_SAGV_DISABLED;
@@ -7890,6 +7874,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
        return 0;
 }
 
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+                                 u32 request, u32 reply_mask, u32 reply,
+                                 u32 *status)
+{
+       u32 val = request;
+
+       *status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+       return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error, or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms; if this times out, polling is retried for another
+ * 10 ms with preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+                     u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+       u32 status;
+       int ret;
+
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+                                  &status)
+
+       /*
+        * Prime the PCODE by doing a request first. Normally it guarantees
+        * that a subsequent request, at most @timeout_base_ms later, succeeds.
+        * _wait_for() doesn't guarantee when the condition it is passed is
+        * first evaluated, so send the first request explicitly.
+        */
+       if (COND) {
+               ret = 0;
+               goto out;
+       }
+       ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+       if (!ret)
+               goto out;
+
+       /*
+        * The above can time out if the number of requests was low (2 in the
+        * worst case) _and_ PCODE was busy for some reason even after a
+        * (queued) request and @timeout_base_ms delay. As a workaround, retry
+        * the poll with preemption disabled to maximize the number of
+        * requests. Increase the timeout from @timeout_base_ms to 10ms to
+        * account for interrupts that could reduce the number of these
+        * requests.
+        */
+       DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+       WARN_ON_ONCE(timeout_base_ms > 3);
+       preempt_disable();
+       ret = wait_for_atomic(COND, 10);
+       preempt_enable();
+
+out:
+       return ret ? ret : status;
+#undef COND
+}
+
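
Return handling deserves a note: skl_pcode_request() yields 0 on success, -ETIMEDOUT if PCODE never acknowledged, or the nonzero PCODE status from the last read. A minimal usage sketch mirroring the converted skl_set_cdclk() call site earlier in this diff (the caller must hold rps.hw_lock):

	/* Usage sketch only; values taken from the cdclk call site above. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,	/* request */
				SKL_CDCLK_READY_FOR_CHANGE,	/* reply_mask */
				SKL_CDCLK_READY_FOR_CHANGE,	/* reply */
				3);				/* timeout_base_ms */
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret)
		DRM_ERROR("PCU request failed (%d)\n", ret);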
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
        /*
index 7b488e2793d98457c2ae09dd988d49707288acf5..c6be70686b4af869a41db55d01b6f562a9355ce1 100644 (file)
@@ -825,13 +825,9 @@ void intel_psr_init(struct drm_device *dev)
        dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
                HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
-       /* Per platform default */
-       if (i915.enable_psr == -1) {
-               if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-                       i915.enable_psr = 1;
-               else
-                       i915.enable_psr = 0;
-       }
+       /* Per platform default: all disabled. */
+       if (i915.enable_psr == -1)
+               i915.enable_psr = 0;
 
        /* Set link_standby x link_off defaults */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
index 356c662ad45325f6185a7eb6518e33641479d4c3..87b4af092d5487d219024c858585b5448dd3ea1c 100644 (file)
@@ -1039,7 +1039,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 
 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 {
-       I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+       u32 val;
+
+       /*
+        * On driver load, a pipe may be active and driving a DSI display.
+        * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+        * (and never recovering) in this case. intel_dsi_post_disable() will
+        * clear it when we turn off the display.
+        */
+       val = I915_READ(DSPCLK_GATE_D);
+       val &= DPOUNIT_CLOCK_GATE_DISABLE;
+       val |= VRHUNIT_CLOCK_GATE_DISABLE;
+       I915_WRITE(DSPCLK_GATE_D, val);
 
        /*
         * Disable trickle feed and enable pnd deadline calculation
index 21b6732425c50d4b368d6b0fb016ea507a8aa6a1..c829cfb02fc4c994a1167e9daa7b4c1010a97ab0 100644 (file)
@@ -603,8 +603,9 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
        GOP_VBIOS_CONTENT *vbios;
        VFCT_IMAGE_HEADER *vhdr;
 
-       if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+       if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
                return false;
+       tbl_size = hdr->length;
        if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
                DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
                goto out_unmap;
index b7ac97b27c88c2fe11ad11f564ce786085d3217c..cda5542e13a206347447a49f18f9e8cb930e7c8c 100644 (file)
@@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                FW_RI_RES_WR_DCAEN_V(0) |
                FW_RI_RES_WR_DCACPU_V(0) |
                FW_RI_RES_WR_FBMIN_V(2) |
-               FW_RI_RES_WR_FBMAX_V(2) |
+               (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
+                                        FW_RI_RES_WR_FBMAX_V(3)) |
                FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
                FW_RI_RES_WR_CIDXFTHRESH_V(0) |
                FW_RI_RES_WR_EQSIZE_V(eqsize));
@@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                FW_RI_RES_WR_DCAEN_V(0) |
                FW_RI_RES_WR_DCACPU_V(0) |
                FW_RI_RES_WR_FBMIN_V(2) |
-               FW_RI_RES_WR_FBMAX_V(2) |
+               FW_RI_RES_WR_FBMAX_V(3) |
                FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
                FW_RI_RES_WR_CIDXFTHRESH_V(0) |
                FW_RI_RES_WR_EQSIZE_V(eqsize));
index 392f78384a604ad9175f5376845a0f8e854f5405..98923a8cf86d83361d0a93afd1783b57fc4e5995 100644 (file)
@@ -358,13 +358,16 @@ void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
  * @dev: sc device struct
  * @pd: sc pd ptr
  * @pd_id: pd_id for allocated pd
+ * @abi_ver: ABI version from user context, -1 if not valid
  */
 static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
                             struct i40iw_sc_pd *pd,
-                            u16 pd_id)
+                            u16 pd_id,
+                            int abi_ver)
 {
        pd->size = sizeof(*pd);
        pd->pd_id = pd_id;
+       pd->abi_ver = abi_ver;
        pd->dev = dev;
 }
 
@@ -2252,6 +2255,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
                                              offset);
 
        info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
+       info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
        ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
        if (ret_code)
                return ret_code;
@@ -2270,10 +2274,21 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
                                                    false);
        i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
                    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
-       ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
-                                              &wqe_size);
-       if (ret_code)
-               return ret_code;
+
+       switch (qp->pd->abi_ver) {
+       case 4:
+               ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+                                                      &wqe_size);
+               if (ret_code)
+                       return ret_code;
+               break;
+       case 5: /* fallthrough until next ABI version */
+       default:
+               if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+                       return I40IW_ERR_INVALID_FRAG_COUNT;
+               wqe_size = I40IW_MAX_WQE_SIZE_RQ;
+               break;
+       }
        qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
                                (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
        i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
index 449ba8c81ce786d1442a0808d72041b0edfb710d..db41ab40da9cea375b087d02fccbfe07adf0356a 100644 (file)
@@ -930,7 +930,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
        INIT_LIST_HEAD(&rsrc->txpend);
 
        rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
-       dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
+       dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
        rsrc->qp_id = info->qp_id;
        rsrc->cq_id = info->cq_id;
        rsrc->sq_size = info->sq_size;
index f3f8e9cc3c058fe0a1b9fddbe06b64bca26daf9f..7b76259752b0062e5cf16f7bc097f5cd4b66098e 100644 (file)
@@ -280,6 +280,7 @@ struct i40iw_sc_pd {
        u32 size;
        struct i40iw_sc_dev *dev;
        u16 pd_id;
+       int abi_ver;
 };
 
 struct i40iw_cqp_quanta {
@@ -852,6 +853,7 @@ struct i40iw_qp_init_info {
        u64 host_ctx_pa;
        u64 q2_pa;
        u64 shadow_area_pa;
+       int abi_ver;
        u8 sq_tph_val;
        u8 rq_tph_val;
        u8 type;
@@ -1051,7 +1053,7 @@ struct i40iw_aeq_ops {
 };
 
 struct i40iw_pd_ops {
-       void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
+       void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int);
 };
 
 struct i40iw_priv_qp_ops {
index 12acd688def4707fa018a9673fe5ea8b6408f9cb..57d3f1d11ff1f5bcbd0108a4ffb48727f5778b56 100644 (file)
@@ -39,8 +39,8 @@
 
 #include <linux/types.h>
 
-#define I40IW_ABI_USERSPACE_VER 4
-#define I40IW_ABI_KERNEL_VER    4
+#define I40IW_ABI_VER 5
+
 struct i40iw_alloc_ucontext_req {
        __u32 reserved32;
        __u8 userspace_ver;
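
A single I40IW_ABI_VER replaces the separate userspace/kernel constants: the kernel now accepts any provider version from 4 up to I40IW_ABI_VER, echoes the negotiated version back, and stores it so RQ sizing can stay version-dependent. Condensed from the i40iw_alloc_ucontext() hunk later in this diff:

	/* Condensed negotiation sketch (error paths elided): */
	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER)
		return ERR_PTR(-EINVAL);	/* provider too old or too new */
	uresp.kernel_ver = req.userspace_ver;	/* echo the negotiated version */
	ucontext->abi_ver = req.userspace_ver;	/* consulted at QP create time */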
index 4376cd628774248dbc18e7bf1ea731dcede4945c..2800f796271c4a89fccfbea526ff00395269f63c 100644 (file)
@@ -966,10 +966,6 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
        if (ret_code)
                return ret_code;
 
-       ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
-       if (ret_code)
-               return ret_code;
-
        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->shadow_area = info->shadow_area;
@@ -998,8 +994,19 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
        if (!qp->use_srq) {
                qp->rq_size = info->rq_size;
                qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
-               qp->rq_wqe_size = rqshift;
                I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
+               switch (info->abi_ver) {
+               case 4:
+                       ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
+                       if (ret_code)
+                               return ret_code;
+                       break;
+               case 5: /* fallthrough until next ABI version */
+               default:
+                       rqshift = I40IW_MAX_RQ_WQE_SHIFT;
+                       break;
+               }
+               qp->rq_wqe_size = rqshift;
                qp->rq_wqe_size_multiplier = 4 << rqshift;
        }
        qp->ops = iw_qp_uk_ops;
index 80d9f464f65ea31813a2850298b443f9904dbac6..84be6f13b9c5264f27237a0eb6a2810f9d61b0ab 100644 (file)
@@ -76,6 +76,7 @@ enum i40iw_device_capabilities_const {
        I40IW_MAX_ORD_SIZE =                    127,
        I40IW_MAX_WQ_ENTRIES =                  2048,
        I40IW_Q2_BUFFER_SIZE =                  (248 + 100),
+       I40IW_MAX_WQE_SIZE_RQ =                 128,
        I40IW_QP_CTX_SIZE =                     248,
        I40IW_MAX_PDS =                         32768
 };
@@ -97,6 +98,7 @@ enum i40iw_device_capabilities_const {
 #define i40iw_address_list u64 *
 
 #define        I40IW_MAX_MR_SIZE       0x10000000000L
+#define        I40IW_MAX_RQ_WQE_SHIFT  2
 
 struct i40iw_qp_uk;
 struct i40iw_cq_uk;
@@ -405,7 +407,7 @@ struct i40iw_qp_uk_init_info {
        u32 max_sq_frag_cnt;
        u32 max_rq_frag_cnt;
        u32 max_inline_data;
-
+       int abi_ver;
 };
 
 struct i40iw_cq_uk_init_info {
index 7368a50bbdaa09abdfae87783f239d1bac5497e7..29e97df9e1a7f87c784ebf33f4ebccfae217f433 100644 (file)
@@ -145,9 +145,8 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
        if (ib_copy_from_udata(&req, udata, sizeof(req)))
                return ERR_PTR(-EINVAL);
 
-       if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
-               i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
-                            req.userspace_ver, I40IW_ABI_USERSPACE_VER);
+       if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
+               i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
                return ERR_PTR(-EINVAL);
        }
 
@@ -155,13 +154,14 @@ static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
        uresp.max_qps = iwdev->max_qp;
        uresp.max_pds = iwdev->max_pd;
        uresp.wq_size = iwdev->max_qp_wr * 2;
-       uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
+       uresp.kernel_ver = req.userspace_ver;
 
        ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
        if (!ucontext)
                return ERR_PTR(-ENOMEM);
 
        ucontext->iwdev = iwdev;
+       ucontext->abi_ver = req.userspace_ver;
 
        if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                kfree(ucontext);
@@ -333,6 +333,7 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
        struct i40iw_alloc_pd_resp uresp;
        struct i40iw_sc_pd *sc_pd;
+       struct i40iw_ucontext *ucontext;
        u32 pd_id = 0;
        int err;
 
@@ -353,15 +354,18 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
        }
 
        sc_pd = &iwpd->sc_pd;
-       dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
 
        if (context) {
+               ucontext = to_ucontext(context);
+               dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
                memset(&uresp, 0, sizeof(uresp));
                uresp.pd_id = pd_id;
                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                        err = -EFAULT;
                        goto error;
                }
+       } else {
+               dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
        }
 
        i40iw_add_pdusecount(iwpd);
@@ -518,7 +522,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
        struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
        u32 sqdepth, rqdepth;
        u32 sq_size, rq_size;
-       u8 sqshift, rqshift;
+       u8 sqshift;
        u32 size;
        enum i40iw_status_code status;
        struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
@@ -527,14 +531,11 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
        rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
 
        status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
-       if (!status)
-               status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
-
        if (status)
                return -ENOMEM;
 
        sqdepth = sq_size << sqshift;
-       rqdepth = rq_size << rqshift;
+       rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
 
        size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
        iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
index 6549c939500f47068567e9bb275dfc8ebc829a70..07c3fec77de6a1fcbb3a52a95e4937b6e23ea8aa 100644 (file)
@@ -42,6 +42,7 @@ struct i40iw_ucontext {
        spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
        struct list_head qp_reg_mem_list;
        spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
+       int abi_ver;
 };
 
 struct i40iw_pd {
index 302fb05e6e6fb1d17d5700363b02e70f42eba192..57c8de2080773b161272774a69eaebf02cc411ed 100644 (file)
@@ -890,6 +890,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 
                pbl_ptr = cq->q.pbl_tbl->pa;
                page_cnt = cq->q.pbl_info.num_pbes;
+
+               cq->ibcq.cqe = chain_entries;
        } else {
                cq->cq_type = QEDR_CQ_TYPE_KERNEL;
 
@@ -905,6 +907,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 
                page_cnt = qed_chain_get_page_cnt(&cq->pbl);
                pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+               cq->ibcq.cqe = cq->pbl.capacity;
        }
 
        qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
@@ -982,8 +985,13 @@ int qedr_destroy_cq(struct ib_cq *ibcq)
 
        /* GSIs CQs are handled by driver, so they don't exist in the FW */
        if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+               int rc;
+
                iparams.icid = cq->icid;
-               dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+               rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
+                                              &oparams);
+               if (rc)
+                       return rc;
                dev->ops->common->chain_free(dev->cdev, &cq->pbl);
        }
 
@@ -1966,7 +1974,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        if (attr_mask & IB_QP_STATE) {
                if ((qp->qp_type != IB_QPT_GSI) && (!udata))
-                       qedr_update_qp_state(dev, qp, qp_params.new_state);
+                       rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
                qp->state = qp_params.new_state;
        }
 
@@ -2070,8 +2078,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
        DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
                 qp, qp->qp_type);
 
-       if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR |
-                         QED_ROCE_QP_STATE_INIT)) {
+       if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+           (qp->state != QED_ROCE_QP_STATE_ERR) &&
+           (qp->state != QED_ROCE_QP_STATE_INIT)) {
+
                attr.qp_state = IB_QPS_ERR;
                attr_mask |= IB_QP_STATE;
 
@@ -2626,7 +2636,9 @@ static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
        rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
        DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
 
-       if (wr->send_flags & IB_SEND_INLINE) {
+       if (wr->send_flags & IB_SEND_INLINE &&
+           (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+            wr->opcode == IB_WR_RDMA_WRITE)) {
                u8 flags = 0;
 
                SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
@@ -2977,8 +2989,9 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
        spin_lock_irqsave(&qp->q_lock, flags);
 
-       if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
-           (qp->state == QED_ROCE_QP_STATE_ERR)) {
+       if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+           (qp->state != QED_ROCE_QP_STATE_ERR) &&
+           (qp->state != QED_ROCE_QP_STATE_SQD)) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                *bad_wr = wr;
                DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -3031,8 +3044,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
        spin_lock_irqsave(&qp->q_lock, flags);
 
-       if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
-           (qp->state == QED_ROCE_QP_STATE_ERR)) {
+       if (qp->state == QED_ROCE_QP_STATE_RESET) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                *bad_wr = wr;
                return -EINVAL;
@@ -3174,6 +3186,7 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
 
                /* fill WC */
                wc->status = status;
+               wc->vendor_err = 0;
                wc->wc_flags = 0;
                wc->src_qp = qp->id;
                wc->qp = &qp->ibqp;
@@ -3225,7 +3238,7 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
                       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
                       cq->icid, qp->icid);
                cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
-                                 IB_WC_WR_FLUSH_ERR, 0);
+                                 IB_WC_WR_FLUSH_ERR, 1);
                break;
        default:
                /* process all WQEs before the consumer */
@@ -3363,6 +3376,7 @@ static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
 
        /* fill WC */
        wc->status = wc_status;
+       wc->vendor_err = 0;
        wc->src_qp = qp->id;
        wc->qp = &qp->ibqp;
        wc->wr_id = wr_id;
@@ -3391,6 +3405,7 @@ static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
        while (num_entries && qp->rq.wqe_cons != hw_cons) {
                /* fill WC */
                wc->status = IB_WC_WR_FLUSH_ERR;
+               wc->vendor_err = 0;
                wc->wc_flags = 0;
                wc->src_qp = qp->id;
                wc->byte_len = 0;
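
The destroy-QP hunk fixes a classic C slip: qp->state != (A | B | C) compares against the bitwise OR of three enum values, not against each state individually, so the guard can both miss states it should match and match states it should not. A self-contained demonstration with made-up state values (the real QED_ROCE_QP_STATE_* numbers are not shown in this diff):

	#include <stdio.h>

	/* Illustrative state values only. */
	enum qp_state { RESET = 0, INIT = 1, ERR = 4, RTS = 5 };

	static const char *buggy(enum qp_state s)
	{
		/* (RESET | ERR | INIT) folds to the single value 5 == RTS. */
		return s != (RESET | ERR | INIT) ? "flush" : "skip";
	}

	static const char *fixed(enum qp_state s)
	{
		return (s != RESET && s != ERR && s != INIT) ? "flush" : "skip";
	}

	int main(void)
	{
		/* RTS should be flushed before destroy; INIT should not. */
		printf("RTS:  buggy=%s fixed=%s\n", buggy(RTS), fixed(RTS));
		printf("INIT: buggy=%s fixed=%s\n", buggy(INIT), fixed(INIT));
		return 0;
	}

With these illustrative values the OR of RESET, ERR and INIT happens to equal RTS, so the buggy guard skips flushing an active QP while wrongly flushing one still in INIT; the explicit comparisons behave as intended.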
index cd27cbde765249b98331fd1da8f023d134dddd38..d369f24425f94008878b0d9514de16952cbae4af 100644 (file)
@@ -224,7 +224,7 @@ static inline enum comp_state check_psn(struct rxe_qp *qp,
                else
                        return COMPST_DONE;
        } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
-               return COMPST_ERROR_RETRY;
+               return COMPST_DONE;
        } else {
                return COMPST_CHECK_ACK;
        }
index 16967cdb45dffb6c9841748ac0b13f379d02adb9..342e78163613dfdc719b171e1396d01fd44432eb 100644 (file)
@@ -455,8 +455,7 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                return -EAGAIN;
        }
 
-       if (pkt->qp)
-               atomic_inc(&pkt->qp->skb_out);
+       atomic_inc(&pkt->qp->skb_out);
        kfree_skb(skb);
 
        return 0;
index c3e60e4bde6e2a3ba5e0953b531a42f65927b717..486d576e55bc016dda1f8ddad6b8f00941f66727 100644 (file)
@@ -855,4 +855,5 @@ void rxe_qp_cleanup(void *arg)
        free_rd_atomic_resources(qp);
 
        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+       sock_release(qp->sk);
 }
index 7a36ec9dbc0c98cc9907f378e593814fbe26ffcc..3435efff879960cece0c7e122b5960a057f2d4a1 100644 (file)
@@ -1070,12 +1070,13 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt)
 {
        enum resp_states rc;
+       u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
 
        if (pkt->mask & RXE_SEND_MASK ||
            pkt->mask & RXE_WRITE_MASK) {
                /* SEND. Ack again and cleanup. C9-105. */
                if (bth_ack(pkt))
-                       send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
+                       send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
                rc = RESPST_CLEANUP;
                goto out;
        } else if (pkt->mask & RXE_READ_MASK) {
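
Packet sequence numbers in RoCE live in a 24-bit space, so computing the previous PSN with bare subtraction underflows to 0xffffffff when resp.psn is 0; masking with BTH_PSN_MASK keeps the ACK inside sequence space. A small demonstration, assuming the usual 24-bit mask value:

	#include <stdio.h>

	#define BTH_PSN_MASK 0x00ffffff	/* assumed 24-bit PSN mask */

	int main(void)
	{
		unsigned int psn = 0;

		printf("unmasked: 0x%08x\n", psn - 1);			/* 0xffffffff */
		printf("masked:   0x%08x\n", (psn - 1) & BTH_PSN_MASK);	/* 0x00ffffff */
		return 0;
	}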
index 971154cbbb03eb36efc7fb60608afe23d48f38a2..6799cf9713f77f460f990e6bc0f38b31422c0745 100644 (file)
@@ -2209,14 +2209,13 @@ static void __init free_dma_resources(void)
 static int __init early_amd_iommu_init(void)
 {
        struct acpi_table_header *ivrs_base;
-       acpi_size ivrs_size;
        acpi_status status;
        int i, remap_cache_sz, ret = 0;
 
        if (!amd_iommu_detected)
                return -ENODEV;
 
-       status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+       status = acpi_get_table("IVRS", 0, &ivrs_base);
        if (status == AE_NOT_FOUND)
                return -ENODEV;
        else if (ACPI_FAILURE(status)) {
@@ -2338,7 +2337,7 @@ static int __init early_amd_iommu_init(void)
 
 out:
        /* Don't leak any ACPI memory */
-       early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+       acpi_put_table(ivrs_base);
        ivrs_base = NULL;
 
        return ret;
@@ -2362,10 +2361,9 @@ out:
 static bool detect_ivrs(void)
 {
        struct acpi_table_header *ivrs_base;
-       acpi_size ivrs_size;
        acpi_status status;
 
-       status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+       status = acpi_get_table("IVRS", 0, &ivrs_base);
        if (status == AE_NOT_FOUND)
                return false;
        else if (ACPI_FAILURE(status)) {
@@ -2374,7 +2372,7 @@ static bool detect_ivrs(void)
                return false;
        }
 
-       early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+       acpi_put_table(ivrs_base);
 
        /* Make sure ACS will be enabled during PCI probe */
        pci_request_acs();
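
Both IVRS hunks follow the same ACPICA migration, which the dmar, pcc and radeon hunks in this diff repeat: acpi_get_table_with_size() is gone, the table length now comes from the mapped header itself, and the reference is dropped with acpi_put_table() rather than early_acpi_os_unmap_memory(). The shape of the conversion, as a sketch with the IVRS-specific handling elided:

	struct acpi_table_header *tbl;
	acpi_status status;

	status = acpi_get_table("IVRS", 0, &tbl);	/* no size out-parameter */
	if (ACPI_FAILURE(status))
		return -ENODEV;

	pr_info("IVRS length: %u\n", tbl->length);	/* length lives in the header */

	acpi_put_table(tbl);	/* replaces early_acpi_os_unmap_memory() */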
index 8c53748a769d447fac83622725c305a3ee6bc92f..a88576d50740b2dbdbe6e65b2bfe1985f01c81ca 100644 (file)
@@ -68,7 +68,6 @@ DECLARE_RWSEM(dmar_global_lock);
 LIST_HEAD(dmar_drhd_units);
 
 struct acpi_table_header * __initdata dmar_tbl;
-static acpi_size dmar_tbl_size;
 static int dmar_dev_scope_status = 1;
 static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
 
@@ -543,9 +542,7 @@ static int __init dmar_table_detect(void)
        acpi_status status = AE_OK;
 
        /* if we could find DMAR table, then there are DMAR devices */
-       status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
-                               (struct acpi_table_header **)&dmar_tbl,
-                               &dmar_tbl_size);
+       status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
 
        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
@@ -906,7 +903,7 @@ int __init detect_intel_iommu(void)
                x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
 
-       early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
+       acpi_put_table(dmar_tbl);
        dmar_tbl = NULL;
        up_write(&dmar_global_lock);
 
index 1f32688c312d717639ecfe7de3ca94b35d31a637..dd9ecd354a3e001a1b4037f0e1ca2c92c5672957 100644 (file)
@@ -447,7 +447,6 @@ static int pcc_parse_subspace_irq(int id,
  */
 static int __init acpi_pcc_probe(void)
 {
-       acpi_size pcct_tbl_header_size;
        struct acpi_table_header *pcct_tbl;
        struct acpi_subtable_header *pcct_entry;
        struct acpi_table_pcct *acpi_pcct_tbl;
@@ -456,9 +455,7 @@ static int __init acpi_pcc_probe(void)
        acpi_status status = AE_OK;
 
        /* Search for PCCT */
-       status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
-                       &pcct_tbl,
-                       &pcct_tbl_header_size);
+       status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
 
        if (ACPI_FAILURE(status) || !pcct_tbl) {
                pr_warn("PCCT header not found.\n");
index 6b420a55c7459f1af42b9dd9c920dce63bd7d089..c3ea03c9a1a8ef603a25934ccbe32ea4bfca3d66 100644 (file)
@@ -425,7 +425,7 @@ struct cache {
         * until a gc finishes - otherwise we could pointlessly burn a ton of
         * cpu
         */
-       unsigned                invalidate_needs_gc:1;
+       unsigned                invalidate_needs_gc;
 
        bool                    discard; /* Get rid of? */
 
@@ -593,8 +593,8 @@ struct cache_set {
 
        /* Counts how many sectors bio_insert has added to the cache */
        atomic_t                sectors_to_gc;
+       wait_queue_head_t       gc_wait;
 
-       wait_queue_head_t       moving_gc_wait;
        struct keybuf           moving_gc_keys;
        /* Number of moving GC bios in flight */
        struct semaphore        moving_in_flight;
index 6fdd8e252760cbc11ff8cceb1c38fb85eccbcbad..a43eedd5804dd8a9c13b60d96c5bca59fd355b81 100644 (file)
@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
        bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-       struct cache_set *c = arg;
        struct cache *ca;
        unsigned i;
 
-       while (1) {
-again:
-               bch_btree_gc(c);
+       for_each_cache(ca, c, i)
+               if (ca->invalidate_needs_gc)
+                       return true;
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (kthread_should_stop())
-                       break;
+       if (atomic_read(&c->sectors_to_gc) < 0)
+               return true;
 
-               mutex_lock(&c->bucket_lock);
+       return false;
+}
 
-               for_each_cache(ca, c, i)
-                       if (ca->invalidate_needs_gc) {
-                               mutex_unlock(&c->bucket_lock);
-                               set_current_state(TASK_RUNNING);
-                               goto again;
-                       }
+static int bch_gc_thread(void *arg)
+{
+       struct cache_set *c = arg;
 
-               mutex_unlock(&c->bucket_lock);
+       while (1) {
+               wait_event_interruptible(c->gc_wait,
+                          kthread_should_stop() || gc_should_run(c));
 
-               schedule();
+               if (kthread_should_stop())
+                       break;
+
+               set_gc_sectors(c);
+               bch_btree_gc(c);
        }
 
        return 0;
@@ -1790,11 +1792,10 @@ again:
 
 int bch_gc_thread_start(struct cache_set *c)
 {
-       c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+       c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
        if (IS_ERR(c->gc_thread))
                return PTR_ERR(c->gc_thread);
 
-       set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
        return 0;
 }
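
The rewrite above replaces the open-coded set_current_state()/goto loop with wait_event_interruptible() gated on a gc_should_run() predicate, letting wake_up(&c->gc_wait) replace wake_up_process() on a task pointer and removing the set_task_state() call on the freshly created thread; kthread_run() then starts the thread immediately. A generic sketch of the predicate-gated kthread pattern, with illustrative names rather than the bcache symbols:

    #include <linux/kthread.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(work_wait);
    static bool work_pending;       /* producers set this before wake_up() */

    static int worker_fn(void *arg)
    {
            while (1) {
                    wait_event_interruptible(work_wait,
                            kthread_should_stop() || READ_ONCE(work_pending));
                    if (kthread_should_stop())
                            break;
                    WRITE_ONCE(work_pending, false);
                    /* ... run one round of work (here: garbage collection) ... */
            }
            return 0;
    }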
 
index 5c391fa01bedbfba3f1dea062605460ccadc1c6a..9b80417cd547f52c264c1b4b993f3ee2155405f2 100644 (file)
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
-       if (c->gc_thread)
-               wake_up_process(c->gc_thread);
+       wake_up(&c->gc_wait);
 }
 
 #define MAP_DONE       0
index f49c5417527dcbb8e0a839a32ec5d354323d63a4..76d20875503c17c6f5956d7f7bfde4eaa90ffbcd 100644 (file)
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;
 
-       if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-               set_gc_sectors(op->c);
+       if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
                wake_up_gc(op->c);
-       }
 
        if (op->bypass)
                return bch_data_invalidate(cl);
index 2fb5bfeb43e2e1668051667d6582a087b6bb14ad..3a19cbc8b230e5a7b77cafa89427b8e282c251dc 100644 (file)
@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
 
 #define BTREE_MAX_PAGES                (256 * 1024 / PAGE_SIZE)
+#define BCACHE_MINORS          16 /* partition support */
 
 /* Superblock */
 
@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
        if (minor < 0)
                return minor;
 
+       minor *= BCACHE_MINORS;
+
        if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(d->disk = alloc_disk(1))) {
+           !(d->disk = alloc_disk(BCACHE_MINORS))) {
                ida_simple_remove(&bcache_minor, minor);
                return -ENOMEM;
        }
@@ -1489,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->btree_cache_wait);
        init_waitqueue_head(&c->bucket_wait);
+       init_waitqueue_head(&c->gc_wait);
        sema_init(&c->uuid_write_mutex, 1);
 
        spin_lock_init(&c->btree_gc_time.lock);
@@ -1548,6 +1552,7 @@ static void run_cache_set(struct cache_set *c)
 
        for_each_cache(ca, c, i)
                c->nbuckets += ca->sb.nbuckets;
+       set_gc_sectors(c);
 
        if (CACHE_SYNC(&c->sb)) {
                LIST_HEAD(journal);
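
The BCACHE_MINORS change in this file gives each bcache device a block of 16 minor numbers instead of one, which is what lets the disk carry partitions (/dev/bcache0p1 and so on). The ida still hands out one slot per device, so the slot is simply scaled before alloc_disk(). Illustratively, not bcache source:

    #define BCACHE_MINORS 16

    /* ida slot i owns minors [16 * i, 16 * i + 15]: the whole disk
     * plus up to 15 partitions. */
    static int first_minor_for_slot(int slot)
    {
            return slot * BCACHE_MINORS;
    }
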
index 543eadd230e55c8918c45c3ef3b6428cbb73ed97..1076b9d89df38e26bfb088fae3586a6c577fd70e 100644 (file)
@@ -496,8 +496,7 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
  * Returns enum mmc_blk_status after checking errors.
  */
 static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
-                                     struct mmc_request *mrq,
-                                     struct mmc_async_req *next_req)
+                                                     struct mmc_request *mrq)
 {
        struct mmc_command *cmd;
        struct mmc_context_info *context_info = &host->context_info;
@@ -507,7 +506,7 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
                wait_event_interruptible(context_info->wait,
                                (context_info->is_done_rcv ||
                                 context_info->is_new_req));
-               context_info->is_waiting_last_req = false;
+
                if (context_info->is_done_rcv) {
                        context_info->is_done_rcv = false;
                        cmd = mrq->cmd;
@@ -527,10 +526,9 @@ static enum mmc_blk_status mmc_wait_for_data_req_done(struct mmc_host *host,
                                __mmc_start_request(host, mrq);
                                continue; /* wait for done/new event again */
                        }
-               } else if (context_info->is_new_req) {
-                       if (!next_req)
-                               return MMC_BLK_NEW_REQUEST;
                }
+
+               return MMC_BLK_NEW_REQUEST;
        }
        mmc_retune_release(host);
        return status;
@@ -660,7 +658,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                mmc_pre_req(host, areq->mrq);
 
        if (host->areq) {
-               status = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
+               status = mmc_wait_for_data_req_done(host, host->areq->mrq);
                if (status == MMC_BLK_NEW_REQUEST) {
                        if (ret_stat)
                                *ret_stat = status;
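
After this simplification the helper no longer takes the next request: when the wait is ended by a new request rather than by a completion, MMC_BLK_NEW_REQUEST is returned unconditionally. A hedged sketch of such a two-event dispatch loop; req_ctx, handle_completion() and the REQ_* values are made up, not the mmc core API:

    static int wait_for_done_or_new(struct req_ctx *ctx)
    {
            while (1) {
                    wait_event_interruptible(ctx->wait,
                                             ctx->is_done_rcv || ctx->is_new_req);
                    if (!ctx->is_done_rcv)
                            return REQ_NEW;         /* preempted by a new request */
                    ctx->is_done_rcv = false;
                    if (handle_completion(ctx))     /* retry issued: wait again */
                            continue;
                    return REQ_DONE;
            }
    }
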
index deb90c2ff6b423e63352283c8dfdbb935bfb4e27..a614f37faf27e05e52d851353024384e5cbc3f41 100644 (file)
@@ -223,6 +223,7 @@ static int mmc_decode_scr(struct mmc_card *card)
 static int mmc_read_ssr(struct mmc_card *card)
 {
        unsigned int au, es, et, eo;
+       u32 *raw_ssr;
        int i;
 
        if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
@@ -231,14 +232,21 @@ static int mmc_read_ssr(struct mmc_card *card)
                return 0;
        }
 
-       if (mmc_app_sd_status(card, card->raw_ssr)) {
+       raw_ssr = kmalloc(sizeof(card->raw_ssr), GFP_KERNEL);
+       if (!raw_ssr)
+               return -ENOMEM;
+
+       if (mmc_app_sd_status(card, raw_ssr)) {
                pr_warn("%s: problem reading SD Status register\n",
                        mmc_hostname(card->host));
+               kfree(raw_ssr);
                return 0;
        }
 
        for (i = 0; i < 16; i++)
-               card->raw_ssr[i] = be32_to_cpu(card->raw_ssr[i]);
+               card->raw_ssr[i] = be32_to_cpu(raw_ssr[i]);
+
+       kfree(raw_ssr);
 
        /*
         * UNSTUFF_BITS only works with four u32s so we have to offset the
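
The raw_ssr change transfers into a kmalloc'd bounce buffer instead of the raw_ssr array embedded in struct mmc_card, presumably because buffers handed to the host controller for DMA should come from kmalloc rather than from the middle of another structure. The generic pattern, where do_dma_read(), dev, dest and len are made-up stand-ins for the actual transfer; this is a body excerpt of a function returning int:

    u32 *bounce = kmalloc(len, GFP_KERNEL);  /* kmalloc memory is DMA-capable */
    int err;

    if (!bounce)
            return -ENOMEM;
    err = do_dma_read(dev, bounce, len);
    if (!err)
            memcpy(dest, bounce, len);       /* or byte-swap element-wise */
    kfree(bounce);
    return err;
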
index 1501cfdac4734246aa6746ed5ee8d2678122d964..4b0ecb981842248b1cc4ea8edaf35dc4bac753e1 100644 (file)
@@ -262,6 +262,7 @@ disable_clk:
 }
 
 static const struct of_device_id sdhci_cdns_match[] = {
+       { .compatible = "socionext,uniphier-sd4hc" },
        { .compatible = "cdns,sd4hc" },
        { /* sentinel */ }
 };
index 111991e5b9a0e7ecf587eaf402a5ecd0d2906f55..23909804ffb840d3187f21f67180a634a769425e 100644 (file)
@@ -1576,6 +1576,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        unsigned long flags;
        u8 ctrl;
 
+       if (ios->power_mode == MMC_POWER_UNDEFINED)
+               return;
+
        spin_lock_irqsave(&host->lock, flags);
 
        if (host->flags & SDHCI_DEVICE_DEAD) {
@@ -2938,22 +2941,24 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
 
        sdhci_init(host, 0);
 
-       /* Force clock and power re-program */
-       host->pwr = 0;
-       host->clock = 0;
-       mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
-       mmc->ops->set_ios(mmc, &mmc->ios);
+       if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
+               /* Force clock and power re-program */
+               host->pwr = 0;
+               host->clock = 0;
+               mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
+               mmc->ops->set_ios(mmc, &mmc->ios);
 
-       if ((host_flags & SDHCI_PV_ENABLED) &&
-               !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
-               spin_lock_irqsave(&host->lock, flags);
-               sdhci_enable_preset_value(host, true);
-               spin_unlock_irqrestore(&host->lock, flags);
-       }
+               if ((host_flags & SDHCI_PV_ENABLED) &&
+                   !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
+                       spin_lock_irqsave(&host->lock, flags);
+                       sdhci_enable_preset_value(host, true);
+                       spin_unlock_irqrestore(&host->lock, flags);
+               }
 
-       if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
-           mmc->ops->hs400_enhanced_strobe)
-               mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+               if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
+                   mmc->ops->hs400_enhanced_strobe)
+                       mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
+       }
 
        spin_lock_irqsave(&host->lock, flags);
 
index fba3b2ad382d61d31fa20210001361fc3bbc6d9c..a267173f59972f174947578935189b1e474dc94b 100644 (file)
@@ -76,6 +76,7 @@ enum {
        CPL_PASS_ESTABLISH    = 0x41,
        CPL_RX_DATA_DDP       = 0x42,
        CPL_PASS_ACCEPT_REQ   = 0x44,
+       CPL_RX_ISCSI_CMP      = 0x45,
        CPL_TRACE_PKT_T5      = 0x48,
        CPL_RX_ISCSI_DDP      = 0x49,
 
@@ -934,6 +935,18 @@ struct cpl_iscsi_data {
        __u8 status;
 };
 
+struct cpl_rx_iscsi_cmp {
+       union opcode_tid ot;
+       __be16 pdu_len_ddp;
+       __be16 len;
+       __be32 seq;
+       __be16 urg;
+       __u8 rsvd;
+       __u8 status;
+       __be32 ulp_crc;
+       __be32 ddpvld;
+};
+
 struct cpl_tx_data_iso {
        __be32 op_to_scsi;
        __u8   reserved1;
index 2fd7dc2e8fc4b742f505b7c54961f1be4dc927de..3d21a154dce79deceeff77cd16ef5c6bf2a71978 100644 (file)
@@ -50,7 +50,7 @@
 #define NVME_AQ_DEPTH          256
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
-               
+
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -1349,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev,
 {
        struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
 
-       return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
+       return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
                       ndev->cmbloc, ndev->cmbsz);
 }
 static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
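
The snprintf() to scnprintf() switch matters because a sysfs show() routine must return the number of bytes actually placed in buf: snprintf() returns the length the output would have had, which can exceed the buffer, while scnprintf() returns what was really written, excluding the trailing NUL. For example:

    char buf[8];

    /* returns 10, although only 7 characters plus the NUL fit */
    int a = snprintf(buf, sizeof(buf), "0123456789");
    /* returns 7, the number of bytes actually stored */
    int b = scnprintf(buf, sizeof(buf), "0123456789");
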
index 581001989937ce1e0aaab11c26136d5e11b4fa4d..d5bf36ec8a751326062e47abe80ddd1f61a5f43b 100644 (file)
@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 
 
 /**
- * zfcp_dbf_rec_run - trace event related to running recovery
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
  * @tag: identifier for event
  * @erp: erp_action running
  */
-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
 {
        struct zfcp_dbf *dbf = erp->adapter->dbf;
        struct zfcp_dbf_rec *rec = &dbf->rec_buf;
@@ -319,10 +320,20 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
        else
                rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
 
-       debug_event(dbf->rec, 1, rec, sizeof(*rec));
+       debug_event(dbf->rec, level, rec, sizeof(*rec));
        spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+/**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+       zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
 /**
  * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
  * @tag: identifier for event
index 36d07584271d569d27ec2eeb3706235d6459e026..db186d44cfafb6036a7e452e68e2c8f078cff6d1 100644 (file)
@@ -2,7 +2,7 @@
  * zfcp device driver
  * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2015
+ * Copyright IBM Corp. 2008, 2016
  */
 
 #ifndef ZFCP_DBF_H
@@ -283,6 +283,30 @@ struct zfcp_dbf {
        struct zfcp_dbf_scsi            scsi_buf;
 };
 
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Returns true if the FCP response reports only a benign residual under count.
+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+       struct fsf_qtcb *qtcb = req->qtcb;
+       u32 fsf_stat = qtcb->header.fsf_status;
+       struct fcp_resp *fcp_rsp;
+       u8 rsp_flags, fr_status;
+
+       if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+               return false; /* not an FCP response */
+       fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+       rsp_flags = fcp_rsp->fr_flags;
+       fr_status = fcp_rsp->fr_status;
+       return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+               (rsp_flags == FCP_RESID_UNDER) &&
+               (fr_status == SAM_STAT_GOOD);
+}
+
 static inline
 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
                zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
 
        } else if (qtcb->header.fsf_status != FSF_GOOD) {
-               zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+               zfcp_dbf_hba_fsf_resp("fs_ferr",
+                                     zfcp_dbf_hba_fsf_resp_suppress(req)
+                                     ? 5 : 1, req);
 
        } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
                   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
        _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
 }
 
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+                                         struct zfcp_fsf_req *fsf_req)
+{
+       _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
 #endif /* ZFCP_DBF_H */
index a59d678125bd0e0ad0bd1ca74b0d42985abb25d8..7ccfce55903423f5e998fe3d93f6942a5879376f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
        }
 }
 
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+       unsigned long flags;
+       struct zfcp_adapter *adapter = port->adapter;
+       int port_status;
+       struct Scsi_Host *shost = adapter->scsi_host;
+       struct scsi_device *sdev;
+
+       write_lock_irqsave(&adapter->erp_lock, flags);
+       port_status = atomic_read(&port->status);
+       if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED)    == 0 ||
+           (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+                           ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+               /* new ERP of severity >= port triggered elsewhere meanwhile or
+                * local link down (adapter erp_failed but not clear unblock)
+                */
+               zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+               write_unlock_irqrestore(&adapter->erp_lock, flags);
+               return;
+       }
+       spin_lock(shost->host_lock);
+       __shost_for_each_device(sdev, shost) {
+               struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+               int lun_status;
+
+               if (zsdev->port != port)
+                       continue;
+               /* LUN under port of interest */
+               lun_status = atomic_read(&zsdev->status);
+               if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+                       continue; /* unblock rport despite failed LUNs */
+               /* LUN recovery not given up yet [maybe follow-up pending] */
+               if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+                   (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+                       /* LUN blocked:
+                        * not yet unblocked [LUN recovery pending]
+                        * or meanwhile blocked [new LUN recovery triggered]
+                        */
+                       zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+                       spin_unlock(shost->host_lock);
+                       write_unlock_irqrestore(&adapter->erp_lock, flags);
+                       return;
+               }
+       }
+       /* now port has no child or all children have completed recovery,
+        * and no ERP of severity >= port was meanwhile triggered elsewhere
+        */
+       zfcp_scsi_schedule_rport_register(port);
+       spin_unlock(shost->host_lock);
+       write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 {
        struct zfcp_adapter *adapter = act->adapter;
@@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
        case ZFCP_ERP_ACTION_REOPEN_LUN:
                if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
                        scsi_device_put(sdev);
+               zfcp_erp_try_rport_unblock(port);
                break;
 
        case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
                 */
                if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
                        if (result == ZFCP_ERP_SUCCEEDED)
-                               zfcp_scsi_schedule_rport_register(port);
+                               zfcp_erp_try_rport_unblock(port);
                /* fall through */
        case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
                put_device(&port->dev);
index 968a0ab4b398c23329f5713b07091db1ab80d92d..9afdbc32b23f6386a7dce2b51e4cf67ad562d215 100644 (file)
@@ -3,7 +3,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #ifndef ZFCP_EXT_H
@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
                              struct zfcp_port *, struct scsi_device *, u8, u8);
 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+                                struct zfcp_erp_action *erp);
 extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
 extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
index be1c04b334c51f678d643e4c488173f8fd6be0ee..ea3c76ac0de14dc8ea9ad147d1f5b9ab80cc1172 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #ifndef FSF_H
@@ -78,6 +78,7 @@
 #define FSF_APP_TAG_CHECK_FAILURE              0x00000082
 #define FSF_REF_TAG_CHECK_FAILURE              0x00000083
 #define FSF_ADAPTER_STATUS_AVAILABLE           0x000000AD
+#define FSF_FCP_RSP_AVAILABLE                  0x000000AF
 #define FSF_UNKNOWN_COMMAND                    0x000000E2
 #define FSF_UNKNOWN_OP_SUBTYPE                  0x000000E3
 #define FSF_INVALID_COMMAND_OPTION              0x000000E5
index 7c2c6194dfca58e1eb2fe6466abf64fd928deb96..703fce59befef0be884449e00addad4a49981d61 100644 (file)
@@ -4,7 +4,7 @@
  * Data structure and helper functions for tracking pending FSF
  * requests.
  *
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2016
  */
 
 #ifndef ZFCP_REQLIST_H
@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
        spin_unlock_irqrestore(&rl->lock, flags);
 }
 
+/**
+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
+ * @rl: the requestlist that contains the target requests.
+ * @f: the function to apply to each request; the first parameter of the
+ *     function will be the target-request; the second parameter is the same
+ *     pointer as given with the argument @data.
+ * @data: freely chosen argument; passed through to @f as second parameter.
+ *
+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
+ * table (not a 'safe' variant, so don't modify the list).
+ *
+ * Holds @rl->lock over the entire request-iteration.
+ */
+static inline void
+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
+                          void (*f)(struct zfcp_fsf_req *, void *), void *data)
+{
+       struct zfcp_fsf_req *req;
+       unsigned long flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&rl->lock, flags);
+       for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+               list_for_each_entry(req, &rl->buckets[i], list)
+                       f(req, data);
+       spin_unlock_irqrestore(&rl->lock, flags);
+}
+
 #endif /* ZFCP_REQLIST_H */
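
Later in this series, zfcp_scsi_forget_cmnds() uses this iterator to NULLify commands after a successful task management function. A minimal hedged usage sketch; count_pending() and its caller are illustrative, not zfcp source:

    static void count_pending(struct zfcp_fsf_req *req, void *data)
    {
            (*(unsigned long *)data)++;
    }

    /* in a caller holding a valid adapter: */
    unsigned long n = 0;

    zfcp_reqlist_apply_for_all(adapter->req_list, count_pending, &n);
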
index 9069f98a18172e754c943010654de65e91c2fb7c..07ffdbb5107f732082e88c94b0362b845e7e17d6 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -88,9 +88,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
        }
 
        if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
-               /* This could be either
-                * open LUN pending: this is temporary, will result in
-                *      open LUN or ERP_FAILED, so retry command
+               /* This could be a
                 * call to rport_delete pending: mimic retry from
                 *      fc_remote_port_chkready until rport is BLOCKED
                 */
@@ -209,6 +207,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
        return retval;
 }
 
+struct zfcp_scsi_req_filter {
+       u8 tmf_scope;
+       u32 lun_handle;
+       u32 port_handle;
+};
+
+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
+{
+       struct zfcp_scsi_req_filter *filter =
+               (struct zfcp_scsi_req_filter *)data;
+
+       /* already aborted - prevent side-effects - or not a SCSI command */
+       if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+               return;
+
+       /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
+       if (old_req->qtcb->header.port_handle != filter->port_handle)
+               return;
+
+       if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
+           old_req->qtcb->header.lun_handle != filter->lun_handle)
+               return;
+
+       zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
+       old_req->data = NULL;
+}
+
+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
+{
+       struct zfcp_adapter *adapter = zsdev->port->adapter;
+       struct zfcp_scsi_req_filter filter = {
+               .tmf_scope = FCP_TMF_TGT_RESET,
+               .port_handle = zsdev->port->handle,
+       };
+       unsigned long flags;
+
+       if (tm_flags == FCP_TMF_LUN_RESET) {
+               filter.tmf_scope = FCP_TMF_LUN_RESET;
+               filter.lun_handle = zsdev->lun_handle;
+       }
+
+       /*
+        * abort_lock guards (struct zfcp_fsf_req *)->data against concurrent
+        * processing in the abort function and the normal command handler
+        */
+       write_lock_irqsave(&adapter->abort_lock, flags);
+       zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
+                                  &filter);
+       write_unlock_irqrestore(&adapter->abort_lock, flags);
+}
+
 static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 {
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
@@ -241,8 +290,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
        if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
                zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
                retval = FAILED;
-       } else
+       } else {
                zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+               zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+       }
 
        zfcp_fsf_req_free(fsf_req);
        return retval;
index a56a7b243e91fae96b05cae0118d96e9d284dd7b..316f87fe32997138911d064a84b2395a1001276f 100644 (file)
@@ -1,8 +1,8 @@
 /*
    3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
-   Modifications By: Tom Couch <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
+   Modifications By: Tom Couch
 
    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
    Copyright (C) 2010 LSI Corporation.
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 
    Note: This version of the driver does not contain a bundled firmware
          image.
index 0fdc83cfa0e1a28a42757ae52f434523238075b3..b6c208cc474f0e4ae5d37b0c7465c99129121cc7 100644 (file)
@@ -1,8 +1,8 @@
 /*
    3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
-   Modifications By: Tom Couch <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
+   Modifications By: Tom Couch
 
    Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
    Copyright (C) 2010 LSI Corporation.
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 */
 
 #ifndef _3W_9XXX_H
index f8374850f714dd09c53aa1eb25a38bbcb7d0ee8d..970d8fa6bd53eb0135343483589727757cc21464 100644 (file)
@@ -1,7 +1,7 @@
 /*
    3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
 
    Copyright (C) 2009 LSI Corporation.
 
    LSI 3ware 9750 6Gb/s SAS/SATA-RAID
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 
    History
    -------
index fec6449c7595132f706439277cd396b4dc66c0a8..05e77d84c16d95254544750c4e297a35887fc5dc 100644 (file)
@@ -1,7 +1,7 @@
 /*
    3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
 
    Copyright (C) 2009 LSI Corporation.
 
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
    Bugs/Comments/Suggestions should be mailed to:
-   linuxraid@lsi.com
-
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
 */
 
 #ifndef _3W_SAS_H
index 25aba1613e2157f7a2e468007c202a16015b3f40..aa412ab0276523077cf09fda7ddbe086f41ec89b 100644 (file)
@@ -1,7 +1,7 @@
 /* 
    3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
 
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
    Modifications By: Joel Jacobson <linux@3ware.com>
                     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                      Brad Strand <linux@3ware.com>
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA 
 
    Bugs/Comments/Suggestions should be mailed to:                            
-   linuxraid@lsi.com
 
-   For more information, goto:
-   http://www.lsi.com
+   aradford@gmail.com
+
 
    History
    -------
index 6f65e663d3932108edaed6b75eba328d469f4fce..69e80c1ed1ca642806698177f0c345e2d600b2c5 100644 (file)
@@ -1,7 +1,7 @@
 /* 
    3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
    
-   Written By: Adam Radford <linuxraid@lsi.com>
+   Written By: Adam Radford <aradford@gmail.com>
    Modifications By: Joel Jacobson <linux@3ware.com>
                     Arnaldo Carvalho de Melo <acme@conectiva.com.br>
                      Brad Strand <linux@3ware.com>
@@ -45,7 +45,8 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA 
 
    Bugs/Comments/Suggestions should be mailed to:                            
-   linuxraid@lsi.com
+
+   aradford@gmail.com
    
    For more information, goto:
    http://www.lsi.com
index dfa93347c752479aeff9f4c307911add845a880e..a4f6b0d955159cde292b9c7f09811d5d542c3fa8 100644 (file)
@@ -1233,6 +1233,7 @@ config SCSI_QLOGICPTI
 
 source "drivers/scsi/qla2xxx/Kconfig"
 source "drivers/scsi/qla4xxx/Kconfig"
+source "drivers/scsi/qedi/Kconfig"
 
 config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
index a2d03957cbe2e85626199e854f6ad1e26f5128ee..736b77414a4baae3fe9520dc1eea00df591b4d7f 100644 (file)
@@ -131,6 +131,7 @@ obj-$(CONFIG_PS3_ROM)               += ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)  += libiscsi.o bnx2i/
+obj-$(CONFIG_QEDI)          += libiscsi.o qedi/
 obj-$(CONFIG_BE2ISCSI)         += libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_ESAS2R)      += esas2r/
 obj-$(CONFIG_SCSI_PMCRAID)     += pmcraid.o
index d849ffa378b1ef19f80af760d53ae29774007e7c..4f5ca794bb71507a90879af23f0f4ea998fea9ec 100644 (file)
@@ -97,9 +97,6 @@
  * and macros and include this file in your driver.
  *
  * These macros control options :
- * AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
- * defined.
- *
  * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
  * for commands that return with a CHECK CONDITION status.
  *
  * NCR5380_dma_residual   - residual byte count
  *
  * The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID.  If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
+ * after setting the appropriate host specific fields and ID.
  */
 
 #ifndef NCR5380_io_delay
@@ -351,76 +346,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 }
 #endif
 
-
-static int probe_irq;
-
-/**
- * probe_intr  -       helper for IRQ autoprobe
- * @irq: interrupt number
- * @dev_id: unused
- * @regs: unused
- *
- * Set a flag to indicate the IRQ in question was received. This is
- * used by the IRQ probe code.
- */
-
-static irqreturn_t probe_intr(int irq, void *dev_id)
-{
-       probe_irq = irq;
-       return IRQ_HANDLED;
-}
-
-/**
- * NCR5380_probe_irq   -       find the IRQ of an NCR5380
- * @instance: NCR5380 controller
- * @possible: bitmask of ISA IRQ lines
- *
- * Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
- * and then looking to see what interrupt actually turned up.
- */
-
-static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
-                                               int possible)
-{
-       struct NCR5380_hostdata *hostdata = shost_priv(instance);
-       unsigned long timeout;
-       int trying_irqs, i, mask;
-
-       for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1)
-               if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))
-                       trying_irqs |= mask;
-
-       timeout = jiffies + msecs_to_jiffies(250);
-       probe_irq = NO_IRQ;
-
-       /*
-        * A interrupt is triggered whenever BSY = false, SEL = true
-        * and a bit set in the SELECT_ENABLE_REG is asserted on the
-        * SCSI bus.
-        *
-        * Note that the bus is only driven when the phase control signals
-        * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
-        * to zero.
-        */
-
-       NCR5380_write(TARGET_COMMAND_REG, 0);
-       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-       NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
-       NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
-
-       while (probe_irq == NO_IRQ && time_before(jiffies, timeout))
-               schedule_timeout_uninterruptible(1);
-
-       NCR5380_write(SELECT_ENABLE_REG, 0);
-       NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-       for (i = 1, mask = 2; i < 16; ++i, mask <<= 1)
-               if (trying_irqs & mask)
-                       free_irq(i, NULL);
-
-       return probe_irq;
-}
-
 /**
  * NCR58380_info - report driver and host information
  * @instance: relevant scsi host instance
index 3c6ce5434449677026551e8d1acd5c447785bb8e..51a3567a6fb2a36a3eb44fbf94edf5536e333fe4 100644 (file)
 
 #define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
 
-/*
- * These are "special" values for the irq and dma_channel fields of the 
- * Scsi_Host structure
- */
-
-#define DMA_NONE       255
-#define IRQ_AUTO       254
-#define DMA_AUTO       254
-#define PORT_AUTO      0xffff  /* autoprobe io port for 53c400a */
-
 #ifndef NO_IRQ
 #define NO_IRQ         0
 #endif
@@ -290,7 +280,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
 #define NCR5380_dprint_phase(flg, arg) do {} while (0)
 #endif
 
-static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
 static int NCR5380_init(struct Scsi_Host *instance, int flags);
 static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
 static void NCR5380_exit(struct Scsi_Host *instance);
index e4f3e22fcbd9b3d4463374375e7ee32542f409d4..3ecbf20ca29f96b970cd4b14c38eb3fdbb3b1511 100644 (file)
@@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = {
        { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
        { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
        { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
-       { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
        { 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = {
        { aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
        { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
        { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
-       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
 };
 
 /**
index 9e6f647ff1c16828f48d5abed6ce248155106977..9a2fdc305cf2a9a0c7b4f4fabae4191bb9f12ad1 100644 (file)
@@ -189,7 +189,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
 {
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
-       int t4 = is_t4(lldi->adapter_type);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
@@ -232,7 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
-       } else {
+       } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req *req =
                                (struct cpl_t5_act_open_req *)skb->head;
                u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -260,12 +259,45 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
+       } else {
+               struct cpl_t6_act_open_req *req =
+                               (struct cpl_t6_act_open_req *)skb->head;
+               u32 isn = (prandom_u32() & ~7UL) - 1;
+
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+                                                           qid_atid));
+               req->local_port = csk->saddr.sin_port;
+               req->peer_port = csk->daddr.sin_port;
+               req->local_ip = csk->saddr.sin_addr.s_addr;
+               req->peer_ip = csk->daddr.sin_addr.s_addr;
+               req->opt0 = cpu_to_be64(opt0);
+               req->params = cpu_to_be64(FILTER_TUPLE_V(
+                               cxgb4_select_ntuple(
+                                       csk->cdev->ports[csk->port_id],
+                                       csk->l2t)));
+               req->rsvd = cpu_to_be32(isn);
+
+               opt2 |= T5_ISS_VALID;
+               opt2 |= RX_FC_DISABLE_F;
+               opt2 |= T5_OPT_2_VALID_F;
+
+               req->opt2 = cpu_to_be32(opt2);
+               req->rsvd2 = cpu_to_be32(0);
+               req->opt3 = cpu_to_be32(0);
+
+               log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+                         "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
+                         csk, &req->local_ip, ntohs(req->local_port),
+                         &req->peer_ip, ntohs(req->peer_port),
+                         csk->atid, csk->rss_qid);
        }
 
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 
        pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
-                      (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+                      (&csk->saddr), (&csk->daddr),
+                      CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
                       csk->state, csk->flags, csk->atid, csk->rss_qid);
 
        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
@@ -276,7 +308,6 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                               struct l2t_entry *e)
 {
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
-       int t4 = is_t4(lldi->adapter_type);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
@@ -294,10 +325,9 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
 
        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F |
-               RX_FC_DISABLE_F |
                RSS_QUEUE_V(csk->rss_qid);
 
-       if (t4) {
+       if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req6 *req =
                            (struct cpl_act_open_req6 *)skb->head;
 
@@ -322,7 +352,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                req->params = cpu_to_be32(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t));
-       } else {
+       } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req6 *req =
                                (struct cpl_t5_act_open_req6 *)skb->head;
 
@@ -345,12 +375,41 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t)));
+       } else {
+               struct cpl_t6_act_open_req6 *req =
+                               (struct cpl_t6_act_open_req6 *)skb->head;
+
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                                           qid_atid));
+               req->local_port = csk->saddr6.sin6_port;
+               req->peer_port = csk->daddr6.sin6_port;
+               req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+               req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+                                                                       8);
+               req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+               req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+                                                                       8);
+               req->opt0 = cpu_to_be64(opt0);
+
+               opt2 |= RX_FC_DISABLE_F;
+               opt2 |= T5_OPT_2_VALID_F;
+
+               req->opt2 = cpu_to_be32(opt2);
+
+               req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
+                                         csk->cdev->ports[csk->port_id],
+                                         csk->l2t)));
+
+               req->rsvd2 = cpu_to_be32(0);
+               req->opt3 = cpu_to_be32(0);
        }
 
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
 
        pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
-               t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+               CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
+               csk->flags, csk->atid,
                &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
                csk->rss_qid);
@@ -742,7 +801,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
                       (&csk->saddr), (&csk->daddr),
                       atid, tid, csk, csk->state, csk->flags, rcv_isn);
 
-       module_put(THIS_MODULE);
+       module_put(cdev->owner);
 
        cxgbi_sock_get(csk);
        csk->tid = tid;
@@ -891,7 +950,7 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
        if (is_neg_adv(status))
                goto rel_skb;
 
-       module_put(THIS_MODULE);
+       module_put(cdev->owner);
 
        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
@@ -1173,6 +1232,101 @@ rel_skb:
        __kfree_skb(skb);
 }
 
+static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+       struct cxgbi_sock *csk;
+       struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
+       struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+       struct tid_info *t = lldi->tids;
+       struct sk_buff *lskb;
+       u32 tid = GET_TID(cpl);
+       u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
+
+       csk = lookup_tid(t, tid);
+       if (unlikely(!csk)) {
+               pr_err("can't find conn. for tid %u.\n", tid);
+               goto rel_skb;
+       }
+
+       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+                 "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
+                 csk, csk->state, csk->flags, csk->tid, skb,
+                 skb->len, pdu_len_ddp);
+
+       spin_lock_bh(&csk->lock);
+
+       if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+               log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+                         "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+                         csk, csk->state, csk->flags, csk->tid);
+
+               if (csk->state != CTP_ABORTING)
+                       goto abort_conn;
+               else
+                       goto discard;
+       }
+
+       cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
+       cxgbi_skcb_flags(skb) = 0;
+
+       skb_reset_transport_header(skb);
+       __skb_pull(skb, sizeof(*cpl));
+       __pskb_trim(skb, ntohs(cpl->len));
+
+       if (!csk->skb_ulp_lhdr)
+               csk->skb_ulp_lhdr = skb;
+
+       lskb = csk->skb_ulp_lhdr;
+       cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
+
+       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+                 "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
+                 csk, csk->state, csk->flags, skb, lskb);
+
+       __skb_queue_tail(&csk->receive_queue, skb);
+       spin_unlock_bh(&csk->lock);
+       return;
+
+abort_conn:
+       send_abort_req(csk);
+discard:
+       spin_unlock_bh(&csk->lock);
+rel_skb:
+       __kfree_skb(skb);
+}
+
+static void
+cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
+                     struct sk_buff *skb, u32 ddpvld)
+{
+       if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
+               pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
+                       csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
+       }
+
+       if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
+               pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
+                       csk, skb, ddpvld, cxgbi_skcb_flags(skb));
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
+       }
+
+       if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
+               log_debug(1 << CXGBI_DBG_PDU_RX,
+                         "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
+                         csk, skb, ddpvld);
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
+       }
+
+       if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
+           !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
+               log_debug(1 << CXGBI_DBG_PDU_RX,
+                         "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
+                         csk, skb, ddpvld);
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
+       }
+}
+
 static void do_rx_data_ddp(struct cxgbi_device *cdev,
                                  struct sk_buff *skb)
 {
@@ -1182,7 +1336,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
-       unsigned int status = ntohl(rpl->ddpvld);
+       u32 ddpvld = be32_to_cpu(rpl->ddpvld);
 
        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
@@ -1192,7 +1346,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
-               csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);
+               csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
 
        spin_lock_bh(&csk->lock);
 
@@ -1220,29 +1374,8 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev,
                pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
                        csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
 
-       if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
-               pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
-                       csk, lskb, status, cxgbi_skcb_flags(lskb));
-               cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
-       }
-       if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
-               pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
-                       csk, lskb, status, cxgbi_skcb_flags(lskb));
-               cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
-       }
-       if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
-               log_debug(1 << CXGBI_DBG_PDU_RX,
-                       "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
-                       csk, lskb, status);
-               cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
-       }
-       if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
-               !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
-               log_debug(1 << CXGBI_DBG_PDU_RX,
-                       "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
-                       csk, lskb, status);
-               cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
-       }
+       cxgb4i_process_ddpvld(csk, lskb, ddpvld);
+
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
                csk, lskb, cxgbi_skcb_flags(lskb));
@@ -1260,6 +1393,98 @@ rel_skb:
        __kfree_skb(skb);
 }
 
+static void
+do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
+{
+       struct cxgbi_sock *csk;
+       struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
+       struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
+       struct tid_info *t = lldi->tids;
+       struct sk_buff *data_skb = NULL;
+       u32 tid = GET_TID(rpl);
+       u32 ddpvld = be32_to_cpu(rpl->ddpvld);
+       u32 seq = be32_to_cpu(rpl->seq);
+       u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
+
+       csk = lookup_tid(t, tid);
+       if (unlikely(!csk)) {
+               pr_err("can't find connection for tid %u.\n", tid);
+               goto rel_skb;
+       }
+
+       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
+                 "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
+                 "pdu_len_ddp %u, status %u.\n",
+                 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
+                 ntohs(rpl->len), pdu_len_ddp, rpl->status);
+
+       spin_lock_bh(&csk->lock);
+
+       if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
+               log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
+                         "csk 0x%p,%u,0x%lx,%u, bad state.\n",
+                         csk, csk->state, csk->flags, csk->tid);
+
+               if (csk->state != CTP_ABORTING)
+                       goto abort_conn;
+               else
+                       goto discard;
+       }
+
+       cxgbi_skcb_tcp_seq(skb) = seq;
+       cxgbi_skcb_flags(skb) = 0;
+       cxgbi_skcb_rx_pdulen(skb) = 0;
+
+       skb_reset_transport_header(skb);
+       __skb_pull(skb, sizeof(*rpl));
+       __pskb_trim(skb, be16_to_cpu(rpl->len));
+
+       csk->rcv_nxt = seq + pdu_len_ddp;
+
+       if (csk->skb_ulp_lhdr) {
+               data_skb = skb_peek(&csk->receive_queue);
+               if (!data_skb ||
+                   !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
+                       pr_err("Error! freelist data not found 0x%p, tid %u\n",
+                              data_skb, tid);
+
+                       goto abort_conn;
+               }
+               __skb_unlink(data_skb, &csk->receive_queue);
+
+               cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
+
+               __skb_queue_tail(&csk->receive_queue, skb);
+               __skb_queue_tail(&csk->receive_queue, data_skb);
+       } else {
+                __skb_queue_tail(&csk->receive_queue, skb);
+       }
+
+       csk->skb_ulp_lhdr = NULL;
+
+       cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
+       cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
+       cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
+       cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
+
+       cxgb4i_process_ddpvld(csk, skb, ddpvld);
+
+       log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
+                 csk, skb, cxgbi_skcb_flags(skb));
+
+       cxgbi_conn_pdu_ready(csk);
+       spin_unlock_bh(&csk->lock);
+
+       return;
+
+abort_conn:
+       send_abort_req(csk);
+discard:
+       spin_unlock_bh(&csk->lock);
+rel_skb:
+       __kfree_skb(skb);
+}
+
 static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
 {
        struct cxgbi_sock *csk;
@@ -1382,7 +1607,6 @@ static int init_act_open(struct cxgbi_sock *csk)
        void *daddr;
        unsigned int step;
        unsigned int size, size6;
-       int t4 = is_t4(lldi->adapter_type);
        unsigned int linkspeed;
        unsigned int rcv_winf, snd_winf;
 
@@ -1428,12 +1652,15 @@ static int init_act_open(struct cxgbi_sock *csk)
                cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
 #endif
 
-       if (t4) {
+       if (is_t4(lldi->adapter_type)) {
                size = sizeof(struct cpl_act_open_req);
                size6 = sizeof(struct cpl_act_open_req6);
-       } else {
+       } else if (is_t5(lldi->adapter_type)) {
                size = sizeof(struct cpl_t5_act_open_req);
                size6 = sizeof(struct cpl_t5_act_open_req6);
+       } else {
+               size = sizeof(struct cpl_t6_act_open_req);
+               size6 = sizeof(struct cpl_t6_act_open_req6);
        }
 
        if (csk->csk_family == AF_INET)
@@ -1452,8 +1679,8 @@ static int init_act_open(struct cxgbi_sock *csk)
                csk->mtu = dst_mtu(csk->dst);
        cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
        csk->tx_chan = cxgb4_port_chan(ndev);
-       /* SMT two entries per row */
-       csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
+       csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
+                                        cxgb4_port_viid(ndev));
        step = lldi->ntxq / lldi->nchan;
        csk->txq_idx = cxgb4_port_idx(ndev) * step;
        step = lldi->nrxq / lldi->nchan;
@@ -1486,7 +1713,11 @@ static int init_act_open(struct cxgbi_sock *csk)
                       csk->mtu, csk->mss_idx, csk->smac_idx);
 
        /* must wait for either a act_open_rpl or act_open_establish */
-       try_module_get(THIS_MODULE);
+       if (!try_module_get(cdev->owner)) {
+               pr_err("%s, try_module_get failed.\n", ndev->name);
+               goto rel_resource;
+       }
+
        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        if (csk->csk_family == AF_INET)
                send_act_open_req(csk, skb, csk->l2t);
@@ -1521,10 +1752,11 @@ static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
        [CPL_FW4_ACK] = do_fw4_ack,
        [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
-       [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
+       [CPL_ISCSI_DATA] = do_rx_iscsi_data,
        [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
        [CPL_RX_DATA_DDP] = do_rx_data_ddp,
        [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
+       [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
        [CPL_RX_DATA] = do_rx_data,
 };
 
@@ -1794,10 +2026,12 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
        cdev->nports = lldi->nports;
        cdev->mtus = lldi->mtus;
        cdev->nmtus = NMTUS;
-       cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
+       cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
+                                CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
        cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
        cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
        cdev->itp = &cxgb4i_iscsi_transport;
+       cdev->owner = THIS_MODULE;
 
        cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
                        << FW_VIID_PFN_S;
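
Recording THIS_MODULE in cdev->owner lets the shared libcxgbi code pin and release the module that actually created the device, instead of each call site grabbing THIS_MODULE, which from inside libcxgbi would name the wrong module (see the module_put(owner) added to the ARP-failure path below). The shape of the pattern, with illustrative names:

    dev->owner = THIS_MODULE;               /* at device registration */

    if (!try_module_get(dev->owner))        /* before an async op starts */
            return -ENODEV;
    start_async_op(dev);                    /* every completion/failure path
                                             * ends with module_put(dev->owner) */
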
index 2ffe029ff2b6ff29fbaaada58d277d89d2643f31..9167bcd9fffe9b3a5fea9005671239a96551aace 100644 (file)
@@ -642,6 +642,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                        n->dev->name, ndev->name, mtu);
        }
 
+       if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+               pr_info("%s interface not up.\n", ndev->name);
+               err = -ENETDOWN;
+               goto rel_neigh;
+       }
+
        cdev = cxgbi_device_find_by_netdev(ndev, &port);
        if (!cdev) {
                pr_info("dst %pI4, %s, NOT cxgbi device.\n",
@@ -736,6 +742,12 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
        }
        ndev = n->dev;
 
+       if (!(ndev->flags & IFF_UP) || !netif_carrier_ok(ndev)) {
+               pr_info("%s interface not up.\n", ndev->name);
+               err = -ENETDOWN;
+               goto rel_rt;
+       }
+
        if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
                pr_info("multi-cast route %pI6 port %u, dev %s.\n",
                        daddr6->sin6_addr.s6_addr,
@@ -896,6 +908,7 @@ EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 {
        struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
+       struct module *owner = csk->cdev->owner;
 
        log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
                csk, (csk)->state, (csk)->flags, (csk)->tid);
@@ -906,6 +919,8 @@ void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
        __kfree_skb(skb);
+
+       module_put(owner);
 }
 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
 
@@ -1574,6 +1589,25 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
                return -EIO;
        }
 
+       if (cxgbi_skcb_test_flag(skb, SKCBF_RX_ISCSI_COMPL) &&
+           cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA_DDPD)) {
+               /* If the completion flag is set and the data was placed
+                * directly into host memory, update task->exp_datasn to
+                * the DataSN in the completion iSCSI header, since the
+                * T6 adapter generates a completion only for the last
+                * PDU of a sequence.
+                */
+               itt_t itt = ((struct iscsi_data *)skb->data)->itt;
+               struct iscsi_task *task = iscsi_itt_to_ctask(conn, itt);
+               u32 data_sn = be32_to_cpu(((struct iscsi_data *)
+                                                       skb->data)->datasn);
+               if (task && task->sc) {
+                       struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+                       tcp_task->exp_datasn = data_sn;
+               }
+       }
+
        return read_pdu_skb(conn, skb, 0, 0);
 }
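On T6, a DDP-placed Data-In sequence generates a single completion for its final PDU, so intermediate DataSNs never pass through libiscsi. A worked example of the bookkeeping above, assuming a sequence of eight PDUs (DataSN 0..7) placed by DDP:

        /* only the PDU carrying DataSN 7 is delivered; without the
         * update, exp_datasn would still be 0 and libiscsi would treat
         * the completion as out of order */
        tcp_task->exp_datasn = data_sn; /* 7, from the completion hdr */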
 
@@ -1627,15 +1661,15 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
                csk->rcv_wup, cdev->rx_credit_thres,
                csk->rcv_win);
 
+       if (!cdev->rx_credit_thres)
+               return;
+
        if (csk->state != CTP_ESTABLISHED)
                return;
 
        credits = csk->copied_seq - csk->rcv_wup;
        if (unlikely(!credits))
                return;
-       if (unlikely(cdev->rx_credit_thres == 0))
-               return;
-
        must_send = credits + 16384 >= csk->rcv_win;
        if (must_send || credits >= cdev->rx_credit_thres)
                csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
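A zero rx_credit_thres is now a deliberate T6 configuration rather than an error: T6 hardware returns receive credits on its own, so the early return above makes csk_return_rx_credits() a no-op there. On T5 and earlier the forced-update rule still applies; a worked example:

        /* with a 256 KiB receive window, an update is forced once the
         * un-returned credits eat into the last 16 KiB of the window,
         * whatever the configured threshold */
        must_send = credits + 16384 >= csk->rcv_win;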
index e7802738f5d28e0b7e7fe4e3f6bf8075197b838f..95ba99044c3e9c2af6f0a8deb656b878c15cf656 100644 (file)
@@ -207,6 +207,7 @@ enum cxgbi_skcb_flags {
        SKCBF_RX_HDR,           /* received pdu header */
        SKCBF_RX_DATA,          /* received pdu payload */
        SKCBF_RX_STATUS,        /* received ddp status */
+       SKCBF_RX_ISCSI_COMPL,   /* received iscsi completion */
        SKCBF_RX_DATA_DDPD,     /* pdu payload ddp'd */
        SKCBF_RX_HCRC_ERR,      /* header digest error */
        SKCBF_RX_DCRC_ERR,      /* data digest error */
@@ -467,6 +468,7 @@ struct cxgbi_device {
        struct pci_dev *pdev;
        struct dentry *debugfs_root;
        struct iscsi_transport *itp;
+       struct module *owner;
 
        unsigned int pfvf;
        unsigned int rx_credit_thres;
index de5147a8c959aaa7cc070d55a9040e9f1ead37fd..6f9665d50d84bb485d3ba9cf99da05bae0f1c018 100644 (file)
@@ -37,7 +37,7 @@
 #define MAX_CARDS 8
 
 /* old-style parameters for compatibility */
-static int ncr_irq;
+static int ncr_irq = -1;
 static int ncr_addr;
 static int ncr_5380;
 static int ncr_53c400;
@@ -52,9 +52,9 @@ module_param(ncr_53c400a, int, 0);
 module_param(dtc_3181e, int, 0);
 module_param(hp_c2502, int, 0);
 
-static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
 module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])");
 
 static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
 module_param_array(base, int, NULL, 0);
@@ -67,6 +67,56 @@ MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC31
 MODULE_ALIAS("g_NCR5380_mmio");
 MODULE_LICENSE("GPL");
 
+static void g_NCR5380_trigger_irq(struct Scsi_Host *instance)
+{
+       struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+       /*
+        * An interrupt is triggered whenever BSY = false, SEL = true
+        * and a bit set in the SELECT_ENABLE_REG is asserted on the
+        * SCSI bus.
+        *
+        * Note that the bus is only driven when the phase control signals
+        * (I/O, C/D, and MSG) match those in the TCR.
+        */
+       NCR5380_write(TARGET_COMMAND_REG,
+                     PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
+       NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+       NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
+       NCR5380_write(INITIATOR_COMMAND_REG,
+                     ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
+
+       msleep(1);
+
+       NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+       NCR5380_write(SELECT_ENABLE_REG, 0);
+       NCR5380_write(TARGET_COMMAND_REG, 0);
+}
+
+/**
+ * g_NCR5380_probe_irq - find the IRQ of a NCR5380 or equivalent
+ * @instance: SCSI host instance
+ *
+ * Autoprobe for the IRQ line used by the card by triggering an IRQ
+ * and then looking to see what interrupt actually turned up.
+ */
+
+static int g_NCR5380_probe_irq(struct Scsi_Host *instance)
+{
+       struct NCR5380_hostdata *hostdata = shost_priv(instance);
+       int irq_mask, irq;
+
+       NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+       irq_mask = probe_irq_on();
+       g_NCR5380_trigger_irq(instance);
+       irq = probe_irq_off(irq_mask);
+       NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+       if (irq <= 0)
+               return NO_IRQ;
+       return irq;
+}
+
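g_NCR5380_probe_irq() is built on the kernel's generic autoprobe helpers, whose return conventions drive the NO_IRQ fallback. A minimal sketch of the idiom:

        unsigned long mask;
        int irq;

        mask = probe_irq_on();           /* arm unclaimed IRQ lines */
        g_NCR5380_trigger_irq(instance); /* make the card interrupt */
        irq = probe_irq_off(mask);       /* >0: line; 0: none; <0: ambiguous */
        if (irq <= 0)
                irq = NO_IRQ;            /* fall back to polled operation */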
 /*
  * Configure I/O address of 53C400A or DTC436 by writing magic numbers
  * to ports 0x779 and 0x379.
@@ -81,14 +131,33 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
        outb(magic[3], 0x379);
        outb(magic[4], 0x379);
 
-       /* allowed IRQs for HP C2502 */
-       if (irq != 2 && irq != 3 && irq != 4 && irq != 5 && irq != 7)
-               irq = 0;
+       if (irq == 9)
+               irq = 2;
+
        if (idx >= 0 && idx <= 7)
                cfg = 0x80 | idx | (irq << 4);
        outb(cfg, 0x379);
 }
 
+static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id)
+{
+       return IRQ_HANDLED;
+}
+
+static int legacy_find_free_irq(int *irq_table)
+{
+       while (*irq_table != -1) {
+               if (!request_irq(*irq_table, legacy_empty_irq_handler,
+                                IRQF_PROBE_SHARED, "Test IRQ",
+                                (void *)irq_table)) {
+                       free_irq(*irq_table, (void *) irq_table);
+                       return *irq_table;
+               }
+               irq_table++;
+       }
+       return -1;
+}
+
 static unsigned int ncr_53c400a_ports[] = {
        0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
 };
@@ -101,6 +170,9 @@ static u8 ncr_53c400a_magic[] = {   /* 53C400A & DTC436 */
 static u8 hp_c2502_magic[] = { /* HP C2502 */
        0x0f, 0x22, 0xf0, 0x20, 0x80
 };
+static int hp_c2502_irqs[] = {
+       9, 5, 7, 3, 4, -1
+};
 
 static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
                        struct device *pdev, int base, int irq, int board)
@@ -248,6 +320,13 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
                }
        }
 
+       /* Check for vacant slot */
+       NCR5380_write(MODE_REG, 0);
+       if (NCR5380_read(MODE_REG) != 0) {
+               ret = -ENODEV;
+               goto out_unregister;
+       }
+
        ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
        if (ret)
                goto out_unregister;
@@ -262,31 +341,59 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 
        NCR5380_maybe_reset_bus(instance);
 
-       if (irq != IRQ_AUTO)
-               instance->irq = irq;
-       else
-               instance->irq = NCR5380_probe_irq(instance, 0xffff);
-
        /* Compatibility with documented NCR5380 kernel parameters */
-       if (instance->irq == 255)
-               instance->irq = NO_IRQ;
+       if (irq == 255 || irq == 0)
+               irq = NO_IRQ;
+       else if (irq == -1)
+               irq = IRQ_AUTO;
+
+       if (board == BOARD_HP_C2502) {
+               int *irq_table = hp_c2502_irqs;
+               int board_irq = -1;
+
+               switch (irq) {
+               case NO_IRQ:
+                       board_irq = 0;
+                       break;
+               case IRQ_AUTO:
+                       board_irq = legacy_find_free_irq(irq_table);
+                       break;
+               default:
+                       while (*irq_table != -1)
+                               if (*irq_table++ == irq)
+                                       board_irq = irq;
+               }
+
+               if (board_irq <= 0) {
+                       board_irq = 0;
+                       irq = NO_IRQ;
+               }
+
+               magic_configure(port_idx, board_irq, magic);
+       }
+
+       if (irq == IRQ_AUTO) {
+               instance->irq = g_NCR5380_probe_irq(instance);
+               if (instance->irq == NO_IRQ)
+                       shost_printk(KERN_INFO, instance, "no irq detected\n");
+       } else {
+               instance->irq = irq;
+               if (instance->irq == NO_IRQ)
+                       shost_printk(KERN_INFO, instance, "no irq provided\n");
+       }
 
        if (instance->irq != NO_IRQ) {
-               /* set IRQ for HP C2502 */
-               if (board == BOARD_HP_C2502)
-                       magic_configure(port_idx, instance->irq, magic);
                if (request_irq(instance->irq, generic_NCR5380_intr,
                                0, "NCR5380", instance)) {
-                       printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
-                       instance->irq = NO_IRQ;
+                       shost_printk(KERN_INFO, instance,
+                                    "irq %d denied\n", instance->irq);
+                       instance->irq = NO_IRQ;
+               } else {
+                       shost_printk(KERN_INFO, instance,
+                                    "irq %d acquired\n", instance->irq);
                }
        }
 
-       if (instance->irq == NO_IRQ) {
-               printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
-               printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
-       }
-
        ret = scsi_add_host(instance, pdev);
        if (ret)
                goto out_free_irq;
@@ -597,7 +704,7 @@ static int __init generic_NCR5380_init(void)
        int ret = 0;
 
        /* compatibility with old-style parameters */
-       if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+       if (irq[0] == -1 && base[0] == 0 && card[0] == -1) {
                irq[0] = ncr_irq;
                base[0] = ncr_addr;
                if (ncr_5380)
index 3ce5b65ccb00b618012ef0825390a6a098950cd6..81b22d989648b0107f6c0f3128cc25348a88e775 100644 (file)
@@ -51,4 +51,6 @@
 #define BOARD_DTC3181E 3
 #define BOARD_HP_C2502 4
 
+#define IRQ_AUTO       254
+
 #endif /* GENERIC_NCR5380_H */
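With IRQ_AUTO at 254 and the module parameter default moved to -1, the compatibility mapping in generic_NCR5380_init_one() works out as follows (a sketch of the translation shown in the hunks above):

        /* parameter value      effective setting
         *   -1 (new default)   IRQ_AUTO (254): autoprobe the line
         *    0 or 255          NO_IRQ: run in polled mode
         *    n                 use IRQ n as given */
        if (irq == 255 || irq == 0)
                irq = NO_IRQ;
        else if (irq == -1)
                irq = IRQ_AUTO;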
index 691a0931695238cf07fcfc544dfe33740cb38a1c..cbc0c5fe5a60188515dab3dd99be83ce62732e1c 100644 (file)
@@ -1557,10 +1557,9 @@ static void hpsa_monitor_offline_device(struct ctlr_info *h,
 
        /* Device is not on the list, add it. */
        device = kmalloc(sizeof(*device), GFP_KERNEL);
-       if (!device) {
-               dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+       if (!device)
                return;
-       }
+
        memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
        spin_lock_irqsave(&h->offline_device_lock, flags);
        list_add_tail(&device->offline_list, &h->offline_device_list);
@@ -2142,17 +2141,15 @@ static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
 
        h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
                                GFP_KERNEL);
-       if (!h->cmd_sg_list) {
-               dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
+       if (!h->cmd_sg_list)
                return -ENOMEM;
-       }
+
        for (i = 0; i < h->nr_cmds; i++) {
                h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
                                                h->chainsize, GFP_KERNEL);
-               if (!h->cmd_sg_list[i]) {
-                       dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
+               if (!h->cmd_sg_list[i])
                        goto clean;
-               }
+
        }
        return 0;
 
@@ -3454,11 +3451,8 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
                struct bmic_sense_subsystem_info *ssi;
 
                ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
-               if (ssi == NULL) {
-                       dev_warn(&h->pdev->dev,
-                               "%s: out of memory\n", __func__);
+               if (!ssi)
                        return;
-               }
 
                rc = hpsa_bmic_sense_subsystem_information(h,
                                        scsi3addr, 0, ssi, sizeof(*ssi));
@@ -4335,8 +4329,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 
                currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
                if (!currentsd[i]) {
-                       dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
-                               __FILE__, __LINE__);
                        h->drv_req_rescan = 1;
                        goto out;
                }
@@ -8597,14 +8589,12 @@ static int hpsa_luns_changed(struct ctlr_info *h)
         */
 
        if (!h->lastlogicals)
-               goto out;
+               return rc;
 
        logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
-       if (!logdev) {
-               dev_warn(&h->pdev->dev,
-                       "Out of memory, can't track lun changes.\n");
-               goto out;
-       }
+       if (!logdev)
+               return rc;
+
        if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
                dev_warn(&h->pdev->dev,
                        "report luns failed, can't track lun changes.\n");
@@ -8998,11 +8988,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
                return;
 
        options = kzalloc(sizeof(*options), GFP_KERNEL);
-       if (!options) {
-               dev_err(&h->pdev->dev,
-                       "Error: failed to disable rld caching, during alloc.\n");
+       if (!options)
                return;
-       }
 
        c = cmd_alloc(h);
 
index d9534ee6ef524fb1fab03680bdde2ed34a4202b7..50cd01165e355b092fb954d2399f42798d5e3e00 100644 (file)
@@ -95,6 +95,7 @@ static int fast_fail = 1;
 static int client_reserve = 1;
 static char partition_name[97] = "UNKNOWN";
 static unsigned int partition_number = -1;
+static LIST_HEAD(ibmvscsi_head);
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -232,6 +233,7 @@ static void ibmvscsi_task(void *data)
                while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = VIOSRP_CRQ_FREE;
+                       wmb();
                }
 
                vio_enable_interrupts(vdev);
@@ -240,6 +242,7 @@ static void ibmvscsi_task(void *data)
                        vio_disable_interrupts(vdev);
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = VIOSRP_CRQ_FREE;
+                       wmb();
                } else {
                        done = 1;
                }
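The added wmb() calls close an ordering hole: once crq->valid is stored as VIOSRP_CRQ_FREE, firmware may refill the slot, so that store must be visible before anything that could trigger a new delivery. A sketch of the required ordering:

        ibmvscsi_handle_crq(crq, hostdata); /* consume the entry first */
        crq->valid = VIOSRP_CRQ_FREE;       /* hand the slot back */
        wmb();  /* order the store before re-enabling interrupts or
                 * reading the next entry, either of which may expose
                 * the freed slot to the hypervisor */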
@@ -992,7 +995,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
        if (unlikely(rsp->opcode != SRP_RSP)) {
                if (printk_ratelimit())
                        dev_warn(evt_struct->hostdata->dev,
-                                "bad SRP RSP type %d\n", rsp->opcode);
+                                "bad SRP RSP type %#02x\n", rsp->opcode);
        }
        
        if (cmnd) {
@@ -2270,6 +2273,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        }
 
        dev_set_drvdata(&vdev->dev, hostdata);
+       list_add_tail(&hostdata->host_list, &ibmvscsi_head);
        return 0;
 
       add_srp_port_failed:
@@ -2291,6 +2295,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
+       list_del(&hostdata->host_list);
        unmap_persist_bufs(hostdata);
        release_event_pool(&hostdata->pool, hostdata);
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
index e0f6c3aeb4eef35aa390afaa5d7cd6174d8f7a48..3a7875575616e3eec27443a0db7ef9882fcc2528 100644 (file)
@@ -90,6 +90,7 @@ struct event_pool {
 
 /* all driver data associated with a host adapter */
 struct ibmvscsi_host_data {
+       struct list_head host_list;
        atomic_t request_limit;
        int client_migrated;
        int reset_crq;
index c9fa3565c671e9f1b0f4e16e847c713cd6165078..2583e8b50b21c2cfa6ec3d10e624adb8141d5c9d 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/string.h>
+#include <linux/delay.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index 98b0ca79a5c5e2e04c526ca741c504377c75ca9e..65c6189885ab08f24212bce5f008c1973539a0f7 100644 (file)
@@ -26,6 +26,7 @@
 #ifndef __H_IBMVSCSI_TGT
 #define __H_IBMVSCSI_TGT
 
+#include <linux/interrupt.h>
 #include "libsrp.h"
 
 #define SYS_ID_NAME_LEN                64
index 8de0eda8cd006ac9ec0192256c56aec182bd08b9..394fe1338d0976a42f183e328dfaed02f540560f 100644 (file)
@@ -402,6 +402,9 @@ struct MPT3SAS_DEVICE {
        u8      block;
        u8      tlr_snoop_check;
        u8      ignore_delay_remove;
+       /* I/O priority command handling */
+       u8      ncq_prio_enable;
+
 };
 
 #define MPT3_CMD_NOT_USED      0x8000  /* free */
@@ -1458,4 +1461,7 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
        struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
        u16 smid);
 
+/* NCQ Prio Handling Check */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+
 #endif /* MPT3SAS_BASE_H_INCLUDED */
index 050bd788ad029818de9151eaafb3584387755c36..95f0f24bac05598e1c8246cb1078de9163a66e5d 100644 (file)
@@ -3325,8 +3325,6 @@ static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
 
 /*********** diagnostic trigger support *** END *****************************/
 
-
-
 /*****************************************/
 
 struct device_attribute *mpt3sas_host_attrs[] = {
@@ -3402,9 +3400,50 @@ _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
 
+/**
+ * _ctl_device_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA
+ */
+static ssize_t
+_ctl_device_ncq_prio_enable_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       sas_device_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+_ctl_device_ncq_prio_enable_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+       bool ncq_prio_enable = 0;
+
+       if (kstrtobool(buf, &ncq_prio_enable))
+               return -EINVAL;
+
+       if (!scsih_ncq_prio_supp(sdev))
+               return -EINVAL;
+
+       sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
+       return strlen(buf);
+}
+static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
+                  _ctl_device_ncq_prio_enable_show,
+                  _ctl_device_ncq_prio_enable_store);
+
 struct device_attribute *mpt3sas_dev_attrs[] = {
        &dev_attr_sas_address,
        &dev_attr_sas_device_handle,
+       &dev_attr_sas_ncq_prio_enable,
        NULL,
 };
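The new attribute follows the standard kstrtobool pattern for boolean sysfs stores; kstrtobool() accepts "0"/"1", "y"/"n" and "on"/"off" spellings. A condensed sketch of the handler above, with the capability check gating the write:

        bool enable;

        if (kstrtobool(buf, &enable))
                return -EINVAL;         /* unparseable input */
        if (!scsih_ncq_prio_supp(sdev))
                return -EINVAL;         /* SATA-only capability */
        sas_device_priv_data->ncq_prio_enable = enable;
        return strlen(buf);             /* returning count would also do */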
 
index 5c8f75247d739489313613e2f18965fd63b80e86..b5c966e319d315474b94703b93ab0343013dd973 100644 (file)
@@ -4053,6 +4053,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        struct MPT3SAS_TARGET *sas_target_priv_data;
        struct _raid_device *raid_device;
+       struct request *rq = scmd->request;
+       int class;
        Mpi2SCSIIORequest_t *mpi_request;
        u32 mpi_control;
        u16 smid;
@@ -4115,7 +4117,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 
        /* set tags */
        mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
+       /* NCQ prio supported; make sure control indicates high priority */
+       if (sas_device_priv_data->ncq_prio_enable) {
+               class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+               if (class == IOPRIO_CLASS_RT)
+                       mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
+       }
        /* Make sure Device is not raid volume.
         * We do not expose raid functionality to upper layer for warpdrive.
         */
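Only the real-time I/O priority class maps to the firmware's command-priority bit; best-effort and idle classes leave mpi_control unchanged. For example, a request submitted under ionice class 1 (realtime) reaches qcmd with IOPRIO_CLASS_RT:

        class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
        if (class == IOPRIO_CLASS_RT)   /* e.g. ionice -c 1 */
                mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;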
@@ -9099,6 +9106,31 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
        return PCI_ERS_RESULT_RECOVERED;
 }
 
+/**
+ * scsih_ncq_prio_supp - Check for NCQ command priority support
+ * @sdev: scsi device struct
+ *
+ * This is called when a user indicates they would like to enable
+ * ncq command priorities. This works only on SATA devices.
+ */
+bool scsih_ncq_prio_supp(struct scsi_device *sdev)
+{
+       unsigned char *buf;
+       bool ncq_prio_supp = false;
+
+       if (!scsi_device_supports_vpd(sdev))
+               return ncq_prio_supp;
+
+       buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
+       if (!buf)
+               return ncq_prio_supp;
+
+       if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
+               ncq_prio_supp = (buf[213] >> 4) & 1;
+
+       kfree(buf);
+       return ncq_prio_supp;
+}
+
 /*
  * The pci device ids are defined in mpi/mpi2_cnfg.h.
  */
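The offset in scsih_ncq_prio_supp() is less magic than it looks: VPD page 0x89 (ATA Information) embeds the 512-byte IDENTIFY DEVICE data at byte offset 60, so buf[213] is IDENTIFY byte 153, the high byte of word 76 (Serial ATA capabilities), and bit 4 of that byte is word 76 bit 12, "NCQ priority information supported". The arithmetic, as a sketch:

        /* 60 (IDENTIFY start) + 2 * 76 (word offset) + 1 (high byte) = 213;
         * word bit 12 == high-byte bit 4 */
        ncq_prio_supp = (buf[213] >> 4) & 1;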
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
new file mode 100644 (file)
index 0000000..23ca8a2
--- /dev/null
@@ -0,0 +1,10 @@
+config QEDI
+       tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
+       depends on PCI && SCSI
+       depends on QED
+       select SCSI_ISCSI_ATTRS
+       select QED_LL2
+       select QED_ISCSI
+       ---help---
+       This driver supports iSCSI offload for the QLogic FastLinQ
+       41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
new file mode 100644 (file)
index 0000000..2b3e16b
--- /dev/null
@@ -0,0 +1,5 @@
+obj-$(CONFIG_QEDI) := qedi.o
+qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
+           qedi_dbg.o
+
+qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
new file mode 100644 (file)
index 0000000..5ca3e8c
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_H_
+#define _QEDI_H_
+
+#define __PREVENT_QED_HSI__
+
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_host.h>
+#include <linux/uio_driver.h>
+
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedi_dbg.h"
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qedi_version.h"
+
+#define QEDI_MODULE_NAME               "qedi"
+
+struct qedi_endpoint;
+
+/*
+ * PCI function probe defines
+ */
+#define QEDI_MODE_NORMAL       0
+#define QEDI_MODE_RECOVERY     1
+
+#define ISCSI_WQE_SET_PTU_INVALIDATE   1
+#define QEDI_MAX_ISCSI_TASK            4096
+#define QEDI_MAX_TASK_NUM              0x0FFF
+#define QEDI_MAX_ISCSI_CONNS_PER_HBA   1024
+#define QEDI_ISCSI_MAX_BDS_PER_CMD     256     /* Firmware max BDs is 256 */
+#define MAX_OUSTANDING_TASKS_PER_CON   1024
+
+#define QEDI_MAX_BD_LEN                0xffff
+#define QEDI_BD_SPLIT_SZ       0x1000
+#define QEDI_PAGE_SIZE         4096
+#define QEDI_FAST_SGE_COUNT    4
+/* MAX Length for cached SGL */
+#define MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
+
+#define MAX_NUM_MSIX_PF         8
+#define MIN_NUM_CPUS_MSIX(x)   min((x)->msix_count, num_online_cpus())
+
+#define QEDI_LOCAL_PORT_MIN     60000
+#define QEDI_LOCAL_PORT_MAX     61024
+#define QEDI_LOCAL_PORT_RANGE   (QEDI_LOCAL_PORT_MAX - QEDI_LOCAL_PORT_MIN)
+#define QEDI_LOCAL_PORT_INVALID        0xffff
+#define TX_RX_RING             16
+#define RX_RING                        (TX_RX_RING - 1)
+#define LL2_SINGLE_BUF_SIZE    0x400
+#define QEDI_PAGE_SIZE         4096
+#define QEDI_PAGE_ALIGN(addr)  ALIGN(addr, QEDI_PAGE_SIZE)
+#define QEDI_PAGE_MASK         (~((QEDI_PAGE_SIZE) - 1))
+
+#define QEDI_PAGE_SIZE         4096
+#define QEDI_PATH_HANDLE       0xFE0000000UL
+
+struct qedi_uio_ctrl {
+       /* meta data */
+       u32 uio_hsi_version;
+
+       /* user writes */
+       u32 host_tx_prod;
+       u32 host_rx_cons;
+       u32 host_rx_bd_cons;
+       u32 host_tx_pkt_len;
+       u32 host_rx_cons_cnt;
+
+       /* driver writes */
+       u32 hw_tx_cons;
+       u32 hw_rx_prod;
+       u32 hw_rx_bd_prod;
+       u32 hw_rx_prod_cnt;
+
+       /* other */
+       u8 mac_addr[6];
+       u8 reserve[2];
+};
+
+struct qedi_rx_bd {
+       u32 rx_pkt_index;
+       u32 rx_pkt_len;
+       u16 vlan_id;
+};
+
+#define QEDI_RX_DESC_CNT       (QEDI_PAGE_SIZE / sizeof(struct qedi_rx_bd))
+#define QEDI_MAX_RX_DESC_CNT   (QEDI_RX_DESC_CNT - 1)
+#define QEDI_NUM_RX_BD         (QEDI_RX_DESC_CNT * 1)
+#define QEDI_MAX_RX_BD         (QEDI_NUM_RX_BD - 1)
+
+#define QEDI_NEXT_RX_IDX(x)    ((((x) & (QEDI_MAX_RX_DESC_CNT)) ==     \
+                                 (QEDI_MAX_RX_DESC_CNT - 1)) ?         \
+                                (x) + 2 : (x) + 1)
+
+struct qedi_uio_dev {
+       struct uio_info         qedi_uinfo;
+       u32                     uio_dev;
+       struct list_head        list;
+
+       u32                     ll2_ring_size;
+       void                    *ll2_ring;
+
+       u32                     ll2_buf_size;
+       void                    *ll2_buf;
+
+       void                    *rx_pkt;
+       void                    *tx_pkt;
+
+       struct qedi_ctx         *qedi;
+       struct pci_dev          *pdev;
+       void                    *uctrl;
+};
+
+/* List to maintain the skb pointers */
+struct skb_work_list {
+       struct list_head list;
+       struct sk_buff *skb;
+       u16 vlan_id;
+};
+
+/* Queue sizes in number of elements */
+#define QEDI_SQ_SIZE           MAX_OUSTANDING_TASKS_PER_CON
+#define QEDI_CQ_SIZE           2048
+#define QEDI_CMDQ_SIZE         QEDI_MAX_ISCSI_TASK
+#define QEDI_PROTO_CQ_PROD_IDX 0
+
+struct qedi_glbl_q_params {
+       u64 hw_p_cq;    /* Completion queue PBL */
+       u64 hw_p_rq;    /* Request queue PBL */
+       u64 hw_p_cmdq;  /* Command queue PBL */
+};
+
+struct global_queue {
+       union iscsi_cqe *cq;
+       dma_addr_t cq_dma;
+       u32 cq_mem_size;
+       u32 cq_cons_idx; /* Completion queue consumer index */
+
+       void *cq_pbl;
+       dma_addr_t cq_pbl_dma;
+       u32 cq_pbl_size;
+
+};
+
+struct qedi_fastpath {
+       struct qed_sb_info      *sb_info;
+       u16                     sb_id;
+#define QEDI_NAME_SIZE         16
+       char                    name[QEDI_NAME_SIZE];
+       struct qedi_ctx         *qedi;
+};
+
+/* Used to pass fastpath information needed to process CQEs */
+struct qedi_io_work {
+       struct list_head list;
+       struct iscsi_cqe_solicited cqe;
+       u16     que_idx;
+};
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base:           queue base memory
+ * @cid_que:                queue memory pointer
+ * @cid_q_prod_idx:         produce index
+ * @cid_q_cons_idx:         consumer index
+ * @cid_q_max_idx:          max index. used to detect wrap around condition
+ * @cid_free_cnt:           queue size
+ * @conn_cid_tbl:           iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+       void *cid_que_base;
+       u32 *cid_que;
+       u32 cid_q_prod_idx;
+       u32 cid_q_cons_idx;
+       u32 cid_q_max_idx;
+       u32 cid_free_cnt;
+       struct qedi_conn **conn_cid_tbl;
+};
+
+struct qedi_portid_tbl {
+       spinlock_t      lock;   /* Port id lock */
+       u16             start;
+       u16             max;
+       u16             next;
+       unsigned long   *table;
+};
+
+struct qedi_itt_map {
+       __le32  itt;
+       struct qedi_cmd *p_cmd;
+};
+
+/* I/O tracing entry */
+#define QEDI_IO_TRACE_SIZE             2048
+struct qedi_io_log {
+#define QEDI_IO_TRACE_REQ              0
+#define QEDI_IO_TRACE_RSP              1
+       u8 direction;
+       u16 task_id;
+       u32 cid;
+       u32 port_id;    /* Remote port fabric ID */
+       int lun;
+       u8 op;          /* SCSI CDB */
+       u8 lba[4];
+       unsigned int bufflen;   /* SCSI buffer length */
+       unsigned int sg_count;  /* Number of SG elements */
+       u8 fast_sgs;            /* number of fast sgls */
+       u8 slow_sgs;            /* number of slow sgls */
+       u8 cached_sgs;          /* number of cached sgls */
+       int result;             /* Result passed back to mid-layer */
+       unsigned long jiffies;  /* Time stamp when I/O logged */
+       int refcount;           /* Reference count for task id */
+       unsigned int blk_req_cpu; /* CPU that the task is queued on by
+                                  * blk layer
+                                  */
+       unsigned int req_cpu;   /* CPU that the task is queued on */
+       unsigned int intr_cpu;  /* Interrupt CPU that the task is received on */
+       unsigned int blk_rsp_cpu;/* CPU that task is actually processed and
+                                 * returned to blk layer
+                                 */
+       bool cached_sge;
+       bool slow_sge;
+       bool fast_sge;
+};
+
+/* Number of entries in BDQ */
+#define QEDI_BDQ_NUM           256
+#define QEDI_BDQ_BUF_SIZE      256
+
+/* DMA coherent buffers for BDQ */
+struct qedi_bdq_buf {
+       void *buf_addr;
+       dma_addr_t buf_dma;
+};
+
+/* Main port level struct */
+struct qedi_ctx {
+       struct qedi_dbg_ctx dbg_ctx;
+       struct Scsi_Host *shost;
+       struct pci_dev *pdev;
+       struct qed_dev *cdev;
+       struct qed_dev_iscsi_info dev_info;
+       struct qed_int_info int_info;
+       struct qedi_glbl_q_params *p_cpuq;
+       struct global_queue **global_queues;
+       /* uio declaration */
+       struct qedi_uio_dev *udev;
+       struct list_head ll2_skb_list;
+       spinlock_t ll2_lock;    /* Light L2 lock */
+       spinlock_t hba_lock;    /* per port lock */
+       struct task_struct *ll2_recv_thread;
+       unsigned long flags;
+#define UIO_DEV_OPENED         1
+#define QEDI_IOTHREAD_WAKE     2
+#define QEDI_IN_RECOVERY       5
+#define QEDI_IN_OFFLINE                6
+
+       u8 mac[ETH_ALEN];
+       u32 src_ip[4];
+       u8 ip_type;
+
+       /* Physical address of above array */
+       dma_addr_t hw_p_cpuq;
+
+       struct qedi_bdq_buf bdq[QEDI_BDQ_NUM];
+       void *bdq_pbl;
+       dma_addr_t bdq_pbl_dma;
+       size_t bdq_pbl_mem_size;
+       void *bdq_pbl_list;
+       dma_addr_t bdq_pbl_list_dma;
+       u8 bdq_pbl_list_num_entries;
+       void __iomem *bdq_primary_prod;
+       void __iomem *bdq_secondary_prod;
+       u16 bdq_prod_idx;
+       u16 rq_num_entries;
+
+       u32 msix_count;
+       u32 max_sqes;
+       u8 num_queues;
+       u32 max_active_conns;
+
+       struct iscsi_cid_queue cid_que;
+       struct qedi_endpoint **ep_tbl;
+       struct qedi_portid_tbl lcl_port_tbl;
+
+       /* Rx fast path intr context */
+       struct qed_sb_info      *sb_array;
+       struct qedi_fastpath    *fp_array;
+       struct qed_iscsi_tid    tasks;
+
+#define QEDI_LINK_DOWN         0
+#define QEDI_LINK_UP           1
+       atomic_t link_state;
+
+#define QEDI_RESERVE_TASK_ID   0
+#define MAX_ISCSI_TASK_ENTRIES 4096
+#define QEDI_INVALID_TASK_ID   (MAX_ISCSI_TASK_ENTRIES + 1)
+       unsigned long task_idx_map[MAX_ISCSI_TASK_ENTRIES / BITS_PER_LONG];
+       struct qedi_itt_map *itt_map;
+       u16 tid_reuse_count[QEDI_MAX_ISCSI_TASK];
+       struct qed_pf_params pf_params;
+
+       struct workqueue_struct *tmf_thread;
+       struct workqueue_struct *offload_thread;
+
+       u16 ll2_mtu;
+
+       struct workqueue_struct *dpc_wq;
+
+       spinlock_t task_idx_lock;       /* To protect gbl context */
+       s32 last_tidx_alloc;
+       s32 last_tidx_clear;
+
+       struct qedi_io_log io_trace_buf[QEDI_IO_TRACE_SIZE];
+       spinlock_t io_trace_lock;       /* protect trace log buf */
+       u16 io_trace_idx;
+       unsigned int intr_cpu;
+       u32 cached_sgls;
+       bool use_cached_sge;
+       u32 slow_sgls;
+       bool use_slow_sge;
+       u32 fast_sgls;
+       bool use_fast_sge;
+
+       atomic_t num_offloads;
+};
+
+struct qedi_work {
+       struct list_head list;
+       struct qedi_ctx *qedi;
+       union iscsi_cqe cqe;
+       u16     que_idx;
+       bool is_solicited;
+};
+
+struct qedi_percpu_s {
+       struct task_struct *iothread;
+       struct list_head work_list;
+       spinlock_t p_work_lock;         /* Per cpu worker lock */
+};
+
+static inline void *qedi_get_task_mem(struct qed_iscsi_tid *info, u32 tid)
+{
+       return (info->blocks[tid / info->num_tids_per_block] +
+               (tid % info->num_tids_per_block) * info->size);
+}
+
+#define QEDI_U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#define QEDI_U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+
+#endif /* _QEDI_H_ */
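qedi_get_task_mem() resolves a tid against the block array set up by the qed core: task contexts live in equal-sized blocks of num_tids_per_block entries each. A worked example, assuming 256 tids per block:

        /* tid 1000: block 1000 / 256 = 3, entry 1000 % 256 = 232 */
        void *ctx = info->blocks[3] + 232 * info->size;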
diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c
new file mode 100644 (file)
index 0000000..2bdedb9
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi_dbg.h"
+#include <linux/vmalloc.h>
+
+void
+qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+            const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char nfunc[32];
+
+       memset(nfunc, 0, sizeof(nfunc));
+       memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (likely(qedi) && likely(qedi->pdev))
+               pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+                      nfunc, line, qedi->host_no, &vaf);
+       else
+               pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+       va_end(va);
+}
+
+void
+qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+             const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char nfunc[32];
+
+       memset(nfunc, 0, sizeof(nfunc));
+       memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+       if (!(qedi_dbg_log & QEDI_LOG_WARN))
+               return;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (likely(qedi) && likely(qedi->pdev))
+               pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+                       nfunc, line, qedi->host_no, &vaf);
+       else
+               pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+       va_end(va);
+}
+
+void
+qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+               const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char nfunc[32];
+
+       memset(nfunc, 0, sizeof(nfunc));
+       memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+       if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
+               return;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (likely(qedi) && likely(qedi->pdev))
+               pr_notice("[%s]:[%s:%d]:%d: %pV",
+                         dev_name(&qedi->pdev->dev), nfunc, line,
+                         qedi->host_no, &vaf);
+       else
+               pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+       va_end(va);
+}
+
+void
+qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+             u32 level, const char *fmt, ...)
+{
+       va_list va;
+       struct va_format vaf;
+       char nfunc[32];
+
+       memset(nfunc, 0, sizeof(nfunc));
+       memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+       if (!(qedi_dbg_log & level))
+               return;
+
+       va_start(va, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &va;
+
+       if (likely(qedi) && likely(qedi->pdev))
+               pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
+                       nfunc, line, qedi->host_no, &vaf);
+       else
+               pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+       va_end(va);
+}
+
+int
+qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+       int ret = 0;
+
+       for (; iter->name; iter++) {
+               ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
+                                           iter->attr);
+               if (ret)
+                       pr_err("Unable to create sysfs %s attr, err(%d).\n",
+                              iter->name, ret);
+       }
+       return ret;
+}
+
+void
+qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+       for (; iter->name; iter++)
+               sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
+}
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
new file mode 100644 (file)
index 0000000..c55572b
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_DBG_H_
+#define _QEDI_DBG_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <linux/fs.h>
+
+#define __PREVENT_QED_HSI__
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_if.h>
+
+extern uint qedi_dbg_log;
+
+/* Debug print level definitions */
+#define QEDI_LOG_DEFAULT       0x1             /* Set default logging mask */
+#define QEDI_LOG_INFO          0x2             /* Informational logs,
+                                                * MAC address, WWPN, WWNN
+                                                */
+#define QEDI_LOG_DISC          0x4             /* Init, discovery, rport */
+#define QEDI_LOG_LL2           0x8             /* LL2, VLAN logs */
+#define QEDI_LOG_CONN          0x10            /* Connection setup, cleanup */
+#define QEDI_LOG_EVT           0x20            /* Events, link, mtu */
+#define QEDI_LOG_TIMER         0x40            /* Timer events */
+#define QEDI_LOG_MP_REQ                0x80            /* Middle Path (MP) logs */
+#define QEDI_LOG_SCSI_TM       0x100           /* SCSI Aborts, Task Mgmt */
+#define QEDI_LOG_UNSOL         0x200           /* unsolicited event logs */
+#define QEDI_LOG_IO            0x400           /* scsi cmd, completion */
+#define QEDI_LOG_MQ            0x800           /* Multi Queue logs */
+#define QEDI_LOG_BSG           0x1000          /* BSG logs */
+#define QEDI_LOG_DEBUGFS       0x2000          /* debugFS logs */
+#define QEDI_LOG_LPORT         0x4000          /* lport logs */
+#define QEDI_LOG_ELS           0x8000          /* ELS logs */
+#define QEDI_LOG_NPIV          0x10000         /* NPIV logs */
+#define QEDI_LOG_SESS          0x20000         /* Connection setup, cleanup */
+#define QEDI_LOG_UIO           0x40000         /* iSCSI UIO logs */
+#define QEDI_LOG_TID           0x80000         /* FW TID context acquire,
+                                                * free
+                                                */
+#define QEDI_TRACK_TID         0x100000        /* Track TID state. To be
+                                                * enabled only at module load
+                                                * and not run-time.
+                                                */
+#define QEDI_TRACK_CMD_LIST    0x300000        /* Track active cmd list nodes,
+                                               * done with reference to TID,
+                                               * hence TRACK_TID also enabled.
+                                               */
+#define QEDI_LOG_NOTICE                0x40000000      /* Notice logs */
+#define QEDI_LOG_WARN          0x80000000      /* Warning logs */
+
+/* Debug context structure */
+struct qedi_dbg_ctx {
+       unsigned int host_no;
+       struct pci_dev *pdev;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *bdf_dentry;
+#endif
+};
+
+#define QEDI_ERR(pdev, fmt, ...)       \
+               qedi_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_WARN(pdev, fmt, ...)      \
+               qedi_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_NOTICE(pdev, fmt, ...)    \
+               qedi_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDI_INFO(pdev, level, fmt, ...)       \
+               qedi_dbg_info(pdev, __func__, __LINE__, level, fmt,     \
+                             ## __VA_ARGS__)
+
+void qedi_dbg_err(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+                 const char *fmt, ...);
+void qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+                  const char *fmt, ...);
+void qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+                    const char *fmt, ...);
+void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
+                  u32 level, const char *fmt, ...);
+
+struct Scsi_Host;
+
+struct sysfs_bin_attrs {
+       char *name;
+       struct bin_attribute *attr;
+};
+
+int qedi_create_sysfs_attr(struct Scsi_Host *shost,
+                          struct sysfs_bin_attrs *iter);
+void qedi_remove_sysfs_attr(struct Scsi_Host *shost,
+                           struct sysfs_bin_attrs *iter);
+
+#ifdef CONFIG_DEBUG_FS
+/* DebugFS related code */
+struct qedi_list_of_funcs {
+       char *oper_str;
+       ssize_t (*oper_func)(struct qedi_dbg_ctx *qedi);
+};
+
+struct qedi_debugfs_ops {
+       char *name;
+       struct qedi_list_of_funcs *qedi_funcs;
+};
+
+#define qedi_dbg_fileops(drv, ops) \
+{ \
+       .owner  = THIS_MODULE, \
+       .open   = simple_open, \
+       .read   = drv##_dbg_##ops##_cmd_read, \
+       .write  = drv##_dbg_##ops##_cmd_write \
+}
+
+/* Used for debugfs sequential files */
+#define qedi_dbg_fileops_seq(drv, ops) \
+{ \
+       .owner = THIS_MODULE, \
+       .open = drv##_dbg_##ops##_open, \
+       .read = seq_read, \
+       .llseek = seq_lseek, \
+       .release = single_release, \
+}
+
+void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+                       struct qedi_debugfs_ops *dops,
+                       const struct file_operations *fops);
+void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi);
+void qedi_dbg_init(char *drv_name);
+void qedi_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _QEDI_DBG_H_ */
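The qedi_dbg_fileops() macro token-pastes the driver and operation names into a file_operations initializer; qedi_dbg_fileops(qedi, do_not_recover), for instance, expands to (modulo whitespace):

        {
                .owner = THIS_MODULE,
                .open  = simple_open,
                .read  = qedi_dbg_do_not_recover_cmd_read,
                .write = qedi_dbg_do_not_recover_cmd_write
        }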
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
new file mode 100644 (file)
index 0000000..9559362
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_dbg.h"
+
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+int do_not_recover;
+static struct dentry *qedi_dbg_root;
+
+void
+qedi_dbg_host_init(struct qedi_dbg_ctx *qedi,
+                  struct qedi_debugfs_ops *dops,
+                  const struct file_operations *fops)
+{
+       char host_dirname[32];
+       struct dentry *file_dentry = NULL;
+
+       sprintf(host_dirname, "host%u", qedi->host_no);
+       qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root);
+       if (!qedi->bdf_dentry)
+               return;
+
+       while (dops) {
+               if (!(dops->name))
+                       break;
+
+               file_dentry = debugfs_create_file(dops->name, 0600,
+                                                 qedi->bdf_dentry, qedi,
+                                                 fops);
+               if (!file_dentry) {
+                       QEDI_INFO(qedi, QEDI_LOG_DEBUGFS,
+                                 "Debugfs entry %s creation failed\n",
+                                 dops->name);
+                       debugfs_remove_recursive(qedi->bdf_dentry);
+                       return;
+               }
+               dops++;
+               fops++;
+       }
+}
+
+void
+qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi)
+{
+       debugfs_remove_recursive(qedi->bdf_dentry);
+       qedi->bdf_dentry = NULL;
+}
+
+void
+qedi_dbg_init(char *drv_name)
+{
+       qedi_dbg_root = debugfs_create_dir(drv_name, NULL);
+       if (!qedi_dbg_root)
+               QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n");
+}
+
+void
+qedi_dbg_exit(void)
+{
+       debugfs_remove_recursive(qedi_dbg_root);
+       qedi_dbg_root = NULL;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
+{
+       if (!do_not_recover)
+               do_not_recover = 1;
+
+       QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+                 do_not_recover);
+       return 0;
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
+{
+       if (do_not_recover)
+               do_not_recover = 0;
+
+       QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
+                 do_not_recover);
+       return 0;
+}
+
+static struct qedi_list_of_funcs qedi_dbg_do_not_recover_ops[] = {
+       { "enable", qedi_dbg_do_not_recover_enable },
+       { "disable", qedi_dbg_do_not_recover_disable },
+       { NULL, NULL }
+};
+
+struct qedi_debugfs_ops qedi_debugfs_ops[] = {
+       { "gbl_ctx", NULL },
+       { "do_not_recover", qedi_dbg_do_not_recover_ops},
+       { "io_trace", NULL },
+       { NULL, NULL }
+};
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_write(struct file *filp, const char __user *buffer,
+                                 size_t count, loff_t *ppos)
+{
+       size_t cnt = 0;
+       struct qedi_dbg_ctx *qedi_dbg =
+                       (struct qedi_dbg_ctx *)filp->private_data;
+       struct qedi_list_of_funcs *lof = qedi_dbg_do_not_recover_ops;
+
+       if (*ppos)
+               return 0;
+
+       while (lof) {
+               if (!(lof->oper_str))
+                       break;
+
+               if (!strncmp(lof->oper_str, buffer, strlen(lof->oper_str))) {
+                       cnt = lof->oper_func(qedi_dbg);
+                       break;
+               }
+
+               lof++;
+       }
+       return (count - cnt);
+}
+
+static ssize_t
+qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
+                                size_t count, loff_t *ppos)
+{
+       size_t cnt = 0;
+
+       if (*ppos)
+               return 0;
+
+       cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+       cnt = min_t(int, count, cnt - *ppos);
+       *ppos += cnt;
+       return cnt;
+}
+
+static int
+qedi_gbl_ctx_show(struct seq_file *s, void *unused)
+{
+       struct qedi_fastpath *fp = NULL;
+       struct qed_sb_info *sb_info = NULL;
+       struct status_block *sb = NULL;
+       struct global_queue *que = NULL;
+       int id;
+       u16 prod_idx;
+       struct qedi_ctx *qedi = s->private;
+       unsigned long flags;
+
+       seq_puts(s, " DUMP CQ CONTEXT:\n");
+
+       for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+               spin_lock_irqsave(&qedi->hba_lock, flags);
+               seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id);
+               fp = &qedi->fp_array[id];
+               sb_info = fp->sb_info;
+               sb = sb_info->sb_virt;
+               prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
+                           STATUS_BLOCK_PROD_INDEX_MASK);
+               seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
+               que = qedi->global_queues[fp->sb_id];
+               seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
+               seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id);
+               seq_puts(s, "=========== END ==================\n\n\n");
+               spin_unlock_irqrestore(&qedi->hba_lock, flags);
+       }
+       return 0;
+}
+
+static int
+qedi_dbg_gbl_ctx_open(struct inode *inode, struct file *file)
+{
+       struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+       struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+                                            dbg_ctx);
+
+       return single_open(file, qedi_gbl_ctx_show, qedi);
+}
+
+static int
+qedi_io_trace_show(struct seq_file *s, void *unused)
+{
+       int id, idx = 0;
+       struct qedi_ctx *qedi = s->private;
+       struct qedi_io_log *io_log;
+       unsigned long flags;
+
+       seq_puts(s, " DUMP IO LOGS:\n");
+       spin_lock_irqsave(&qedi->io_trace_lock, flags);
+       idx = qedi->io_trace_idx;
+       for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) {
+               io_log = &qedi->io_trace_buf[idx];
+               seq_printf(s, "iodir-%d:", io_log->direction);
+               seq_printf(s, "tid-0x%x:", io_log->task_id);
+               seq_printf(s, "cid-0x%x:", io_log->cid);
+               seq_printf(s, "lun-%d:", io_log->lun);
+               seq_printf(s, "op-0x%02x:", io_log->op);
+               seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
+                          io_log->lba[1], io_log->lba[2], io_log->lba[3]);
+               seq_printf(s, "buflen-%d:", io_log->bufflen);
+               seq_printf(s, "sgcnt-%d:", io_log->sg_count);
+               seq_printf(s, "res-0x%08x:", io_log->result);
+               seq_printf(s, "jif-%lu:", io_log->jiffies);
+               seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu);
+               seq_printf(s, "req_cpu-%d:", io_log->req_cpu);
+               seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu);
+               seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu);
+
+               idx++;
+               if (idx == QEDI_IO_TRACE_SIZE)
+                       idx = 0;
+       }
+       spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+       return 0;
+}
+
+static int
+qedi_dbg_io_trace_open(struct inode *inode, struct file *file)
+{
+       struct qedi_dbg_ctx *qedi_dbg = inode->i_private;
+       struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx,
+                                            dbg_ctx);
+
+       return single_open(file, qedi_io_trace_show, qedi);
+}
+
+const struct file_operations qedi_dbg_fops[] = {
+       qedi_dbg_fileops_seq(qedi, gbl_ctx),
+       qedi_dbg_fileops(qedi, do_not_recover),
+       qedi_dbg_fileops_seq(qedi, io_trace),
+       { NULL, NULL },
+};
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
new file mode 100644 (file)
index 0000000..b1d3904
--- /dev/null
@@ -0,0 +1,2378 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/delay.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+                              struct iscsi_task *mtask);
+
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+       if (cmd->io_tbl.sge_valid && sc) {
+               cmd->io_tbl.sge_valid = 0;
+               scsi_dma_unmap(sc);
+       }
+}
+
+static void qedi_process_logout_resp(struct qedi_ctx *qedi,
+                                    union iscsi_cqe *cqe,
+                                    struct iscsi_task *task,
+                                    struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_logout_rsp *resp_hdr;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_logout_response_hdr *cqe_logout_response;
+       struct qedi_cmd *cmd;
+
+       cmd = (struct qedi_cmd *)task->dd_data;
+       cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
+       spin_lock(&session->back_lock);
+       resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr->opcode = cqe_logout_response->opcode;
+       resp_hdr->flags = cqe_logout_response->flags;
+       resp_hdr->hlength = 0;
+
+       resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+       resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
+       resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
+       resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
+
+       resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
+       resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                 "Freeing tid=0x%x for cid=0x%x\n",
+                 cmd->task_id, qedi_conn->iscsi_conn_id);
+
+       if (likely(cmd->io_cmd_in_list)) {
+               cmd->io_cmd_in_list = false;
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       } else {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+                         cmd->task_id, qedi_conn->iscsi_conn_id,
+                         &cmd->io_cmd);
+       }
+
+       cmd->state = RESPONSE_RECEIVED;
+       qedi_clear_task_idx(qedi, cmd->task_id);
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+
+       spin_unlock(&session->back_lock);
+}
+
+static void qedi_process_text_resp(struct qedi_ctx *qedi,
+                                  union iscsi_cqe *cqe,
+                                  struct iscsi_task *task,
+                                  struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_task_context *task_ctx;
+       struct iscsi_text_rsp *resp_hdr_ptr;
+       struct iscsi_text_response_hdr *cqe_text_response;
+       struct qedi_cmd *cmd;
+       int pld_len;
+       u32 *tmp;
+
+       cmd = (struct qedi_cmd *)task->dd_data;
+       task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+       cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
+       spin_lock(&session->back_lock);
+       resp_hdr_ptr =  (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr_ptr->opcode = cqe_text_response->opcode;
+       resp_hdr_ptr->flags = cqe_text_response->flags;
+       resp_hdr_ptr->hlength = 0;
+
+       hton24(resp_hdr_ptr->dlength,
+              (cqe_text_response->hdr_second_dword &
+               ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+       tmp = (u32 *)resp_hdr_ptr->dlength;
+
+       resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+                                     conn->session->age);
+       resp_hdr_ptr->ttt = cqe_text_response->ttt;
+       resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
+       resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
+       resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);
+
+       pld_len = cqe_text_response->hdr_second_dword &
+                 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+       qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+       memset(task_ctx, '\0', sizeof(*task_ctx));
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                 "Freeing tid=0x%x for cid=0x%x\n",
+                 cmd->task_id, qedi_conn->iscsi_conn_id);
+
+       if (likely(cmd->io_cmd_in_list)) {
+               cmd->io_cmd_in_list = false;
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       } else {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
+                         cmd->task_id, qedi_conn->iscsi_conn_id,
+                         &cmd->io_cmd);
+       }
+
+       cmd->state = RESPONSE_RECEIVED;
+       qedi_clear_task_idx(qedi, cmd->task_id);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+                            qedi_conn->gen_pdu.resp_buf,
+                            (qedi_conn->gen_pdu.resp_wr_ptr -
+                             qedi_conn->gen_pdu.resp_buf));
+       spin_unlock(&session->back_lock);
+}
+
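+/* Deferred TMF completion: runs from the tmf_thread workqueue so it may
+ * block the session and drain outstanding I/O before completing the TMF
+ * response PDU.
+ */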
+static void qedi_tmf_resp_work(struct work_struct *work)
+{
+       struct qedi_cmd *qedi_cmd =
+                               container_of(work, struct qedi_cmd, tmf_work);
+       struct qedi_conn *qedi_conn = qedi_cmd->conn;
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_tm_rsp *resp_hdr_ptr;
+       int rval = 0;
+
+       set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+       resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+
+       iscsi_block_session(session->cls_session);
+       rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
+       if (rval) {
+               clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+               qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+               iscsi_unblock_session(session->cls_session);
+               return;
+       }
+
+       iscsi_unblock_session(session->cls_session);
+       qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+       spin_lock(&session->back_lock);
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+       spin_unlock(&session->back_lock);
+       kfree(resp_hdr_ptr);
+       clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
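+/* Build the TMF response from the CQE; LUN/warm/cold reset responses are
+ * deferred to qedi_tmf_resp_work(), everything else completes inline.
+ */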
+static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
+                                 union iscsi_cqe *cqe,
+                                 struct iscsi_task *task,
+                                 struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_tmf_response_hdr *cqe_tmp_response;
+       struct iscsi_tm_rsp *resp_hdr_ptr;
+       struct iscsi_tm *tmf_hdr;
+       struct qedi_cmd *qedi_cmd = NULL;
+
+       cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
+
+       qedi_cmd = task->dd_data;
+       qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+       if (!qedi_cmd->tmf_resp_buf) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failed to allocate resp buf, cid=0x%x\n",
+                         qedi_conn->iscsi_conn_id);
+               return;
+       }
+
+       spin_lock(&session->back_lock);
+       resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
+       memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));
+
+       /* Fill up the header */
+       resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
+       resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
+       resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
+       resp_hdr_ptr->hlength = 0;
+
+       hton24(resp_hdr_ptr->dlength,
+              (cqe_tmp_response->hdr_second_dword &
+               ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+       resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+                                     conn->session->age);
+       resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
+       resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
+       resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);
+
+       tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+
+       if (likely(qedi_cmd->io_cmd_in_list)) {
+               qedi_cmd->io_cmd_in_list = false;
+               list_del_init(&qedi_cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       }
+
+       if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+             ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+           ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+             ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+           ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+             ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+               INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
+               queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+               goto unblock_sess;
+       }
+
+       qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
+       kfree(resp_hdr_ptr);
+
+unblock_sess:
+       spin_unlock(&session->back_lock);
+}
+
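+/* Rebuild the Login Response BHS and complete it together with the
+ * payload staged in gen_pdu.resp_buf.
+ */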
+static void qedi_process_login_resp(struct qedi_ctx *qedi,
+                                   union iscsi_cqe *cqe,
+                                   struct iscsi_task *task,
+                                   struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_task_context *task_ctx;
+       struct iscsi_login_rsp *resp_hdr_ptr;
+       struct iscsi_login_response_hdr *cqe_login_response;
+       struct qedi_cmd *cmd;
+       int pld_len;
+
+       cmd = (struct qedi_cmd *)task->dd_data;
+
+       cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
+       task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
+
+       spin_lock(&session->back_lock);
+       resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
+       resp_hdr_ptr->opcode = cqe_login_response->opcode;
+       resp_hdr_ptr->flags = cqe_login_response->flags_attr;
+       resp_hdr_ptr->hlength = 0;
+
+       hton24(resp_hdr_ptr->dlength,
+              (cqe_login_response->hdr_second_dword &
+               ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
+       resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
+                                     conn->session->age);
+       resp_hdr_ptr->tsih = cqe_login_response->tsih;
+       resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
+       resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
+       resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
+       resp_hdr_ptr->status_class = cqe_login_response->status_class;
+       resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
+       pld_len = cqe_login_response->hdr_second_dword &
+                 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
+       qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+
+       if (likely(cmd->io_cmd_in_list)) {
+               cmd->io_cmd_in_list = false;
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       }
+
+       memset(task_ctx, '\0', sizeof(*task_ctx));
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
+                            qedi_conn->gen_pdu.resp_buf,
+                            (qedi_conn->gen_pdu.resp_wr_ptr -
+                            qedi_conn->gen_pdu.resp_buf));
+
+       spin_unlock(&session->back_lock);
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                 "Freeing tid=0x%x for cid=0x%x\n",
+                 cmd->task_id, qedi_conn->iscsi_conn_id);
+       cmd->state = RESPONSE_RECEIVED;
+       qedi_clear_task_idx(qedi, cmd->task_id);
+}
+
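+/* Copy an unsolicited PDU payload out of the BDQ buffer selected by the
+ * rqe_opaque cookie echoed back by the firmware.
+ */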
+static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
+                               struct iscsi_cqe_unsolicited *cqe,
+                               char *ptr, int len)
+{
+       u16 idx = 0;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
+                 len, qedi->bdq_prod_idx,
+                 (qedi->bdq_prod_idx % qedi->rq_num_entries));
+
+       /* Obtain buffer address from rqe_opaque */
+       idx = cqe->rqe_opaque.lo;
+       /* idx is unsigned, so only the upper bound can be violated */
+       if (idx > (QEDI_BDQ_NUM - 1)) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+                         idx);
+               return;
+       }
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "rqe_opaque.lo [0x%x], rqe_opaque.hi [0x%x], idx [%d]\n",
+                 cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
+       switch (cqe->unsol_cqe_type) {
+       case ISCSI_CQE_UNSOLICITED_SINGLE:
+       case ISCSI_CQE_UNSOLICITED_FIRST:
+               if (len)
+                       memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
+               break;
+       case ISCSI_CQE_UNSOLICITED_MIDDLE:
+       case ISCSI_CQE_UNSOLICITED_LAST:
+               break;
+       default:
+               break;
+       }
+}
+
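+/* Recycle BDQ buffers to the firmware by rewriting the PBL entry and
+ * advancing both producer indices.
+ */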
+static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
+                               struct iscsi_cqe_unsolicited *cqe,
+                               int count)
+{
+       u16 tmp;
+       u16 idx = 0;
+       struct scsi_bd *pbl;
+
+       /* Obtain buffer address from rqe_opaque */
+       idx = cqe->rqe_opaque.lo;
+       if (idx > (QEDI_BDQ_NUM - 1)) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
+                         idx);
+               return;
+       }
+
+       pbl = (struct scsi_bd *)qedi->bdq_pbl;
+       pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
+       pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
+       pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
+                 pbl, pbl->address.hi, pbl->address.lo, idx);
+       pbl->opaque.hi = 0;
+       pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+
+       /* Increment producer to let f/w know we've handled the frame */
+       qedi->bdq_prod_idx += count;
+
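+       /* Post the new producer index to both doorbells; the readw()
+        * calls flush the posted MMIO writes.
+        */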
+       writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+       tmp = readw(qedi->bdq_primary_prod);
+
+       writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+       tmp = readw(qedi->bdq_secondary_prod);
+}
+
+static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
+                                     struct iscsi_cqe_unsolicited *cqe,
+                                     u32 pdu_len, u32 num_bdqs,
+                                     char *bdq_data)
+{
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "num_bdqs [%d]\n", num_bdqs);
+
+       qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
+       qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
+}
+
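+/* Handle NOP-In CQEs: unsolicited target pings complete with RESERVED_ITT
+ * (and return 1), while responses to our own NOP-Outs release the command.
+ */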
+static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
+                                  union iscsi_cqe *cqe,
+                                  struct iscsi_task *task,
+                                  struct qedi_conn *qedi_conn, u16 que_idx)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_nop_in_hdr *cqe_nop_in;
+       struct iscsi_nopin *hdr;
+       struct qedi_cmd *cmd;
+       int tgt_async_nop = 0;
+       u32 lun[2];
+       u32 pdu_len, num_bdqs;
+       char bdq_data[QEDI_BDQ_BUF_SIZE];
+       unsigned long flags;
+
+       spin_lock_bh(&session->back_lock);
+       cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
+
+       pdu_len = cqe_nop_in->hdr_second_dword &
+                 ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
+       num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+       hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(hdr, 0, sizeof(struct iscsi_hdr));
+       hdr->opcode = cqe_nop_in->opcode;
+       hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
+       hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
+       hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);
+
+       if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+               spin_lock_irqsave(&qedi->hba_lock, flags);
+               qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+                                         pdu_len, num_bdqs, bdq_data);
+               hdr->itt = RESERVED_ITT;
+               tgt_async_nop = 1;
+               spin_unlock_irqrestore(&qedi->hba_lock, flags);
+               goto done;
+       }
+
+       /* Response to one of our nop-outs */
+       if (task) {
+               cmd = task->dd_data;
+               hdr->flags = ISCSI_FLAG_CMD_FINAL;
+               hdr->itt = build_itt(cqe->cqe_solicited.itid,
+                                    conn->session->age);
+               lun[0] = 0xffffffff;
+               lun[1] = 0xffffffff;
+               memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                         "Freeing tid=0x%x for cid=0x%x\n",
+                         cmd->task_id, qedi_conn->iscsi_conn_id);
+               cmd->state = RESPONSE_RECEIVED;
+               spin_lock(&qedi_conn->list_lock);
+               if (likely(cmd->io_cmd_in_list)) {
+                       cmd->io_cmd_in_list = false;
+                       list_del_init(&cmd->io_cmd);
+                       qedi_conn->active_cmd_count--;
+               }
+
+               spin_unlock(&qedi_conn->list_lock);
+               qedi_clear_task_idx(qedi, cmd->task_id);
+       }
+
+done:
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);
+
+       spin_unlock_bh(&session->back_lock);
+       return tgt_async_nop;
+}
+
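+/* Rebuild an Async Message PDU, pulling any payload from the BDQ for
+ * unsolicited CQEs before completing it.
+ */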
+static void qedi_process_async_mesg(struct qedi_ctx *qedi,
+                                   union iscsi_cqe *cqe,
+                                   struct iscsi_task *task,
+                                   struct qedi_conn *qedi_conn,
+                                   u16 que_idx)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_async_msg_hdr *cqe_async_msg;
+       struct iscsi_async *resp_hdr;
+       u32 lun[2];
+       u32 pdu_len, num_bdqs;
+       char bdq_data[QEDI_BDQ_BUF_SIZE];
+       unsigned long flags;
+
+       spin_lock_bh(&session->back_lock);
+
+       cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
+       pdu_len = cqe_async_msg->hdr_second_dword &
+               ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
+       num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;
+
+       if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+               spin_lock_irqsave(&qedi->hba_lock, flags);
+               qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+                                         pdu_len, num_bdqs, bdq_data);
+               spin_unlock_irqrestore(&qedi->hba_lock, flags);
+       }
+
+       resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr->opcode = cqe_async_msg->opcode;
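+       /* Byte 1 of an Async Message BHS carries only the always-set
+        * final bit (RFC 3720), hence the constant 0x80.
+        */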
+       resp_hdr->flags = 0x80;
+
+       lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
+       lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
+       memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
+       resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
+       resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
+       resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);
+
+       resp_hdr->async_event = cqe_async_msg->async_event;
+       resp_hdr->async_vcode = cqe_async_msg->async_vcode;
+
+       resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
+       resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
+       resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
+                            pdu_len);
+
+       spin_unlock_bh(&session->back_lock);
+}
+
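+/* Rebuild a Reject PDU; the rejected header bytes, if any, are staged in
+ * conn->data via the BDQ helpers.
+ */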
+static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
+                                    union iscsi_cqe *cqe,
+                                    struct iscsi_task *task,
+                                    struct qedi_conn *qedi_conn,
+                                    u16 que_idx)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_reject_hdr *cqe_reject;
+       struct iscsi_reject *hdr;
+       u32 pld_len, num_bdqs;
+       unsigned long flags;
+
+       spin_lock_bh(&session->back_lock);
+       cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
+       pld_len = cqe_reject->hdr_second_dword &
+                 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
+       num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;
+
+       if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
+               spin_lock_irqsave(&qedi->hba_lock, flags);
+               qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
+                                         pld_len, num_bdqs, conn->data);
+               spin_unlock_irqrestore(&qedi->hba_lock, flags);
+       }
+       hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
+       memset(hdr, 0, sizeof(struct iscsi_hdr));
+       hdr->opcode = cqe_reject->opcode;
+       hdr->reason = cqe_reject->hdr_reason;
+       hdr->flags = cqe_reject->hdr_flags;
+       hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
+                             ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
+       hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
+       hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
+       hdr->ffffffff = cpu_to_be32(0xffffffff);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+                            conn->data, pld_len);
+       spin_unlock_bh(&session->back_lock);
+}
+
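+/* Fast-path SCSI response completion: validate the scsi_cmnd, rebuild the
+ * SCSI Response BHS (sense data, underrun residual fixups) and complete
+ * it under the session back_lock.
+ */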
+static void qedi_scsi_completion(struct qedi_ctx *qedi,
+                                union iscsi_cqe *cqe,
+                                struct iscsi_task *task,
+                                struct iscsi_conn *conn)
+{
+       struct scsi_cmnd *sc_cmd;
+       struct qedi_cmd *cmd = task->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct iscsi_scsi_rsp *hdr;
+       struct iscsi_data_in_hdr *cqe_data_in;
+       int datalen = 0;
+       struct qedi_conn *qedi_conn;
+       u32 iscsi_cid;
+       u8 cqe_err_bits = 0;
+
+       iscsi_cid = cqe->cqe_common.conn_id;
+       qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+       cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
+       cqe_err_bits =
+               cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+       spin_lock_bh(&session->back_lock);
+       /* get the scsi command */
+       sc_cmd = cmd->scsi_cmd;
+
+       if (!sc_cmd) {
+               QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
+               goto error;
+       }
+
+       if (!sc_cmd->SCp.ptr) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "SCp.ptr is NULL, returned in another context.\n");
+               goto error;
+       }
+
+       if (!sc_cmd->request) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "sc_cmd->request is NULL, sc_cmd=%p.\n",
+                         sc_cmd);
+               goto error;
+       }
+
+       if (!sc_cmd->request->special) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "request->special is NULL so request not valid, sc_cmd=%p.\n",
+                         sc_cmd);
+               goto error;
+       }
+
+       if (!sc_cmd->request->q) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "request->q is NULL so request is not valid, sc_cmd=%p.\n",
+                         sc_cmd);
+               goto error;
+       }
+
+       qedi_iscsi_unmap_sg_list(cmd);
+
+       hdr = (struct iscsi_scsi_rsp *)task->hdr;
+       hdr->opcode = cqe_data_in->opcode;
+       hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
+       hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
+       hdr->response = cqe_data_in->reserved1;
+       hdr->cmd_status = cqe_data_in->status_rsvd;
+       hdr->flags = cqe_data_in->flags;
+       hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);
+
+       if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
+               datalen = cqe_data_in->reserved2 &
+                         ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
+               memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
+       }
+
+       /* If the f/w reports a data underrun error, set the residual to the
+        * I/O transfer length, set the Underrun flag and clear the Overrun
+        * flag explicitly.
+        */
+       if (unlikely(cqe_err_bits &&
+                    GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
+                         hdr->itt, cqe_data_in->flags, cmd->task_id,
+                         qedi_conn->iscsi_conn_id, hdr->residual_count,
+                         scsi_bufflen(sc_cmd));
+               hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
+               hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+               hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
+       }
+
+       spin_lock(&qedi_conn->list_lock);
+       if (likely(cmd->io_cmd_in_list)) {
+               cmd->io_cmd_in_list = false;
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       }
+       spin_unlock(&qedi_conn->list_lock);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                 "Freeing tid=0x%x for cid=0x%x\n",
+                 cmd->task_id, qedi_conn->iscsi_conn_id);
+       cmd->state = RESPONSE_RECEIVED;
+       if (qedi_io_tracing)
+               qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
+
+       qedi_clear_task_idx(qedi, cmd->task_id);
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+                            conn->data, datalen);
+error:
+       spin_unlock_bh(&session->back_lock);
+}
+
+static void qedi_mtask_completion(struct qedi_ctx *qedi,
+                                 union iscsi_cqe *cqe,
+                                 struct iscsi_task *task,
+                                 struct qedi_conn *conn, u16 que_idx)
+{
+       struct iscsi_conn *iscsi_conn;
+       u32 hdr_opcode;
+
+       hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+       iscsi_conn = conn->cls_conn->dd_data;
+
+       switch (hdr_opcode) {
+       case ISCSI_OPCODE_SCSI_RESPONSE:
+       case ISCSI_OPCODE_DATA_IN:
+               qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
+               break;
+       case ISCSI_OPCODE_LOGIN_RESPONSE:
+               qedi_process_login_resp(qedi, cqe, task, conn);
+               break;
+       case ISCSI_OPCODE_TMF_RESPONSE:
+               qedi_process_tmf_resp(qedi, cqe, task, conn);
+               break;
+       case ISCSI_OPCODE_TEXT_RESPONSE:
+               qedi_process_text_resp(qedi, cqe, task, conn);
+               break;
+       case ISCSI_OPCODE_LOGOUT_RESPONSE:
+               qedi_process_logout_resp(qedi, cqe, task, conn);
+               break;
+       case ISCSI_OPCODE_NOP_IN:
+               qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
+               break;
+       default:
+               QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
+       }
+}
+
+static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
+                                         struct iscsi_cqe_solicited *cqe,
+                                         struct iscsi_task *task,
+                                         struct qedi_conn *qedi_conn)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct qedi_cmd *cmd = task->dd_data;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
+                 "itid=0x%x, cmd task id=0x%x\n",
+                 cqe->itid, cmd->task_id);
+
+       cmd->state = RESPONSE_RECEIVED;
+       qedi_clear_task_idx(qedi, cmd->task_id);
+
+       spin_lock_bh(&session->back_lock);
+       __iscsi_put_task(task);
+       spin_unlock_bh(&session->back_lock);
+}
+
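+/* Task-cleanup CQEs arrive in three flavours: completion of an ABORT TASK
+ * tracked on tmf_work_list, one of the bulk cleanups counted via
+ * cmd_cleanup_req during recovery, or an untracked (error) response.
+ */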
+static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
+                                         struct iscsi_cqe_solicited *cqe,
+                                         struct iscsi_task *task,
+                                         struct iscsi_conn *conn)
+{
+       struct qedi_work_map *work, *work_tmp;
+       u32 proto_itt = cqe->itid;
+       u32 ptmp_itt = 0;
+       itt_t protoitt = 0;
+       int found = 0;
+       struct qedi_cmd *qedi_cmd = NULL;
+       u32 rtid = 0;
+       u32 iscsi_cid;
+       struct qedi_conn *qedi_conn;
+       struct qedi_cmd *dbg_cmd;
+       struct iscsi_task *mtask;
+       struct iscsi_tm *tmf_hdr = NULL;
+
+       iscsi_cid = cqe->conn_id;
+       qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+
+       /* Based on this itt get the corresponding qedi_cmd */
+       spin_lock_bh(&qedi_conn->tmf_work_lock);
+       list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
+                                list) {
+               if (work->rtid == proto_itt) {
+                       /* We found the command */
+                       qedi_cmd = work->qedi_cmd;
+                       if (!qedi_cmd->list_tmf_work) {
+                               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                                         "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
+                                         proto_itt, qedi_conn->iscsi_conn_id);
+                               WARN_ON(1);
+                       }
+                       found = 1;
+                       mtask = qedi_cmd->task;
+                       tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+                       rtid = work->rtid;
+
+                       list_del_init(&work->list);
+                       kfree(work);
+                       qedi_cmd->list_tmf_work = NULL;
+               }
+       }
+       spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+       if (found) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                         "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+                         proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+               if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                   ISCSI_TM_FUNC_ABORT_TASK) {
+                       spin_lock_bh(&conn->session->back_lock);
+
+                       protoitt = build_itt(get_itt(tmf_hdr->rtt),
+                                            conn->session->age);
+                       task = iscsi_itt_to_task(conn, protoitt);
+
+                       spin_unlock_bh(&conn->session->back_lock);
+
+                       if (!task) {
+                               QEDI_NOTICE(&qedi->dbg_ctx,
+                                           "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+                                           get_itt(tmf_hdr->rtt),
+                                           qedi_conn->iscsi_conn_id);
+                               return;
+                       }
+
+                       dbg_cmd = task->dd_data;
+
+                       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                                 "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+                                 get_itt(tmf_hdr->rtt), get_itt(task->itt),
+                                 dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+
+                       if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
+                               qedi_cmd->state = CLEANUP_RECV;
+
+                       qedi_clear_task_idx(qedi_conn->qedi, rtid);
+
+                       spin_lock(&qedi_conn->list_lock);
+                       list_del_init(&dbg_cmd->io_cmd);
+                       qedi_conn->active_cmd_count--;
+                       spin_unlock(&qedi_conn->list_lock);
+                       qedi_cmd->state = CLEANUP_RECV;
+                       wake_up_interruptible(&qedi_conn->wait_queue);
+               }
+       } else if (qedi_conn->cmd_cleanup_req > 0) {
+               spin_lock_bh(&conn->session->back_lock);
+               qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+               protoitt = build_itt(ptmp_itt, conn->session->age);
+               task = iscsi_itt_to_task(conn, protoitt);
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                         "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
+                         cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
+                         qedi_conn->iscsi_conn_id);
+
+               spin_unlock_bh(&conn->session->back_lock);
+               if (!task) {
+                       QEDI_NOTICE(&qedi->dbg_ctx,
+                                   "task is null, itid=0x%x, cid=0x%x\n",
+                                   cqe->itid, qedi_conn->iscsi_conn_id);
+                       return;
+               }
+               qedi_conn->cmd_cleanup_cmpl++;
+               wake_up(&qedi_conn->wait_queue);
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+                         "Freeing tid=0x%x for cid=0x%x\n",
+                         cqe->itid, qedi_conn->iscsi_conn_id);
+               qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
+
+       } else {
+               qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
+               protoitt = build_itt(ptmp_itt, conn->session->age);
+               task = iscsi_itt_to_task(conn, protoitt);
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
+                        protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+               WARN_ON(1);
+       }
+}
+
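+/* Top-level fast-path dispatcher: decode the CQE type and opcode, then
+ * fan out to the solicited, unsolicited, dummy and task-cleanup handlers.
+ */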
+void qedi_fp_process_cqes(struct qedi_work *work)
+{
+       struct qedi_ctx *qedi = work->qedi;
+       union iscsi_cqe *cqe = &work->cqe;
+       struct iscsi_task *task = NULL;
+       struct iscsi_nopout *nopout_hdr;
+       struct qedi_conn *q_conn;
+       struct iscsi_conn *conn;
+       struct qedi_cmd *qedi_cmd;
+       u32 comp_type;
+       u32 iscsi_cid;
+       u32 hdr_opcode;
+       u16 que_idx = work->que_idx;
+       u8 cqe_err_bits = 0;
+
+       comp_type = cqe->cqe_common.cqe_type;
+       hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
+       cqe_err_bits =
+               cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
+                 cqe->cqe_common.conn_id, comp_type, hdr_opcode);
+
+       if (comp_type >= MAX_ISCSI_CQES_TYPE) {
+               QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
+               return;
+       }
+
+       iscsi_cid = cqe->cqe_common.conn_id;
+       q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+       if (!q_conn) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Session no longer exists for cid=0x%x!!\n",
+                         iscsi_cid);
+               return;
+       }
+
+       conn = q_conn->cls_conn->dd_data;
+
+       if (unlikely(cqe_err_bits &&
+                    GET_FIELD(cqe_err_bits,
+                              CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
+               iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+               return;
+       }
+
+       switch (comp_type) {
+       case ISCSI_CQE_TYPE_SOLICITED:
+       case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+               qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
+               task = qedi_cmd->task;
+               if (!task) {
+                       QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
+                       return;
+               }
+
+               /* Process NOPIN local completion */
+               nopout_hdr = (struct iscsi_nopout *)task->hdr;
+               if ((nopout_hdr->itt == RESERVED_ITT) &&
+                   (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
+                       qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
+                                                     task, q_conn);
+               } else {
+                       cqe->cqe_solicited.itid =
+                                              qedi_get_itt(cqe->cqe_solicited);
+                       /* Process other solicited responses */
+                       qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
+               }
+               break;
+       case ISCSI_CQE_TYPE_UNSOLICITED:
+               switch (hdr_opcode) {
+               case ISCSI_OPCODE_NOP_IN:
+                       qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
+                                               que_idx);
+                       break;
+               case ISCSI_OPCODE_ASYNC_MSG:
+                       qedi_process_async_mesg(qedi, cqe, task, q_conn,
+                                               que_idx);
+                       break;
+               case ISCSI_OPCODE_REJECT:
+                       qedi_process_reject_mesg(qedi, cqe, task, q_conn,
+                                                que_idx);
+                       break;
+               }
+               goto exit_fp_process;
+       case ISCSI_CQE_TYPE_DUMMY:
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CQE\n");
+               goto exit_fp_process;
+       case ISCSI_CQE_TYPE_TASK_CLEANUP:
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Cleanup CQE\n");
+               qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
+                                             conn);
+               goto exit_fp_process;
+       default:
+               QEDI_ERR(&qedi->dbg_ctx, "Error CQE.\n");
+               break;
+       }
+
+exit_fp_process:
+       return;
+}
+
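+/* Build a work-queue entry for the task in the connection's SQ; the
+ * doorbell is rung separately via qedi_ring_doorbell().
+ */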
+static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
+                          u16 tid, u16 ptu_invalidate, int is_cleanup)
+{
+       struct iscsi_wqe *wqe;
+       struct iscsi_wqe_field *cont_field;
+       struct qedi_endpoint *ep;
+       struct scsi_cmnd *sc = task->sc;
+       struct iscsi_login_req *login_hdr;
+       struct qedi_cmd *cmd = task->dd_data;
+
+       login_hdr = (struct iscsi_login_req *)task->hdr;
+       ep = qedi_conn->ep;
+       wqe = &ep->sq[ep->sq_prod_idx];
+
+       memset(wqe, 0, sizeof(*wqe));
+
+       ep->sq_prod_idx++;
+       ep->fw_sq_prod_idx++;
+       if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+               ep->sq_prod_idx = 0;
+
+       if (is_cleanup) {
+               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_TASK_CLEANUP);
+               wqe->task_id = tid;
+               return;
+       }
+
+       if (ptu_invalidate) {
+               SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
+                         ISCSI_WQE_SET_PTU_INVALIDATE);
+       }
+
+       cont_field = &wqe->cont_prevtid_union.cont_field;
+
+       switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+       case ISCSI_OP_LOGIN:
+       case ISCSI_OP_TEXT:
+               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_MIDDLE_PATH);
+               SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+                         1);
+               cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
+               break;
+       case ISCSI_OP_LOGOUT:
+       case ISCSI_OP_NOOP_OUT:
+       case ISCSI_OP_SCSI_TMFUNC:
+               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_NORMAL);
+               break;
+       default:
+               if (!sc)
+                       break;
+
+               SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
+                         ISCSI_WQE_TYPE_NORMAL);
+               cont_field->contlen_cdbsize_field =
+                               (sc->sc_data_direction == DMA_TO_DEVICE) ?
+                               scsi_bufflen(sc) : 0;
+               if (cmd->use_slowpath)
+                       SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
+               else
+                       SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
+                                 (sc->sc_data_direction ==
+                                  DMA_TO_DEVICE) ?
+                                 min((u16)QEDI_FAST_SGE_COUNT,
+                                     (u16)cmd->io_tbl.sge_valid) : 0);
+               break;
+       }
+
+       wqe->task_id = tid;
+       /* Make sure SQ data is coherent */
+       wmb();
+}
+
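+/* Notify the firmware of the new SQ producer index via the XCM doorbell. */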
+static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
+{
+       struct iscsi_db_data dbell = { 0 };
+
+       dbell.agg_flags = 0;
+
+       dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
+       dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
+       dbell.params |=
+                  DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;
+
+       dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
+       writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);
+
+       /* Make sure the fw write idx is coherent. Include both memory
+        * barriers as a failsafe: on some architectures the two calls are
+        * the same instruction, on others they are two different assembly
+        * operations.
+        */
+       wmb();
+       mmiowb();
+       QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
+                 "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
+                 qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
+                 qedi_conn->iscsi_conn_id);
+}
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+                         struct iscsi_task *task)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_login_req *login_hdr;
+       struct iscsi_login_req_hdr *fw_login_req = NULL;
+       struct iscsi_cached_sge_ctx *cached_sge = NULL;
+       struct iscsi_sge *single_sge = NULL;
+       struct iscsi_sge *req_sge = NULL;
+       struct iscsi_sge *resp_sge = NULL;
+       struct qedi_cmd *qedi_cmd;
+       s16 ptu_invalidate = 0;
+       s16 tid = 0;
+
+       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       login_hdr = (struct iscsi_login_req *)task->hdr;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+       qedi_cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
+       fw_login_req->opcode = login_hdr->opcode;
+       fw_login_req->version_min = login_hdr->min_version;
+       fw_login_req->version_max = login_hdr->max_version;
+       fw_login_req->flags_attr = login_hdr->flags;
+       fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
+       fw_login_req->isid_d = *((u32 *)login_hdr->isid);
+       fw_login_req->tsih = login_hdr->tsih;
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+       fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+       fw_login_req->cid = qedi_conn->iscsi_conn_id;
+       fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+       fw_login_req->exp_stat_sn = 0;
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                               qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                               qedi->tid_reuse_count[tid]++;
+       cached_sge =
+              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+       cached_sge->sge.sge_len = req_sge->sge_len;
+       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+       cached_sge->sge.sge_addr.hi =
+                            (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+       /* Mstorm context */
+       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
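+       /* Task type 0x2 is presumably the midpath type; the named constant
+        * ISCSI_TASK_TYPE_MIDPATH is used for the same purpose in the
+        * logout path below.
+        */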
+       fw_task_ctx->mstorm_st_context.task_type = 0x2;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+       single_sge->sge_len = resp_sge->sge_len;
+
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SINGLE_SGE, 1);
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SLOW_IO, 0);
+       fw_task_ctx->mstorm_st_context.sgl_size = 1;
+       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+                                               ntoh24(login_hdr->dlength);
+       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+       fw_task_ctx->ustorm_st_context.task_type = 0x2;
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       fw_task_ctx->ustorm_ag_context.exp_data_acked =
+                                                ntoh24(login_hdr->dlength);
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+       qedi_cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+       return 0;
+}
+
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+                          struct iscsi_task *task)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_logout_req_hdr *fw_logout_req = NULL;
+       struct iscsi_task_context *fw_task_ctx = NULL;
+       struct iscsi_logout *logout_hdr = NULL;
+       struct qedi_cmd *qedi_cmd = NULL;
+       s16 tid = 0;
+       s16 ptu_invalidate = 0;
+
+       qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       logout_hdr = (struct iscsi_logout *)task->hdr;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+       qedi_cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
+       fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
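+       /* The top bit of the Logout Request reason-code byte is
+        * reserved-to-one (RFC 3720), hence the 0x80.
+        */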
+       fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+       fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
+       fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+       fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                                 qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                               qedi->tid_reuse_count[tid]++;
+       fw_logout_req->cid = qedi_conn->iscsi_conn_id;
+       fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+       /* Mstorm context */
+       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+       fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                 ISCSI_REG1_NUM_FAST_SGES, 0);
+
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+       qedi_cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+
+       return 0;
+}
+
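+/* Issue firmware cleanup for every active command on the connection
+ * (optionally restricted to the LUN being reset) and wait for matching
+ * completions, escalating to a device drain on timeout.
+ */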
+int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+                       struct iscsi_task *task, bool in_recovery)
+{
+       int rval;
+       struct iscsi_task *ctask;
+       struct qedi_cmd *cmd, *cmd_tmp;
+       struct iscsi_tm *tmf_hdr;
+       unsigned int lun = 0;
+       bool lun_reset = false;
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+
+       /* task is NULL when called from recovery; from the TMF response
+        * path it is valid.
+        */
+       if (task) {
+               tmf_hdr = (struct iscsi_tm *)task->hdr;
+
+               if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                       ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
+                       lun_reset = true;
+                       lun = scsilun_to_int(&tmf_hdr->lun);
+               }
+       }
+
+       qedi_conn->cmd_cleanup_req = 0;
+       qedi_conn->cmd_cleanup_cmpl = 0;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
+                 qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
+                 in_recovery, lun_reset);
+
+       if (lun_reset)
+               spin_lock_bh(&session->back_lock);
+
+       spin_lock(&qedi_conn->list_lock);
+
+       list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+                                io_cmd) {
+               ctask = cmd->task;
+               if (ctask == task)
+                       continue;
+
+               if (lun_reset) {
+                       if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
+                               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                                         "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
+                                         cmd->task_id, get_itt(ctask->itt),
+                                         cmd->scsi_cmd, cmd->scsi_cmd->device,
+                                         ctask->state, cmd->state,
+                                         qedi_conn->iscsi_conn_id);
+                               if (cmd->scsi_cmd->device->lun != lun)
+                                       continue;
+                       }
+               }
+               qedi_conn->cmd_cleanup_req++;
+               qedi_iscsi_cleanup_task(ctask, true);
+
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
+                         &cmd->io_cmd, qedi_conn->iscsi_conn_id);
+       }
+
+       spin_unlock(&qedi_conn->list_lock);
+
+       if (lun_reset)
+               spin_unlock_bh(&session->back_lock);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "cmd_cleanup_req=%d, cid=0x%x\n",
+                 qedi_conn->cmd_cleanup_req,
+                 qedi_conn->iscsi_conn_id);
+
+       rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+                                               ((qedi_conn->cmd_cleanup_req ==
+                                                qedi_conn->cmd_cleanup_cmpl) ||
+                                               qedi_conn->ep),
+                                               5 * HZ);
+       if (rval) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                         "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+                         qedi_conn->cmd_cleanup_req,
+                         qedi_conn->cmd_cleanup_cmpl,
+                         qedi_conn->iscsi_conn_id);
+
+               return 0;
+       }
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
+                 qedi_conn->cmd_cleanup_req,
+                 qedi_conn->cmd_cleanup_cmpl,
+                 qedi_conn->iscsi_conn_id);
+
+       iscsi_host_for_each_session(qedi->shost,
+                                   qedi_mark_device_missing);
+       qedi_ops->common->drain(qedi->cdev);
+
+       /* Enable IOs for all other sessions except the current one. */
+       if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
+                                             (qedi_conn->cmd_cleanup_req ==
+                                              qedi_conn->cmd_cleanup_cmpl),
+                                             5 * HZ)) {
+               iscsi_host_for_each_session(qedi->shost,
+                                           qedi_mark_device_available);
+               return -1;
+       }
+
+       iscsi_host_for_each_session(qedi->shost,
+                                   qedi_mark_device_available);
+
+       return 0;
+}
+
+void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
+                 struct iscsi_task *task)
+{
+       struct qedi_endpoint *qedi_ep;
+       int rval;
+
+       qedi_ep = qedi_conn->ep;
+       qedi_conn->cmd_cleanup_req = 0;
+       qedi_conn->cmd_cleanup_cmpl = 0;
+
+       if (!qedi_ep) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Cannot proceed, ep already disconnected, cid=0x%x\n",
+                         qedi_conn->iscsi_conn_id);
+               return;
+       }
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
+                 qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);
+
+       qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);
+
+       rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
+       if (rval) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "fatal error, need hard reset, cid=0x%x\n",
+                        qedi_conn->iscsi_conn_id);
+               WARN_ON(1);
+       }
+}
+
+static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
+                                        struct qedi_conn *qedi_conn,
+                                        struct iscsi_task *task,
+                                        struct qedi_cmd *qedi_cmd,
+                                        struct qedi_work_map *list_work)
+{
+       struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
+       int wait;
+
+       wait = wait_event_interruptible_timeout(qedi_conn->wait_queue,
+                                               ((qedi_cmd->state ==
+                                                 CLEANUP_RECV) ||
+                                               ((qedi_cmd->type == TYPEIO) &&
+                                                (cmd->state ==
+                                                 RESPONSE_RECEIVED))),
+                                               5 * HZ);
+       if (!wait) {
+               qedi_cmd->state = CLEANUP_WAIT_FAILED;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                         "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
+                         cmd->task_id, qedi_conn->iscsi_conn_id);
+
+               return -1;
+       }
+       return 0;
+}
+
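+/* Workqueue handler for ABORT TASK: clean up the referenced I/O in the
+ * firmware, wait for the cleanup response, then send the actual TMF.
+ */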
+static void qedi_tmf_work(struct work_struct *work)
+{
+       struct qedi_cmd *qedi_cmd =
+               container_of(work, struct qedi_cmd, tmf_work);
+       struct qedi_conn *qedi_conn = qedi_cmd->conn;
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct qedi_work_map *list_work = NULL;
+       struct iscsi_task *mtask;
+       struct qedi_cmd *cmd;
+       struct iscsi_task *ctask;
+       struct iscsi_tm *tmf_hdr;
+       s16 rval = 0;
+       s16 tid = 0;
+
+       mtask = qedi_cmd->task;
+       tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+       set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+       ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+       if (!ctask || !ctask->sc) {
+               QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
+               goto abort_ret;
+       }
+
+       cmd = (struct qedi_cmd *)ctask->dd_data;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
+                 get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
+                 qedi_conn->iscsi_conn_id);
+
+       if (do_not_recover) {
+               QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
+                        do_not_recover);
+               goto abort_ret;
+       }
+
+       list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+       if (!list_work) {
+               QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n");
+               goto abort_ret;
+       }
+
+       qedi_cmd->type = TYPEIO;
+       list_work->qedi_cmd = qedi_cmd;
+       list_work->rtid = cmd->task_id;
+       list_work->state = QEDI_WORK_SCHEDULED;
+       qedi_cmd->list_tmf_work = list_work;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
+                 list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
+                 tmf_hdr->flags);
+
+       spin_lock_bh(&qedi_conn->tmf_work_lock);
+       list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
+       spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+       qedi_iscsi_cleanup_task(ctask, false);
+
+       rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
+                                            list_work);
+       if (rval == -1) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "FW cleanup got escalated, cid=0x%x\n",
+                         qedi_conn->iscsi_conn_id);
+               goto ldel_exit;
+       }
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1) {
+               QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+                        qedi_conn->iscsi_conn_id);
+               goto ldel_exit;
+       }
+
+       qedi_cmd->task_id = tid;
+       qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+abort_ret:
+       clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+       return;
+
+ldel_exit:
+       spin_lock_bh(&qedi_conn->tmf_work_lock);
+       if (!qedi_cmd->list_tmf_work) {
+               list_del_init(&list_work->list);
+               qedi_cmd->list_tmf_work = NULL;
+               kfree(list_work);
+       }
+       spin_unlock_bh(&qedi_conn->tmf_work_lock);
+
+       spin_lock(&qedi_conn->list_lock);
+       list_del_init(&cmd->io_cmd);
+       qedi_conn->active_cmd_count--;
+       spin_unlock(&qedi_conn->list_lock);
+
+       clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+}
+
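+/*
+ * Build the firmware task context for a TMF PDU (Ystorm, Mstorm and
+ * Ustorm sections), bind the task's ITT to the firmware TID, then post
+ * the request on the SQ and ring the doorbell. For ABORT_TASK the
+ * referenced command's TID is folded into the RTT; every other TMF
+ * function uses the reserved tag.
+ */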
+static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
+                              struct iscsi_task *mtask)
+{
+       struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_tmf_request_hdr *fw_tmf_request;
+       struct iscsi_sge *single_sge;
+       struct qedi_cmd *qedi_cmd;
+       struct qedi_cmd *cmd;
+       struct iscsi_task *ctask;
+       struct iscsi_tm *tmf_hdr;
+       struct iscsi_sge *req_sge;
+       struct iscsi_sge *resp_sge;
+       u32 lun[2];
+       s16 tid = 0, ptu_invalidate = 0;
+
+       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+       tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+
+       tid = qedi_cmd->task_id;
+       qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+       fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
+       fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
+       fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+       memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+       fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
+       fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                               qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                               qedi->tid_reuse_count[tid]++;
+
+       if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+            ISCSI_TM_FUNC_ABORT_TASK) {
+               ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
+               if (!ctask || !ctask->sc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not get reference task\n");
+                       return 0;
+               }
+               cmd = (struct qedi_cmd *)ctask->dd_data;
+               fw_tmf_request->rtt =
+                               qedi_set_itt(cmd->task_id,
+                                            get_itt(tmf_hdr->rtt));
+       } else {
+               fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+       }
+
+       fw_tmf_request->opcode = tmf_hdr->opcode;
+       fw_tmf_request->function = tmf_hdr->flags;
+       fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
+       fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
+
+       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+       single_sge->sge_len = resp_sge->sge_len;
+
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SINGLE_SGE, 1);
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SLOW_IO, 0);
+       fw_task_ctx->mstorm_st_context.sgl_size = 1;
+       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
+       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+       fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                 ISCSI_REG1_NUM_FAST_SGES, 0);
+
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
+                 tid, mtask->itt, qedi_conn->iscsi_conn_id);
+
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+       qedi_cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+       return 0;
+}
+
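+/*
+ * Entry point for TMF requests from libiscsi: ABORT_TASK is deferred to
+ * the tmf_thread workqueue because it must wait for a firmware cleanup
+ * of the aborted task, while LUN and target resets allocate a TID and
+ * are sent to the firmware directly.
+ */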
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+                         struct iscsi_task *mtask)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_tm *tmf_hdr;
+       struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+       s16 tid = 0;
+
+       tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+       qedi_cmd->task = mtask;
+
+       /* If ABORT_TASK, schedule the cleanup work and return */
+       if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+           ISCSI_TM_FUNC_ABORT_TASK) {
+               qedi_cmd->state = CLEANUP_WAIT;
+               INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+               queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
+
+       } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                   ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
+                  ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                   ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
+                  ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
+                   ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+               tid = qedi_get_task_idx(qedi);
+               if (tid == -1) {
+                       QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
+                                qedi_conn->iscsi_conn_id);
+                       return -1;
+               }
+               qedi_cmd->task_id = tid;
+
+               qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
+
+       } else {
+               QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
+                        qedi_conn->iscsi_conn_id);
+               return -1;
+       }
+
+       return 0;
+}
+
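+/*
+ * Send a text request PDU: the request buffer is passed to the firmware
+ * as a cached SGE in the Ystorm context, the response buffer as a
+ * single SGE in the Mstorm context, and the task is queued on the SQ.
+ */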
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+                        struct iscsi_task *task)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_text_request_hdr *fw_text_request;
+       struct iscsi_cached_sge_ctx *cached_sge;
+       struct iscsi_sge *single_sge;
+       struct qedi_cmd *qedi_cmd;
+       /* For 6.5 hdr iscsi_hdr */
+       struct iscsi_text *text_hdr;
+       struct iscsi_sge *req_sge;
+       struct iscsi_sge *resp_sge;
+       s16 ptu_invalidate = 0;
+       s16 tid = 0;
+
+       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       text_hdr = (struct iscsi_text *)task->hdr;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
+       qedi_cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_text_request =
+                       &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
+       fw_text_request->opcode = text_hdr->opcode;
+       fw_text_request->flags_attr = text_hdr->flags;
+
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+       fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
+       fw_text_request->ttt = text_hdr->ttt;
+       fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+       fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+       fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                                    qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                                  qedi->tid_reuse_count[tid]++;
+
+       cached_sge =
+              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+       cached_sge->sge.sge_len = req_sge->sge_len;
+       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+       cached_sge->sge.sge_addr.hi =
+                             (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+       /* Mstorm context */
+       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+       fw_task_ctx->mstorm_st_context.task_type = 0x2;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+       single_sge->sge_len = resp_sge->sge_len;
+
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SINGLE_SGE, 1);
+       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                 ISCSI_MFLAGS_SLOW_IO, 0);
+       fw_task_ctx->mstorm_st_context.sgl_size = 1;
+       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_ag_context.exp_data_acked =
+                                                     ntoh24(text_hdr->dlength);
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
+                                                     ntoh24(text_hdr->dlength);
+       fw_task_ctx->ustorm_st_context.exp_data_sn =
+                                             be32_to_cpu(text_hdr->exp_statsn);
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+       fw_task_ctx->ustorm_st_context.task_type = 0x2;
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+       /*  Add command in active command list */
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+       qedi_cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+
+       return 0;
+}
+
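+/*
+ * Send a NOP-Out PDU. A reply to a target NOP-In (TTT != 0xffffffff) is
+ * completed locally by the firmware; an initiator-originated ping uses
+ * the reserved TTT and stays on the active command list until the
+ * matching NOP-In response arrives.
+ */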
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+                          struct iscsi_task *task,
+                          char *datap, int data_len, int unsol)
+{
+       struct qedi_ctx *qedi = qedi_conn->qedi;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_nop_out_hdr *fw_nop_out;
+       struct qedi_cmd *qedi_cmd;
+       /* For 6.5 hdr iscsi_hdr */
+       struct iscsi_nopout *nopout_hdr;
+       struct iscsi_cached_sge_ctx *cached_sge;
+       struct iscsi_sge *single_sge;
+       struct iscsi_sge *req_sge;
+       struct iscsi_sge *resp_sge;
+       u32 lun[2];
+       s16 ptu_invalidate = 0;
+       s16 tid = 0;
+
+       req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+       resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+       qedi_cmd = (struct qedi_cmd *)task->dd_data;
+       nopout_hdr = (struct iscsi_nopout *)task->hdr;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1) {
+               QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+               return -ENOMEM;
+       }
+
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+       qedi_cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
+       SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+       SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+
+       memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+       fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
+       fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+
+       qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+
+       if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
+               fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
+               fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
+               fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+               fw_task_ctx->ystorm_st_context.state.local_comp = 1;
+               SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                         USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+       } else {
+               fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
+               fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
+               fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+
+               spin_lock(&qedi_conn->list_lock);
+               list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
+               qedi_cmd->io_cmd_in_list = true;
+               qedi_conn->active_cmd_count++;
+               spin_unlock(&qedi_conn->list_lock);
+       }
+
+       fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
+       fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+       fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
+
+       cached_sge =
+              &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
+       cached_sge->sge.sge_len = req_sge->sge_len;
+       cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
+       cached_sge->sge.sge_addr.hi =
+                       (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+
+       /* Mstorm context */
+       fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
+
+       single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
+       single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
+       single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
+       single_sge->sge_len = resp_sge->sge_len;
+       fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                               qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                               qedi->tid_reuse_count[tid]++;
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
+       fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
+       fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
+
+       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                 ISCSI_REG1_NUM_FAST_SGES, 0);
+
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+       return 0;
+}
+
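+/*
+ * Split one DMA-mapped scatterlist element into several SGEs: fragments
+ * are cut at the next page boundary while the address is unaligned and
+ * are capped at QEDI_BD_SPLIT_SZ otherwise. Returns the number of SGEs
+ * written at bd_index.
+ */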
+static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
+                        int bd_index)
+{
+       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+       int frag_size, sg_frags;
+
+       sg_frags = 0;
+
+       while (sg_len) {
+               if (addr % QEDI_PAGE_SIZE)
+                       frag_size =
+                                  (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
+               else
+                       frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
+                                   (sg_len % QEDI_BD_SPLIT_SZ);
+
+               if (frag_size == 0)
+                       frag_size = QEDI_BD_SPLIT_SZ;
+
+               bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
+               bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
+               bd[bd_index + sg_frags].sge_len = (u16)frag_size;
+               QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
+                         "split sge %d: addr=%llx, len=%x",
+                         (bd_index + sg_frags), addr, frag_size);
+
+               addr += (u64)frag_size;
+               sg_frags++;
+               sg_len -= frag_size;
+       }
+       return sg_frags;
+}
+
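+/*
+ * DMA-map the command's scatterlist and translate it into the SGE
+ * table. A single element no longer than MAX_SGLEN_FOR_CACHESGL is
+ * emitted as one cached SGE; otherwise interior boundaries that are not
+ * page-aligned force the slow path and elements larger than
+ * QEDI_BD_SPLIT_SZ are split. Returns the resulting SGE count.
+ */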
+static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+       struct scatterlist *sg;
+       int byte_count = 0;
+       int bd_count = 0;
+       int sg_count;
+       int sg_len;
+       int sg_frags;
+       u64 addr, end_addr;
+       int i;
+
+       WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
+
+       sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
+                             scsi_sg_count(sc), sc->sc_data_direction);
+
+       /*
+        * Send a single SGE as a cached SGL when its length is
+        * less than 64K.
+        */
+       sg = scsi_sglist(sc);
+       if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
+               sg_len = sg_dma_len(sg);
+               addr = (u64)sg_dma_address(sg);
+
+               bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
+               bd[bd_count].sge_addr.hi = (addr >> 32);
+               bd[bd_count].sge_len = (u16)sg_len;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+                         "single-cached-sgl: bd_count:%d addr=%llx, len=%x",
+                         sg_count, addr, sg_len);
+
+               return ++bd_count;
+       }
+
+       scsi_for_each_sg(sc, sg, sg_count, i) {
+               sg_len = sg_dma_len(sg);
+               addr = (u64)sg_dma_address(sg);
+               end_addr = (addr + sg_len);
+
+               /*
+                * first sg elem in the 'list',
+                * check if end addr is page-aligned.
+                */
+               if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
+                       cmd->use_slowpath = true;
+
+               /*
+                * last sg elem in the 'list',
+                * check if start addr is page-aligned.
+                */
+               else if ((i == (sg_count - 1)) &&
+                        (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
+                       cmd->use_slowpath = true;
+
+               /*
+                * middle sg elems in the 'list',
+                * check if start and end addr are page-aligned
+                */
+               else if ((i != 0) && (i != (sg_count - 1)) &&
+                        ((addr % QEDI_PAGE_SIZE) ||
+                        (end_addr % QEDI_PAGE_SIZE)))
+                       cmd->use_slowpath = true;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
+                         i, sg_len);
+
+               if (sg_len > QEDI_BD_SPLIT_SZ) {
+                       sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
+               } else {
+                       sg_frags = 1;
+                       bd[bd_count].sge_addr.lo = addr & 0xffffffff;
+                       bd[bd_count].sge_addr.hi = addr >> 32;
+                       bd[bd_count].sge_len = sg_len;
+               }
+               byte_count += sg_len;
+               bd_count += sg_frags;
+       }
+
+       if (byte_count != scsi_bufflen(sc))
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "byte_count = %d != scsi_bufflen = %d\n", byte_count,
+                        scsi_bufflen(sc));
+       else
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
+                         byte_count);
+
+       WARN_ON(byte_count != scsi_bufflen(sc));
+
+       return bd_count;
+}
+
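+/*
+ * Fill cmd->io_tbl: map the scatterlist if the command carries data,
+ * otherwise record a single zeroed SGE and a valid count of 0.
+ */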
+static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
+{
+       int bd_count;
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+       if (scsi_sg_count(sc)) {
+               bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
+               if (bd_count == 0)
+                       return;
+       } else {
+               struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+
+               bd[0].sge_addr.lo = 0;
+               bd[0].sge_addr.hi = 0;
+               bd[0].sge_len = 0;
+               bd_count = 0;
+       }
+       cmd->io_tbl.sge_valid = bd_count;
+}
+
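+/*
+ * Copy the SCSI CDB into the firmware context four bytes at a time,
+ * swapping each dword to big-endian. If the CDB length is not a
+ * multiple of four, only the first two leftover bytes are packed into
+ * the final dword.
+ */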
+static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
+{
+       u32 dword;
+       int lpcnt;
+       u8 *srcp;
+
+       lpcnt = sc->cmd_len / sizeof(dword);
+       srcp = (u8 *)sc->cmnd;
+       while (lpcnt--) {
+               memcpy(&dword, (const void *)srcp, 4);
+               *dstp = cpu_to_be32(dword);
+               srcp += 4;
+               dstp++;
+       }
+       if (sc->cmd_len & 0x3) {
+               dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
+               *dstp = cpu_to_be32(dword);
+       }
+}
+
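+/*
+ * Record one entry in the circular I/O trace buffer (used when
+ * qedi_io_tracing is set): opcode, LBA bytes, buffer length, SGE-path
+ * statistics and the CPUs that handled the request and response.
+ */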
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+                  u16 tid, int8_t direction)
+{
+       struct qedi_io_log *io_log;
+       struct iscsi_conn *conn = task->conn;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct scsi_cmnd *sc_cmd = task->sc;
+       unsigned long flags;
+       u8 op;
+
+       spin_lock_irqsave(&qedi->io_trace_lock, flags);
+
+       io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
+       io_log->direction = direction;
+       io_log->task_id = tid;
+       io_log->cid = qedi_conn->iscsi_conn_id;
+       io_log->lun = sc_cmd->device->lun;
+       io_log->op = sc_cmd->cmnd[0];
+       op = sc_cmd->cmnd[0];
+       io_log->lba[0] = sc_cmd->cmnd[2];
+       io_log->lba[1] = sc_cmd->cmnd[3];
+       io_log->lba[2] = sc_cmd->cmnd[4];
+       io_log->lba[3] = sc_cmd->cmnd[5];
+       io_log->bufflen = scsi_bufflen(sc_cmd);
+       io_log->sg_count = scsi_sg_count(sc_cmd);
+       io_log->fast_sgs = qedi->fast_sgls;
+       io_log->cached_sgs = qedi->cached_sgls;
+       io_log->slow_sgs = qedi->slow_sgls;
+       io_log->cached_sge = qedi->use_cached_sge;
+       io_log->slow_sge = qedi->use_slow_sge;
+       io_log->fast_sge = qedi->use_fast_sge;
+       io_log->result = sc_cmd->result;
+       io_log->jiffies = jiffies;
+       io_log->blk_req_cpu = smp_processor_id();
+
+       if (direction == QEDI_IO_TRACE_REQ) {
+               /* For requests we only care about the submission CPU */
+               io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+               io_log->intr_cpu = 0;
+               io_log->blk_rsp_cpu = 0;
+       } else if (direction == QEDI_IO_TRACE_RSP) {
+               io_log->req_cpu = smp_processor_id() % qedi->num_queues;
+               io_log->intr_cpu = qedi->intr_cpu;
+               io_log->blk_rsp_cpu = smp_processor_id();
+       }
+
+       qedi->io_trace_idx++;
+       if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
+               qedi->io_trace_idx = 0;
+
+       qedi->use_cached_sge = false;
+       qedi->use_slow_sge = false;
+       qedi->use_fast_sge = false;
+
+       spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
+}
+
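+/*
+ * Build and post the firmware task context for a SCSI command: map the
+ * scatterlist, fill the Ystorm PDU header (LUN, ITT, CDB, expected
+ * transfer length), program the Mstorm and Ustorm contexts, then pick
+ * the SGL flavor for the data path (cached single SGE, slow SGL for
+ * unaligned lists, fast SGL otherwise) before ringing the doorbell.
+ */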
+int qedi_iscsi_send_ioreq(struct iscsi_task *task)
+{
+       struct iscsi_conn *conn = task->conn;
+       struct iscsi_session *session = conn->session;
+       struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+       struct qedi_ctx *qedi = iscsi_host_priv(shost);
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_cmd *cmd = task->dd_data;
+       struct scsi_cmnd *sc = task->sc;
+       struct iscsi_task_context *fw_task_ctx;
+       struct iscsi_cached_sge_ctx *cached_sge;
+       struct iscsi_phys_sgl_ctx *phys_sgl;
+       struct iscsi_virt_sgl_ctx *virt_sgl;
+       struct ystorm_iscsi_task_st_ctx *yst_cxt;
+       struct mstorm_iscsi_task_st_ctx *mst_cxt;
+       struct iscsi_sgl *sgl_struct;
+       struct iscsi_sge *single_sge;
+       struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
+       struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+       enum iscsi_task_type task_type;
+       struct iscsi_cmd_hdr *fw_cmd;
+       u32 lun[2];
+       u32 exp_data;
+       u16 cq_idx = smp_processor_id() % qedi->num_queues;
+       s16 ptu_invalidate = 0;
+       s16 tid = 0;
+       u8 num_fast_sgs;
+
+       tid = qedi_get_task_idx(qedi);
+       if (tid == -1)
+               return -ENOMEM;
+
+       qedi_iscsi_map_sg_list(cmd);
+
+       int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
+       fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+
+       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+       cmd->task_id = tid;
+
+       /* Ystorm context */
+       fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
+       SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+
+       if (sc->sc_data_direction == DMA_TO_DEVICE) {
+               if (conn->session->initial_r2t_en) {
+                       exp_data = min((conn->session->imm_data_en *
+                                       conn->max_xmit_dlength),
+                                      conn->session->first_burst);
+                       exp_data = min(exp_data, scsi_bufflen(sc));
+                       fw_task_ctx->ustorm_ag_context.exp_data_acked =
+                                                         cpu_to_le32(exp_data);
+               } else {
+                       fw_task_ctx->ustorm_ag_context.exp_data_acked =
+                             min(conn->session->first_burst, scsi_bufflen(sc));
+               }
+
+               SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+               task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+       } else {
+               if (scsi_bufflen(sc))
+                       SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+               task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+       }
+
+       fw_cmd->lun.lo = be32_to_cpu(lun[0]);
+       fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+
+       qedi_update_itt_map(qedi, tid, task->itt, cmd);
+       fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
+       fw_cmd->expected_transfer_length = scsi_bufflen(sc);
+       fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
+       fw_cmd->opcode = hdr->opcode;
+       qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
+
+       /* Mstorm context */
+       fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
+       fw_task_ctx->mstorm_st_context.sense_db.hi =
+                                       (u32)((u64)cmd->sense_buffer_dma >> 32);
+       fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
+       fw_task_ctx->mstorm_st_context.task_type = task_type;
+
+       if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
+               ptu_invalidate = 1;
+               qedi->tid_reuse_count[tid] = 0;
+       }
+       fw_task_ctx->ystorm_st_context.state.reuse_count =
+                                                    qedi->tid_reuse_count[tid];
+       fw_task_ctx->mstorm_st_context.reuse_count =
+                                                  qedi->tid_reuse_count[tid]++;
+
+       /* Ustorm context */
+       fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
+       fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
+       fw_task_ctx->ustorm_st_context.exp_data_sn =
+                                                  be32_to_cpu(hdr->exp_statsn);
+       fw_task_ctx->ustorm_st_context.task_type = task_type;
+       fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
+       fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
+
+       SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
+                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
+                 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+
+       num_fast_sgs = (cmd->io_tbl.sge_valid ?
+                       min((u16)QEDI_FAST_SGE_COUNT,
+                           (u16)cmd->io_tbl.sge_valid) : 0);
+       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                 ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
+
+       fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
+       fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
+                 cmd->io_tbl.sge_valid);
+
+       yst_cxt = &fw_task_ctx->ystorm_st_context;
+       mst_cxt = &fw_task_ctx->mstorm_st_context;
+       /* Tx path */
+       if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+               /* not considering superIO or FastIO */
+               if (cmd->io_tbl.sge_valid == 1) {
+                       cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
+                       cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
+                       cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
+                       cached_sge->sge.sge_len = bd[0].sge_len;
+                       qedi->cached_sgls++;
+               } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 1);
+                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                                 ISCSI_REG1_NUM_FAST_SGES, 0);
+                       phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
+                       phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+                       phys_sgl->sgl_base.hi =
+                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+                       phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
+                       qedi->slow_sgls++;
+               } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 0);
+                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                                 ISCSI_REG1_NUM_FAST_SGES,
+                                 min((u16)QEDI_FAST_SGE_COUNT,
+                                     (u16)cmd->io_tbl.sge_valid));
+                       virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
+                       virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
+                       virt_sgl->sgl_base.hi =
+                                     (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+                       virt_sgl->sgl_initial_offset =
+                                (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+                       qedi->fast_sgls++;
+               }
+               fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+               fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+       } else {
+               /* Rx path */
+               if (cmd->io_tbl.sge_valid == 1) {
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 0);
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SINGLE_SGE, 1);
+                       single_sge = &mst_cxt->sgl_union.single_sge;
+                       single_sge->sge_addr.lo = bd[0].sge_addr.lo;
+                       single_sge->sge_addr.hi = bd[0].sge_addr.hi;
+                       single_sge->sge_len = bd[0].sge_len;
+                       qedi->cached_sgls++;
+               } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
+                       sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+                       sgl_struct->sgl_addr.lo =
+                                               (u32)(cmd->io_tbl.sge_tbl_dma);
+                       sgl_struct->sgl_addr.hi =
+                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 1);
+                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                                 ISCSI_REG1_NUM_FAST_SGES, 0);
+                       sgl_struct->updated_sge_size = 0;
+                       sgl_struct->updated_sge_offset = 0;
+                       qedi->slow_sgls++;
+               } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
+                       sgl_struct = &mst_cxt->sgl_union.sgl_struct;
+                       sgl_struct->sgl_addr.lo =
+                                               (u32)(cmd->io_tbl.sge_tbl_dma);
+                       sgl_struct->sgl_addr.hi =
+                                    (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+                       sgl_struct->byte_offset =
+                               (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
+                       SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
+                                 ISCSI_MFLAGS_SLOW_IO, 0);
+                       SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
+                                 ISCSI_REG1_NUM_FAST_SGES, 0);
+                       sgl_struct->updated_sge_size = 0;
+                       sgl_struct->updated_sge_offset = 0;
+                       qedi->fast_sgls++;
+               }
+               fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
+               fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
+       }
+
+       if (cmd->io_tbl.sge_valid == 1)
+               /* Single-SGL */
+               qedi->use_cached_sge = true;
+       else {
+               if (cmd->use_slowpath)
+                       qedi->use_slow_sge = true;
+               else
+                       qedi->use_fast_sge = true;
+       }
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+                 "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+                 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
+                 "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
+                 "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
+                 (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+                 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
+
+       /*  Add command in active command list */
+       spin_lock(&qedi_conn->list_lock);
+       list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
+       cmd->io_cmd_in_list = true;
+       qedi_conn->active_cmd_count++;
+       spin_unlock(&qedi_conn->list_lock);
+
+       qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+       qedi_ring_doorbell(qedi_conn);
+       if (qedi_io_tracing)
+               qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
+
+       return 0;
+}
+
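+/*
+ * Post a cleanup request for the task's TID on the SQ so the firmware
+ * releases its state for that task.
+ */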
+int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
+{
+       struct iscsi_conn *conn = task->conn;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_cmd *cmd = task->dd_data;
+       s16 ptu_invalidate = 0;
+
+       QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+                 "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0x%x cid=0x%x\n",
+                 cmd->task_id, get_itt(task->itt), task->state,
+                 cmd->state, qedi_conn->iscsi_conn_id);
+
+       qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
+       qedi_ring_doorbell(qedi_conn);
+
+       return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
new file mode 100644 (file)
index 0000000..8e488de
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_GBL_H_
+#define _QEDI_GBL_H_
+
+#include "qedi_iscsi.h"
+
+extern uint qedi_io_tracing;
+extern int do_not_recover;
+extern struct scsi_host_template qedi_host_template;
+extern struct iscsi_transport qedi_iscsi_transport;
+extern const struct qed_iscsi_ops *qedi_ops;
+extern struct qedi_debugfs_ops qedi_debugfs_ops;
+extern const struct file_operations qedi_dbg_fops;
+extern struct device_attribute *qedi_shost_attrs[];
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep);
+
+int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
+                         struct iscsi_task *task);
+int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
+                          struct iscsi_task *task);
+int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
+                         struct iscsi_task *mtask);
+int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
+                        struct iscsi_task *task);
+int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
+                          struct iscsi_task *task,
+                          char *datap, int data_len, int unsol);
+int qedi_iscsi_send_ioreq(struct iscsi_task *task);
+int qedi_get_task_idx(struct qedi_ctx *qedi);
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx);
+int qedi_iscsi_cleanup_task(struct iscsi_task *task,
+                           bool mark_cmd_node_deleted);
+void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd);
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+                        struct qedi_cmd *qedi_cmd);
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+                             struct async_data *data);
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+                             struct qedi_conn *qedi_conn);
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
+int qedi_recover_all_conns(struct qedi_ctx *qedi);
+void qedi_fp_process_cqes(struct qedi_work *work);
+int qedi_cleanup_all_io(struct qedi_ctx *qedi,
+                       struct qedi_conn *qedi_conn,
+                       struct iscsi_task *task, bool in_recovery);
+void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
+                  u16 tid, int8_t direction);
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id);
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl);
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id);
+int qedi_create_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
+void qedi_clearsq(struct qedi_ctx *qedi,
+                 struct qedi_conn *qedi_conn,
+                 struct iscsi_task *task);
+
+#endif
diff --git a/drivers/scsi/qedi/qedi_hsi.h b/drivers/scsi/qedi/qedi_hsi.h
new file mode 100644 (file)
index 0000000..8ca44c7
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef __QEDI_HSI__
+#define __QEDI_HSI__
+/*
+ * Add include to common target
+ */
+#include <linux/qed/common_hsi.h>
+
+/*
+ * Add include to common storage target
+ */
+#include <linux/qed/storage_common.h>
+
+/*
+ * Add include to common TCP target
+ */
+#include <linux/qed/tcp_common.h>
+
+/*
+ * Add include to common iSCSI target for both eCore and protocol driver
+ */
+#include <linux/qed/iscsi_common.h>
+
+/*
+ * iSCSI CMDQ element
+ */
+struct iscsi_cmdqe {
+       __le16 conn_id;
+       u8 invalid_command;
+       u8 cmd_hdr_type;
+       __le32 reserved1[2];
+       __le32 cmd_payload[13];
+};
+
+/*
+ * iSCSI CMD header type
+ */
+enum iscsi_cmd_hdr_type {
+       ISCSI_CMD_HDR_TYPE_BHS_ONLY /* iSCSI BHS with no expected AHS */,
+       ISCSI_CMD_HDR_TYPE_BHS_W_AHS /* iSCSI BHS with expected AHS */,
+       ISCSI_CMD_HDR_TYPE_AHS /* iSCSI AHS */,
+       MAX_ISCSI_CMD_HDR_TYPE
+};
+
+#endif /* __QEDI_HSI__ */
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
new file mode 100644 (file)
index 0000000..d6a2054
--- /dev/null
@@ -0,0 +1,1624 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <scsi/scsi_tcq.h>
+
+#include "qedi.h"
+#include "qedi_iscsi.h"
+#include "qedi_gbl.h"
+
+int qedi_recover_all_conns(struct qedi_ctx *qedi)
+{
+       struct qedi_conn *qedi_conn;
+       int i;
+
+       for (i = 0; i < qedi->max_active_conns; i++) {
+               qedi_conn = qedi_get_conn_from_id(qedi, i);
+               if (!qedi_conn)
+                       continue;
+
+               qedi_start_conn_recovery(qedi, qedi_conn);
+       }
+
+       return SUCCESS;
+}
+
+static int qedi_eh_host_reset(struct scsi_cmnd *cmd)
+{
+       struct Scsi_Host *shost = cmd->device->host;
+       struct qedi_ctx *qedi;
+
+       qedi = iscsi_host_priv(shost);
+
+       return qedi_recover_all_conns(qedi);
+}
+
+struct scsi_host_template qedi_host_template = {
+       .module = THIS_MODULE,
+       .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
+       .proc_name = QEDI_MODULE_NAME,
+       .queuecommand = iscsi_queuecommand,
+       .eh_abort_handler = iscsi_eh_abort,
+       .eh_device_reset_handler = iscsi_eh_device_reset,
+       .eh_target_reset_handler = iscsi_eh_recover_target,
+       .eh_host_reset_handler = qedi_eh_host_reset,
+       .target_alloc = iscsi_target_alloc,
+       .change_queue_depth = scsi_change_queue_depth,
+       .can_queue = QEDI_MAX_ISCSI_TASK,
+       .this_id = -1,
+       .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD,
+       .max_sectors = 0xffff,
+       .cmd_per_lun = 128,
+       .use_clustering = ENABLE_CLUSTERING,
+       .shost_attrs = qedi_shost_attrs,
+};
+
+static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
+                                          struct qedi_conn *qedi_conn)
+{
+       if (qedi_conn->gen_pdu.resp_bd_tbl) {
+               dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                                 qedi_conn->gen_pdu.resp_bd_tbl,
+                                 qedi_conn->gen_pdu.resp_bd_dma);
+               qedi_conn->gen_pdu.resp_bd_tbl = NULL;
+       }
+
+       if (qedi_conn->gen_pdu.req_bd_tbl) {
+               dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                                 qedi_conn->gen_pdu.req_bd_tbl,
+                                 qedi_conn->gen_pdu.req_bd_dma);
+               qedi_conn->gen_pdu.req_bd_tbl = NULL;
+       }
+
+       if (qedi_conn->gen_pdu.resp_buf) {
+               dma_free_coherent(&qedi->pdev->dev,
+                                 ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                 qedi_conn->gen_pdu.resp_buf,
+                                 qedi_conn->gen_pdu.resp_dma_addr);
+               qedi_conn->gen_pdu.resp_buf = NULL;
+       }
+
+       if (qedi_conn->gen_pdu.req_buf) {
+               dma_free_coherent(&qedi->pdev->dev,
+                                 ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                 qedi_conn->gen_pdu.req_buf,
+                                 qedi_conn->gen_pdu.req_dma_addr);
+               qedi_conn->gen_pdu.req_buf = NULL;
+       }
+}
+
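+/*
+ * Allocate the DMA-coherent login/text request and response buffers and
+ * their BD tables for a connection, unwinding any partial allocation on
+ * failure.
+ */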
+static int qedi_conn_alloc_login_resources(struct qedi_ctx *qedi,
+                                          struct qedi_conn *qedi_conn)
+{
+       qedi_conn->gen_pdu.req_buf =
+               dma_alloc_coherent(&qedi->pdev->dev,
+                                  ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                  &qedi_conn->gen_pdu.req_dma_addr,
+                                  GFP_KERNEL);
+       if (!qedi_conn->gen_pdu.req_buf)
+               goto login_req_buf_failure;
+
+       qedi_conn->gen_pdu.req_buf_size = 0;
+       qedi_conn->gen_pdu.req_wr_ptr = qedi_conn->gen_pdu.req_buf;
+
+       qedi_conn->gen_pdu.resp_buf =
+               dma_alloc_coherent(&qedi->pdev->dev,
+                                  ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                  &qedi_conn->gen_pdu.resp_dma_addr,
+                                  GFP_KERNEL);
+       if (!qedi_conn->gen_pdu.resp_buf)
+               goto login_resp_buf_failure;
+
+       qedi_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+       qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf;
+
+       qedi_conn->gen_pdu.req_bd_tbl =
+               dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                                  &qedi_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+       if (!qedi_conn->gen_pdu.req_bd_tbl)
+               goto login_req_bd_tbl_failure;
+
+       qedi_conn->gen_pdu.resp_bd_tbl =
+               dma_alloc_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                                  &qedi_conn->gen_pdu.resp_bd_dma,
+                                  GFP_KERNEL);
+       if (!qedi_conn->gen_pdu.resp_bd_tbl)
+               goto login_resp_bd_tbl_failure;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SESS,
+                 "Allocation successful, cid=0x%x\n",
+                 qedi_conn->iscsi_conn_id);
+       return 0;
+
+login_resp_bd_tbl_failure:
+       dma_free_coherent(&qedi->pdev->dev, QEDI_PAGE_SIZE,
+                         qedi_conn->gen_pdu.req_bd_tbl,
+                         qedi_conn->gen_pdu.req_bd_dma);
+       qedi_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+       dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+                         qedi_conn->gen_pdu.resp_buf,
+                         qedi_conn->gen_pdu.resp_dma_addr);
+       qedi_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+       dma_free_coherent(&qedi->pdev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+                         qedi_conn->gen_pdu.req_buf,
+                         qedi_conn->gen_pdu.req_dma_addr);
+       qedi_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+       iscsi_conn_printk(KERN_ERR, qedi_conn->cls_conn->dd_data,
+                         "login resource alloc failed!!\n");
+       return -ENOMEM;
+}
+
+static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
+                                 struct iscsi_session *session)
+{
+       int i;
+
+       for (i = 0; i < session->cmds_max; i++) {
+               struct iscsi_task *task = session->cmds[i];
+               struct qedi_cmd *cmd = task->dd_data;
+
+               if (cmd->io_tbl.sge_tbl)
+                       dma_free_coherent(&qedi->pdev->dev,
+                                         QEDI_ISCSI_MAX_BDS_PER_CMD *
+                                         sizeof(struct iscsi_sge),
+                                         cmd->io_tbl.sge_tbl,
+                                         cmd->io_tbl.sge_tbl_dma);
+
+               if (cmd->sense_buffer)
+                       dma_free_coherent(&qedi->pdev->dev,
+                                         SCSI_SENSE_BUFFERSIZE,
+                                         cmd->sense_buffer,
+                                         cmd->sense_buffer_dma);
+       }
+}
+
+static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
+                          struct qedi_cmd *cmd)
+{
+       struct qedi_io_bdt *io = &cmd->io_tbl;
+       struct iscsi_sge *sge;
+
+       io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
+                                        QEDI_ISCSI_MAX_BDS_PER_CMD *
+                                        sizeof(*sge),
+                                        &io->sge_tbl_dma, GFP_KERNEL);
+       if (!io->sge_tbl) {
+               iscsi_session_printk(KERN_ERR, session,
+                                    "Could not allocate BD table.\n");
+               return -ENOMEM;
+       }
+
+       io->sge_valid = 0;
+       return 0;
+}
+
+static int qedi_setup_cmd_pool(struct qedi_ctx *qedi,
+                              struct iscsi_session *session)
+{
+       int i;
+
+       for (i = 0; i < session->cmds_max; i++) {
+               struct iscsi_task *task = session->cmds[i];
+               struct qedi_cmd *cmd = task->dd_data;
+
+               task->hdr = &cmd->hdr;
+               task->hdr_max = sizeof(struct iscsi_hdr);
+
+               if (qedi_alloc_sget(qedi, session, cmd))
+                       goto free_sgets;
+
+               cmd->sense_buffer = dma_alloc_coherent(&qedi->pdev->dev,
+                                                      SCSI_SENSE_BUFFERSIZE,
+                                                      &cmd->sense_buffer_dma,
+                                                      GFP_KERNEL);
+               if (!cmd->sense_buffer)
+                       goto free_sgets;
+       }
+
+       return 0;
+
+free_sgets:
+       qedi_destroy_cmd_pool(qedi, session);
+       return -ENOMEM;
+}
+
+static struct iscsi_cls_session *
+qedi_session_create(struct iscsi_endpoint *ep, u16 cmds_max,
+                   u16 qdepth, uint32_t initial_cmdsn)
+{
+       struct Scsi_Host *shost;
+       struct iscsi_cls_session *cls_session;
+       struct qedi_ctx *qedi;
+       struct qedi_endpoint *qedi_ep;
+
+       if (!ep)
+               return NULL;
+
+       qedi_ep = ep->dd_data;
+       shost = qedi_ep->qedi->shost;
+       qedi = iscsi_host_priv(shost);
+
+       if (cmds_max > qedi->max_sqes)
+               cmds_max = qedi->max_sqes;
+       else if (cmds_max < QEDI_SQ_WQES_MIN)
+               cmds_max = QEDI_SQ_WQES_MIN;
+
+       cls_session = iscsi_session_setup(&qedi_iscsi_transport, shost,
+                                         cmds_max, 0, sizeof(struct qedi_cmd),
+                                         initial_cmdsn, ISCSI_MAX_TARGET);
+       if (!cls_session) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failed to setup session for ep=%p\n", qedi_ep);
+               return NULL;
+       }
+
+       if (qedi_setup_cmd_pool(qedi, cls_session->dd_data)) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failed to setup cmd pool for ep=%p\n", qedi_ep);
+               goto session_teardown;
+       }
+
+       return cls_session;
+
+session_teardown:
+       iscsi_session_teardown(cls_session);
+       return NULL;
+}
+
+static void qedi_session_destroy(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *session = cls_session->dd_data;
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct qedi_ctx *qedi = iscsi_host_priv(shost);
+
+       qedi_destroy_cmd_pool(qedi, session);
+       iscsi_session_teardown(cls_session);
+}
+
+static struct iscsi_cls_conn *
+qedi_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct qedi_ctx *qedi = iscsi_host_priv(shost);
+       struct iscsi_cls_conn *cls_conn;
+       struct qedi_conn *qedi_conn;
+       struct iscsi_conn *conn;
+
+       cls_conn = iscsi_conn_setup(cls_session, sizeof(*qedi_conn),
+                                   cid);
+       if (!cls_conn) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "conn_new: iscsi conn setup failed, cid=0x%x, cls_sess=%p!\n",
+                        cid, cls_session);
+               return NULL;
+       }
+
+       conn = cls_conn->dd_data;
+       qedi_conn = conn->dd_data;
+       qedi_conn->cls_conn = cls_conn;
+       qedi_conn->qedi = qedi;
+       qedi_conn->ep = NULL;
+       qedi_conn->active_cmd_count = 0;
+       INIT_LIST_HEAD(&qedi_conn->active_cmd_list);
+       spin_lock_init(&qedi_conn->list_lock);
+
+       if (qedi_conn_alloc_login_resources(qedi, qedi_conn)) {
+               iscsi_conn_printk(KERN_ALERT, conn,
+                                 "conn_new: login resc alloc failed, cid=0x%x, cls_sess=%p!!\n",
+                                  cid, cls_session);
+               goto free_conn;
+       }
+
+       return cls_conn;
+
+free_conn:
+       iscsi_conn_teardown(cls_conn);
+       return NULL;
+}
+
+void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
+{
+       iscsi_block_session(cls_session);
+}
+
+void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
+{
+       iscsi_unblock_session(cls_session);
+}
+
+static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
+                                      struct qedi_conn *qedi_conn)
+{
+       u32 iscsi_cid = qedi_conn->iscsi_conn_id;
+
+       if (qedi->cid_que.conn_cid_tbl[iscsi_cid]) {
+               iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+                                 "conn bind - entry #%d not free\n",
+                                 iscsi_cid);
+               return -EBUSY;
+       }
+
+       qedi->cid_que.conn_cid_tbl[iscsi_cid] = qedi_conn;
+       return 0;
+}
+
+struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid)
+{
+       if (!qedi->cid_que.conn_cid_tbl) {
+               QEDI_ERR(&qedi->dbg_ctx, "missing conn<->cid table\n");
+               return NULL;
+
+       } else if (iscsi_cid >= qedi->max_active_conns) {
+               QEDI_ERR(&qedi->dbg_ctx, "wrong cid #%d\n", iscsi_cid);
+               return NULL;
+       }
+       return qedi->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
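+/*
+ * Bind an offloaded endpoint to an iSCSI connection: endpoints whose
+ * TCP connection is already closing are rejected, the cid mappings are
+ * wired up and the TMF bookkeeping is initialized.
+ */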
+static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
+                         struct iscsi_cls_conn *cls_conn,
+                         u64 transport_fd, int is_leading)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct qedi_ctx *qedi = iscsi_host_priv(shost);
+       struct qedi_endpoint *qedi_ep;
+       struct iscsi_endpoint *ep;
+
+       ep = iscsi_lookup_endpoint(transport_fd);
+       if (!ep)
+               return -EINVAL;
+
+       qedi_ep = ep->dd_data;
+       if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+           (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
+               return -EINVAL;
+
+       if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+               return -EINVAL;
+
+       qedi_ep->conn = qedi_conn;
+       qedi_conn->ep = qedi_ep;
+       qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
+       qedi_conn->fw_cid = qedi_ep->fw_cid;
+       qedi_conn->cmd_cleanup_req = 0;
+       qedi_conn->cmd_cleanup_cmpl = 0;
+
+       if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
+               return -EINVAL;
+
+       spin_lock_init(&qedi_conn->tmf_work_lock);
+       INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
+       init_waitqueue_head(&qedi_conn->wait_queue);
+       return 0;
+}
+
+static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
+                                 struct qedi_conn *qedi_conn)
+{
+       struct qed_iscsi_params_update *conn_info;
+       struct iscsi_cls_conn *cls_conn = qedi_conn->cls_conn;
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qedi_endpoint *qedi_ep;
+       int rval;
+
+       qedi_ep = qedi_conn->ep;
+
+       conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+       if (!conn_info) {
+               QEDI_ERR(&qedi->dbg_ctx, "memory alloc failed\n");
+               return -ENOMEM;
+       }
+
+       conn_info->update_flag = 0;
+
+       if (conn->hdrdgst_en)
+               SET_FIELD(conn_info->update_flag,
+                         ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, true);
+       if (conn->datadgst_en)
+               SET_FIELD(conn_info->update_flag,
+                         ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, true);
+       if (conn->session->initial_r2t_en)
+               SET_FIELD(conn_info->update_flag,
+                         ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T,
+                         true);
+       if (conn->session->imm_data_en)
+               SET_FIELD(conn_info->update_flag,
+                         ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA,
+                         true);
+
+       conn_info->max_seq_size = conn->session->max_burst;
+       conn_info->max_recv_pdu_length = conn->max_recv_dlength;
+       conn_info->max_send_pdu_length = conn->max_xmit_dlength;
+       conn_info->first_seq_length = conn->session->first_burst;
+       conn_info->exp_stat_sn = conn->exp_statsn;
+
+       rval = qedi_ops->update_conn(qedi->cdev, qedi_ep->handle,
+                                    conn_info);
+       if (rval) {
+               rval = -ENXIO;
+               QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
+               goto update_conn_err;
+       }
+
+       kfree(conn_info);
+       rval = 0;
+
+update_conn_err:
+       return rval;
+}
+
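+/*
+ * Derive the TCP MSS from the path MTU using the fixed header lengths
+ * defined in qedi_iscsi.h.  Illustrative example: on a 1500-byte IPv4
+ * path with a VLAN tag and TCP timestamps enabled,
+ * mss = 1500 - (20 TCP + 20 IPv4 + 4 VLAN) - 12 = 1444.
+ */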
+static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
+{
+       u16 mss = 0;
+       u16 hdrs = TCP_HDR_LEN;
+
+       if (is_ipv6)
+               hdrs += IPV6_HDR_LEN;
+       else
+               hdrs += IPV4_HDR_LEN;
+
+       if (vlan_en)
+               hdrs += VLAN_LEN;
+
+       mss = pmtu - hdrs;
+
+       if (tcp_ts_en)
+               mss -= TCP_OPTION_LEN;
+
+       if (!mss)
+               mss = DEF_MSS;
+
+       return mss;
+}
+
+static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
+{
+       struct qedi_ctx *qedi = qedi_ep->qedi;
+       struct qed_iscsi_params_offload *conn_info;
+       int rval;
+       int i;
+
+       conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+       if (!conn_info) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failed to allocate memory ep=%p\n", qedi_ep);
+               return -ENOMEM;
+       }
+
+       ether_addr_copy(conn_info->src.mac, qedi_ep->src_mac);
+       ether_addr_copy(conn_info->dst.mac, qedi_ep->dst_mac);
+
+       conn_info->src.ip[0] = ntohl(qedi_ep->src_addr[0]);
+       conn_info->dst.ip[0] = ntohl(qedi_ep->dst_addr[0]);
+
+       if (qedi_ep->ip_type == TCP_IPV4) {
+               conn_info->ip_version = 0;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "After ntohl: src_addr=%pI4, dst_addr=%pI4\n",
+                         qedi_ep->src_addr, qedi_ep->dst_addr);
+       } else {
+               for (i = 1; i < 4; i++) {
+                       conn_info->src.ip[i] = ntohl(qedi_ep->src_addr[i]);
+                       conn_info->dst.ip[i] = ntohl(qedi_ep->dst_addr[i]);
+               }
+
+               conn_info->ip_version = 1;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "After ntohl: src_addr=%pI6, dst_addr=%pI6\n",
+                         qedi_ep->src_addr, qedi_ep->dst_addr);
+       }
+
+       conn_info->src.port = qedi_ep->src_port;
+       conn_info->dst.port = qedi_ep->dst_port;
+
+       conn_info->layer_code = ISCSI_SLOW_PATH_LAYER_CODE;
+       conn_info->sq_pbl_addr = qedi_ep->sq_pbl_dma;
+       conn_info->vlan_id = qedi_ep->vlan_id;
+
+       SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
+       SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_EN, 1);
+       SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_DA_CNT_EN, 1);
+       SET_FIELD(conn_info->tcp_flags, TCP_OFFLOAD_PARAMS_KA_EN, 1);
+
+       conn_info->default_cq = (qedi_ep->fw_cid % 8);
+
+       conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
+       conn_info->dup_ack_theshold = 3;
+       conn_info->rcv_wnd = 65535;
+       conn_info->cwnd = DEF_MAX_CWND;
+
+       conn_info->ss_thresh = 65535;
+       conn_info->srtt = 300;
+       conn_info->rtt_var = 150;
+       conn_info->flow_label = 0;
+       conn_info->ka_timeout = DEF_KA_TIMEOUT;
+       conn_info->ka_interval = DEF_KA_INTERVAL;
+       conn_info->max_rt_time = DEF_MAX_RT_TIME;
+       conn_info->ttl = DEF_TTL;
+       conn_info->tos_or_tc = DEF_TOS;
+       conn_info->remote_port = qedi_ep->dst_port;
+       conn_info->local_port = qedi_ep->src_port;
+
+       conn_info->mss = qedi_calc_mss(qedi_ep->pmtu,
+                                      (qedi_ep->ip_type == TCP_IPV6),
+                                      1, (qedi_ep->vlan_id != 0));
+
+       conn_info->rcv_wnd_scale = 4;
+       conn_info->ts_ticks_per_second = 1000;
+       conn_info->da_timeout_value = 200;
+       conn_info->ack_frequency = 2;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "Default cq index [%d], mss [%d]\n",
+                 conn_info->default_cq, conn_info->mss);
+
+       rval = qedi_ops->offload_conn(qedi->cdev, qedi_ep->handle, conn_info);
+       if (rval)
+               QEDI_ERR(&qedi->dbg_ctx, "offload_conn returned %d, ep=%p\n",
+                        rval, qedi_ep);
+
+       kfree(conn_info);
+       return rval;
+}
+
+static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_ctx *qedi;
+       int rval;
+
+       qedi = qedi_conn->qedi;
+
+       rval = qedi_iscsi_update_conn(qedi, qedi_conn);
+       if (rval) {
+               iscsi_conn_printk(KERN_ALERT, conn,
+                                 "conn_start: FW offload conn failed.\n");
+               rval = -EINVAL;
+               goto start_err;
+       }
+
+       clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+       qedi_conn->abrt_conn = 0;
+
+       rval = iscsi_conn_start(cls_conn);
+       if (rval) {
+               iscsi_conn_printk(KERN_ALERT, conn,
+                                 "conn_start: iscsi_conn_start failed!!\n");
+       }
+
+start_err:
+       return rval;
+}
+
+static void qedi_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct Scsi_Host *shost;
+       struct qedi_ctx *qedi;
+
+       shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+       qedi = iscsi_host_priv(shost);
+
+       qedi_conn_free_login_resources(qedi, qedi_conn);
+       iscsi_conn_teardown(cls_conn);
+}
+
+static int qedi_ep_get_param(struct iscsi_endpoint *ep,
+                            enum iscsi_param param, char *buf)
+{
+       struct qedi_endpoint *qedi_ep = ep->dd_data;
+       int len;
+
+       if (!qedi_ep)
+               return -ENOTCONN;
+
+       switch (param) {
+       case ISCSI_PARAM_CONN_PORT:
+               len = sprintf(buf, "%hu\n", qedi_ep->dst_port);
+               break;
+       case ISCSI_PARAM_CONN_ADDRESS:
+               if (qedi_ep->ip_type == TCP_IPV4)
+                       len = sprintf(buf, "%pI4\n", qedi_ep->dst_addr);
+               else
+                       len = sprintf(buf, "%pI6\n", qedi_ep->dst_addr);
+               break;
+       default:
+               return -ENOTCONN;
+       }
+
+       return len;
+}
+
+static int qedi_host_get_param(struct Scsi_Host *shost,
+                              enum iscsi_host_param param, char *buf)
+{
+       struct qedi_ctx *qedi;
+       int len;
+
+       qedi = iscsi_host_priv(shost);
+
+       switch (param) {
+       case ISCSI_HOST_PARAM_HWADDRESS:
+               len = sysfs_format_mac(buf, qedi->mac, 6);
+               break;
+       case ISCSI_HOST_PARAM_NETDEV_NAME:
+               len = sprintf(buf, "host%d\n", shost->host_no);
+               break;
+       case ISCSI_HOST_PARAM_IPADDRESS:
+               if (qedi->ip_type == TCP_IPV4)
+                       len = sprintf(buf, "%pI4\n", qedi->src_ip);
+               else
+                       len = sprintf(buf, "%pI6\n", qedi->src_ip);
+               break;
+       default:
+               return iscsi_host_get_param(shost, param, buf);
+       }
+
+       return len;
+}
+
+static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+                               struct iscsi_stats *stats)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct qed_iscsi_stats iscsi_stats;
+       struct Scsi_Host *shost;
+       struct qedi_ctx *qedi;
+
+       shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+       qedi = iscsi_host_priv(shost);
+       qedi_ops->get_stats(qedi->cdev, &iscsi_stats);
+
+       conn->txdata_octets = iscsi_stats.iscsi_tx_bytes_cnt;
+       conn->rxdata_octets = iscsi_stats.iscsi_rx_bytes_cnt;
+       conn->dataout_pdus_cnt = (uint32_t)iscsi_stats.iscsi_tx_data_pdu_cnt;
+       conn->datain_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_data_pdu_cnt;
+       conn->r2t_pdus_cnt = (uint32_t)iscsi_stats.iscsi_rx_r2t_pdu_cnt;
+
+       stats->txdata_octets = conn->txdata_octets;
+       stats->rxdata_octets = conn->rxdata_octets;
+       stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+       stats->dataout_pdus = conn->dataout_pdus_cnt;
+       stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+       stats->datain_pdus = conn->datain_pdus_cnt;
+       stats->r2t_pdus = conn->r2t_pdus_cnt;
+       stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+       stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+       stats->digest_err = 0;
+       stats->timeout_err = 0;
+       strcpy(stats->custom[0].desc, "eh_abort_cnt");
+       stats->custom[0].value = conn->eh_abort_cnt;
+       stats->custom_length = 1;
+}
+
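+/*
+ * Build the request/response buffer descriptors for a generic (non-SCSI)
+ * PDU.  The request SGE covers only the bytes staged so far
+ * (req_wr_ptr - req_buf), while the response SGE always advertises the
+ * full ISCSI_DEF_MAX_RECV_SEG_LEN receive buffer.
+ */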
+static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
+{
+       struct iscsi_sge *bd_tbl;
+
+       bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+
+       bd_tbl->sge_addr.hi =
+               (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+       bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
+       bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
+                               qedi_conn->gen_pdu.req_buf;
+       bd_tbl->reserved0 = 0;
+       bd_tbl = (struct iscsi_sge  *)qedi_conn->gen_pdu.resp_bd_tbl;
+       bd_tbl->sge_addr.hi =
+                       (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+       bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
+       bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
+       bd_tbl->reserved0 = 0;
+}
+
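+/*
+ * Dispatch a generic PDU to the matching firmware send routine based on
+ * the iSCSI opcode.  Note that the login path does not propagate the
+ * return value of qedi_send_iscsi_login(), so login submission errors
+ * are not reported to the caller here.
+ */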
+static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
+{
+       struct qedi_cmd *cmd = task->dd_data;
+       struct qedi_conn *qedi_conn = cmd->conn;
+       char *buf;
+       int data_len;
+       int rc = 0;
+
+       qedi_iscsi_prep_generic_pdu_bd(qedi_conn);
+       switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+       case ISCSI_OP_LOGIN:
+               qedi_send_iscsi_login(qedi_conn, task);
+               break;
+       case ISCSI_OP_NOOP_OUT:
+               data_len = qedi_conn->gen_pdu.req_buf_size;
+               buf = qedi_conn->gen_pdu.req_buf;
+               if (data_len)
+                       rc = qedi_send_iscsi_nopout(qedi_conn, task,
+                                                   buf, data_len, 1);
+               else
+                       rc = qedi_send_iscsi_nopout(qedi_conn, task,
+                                                   NULL, 0, 1);
+               break;
+       case ISCSI_OP_LOGOUT:
+               rc = qedi_send_iscsi_logout(qedi_conn, task);
+               break;
+       case ISCSI_OP_SCSI_TMFUNC:
+               rc = qedi_iscsi_abort_work(qedi_conn, task);
+               break;
+       case ISCSI_OP_TEXT:
+               rc = qedi_send_iscsi_text(qedi_conn, task);
+               break;
+       default:
+               iscsi_conn_printk(KERN_ALERT, qedi_conn->cls_conn->dd_data,
+                                 "unsupported op 0x%x\n", task->hdr->opcode);
+       }
+
+       return rc;
+}
+
+static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_cmd *cmd = task->dd_data;
+
+       memset(qedi_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+       qedi_conn->gen_pdu.req_buf_size = task->data_count;
+
+       if (task->data_count) {
+               memcpy(qedi_conn->gen_pdu.req_buf, task->data,
+                      task->data_count);
+               qedi_conn->gen_pdu.req_wr_ptr =
+                       qedi_conn->gen_pdu.req_buf + task->data_count;
+       }
+
+       cmd->conn = conn->dd_data;
+       cmd->scsi_cmd = NULL;
+       return qedi_iscsi_send_generic_request(task);
+}
+
+static int qedi_task_xmit(struct iscsi_task *task)
+{
+       struct iscsi_conn *conn = task->conn;
+       struct qedi_conn *qedi_conn = conn->dd_data;
+       struct qedi_cmd *cmd = task->dd_data;
+       struct scsi_cmnd *sc = task->sc;
+
+       cmd->state = 0;
+       cmd->task = NULL;
+       cmd->use_slowpath = false;
+       cmd->conn = qedi_conn;
+       cmd->task = task;
+       cmd->io_cmd_in_list = false;
+       INIT_LIST_HEAD(&cmd->io_cmd);
+
+       if (!sc)
+               return qedi_mtask_xmit(conn, task);
+
+       cmd->scsi_cmd = sc;
+       return qedi_iscsi_send_ioreq(task);
+}
+
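+/*
+ * ep_connect flow: allocate an endpoint and its SQ, acquire a firmware
+ * connection handle, then send an ISCSI_KEVENT_PATH_REQ message up to
+ * userspace.  The userspace answer arrives through qedi_set_path(),
+ * which queues qedi_offload_work() to perform the actual TCP offload.
+ */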
+static struct iscsi_endpoint *
+qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+               int non_blocking)
+{
+       struct qedi_ctx *qedi;
+       struct iscsi_endpoint *ep;
+       struct qedi_endpoint *qedi_ep;
+       struct sockaddr_in *addr;
+       struct sockaddr_in6 *addr6;
+       struct qed_dev *cdev  =  NULL;
+       struct qedi_uio_dev *udev = NULL;
+       struct iscsi_path path_req;
+       u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+       u32 iscsi_cid = QEDI_CID_RESERVED;
+       u16 len = 0;
+       char *buf = NULL;
+       int ret;
+
+       if (!shost) {
+               ret = -ENXIO;
+               QEDI_ERR(NULL, "shost is NULL\n");
+               return ERR_PTR(ret);
+       }
+
+       if (do_not_recover) {
+               ret = -ENOMEM;
+               return ERR_PTR(ret);
+       }
+
+       qedi = iscsi_host_priv(shost);
+       cdev = qedi->cdev;
+       udev = qedi->udev;
+
+       if (test_bit(QEDI_IN_OFFLINE, &qedi->flags) ||
+           test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+               ret = -ENOMEM;
+               return ERR_PTR(ret);
+       }
+
+       ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
+       if (!ep) {
+               QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
+               ret = -ENOMEM;
+               return ERR_PTR(ret);
+       }
+       qedi_ep = ep->dd_data;
+       memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+       qedi_ep->state = EP_STATE_IDLE;
+       qedi_ep->iscsi_cid = (u32)-1;
+       qedi_ep->qedi = qedi;
+
+       if (dst_addr->sa_family == AF_INET) {
+               addr = (struct sockaddr_in *)dst_addr;
+               memcpy(qedi_ep->dst_addr, &addr->sin_addr.s_addr,
+                      sizeof(struct in_addr));
+               qedi_ep->dst_port = ntohs(addr->sin_port);
+               qedi_ep->ip_type = TCP_IPV4;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "dst_addr=%pI4, dst_port=%u\n",
+                         qedi_ep->dst_addr, qedi_ep->dst_port);
+       } else if (dst_addr->sa_family == AF_INET6) {
+               addr6 = (struct sockaddr_in6 *)dst_addr;
+               memcpy(qedi_ep->dst_addr, &addr6->sin6_addr,
+                      sizeof(struct in6_addr));
+               qedi_ep->dst_port = ntohs(addr6->sin6_port);
+               qedi_ep->ip_type = TCP_IPV6;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "dst_addr=%pI6, dst_port=%u\n",
+                         qedi_ep->dst_addr, qedi_ep->dst_port);
+       } else {
+               QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
+       }
+
+       if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+               QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+               ret = -ENXIO;
+               goto ep_conn_exit;
+       }
+
+       ret = qedi_alloc_sq(qedi, qedi_ep);
+       if (ret)
+               goto ep_conn_exit;
+
+       ret = qedi_ops->acquire_conn(qedi->cdev, &qedi_ep->handle,
+                                    &qedi_ep->fw_cid, &qedi_ep->p_doorbell);
+
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx, "Could not acquire connection\n");
+               ret = -ENXIO;
+               goto ep_free_sq;
+       }
+
+       iscsi_cid = qedi_ep->handle;
+       qedi_ep->iscsi_cid = iscsi_cid;
+
+       init_waitqueue_head(&qedi_ep->ofld_wait);
+       init_waitqueue_head(&qedi_ep->tcp_ofld_wait);
+       qedi_ep->state = EP_STATE_OFLDCONN_START;
+       qedi->ep_tbl[iscsi_cid] = qedi_ep;
+
+       buf = (char *)&path_req;
+       len = sizeof(path_req);
+       memset(&path_req, 0, len);
+
+       msg_type = ISCSI_KEVENT_PATH_REQ;
+       path_req.handle = (u64)qedi_ep->iscsi_cid;
+       path_req.pmtu = qedi->ll2_mtu;
+       qedi_ep->pmtu = qedi->ll2_mtu;
+       if (qedi_ep->ip_type == TCP_IPV4) {
+               memcpy(&path_req.dst.v4_addr, &qedi_ep->dst_addr,
+                      sizeof(struct in_addr));
+               path_req.ip_addr_len = 4;
+       } else {
+               memcpy(&path_req.dst.v6_addr, &qedi_ep->dst_addr,
+                      sizeof(struct in6_addr));
+               path_req.ip_addr_len = 16;
+       }
+
+       ret = iscsi_offload_mesg(shost, &qedi_iscsi_transport, msg_type, buf,
+                                len);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "iscsi_offload_mesg() failed for cid=0x%x ret=%d\n",
+                        iscsi_cid, ret);
+               goto ep_rel_conn;
+       }
+
+       atomic_inc(&qedi->num_offloads);
+       return ep;
+
+ep_rel_conn:
+       qedi->ep_tbl[iscsi_cid] = NULL;
+       ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+       if (ret)
+               QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
+                         ret);
+ep_free_sq:
+       qedi_free_sq(qedi, qedi_ep);
+ep_conn_exit:
+       iscsi_destroy_endpoint(ep);
+       return ERR_PTR(ret);
+}
+
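+/*
+ * Poll the offload progress: returns 1 once the connection reached
+ * EP_STATE_OFLDCONN_COMPL, 0 if the wait timed out with the offload
+ * still pending, and a negative value on failure or interruption.  The
+ * early "ret = 1" assignment is overwritten by the wait below.
+ */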
+static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+       struct qedi_endpoint *qedi_ep;
+       int ret = 0;
+
+       if (do_not_recover)
+               return 1;
+
+       qedi_ep = ep->dd_data;
+       if (qedi_ep->state == EP_STATE_IDLE ||
+           qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+               return -1;
+
+       if (qedi_ep->state == EP_STATE_OFLDCONN_COMPL)
+               ret = 1;
+
+       ret = wait_event_interruptible_timeout(qedi_ep->ofld_wait,
+                                              QEDI_OFLD_WAIT_STATE(qedi_ep),
+                                              msecs_to_jiffies(timeout_ms));
+
+       if (qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
+               ret = -1;
+
+       if (ret > 0)
+               return 1;
+       else if (!ret)
+               return 0;
+       else
+               return ret;
+}
+
+static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
+{
+       struct qedi_cmd *cmd, *cmd_tmp;
+
+       list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
+                                io_cmd) {
+               list_del_init(&cmd->io_cmd);
+               qedi_conn->active_cmd_count--;
+       }
+}
+
+static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+{
+       struct qedi_endpoint *qedi_ep;
+       struct qedi_conn *qedi_conn = NULL;
+       struct iscsi_conn *conn = NULL;
+       struct qedi_ctx *qedi;
+       int ret = 0;
+       int wait_delay = 20 * HZ;
+       int abrt_conn = 0;
+       int count = 10;
+
+       qedi_ep = ep->dd_data;
+       qedi = qedi_ep->qedi;
+
+       flush_work(&qedi_ep->offload_work);
+
+       if (qedi_ep->conn) {
+               qedi_conn = qedi_ep->conn;
+               conn = qedi_conn->cls_conn->dd_data;
+               iscsi_suspend_queue(conn);
+               abrt_conn = qedi_conn->abrt_conn;
+
+               while (count--) {
+                       if (!test_bit(QEDI_CONN_FW_CLEANUP,
+                                     &qedi_conn->flags)) {
+                               break;
+                       }
+                       msleep(1000);
+               }
+
+               if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+                       if (do_not_recover) {
+                               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                                         "Do not recover cid=0x%x\n",
+                                         qedi_ep->iscsi_cid);
+                               goto ep_exit_recover;
+                       }
+                       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                                 "Reset recovery cid=0x%x, qedi_ep=%p, state=0x%x\n",
+                                 qedi_ep->iscsi_cid, qedi_ep, qedi_ep->state);
+                       qedi_cleanup_active_cmd_list(qedi_conn);
+                       goto ep_release_conn;
+               }
+       }
+
+       if (do_not_recover)
+               goto ep_exit_recover;
+
+       switch (qedi_ep->state) {
+       case EP_STATE_OFLDCONN_START:
+               goto ep_release_conn;
+       case EP_STATE_OFLDCONN_FAILED:
+               break;
+       case EP_STATE_OFLDCONN_COMPL:
+               if (unlikely(!qedi_conn))
+                       break;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Active cmd count=%d, abrt_conn=%d, ep state=0x%x, cid=0x%x, qedi_conn=%p\n",
+                         qedi_conn->active_cmd_count, abrt_conn,
+                         qedi_ep->state,
+                         qedi_ep->iscsi_cid,
+                         qedi_ep->conn
+                         );
+
+               if (!qedi_conn->active_cmd_count)
+                       abrt_conn = 0;
+               else
+                       abrt_conn = 1;
+
+               if (abrt_conn)
+                       qedi_clearsq(qedi, qedi_conn, NULL);
+               break;
+       default:
+               break;
+       }
+
+       qedi_ep->state = EP_STATE_DISCONN_START;
+       ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
+       if (ret) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "destroy_conn failed, returned %d\n", ret);
+       } else {
+               ret = wait_event_interruptible_timeout(
+                                       qedi_ep->tcp_ofld_wait,
+                                       (qedi_ep->state !=
+                                        EP_STATE_DISCONN_START),
+                                       wait_delay);
+               if ((ret <= 0) || (qedi_ep->state == EP_STATE_DISCONN_START)) {
+                       QEDI_WARN(&qedi->dbg_ctx,
+                                 "Destroy conn timed out or interrupted, ret=%d, delay=%d, cid=0x%x\n",
+                                 ret, wait_delay, qedi_ep->iscsi_cid);
+               }
+       }
+
+ep_release_conn:
+       ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
+       if (ret)
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "release_conn returned %d, cid=0x%x\n",
+                         ret, qedi_ep->iscsi_cid);
+ep_exit_recover:
+       qedi_ep->state = EP_STATE_IDLE;
+       qedi->ep_tbl[qedi_ep->iscsi_cid] = NULL;
+       qedi->cid_que.conn_cid_tbl[qedi_ep->iscsi_cid] = NULL;
+       qedi_free_id(&qedi->lcl_port_tbl, qedi_ep->src_port);
+       qedi_free_sq(qedi, qedi_ep);
+
+       if (qedi_conn)
+               qedi_conn->ep = NULL;
+
+       qedi_ep->conn = NULL;
+       qedi_ep->qedi = NULL;
+       atomic_dec(&qedi->num_offloads);
+
+       iscsi_destroy_endpoint(ep);
+}
+
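+/*
+ * Transmit a packet that the userspace agent staged in the shared UIO tx
+ * buffer: copy it into an skb, restore any VLAN tag, and send it out
+ * through the qed light-L2 (ll2) interface.
+ */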
+static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
+{
+       struct qed_dev *cdev = qedi->cdev;
+       struct qedi_uio_dev *udev;
+       struct qedi_uio_ctrl *uctrl;
+       struct sk_buff *skb;
+       u32 len;
+       int rc = 0;
+
+       udev = qedi->udev;
+       if (!udev) {
+               QEDI_ERR(&qedi->dbg_ctx, "udev is NULL.\n");
+               return -EINVAL;
+       }
+
+       uctrl = (struct qedi_uio_ctrl *)udev->uctrl;
+       if (!uctrl) {
+               QEDI_ERR(&qedi->dbg_ctx, "uctlr is NULL.\n");
+               return -EINVAL;
+       }
+
+       len = uctrl->host_tx_pkt_len;
+       if (!len) {
+               QEDI_ERR(&qedi->dbg_ctx, "Invalid len %u\n", len);
+               return -EINVAL;
+       }
+
+       skb = alloc_skb(len, GFP_ATOMIC);
+       if (!skb) {
+               QEDI_ERR(&qedi->dbg_ctx, "alloc_skb failed\n");
+               return -EINVAL;
+       }
+
+       skb_put(skb, len);
+       memcpy(skb->data, udev->tx_pkt, len);
+       skb->ip_summed = CHECKSUM_NONE;
+
+       if (vlanid)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+
+       rc = qedi_ops->ll2->start_xmit(cdev, skb);
+       if (rc) {
+               QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
+                        rc);
+               kfree_skb(skb);
+       }
+
+       uctrl->host_tx_pkt_len = 0;
+       uctrl->hw_tx_cons++;
+
+       return rc;
+}
+
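+/*
+ * Worker queued from qedi_set_path(): issue the firmware offload request
+ * and wait up to 20 seconds for the ASYN_CONNECT_COMPLETE event to move
+ * the endpoint to EP_STATE_OFLDCONN_COMPL.
+ */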
+static void qedi_offload_work(struct work_struct *work)
+{
+       struct qedi_endpoint *qedi_ep =
+               container_of(work, struct qedi_endpoint, offload_work);
+       struct qedi_ctx *qedi;
+       int wait_delay = 20 * HZ;
+       int ret;
+
+       qedi = qedi_ep->qedi;
+
+       ret = qedi_iscsi_offload_conn(qedi_ep);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+                        qedi_ep->iscsi_cid, qedi_ep, ret);
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               return;
+       }
+
+       ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+                                              (qedi_ep->state ==
+                                              EP_STATE_OFLDCONN_COMPL),
+                                              wait_delay);
+       if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+                        qedi_ep->iscsi_cid, qedi_ep);
+       }
+}
+
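+/*
+ * iscsi_transport set_path handler, called when userspace answers the
+ * PATH_REQ sent from qedi_ep_connect(): validate the resolved MAC and
+ * MTU, pick a local TCP port from lcl_port_tbl, record the source IP,
+ * and queue the offload work.
+ */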
+static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
+{
+       struct qedi_ctx *qedi;
+       struct qedi_endpoint *qedi_ep;
+       int ret = 0;
+       u32 iscsi_cid;
+       u16 port_id = 0;
+
+       if (!shost) {
+               ret = -ENXIO;
+               QEDI_ERR(NULL, "shost is NULL\n");
+               return ret;
+       }
+
+       if (strcmp(shost->hostt->proc_name, "qedi")) {
+               ret = -ENXIO;
+               QEDI_ERR(NULL, "shost %s is invalid\n",
+                        shost->hostt->proc_name);
+               return ret;
+       }
+
+       qedi = iscsi_host_priv(shost);
+       if (path_data->handle == QEDI_PATH_HANDLE) {
+               ret = qedi_data_avail(qedi, path_data->vlan_id);
+               goto set_path_exit;
+       }
+
+       iscsi_cid = (u32)path_data->handle;
+       qedi_ep = qedi->ep_tbl[iscsi_cid];
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
+
+       if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
+               QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+               ret = -EIO;
+               goto set_path_exit;
+       }
+
+       ether_addr_copy(&qedi_ep->src_mac[0], &qedi->mac[0]);
+       ether_addr_copy(&qedi_ep->dst_mac[0], &path_data->mac_addr[0]);
+
+       qedi_ep->vlan_id = path_data->vlan_id;
+       if (path_data->pmtu < DEF_PATH_MTU) {
+               qedi_ep->pmtu = qedi->ll2_mtu;
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "MTU cannot be %u, using default MTU %u\n",
+                          path_data->pmtu, qedi_ep->pmtu);
+       }
+
+       if (path_data->pmtu != qedi->ll2_mtu) {
+               if (path_data->pmtu > JUMBO_MTU) {
+                       ret = -EINVAL;
+                       QEDI_ERR(NULL, "Invalid MTU %u\n", path_data->pmtu);
+                       goto set_path_exit;
+               }
+
+               qedi_reset_host_mtu(qedi, path_data->pmtu);
+               qedi_ep->pmtu = qedi->ll2_mtu;
+       }
+
+       port_id = qedi_ep->src_port;
+       if (port_id >= QEDI_LOCAL_PORT_MIN &&
+           port_id < QEDI_LOCAL_PORT_MAX) {
+               if (qedi_alloc_id(&qedi->lcl_port_tbl, port_id))
+                       port_id = 0;
+       } else {
+               port_id = 0;
+       }
+
+       if (!port_id) {
+               port_id = qedi_alloc_new_id(&qedi->lcl_port_tbl);
+               if (port_id == QEDI_LOCAL_PORT_INVALID) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Failed to allocate port id for iscsi_cid=0x%x\n",
+                                iscsi_cid);
+                       ret = -ENOMEM;
+                       goto set_path_exit;
+               }
+       }
+
+       qedi_ep->src_port = port_id;
+
+       if (qedi_ep->ip_type == TCP_IPV4) {
+               memcpy(&qedi_ep->src_addr[0], &path_data->src.v4_addr,
+                      sizeof(struct in_addr));
+               memcpy(&qedi->src_ip[0], &path_data->src.v4_addr,
+                      sizeof(struct in_addr));
+               qedi->ip_type = TCP_IPV4;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "src addr:port=%pI4:%u, dst addr:port=%pI4:%u\n",
+                         qedi_ep->src_addr, qedi_ep->src_port,
+                         qedi_ep->dst_addr, qedi_ep->dst_port);
+       } else {
+               memcpy(&qedi_ep->src_addr[0], &path_data->src.v6_addr,
+                      sizeof(struct in6_addr));
+               memcpy(&qedi->src_ip[0], &path_data->src.v6_addr,
+                      sizeof(struct in6_addr));
+               qedi->ip_type = TCP_IPV6;
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "src addr:port=%pI6:%u, dst addr:port=%pI6:%u\n",
+                         qedi_ep->src_addr, qedi_ep->src_port,
+                         qedi_ep->dst_addr, qedi_ep->dst_port);
+       }
+
+       INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
+       queue_work(qedi->offload_thread, &qedi_ep->offload_work);
+
+       ret = 0;
+
+set_path_exit:
+       return ret;
+}
+
+static umode_t qedi_attr_is_visible(int param_type, int param)
+{
+       switch (param_type) {
+       case ISCSI_HOST_PARAM:
+               switch (param) {
+               case ISCSI_HOST_PARAM_NETDEV_NAME:
+               case ISCSI_HOST_PARAM_HWADDRESS:
+               case ISCSI_HOST_PARAM_IPADDRESS:
+                       return 0444;
+               default:
+                       return 0;
+               }
+       case ISCSI_PARAM:
+               switch (param) {
+               case ISCSI_PARAM_MAX_RECV_DLENGTH:
+               case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+               case ISCSI_PARAM_HDRDGST_EN:
+               case ISCSI_PARAM_DATADGST_EN:
+               case ISCSI_PARAM_CONN_ADDRESS:
+               case ISCSI_PARAM_CONN_PORT:
+               case ISCSI_PARAM_EXP_STATSN:
+               case ISCSI_PARAM_PERSISTENT_ADDRESS:
+               case ISCSI_PARAM_PERSISTENT_PORT:
+               case ISCSI_PARAM_PING_TMO:
+               case ISCSI_PARAM_RECV_TMO:
+               case ISCSI_PARAM_INITIAL_R2T_EN:
+               case ISCSI_PARAM_MAX_R2T:
+               case ISCSI_PARAM_IMM_DATA_EN:
+               case ISCSI_PARAM_FIRST_BURST:
+               case ISCSI_PARAM_MAX_BURST:
+               case ISCSI_PARAM_PDU_INORDER_EN:
+               case ISCSI_PARAM_DATASEQ_INORDER_EN:
+               case ISCSI_PARAM_ERL:
+               case ISCSI_PARAM_TARGET_NAME:
+               case ISCSI_PARAM_TPGT:
+               case ISCSI_PARAM_USERNAME:
+               case ISCSI_PARAM_PASSWORD:
+               case ISCSI_PARAM_USERNAME_IN:
+               case ISCSI_PARAM_PASSWORD_IN:
+               case ISCSI_PARAM_FAST_ABORT:
+               case ISCSI_PARAM_ABORT_TMO:
+               case ISCSI_PARAM_LU_RESET_TMO:
+               case ISCSI_PARAM_TGT_RESET_TMO:
+               case ISCSI_PARAM_IFACE_NAME:
+               case ISCSI_PARAM_INITIATOR_NAME:
+               case ISCSI_PARAM_BOOT_ROOT:
+               case ISCSI_PARAM_BOOT_NIC:
+               case ISCSI_PARAM_BOOT_TARGET:
+                       return 0444;
+               default:
+                       return 0;
+               }
+       }
+
+       return 0;
+}
+
+static void qedi_cleanup_task(struct iscsi_task *task)
+{
+       if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+               QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
+                         atomic_read(&task->refcount));
+               return;
+       }
+
+       qedi_iscsi_unmap_sg_list(task->dd_data);
+}
+
+struct iscsi_transport qedi_iscsi_transport = {
+       .owner = THIS_MODULE,
+       .name = QEDI_MODULE_NAME,
+       .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST |
+               CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO,
+       .create_session = qedi_session_create,
+       .destroy_session = qedi_session_destroy,
+       .create_conn = qedi_conn_create,
+       .bind_conn = qedi_conn_bind,
+       .start_conn = qedi_conn_start,
+       .stop_conn = iscsi_conn_stop,
+       .destroy_conn = qedi_conn_destroy,
+       .set_param = iscsi_set_param,
+       .get_ep_param = qedi_ep_get_param,
+       .get_conn_param = iscsi_conn_get_param,
+       .get_session_param = iscsi_session_get_param,
+       .get_host_param = qedi_host_get_param,
+       .send_pdu = iscsi_conn_send_pdu,
+       .get_stats = qedi_conn_get_stats,
+       .xmit_task = qedi_task_xmit,
+       .cleanup_task = qedi_cleanup_task,
+       .session_recovery_timedout = iscsi_session_recovery_timedout,
+       .ep_connect = qedi_ep_connect,
+       .ep_poll = qedi_ep_poll,
+       .ep_disconnect = qedi_ep_disconnect,
+       .set_path = qedi_set_path,
+       .attr_is_visible = qedi_attr_is_visible,
+};
+
+void qedi_start_conn_recovery(struct qedi_ctx *qedi,
+                             struct qedi_conn *qedi_conn)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_conn *conn;
+
+       cls_conn = qedi_conn->cls_conn;
+       conn = cls_conn->dd_data;
+       cls_sess = iscsi_conn_to_session(cls_conn);
+
+       if (iscsi_is_session_online(cls_sess)) {
+               qedi_conn->abrt_conn = 1;
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Failing connection, state=0x%x, cid=0x%x\n",
+                        conn->session->state, qedi_conn->iscsi_conn_id);
+               iscsi_conn_failure(qedi_conn->cls_conn->dd_data,
+                                  ISCSI_ERR_CONN_FAILED);
+       }
+}
+
+static const struct {
+       enum iscsi_error_types error_code;
+       char *err_string;
+} qedi_iscsi_error[] = {
+       { ISCSI_STATUS_NONE,
+         "tcp_error none"
+       },
+       { ISCSI_CONN_ERROR_TASK_CID_MISMATCH,
+         "task cid mismatch"
+       },
+       { ISCSI_CONN_ERROR_TASK_NOT_VALID,
+         "invalid task"
+       },
+       { ISCSI_CONN_ERROR_RQ_RING_IS_FULL,
+         "rq ring full"
+       },
+       { ISCSI_CONN_ERROR_CMDQ_RING_IS_FULL,
+         "cmdq ring full"
+       },
+       { ISCSI_CONN_ERROR_HQE_CACHING_FAILED,
+         "sge caching failed"
+       },
+       { ISCSI_CONN_ERROR_HEADER_DIGEST_ERROR,
+         "hdr digest error"
+       },
+       { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR,
+         "local cmpl error"
+       },
+       { ISCSI_CONN_ERROR_DATA_OVERRUN,
+         "invalid task"
+       },
+       { ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
+         "out of sge error"
+       },
+       { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
+         "tcp seg ip options error"
+       },
+       { ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
+         "tcp ip fragment error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN,
+         "AHS len protocol error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_ITT_OUT_OF_RANGE,
+         "itt out of range error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_EXCEEDS_PDU_SIZE,
+         "data seg more than pdu size"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE,
+         "invalid opcode"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_OPCODE_BEFORE_UPDATE,
+         "invalid opcode before update"
+       },
+       { ISCSI_CONN_ERROR_UNVALID_NOPIN_DSL,
+         "unexpected opcode"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_CARRIES_NO_DATA,
+         "r2t carries no data"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SN,
+         "data sn error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_IN_TTT,
+         "data TTT error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_TTT,
+         "r2t TTT error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_BUFFER_OFFSET,
+         "buffer offset error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_BUFFER_OFFSET_OOO,
+         "buffer offset ooo"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_R2T_SN,
+         "data seg len 0"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0,
+         "data xfer len error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1,
+         "data xfer len1 error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_2,
+         "data xfer len2 error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_LUN,
+         "protocol lun error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_F_BIT_ZERO,
+         "f bit zero error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_EXP_STAT_SN,
+         "exp stat sn error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DSL_NOT_ZERO,
+         "dsl not zero error"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_INVALID_DSL,
+         "invalid dsl"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG,
+         "data seg len too big"
+       },
+       { ISCSI_CONN_ERROR_PROTOCOL_ERR_OUTSTANDING_R2T_COUNT,
+         "outstanding r2t count error"
+       },
+       { ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
+         "sense datalen error"
+       },
+};
+
+char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
+{
+       int i;
+       char *msg = NULL;
+
+       for (i = 0; i < ARRAY_SIZE(qedi_iscsi_error); i++) {
+               if (qedi_iscsi_error[i].error_code == err_code) {
+                       msg = qedi_iscsi_error[i].err_string;
+                       break;
+               }
+       }
+       return msg;
+}
+
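+/*
+ * Handle an async iSCSI protocol error reported by firmware.  err_mask
+ * is currently always zero, so every recognized error code is treated
+ * as fatal and triggers connection recovery; unknown codes are only
+ * logged.
+ */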
+void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+       struct qedi_conn *qedi_conn;
+       struct qedi_ctx *qedi;
+       char warn_notice[] = "iscsi_warning";
+       char error_notice[] = "iscsi_error";
+       char unknown_msg[] = "Unknown error";
+       char *message;
+       int need_recovery = 0;
+       u32 err_mask = 0;
+       char *msg;
+
+       if (!ep)
+               return;
+
+       qedi_conn = ep->conn;
+       if (!qedi_conn)
+               return;
+
+       qedi = ep->qedi;
+
+       QEDI_ERR(&qedi->dbg_ctx, "async event iscsi error:0x%x\n",
+                data->error_code);
+
+       if (err_mask) {
+               need_recovery = 0;
+               message = warn_notice;
+       } else {
+               need_recovery = 1;
+               message = error_notice;
+       }
+
+       msg = qedi_get_iscsi_error(data->error_code);
+       if (!msg) {
+               need_recovery = 0;
+               msg = unknown_msg;
+       }
+
+       iscsi_conn_printk(KERN_ALERT,
+                         qedi_conn->cls_conn->dd_data,
+                         "qedi: %s - %s\n", message, msg);
+
+       if (need_recovery)
+               qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
+void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+{
+       struct qedi_conn *qedi_conn;
+
+       if (!ep)
+               return;
+
+       qedi_conn = ep->conn;
+       if (!qedi_conn)
+               return;
+
+       QEDI_ERR(&ep->qedi->dbg_ctx, "async event TCP error:0x%x\n",
+                data->error_code);
+
+       qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
new file mode 100644 (file)
index 0000000..d3c06bb
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_ISCSI_H_
+#define _QEDI_ISCSI_H_
+
+#include <linux/socket.h>
+#include <linux/completion.h>
+#include "qedi.h"
+
+#define ISCSI_MAX_SESS_PER_HBA 4096
+
+#define DEF_KA_TIMEOUT         7200000
+#define DEF_KA_INTERVAL                10000
+#define DEF_KA_MAX_PROBE_COUNT 10
+#define DEF_TOS                        0
+#define DEF_TTL                        0xfe
+#define DEF_SND_SEQ_SCALE      0
+#define DEF_RCV_BUF            0xffff
+#define DEF_SND_BUF            0xffff
+#define DEF_SEED               0
+#define DEF_MAX_RT_TIME                8000
+#define DEF_MAX_DA_COUNT        2
+#define DEF_SWS_TIMER          1000
+#define DEF_MAX_CWND           2
+#define DEF_PATH_MTU           1500
+#define DEF_MSS                        1460
+#define DEF_LL2_MTU            1560
+#define JUMBO_MTU              9000
+
+#define MIN_MTU         576 /* rfc 793 */
+#define IPV4_HDR_LEN    20
+#define IPV6_HDR_LEN    40
+#define TCP_HDR_LEN     20
+#define TCP_OPTION_LEN  12
+#define VLAN_LEN         4
+
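+/*
+ * Endpoint connection states.  Each state is a distinct bit rather than
+ * a sequential value, although the driver compares states by equality
+ * (see QEDI_OFLD_WAIT_STATE() below).
+ */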
+enum {
+       EP_STATE_IDLE                   = 0x0,
+       EP_STATE_ACQRCONN_START         = 0x1,
+       EP_STATE_ACQRCONN_COMPL         = 0x2,
+       EP_STATE_OFLDCONN_START         = 0x4,
+       EP_STATE_OFLDCONN_COMPL         = 0x8,
+       EP_STATE_DISCONN_START          = 0x10,
+       EP_STATE_DISCONN_COMPL          = 0x20,
+       EP_STATE_CLEANUP_START          = 0x40,
+       EP_STATE_CLEANUP_CMPL           = 0x80,
+       EP_STATE_TCP_FIN_RCVD           = 0x100,
+       EP_STATE_TCP_RST_RCVD           = 0x200,
+       EP_STATE_LOGOUT_SENT            = 0x400,
+       EP_STATE_LOGOUT_RESP_RCVD       = 0x800,
+       EP_STATE_CLEANUP_FAILED         = 0x1000,
+       EP_STATE_OFLDCONN_FAILED        = 0x2000,
+       EP_STATE_CONNECT_FAILED         = 0x4000,
+       EP_STATE_DISCONN_TIMEDOUT       = 0x8000,
+};
+
+struct qedi_conn;
+
+struct qedi_endpoint {
+       struct qedi_ctx *qedi;
+       u32 dst_addr[4];
+       u32 src_addr[4];
+       u16 src_port;
+       u16 dst_port;
+       u16 vlan_id;
+       u16 pmtu;
+       u8 src_mac[ETH_ALEN];
+       u8 dst_mac[ETH_ALEN];
+       u8 ip_type;
+       int state;
+       wait_queue_head_t ofld_wait;
+       wait_queue_head_t tcp_ofld_wait;
+       u32 iscsi_cid;
+       /* identifier of the connection from qed */
+       u32 handle;
+       u32 fw_cid;
+       void __iomem *p_doorbell;
+
+       /* Send queue management */
+       struct iscsi_wqe *sq;
+       dma_addr_t sq_dma;
+
+       u16 sq_prod_idx;
+       u16 fw_sq_prod_idx;
+       u16 sq_con_idx;
+       u32 sq_mem_size;
+
+       void *sq_pbl;
+       dma_addr_t sq_pbl_dma;
+       u32 sq_pbl_size;
+       struct qedi_conn *conn;
+       struct work_struct offload_work;
+};
+
+#define QEDI_SQ_WQES_MIN       16
+
+struct qedi_io_bdt {
+       struct iscsi_sge *sge_tbl;
+       dma_addr_t sge_tbl_dma;
+       u16 sge_valid;
+};
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf:            driver buffer used to stage payload associated with
+ *                      the login request
+ * @req_dma_addr:       dma address for iscsi login request payload buffer
+ * @req_buf_size:       actual login request payload length
+ * @req_wr_ptr:         pointer into login request buffer when next data is
+ *                      to be written
+ * @resp_hdr:           iscsi header where iscsi login response header is to
+ *                      be recreated
+ * @resp_buf:           buffer to stage login response payload
+ * @resp_dma_addr:      login response payload buffer dma address
+ * @resp_buf_size:      login response payload length
+ * @resp_wr_ptr:        pointer into login response buffer when next data is
+ *                      to be written
+ * @req_bd_tbl:         iscsi login request payload BD table
+ * @req_bd_dma:         login request BD table dma address
+ * @resp_bd_tbl:        iscsi login response payload BD table
+ * @resp_bd_dma:        login response BD table dma address
+ *
+ * The following structure defines buffer info for generic PDUs such as
+ *      iSCSI Login, Logout and NOP-Out.
+ */
+struct generic_pdu_resc {
+       char *req_buf;
+       dma_addr_t req_dma_addr;
+       u32 req_buf_size;
+       char *req_wr_ptr;
+       struct iscsi_hdr resp_hdr;
+       char *resp_buf;
+       dma_addr_t resp_dma_addr;
+       u32 resp_buf_size;
+       char *resp_wr_ptr;
+       char *req_bd_tbl;
+       dma_addr_t req_bd_dma;
+       char *resp_bd_tbl;
+       dma_addr_t resp_bd_dma;
+};
+
+struct qedi_conn {
+       struct iscsi_cls_conn *cls_conn;
+       struct qedi_ctx *qedi;
+       struct qedi_endpoint *ep;
+       struct list_head active_cmd_list;
+       spinlock_t list_lock;           /* internal conn lock */
+       u32 active_cmd_count;
+       u32 cmd_cleanup_req;
+       u32 cmd_cleanup_cmpl;
+
+       u32 iscsi_conn_id;
+       int itt;
+       int abrt_conn;
+#define QEDI_CID_RESERVED      0x5AFF
+       u32 fw_cid;
+       /*
+        * Buffer for login negotiation process
+        */
+       struct generic_pdu_resc gen_pdu;
+
+       struct list_head tmf_work_list;
+       wait_queue_head_t wait_queue;
+       spinlock_t tmf_work_lock;       /* tmf work lock */
+       unsigned long flags;
+#define QEDI_CONN_FW_CLEANUP   1
+};
+
+struct qedi_cmd {
+       struct list_head io_cmd;
+       bool io_cmd_in_list;
+       struct iscsi_hdr hdr;
+       struct qedi_conn *conn;
+       struct scsi_cmnd *scsi_cmd;
+       struct scatterlist *sg;
+       struct qedi_io_bdt io_tbl;
+       struct iscsi_task_context request;
+       unsigned char *sense_buffer;
+       dma_addr_t sense_buffer_dma;
+       u16 task_id;
+
+       /* field populated for tmf work queue */
+       struct iscsi_task *task;
+       struct work_struct tmf_work;
+       int state;
+#define CLEANUP_WAIT   1
+#define CLEANUP_RECV   2
+#define CLEANUP_WAIT_FAILED    3
+#define CLEANUP_NOT_REQUIRED   4
+#define LUN_RESET_RESPONSE_RECEIVED    5
+#define RESPONSE_RECEIVED      6
+
+       int type;
+#define TYPEIO         1
+#define TYPERESET      2
+
+       struct qedi_work_map *list_tmf_work;
+       /* slowpath management */
+       bool use_slowpath;
+
+       struct iscsi_tm_rsp *tmf_resp_buf;
+       struct qedi_work cqe_work;
+};
+
+struct qedi_work_map {
+       struct list_head list;
+       struct qedi_cmd *qedi_cmd;
+       int rtid;
+
+       int state;
+#define QEDI_WORK_QUEUED       1
+#define QEDI_WORK_SCHEDULED    2
+#define QEDI_WORK_EXIT         3
+
+       struct work_struct *ptr_tmf_work;
+};
+
+#define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
+#define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
+
+#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \
+                               (q)->state == EP_STATE_OFLDCONN_COMPL)
+
+#endif /* _QEDI_ISCSI_H_ */
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
new file mode 100644 (file)
index 0000000..19ead8d
--- /dev/null
@@ -0,0 +1,2127 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <scsi/iscsi_if.h>
+#include <linux/inet.h>
+#include <net/arp.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+
+static uint qedi_fw_debug;
+module_param(qedi_fw_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0 (default) to 3");
+
+uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
+module_param(qedi_dbg_log, uint, 0644);
+MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
+
+uint qedi_io_tracing;
+module_param(qedi_io_tracing, uint, 0644);
+MODULE_PARM_DESC(qedi_io_tracing,
+                " Enable logging of SCSI requests/completions into trace buffer (default off).");
+
+const struct qed_iscsi_ops *qedi_ops;
+static struct scsi_transport_template *qedi_scsi_transport;
+static struct pci_driver qedi_pci_driver;
+static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
+static LIST_HEAD(qedi_udev_list);
+/* Static function declaration */
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
+static void qedi_free_global_queues(struct qedi_ctx *qedi);
+static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
+
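+/*
+ * Async event callback registered with qed: wake waiters on
+ * tcp_ofld_wait for connect/terminate completions and route iSCSI/TCP
+ * error events to the recovery helpers in qedi_iscsi.c.
+ */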
+static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
+{
+       struct qedi_ctx *qedi;
+       struct qedi_endpoint *qedi_ep;
+       struct async_data *data;
+       int rval = 0;
+
+       if (!context || !fw_handle) {
+               QEDI_ERR(NULL, "Recv event with ctx NULL\n");
+               return -EINVAL;
+       }
+
+       qedi = (struct qedi_ctx *)context;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
+
+       data = (struct async_data *)fw_handle;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
+                  data->cid, data->itid, data->error_code,
+                  data->fw_debug_param);
+
+       qedi_ep = qedi->ep_tbl[data->cid];
+
+       if (!qedi_ep) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Cannot process event, ep already disconnected, cid=0x%x\n",
+                          data->cid);
+               WARN_ON(1);
+               return -ENODEV;
+       }
+
+       switch (fw_event_code) {
+       case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
+               if (qedi_ep->state == EP_STATE_OFLDCONN_START)
+                       qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
+
+               wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+               break;
+       case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
+               qedi_ep->state = EP_STATE_DISCONN_COMPL;
+               wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
+               break;
+       case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
+               qedi_process_iscsi_error(qedi_ep, data);
+               break;
+       case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
+       case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
+       case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
+       case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
+       case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
+       case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
+       case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
+               qedi_process_tcp_error(qedi_ep, data);
+               break;
+       default:
+               QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
+                        fw_event_code);
+       }
+
+       return rval;
+}
+
+static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+       struct qedi_uio_dev *udev = uinfo->priv;
+       struct qedi_ctx *qedi = udev->qedi;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (udev->uio_dev != -1)
+               return -EBUSY;
+
+       rtnl_lock();
+       udev->uio_dev = iminor(inode);
+       qedi_reset_uio_rings(udev);
+       set_bit(UIO_DEV_OPENED, &qedi->flags);
+       rtnl_unlock();
+
+       return 0;
+}
+
+static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+       struct qedi_uio_dev *udev = uinfo->priv;
+       struct qedi_ctx *qedi = udev->qedi;
+
+       udev->uio_dev = -1;
+       clear_bit(UIO_DEV_OPENED, &qedi->flags);
+       qedi_ll2_free_skbs(qedi);
+       return 0;
+}
+
+static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
+{
+       if (udev->ll2_ring) {
+               free_page((unsigned long)udev->ll2_ring);
+               udev->ll2_ring = NULL;
+       }
+
+       if (udev->ll2_buf) {
+               free_pages((unsigned long)udev->ll2_buf, 2);
+               udev->ll2_buf = NULL;
+       }
+}
+
+static void __qedi_free_uio(struct qedi_uio_dev *udev)
+{
+       uio_unregister_device(&udev->qedi_uinfo);
+
+       __qedi_free_uio_rings(udev);
+
+       pci_dev_put(udev->pdev);
+       kfree(udev->uctrl);
+       kfree(udev);
+}
+
+static void qedi_free_uio(struct qedi_uio_dev *udev)
+{
+       if (!udev)
+               return;
+
+       list_del_init(&udev->list);
+       __qedi_free_uio(udev);
+}
+
+static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
+{
+       struct qedi_ctx *qedi = NULL;
+       struct qedi_uio_ctrl *uctrl = NULL;
+
+       qedi = udev->qedi;
+       uctrl = udev->uctrl;
+
+       spin_lock_bh(&qedi->ll2_lock);
+       uctrl->host_rx_cons = 0;
+       uctrl->hw_rx_prod = 0;
+       uctrl->hw_rx_bd_prod = 0;
+       uctrl->host_rx_bd_cons = 0;
+
+       memset(udev->ll2_ring, 0, udev->ll2_ring_size);
+       memset(udev->ll2_buf, 0, udev->ll2_buf_size);
+       spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
+{
+       int rc = 0;
+
+       if (udev->ll2_ring || udev->ll2_buf)
+               return rc;
+
+       /* Allocating memory for LL2 ring  */
+       udev->ll2_ring_size = QEDI_PAGE_SIZE;
+       udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
+       if (!udev->ll2_ring) {
+               rc = -ENOMEM;
+               goto exit_alloc_ring;
+       }
+
+       /* Allocating memory for Tx/Rx pkt buffer */
+       udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
+       udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
+       udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
+                                                __GFP_ZERO, 2);
+       if (!udev->ll2_buf) {
+               rc = -ENOMEM;
+               goto exit_alloc_buf;
+       }
+       return rc;
+
+exit_alloc_buf:
+       free_page((unsigned long)udev->ll2_ring);
+       udev->ll2_ring = NULL;
+exit_alloc_ring:
+       return rc;
+}
+
+static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
+{
+       struct qedi_uio_dev *udev = NULL;
+       struct qedi_uio_ctrl *uctrl = NULL;
+       int rc = 0;
+
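+       /* Reuse the UIO device already created for this PCI function, if
+        * any (e.g. across a recovery cycle), rather than allocating a
+        * new one.
+        */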
+       list_for_each_entry(udev, &qedi_udev_list, list) {
+               if (udev->pdev == qedi->pdev) {
+                       udev->qedi = qedi;
+                       if (__qedi_alloc_uio_rings(udev)) {
+                               udev->qedi = NULL;
+                               return -ENOMEM;
+                       }
+                       qedi->udev = udev;
+                       return 0;
+               }
+       }
+
+       udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+       if (!udev) {
+               rc = -ENOMEM;
+               goto err_udev;
+       }
+
+       uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL);
+       if (!uctrl) {
+               rc = -ENOMEM;
+               goto err_uctrl;
+       }
+
+       udev->uio_dev = -1;
+
+       udev->qedi = qedi;
+       udev->pdev = qedi->pdev;
+       udev->uctrl = uctrl;
+
+       rc = __qedi_alloc_uio_rings(udev);
+       if (rc)
+               goto err_uio_rings;
+
+       list_add(&udev->list, &qedi_udev_list);
+
+       pci_dev_get(udev->pdev);
+       qedi->udev = udev;
+
+       udev->tx_pkt = udev->ll2_buf;
+       udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
+       return 0;
+
+ err_uio_rings:
+       kfree(uctrl);
+ err_uctrl:
+       kfree(udev);
+ err_udev:
+       return -ENOMEM;
+}
+
+static int qedi_init_uio(struct qedi_ctx *qedi)
+{
+       struct qedi_uio_dev *udev = qedi->udev;
+       struct uio_info *uinfo;
+       int ret = 0;
+
+       if (!udev)
+               return -ENOMEM;
+
+       uinfo = &udev->qedi_uinfo;
+
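+       /* Export the control block, the LL2 ring and the packet buffers as
+        * UIO memory regions so the iscsiuio daemon can mmap() them.
+        */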
+       uinfo->mem[0].addr = (unsigned long)udev->uctrl;
+       uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
+       uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
+       uinfo->mem[1].size = udev->ll2_ring_size;
+       uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
+       uinfo->mem[2].size = udev->ll2_buf_size;
+       uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->name = "qedi_uio";
+       uinfo->version = QEDI_MODULE_VERSION;
+       uinfo->irq = UIO_IRQ_CUSTOM;
+
+       uinfo->open = qedi_uio_open;
+       uinfo->release = qedi_uio_close;
+
+       if (udev->uio_dev == -1) {
+               if (!uinfo->priv) {
+                       uinfo->priv = udev;
+
+                       ret = uio_register_device(&udev->pdev->dev, uinfo);
+                       if (ret) {
+                               QEDI_ERR(&qedi->dbg_ctx,
+                                        "UIO registration failed\n");
+                       }
+               }
+       }
+
+       return ret;
+}
+
+static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
+                                 struct qed_sb_info *sb_info, u16 sb_id)
+{
+       struct status_block *sb_virt;
+       dma_addr_t sb_phys;
+       int ret;
+
+       sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
+                                    sizeof(struct status_block), &sb_phys,
+                                    GFP_KERNEL);
+       if (!sb_virt) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Status block allocation failed for id = %d.\n",
+                         sb_id);
+               return -ENOMEM;
+       }
+
+       ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
+                                      sb_id, QED_SB_TYPE_STORAGE);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Status block initialization failed for id = %d.\n",
+                         sb_id);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void qedi_free_sb(struct qedi_ctx *qedi)
+{
+       struct qed_sb_info *sb_info;
+       int id;
+
+       for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+               sb_info = &qedi->sb_array[id];
+               if (sb_info->sb_virt)
+                       dma_free_coherent(&qedi->pdev->dev,
+                                         sizeof(*sb_info->sb_virt),
+                                         (void *)sb_info->sb_virt,
+                                         sb_info->sb_phys);
+       }
+}
+
+static void qedi_free_fp(struct qedi_ctx *qedi)
+{
+       kfree(qedi->fp_array);
+       kfree(qedi->sb_array);
+}
+
+static void qedi_destroy_fp(struct qedi_ctx *qedi)
+{
+       qedi_free_sb(qedi);
+       qedi_free_fp(qedi);
+}
+
+static int qedi_alloc_fp(struct qedi_ctx *qedi)
+{
+       int ret = 0;
+
+       qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+                                sizeof(struct qedi_fastpath), GFP_KERNEL);
+       if (!qedi->fp_array) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "fastpath fp array allocation failed.\n");
+               return -ENOMEM;
+       }
+
+       qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
+                                sizeof(struct qed_sb_info), GFP_KERNEL);
+       if (!qedi->sb_array) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "fastpath sb array allocation failed.\n");
+               ret = -ENOMEM;
+               goto free_fp;
+       }
+
+       return ret;
+
+free_fp:
+       qedi_free_fp(qedi);
+       return ret;
+}
+
+static void qedi_int_fp(struct qedi_ctx *qedi)
+{
+       struct qedi_fastpath *fp;
+       int id;
+
+       memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+              sizeof(*qedi->fp_array));
+       memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
+              sizeof(*qedi->sb_array));
+
+       for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+               fp = &qedi->fp_array[id];
+               fp->sb_info = &qedi->sb_array[id];
+               fp->sb_id = id;
+               fp->qedi = qedi;
+               snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+                        "qedi", id);
+
+               /* fp_array[i] is passed as the IRQ cookie, so initialize
+                * everything the interrupt context will need here.
+                */
+       }
+}
+
+static int qedi_prepare_fp(struct qedi_ctx *qedi)
+{
+       struct qedi_fastpath *fp;
+       int id, ret = 0;
+
+       ret = qedi_alloc_fp(qedi);
+       if (ret)
+               goto err;
+
+       qedi_int_fp(qedi);
+
+       for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
+               fp = &qedi->fp_array[id];
+               ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
+               if (ret) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "SB allocation and initialization failed.\n");
+                       ret = -EIO;
+                       goto err_init;
+               }
+       }
+
+       return 0;
+
+err_init:
+       qedi_free_sb(qedi);
+       qedi_free_fp(qedi);
+err:
+       return ret;
+}
+
+static int qedi_setup_cid_que(struct qedi_ctx *qedi)
+{
+       int i;
+
+       qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
+                                                  sizeof(u32), GFP_KERNEL);
+       if (!qedi->cid_que.cid_que_base)
+               return -ENOMEM;
+
+       qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
+                                                  sizeof(struct qedi_conn *),
+                                                  GFP_KERNEL);
+       if (!qedi->cid_que.conn_cid_tbl) {
+               kfree(qedi->cid_que.cid_que_base);
+               qedi->cid_que.cid_que_base = NULL;
+               return -ENOMEM;
+       }
+
+       qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
+       qedi->cid_que.cid_q_prod_idx = 0;
+       qedi->cid_que.cid_q_cons_idx = 0;
+       qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
+       qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
+
+       for (i = 0; i < qedi->max_active_conns; i++) {
+               qedi->cid_que.cid_que[i] = i;
+               qedi->cid_que.conn_cid_tbl[i] = NULL;
+       }
+
+       return 0;
+}
+
+static void qedi_release_cid_que(struct qedi_ctx *qedi)
+{
+       kfree(qedi->cid_que.cid_que_base);
+       qedi->cid_que.cid_que_base = NULL;
+
+       kfree(qedi->cid_que.conn_cid_tbl);
+       qedi->cid_que.conn_cid_tbl = NULL;
+}
+
+static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
+                           u16 start_id, u16 next)
+{
+       id_tbl->start = start_id;
+       id_tbl->max = size;
+       id_tbl->next = next;
+       spin_lock_init(&id_tbl->lock);
+       id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+       if (!id_tbl->table)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
+{
+       kfree(id_tbl->table);
+       id_tbl->table = NULL;
+}
+
+int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+       int ret = -1;
+
+       id -= id_tbl->start;
+       if (id >= id_tbl->max)
+               return ret;
+
+       spin_lock(&id_tbl->lock);
+       if (!test_bit(id, id_tbl->table)) {
+               set_bit(id, id_tbl->table);
+               ret = 0;
+       }
+       spin_unlock(&id_tbl->lock);
+       return ret;
+}
+
+u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
+{
+       u16 id;
+
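+       /* Circular search: scan from 'next' to the end of the table, then
+        * wrap and scan from the start up to 'next'.
+        */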
+       spin_lock(&id_tbl->lock);
+       id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+       if (id >= id_tbl->max) {
+               id = QEDI_LOCAL_PORT_INVALID;
+               if (id_tbl->next != 0) {
+                       id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+                       if (id >= id_tbl->next)
+                               id = QEDI_LOCAL_PORT_INVALID;
+               }
+       }
+
+       if (id < id_tbl->max) {
+               set_bit(id, id_tbl->table);
+               id_tbl->next = (id + 1) & (id_tbl->max - 1);
+               id += id_tbl->start;
+       }
+
+       spin_unlock(&id_tbl->lock);
+
+       return id;
+}
+
+void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
+{
+       if (id == QEDI_LOCAL_PORT_INVALID)
+               return;
+
+       id -= id_tbl->start;
+       if (id >= id_tbl->max)
+               return;
+
+       clear_bit(id, id_tbl->table);
+}
+
+static void qedi_cm_free_mem(struct qedi_ctx *qedi)
+{
+       kfree(qedi->ep_tbl);
+       qedi->ep_tbl = NULL;
+       qedi_free_id_tbl(&qedi->lcl_port_tbl);
+}
+
+static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
+{
+       u16 port_id;
+
+       qedi->ep_tbl = kcalloc(qedi->max_active_conns,
+                              sizeof(struct qedi_endpoint *), GFP_KERNEL);
+       if (!qedi->ep_tbl)
+               return -ENOMEM;
+       port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+       if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
+                            QEDI_LOCAL_PORT_MIN, port_id)) {
+               qedi_cm_free_mem(qedi);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
+{
+       struct Scsi_Host *shost;
+       struct qedi_ctx *qedi = NULL;
+
+       shost = iscsi_host_alloc(&qedi_host_template,
+                                sizeof(struct qedi_ctx), 0);
+       if (!shost) {
+               QEDI_ERR(NULL, "Could not allocate shost\n");
+               goto exit_setup_shost;
+       }
+
+       shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+       shost->max_channel = 0;
+       shost->max_lun = ~0;
+       shost->max_cmd_len = 16;
+       shost->transportt = qedi_scsi_transport;
+
+       qedi = iscsi_host_priv(shost);
+       memset(qedi, 0, sizeof(*qedi));
+       qedi->shost = shost;
+       qedi->dbg_ctx.host_no = shost->host_no;
+       qedi->pdev = pdev;
+       qedi->dbg_ctx.pdev = pdev;
+       qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
+       qedi->max_sqes = QEDI_SQ_SIZE;
+
+       if (shost_use_blk_mq(shost))
+               shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+       pci_set_drvdata(pdev, qedi);
+
+exit_setup_shost:
+       return qedi;
+}
+
+static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
+{
+       struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+       struct qedi_uio_dev *udev;
+       struct qedi_uio_ctrl *uctrl;
+       struct skb_work_list *work;
+       u32 prod;
+
+       if (!qedi) {
+               QEDI_ERR(NULL, "qedi is NULL\n");
+               return -1;
+       }
+
+       if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
+                         "UIO DEV is not opened\n");
+               kfree_skb(skb);
+               return 0;
+       }
+
+       udev = qedi->udev;
+       uctrl = udev->uctrl;
+
+       work = kzalloc(sizeof(*work), GFP_ATOMIC);
+       if (!work) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Could not allocate work so dropping frame.\n");
+               kfree_skb(skb);
+               return 0;
+       }
+
+       INIT_LIST_HEAD(&work->list);
+       work->skb = skb;
+
+       if (skb_vlan_tag_present(skb))
+               work->vlan_id = skb_vlan_tag_get(skb);
+
+       if (work->vlan_id)
+               __vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);
+
+       spin_lock_bh(&qedi->ll2_lock);
+       list_add_tail(&work->list, &qedi->ll2_skb_list);
+
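+       /* Only advance the producer and wake the receive thread while the
+        * ring has room; with a full ring the skb stays queued for later.
+        */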
+       ++uctrl->hw_rx_prod_cnt;
+       prod = (uctrl->hw_rx_prod + 1) % RX_RING;
+       if (prod != uctrl->host_rx_cons) {
+               uctrl->hw_rx_prod = prod;
+               spin_unlock_bh(&qedi->ll2_lock);
+               wake_up_process(qedi->ll2_recv_thread);
+               return 0;
+       }
+
+       spin_unlock_bh(&qedi->ll2_lock);
+       return 0;
+}
+
+/* map this skb to iscsiuio mmaped region */
+static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
+                               u16 vlan_id)
+{
+       struct qedi_uio_dev *udev = NULL;
+       struct qedi_uio_ctrl *uctrl = NULL;
+       struct qedi_rx_bd rxbd;
+       struct qedi_rx_bd *p_rxbd;
+       u32 rx_bd_prod;
+       void *pkt;
+       int len = 0;
+
+       if (!qedi) {
+               QEDI_ERR(NULL, "qedi is NULL\n");
+               return -1;
+       }
+
+       udev = qedi->udev;
+       uctrl = udev->uctrl;
+       pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
+       len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
+       memcpy(pkt, skb->data, len);
+
+       memset(&rxbd, 0, sizeof(rxbd));
+       rxbd.rx_pkt_index = uctrl->hw_rx_prod;
+       rxbd.rx_pkt_len = len;
+       rxbd.vlan_id = vlan_id;
+
+       uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
+       rx_bd_prod = uctrl->hw_rx_bd_prod;
+       p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
+       p_rxbd += rx_bd_prod;
+
+       memcpy(p_rxbd, &rxbd, sizeof(rxbd));
+
+       /* notify the iscsiuio about new packet */
+       uio_event_notify(&udev->qedi_uinfo);
+
+       return 0;
+}
+
+static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
+{
+       struct skb_work_list *work, *work_tmp;
+
+       spin_lock_bh(&qedi->ll2_lock);
+       list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
+               list_del(&work->list);
+               if (work->skb)
+                       kfree_skb(work->skb);
+               kfree(work);
+       }
+       spin_unlock_bh(&qedi->ll2_lock);
+}
+
+static int qedi_ll2_recv_thread(void *arg)
+{
+       struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
+       struct skb_work_list *work, *work_tmp;
+
+       set_user_nice(current, -20);
+
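+       /* Standard kthread loop: drain the skb list under ll2_lock, then
+        * sleep until qedi_ll2_rx() queues more work and wakes us.
+        */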
+       while (!kthread_should_stop()) {
+               spin_lock_bh(&qedi->ll2_lock);
+               list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
+                                        list) {
+                       list_del(&work->list);
+                       qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
+                       kfree_skb(work->skb);
+                       kfree(work);
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_unlock_bh(&qedi->ll2_lock);
+               schedule();
+       }
+
+       __set_current_state(TASK_RUNNING);
+       return 0;
+}
+
+static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+       u8 num_sq_pages;
+       u32 log_page_size;
+       int rval = 0;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
+                 MIN_NUM_CPUS_MSIX(qedi));
+
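+       /* Pages needed for one connection's queues; the 8 is presumably
+        * the per-entry size in bytes.
+        */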
+       num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;
+
+       qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);
+
+       memset(&qedi->pf_params.iscsi_pf_params, 0,
+              sizeof(qedi->pf_params.iscsi_pf_params));
+
+       qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+                       qedi->num_queues * sizeof(struct qedi_glbl_q_params),
+                       &qedi->hw_p_cpuq);
+       if (!qedi->p_cpuq) {
+               QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+               rval = -1;
+               goto err_alloc_mem;
+       }
+
+       rval = qedi_alloc_global_queues(qedi);
+       if (rval) {
+               QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
+               rval = -ENOMEM;
+               goto err_alloc_mem;
+       }
+
+       qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+       qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
+       qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
+       qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
+       qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
+       qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
+       qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
+       qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
+
+       for (log_page_size = 0; log_page_size < 32; log_page_size++) {
+               if ((1 << log_page_size) == PAGE_SIZE)
+                       break;
+       }
+       qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;
+
+       qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
+                                                          (u64)qedi->hw_p_cpuq;
+
+       /* RQ BDQ initializations.
+        * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
+        * rqe_log_size: 8 for 256B RQE
+        */
+       qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
+       /* BDQ address and size */
+       qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
+                                                       qedi->bdq_pbl_list_dma;
+       qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+                                               qedi->bdq_pbl_list_num_entries;
+       qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;
+
+       /* cq_num_entries: num_tasks + rq_num_entries */
+       qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;
+
+       qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
+       qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
+       qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
+
+err_alloc_mem:
+       return rval;
+}
+
+/* Free DMA coherent memory for array of queue pointers we pass to qed */
+static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
+{
+       size_t size = 0;
+
+       if (qedi->p_cpuq) {
+               size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
+               pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+                                   qedi->hw_p_cpuq);
+       }
+
+       qedi_free_global_queues(qedi);
+
+       kfree(qedi->global_queues);
+}
+
+static void qedi_link_update(void *dev, struct qed_link_output *link)
+{
+       struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+
+       if (link->link_up) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
+               atomic_set(&qedi->link_state, QEDI_LINK_UP);
+       } else {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "Link Down event.\n");
+               atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+       }
+}
+
+static struct qed_iscsi_cb_ops qedi_cb_ops = {
+       {
+               .link_update =          qedi_link_update,
+       }
+};
+
+static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
+                         u16 que_idx, struct qedi_percpu_s *p)
+{
+       struct qedi_work *qedi_work;
+       struct qedi_conn *q_conn;
+       struct iscsi_conn *conn;
+       struct qedi_cmd *qedi_cmd;
+       u32 iscsi_cid;
+       int rc = 0;
+
+       iscsi_cid  = cqe->cqe_common.conn_id;
+       q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+       if (!q_conn) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Session no longer exists for cid=0x%x!!\n",
+                         iscsi_cid);
+               return -1;
+       }
+       conn = q_conn->cls_conn->dd_data;
+
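+       /* Solicited CQEs reuse the embedded cqe_work of the owning command;
+        * all other CQE types get a freshly allocated qedi_work entry.
+        */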
+       switch (cqe->cqe_common.cqe_type) {
+       case ISCSI_CQE_TYPE_SOLICITED:
+       case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
+               qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
+               if (!qedi_cmd) {
+                       rc = -1;
+                       break;
+               }
+               INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
+               qedi_cmd->cqe_work.qedi = qedi;
+               memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
+               qedi_cmd->cqe_work.que_idx = que_idx;
+               qedi_cmd->cqe_work.is_solicited = true;
+               list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
+               break;
+       case ISCSI_CQE_TYPE_UNSOLICITED:
+       case ISCSI_CQE_TYPE_DUMMY:
+       case ISCSI_CQE_TYPE_TASK_CLEANUP:
+               qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
+               if (!qedi_work) {
+                       rc = -1;
+                       break;
+               }
+               INIT_LIST_HEAD(&qedi_work->list);
+               qedi_work->qedi = qedi;
+               memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
+               qedi_work->que_idx = que_idx;
+               qedi_work->is_solicited = false;
+               list_add_tail(&qedi_work->list, &p->work_list);
+               break;
+       default:
+               rc = -1;
+               QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
+       }
+       return rc;
+}
+
+static bool qedi_process_completions(struct qedi_fastpath *fp)
+{
+       struct qedi_ctx *qedi = fp->qedi;
+       struct qed_sb_info *sb_info = fp->sb_info;
+       struct status_block *sb = sb_info->sb_virt;
+       struct qedi_percpu_s *p = NULL;
+       struct global_queue *que;
+       u16 prod_idx;
+       unsigned long flags;
+       union iscsi_cqe *cqe;
+       int cpu;
+       int ret;
+
+       /* Get the current firmware producer index */
+       prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
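+       /* The firmware producer index is free-running; fold it into the
+        * CQ range before comparing against the consumer index.
+        */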
+       if (prod_idx >= QEDI_CQ_SIZE)
+               prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+       que = qedi->global_queues[fp->sb_id];
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+                 "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
+                 que, prod_idx, que->cq_cons_idx, fp->sb_id);
+
+       qedi->intr_cpu = fp->sb_id;
+       cpu = smp_processor_id();
+       p = &per_cpu(qedi_percpu, cpu);
+
+       WARN_ON(!p->iothread);
+
+       spin_lock_irqsave(&p->p_work_lock, flags);
+       while (que->cq_cons_idx != prod_idx) {
+               cqe = &que->cq[que->cq_cons_idx];
+
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
+                         "cqe=%p prod_idx=%d cons_idx=%d.\n",
+                         cqe, prod_idx, que->cq_cons_idx);
+
+               ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
+               if (ret)
+                       QEDI_WARN(&qedi->dbg_ctx,
+                                 "Dropping CQE that could not be queued.\n");
+
+               /* Always consume the CQE; retrying it here would spin
+                * forever with p_work_lock held.
+                */
+               que->cq_cons_idx++;
+               if (que->cq_cons_idx == QEDI_CQ_SIZE)
+                       que->cq_cons_idx = 0;
+       }
+       wake_up_process(p->iothread);
+       spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+       return true;
+}
+
+static bool qedi_fp_has_work(struct qedi_fastpath *fp)
+{
+       struct qedi_ctx *qedi = fp->qedi;
+       struct global_queue *que;
+       struct qed_sb_info *sb_info = fp->sb_info;
+       struct status_block *sb = sb_info->sb_virt;
+       u16 prod_idx;
+
+       barrier();
+
+       /* Get the current firmware producer index */
+       prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
+
+       /* Get the pointer to the global CQ this completion is on */
+       que = qedi->global_queues[fp->sb_id];
+
+       /* prod_idx is a free-running u16; wrap it into the CQ range */
+       if (prod_idx >= QEDI_CQ_SIZE)
+               prod_idx = prod_idx % QEDI_CQ_SIZE;
+
+       return (que->cq_cons_idx != prod_idx);
+}
+
+/* MSI-X fastpath handler code */
+static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
+{
+       struct qedi_fastpath *fp = dev_id;
+       struct qedi_ctx *qedi = fp->qedi;
+       bool wake_io_thread = true;
+
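+       /* Mask this status block's interrupt while completions are drained;
+        * it is re-enabled below once no further work is pending.
+        */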
+       qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+
+process_again:
+       wake_io_thread = qedi_process_completions(fp);
+       if (wake_io_thread) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                         "process already running\n");
+       }
+
+       if (!qedi_fp_has_work(fp))
+               qed_sb_update_sb_idx(fp->sb_info);
+
+       /* Check for more work */
+       rmb();
+
+       if (!qedi_fp_has_work(fp))
+               qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+       else
+               goto process_again;
+
+       return IRQ_HANDLED;
+}
+
+/* simd handler for MSI/INTa */
+static void qedi_simd_int_handler(void *cookie)
+{
+       /* Cookie is qedi_ctx struct */
+       struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
+
+       QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
+}
+
+#define QEDI_SIMD_HANDLER_NUM          0
+static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
+{
+       int i;
+
+       if (qedi->int_info.msix_cnt) {
+               for (i = 0; i < qedi->int_info.used_cnt; i++) {
+                       synchronize_irq(qedi->int_info.msix[i].vector);
+                       irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+                                             NULL);
+                       free_irq(qedi->int_info.msix[i].vector,
+                                &qedi->fp_array[i]);
+               }
+       } else {
+               qedi_ops->common->simd_handler_clean(qedi->cdev,
+                                                    QEDI_SIMD_HANDLER_NUM);
+       }
+
+       qedi->int_info.used_cnt = 0;
+       qedi_ops->common->set_fp_int(qedi->cdev, 0);
+}
+
+static int qedi_request_msix_irq(struct qedi_ctx *qedi)
+{
+       int i, rc, cpu;
+
+       cpu = cpumask_first(cpu_online_mask);
+       for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+               rc = request_irq(qedi->int_info.msix[i].vector,
+                                qedi_msix_handler, 0, "qedi",
+                                &qedi->fp_array[i]);
+
+               if (rc) {
+                       QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
+                       qedi_sync_free_irqs(qedi);
+                       return rc;
+               }
+               qedi->int_info.used_cnt++;
+               rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
+                                          get_cpu_mask(cpu));
+               cpu = cpumask_next(cpu, cpu_online_mask);
+       }
+
+       return 0;
+}
+
+static int qedi_setup_int(struct qedi_ctx *qedi)
+{
+       int rc = 0;
+
+       rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+       if (rc < 0)
+               goto exit_setup_int;
+
+       rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
+       if (rc)
+               goto exit_setup_int;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
+                  qedi->int_info.msix_cnt, num_online_cpus());
+
+       if (qedi->int_info.msix_cnt) {
+               rc = qedi_request_msix_irq(qedi);
+               goto exit_setup_int;
+       } else {
+               qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
+                                                     QEDI_SIMD_HANDLER_NUM,
+                                                     qedi_simd_int_handler);
+               qedi->int_info.used_cnt = 1;
+       }
+
+exit_setup_int:
+       return rc;
+}
+
+static void qedi_free_bdq(struct qedi_ctx *qedi)
+{
+       int i;
+
+       if (qedi->bdq_pbl_list)
+               dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
+                                 qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
+
+       if (qedi->bdq_pbl)
+               dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
+                                 qedi->bdq_pbl, qedi->bdq_pbl_dma);
+
+       for (i = 0; i < QEDI_BDQ_NUM; i++) {
+               if (qedi->bdq[i].buf_addr) {
+                       dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
+                                         qedi->bdq[i].buf_addr,
+                                         qedi->bdq[i].buf_dma);
+               }
+       }
+}
+
+static void qedi_free_global_queues(struct qedi_ctx *qedi)
+{
+       int i;
+       struct global_queue **gl = qedi->global_queues;
+
+       for (i = 0; i < qedi->num_queues; i++) {
+               if (!gl[i])
+                       continue;
+
+               if (gl[i]->cq)
+                       dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
+                                         gl[i]->cq, gl[i]->cq_dma);
+               if (gl[i]->cq_pbl)
+                       dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
+                                         gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
+
+               kfree(gl[i]);
+       }
+       qedi_free_bdq(qedi);
+}
+
+static int qedi_alloc_bdq(struct qedi_ctx *qedi)
+{
+       int i;
+       struct scsi_bd *pbl;
+       u64 *list;
+       dma_addr_t page;
+
+       /* Alloc dma memory for BDQ buffers */
+       for (i = 0; i < QEDI_BDQ_NUM; i++) {
+               qedi->bdq[i].buf_addr =
+                               dma_alloc_coherent(&qedi->pdev->dev,
+                                                  QEDI_BDQ_BUF_SIZE,
+                                                  &qedi->bdq[i].buf_dma,
+                                                  GFP_KERNEL);
+               if (!qedi->bdq[i].buf_addr) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not allocate BDQ buffer %d.\n", i);
+                       return -ENOMEM;
+               }
+       }
+
+       /* Alloc dma memory for BDQ page buffer list */
+       qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
+       qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
+       qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
+                 qedi->rq_num_entries);
+
+       qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+                                          qedi->bdq_pbl_mem_size,
+                                          &qedi->bdq_pbl_dma, GFP_KERNEL);
+       if (!qedi->bdq_pbl) {
+               QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * Populate BDQ PBL with physical and virtual address of individual
+        * BDQ buffers
+        */
+       pbl = (struct scsi_bd  *)qedi->bdq_pbl;
+       for (i = 0; i < QEDI_BDQ_NUM; i++) {
+               pbl->address.hi =
+                               cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
+               pbl->address.lo =
+                               cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                         "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
+                         pbl, pbl->address.hi, pbl->address.lo, i);
+               pbl->opaque.hi = 0;
+               pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+               pbl++;
+       }
+
+       /* Allocate list of PBL pages */
+       qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+                                               PAGE_SIZE,
+                                               &qedi->bdq_pbl_list_dma,
+                                               GFP_KERNEL);
+       if (!qedi->bdq_pbl_list) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Could not allocate list of PBL pages.\n");
+               return -ENOMEM;
+       }
+       memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
+
+       /*
+        * Now populate PBL list with pages that contain pointers to the
+        * individual buffers.
+        */
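+       /* Note: every entry below points at the same PBL base, which is
+        * only correct while the whole BDQ PBL fits in a single page (see
+        * the size calculation above).
+        */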
+       qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
+       list = (u64 *)qedi->bdq_pbl_list;
+       page = qedi->bdq_pbl_list_dma;
+       for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
+               *list = qedi->bdq_pbl_dma;
+               list++;
+               page += PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
+{
+       u32 *list;
+       int i;
+       int status = 0, rc;
+       u32 *pbl;
+       dma_addr_t page;
+       int num_pages;
+
+       /*
+        * Number of global queues (CQ / RQ). This should
+        * be <= number of available MSIX vectors for the PF
+        */
+       if (!qedi->num_queues) {
+               QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
+               return 1;
+       }
+
+       /* Make sure we allocated the PBL that will contain the physical
+        * addresses of our queues
+        */
+       if (!qedi->p_cpuq) {
+               status = 1;
+               goto mem_alloc_failure;
+       }
+
+       qedi->global_queues = kcalloc(qedi->num_queues,
+                                     sizeof(struct global_queue *),
+                                     GFP_KERNEL);
+       if (!qedi->global_queues) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Unable to allocate global queues array ptr memory\n");
+               return -ENOMEM;
+       }
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "qedi->global_queues=%p.\n", qedi->global_queues);
+
+       /* Allocate DMA coherent buffers for BDQ */
+       rc = qedi_alloc_bdq(qedi);
+       if (rc)
+               goto mem_alloc_failure;
+
+       /* Allocate a CQ and an associated PBL for each MSI-X
+        * vector.
+        */
+       for (i = 0; i < qedi->num_queues; i++) {
+               qedi->global_queues[i] =
+                                       kzalloc(sizeof(*qedi->global_queues[0]),
+                                               GFP_KERNEL);
+               if (!qedi->global_queues[i]) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Unable to allocation global queue %d.\n", i);
+                       goto mem_alloc_failure;
+               }
+
+               qedi->global_queues[i]->cq_mem_size =
+                   (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
+               qedi->global_queues[i]->cq_mem_size =
+                   (qedi->global_queues[i]->cq_mem_size +
+                   (QEDI_PAGE_SIZE - 1));
+
+               qedi->global_queues[i]->cq_pbl_size =
+                   (qedi->global_queues[i]->cq_mem_size /
+                   QEDI_PAGE_SIZE) * sizeof(void *);
+               qedi->global_queues[i]->cq_pbl_size =
+                   (qedi->global_queues[i]->cq_pbl_size +
+                   (QEDI_PAGE_SIZE - 1));
+
+               qedi->global_queues[i]->cq =
+                   dma_alloc_coherent(&qedi->pdev->dev,
+                                      qedi->global_queues[i]->cq_mem_size,
+                                      &qedi->global_queues[i]->cq_dma,
+                                      GFP_KERNEL);
+
+               if (!qedi->global_queues[i]->cq) {
+                       QEDI_WARN(&qedi->dbg_ctx,
+                                 "Could not allocate cq.\n");
+                       status = -ENOMEM;
+                       goto mem_alloc_failure;
+               }
+               memset(qedi->global_queues[i]->cq, 0,
+                      qedi->global_queues[i]->cq_mem_size);
+
+               qedi->global_queues[i]->cq_pbl =
+                   dma_alloc_coherent(&qedi->pdev->dev,
+                                      qedi->global_queues[i]->cq_pbl_size,
+                                      &qedi->global_queues[i]->cq_pbl_dma,
+                                      GFP_KERNEL);
+
+               if (!qedi->global_queues[i]->cq_pbl) {
+                       QEDI_WARN(&qedi->dbg_ctx,
+                                 "Could not allocate cq PBL.\n");
+                       status = -ENOMEM;
+                       goto mem_alloc_failure;
+               }
+               memset(qedi->global_queues[i]->cq_pbl, 0,
+                      qedi->global_queues[i]->cq_pbl_size);
+
+               /* Create PBL */
+               num_pages = qedi->global_queues[i]->cq_mem_size /
+                   QEDI_PAGE_SIZE;
+               page = qedi->global_queues[i]->cq_dma;
+               pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
+
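+               /* Each PBL element is a 64-bit page address stored as two
+                * 32-bit words, low half first.
+                */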
+               while (num_pages--) {
+                       *pbl = (u32)page;
+                       pbl++;
+                       *pbl = (u32)((u64)page >> 32);
+                       pbl++;
+                       page += QEDI_PAGE_SIZE;
+               }
+       }
+
+       list = (u32 *)qedi->p_cpuq;
+
+       /*
+        * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
+        * CQ#1 PBL pointer, RQ#1 PBL pointer, etc.  Each PBL pointer points
+        * to the physical address which contains an array of pointers to the
+        * physical addresses of the specific queue pages.
+        */
+       for (i = 0; i < qedi->num_queues; i++) {
+               *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
+               list++;
+               *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
+               list++;
+
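+               /* No per-queue RQ is configured here, so the RQ PBL
+                * pointer slots are published as zero.
+                */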
+               *list = (u32)0;
+               list++;
+               *list = (u32)((u64)0 >> 32);
+               list++;
+       }
+
+       return 0;
+
+mem_alloc_failure:
+       qedi_free_global_queues(qedi);
+       return status;
+}
+
+int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+       int rval = 0;
+       u32 *pbl;
+       dma_addr_t page;
+       int num_pages;
+
+       if (!ep)
+               return -EIO;
+
+       /* Calculate appropriate queue and PBL sizes */
+       ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
+       ep->sq_mem_size += QEDI_PAGE_SIZE - 1;
+
+       ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
+       ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
+
+       ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+                                   &ep->sq_dma, GFP_KERNEL);
+       if (!ep->sq) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Could not allocate send queue.\n");
+               rval = -ENOMEM;
+               goto out;
+       }
+       memset(ep->sq, 0, ep->sq_mem_size);
+
+       ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+                                       &ep->sq_pbl_dma, GFP_KERNEL);
+       if (!ep->sq_pbl) {
+               QEDI_WARN(&qedi->dbg_ctx,
+                         "Could not allocate send queue PBL.\n");
+               rval = -ENOMEM;
+               goto out_free_sq;
+       }
+       memset(ep->sq_pbl, 0, ep->sq_pbl_size);
+
+       /* Create PBL */
+       num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
+       page = ep->sq_dma;
+       pbl = (u32 *)ep->sq_pbl;
+
+       while (num_pages--) {
+               *pbl = (u32)page;
+               pbl++;
+               *pbl = (u32)((u64)page >> 32);
+               pbl++;
+               page += QEDI_PAGE_SIZE;
+       }
+
+       return rval;
+
+out_free_sq:
+       dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+                         ep->sq_dma);
+out:
+       return rval;
+}
+
+void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
+{
+       if (ep->sq_pbl)
+               dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
+                                 ep->sq_pbl_dma);
+       if (ep->sq)
+               dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
+                                 ep->sq_dma);
+}
+
+int qedi_get_task_idx(struct qedi_ctx *qedi)
+{
+       s16 tmp_idx;
+
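+       /* find_first_zero_bit() and test_and_set_bit() can race with a
+        * concurrent allocator, so retry until a bit is claimed atomically.
+        */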
+again:
+       tmp_idx = find_first_zero_bit(qedi->task_idx_map,
+                                     MAX_ISCSI_TASK_ENTRIES);
+
+       if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
+               QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
+               tmp_idx = -1;
+               goto err_idx;
+       }
+
+       if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
+               goto again;
+
+err_idx:
+       return tmp_idx;
+}
+
+void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
+{
+       if (!test_and_clear_bit(idx, qedi->task_idx_map)) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "FW task context, already cleared, tid=0x%x\n", idx);
+               WARN_ON(1);
+       }
+}
+
+void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+                        struct qedi_cmd *cmd)
+{
+       qedi->itt_map[tid].itt = proto_itt;
+       qedi->itt_map[tid].p_cmd = cmd;
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
+                 qedi->itt_map[tid].itt);
+}
+
+void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
+{
+       u16 i;
+
+       for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
+               if (qedi->itt_map[i].itt == itt) {
+                       *tid = i;
+                       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                                 "Ref itt=0x%x, found at tid=0x%x\n",
+                                 itt, *tid);
+                       return;
+               }
+       }
+
+       WARN_ON(1);
+}
+
+void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
+{
+       *proto_itt = qedi->itt_map[tid].itt;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
+                 "Get itt map tid [0x%x with proto itt[0x%x]",
+                 tid, *proto_itt);
+}
+
+struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
+{
+       struct qedi_cmd *cmd = NULL;
+
+       if (tid >= MAX_ISCSI_TASK_ENTRIES)
+               return NULL;
+
+       cmd = qedi->itt_map[tid].p_cmd;
+       if (!cmd || cmd->task_id != tid)
+               return NULL;
+
+       qedi->itt_map[tid].p_cmd = NULL;
+
+       return cmd;
+}
+
+static int qedi_alloc_itt(struct qedi_ctx *qedi)
+{
+       qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
+                               sizeof(struct qedi_itt_map), GFP_KERNEL);
+       if (!qedi->itt_map) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Unable to allocate itt map array memory\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void qedi_free_itt(struct qedi_ctx *qedi)
+{
+       kfree(qedi->itt_map);
+}
+
+static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
+       .rx_cb = qedi_ll2_rx,
+       .tx_cb = NULL,
+};
+
+static int qedi_percpu_io_thread(void *arg)
+{
+       struct qedi_percpu_s *p = arg;
+       struct qedi_work *work, *tmp;
+       unsigned long flags;
+       LIST_HEAD(work_list);
+
+       set_user_nice(current, -20);
+
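+       /* Splice pending work off the shared per-CPU list so the CQEs can
+        * be processed with p_work_lock dropped.
+        */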
+       while (!kthread_should_stop()) {
+               spin_lock_irqsave(&p->p_work_lock, flags);
+               while (!list_empty(&p->work_list)) {
+                       list_splice_init(&p->work_list, &work_list);
+                       spin_unlock_irqrestore(&p->p_work_lock, flags);
+
+                       list_for_each_entry_safe(work, tmp, &work_list, list) {
+                               list_del_init(&work->list);
+                               qedi_fp_process_cqes(work);
+                               if (!work->is_solicited)
+                                       kfree(work);
+                       }
+                       cond_resched();
+                       spin_lock_irqsave(&p->p_work_lock, flags);
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_unlock_irqrestore(&p->p_work_lock, flags);
+               schedule();
+       }
+       __set_current_state(TASK_RUNNING);
+
+       return 0;
+}
+
+static void qedi_percpu_thread_create(unsigned int cpu)
+{
+       struct qedi_percpu_s *p;
+       struct task_struct *thread;
+
+       p = &per_cpu(qedi_percpu, cpu);
+
+       thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
+                                       cpu_to_node(cpu),
+                                       "qedi_thread/%d", cpu);
+       if (likely(!IS_ERR(thread))) {
+               kthread_bind(thread, cpu);
+               p->iothread = thread;
+               wake_up_process(thread);
+       }
+}
+
+static void qedi_percpu_thread_destroy(unsigned int cpu)
+{
+       struct qedi_percpu_s *p;
+       struct task_struct *thread;
+       struct qedi_work *work, *tmp;
+
+       p = &per_cpu(qedi_percpu, cpu);
+       spin_lock_bh(&p->p_work_lock);
+       thread = p->iothread;
+       p->iothread = NULL;
+
+       list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+               list_del_init(&work->list);
+               qedi_fp_process_cqes(work);
+               if (!work->is_solicited)
+                       kfree(work);
+       }
+
+       spin_unlock_bh(&p->p_work_lock);
+       if (thread)
+               kthread_stop(thread);
+}
+
+static int qedi_cpu_callback(struct notifier_block *nfb,
+                            unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+               QEDI_ERR(NULL, "CPU %d online.\n", cpu);
+               qedi_percpu_thread_create(cpu);
+               break;
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               QEDI_ERR(NULL, "CPU %d offline.\n", cpu);
+               qedi_percpu_thread_destroy(cpu);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block qedi_cpu_notifier = {
+       .notifier_call = qedi_cpu_callback,
+};
+
+void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
+{
+       struct qed_ll2_params params;
+
+       qedi_recover_all_conns(qedi);
+
+       qedi_ops->ll2->stop(qedi->cdev);
+       qedi_ll2_free_skbs(qedi);
+
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
+                 qedi->ll2_mtu, mtu);
+       memset(&params, 0, sizeof(params));
+       qedi->ll2_mtu = mtu;
+       params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
+       params.drop_ttl0_packets = 0;
+       params.rx_vlan_stripping = 1;
+       ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+       qedi_ops->ll2->start(qedi->cdev, &params);
+}
+
+static void __qedi_remove(struct pci_dev *pdev, int mode)
+{
+       struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+       if (qedi->tmf_thread) {
+               flush_workqueue(qedi->tmf_thread);
+               destroy_workqueue(qedi->tmf_thread);
+               qedi->tmf_thread = NULL;
+       }
+
+       if (qedi->offload_thread) {
+               flush_workqueue(qedi->offload_thread);
+               destroy_workqueue(qedi->offload_thread);
+               qedi->offload_thread = NULL;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+       if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
+               qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+       qedi_sync_free_irqs(qedi);
+
+       if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+               qedi_ops->stop(qedi->cdev);
+               qedi_ops->ll2->stop(qedi->cdev);
+       }
+
+       if (mode == QEDI_MODE_NORMAL)
+               qedi_free_iscsi_pf_param(qedi);
+
+       if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+               qedi_ops->common->slowpath_stop(qedi->cdev);
+               qedi_ops->common->remove(qedi->cdev);
+       }
+
+       qedi_destroy_fp(qedi);
+
+       if (mode == QEDI_MODE_NORMAL) {
+               qedi_release_cid_que(qedi);
+               qedi_cm_free_mem(qedi);
+               qedi_free_uio(qedi->udev);
+               qedi_free_itt(qedi);
+
+               iscsi_host_remove(qedi->shost);
+               iscsi_host_free(qedi->shost);
+
+               if (qedi->ll2_recv_thread) {
+                       kthread_stop(qedi->ll2_recv_thread);
+                       qedi->ll2_recv_thread = NULL;
+               }
+               qedi_ll2_free_skbs(qedi);
+       }
+}
+
+static int __qedi_probe(struct pci_dev *pdev, int mode)
+{
+       struct qedi_ctx *qedi;
+       struct qed_ll2_params params;
+       u32 dp_module = 0;
+       u8 dp_level = 0;
+       bool is_vf = false;
+       char host_buf[16];
+       struct qed_link_params link_params;
+       struct qed_slowpath_params sp_params;
+       struct qed_probe_params qed_params;
+       void *task_start, *task_end;
+       int rc;
+       u16 tmp;
+
+       if (mode != QEDI_MODE_RECOVERY) {
+               qedi = qedi_host_alloc(pdev);
+               if (!qedi) {
+                       rc = -ENOMEM;
+                       goto exit_probe;
+               }
+       } else {
+               qedi = pci_get_drvdata(pdev);
+       }
+
+       memset(&qed_params, 0, sizeof(qed_params));
+       qed_params.protocol = QED_PROTOCOL_ISCSI;
+       qed_params.dp_module = dp_module;
+       qed_params.dp_level = dp_level;
+       qed_params.is_vf = is_vf;
+       qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
+       if (!qedi->cdev) {
+               rc = -ENODEV;
+               QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
+               goto free_host;
+       }
+
+       qedi->msix_count = MAX_NUM_MSIX_PF;
+       atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+
+       if (mode != QEDI_MODE_RECOVERY) {
+               rc = qedi_set_iscsi_pf_param(qedi);
+               if (rc) {
+                       rc = -ENOMEM;
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Set iSCSI pf param fail\n");
+                       goto free_host;
+               }
+       }
+
+       qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+       rc = qedi_prepare_fp(qedi);
+       if (rc) {
+               QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
+               goto free_pf_params;
+       }
+
+       /* Start the Slowpath-process */
+       memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
+       sp_params.int_mode = QED_INT_MODE_MSIX;
+       sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
+       sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
+       sp_params.drv_rev = QEDI_DRIVER_REV_VER;
+       sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
+       strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
+       rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
+       if (rc) {
+               QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
+               goto stop_hw;
+       }
+
+       /* update_pf_params needs to be called before and after slowpath
+        * start
+        */
+       qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
+
+       rc = qedi_setup_int(qedi);
+       if (rc)
+               goto stop_iscsi_func;
+
+       qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);
+
+       /* Learn information crucial for qedi to progress */
+       rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
+       if (rc)
+               goto stop_iscsi_func;
+
+       /* Record BDQ producer doorbell addresses */
+       qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
+       qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "BDQ primary_prod=%p secondary_prod=%p.\n",
+                 qedi->bdq_primary_prod,
+                 qedi->bdq_secondary_prod);
+
+       /*
+        * We need to write the number of BDs in the BDQ we've preallocated so
+        * the f/w will do a prefetch and we'll get an unsolicited CQE when a
+        * packet arrives.
+        */
+       qedi->bdq_prod_idx = QEDI_BDQ_NUM;
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "Writing %d to primary and secondary BDQ doorbell registers.\n",
+                 qedi->bdq_prod_idx);
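+       /* Read each doorbell back to flush the posted write to the device */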
+       writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
+       tmp = readw(qedi->bdq_primary_prod);
+       writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
+       tmp = readw(qedi->bdq_secondary_prod);
+
+       ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
+                 qedi->mac);
+
+       sprintf(host_buf, "host_%d", qedi->shost->host_no);
+       qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+
+       qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
+
+       memset(&params, 0, sizeof(params));
+       params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
+       qedi->ll2_mtu = DEF_PATH_MTU;
+       params.drop_ttl0_packets = 0;
+       params.rx_vlan_stripping = 1;
+       ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
+
+       if (mode != QEDI_MODE_RECOVERY) {
+               /* set up rx path */
+               INIT_LIST_HEAD(&qedi->ll2_skb_list);
+               spin_lock_init(&qedi->ll2_lock);
+               /* start qedi context */
+               spin_lock_init(&qedi->hba_lock);
+               spin_lock_init(&qedi->task_idx_lock);
+       }
+       qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
+       qedi_ops->ll2->start(qedi->cdev, &params);
+
+       if (mode != QEDI_MODE_RECOVERY) {
+               qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
+                                                   (void *)qedi,
+                                                   "qedi_ll2_thread");
+       }
+
+       rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
+                            qedi, qedi_iscsi_event_cb);
+       if (rc) {
+               rc = -ENODEV;
+               QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
+               goto stop_slowpath;
+       }
+
+       task_start = qedi_get_task_mem(&qedi->tasks, 0);
+       task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
+                 "Task context start=%p, end=%p block_size=%u.\n",
+                  task_start, task_end, qedi->tasks.size);
+
+       memset(&link_params, 0, sizeof(link_params));
+       link_params.link_up = true;
+       rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
+       if (rc) {
+               QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
+               atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
+                          &qedi_dbg_fops);
+#endif
+       QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                 "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
+                 QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
+                 FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+
+       if (mode == QEDI_MODE_NORMAL) {
+               if (iscsi_host_add(qedi->shost, &pdev->dev)) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not add iscsi host\n");
+                       rc = -ENOMEM;
+                       goto remove_host;
+               }
+
+               /* Allocate uio buffers */
+               rc = qedi_alloc_uio_rings(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "UIO alloc ring failed err=%d\n", rc);
+                       goto remove_host;
+               }
+
+               rc = qedi_init_uio(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "UIO init failed, err=%d\n", rc);
+                       goto free_uio;
+               }
+
+               /* Set up the connection ID (CID) queue referenced from each iscsi_conn */
+               rc = qedi_setup_cid_que(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not setup cid que\n");
+                       goto free_uio;
+               }
+
+               rc = qedi_cm_alloc_mem(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not alloc cm memory\n");
+                       goto free_cid_que;
+               }
+
+               rc = qedi_alloc_itt(qedi);
+               if (rc) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Could not alloc itt memory\n");
+                       goto free_cid_que;
+               }
+
+               sprintf(host_buf, "host_%d", qedi->shost->host_no);
+               qedi->tmf_thread = create_singlethread_workqueue(host_buf);
+               if (!qedi->tmf_thread) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Unable to start tmf thread!\n");
+                       rc = -ENODEV;
+                       goto free_cid_que;
+               }
+
+               sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
+               qedi->offload_thread = create_workqueue(host_buf);
+               if (!qedi->offload_thread) {
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Unable to start offload thread!\n");
+                       rc = -ENODEV;
+                       goto free_cid_que;
+               }
+
+               /* F/w needs 1st task context memory entry for performance */
+               set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
+               atomic_set(&qedi->num_offloads, 0);
+       }
+
+       return 0;
+
+free_cid_que:
+       qedi_release_cid_que(qedi);
+free_uio:
+       qedi_free_uio(qedi->udev);
+remove_host:
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_host_exit(&qedi->dbg_ctx);
+#endif
+       iscsi_host_remove(qedi->shost);
+stop_iscsi_func:
+       qedi_ops->stop(qedi->cdev);
+stop_slowpath:
+       qedi_ops->common->slowpath_stop(qedi->cdev);
+stop_hw:
+       qedi_ops->common->remove(qedi->cdev);
+free_pf_params:
+       qedi_free_iscsi_pf_param(qedi);
+free_host:
+       iscsi_host_free(qedi->shost);
+exit_probe:
+       return rc;
+}
+
+static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       return __qedi_probe(pdev, QEDI_MODE_NORMAL);
+}
+
+static void qedi_remove(struct pci_dev *pdev)
+{
+       __qedi_remove(pdev, QEDI_MODE_NORMAL);
+}
+
+static const struct pci_device_id qedi_pci_tbl[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+       { 0 },
+};
+MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
+
+static struct pci_driver qedi_pci_driver = {
+       .name = QEDI_MODULE_NAME,
+       .id_table = qedi_pci_tbl,
+       .probe = qedi_probe,
+       .remove = qedi_remove,
+};
+
+static int __init qedi_init(void)
+{
+       int rc = 0;
+       int ret;
+       struct qedi_percpu_s *p;
+       unsigned int cpu = 0;
+
+       qedi_ops = qed_get_iscsi_ops();
+       if (!qedi_ops) {
+               QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
+               rc = -EINVAL;
+               goto exit_qedi_init_0;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_init("qedi");
+#endif
+
+       qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
+       if (!qedi_scsi_transport) {
+               QEDI_ERR(NULL, "Could not register qedi transport");
+               rc = -ENOMEM;
+               goto exit_qedi_init_1;
+       }
+
+       register_hotcpu_notifier(&qedi_cpu_notifier);
+
+       ret = pci_register_driver(&qedi_pci_driver);
+       if (ret) {
+               QEDI_ERR(NULL, "Failed to register driver\n");
+               rc = ret;
+               goto exit_qedi_init_2;
+       }
+
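+       /* Initialize per-CPU work lists; I/O threads for CPUs already online are spawned below */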
+       for_each_possible_cpu(cpu) {
+               p = &per_cpu(qedi_percpu, cpu);
+               INIT_LIST_HEAD(&p->work_list);
+               spin_lock_init(&p->p_work_lock);
+               p->iothread = NULL;
+       }
+
+       for_each_online_cpu(cpu)
+               qedi_percpu_thread_create(cpu);
+
+       return rc;
+
+exit_qedi_init_2:
+       unregister_hotcpu_notifier(&qedi_cpu_notifier);
+       iscsi_unregister_transport(&qedi_iscsi_transport);
+exit_qedi_init_1:
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_exit();
+#endif
+       qed_put_iscsi_ops();
+exit_qedi_init_0:
+       return rc;
+}
+
+static void __exit qedi_cleanup(void)
+{
+       unsigned int cpu = 0;
+
+       for_each_online_cpu(cpu)
+               qedi_percpu_thread_destroy(cpu);
+
+       pci_unregister_driver(&qedi_pci_driver);
+       unregister_hotcpu_notifier(&qedi_cpu_notifier);
+       iscsi_unregister_transport(&qedi_iscsi_transport);
+
+#ifdef CONFIG_DEBUG_FS
+       qedi_dbg_exit();
+#endif
+       qed_put_iscsi_ops();
+}
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_VERSION(QEDI_MODULE_VERSION);
+module_init(qedi_init);
+module_exit(qedi_cleanup);
diff --git a/drivers/scsi/qedi/qedi_sysfs.c b/drivers/scsi/qedi/qedi_sysfs.c
new file mode 100644 (file)
index 0000000..b10c48b
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include "qedi.h"
+#include "qedi_gbl.h"
+#include "qedi_iscsi.h"
+#include "qedi_dbg.h"
+
+static inline struct qedi_ctx *qedi_dev_to_hba(struct device *dev)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+
+       return iscsi_host_priv(shost);
+}
+
+static ssize_t qedi_show_port_state(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+
+       if (atomic_read(&qedi->link_state) == QEDI_LINK_UP)
+               return sprintf(buf, "Online\n");
+       else
+               return sprintf(buf, "Linkdown\n");
+}
+
+static ssize_t qedi_show_speed(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct qedi_ctx *qedi = qedi_dev_to_hba(dev);
+       struct qed_link_output if_link;
+
+       qedi_ops->common->get_link(qedi->cdev, &if_link);
+
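+       /* qed reports if_link.speed in Mbps; convert to Gbit for display */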
+       return sprintf(buf, "%d Gbit\n", if_link.speed / 1000);
+}
+
+static DEVICE_ATTR(port_state, 0444, qedi_show_port_state, NULL);
+static DEVICE_ATTR(speed, 0444, qedi_show_speed, NULL);
+
+struct device_attribute *qedi_shost_attrs[] = {
+       &dev_attr_port_state,
+       &dev_attr_speed,
+       NULL
+};
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
new file mode 100644 (file)
index 0000000..9543a1b
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#define QEDI_MODULE_VERSION    "8.10.3.0"
+#define QEDI_DRIVER_MAJOR_VER          8
+#define QEDI_DRIVER_MINOR_VER          10
+#define QEDI_DRIVER_REV_VER            3
+#define QEDI_DRIVER_ENG_VER            0
index fe7469c901f76ac2d46006eeb208f3c9398d3ee1..47eb4d545d13c5f9b80149f162b04756108cd654 100644 (file)
@@ -1988,9 +1988,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        scsi_qla_host_t *vha = NULL;
        struct qla_hw_data *ha = base_vha->hw;
-       uint16_t options = 0;
        int     cnt;
        struct req_que *req = ha->req_q_map[0];
+       struct qla_qpair *qpair;
 
        ret = qla24xx_vport_create_req_sanity_check(fc_vport);
        if (ret) {
@@ -2075,15 +2075,9 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        qlt_vport_create(vha, ha);
        qla24xx_vport_disable(fc_vport, disable);
 
-       if (ha->flags.cpu_affinity_enabled) {
-               req = ha->req_q_map[1];
-               ql_dbg(ql_dbg_multiq, vha, 0xc000,
-                   "Request queue %p attached with "
-                   "VP[%d], cpu affinity =%d\n",
-                   req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
-               goto vport_queue;
-       } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+       if (!ql2xmqsupport || !ha->npiv_info)
                goto vport_queue;
+
        /* Create a request queue in QoS mode for the vport */
        for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
                if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
@@ -2095,20 +2089,20 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
        }
 
        if (qos) {
-               ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
-                       qos);
-               if (!ret)
+               qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+               if (!qpair)
                        ql_log(ql_log_warn, vha, 0x7084,
-                           "Can't create request queue for VP[%d]\n",
+                           "Can't create qpair for VP[%d]\n",
                            vha->vp_idx);
                else {
                        ql_dbg(ql_dbg_multiq, vha, 0xc001,
-                           "Request Que:%d Q0s: %d) created for VP[%d]\n",
-                           ret, qos, vha->vp_idx);
+                           "Queue pair: %d Qos: %d) created for VP[%d]\n",
+                           qpair->id, qos, vha->vp_idx);
                        ql_dbg(ql_dbg_user, vha, 0x7085,
-                           "Request Que:%d Q0s: %d) created for VP[%d]\n",
-                           ret, qos, vha->vp_idx);
-                       req = ha->req_q_map[ret];
+                           "Queue Pair: %d Qos: %d) created for VP[%d]\n",
+                           qpair->id, qos, vha->vp_idx);
+                       req = qpair->req;
+                       vha->qpair = qpair;
                }
        }
 
@@ -2162,10 +2156,10 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
        clear_bit(vha->vp_idx, ha->vp_idx_map);
        mutex_unlock(&ha->vport_lock);
 
-       if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
-               if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
+       if (vha->qpair->vp_idx == vha->vp_idx) {
+               if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
                        ql_log(ql_log_warn, vha, 0x7087,
-                           "Queue delete failed.\n");
+                           "Queue Pair delete failed.\n");
        }
 
        ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
index 45af34ddc43297d06c8c7cf44425a1a7cee4cc7f..21d9fb7fc88796cbaa09fbfa160b9b20c17e2015 100644 (file)
@@ -11,7 +11,7 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0191       | 0x0146         |
+ * | Module Init and Probe        |       0x0193       | 0x0146         |
  * |                              |                    | 0x015b-0x0160 |
  * |                              |                    | 0x016e                |
  * | Mailbox commands             |       0x1199       | 0x1193                |
@@ -58,7 +58,7 @@
  * |                              |                    | 0xb13a,0xb142  |
  * |                              |                    | 0xb13c-0xb140  |
  * |                              |                    | 0xb149                |
- * | MultiQ                       |       0xc00c       |               |
+ * | MultiQ                       |       0xc010       |               |
  * | Misc                         |       0xd301       | 0xd031-0xd0ff |
  * |                              |                    | 0xd101-0xd1fe |
  * |                              |                    | 0xd214-0xd2fe |
index 5236e3f2a06a432740316cb74e1dea708c5fc439..f7df01b76714e09dc919cbb9660b66bed603d6bc 100644 (file)
@@ -401,6 +401,7 @@ typedef struct srb {
        uint16_t type;
        char *name;
        int iocbs;
+       struct qla_qpair *qpair;
        union {
                struct srb_iocb iocb_cmd;
                struct bsg_job *bsg_job;
@@ -2719,6 +2720,7 @@ struct isp_operations {
 
        int (*get_flash_version) (struct scsi_qla_host *, void *);
        int (*start_scsi) (srb_t *);
+       int (*start_scsi_mq) (srb_t *);
        int (*abort_isp) (struct scsi_qla_host *);
        int (*iospace_config)(struct qla_hw_data*);
        int (*initialize_adapter)(struct scsi_qla_host *);
@@ -2730,8 +2732,10 @@ struct isp_operations {
 #define QLA_MSIX_FW_MODE(m)    (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
 #define QLA_MSIX_FW_MODE_1(m)  (QLA_MSIX_FW_MODE(m) == 1)
 
-#define QLA_MSIX_DEFAULT       0x00
-#define QLA_MSIX_RSP_Q         0x01
+#define QLA_MSIX_DEFAULT               0x00
+#define QLA_MSIX_RSP_Q                 0x01
+#define QLA_ATIO_VECTOR                0x02
+#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q    0x03
 
 #define QLA_MIDX_DEFAULT       0
 #define QLA_MIDX_RSP_Q         1
@@ -2745,9 +2749,11 @@ struct scsi_qla_host;
 
 struct qla_msix_entry {
        int have_irq;
+       int in_use;
        uint32_t vector;
        uint16_t entry;
-       struct rsp_que *rsp;
+       char name[30];
+       void *handle;
        struct irq_affinity_notify irq_notify;
        int cpuid;
 };
@@ -2872,7 +2878,6 @@ struct rsp_que {
        struct qla_msix_entry *msix;
        struct req_que *req;
        srb_t *status_srb; /* status continuation entry */
-       struct work_struct q_work;
 
        dma_addr_t  dma_fx00;
        response_t *ring_fx00;
@@ -2909,6 +2914,37 @@ struct req_que {
        uint8_t req_pkt[REQUEST_ENTRY_SIZE];
 };
 
+/* Queue pair data structure */
+struct qla_qpair {
+       spinlock_t qp_lock;
+       atomic_t ref_count;
+       /*
+        * Distill these fields down to 'online=0/1':
+        *  ha->flags.eeh_busy
+        *  ha->flags.pci_channel_io_perm_failure
+        *  base_vha->loop_state
+        */
+       uint32_t online:1;
+       /* move vha->flags.difdix_supported here */
+       uint32_t difdix_supported:1;
+       uint32_t delete_in_progress:1;
+
+       uint16_t id;                    /* qp number used with FW */
+       uint16_t num_active_cmd;        /* cmds down at firmware */
+       cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */
+       uint16_t vp_idx;                /* vport ID */
+
+       mempool_t *srb_mempool;
+
+       /* TODO: move the queues here instead of keeping pointers */
+       struct req_que *req;
+       struct rsp_que *rsp;
+       struct atio_que *atio;
+       struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
+       struct qla_hw_data *hw;
+       struct work_struct q_work;
+       struct list_head qp_list_elem; /* vha->qp_list */
+};
+
 /* Place holder for FW buffer parameters */
 struct qlfc_fw {
        void *fw_buf;
@@ -3004,7 +3040,6 @@ struct qla_hw_data {
                uint32_t        chip_reset_done         :1;
                uint32_t        running_gold_fw         :1;
                uint32_t        eeh_busy                :1;
-               uint32_t        cpu_affinity_enabled    :1;
                uint32_t        disable_msix_handshake  :1;
                uint32_t        fcp_prio_enabled        :1;
                uint32_t        isp82xx_fw_hung:1;
@@ -3061,10 +3096,15 @@ struct qla_hw_data {
        uint8_t         mqenable;
        struct req_que **req_q_map;
        struct rsp_que **rsp_q_map;
+       struct qla_qpair **queue_pair_map;
        unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
        unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+       unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
+               / sizeof(unsigned long)];
        uint8_t         max_req_queues;
        uint8_t         max_rsp_queues;
+       uint8_t         max_qpairs;
+       struct qla_qpair *base_qpair;
        struct qla_npiv_entry *npiv_info;
        uint16_t        nvram_npiv_size;
 
@@ -3328,6 +3368,7 @@ struct qla_hw_data {
 
        struct mutex vport_lock;        /* Virtual port synchronization */
        spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
+       struct mutex mq_lock;        /* multi-queue synchronization */
        struct completion mbx_cmd_comp; /* Serialize mbx access */
        struct completion mbx_intr_comp;  /* Used for completion notification */
        struct completion dcbx_comp;    /* For set port config notification */
@@ -3608,6 +3649,7 @@ typedef struct scsi_qla_host {
 
                uint32_t        fw_tgt_reported:1;
                uint32_t        bbcr_enable:1;
+               uint32_t        qpairs_available:1;
        } flags;
 
        atomic_t        loop_state;
@@ -3646,6 +3688,7 @@ typedef struct scsi_qla_host {
 #define FX00_TARGET_SCAN       24
 #define FX00_CRITEMP_RECOVERY  25
 #define FX00_HOST_INFO_RESEND  26
+#define QPAIR_ONLINE_CHECK_NEEDED      27
 
        unsigned long   pci_flags;
 #define PFLG_DISCONNECTED      0       /* PCI device removed */
@@ -3704,10 +3747,13 @@ typedef struct scsi_qla_host {
        /* List of pending PLOGI acks, protected by hw lock */
        struct list_head        plogi_ack_list;
 
+       struct list_head        qp_list;
+
        uint32_t        vp_abort_cnt;
 
        struct fc_vport *fc_vport;      /* holds fc_vport * for each vport */
        uint16_t        vp_idx;         /* vport ID */
+       struct qla_qpair *qpair;        /* base qpair */
 
        unsigned long           vp_flags;
 #define VP_IDX_ACQUIRED                0       /* bit no 0 */
@@ -3763,6 +3809,23 @@ struct qla_tgt_vp_map {
        scsi_qla_host_t *vha;
 };
 
+struct qla2_sgx {
+       dma_addr_t              dma_addr;       /* OUT */
+       uint32_t                dma_len;        /* OUT */
+
+       uint32_t                tot_bytes;      /* IN */
+       struct scatterlist      *cur_sg;        /* IN */
+
+       /* for book keeping, bzero on initial invocation */
+       uint32_t                bytes_consumed;
+       uint32_t                num_bytes;
+       uint32_t                tot_partial;
+
+       /* for debugging */
+       uint32_t                num_sg;
+       srb_t                   *sp;
+};
+
 /*
  * Macros to help code, maintain, etc.
  */
@@ -3775,21 +3838,34 @@ struct qla_tgt_vp_map {
                (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
                         test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
 
-#define QLA_VHA_MARK_BUSY(__vha, __bail) do {               \
-       atomic_inc(&__vha->vref_count);                      \
-       mb();                                                \
-       if (__vha->flags.delete_progress) {                  \
-               atomic_dec(&__vha->vref_count);              \
-               __bail = 1;                                  \
-       } else {                                             \
-               __bail = 0;                                  \
-       }                                                    \
+#define QLA_VHA_MARK_BUSY(__vha, __bail) do {          \
+       atomic_inc(&__vha->vref_count);                 \
+       mb();                                           \
+       if (__vha->flags.delete_progress) {             \
+               atomic_dec(&__vha->vref_count);         \
+               __bail = 1;                             \
+       } else {                                        \
+               __bail = 0;                             \
+       }                                               \
 } while (0)
 
-#define QLA_VHA_MARK_NOT_BUSY(__vha) do {                   \
-       atomic_dec(&__vha->vref_count);                      \
+#define QLA_VHA_MARK_NOT_BUSY(__vha)                   \
+       atomic_dec(&__vha->vref_count);
+
+#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do {      \
+       atomic_inc(&__qpair->ref_count);                \
+       mb();                                           \
+       if (__qpair->delete_in_progress) {              \
+               atomic_dec(&__qpair->ref_count);        \
+               __bail = 1;                             \
+       } else {                                        \
+               __bail = 0;                             \
+       }                                               \
 } while (0)
 
+#define QLA_QPAIR_MARK_NOT_BUSY(__qpair)               \
+       atomic_dec(&__qpair->ref_count);
+
 /*
  * qla2x00 local function return status codes
  */
index c51d9f3359e3286f272a15a8442c8b7a2fcc1a13..afa0116a163b12b5f8663a52bee5cc2ec846541d 100644 (file)
@@ -91,12 +91,17 @@ extern int
 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
 extern int qla2x00_init_rings(scsi_qla_host_t *);
 extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+       int, int);
+extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
 
 /*
  * Global Data in qla_os.c source file.
  */
 extern char qla2x00_version_str[];
 
+extern struct kmem_cache *srb_cachep;
+
 extern int ql2xlogintimeout;
 extern int qlport_down_retry;
 extern int ql2xplogiabsentdevice;
@@ -105,8 +110,7 @@ extern int ql2xfdmienable;
 extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
 extern int ql2xiidmaenable;
-extern int ql2xmaxqueues;
-extern int ql2xmultique_tag;
+extern int ql2xmqsupport;
 extern int ql2xfwloadbin;
 extern int ql2xetsenable;
 extern int ql2xshiftctondsd;
@@ -172,6 +176,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla2x00_sp_compl(void *, void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *, void *);
+extern void qla2xxx_qpair_sp_compl(void *, void *, int);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -220,6 +227,8 @@ extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
 extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
+extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
+       uint16_t, struct req_que *);
 extern int qla2x00_start_scsi(srb_t *sp);
 extern int qla24xx_start_scsi(srb_t *sp);
 int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
@@ -227,6 +236,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
 extern int qla2x00_start_sp(srb_t *);
 extern int qla24xx_dif_start_scsi(srb_t *);
 extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
+extern int qla2xxx_dif_start_scsi_mq(srb_t *);
 extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
@@ -237,7 +247,10 @@ extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
        uint32_t *, uint16_t, struct qla_tgt_cmd *);
 extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
        uint32_t *, uint16_t, struct qla_tgt_cmd *);
-
+extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
+extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
+       struct cmd_type_crc_2 *, uint16_t, uint16_t, uint16_t);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -468,6 +481,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
 extern void
 qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
        uint32_t);
+extern irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id);
 
 /*
  * Global Function Prototypes in qla_sup.c source file.
@@ -603,15 +618,18 @@ extern int qla2x00_dfs_setup(scsi_qla_host_t *);
 extern int qla2x00_dfs_remove(scsi_qla_host_t *);
 
 /* Global function prototypes for multi-q */
-extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
+       struct qla_msix_entry *, int);
 extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
 extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
        uint16_t, int, uint8_t);
 extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
-       uint16_t, int);
+       uint16_t, struct qla_qpair *);
+
 extern void qla2x00_init_response_q_entries(struct rsp_que *);
 extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_delete_queues(struct scsi_qla_host *);
 extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
 extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
index 5b09296b46a3058f9c938990274d158b508af884..632d5f30386ab0ae529036c292f3c1c8e64162ca 100644 (file)
@@ -1769,8 +1769,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
        if (req->outstanding_cmds)
                return QLA_SUCCESS;
 
-       if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
-           (ql2xmultique_tag || ql2xmaxqueues > 1)))
+       if (!IS_FWI2_CAPABLE(ha))
                req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
        else {
                if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
@@ -4248,10 +4247,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
        struct req_que *req;
        struct rsp_que *rsp;
 
-       if (vha->hw->flags.cpu_affinity_enabled)
-               req = vha->hw->req_q_map[0];
-       else
-               req = vha->req;
+       req = vha->req;
        rsp = req->rsp;
 
        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -6040,10 +6036,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
                return -EINVAL;
 
        rval = qla2x00_fw_ready(base_vha);
-       if (ha->flags.cpu_affinity_enabled)
-               req = ha->req_q_map[0];
+       if (vha->qpair)
+               req = vha->qpair->req;
        else
-               req = vha->req;
+               req = ha->req_q_map[0];
        rsp = req->rsp;
 
        if (rval == QLA_SUCCESS) {
@@ -6725,3 +6721,162 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
 
        return ret;
 }
+
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
+       int vp_idx)
+{
+       int rsp_id = 0;
+       int req_id = 0;
+       int i;
+       struct qla_hw_data *ha = vha->hw;
+       uint16_t qpair_id = 0;
+       struct qla_qpair *qpair = NULL;
+       struct qla_msix_entry *msix;
+
+       if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
+               ql_log(ql_log_warn, vha, 0x0181,
+                   "FW/Driver is not multi-queue capable.\n");
+               return NULL;
+       }
+
+       if (ql2xmqsupport) {
+               qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+               if (!qpair) {
+                       ql_log(ql_log_warn, vha, 0x0182,
+                           "Failed to allocate memory for queue pair.\n");
+                       return NULL;
+               }
+
+               qpair->hw = vha->hw;
+
+               /* Assign an available queue pair id */
+               mutex_lock(&ha->mq_lock);
+               qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
+               if (qpair_id >= ha->max_qpairs) {
+                       mutex_unlock(&ha->mq_lock);
+                       ql_log(ql_log_warn, vha, 0x0183,
+                           "No resources to create additional q pair.\n");
+                       goto fail_qid_map;
+               }
+               set_bit(qpair_id, ha->qpair_qid_map);
+               ha->queue_pair_map[qpair_id] = qpair;
+               qpair->id = qpair_id;
+               qpair->vp_idx = vp_idx;
+
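+               /* Grab the first MSI-X vector not already claimed by another queue */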
+               for (i = 0; i < ha->msix_count; i++) {
+                       msix = &ha->msix_entries[i];
+                       if (msix->in_use)
+                               continue;
+                       qpair->msix = msix;
+                       ql_dbg(ql_dbg_multiq, vha, 0xc00f,
+                           "Vector %x selected for qpair\n", msix->vector);
+                       break;
+               }
+               if (!qpair->msix) {
+                       ql_log(ql_log_warn, vha, 0x0184,
+                           "Out of MSI-X vectors!.\n");
+                       goto fail_msix;
+               }
+
+               qpair->msix->in_use = 1;
+               list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+
+               mutex_unlock(&ha->mq_lock);
+
+               /* Create response queue first */
+               rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
+               if (!rsp_id) {
+                       ql_log(ql_log_warn, vha, 0x0185,
+                           "Failed to create response queue.\n");
+                       goto fail_rsp;
+               }
+
+               qpair->rsp = ha->rsp_q_map[rsp_id];
+
+               /* Create request queue */
+               req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
+               if (!req_id) {
+                       ql_log(ql_log_warn, vha, 0x0186,
+                           "Failed to create request queue.\n");
+                       goto fail_req;
+               }
+
+               qpair->req = ha->req_q_map[req_id];
+               qpair->rsp->req = qpair->req;
+
+               if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
+                       if (ha->fw_attributes & BIT_4)
+                               qpair->difdix_supported = 1;
+               }
+
+               qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+               if (!qpair->srb_mempool) {
+                       ql_log(ql_log_warn, vha, 0x0191,
+                           "Failed to create srb mempool for qpair %d\n",
+                           qpair->id);
+                       goto fail_mempool;
+               }
+
+               /* Mark as online */
+               qpair->online = 1;
+
+               if (!vha->flags.qpairs_available)
+                       vha->flags.qpairs_available = 1;
+
+               ql_dbg(ql_dbg_multiq, vha, 0xc00d,
+                   "Request/Response queue pair created, id %d\n",
+                   qpair->id);
+               ql_dbg(ql_dbg_init, vha, 0x0187,
+                   "Request/Response queue pair created, id %d\n",
+                   qpair->id);
+       }
+       return qpair;
+
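+/* Error unwind: labels fall through, releasing earlier allocations in reverse order */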
+fail_mempool:
+fail_req:
+       qla25xx_delete_rsp_que(vha, qpair->rsp);
+fail_rsp:
+       mutex_lock(&ha->mq_lock);
+       qpair->msix->in_use = 0;
+       list_del(&qpair->qp_list_elem);
+       if (list_empty(&vha->qp_list))
+               vha->flags.qpairs_available = 0;
+fail_msix:
+       ha->queue_pair_map[qpair_id] = NULL;
+       clear_bit(qpair_id, ha->qpair_qid_map);
+       mutex_unlock(&ha->mq_lock);
+fail_qid_map:
+       kfree(qpair);
+       return NULL;
+}
+
+int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
+{
+       int ret;
+       struct qla_hw_data *ha = qpair->hw;
+
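+       /* Block new references, then wait for in-flight users to drain */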
+       qpair->delete_in_progress = 1;
+       while (atomic_read(&qpair->ref_count))
+               msleep(500);
+
+       ret = qla25xx_delete_req_que(vha, qpair->req);
+       if (ret != QLA_SUCCESS)
+               goto fail;
+       ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
+       if (ret != QLA_SUCCESS)
+               goto fail;
+
+       mutex_lock(&ha->mq_lock);
+       ha->queue_pair_map[qpair->id] = NULL;
+       clear_bit(qpair->id, ha->qpair_qid_map);
+       list_del(&qpair->qp_list_elem);
+       if (list_empty(&vha->qp_list))
+               vha->flags.qpairs_available = 0;
+       mempool_destroy(qpair->srb_mempool);
+       kfree(qpair);
+       mutex_unlock(&ha->mq_lock);
+
+       return QLA_SUCCESS;
+fail:
+       return ret;
+}
index edc48f3b8230cd60b9b3df25bbf87394f5ad6676..44e404583c86fca78d50be48c12129c2f89ad695 100644 (file)
@@ -215,6 +215,36 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
            test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
 }
 
+static inline srb_t *
+qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+{
+       srb_t *sp = NULL;
+       uint8_t bail;
+
+       QLA_QPAIR_MARK_BUSY(qpair, bail);
+       if (unlikely(bail))
+               return NULL;
+
+       sp = mempool_alloc(qpair->srb_mempool, flag);
+       if (!sp)
+               goto done;
+
+       memset(sp, 0, sizeof(*sp));
+       sp->fcport = fcport;
+       sp->iocbs = 1;
+done:
+       if (!sp)
+               QLA_QPAIR_MARK_NOT_BUSY(qpair);
+       return sp;
+}
+
+static inline void
+qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
+{
+       mempool_free(sp, qpair->srb_mempool);
+       QLA_QPAIR_MARK_NOT_BUSY(qpair);
+}
+
 static inline srb_t *
 qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
 {
index 221ad89078938d37227bd385f55a5a5b74675c0b..58e49a3e1de8bcc30b448a889ab8f4b0144fd982 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <scsi/scsi_tcq.h>
 
-static void qla25xx_set_que(srb_t *, struct rsp_que **);
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
  * @cmd: SCSI command
@@ -143,7 +142,7 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
        return (cont_pkt);
 }
 
-static inline int
+inline int
 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
 {
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -693,10 +692,11 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
  * @sp: SRB command to process
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
+ * @req: pointer to request queue
  */
-static inline void
+inline void
 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
-    uint16_t tot_dsds)
+       uint16_t tot_dsds, struct req_que *req)
 {
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
@@ -745,7 +745,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -845,24 +845,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
        }
 }
 
-struct qla2_sgx {
-       dma_addr_t              dma_addr;       /* OUT */
-       uint32_t                dma_len;        /* OUT */
-
-       uint32_t                tot_bytes;      /* IN */
-       struct scatterlist      *cur_sg;        /* IN */
-
-       /* for book keeping, bzero on initial invocation */
-       uint32_t                bytes_consumed;
-       uint32_t                num_bytes;
-       uint32_t                tot_partial;
-
-       /* for debugging */
-       uint32_t                num_sg;
-       srb_t                   *sp;
-};
-
-static int
+int
 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
 {
@@ -1207,7 +1190,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
  */
-static inline int
+inline int
 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
 {
@@ -1436,8 +1419,8 @@ qla24xx_start_scsi(srb_t *sp)
        struct qla_hw_data *ha = vha->hw;
 
        /* Setup device pointers. */
-       qla25xx_set_que(sp, &rsp);
        req = vha->req;
+       rsp = req->rsp;
 
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;
@@ -1523,12 +1506,10 @@ qla24xx_start_scsi(srb_t *sp)
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
        /* Build IOCB segments */
-       qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+       qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
 
        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
-       /* Specify response queue number where completion should happen */
-       cmd_pkt->entry_status = (uint8_t) rsp->id;
        wmb();
        /* Adjust ring index. */
        req->ring_index++;
@@ -1597,9 +1578,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
        }
 
        /* Setup device pointers. */
-
-       qla25xx_set_que(sp, &rsp);
        req = vha->req;
+       rsp = req->rsp;
 
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;
@@ -1764,18 +1744,365 @@ queuing_error:
        return QLA_FUNCTION_FAILED;
 }
 
-
-static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+/**
+ * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla2xxx_start_scsi_mq(srb_t *sp)
 {
+       int             nseg;
+       unsigned long   flags;
+       uint32_t        *clr_ptr;
+       uint32_t        index;
+       uint32_t        handle;
+       struct cmd_type_7 *cmd_pkt;
+       uint16_t        cnt;
+       uint16_t        req_cnt;
+       uint16_t        tot_dsds;
+       struct req_que *req = NULL;
+       struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-       struct qla_hw_data *ha = sp->fcport->vha->hw;
-       int affinity = cmd->request->cpu;
+       struct scsi_qla_host *vha = sp->fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_qpair *qpair = sp->qpair;
+
+       /* Setup qpair pointers */
+       rsp = qpair->rsp;
+       req = qpair->req;
+
+       /* So we know we haven't pci_map'ed anything yet */
+       tot_dsds = 0;
+
+       /* Send marker if required */
+       if (vha->marker_needed != 0) {
+               if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+                   QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+               vha->marker_needed = 0;
+       }
+
+       /* Acquire qpair specific lock */
+       spin_lock_irqsave(&qpair->qp_lock, flags);
+
+       /* Check for room in outstanding command list. */
+       handle = req->current_outstanding_cmd;
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
+               handle++;
+               if (handle == req->num_outstanding_cmds)
+                       handle = 1;
+               if (!req->outstanding_cmds[handle])
+                       break;
+       }
+       if (index == req->num_outstanding_cmds)
+               goto queuing_error;
+
+       /* Map the sg table so we have an accurate count of sg entries needed */
+       if (scsi_sg_count(cmd)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+                   scsi_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+       } else
+               nseg = 0;
+
+       tot_dsds = nseg;
+       req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
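+       /* If the cached free count looks short, re-read the out pointer and recompute the available ring slots, allowing for wraparound */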
+       if (req->cnt < (req_cnt + 2)) {
+               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+                   RD_REG_DWORD_RELAXED(req->req_q_out);
+               if (req->ring_index < cnt)
+                       req->cnt = cnt - req->ring_index;
+               else
+                       req->cnt = req->length -
+                               (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+       }
+
+       /* Build command packet. */
+       req->current_outstanding_cmd = handle;
+       req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
+       cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+       req->cnt -= req_cnt;
+
+       cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+       cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+       /* Zero out remaining portion of packet. */
+       /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
+       clr_ptr = (uint32_t *)cmd_pkt + 2;
+       memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+       cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+       /* Set NPORT-ID and LUN number*/
+       cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+       cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+       cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+       cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+       cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+       int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+       host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+       cmd_pkt->task = TSK_SIMPLE;
+
+       /* Load SCSI command packet. */
+       memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+       host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+       cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+       /* Build IOCB segments */
+       qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
+
+       /* Set total data segment count. */
+       cmd_pkt->entry_count = (uint8_t)req_cnt;
+       wmb();
+       /* Adjust ring index. */
+       req->ring_index++;
+       if (req->ring_index == req->length) {
+               req->ring_index = 0;
+               req->ring_ptr = req->ring;
+       } else
+               req->ring_ptr++;
+
+       sp->flags |= SRB_DMA_VALID;
+
+       /* Set chip new ring index. */
+       WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (vha->flags.process_response_queue &&
+               rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+               qla24xx_process_response_queue(vha, rsp);
+
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
+       return QLA_SUCCESS;
+
+queuing_error:
+       if (tot_dsds)
+               scsi_dma_unmap(cmd);
+
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+       return QLA_FUNCTION_FAILED;
+}
+
+
+/**
+ * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla2xxx_dif_start_scsi_mq(srb_t *sp)
+{
+       int                     nseg;
+       unsigned long           flags;
+       uint32_t                *clr_ptr;
+       uint32_t                index;
+       uint32_t                handle;
+       uint16_t                cnt;
+       uint16_t                req_cnt = 0;
+       uint16_t                tot_dsds;
+       uint16_t                tot_prot_dsds;
+       uint16_t                fw_prot_opts = 0;
+       struct req_que          *req = NULL;
+       struct rsp_que          *rsp = NULL;
+       struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
+       struct scsi_qla_host    *vha = sp->fcport->vha;
+       struct qla_hw_data      *ha = vha->hw;
+       struct cmd_type_crc_2   *cmd_pkt;
+       uint32_t                status = 0;
+       struct qla_qpair        *qpair = sp->qpair;
+
+#define QDSS_GOT_Q_SPACE       BIT_0
+
+       /* Check for host side state */
+       if (!qpair->online) {
+               cmd->result = DID_NO_CONNECT << 16;
+               return QLA_INTERFACE_ERROR;
+       }
+
+       if (!qpair->difdix_supported &&
+               scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+               cmd->result = DID_NO_CONNECT << 16;
+               return QLA_INTERFACE_ERROR;
+       }
+
+       /* Only process protection or >16 cdb in this routine */
+       if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
+               if (cmd->cmd_len <= 16)
+                       return qla2xxx_start_scsi_mq(sp);
+       }
+
+       /* Setup qpair pointers */
+       rsp = qpair->rsp;
+       req = qpair->req;
+
+       /* So we know we haven't pci_map'ed anything yet */
+       tot_dsds = 0;
+
+       /* Send marker if required */
+       if (vha->marker_needed != 0) {
+               if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+                   QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+               vha->marker_needed = 0;
+       }
+
+       /* Acquire ring specific lock */
+       spin_lock_irqsave(&qpair->qp_lock, flags);
+
+       /* Check for room in outstanding command list. */
+       handle = req->current_outstanding_cmd;
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
+               handle++;
+               if (handle == req->num_outstanding_cmds)
+                       handle = 1;
+               if (!req->outstanding_cmds[handle])
+                       break;
+       }
+
+       if (index == req->num_outstanding_cmds)
+               goto queuing_error;
+
+       /* Compute number of required data segments */
+       /* Map the sg table so we have an accurate count of sg entries needed */
+       if (scsi_sg_count(cmd)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+                   scsi_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+               else
+                       sp->flags |= SRB_DMA_VALID;
+
+               if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+                   (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+                       struct qla2_sgx sgx;
+                       uint32_t        partial;
+
+                       memset(&sgx, 0, sizeof(struct qla2_sgx));
+                       sgx.tot_bytes = scsi_bufflen(cmd);
+                       sgx.cur_sg = scsi_sglist(cmd);
+                       sgx.sp = sp;
+
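+                       /* Count one data segment per protection-interval block for HW insert/strip */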
+                       nseg = 0;
+                       while (qla24xx_get_one_block_sg(
+                           cmd->device->sector_size, &sgx, &partial))
+                               nseg++;
+               }
+       } else
+               nseg = 0;
+
+       /* number of required data segments */
+       tot_dsds = nseg;
+
+       /* Compute number of required protection segments */
+       if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+                   scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+               else
+                       sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+               if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+                   (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+                       nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+               }
+       } else {
+               nseg = 0;
+       }
+
+       req_cnt = 1;
+       /* Total Data and protection sg segment(s) */
+       tot_prot_dsds = nseg;
+       tot_dsds += nseg;
+       if (req->cnt < (req_cnt + 2)) {
+               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+                   RD_REG_DWORD_RELAXED(req->req_q_out);
+               if (req->ring_index < cnt)
+                       req->cnt = cnt - req->ring_index;
+               else
+                       req->cnt = req->length -
+                               (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+       }
+
+       status |= QDSS_GOT_Q_SPACE;
+
+       /* Build header part of command packet (excluding the OPCODE). */
+       req->current_outstanding_cmd = handle;
+       req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
+       cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+       req->cnt -= req_cnt;
+
+       /* Fill-in common area */
+       cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
+       cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+       clr_ptr = (uint32_t *)cmd_pkt + 2;
+       memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+       /* Set NPORT-ID and LUN number*/
+       cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+       cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+       cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+       cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
 
-       if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
-               affinity < ha->max_rsp_queues - 1)
-               *rsp = ha->rsp_q_map[affinity + 1];
-        else
-               *rsp = ha->rsp_q_map[0];
+       int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+       host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+       /* Total Data and protection segment(s) */
+       cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+       /* Build IOCB segments and adjust for data protection segments */
+       if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
+           req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
+               QLA_SUCCESS)
+               goto queuing_error;
+
+       cmd_pkt->entry_count = (uint8_t)req_cnt;
+       cmd_pkt->timeout = cpu_to_le16(0);
+       wmb();
+
+       /* Adjust ring index. */
+       req->ring_index++;
+       if (req->ring_index == req->length) {
+               req->ring_index = 0;
+               req->ring_ptr = req->ring;
+       } else
+               req->ring_ptr++;
+
+       /* Set chip new ring index. */
+       WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+       /* Manage unprocessed RIO/ZIO commands in response queue. */
+       if (vha->flags.process_response_queue &&
+           rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+               qla24xx_process_response_queue(vha, rsp);
+
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+       return QLA_SUCCESS;
+
+queuing_error:
+       if (status & QDSS_GOT_Q_SPACE) {
+               req->outstanding_cmds[handle] = NULL;
+               req->cnt += req_cnt;
+       }
+       /* Cleanup will be performed by the caller (queuecommand) */
+
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
+       return QLA_FUNCTION_FAILED;
 }
 
 /* Generic Control-SRB manipulation functions. */
@@ -2664,7 +2991,7 @@ sufficient_dsds:
                cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
                /* Build IOCB segments */
-               qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+               qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
 
                /* Set total data segment count. */
                cmd_pkt->entry_count = (uint8_t)req_cnt;
index 19f18485a854ff1dbe8a4db1105fca4b75009931..5093ca9b02ec52c8e70674f88205941cc0967d9f 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_bsg_fc.h>
 #include <scsi/scsi_eh.h>
@@ -2870,41 +2871,6 @@ out:
        return IRQ_HANDLED;
 }
 
-static irqreturn_t
-qla25xx_msix_rsp_q(int irq, void *dev_id)
-{
-       struct qla_hw_data *ha;
-       scsi_qla_host_t *vha;
-       struct rsp_que *rsp;
-       struct device_reg_24xx __iomem *reg;
-       unsigned long flags;
-       uint32_t hccr = 0;
-
-       rsp = (struct rsp_que *) dev_id;
-       if (!rsp) {
-               ql_log(ql_log_info, NULL, 0x505b,
-                   "%s: NULL response queue pointer.\n", __func__);
-               return IRQ_NONE;
-       }
-       ha = rsp->hw;
-       vha = pci_get_drvdata(ha->pdev);
-
-       /* Clear the interrupt, if enabled, for this response queue */
-       if (!ha->flags.disable_msix_handshake) {
-               reg = &ha->iobase->isp24;
-               spin_lock_irqsave(&ha->hardware_lock, flags);
-               WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
-               hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       }
-       if (qla2x00_check_reg32_for_disconnect(vha, hccr))
-               goto out;
-       queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
-
-out:
-       return IRQ_HANDLED;
-}
-
 static irqreturn_t
 qla24xx_msix_default(int irq, void *dev_id)
 {
@@ -3001,6 +2967,35 @@ qla24xx_msix_default(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+irqreturn_t
+qla2xxx_msix_rsp_q(int irq, void *dev_id)
+{
+       struct qla_hw_data *ha;
+       struct qla_qpair *qpair;
+       struct device_reg_24xx __iomem *reg;
+       unsigned long flags;
+
+       qpair = dev_id;
+       if (!qpair) {
+               ql_log(ql_log_info, NULL, 0x505b,
+                   "%s: NULL response queue pointer.\n", __func__);
+               return IRQ_NONE;
+       }
+       ha = qpair->hw;
+
+       /* Clear the interrupt, if enabled, for this response queue */
+       if (unlikely(!ha->flags.disable_msix_handshake)) {
+               reg = &ha->iobase->isp24;
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       }
+
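+       /* Hand response-queue processing off to the qpair's work item */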
+       queue_work(ha->wq, &qpair->q_work);
+
+       return IRQ_HANDLED;
+}
+
 /* Interrupt handling helpers. */
 
 struct qla_init_msix_entry {
@@ -3008,69 +3003,28 @@ struct qla_init_msix_entry {
        irq_handler_t handler;
 };
 
-static struct qla_init_msix_entry msix_entries[3] = {
+static struct qla_init_msix_entry msix_entries[] = {
        { "qla2xxx (default)", qla24xx_msix_default },
        { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
-       { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
+       { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+       { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
 };
 
-static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
+static struct qla_init_msix_entry qla82xx_msix_entries[] = {
        { "qla2xxx (default)", qla82xx_msix_default },
        { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
 };
 
-static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
-       { "qla2xxx (default)", qla24xx_msix_default },
-       { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
-       { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
-};
-
-static void
-qla24xx_disable_msix(struct qla_hw_data *ha)
-{
-       int i;
-       struct qla_msix_entry *qentry;
-       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
-
-       for (i = 0; i < ha->msix_count; i++) {
-               qentry = &ha->msix_entries[i];
-               if (qentry->have_irq) {
-                       /* un-register irq cpu affinity notification */
-                       irq_set_affinity_notifier(qentry->vector, NULL);
-                       free_irq(qentry->vector, qentry->rsp);
-               }
-       }
-       pci_disable_msix(ha->pdev);
-       kfree(ha->msix_entries);
-       ha->msix_entries = NULL;
-       ha->flags.msix_enabled = 0;
-       ql_dbg(ql_dbg_init, vha, 0x0042,
-           "Disabled the MSI.\n");
-}
-
 static int
 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 #define MIN_MSIX_COUNT 2
-#define ATIO_VECTOR    2
        int i, ret;
-       struct msix_entry *entries;
        struct qla_msix_entry *qentry;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
-       entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
-                       GFP_KERNEL);
-       if (!entries) {
-               ql_log(ql_log_warn, vha, 0x00bc,
-                   "Failed to allocate memory for msix_entry.\n");
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < ha->msix_count; i++)
-               entries[i].entry = i;
-
-       ret = pci_enable_msix_range(ha->pdev,
-                                   entries, MIN_MSIX_COUNT, ha->msix_count);
+       ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
+                                   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (ret < 0) {
                ql_log(ql_log_fatal, vha, 0x00c7,
                    "MSI-X: Failed to enable support, "
@@ -3080,10 +3034,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
        } else if (ret < ha->msix_count) {
                ql_log(ql_log_warn, vha, 0x00c6,
                    "MSI-X: Failed to enable support "
-                   "-- %d/%d\n Retry with %d vectors.\n",
-                   ha->msix_count, ret, ret);
+                   "with %d vectors, using %d vectors.\n",
+                   ha->msix_count, ret);
                ha->msix_count = ret;
-               ha->max_rsp_queues = ha->msix_count - 1;
+               /* Recalculate queue values */
+               if (ha->mqiobase && ql2xmqsupport) {
+                       ha->max_req_queues = ha->msix_count - 1;
+
+                       /* ATIOQ needs 1 vector. That's 1 less QPair */
+                       if (QLA_TGT_MODE_ENABLED())
+                               ha->max_req_queues--;
+
+                       ha->max_rsp_queues = ha->max_req_queues;
+
+                       ha->max_qpairs = ha->max_req_queues - 1;
+                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+                           "Adjusted Max no of queue pairs: %d.\n", ha->max_qpairs);
+               }
        }
        ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
                                ha->msix_count, GFP_KERNEL);
@@ -3097,20 +3064,23 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 
        for (i = 0; i < ha->msix_count; i++) {
                qentry = &ha->msix_entries[i];
-               qentry->vector = entries[i].vector;
-               qentry->entry = entries[i].entry;
+               qentry->vector = pci_irq_vector(ha->pdev, i);
+               qentry->entry = i;
                qentry->have_irq = 0;
-               qentry->rsp = NULL;
+               qentry->in_use = 0;
+               qentry->handle = NULL;
                qentry->irq_notify.notify  = qla_irq_affinity_notify;
                qentry->irq_notify.release = qla_irq_affinity_release;
                qentry->cpuid = -1;
        }
 
        /* Enable MSI-X vectors for the base queue */
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
                qentry = &ha->msix_entries[i];
-               qentry->rsp = rsp;
+               qentry->handle = rsp;
                rsp->msix = qentry;
+               scnprintf(qentry->name, sizeof(qentry->name),
+                   "%s", msix_entries[i].name);
                if (IS_P3P_TYPE(ha))
                        ret = request_irq(qentry->vector,
                                qla82xx_msix_entries[i].handler,
@@ -3122,6 +3092,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                if (ret)
                        goto msix_register_fail;
                qentry->have_irq = 1;
+               qentry->in_use = 1;
 
                /* Register for CPU affinity notification. */
                irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
@@ -3141,12 +3112,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
         * queue.
         */
        if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
-               qentry = &ha->msix_entries[ATIO_VECTOR];
-               qentry->rsp = rsp;
+               qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
                rsp->msix = qentry;
+               qentry->handle = rsp;
+               scnprintf(qentry->name, sizeof(qentry->name),
+                   "%s", msix_entries[QLA_ATIO_VECTOR].name);
+               qentry->in_use = 1;
                ret = request_irq(qentry->vector,
-                       qla83xx_msix_entries[ATIO_VECTOR].handler,
-                       0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+                       msix_entries[QLA_ATIO_VECTOR].handler,
+                       0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
                qentry->have_irq = 1;
        }
 
@@ -3155,7 +3129,7 @@ msix_register_fail:
                ql_log(ql_log_fatal, vha, 0x00cb,
                    "MSI-X: unable to register handler -- %x/%d.\n",
                    qentry->vector, ret);
-               qla24xx_disable_msix(ha);
+               qla2x00_free_irqs(vha);
                ha->mqenable = 0;
                goto msix_out;
        }
@@ -3163,11 +3137,13 @@ msix_register_fail:
        /* Enable MSI-X vector for response queue update for queue 0 */
        if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                if (ha->msixbase && ha->mqiobase &&
-                   (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+                   (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+                    ql2xmqsupport))
                        ha->mqenable = 1;
        } else
-               if (ha->mqiobase
-                   && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
+               if (ha->mqiobase &&
+                   (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+                    ql2xmqsupport))
                        ha->mqenable = 1;
        ql_dbg(ql_dbg_multiq, vha, 0xc005,
            "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
@@ -3177,7 +3153,6 @@ msix_register_fail:
            ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
 
 msix_out:
-       kfree(entries);
        return ret;
 }
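
The vector accounting above reserves one vector for the default (mailbox)
interrupt and, in target mode, one more for the ATIO queue; the base queue
pair is then subtracted from the qpair budget. A minimal standalone sketch
of that arithmetic (the function name is illustrative, not driver API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the recalculation in qla24xx_enable_msix(): one vector for
 * the MB/default interrupt, one for the ATIO queue in target mode,
 * and the base queue pair comes out of what remains.
 */
static int qpair_budget(int msix_count, bool target_mode)
{
        int max_req_queues = msix_count - 1;    /* MB interrupt uses 1 vector */

        if (target_mode)
                max_req_queues--;               /* ATIO queue uses 1 vector */

        return max_req_queues - 1;              /* minus the base queue pair */
}

int main(void)
{
        printf("16 vectors, initiator only: %d qpairs\n", qpair_budget(16, false));
        printf("16 vectors, target mode:    %d qpairs\n", qpair_budget(16, true));
        return 0;
}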
 
@@ -3230,7 +3205,7 @@ skip_msix:
            !IS_QLA27XX(ha))
                goto skip_msi;
 
-       ret = pci_enable_msi(ha->pdev);
+       ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
        if (!ret) {
                ql_dbg(ql_dbg_init, vha, 0x0038,
                    "MSI: Enabled.\n");
@@ -3275,6 +3250,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
        struct rsp_que *rsp;
+       struct qla_msix_entry *qentry;
+       int i;
 
        /*
         * We need to check that ha->rsp_q_map is valid in case we are called
@@ -3284,25 +3261,36 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
                return;
        rsp = ha->rsp_q_map[0];
 
-       if (ha->flags.msix_enabled)
-               qla24xx_disable_msix(ha);
-       else if (ha->flags.msi_enabled) {
-               free_irq(ha->pdev->irq, rsp);
-               pci_disable_msi(ha->pdev);
-       } else
-               free_irq(ha->pdev->irq, rsp);
-}
+       if (ha->flags.msix_enabled) {
+               for (i = 0; i < ha->msix_count; i++) {
+                       qentry = &ha->msix_entries[i];
+                       if (qentry->have_irq) {
+                               irq_set_affinity_notifier(qentry->vector, NULL);
+                               free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+                       }
+               }
+               kfree(ha->msix_entries);
+               ha->msix_entries = NULL;
+               ha->flags.msix_enabled = 0;
+               ql_dbg(ql_dbg_init, vha, 0x0042,
+                       "Disabled MSI-X.\n");
+       } else {
+               free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+       }
 
+       pci_free_irq_vectors(ha->pdev);
+}
 
-int qla25xx_request_irq(struct rsp_que *rsp)
+int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
+       struct qla_msix_entry *msix, int vector_type)
 {
-       struct qla_hw_data *ha = rsp->hw;
-       struct qla_init_msix_entry *intr = &msix_entries[2];
-       struct qla_msix_entry *msix = rsp->msix;
+       struct qla_init_msix_entry *intr = &msix_entries[vector_type];
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        int ret;
 
-       ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+       scnprintf(msix->name, sizeof(msix->name),
+           "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
+       ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
        if (ret) {
                ql_log(ql_log_fatal, vha, 0x00e6,
                    "MSI-X: Unable to register handler -- %x/%d.\n",
@@ -3310,7 +3298,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
                return ret;
        }
        msix->have_irq = 1;
-       msix->rsp = rsp;
+       msix->handle = qpair;
        return ret;
 }
 
@@ -3323,11 +3311,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
                container_of(notify, struct qla_msix_entry, irq_notify);
        struct qla_hw_data *ha;
        struct scsi_qla_host *base_vha;
+       struct rsp_que *rsp = e->handle;
 
        /* user is recommended to set mask to just 1 cpu */
        e->cpuid = cpumask_first(mask);
 
-       ha = e->rsp->hw;
+       ha = rsp->hw;
        base_vha = pci_get_drvdata(ha->pdev);
 
        ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3351,9 +3340,10 @@ static void qla_irq_affinity_release(struct kref *ref)
                container_of(ref, struct irq_affinity_notify, kref);
        struct qla_msix_entry *e =
                container_of(notify, struct qla_msix_entry, irq_notify);
-       struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+       struct rsp_que *rsp = e->handle;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
 
        ql_dbg(ql_dbg_init, base_vha, 0xffff,
-           "%s: host%ld: vector %d cpu %d \n", __func__,
+               "%s: host%ld: vector %d cpu %d\n", __func__,
            base_vha->host_no, e->vector, e->cpuid);
 }
index 23698c9986998a0a7279e6e256d4c377f6fceab9..2819ceb96041e5b97b234f115c9b35d4b4251ffe 100644
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
+static struct rom_cmd {
+       uint16_t cmd;
+} rom_cmds[] = {
+       { MBC_LOAD_RAM },
+       { MBC_EXECUTE_FIRMWARE },
+       { MBC_READ_RAM_WORD },
+       { MBC_MAILBOX_REGISTER_TEST },
+       { MBC_VERIFY_CHECKSUM },
+       { MBC_GET_FIRMWARE_VERSION },
+       { MBC_LOAD_RISC_RAM },
+       { MBC_DUMP_RISC_RAM },
+       { MBC_LOAD_RISC_RAM_EXTENDED },
+       { MBC_DUMP_RISC_RAM_EXTENDED },
+       { MBC_WRITE_RAM_WORD_EXTENDED },
+       { MBC_READ_RAM_EXTENDED },
+       { MBC_GET_RESOURCE_COUNTS },
+       { MBC_SET_FIRMWARE_OPTION },
+       { MBC_MID_INITIALIZE_FIRMWARE },
+       { MBC_GET_FIRMWARE_STATE },
+       { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
+       { MBC_GET_RETRY_COUNT },
+       { MBC_TRACE_CONTROL },
+};
+
+static int is_rom_cmd(uint16_t cmd)
+{
+       int i;
+       struct rom_cmd *wc;
+
+       for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
+               wc = rom_cmds + i;
+               if (wc->cmd == cmd)
+                       return 1;
+       }
+
+       return 0;
+}
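
is_rom_cmd() is a plain linear scan of the table above. A minimal standalone
model (the opcode values here are hypothetical stand-ins, not the real MBC_*
constants):

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* stand-ins for a few MBC_* opcodes; the values are made up */
static const uint16_t rom_cmds[] = { 0x0001, 0x0002, 0x0005, 0x0007 };

static int is_rom_cmd(uint16_t cmd)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(rom_cmds); i++)
                if (rom_cmds[i] == cmd)
                        return 1;
        return 0;
}

int main(void)
{
        printf("0x0005 -> %d, 0x0009 -> %d\n",
            is_rom_cmd(0x0005), is_rom_cmd(0x0009));
        return 0;
}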
 
 /*
  * qla2x00_mailbox_command
@@ -92,6 +129,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                return QLA_FUNCTION_TIMEOUT;
        }
 
+       /* check if ISP abort is active and return cmd with timeout */
+       if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+           test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+           test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
+           !is_rom_cmd(mcp->mb[0])) {
+               ql_log(ql_log_info, vha, 0x1005,
+                   "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
+                   mcp->mb[0]);
+               return QLA_FUNCTION_TIMEOUT;
+       }
+
        /*
         * Wait for active mailbox commands to finish by waiting at most tov
         * seconds. This is to serialize actual issuing of mailbox cmds during
@@ -178,6 +226,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+               wait_time = jiffies;
                if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
                    mcp->tov * HZ)) {
                        ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -186,6 +235,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                }
+               if (time_after(jiffies, wait_time + 5 * HZ))
+                       ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
+                           command, jiffies_to_msecs(jiffies - wait_time));
        } else {
                ql_dbg(ql_dbg_mbx, vha, 0x1011,
                    "Cmd=%x Polling Mode.\n", command);
@@ -1194,12 +1246,17 @@ qla2x00_abort_command(srb_t *sp)
        fc_port_t       *fcport = sp->fcport;
        scsi_qla_host_t *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = vha->req;
+       struct req_que *req;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
            "Entered %s.\n", __func__);
 
+       if (vha->flags.qpairs_available && sp->qpair)
+               req = sp->qpair->req;
+       else
+               req = vha->req;
+
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
                if (req->outstanding_cmds[handle] == sp)
@@ -2152,10 +2209,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
            "Entered %s.\n", __func__);
 
-       if (ha->flags.cpu_affinity_enabled)
-               req = ha->req_q_map[0];
+       if (vha->vp_idx && vha->qpair)
+               req = vha->qpair->req;
        else
-               req = vha->req;
+               req = ha->req_q_map[0];
 
        lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
        if (lg == NULL) {
@@ -2435,10 +2492,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        }
        memset(lg, 0, sizeof(struct logio_entry_24xx));
 
-       if (ql2xmaxqueues > 1)
-               req = ha->req_q_map[0];
-       else
-               req = vha->req;
+       req = vha->req;
        lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        lg->entry_count = 1;
        lg->handle = MAKE_HANDLE(req->id, lg->handle);
@@ -2904,6 +2958,9 @@ qla24xx_abort_command(srb_t *sp)
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
            "Entered %s.\n", __func__);
 
+       if (vha->flags.qpairs_available && sp->qpair)
+               req = sp->qpair->req;
+
        if (ql2xasynctmfenable)
                return qla24xx_async_abort_command(sp);
 
@@ -2984,6 +3041,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
+       struct qla_qpair *qpair;
 
        vha = fcport->vha;
        ha = vha->hw;
@@ -2992,10 +3050,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
            "Entered %s.\n", __func__);
 
-       if (ha->flags.cpu_affinity_enabled)
-               rsp = ha->rsp_q_map[tag + 1];
-       else
+       if (vha->vp_idx && vha->qpair) {
+               /* NPIV port */
+               qpair = vha->qpair;
+               rsp = qpair->rsp;
+               req = qpair->req;
+       } else {
                rsp = req->rsp;
+       }
+
        tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
        if (tsk == NULL) {
                ql_log(ql_log_warn, vha, 0x1093,
index cf7ba52bae665fa482b8535f92e5fcb1dbe25244..c6d6f0d912ff75ffaf9b9d810f81af735e39549b 100644
@@ -540,9 +540,10 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        uint16_t que_id = rsp->id;
 
        if (rsp->msix && rsp->msix->have_irq) {
-               free_irq(rsp->msix->vector, rsp);
+               free_irq(rsp->msix->vector, rsp->msix->handle);
                rsp->msix->have_irq = 0;
-               rsp->msix->rsp = NULL;
+               rsp->msix->in_use = 0;
+               rsp->msix->handle = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
                sizeof(response_t), rsp->ring, rsp->dma);
@@ -573,7 +574,7 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
        return ret;
 }
 
-static int
+int
 qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 {
        int ret = -1;
@@ -596,34 +597,42 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;
+       struct qla_qpair *qpair, *tqpair;
 
-       /* Delete request queues */
-       for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
-               req = ha->req_q_map[cnt];
-               if (req && test_bit(cnt, ha->req_qid_map)) {
-                       ret = qla25xx_delete_req_que(vha, req);
-                       if (ret != QLA_SUCCESS) {
-                               ql_log(ql_log_warn, vha, 0x00ea,
-                                   "Couldn't delete req que %d.\n",
-                                   req->id);
-                               return ret;
+       if (ql2xmqsupport) {
+               list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
+                   qp_list_elem)
+                       qla2xxx_delete_qpair(vha, qpair);
+       } else {
+               /* Delete request queues */
+               for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+                       req = ha->req_q_map[cnt];
+                       if (req && test_bit(cnt, ha->req_qid_map)) {
+                               ret = qla25xx_delete_req_que(vha, req);
+                               if (ret != QLA_SUCCESS) {
+                                       ql_log(ql_log_warn, vha, 0x00ea,
+                                           "Couldn't delete req que %d.\n",
+                                           req->id);
+                                       return ret;
+                               }
                        }
                }
-       }
 
-       /* Delete response queues */
-       for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
-               rsp = ha->rsp_q_map[cnt];
-               if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
-                       ret = qla25xx_delete_rsp_que(vha, rsp);
-                       if (ret != QLA_SUCCESS) {
-                               ql_log(ql_log_warn, vha, 0x00eb,
-                                   "Couldn't delete rsp que %d.\n",
-                                   rsp->id);
-                               return ret;
+               /* Delete response queues */
+               for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+                       rsp = ha->rsp_q_map[cnt];
+                       if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+                               ret = qla25xx_delete_rsp_que(vha, rsp);
+                               if (ret != QLA_SUCCESS) {
+                                       ql_log(ql_log_warn, vha, 0x00eb,
+                                           "Couldn't delete rsp que %d.\n",
+                                           rsp->id);
+                                       return ret;
+                               }
                        }
                }
        }
+
        return ret;
 }
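
The qpair teardown walks qp_list with the _safe iterator because each pass
deletes the current entry; the spare cursor (tqpair) is fetched before the
deletion so the walk stays valid. A self-contained illustration of the same
pattern:

#include <stdio.h>
#include <stdlib.h>

struct qpair {                  /* stand-in for struct qla_qpair */
        int id;
        struct qpair *next;
};

/* Save the successor before freeing the current node -- exactly the
 * extra cursor that list_for_each_entry_safe() provides. */
static void delete_all(struct qpair **head)
{
        struct qpair *cur = *head, *tmp;

        while (cur) {
                tmp = cur->next;
                printf("deleting qpair %d\n", cur->id);
                free(cur);
                cur = tmp;
        }
        *head = NULL;
}

int main(void)
{
        struct qpair *head = NULL;
        int i;

        for (i = 3; i >= 1; i--) {
                struct qpair *q = malloc(sizeof(*q));

                if (!q)
                        exit(1);
                q->id = i;
                q->next = head;
                head = q;
        }
        delete_all(&head);
        return 0;
}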
 
@@ -659,10 +668,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        if (ret != QLA_SUCCESS)
                goto que_failed;
 
-       mutex_lock(&ha->vport_lock);
+       mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
-               mutex_unlock(&ha->vport_lock);
+               mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
@@ -708,7 +717,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        req->out_ptr = (void *)(req->ring + req->length);
-       mutex_unlock(&ha->vport_lock);
+       mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
@@ -724,9 +733,9 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00df,
                    "%s failed.\n", __func__);
-               mutex_lock(&ha->vport_lock);
+               mutex_lock(&ha->mq_lock);
                clear_bit(que_id, ha->req_qid_map);
-               mutex_unlock(&ha->vport_lock);
+               mutex_unlock(&ha->mq_lock);
                goto que_failed;
        }
 
@@ -741,20 +750,20 @@ failed:
 static void qla_do_work(struct work_struct *work)
 {
        unsigned long flags;
-       struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+       struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
        struct scsi_qla_host *vha;
-       struct qla_hw_data *ha = rsp->hw;
+       struct qla_hw_data *ha = qpair->hw;
 
-       spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+       spin_lock_irqsave(&qpair->qp_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
-       qla24xx_process_response_queue(vha, rsp);
-       spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
+       qla24xx_process_response_queue(vha, qpair->rsp);
+       spin_unlock_irqrestore(&qpair->qp_lock, flags);
 }
 
 /* create response queue */
 int
 qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
-       uint8_t vp_idx, uint16_t rid, int req)
+       uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
 {
        int ret = 0;
        struct rsp_que *rsp = NULL;
@@ -779,28 +788,24 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
                goto que_failed;
        }
 
-       mutex_lock(&ha->vport_lock);
+       mutex_lock(&ha->mq_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
-               mutex_unlock(&ha->vport_lock);
+               mutex_unlock(&ha->mq_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);
 
-       if (ha->flags.msix_enabled)
-               rsp->msix = &ha->msix_entries[que_id + 1];
-       else
-               ql_log(ql_log_warn, base_vha, 0x00e3,
-                   "MSIX not enabled.\n");
+       rsp->msix = qpair->msix;
 
        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
-           "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+           "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
@@ -812,23 +817,27 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;
 
+       /* Set option to indicate response queue creation */
+       options |= BIT_1;
+
        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        rsp->in_ptr = (void *)(rsp->ring + rsp->length);
-       mutex_unlock(&ha->vport_lock);
+       mutex_unlock(&ha->mq_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
-           "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+           "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
-           "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+           "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
 
-       ret = qla25xx_request_irq(rsp);
+       ret = qla25xx_request_irq(ha, qpair, qpair->msix,
+           QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
        if (ret)
                goto que_failed;
 
@@ -836,19 +845,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00e7,
                    "%s failed.\n", __func__);
-               mutex_lock(&ha->vport_lock);
+               mutex_lock(&ha->mq_lock);
                clear_bit(que_id, ha->rsp_qid_map);
-               mutex_unlock(&ha->vport_lock);
+               mutex_unlock(&ha->mq_lock);
                goto que_failed;
        }
-       if (req >= 0)
-               rsp->req = ha->req_q_map[req];
-       else
-               rsp->req = NULL;
+       rsp->req = NULL;
 
        qla2x00_init_response_q_entries(rsp);
-       if (rsp->hw->wq)
-               INIT_WORK(&rsp->q_work, qla_do_work);
+       if (qpair->hw->wq)
+               INIT_WORK(&qpair->q_work, qla_do_work);
        return rsp->id;
 
 que_failed:
index 56d6142852a553ed9ad8011cb4c18a84e8656e0d..8521cfe302e9e3e72c7aaf1a4753ca75f953b972 100644
@@ -13,6 +13,7 @@
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
+#include <linux/blk-mq-pci.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -30,7 +31,7 @@ static int apidev_major;
 /*
  * SRB allocation cache
  */
-static struct kmem_cache *srb_cachep;
+struct kmem_cache *srb_cachep;
 
 /*
  * CT6 CTX allocation cache
@@ -143,19 +144,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
                "Enables iIDMA settings "
                "Default is 1 - perform iIDMA. 0 - no iIDMA.");
 
-int ql2xmaxqueues = 1;
-module_param(ql2xmaxqueues, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmaxqueues,
-               "Enables MQ settings "
-               "Default is 1 for single queue. Set it to number "
-               "of queues in MQ mode.");
-
-int ql2xmultique_tag;
-module_param(ql2xmultique_tag, int, S_IRUGO);
-MODULE_PARM_DESC(ql2xmultique_tag,
-               "Enables CPU affinity settings for the driver "
-               "Default is 0 for no affinity of request and response IO. "
-               "Set it to 1 to turn on the cpu affinity.");
+int ql2xmqsupport = 1;
+module_param(ql2xmqsupport, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmqsupport,
+               "Enable on-demand multiple queue pairs support. "
+               "Default is 1 (enabled). "
+               "Set it to 0 to turn off mq qpair support.");
 
 int ql2xfwloadbin;
 module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
@@ -261,6 +255,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
 static void qla83xx_disable_laser(scsi_qla_host_t *vha);
+static int qla2xxx_map_queues(struct Scsi_Host *shost);
 
 struct scsi_host_template qla2xxx_driver_template = {
        .module                 = THIS_MODULE,
@@ -280,6 +275,7 @@ struct scsi_host_template qla2xxx_driver_template = {
        .scan_finished          = qla2xxx_scan_finished,
        .scan_start             = qla2xxx_scan_start,
        .change_queue_depth     = scsi_change_queue_depth,
+       .map_queues             = qla2xxx_map_queues,
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
@@ -339,6 +335,8 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
        struct req_que **, struct rsp_que **);
 static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
+int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+       struct qla_qpair *qpair);
 
 /* -------------------------------------------------------------------------- */
 static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
@@ -360,6 +358,25 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
                    "Unable to allocate memory for response queue ptrs.\n");
                goto fail_rsp_map;
        }
+
+       if (ql2xmqsupport && ha->max_qpairs) {
+               ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
+                       GFP_KERNEL);
+               if (!ha->queue_pair_map) {
+                       ql_log(ql_log_fatal, vha, 0x0180,
+                           "Unable to allocate memory for queue pair ptrs.\n");
+                       goto fail_qpair_map;
+               }
+               ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+               if (ha->base_qpair == NULL) {
+                       ql_log(ql_log_warn, vha, 0x0182,
+                           "Failed to allocate base queue pair memory.\n");
+                       goto fail_base_qpair;
+               }
+               ha->base_qpair->req = req;
+               ha->base_qpair->rsp = rsp;
+       }
+
        /*
         * Make sure we record at least the request and response queue zero in
         * case we need to free them if part of the probe fails.
@@ -370,6 +387,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
        set_bit(0, ha->req_qid_map);
        return 1;
 
+fail_base_qpair:
+       kfree(ha->queue_pair_map);
+fail_qpair_map:
+       kfree(ha->rsp_q_map);
+       ha->rsp_q_map = NULL;
 fail_rsp_map:
        kfree(ha->req_q_map);
        ha->req_q_map = NULL;
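
The new error labels unwind in reverse allocation order -- the usual kernel
goto idiom, where each label frees only what was allocated before the failing
step. A self-contained illustration (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Mirrors the fail_base_qpair -> fail_qpair_map -> fail_rsp_map chain:
 * jump to the label matching the step that failed, then fall through
 * the frees for everything allocated earlier. */
static int alloc_maps(void)
{
        int *req_map, *rsp_map, *qp_map;

        req_map = malloc(64);
        if (!req_map)
                goto fail_req;
        rsp_map = malloc(64);
        if (!rsp_map)
                goto fail_rsp;
        qp_map = malloc(64);
        if (!qp_map)
                goto fail_qp;

        /* success: caller owns all three */
        free(qp_map);
        free(rsp_map);
        free(req_map);
        return 0;

fail_qp:
        free(rsp_map);
fail_rsp:
        free(req_map);
fail_req:
        return -1;
}

int main(void)
{
        printf("alloc_maps() = %d\n", alloc_maps());
        return 0;
}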
@@ -417,82 +439,43 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
        struct req_que *req;
        struct rsp_que *rsp;
        int cnt;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
                if (!test_bit(cnt, ha->req_qid_map))
                        continue;
 
                req = ha->req_q_map[cnt];
+               clear_bit(cnt, ha->req_qid_map);
+               ha->req_q_map[cnt] = NULL;
+
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_req_que(ha, req);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        kfree(ha->req_q_map);
        ha->req_q_map = NULL;
 
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
                if (!test_bit(cnt, ha->rsp_qid_map))
                        continue;
 
                rsp = ha->rsp_q_map[cnt];
+               clear_bit(cnt, ha->rsp_qid_map);
+               ha->rsp_q_map[cnt] = NULL;
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_rsp_que(ha, rsp);
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        }
-       kfree(ha->rsp_q_map);
-       ha->rsp_q_map = NULL;
-}
-
-static int qla25xx_setup_mode(struct scsi_qla_host *vha)
-{
-       uint16_t options = 0;
-       int ques, req, ret;
-       struct qla_hw_data *ha = vha->hw;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       if (!(ha->fw_attributes & BIT_6)) {
-               ql_log(ql_log_warn, vha, 0x00d8,
-                   "Firmware is not multi-queue capable.\n");
-               goto fail;
-       }
-       if (ql2xmultique_tag) {
-               /* create a request queue for IO */
-               options |= BIT_7;
-               req = qla25xx_create_req_que(ha, options, 0, 0, -1,
-                       QLA_DEFAULT_QUE_QOS);
-               if (!req) {
-                       ql_log(ql_log_warn, vha, 0x00e0,
-                           "Failed to create request queue.\n");
-                       goto fail;
-               }
-               ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
-               vha->req = ha->req_q_map[req];
-               options |= BIT_1;
-               for (ques = 1; ques < ha->max_rsp_queues; ques++) {
-                       ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
-                       if (!ret) {
-                               ql_log(ql_log_warn, vha, 0x00e8,
-                                   "Failed to create response queue.\n");
-                               goto fail2;
-                       }
-               }
-               ha->flags.cpu_affinity_enabled = 1;
-               ql_dbg(ql_dbg_multiq, vha, 0xc007,
-                   "CPU affinity mode enabled, "
-                   "no. of response queues:%d no. of request queues:%d.\n",
-                   ha->max_rsp_queues, ha->max_req_queues);
-               ql_dbg(ql_dbg_init, vha, 0x00e9,
-                   "CPU affinity mode enabled, "
-                   "no. of response queues:%d no. of request queues:%d.\n",
-                   ha->max_rsp_queues, ha->max_req_queues);
-       }
-       return 0;
-fail2:
-       qla25xx_delete_queues(vha);
-       destroy_workqueue(ha->wq);
-       ha->wq = NULL;
-       vha->req = ha->req_q_map[0];
-fail:
-       ha->mqenable = 0;
-       kfree(ha->req_q_map);
        kfree(ha->rsp_q_map);
-       ha->max_req_queues = ha->max_rsp_queues = 1;
-       return 1;
+       ha->rsp_q_map = NULL;
 }
 
 static char *
@@ -669,7 +652,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-static void
+void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
        struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -693,6 +676,75 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
        cmd->scsi_done(cmd);
 }
 
+void
+qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct qla_hw_data *ha = sp->fcport->vha->hw;
+       void *ctx = GET_CMD_CTX_SP(sp);
+
+       if (sp->flags & SRB_DMA_VALID) {
+               scsi_dma_unmap(cmd);
+               sp->flags &= ~SRB_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
+               dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
+                   scsi_prot_sg_count(cmd), cmd->sc_data_direction);
+               sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
+               /* List assured to be having elements */
+               qla2x00_clean_dsd_pool(ha, sp, NULL);
+               sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
+       }
+
+       if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
+               dma_pool_free(ha->dl_dma_pool, ctx,
+                   ((struct crc_context *)ctx)->crc_ctx_dma);
+               sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
+       }
+
+       if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
+               struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+
+               dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
+                   ctx1->fcp_cmnd_dma);
+               list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
+               ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
+               ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+               mempool_free(ctx1, ha->ctx_mempool);
+       }
+
+       CMD_SP(cmd) = NULL;
+       qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+void
+qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
+       cmd->result = res;
+
+       if (atomic_read(&sp->ref_count) == 0) {
+               ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
+                   "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+                   sp, GET_CMD_SP(sp));
+               if (ql2xextended_error_logging & ql_dbg_io)
+                       WARN_ON(atomic_read(&sp->ref_count) == 0);
+               return;
+       }
+       if (!atomic_dec_and_test(&sp->ref_count))
+               return;
+
+       qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+       cmd->scsi_done(cmd);
+}
+
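Completion runs only when the last reference is dropped: atomic_dec_and_test()
returns true exactly once, for the caller that takes the count to zero. A
compact C11 model of the same pattern (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct sp {                     /* stand-in for the driver's srb_t */
        atomic_int ref_count;
};

/* Returns 1 when this caller dropped the final reference and may
 * complete the command -- the shape of atomic_dec_and_test() above. */
static int sp_put(struct sp *sp)
{
        return atomic_fetch_sub(&sp->ref_count, 1) == 1;
}

int main(void)
{
        struct sp sp;

        atomic_init(&sp.ref_count, 2);  /* e.g. submit path + interrupt */
        printf("first put completes:  %d\n", sp_put(&sp));
        printf("second put completes: %d\n", sp_put(&sp));
        return 0;
}
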
 /* If we are SP1 here, we need to still take and release the host_lock as SP1
  * does not have the changes necessary to avoid taking host->host_lock.
  */
@@ -706,12 +758,28 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        srb_t *sp;
        int rval;
+       struct qla_qpair *qpair = NULL;
+       uint32_t tag;
+       uint16_t hwq;
 
        if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
                cmd->result = DID_NO_CONNECT << 16;
                goto qc24_fail_command;
        }
 
+       if (ha->mqenable) {
+               if (shost_use_blk_mq(vha->host)) {
+                       tag = blk_mq_unique_tag(cmd->request);
+                       hwq = blk_mq_unique_tag_to_hwq(tag);
+                       qpair = ha->queue_pair_map[hwq];
+               } else if (vha->vp_idx && vha->qpair) {
+                       qpair = vha->qpair;
+               }
+
+               if (qpair)
+                       return qla2xxx_mqueuecommand(host, cmd, qpair);
+       }
+
        if (ha->flags.eeh_busy) {
                if (ha->flags.pci_channel_io_perm_failure) {
                        ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -808,6 +876,95 @@ qc24_fail_command:
        return 0;
 }
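
The hwq lookup above (blk_mq_unique_tag() followed by
blk_mq_unique_tag_to_hwq()) relies on blk-mq packing the hardware-queue index
into the unique tag's upper bits. A sketch of that encoding, assuming the
usual 16-bit split of BLK_MQ_UNIQUE_TAG_BITS (the macro names below are local
stand-ins):

#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS 16      /* models BLK_MQ_UNIQUE_TAG_BITS */
#define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

static uint32_t unique_tag_to_hwq(uint32_t unique_tag)
{
        return unique_tag >> UNIQUE_TAG_BITS;
}

static uint32_t unique_tag_to_tag(uint32_t unique_tag)
{
        return unique_tag & UNIQUE_TAG_MASK;
}

int main(void)
{
        uint32_t t = (3u << UNIQUE_TAG_BITS) | 42;      /* hwq 3, tag 42 */

        printf("hwq=%u tag=%u\n", (unsigned)unique_tag_to_hwq(t),
            (unsigned)unique_tag_to_tag(t));
        return 0;
}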
 
+/* For MQ supported I/O */
+int
+qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
+    struct qla_qpair *qpair)
+{
+       scsi_qla_host_t *vha = shost_priv(host);
+       fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+       struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+       struct qla_hw_data *ha = vha->hw;
+       struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+       srb_t *sp;
+       int rval;
+
+       rval = fc_remote_port_chkready(rport);
+       if (rval) {
+               cmd->result = rval;
+               ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
+                   "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+                   cmd, rval);
+               goto qc24_fail_command;
+       }
+
+       if (!fcport) {
+               cmd->result = DID_NO_CONNECT << 16;
+               goto qc24_fail_command;
+       }
+
+       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+               if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
+                       atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+                       ql_dbg(ql_dbg_io, vha, 0x3077,
+                           "Returning DNC, fcport_state=%d loop_state=%d.\n",
+                           atomic_read(&fcport->state),
+                           atomic_read(&base_vha->loop_state));
+                       cmd->result = DID_NO_CONNECT << 16;
+                       goto qc24_fail_command;
+               }
+               goto qc24_target_busy;
+       }
+
+       /*
+        * Return target busy if we've received a non-zero retry_delay_timer
+        * in a FCP_RSP.
+        */
+       if (fcport->retry_delay_timestamp == 0) {
+               /* retry delay not set */
+       } else if (time_after(jiffies, fcport->retry_delay_timestamp))
+               fcport->retry_delay_timestamp = 0;
+       else
+               goto qc24_target_busy;
+
+       sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+       if (!sp)
+               goto qc24_host_busy;
+
+       sp->u.scmd.cmd = cmd;
+       sp->type = SRB_SCSI_CMD;
+       atomic_set(&sp->ref_count, 1);
+       CMD_SP(cmd) = (void *)sp;
+       sp->free = qla2xxx_qpair_sp_free_dma;
+       sp->done = qla2xxx_qpair_sp_compl;
+       sp->qpair = qpair;
+
+       rval = ha->isp_ops->start_scsi_mq(sp);
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
+                   "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+               if (rval == QLA_INTERFACE_ERROR)
+                       goto qc24_fail_command;
+               goto qc24_host_busy_free_sp;
+       }
+
+       return 0;
+
+qc24_host_busy_free_sp:
+       qla2xxx_qpair_sp_free_dma(vha, sp);
+
+qc24_host_busy:
+       return SCSI_MLQUEUE_HOST_BUSY;
+
+qc24_target_busy:
+       return SCSI_MLQUEUE_TARGET_BUSY;
+
+qc24_fail_command:
+       cmd->scsi_done(cmd);
+
+       return 0;
+}
+
 /*
  * qla2x00_eh_wait_on_command
  *    Waits for the command to be returned by the Firmware for some
@@ -1601,7 +1758,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 {
        resource_size_t pio;
        uint16_t msix;
-       int cpus;
 
        if (pci_request_selected_regions(ha->pdev, ha->bars,
            QLA2XXX_DRIVER_NAME)) {
@@ -1658,9 +1814,7 @@ skip_pio:
 
        /* Determine queue resources */
        ha->max_req_queues = ha->max_rsp_queues = 1;
-       if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
-               (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
-               (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+       if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
                goto mqiobase_exit;
 
        ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1670,26 +1824,18 @@ skip_pio:
                    "MQIO Base=%p.\n", ha->mqiobase);
                /* Read MSIX vector size of the board */
                pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
-               ha->msix_count = msix;
+               ha->msix_count = msix + 1;
                /* Max queues are bounded by available msix vectors */
-               /* queue 0 uses two msix vectors */
-               if (ql2xmultique_tag) {
-                       cpus = num_online_cpus();
-                       ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                               (cpus + 1) : (ha->msix_count - 1);
-                       ha->max_req_queues = 2;
-               } else if (ql2xmaxqueues > 1) {
-                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                           QLA_MQ_SIZE : ql2xmaxqueues;
-                       ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-               }
+               /* MB interrupt uses 1 vector */
+               ha->max_req_queues = ha->msix_count - 1;
+               ha->max_rsp_queues = ha->max_req_queues;
+               /* Queue pairs is the max value minus the base queue pair */
+               ha->max_qpairs = ha->max_rsp_queues - 1;
+               ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
+                   "Max no of queue pairs: %d.\n", ha->max_qpairs);
+
                ql_log_pci(ql_log_info, ha->pdev, 0x001a,
-                   "MSI-X vector count: %d.\n", msix);
+                   "MSI-X vector count: %d.\n", ha->msix_count);
        } else
                ql_log_pci(ql_log_info, ha->pdev, 0x001b,
                    "BAR 3 not enabled.\n");
@@ -1709,7 +1855,6 @@ static int
 qla83xx_iospace_config(struct qla_hw_data *ha)
 {
        uint16_t msix;
-       int cpus;
 
        if (pci_request_selected_regions(ha->pdev, ha->bars,
            QLA2XXX_DRIVER_NAME)) {
@@ -1761,32 +1906,36 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
                /* Read MSIX vector size of the board */
                pci_read_config_word(ha->pdev,
                    QLA_83XX_PCI_MSIX_CONTROL, &msix);
-               ha->msix_count = msix;
-               /* Max queues are bounded by available msix vectors */
-               /* queue 0 uses two msix vectors */
-               if (ql2xmultique_tag) {
-                       cpus = num_online_cpus();
-                       ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
-                               (cpus + 1) : (ha->msix_count - 1);
-                       ha->max_req_queues = 2;
-               } else if (ql2xmaxqueues > 1) {
-                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-                                               QLA_MQ_SIZE : ql2xmaxqueues;
-                       ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
-                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
-                           "QoS mode set, max no of request queues:%d.\n",
-                           ha->max_req_queues);
+               ha->msix_count = msix + 1;
+               /*
+                * By default, driver uses at least two msix vectors
+                * (default & rspq)
+                */
+               if (ql2xmqsupport) {
+                       /* MB interrupt uses 1 vector */
+                       ha->max_req_queues = ha->msix_count - 1;
+                       ha->max_rsp_queues = ha->max_req_queues;
+
+                       /* ATIOQ needs 1 vector. That's 1 less QPair */
+                       if (QLA_TGT_MODE_ENABLED())
+                               ha->max_req_queues--;
+
+                       /*
+                        * Queue pair count is the max value minus
+                        * the base queue pair.
+                        */
+                       ha->max_qpairs = ha->max_req_queues - 1;
+                       ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+                           "Max no of queue pairs: %d.\n", ha->max_qpairs);
                }
                ql_log_pci(ql_log_info, ha->pdev, 0x011c,
-                   "MSI-X vector count: %d.\n", msix);
+                   "MSI-X vector count: %d.\n", ha->msix_count);
        } else
                ql_log_pci(ql_log_info, ha->pdev, 0x011e,
                    "BAR 1 not enabled.\n");
 
 mqiobase_exit:
        ha->msix_count = ha->max_rsp_queues + 1;
+       if (QLA_TGT_MODE_ENABLED())
+               ha->msix_count++;
 
        qlt_83xx_iospace_config(ha);
 
@@ -1831,6 +1980,7 @@ static struct isp_operations qla2100_isp_ops = {
        .write_optrom           = qla2x00_write_optrom_data,
        .get_flash_version      = qla2x00_get_flash_version,
        .start_scsi             = qla2x00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1869,6 +2019,7 @@ static struct isp_operations qla2300_isp_ops = {
        .write_optrom           = qla2x00_write_optrom_data,
        .get_flash_version      = qla2x00_get_flash_version,
        .start_scsi             = qla2x00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1907,6 +2058,7 @@ static struct isp_operations qla24xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1945,6 +2097,7 @@ static struct isp_operations qla25xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -1983,6 +2136,7 @@ static struct isp_operations qla81xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2021,6 +2175,7 @@ static struct isp_operations qla82xx_isp_ops = {
        .write_optrom           = qla82xx_write_optrom_data,
        .get_flash_version      = qla82xx_get_flash_version,
        .start_scsi             = qla82xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla82xx_abort_isp,
        .iospace_config         = qla82xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2059,6 +2214,7 @@ static struct isp_operations qla8044_isp_ops = {
        .write_optrom           = qla8044_write_optrom_data,
        .get_flash_version      = qla82xx_get_flash_version,
        .start_scsi             = qla82xx_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qla8044_abort_isp,
        .iospace_config         = qla82xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2097,6 +2253,7 @@ static struct isp_operations qla83xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla83xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2135,6 +2292,7 @@ static struct isp_operations qlafx00_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qlafx00_start_scsi,
+       .start_scsi_mq          = NULL,
        .abort_isp              = qlafx00_abort_isp,
        .iospace_config         = qlafx00_iospace_config,
        .initialize_adapter     = qlafx00_initialize_adapter,
@@ -2173,6 +2331,7 @@ static struct isp_operations qla27xx_isp_ops = {
        .write_optrom           = qla24xx_write_optrom_data,
        .get_flash_version      = qla24xx_get_flash_version,
        .start_scsi             = qla24xx_dif_start_scsi,
+       .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla83xx_iospace_config,
        .initialize_adapter     = qla2x00_initialize_adapter,
@@ -2387,6 +2546,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        uint16_t req_length = 0, rsp_length = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
+       int i;
+
        bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
        sht = &qla2xxx_driver_template;
        if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2650,6 +2811,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            "Found an ISP%04X irq %d iobase 0x%p.\n",
            pdev->device, pdev->irq, ha->iobase);
        mutex_init(&ha->vport_lock);
+       mutex_init(&ha->mq_lock);
        init_completion(&ha->mbx_cmd_comp);
        complete(&ha->mbx_cmd_comp);
        init_completion(&ha->mbx_intr_comp);
@@ -2737,7 +2899,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->max_cmd_len, host->max_channel, host->max_lun,
            host->transportt, sht->vendor_id);
 
-que_init:
+       /* Set up the irqs */
+       ret = qla2x00_request_irqs(ha, rsp);
+       if (ret)
+               goto probe_init_failed;
+
        /* Alloc arrays of request and response ring ptrs */
        if (!qla2x00_alloc_queues(ha, req, rsp)) {
                ql_log(ql_log_fatal, base_vha, 0x003d,
@@ -2746,12 +2912,17 @@ que_init:
                goto probe_init_failed;
        }
 
-       qlt_probe_one_stage1(base_vha, ha);
+       if (ha->mqenable && shost_use_blk_mq(host)) {
+               /* number of hardware queues supported by blk/scsi-mq */
+               host->nr_hw_queues = ha->max_qpairs;
 
-       /* Set up the irqs */
-       ret = qla2x00_request_irqs(ha, rsp);
-       if (ret)
-               goto probe_init_failed;
+               ql_dbg(ql_dbg_init, base_vha, 0x0192,
+                       "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
+       } else
+               ql_dbg(ql_dbg_init, base_vha, 0x0193,
+                       "blk/scsi-mq disabled.\n");
+
+       qlt_probe_one_stage1(base_vha, ha);
 
        pci_save_state(pdev);
 
@@ -2842,11 +3013,12 @@ que_init:
            host->can_queue, base_vha->req,
            base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
-       if (ha->mqenable) {
-               if (qla25xx_setup_mode(base_vha)) {
-                       ql_log(ql_log_warn, base_vha, 0x00ec,
-                           "Failed to create queues, falling back to single queue mode.\n");
-                       goto que_init;
+       if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+               ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+               /* Create start of day qpairs for Block MQ */
+               if (shost_use_blk_mq(host)) {
+                       for (i = 0; i < ha->max_qpairs; i++)
+                               qla2xxx_create_qpair(base_vha, 5, 0);
                }
        }
 
@@ -3115,13 +3287,6 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
 static void
 qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
 {
-       /* Flush the work queue and remove it */
-       if (ha->wq) {
-               flush_workqueue(ha->wq);
-               destroy_workqueue(ha->wq);
-               ha->wq = NULL;
-       }
-
        /* Cancel all work and destroy DPC workqueues */
        if (ha->dpc_lp_wq) {
                cancel_work_sync(&ha->idc_aen);
@@ -3317,9 +3482,17 @@ qla2x00_free_device(scsi_qla_host_t *vha)
                ha->isp_ops->disable_intrs(ha);
        }
 
+       qla2x00_free_fcports(vha);
+
        qla2x00_free_irqs(vha);
 
-       qla2x00_free_fcports(vha);
+       /* Flush the work queue and remove it */
+       if (ha->wq) {
+               flush_workqueue(ha->wq);
+               destroy_workqueue(ha->wq);
+               ha->wq = NULL;
+
 
        qla2x00_mem_free(ha);
 
@@ -4034,6 +4207,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
        INIT_LIST_HEAD(&vha->logo_list);
        INIT_LIST_HEAD(&vha->plogi_ack_list);
+       INIT_LIST_HEAD(&vha->qp_list);
 
        spin_lock_init(&vha->work_lock);
        spin_lock_init(&vha->cmd_list_lock);
@@ -5038,8 +5212,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 
        base_vha->flags.init_done = 0;
        qla25xx_delete_queues(base_vha);
-       qla2x00_free_irqs(base_vha);
        qla2x00_free_fcports(base_vha);
+       qla2x00_free_irqs(base_vha);
        qla2x00_mem_free(ha);
        qla82xx_md_free(base_vha);
        qla2x00_free_queues(ha);
@@ -5073,6 +5247,8 @@ qla2x00_do_dpc(void *data)
 {
        scsi_qla_host_t *base_vha;
        struct qla_hw_data *ha;
+       uint32_t online;
+       struct qla_qpair *qpair;
 
        ha = (struct qla_hw_data *)data;
        base_vha = pci_get_drvdata(ha->pdev);
@@ -5334,6 +5510,22 @@ intr_on_check:
                                ha->isp_ops->beacon_blink(base_vha);
                }
 
+               /* qpair online check */
+               if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
+                   &base_vha->dpc_flags)) {
+                       if (ha->flags.eeh_busy ||
+                           ha->flags.pci_channel_io_perm_failure)
+                               online = 0;
+                       else
+                               online = 1;
+
+                       mutex_lock(&ha->mq_lock);
+                       list_for_each_entry(qpair, &base_vha->qp_list,
+                           qp_list_elem)
+                               qpair->online = online;
+                       mutex_unlock(&ha->mq_lock);
+               }
+
                if (!IS_QLAFX00(ha))
                        qla2x00_do_dpc_all_vps(base_vha);
 
@@ -5676,6 +5868,10 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
        switch (state) {
        case pci_channel_io_normal:
                ha->flags.eeh_busy = 0;
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                ha->flags.eeh_busy = 1;
@@ -5689,10 +5885,18 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                pci_disable_device(pdev);
                /* Return back all IOs */
                qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                ha->flags.pci_channel_io_perm_failure = 1;
                qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+               if (ql2xmqsupport) {
+                       set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return PCI_ERS_RESULT_DISCONNECT;
        }
        return PCI_ERS_RESULT_NEED_RESET;
@@ -5960,6 +6164,13 @@ qla83xx_disable_laser(scsi_qla_host_t *vha)
        qla83xx_wr_reg(vha, reg, data);
 }
 
+static int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+       scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+
+       return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+}
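
For context, a hedged sketch of how this hook is typically wired into the host template (the template hunk is not shown in this excerpt, so the exact field placement is an assumption):

	static struct scsi_host_template qla2xxx_driver_template = {
		.module		= THIS_MODULE,
		.name		= QLA2XXX_DRIVER_NAME,
		/* ... existing fields elided ... */
		.map_queues	= qla2xxx_map_queues,	/* blk-mq hctx <-> MSI-X vector mapping */
	};

blk_mq_pci_map_queues() then assigns each hardware queue the CPUs whose IRQ affinity matches the corresponding PCI vector, so completions are steered back to the submitting CPU.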
+
 static const struct pci_error_handlers qla2xxx_err_handler = {
        .error_detected = qla2xxx_pci_error_detected,
        .mmio_enabled = qla2xxx_pci_mmio_enabled,
index 07349270535d19002a39fa12f968f22c3ebd8cb1..82dfe07b1d47f7e1f8ae3517191f15d190168834 100644 (file)
@@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
        struct request_queue *rq = sdev->request_queue;
        struct scsi_target *starget = sdev->sdev_target;
 
-       error = scsi_device_set_state(sdev, SDEV_RUNNING);
-       if (error)
-               return error;
-
        error = scsi_target_add(starget);
        if (error)
                return error;
index 070332eb41f33de2c765bedc724f50f404af62a9..dbe5b4b95df0d9d317dbdc2261914e4d8771991f 100644 (file)
@@ -581,6 +581,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
        sg_io_hdr_t *hp;
        unsigned char cmnd[SG_MAX_CDB_SIZE];
 
+       if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+               return -EINVAL;
+
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
index aa43bfea0d004270d79411c91f5ee5b413daba73..abe6173726614f627c085129f4e8aa9dc6ba1eda 100644 (file)
@@ -23,6 +23,7 @@
 #include "unipro.h"
 #include "ufs-qcom.h"
 #include "ufshci.h"
+#include "ufs_quirks.h"
 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN  \
        (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
@@ -1031,6 +1032,34 @@ out:
        return ret;
 }
 
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+       int err;
+       u32 pa_vs_config_reg1;
+
+       err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+                            &pa_vs_config_reg1);
+       if (err)
+               goto out;
+
+       /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+       err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+                           (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+       return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+       int err = 0;
+
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+               err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+       return err;
+}
+
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1194,7 +1223,16 @@ static int ufs_qcom_init(struct ufs_hba *hba)
         */
        host->generic_phy = devm_phy_get(dev, "ufsphy");
 
-       if (IS_ERR(host->generic_phy)) {
+       if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+               /*
+                * The UFS driver might be probed before the phy driver is.
+                * In that case, return -EPROBE_DEFER so the probe is retried.
+                */
+               err = -EPROBE_DEFER;
+               dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
+                       __func__, err);
+               goto out_variant_clear;
+       } else if (IS_ERR(host->generic_phy)) {
                err = PTR_ERR(host->generic_phy);
                dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
                goto out_variant_clear;
@@ -1432,7 +1470,8 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
        reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
        print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
 
-       ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+       /* clear bit 17 - UTP_DBG_RAMS_EN */
+       ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
 
        reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
        print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
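
The replaced line wrote back "reg", which at that point holds a debug-RAM offset rather than the contents of REG_UFS_CFG1; the fix does a proper read-modify-write of the target register. A minimal sketch of the helper, assuming it matches the ufshcd_rmwl() definition in ufshcd.h:

	static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val,
				       u32 reg)
	{
		u32 tmp;

		tmp = ufshcd_readl(hba, reg);
		tmp &= ~mask;		/* clear the masked field */
		tmp |= (val & mask);	/* merge in the new value */
		ufshcd_writel(hba, tmp, reg);
	}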
@@ -1609,6 +1648,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
        .hce_enable_notify      = ufs_qcom_hce_enable_notify,
        .link_startup_notify    = ufs_qcom_link_startup_notify,
        .pwr_change_notify      = ufs_qcom_pwr_change_notify,
+       .apply_dev_quirks       = ufs_qcom_apply_dev_quirks,
        .suspend                = ufs_qcom_suspend,
        .resume                 = ufs_qcom_resume,
        .dbg_register_dump      = ufs_qcom_dump_dbg_regs,
index a19307a57ce248f5cb102c294408227b8ed603de..fe517cd7dac348b40b97c322e49e26976b7256cc 100644 (file)
@@ -142,6 +142,7 @@ enum ufs_qcom_phy_init_type {
         UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
 /* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1      0x9000
 #define DME_VS_CORE_CLK_CTRL   0xD002
 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
 #define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT               BIT(8)
index f7983058f3f716ed27505fbcef0d420eaf8a6163..08b799d4efcc68b99b7c43fa7af968dd3b543a2c 100644 (file)
@@ -134,29 +134,17 @@ struct ufs_dev_fix {
  */
 #define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE     (1 << 7)
 
+/*
+ * The max. value of PA_SaveConfigTime is 250 (10 us), but this is not
+ * enough for some vendors.
+ * A gear switch from PWM to HS may fail even with this maximum. The host
+ * controller can issue a gear switch as error recovery, and no software
+ * delay will help in that case, so PA_SaveConfigTime needs to be raised
+ * above 32 us, as per the vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME        (1 << 8)
 
 struct ufs_hba;
 void ufs_advertise_fixup_device(struct ufs_hba *hba);
 
-static struct ufs_dev_fix ufs_fixups[] = {
-       /* UFS cards deviations table */
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_NO_FASTAUTO),
-       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
-               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
-               UFS_DEVICE_QUIRK_PA_TACTIVATE),
-       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
-               UFS_DEVICE_QUIRK_PA_TACTIVATE),
-       UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-
-       END_FIX
-};
 #endif /* UFS_QUIRKS_H_ */
index ef8548c3a423d213dd04537753190b48f820cb83..a2c2817fc566911e59f019d91639260d2df2fa09 100644 (file)
@@ -185,6 +185,30 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
        return ufs_pm_lvl_states[lvl].link_state;
 }
 
+static struct ufs_dev_fix ufs_fixups[] = {
+       /* UFS cards deviations table */
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_NO_FASTAUTO),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+       UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+               UFS_DEVICE_QUIRK_PA_TACTIVATE),
+       UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+               UFS_DEVICE_QUIRK_PA_TACTIVATE),
+       UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+       UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+
+       END_FIX
+};
+
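
Moving the fixup table out of the header places it next to its only consumer. A hedged sketch of how ufs_advertise_fixup_device() is assumed to scan it, matching on manufacturer ID and model prefix (the field names below are assumptions based on struct ufs_dev_fix):

	struct ufs_dev_fix *f;

	for (f = ufs_fixups; f->quirk; f++) {
		if ((f->card.wmanufacturerid == card_data.wmanufacturerid ||
		     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
		    (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
		     !strcmp(f->card.model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;	/* accumulate all matching quirks */
	}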
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
@@ -288,10 +312,24 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
  */
 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 {
-       if (hba->ufs_version == UFSHCI_VERSION_10)
-               return INTERRUPT_MASK_ALL_VER_10;
-       else
-               return INTERRUPT_MASK_ALL_VER_11;
+       u32 intr_mask = 0;
+
+       switch (hba->ufs_version) {
+       case UFSHCI_VERSION_10:
+               intr_mask = INTERRUPT_MASK_ALL_VER_10;
+               break;
+       /* UFSHCI v1.1 and v2.0 share the same interrupt mask */
+       case UFSHCI_VERSION_11:
+       case UFSHCI_VERSION_20:
+               intr_mask = INTERRUPT_MASK_ALL_VER_11;
+               break;
+       /* unknown versions are treated as UFSHCI v2.1 */
+       case UFSHCI_VERSION_21:
+       default:
+               intr_mask = INTERRUPT_MASK_ALL_VER_21;
+       }
+
+       return intr_mask;
 }
 
 /**
@@ -5199,6 +5237,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 
        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
                ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+       ufshcd_vops_apply_dev_quirks(hba);
 }
 
 /**
@@ -6667,6 +6707,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);
 
+       if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+           (hba->ufs_version != UFSHCI_VERSION_11) &&
+           (hba->ufs_version != UFSHCI_VERSION_20) &&
+           (hba->ufs_version != UFSHCI_VERSION_21))
+               dev_err(hba->dev, "invalid UFS version 0x%x\n",
+                       hba->ufs_version);
+
        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);
 
index 7d9ff22acfeaf78c29c1ed4e3d95144d2fa18d2c..08cd26ed238270a3f6bfb797137f4962bfe04312 100644 (file)
@@ -266,7 +266,7 @@ struct ufs_pwr_mode_info {
  * @setup_task_mgmt: called before any task management request is issued
  *                  to set some things
  * @hibern8_notify: called around hibern8 enter/exit
- *                 to configure some things
+ * @apply_dev_quirks: called to apply device specific quirks
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
  * @dbg_register_dump: used to dump controller debug information
@@ -293,7 +293,8 @@ struct ufs_hba_variant_ops {
        void    (*setup_xfer_req)(struct ufs_hba *, int, bool);
        void    (*setup_task_mgmt)(struct ufs_hba *, int, u8);
        void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
-                                      enum ufs_notify_change_status);
+                                       enum ufs_notify_change_status);
+       int     (*apply_dev_quirks)(struct ufs_hba *);
        int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
        int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
        void    (*dbg_register_dump)(struct ufs_hba *hba);
@@ -839,6 +840,13 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
                return hba->vops->hibern8_notify(hba, cmd, status);
 }
 
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+       if (hba->vops && hba->vops->apply_dev_quirks)
+               return hba->vops->apply_dev_quirks(hba);
+       return 0;
+}
+
 static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
 {
        if (hba->vops && hba->vops->suspend)
index 5d978867be57199beac282369eae799599a1ea9f..8c5190e2e1c928407e8aac111b67758fff9bc191 100644 (file)
@@ -72,6 +72,10 @@ enum {
        REG_UIC_COMMAND_ARG_1                   = 0x94,
        REG_UIC_COMMAND_ARG_2                   = 0x98,
        REG_UIC_COMMAND_ARG_3                   = 0x9C,
+       REG_UFS_CCAP                            = 0x100,
+       REG_UFS_CRYPTOCAP                       = 0x104,
+
+       UFSHCI_CRYPTO_REG_SPACE_SIZE            = 0x400,
 };
 
 /* Controller capability masks */
@@ -275,6 +279,9 @@ enum {
 
        /* Interrupt disable mask for UFSHCI v1.1 */
        INTERRUPT_MASK_ALL_VER_11       = 0x31FFF,
+
+       /* Interrupt disable mask for UFSHCI v2.1 */
+       INTERRUPT_MASK_ALL_VER_21       = 0x71FFF,
 };
 
 /*
index d02bf58aea6d85bfdfa0473b95ca94794135becc..8bcb9b71f764325d585f659fb1430b393b54214c 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <asm/unaligned.h>
+#include <net/tcp.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include "cxgbit.h"
index b7d747e92c7abf589e35154b25482a9dedb57118..da2c73a255dec194bba90826f6b3e95e9a264e32 100644 (file)
@@ -23,7 +23,9 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/idr.h>
+#include <linux/delay.h>
 #include <asm/unaligned.h>
+#include <net/ipv6.h>
 #include <scsi/scsi_proto.h>
 #include <scsi/iscsi_proto.h>
 #include <scsi/scsi_tcq.h>
index 4cf2c0f2ba2f981699499cce77726d20aeee9dc9..e0db2ceb0f87cb170a2ff1b12fcb5a8e7a407cb2 100644 (file)
@@ -1,6 +1,18 @@
 #ifndef ISCSI_TARGET_H
 #define ISCSI_TARGET_H
 
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_np;
+struct iscsi_portal_group;
+struct iscsi_session;
+struct iscsi_tpg_np;
+struct kref;
+struct sockaddr_storage;
+
 extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
 extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
 extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
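
The iscsi target header hunks in this series all follow the same idiom: include only what the header itself needs (linux/types.h for u32, scsi/iscsi_proto.h for itt_t) and forward-declare any struct that is only passed by pointer. A minimal illustration of the pattern, using a hypothetical header that is not part of the patch:

	#ifndef EXAMPLE_TARGET_H
	#define EXAMPLE_TARGET_H

	#include <linux/types.h>	/* u32 */

	struct iscsi_conn;		/* opaque: only used through a pointer */

	extern int example_queue_pdu(struct iscsi_conn *, u32);

	#endif /* EXAMPLE_TARGET_H */

Each header then compiles standalone without dragging iscsi_target_core.h into every translation unit.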
index e116f0e845c08c4f7c91bd9d91c891c1fb9683b7..903b667f8e0136d1e1d919c1bc51d2a3c99bd7c3 100644 (file)
@@ -20,8 +20,8 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/err.h>
+#include <linux/random.h>
 #include <linux/scatterlist.h>
-
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
index d22f7b96a06ca98aa3bd83f669d92eb686cfcec9..1b91c13cc9657e5661c6ea254e799a068eedc4b7 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ISCSI_CHAP_H_
 #define _ISCSI_CHAP_H_
 
+#include <linux/types.h>
+
 #define CHAP_DIGEST_UNKNOWN    0
 #define CHAP_DIGEST_MD5                5
 #define CHAP_DIGEST_SHA                6
@@ -18,6 +20,9 @@
 #define CHAP_STAGE_CLIENT_NRIC 4
 #define CHAP_STAGE_SERVER_NR   5
 
+struct iscsi_node_auth;
+struct iscsi_conn;
+
 extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
                                int *, int *);
 
index 923c032f0b95f1efe2d1e12e90cb9086f3fd9346..bf40f03755ddc50697652ccde864d40df840fa0b 100644 (file)
 #include <linux/ctype.h>
 #include <linux/export.h>
 #include <linux/inet.h>
+#include <linux/module.h>
+#include <net/ipv6.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
-
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
@@ -100,8 +101,10 @@ static ssize_t lio_target_np_driver_store(struct config_item *item,
 
                tpg_np_new = iscsit_tpg_add_network_portal(tpg,
                                        &np->np_sockaddr, tpg_np, type);
-               if (IS_ERR(tpg_np_new))
+               if (IS_ERR(tpg_np_new)) {
+                       rc = PTR_ERR(tpg_np_new);
                        goto out;
+               }
        } else {
                tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
                if (tpg_np_new) {
index 647d4a5dca5281838b904ba67fd03d8a0ea80642..173ddd93c75726937dabfb33a11e85479437a207 100644 (file)
@@ -16,8 +16,8 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <linux/slab.h>
 #include <scsi/iscsi_proto.h>
-
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
index 646429ac5a02bf3055f87204e7591d09c530062e..16edeeeb7777b447cd93a4222e4e2cf54a10cfa1 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef ISCSI_TARGET_DATAIN_VALUES_H
 #define ISCSI_TARGET_DATAIN_VALUES_H
 
+struct iscsi_cmd;
+struct iscsi_datain;
+
 extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
 extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
 extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
index a0e2df9e809032034d80da9b398252365b626b92..06dbff5cd52069af9539d24305d0a2960bf5402f 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef ISCSI_TARGET_DEVICE_H
 #define ISCSI_TARGET_DEVICE_H
 
+struct iscsi_cmd;
+struct iscsi_session;
+
 extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
 extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
 
index a9e2f9497fb22a1734ae27393e63fa351533f2d8..60e69e2af6eda981efb74e4ac313fb0d031093bd 100644 (file)
@@ -1,6 +1,12 @@
 #ifndef ISCSI_TARGET_ERL0_H
 #define ISCSI_TARGET_ERL0_H
 
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_session;
+
 extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
 extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
 extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
index 9214c9dafa2be56082b792eaccd31ca87420b59e..fe9b7f1e44aca5c8bcda8677310351f03e507095 100644 (file)
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index 2a3ebf118a342fb33200810237afef1694c78536..54d36bd25beacdf4da8856b0b97ab5e237d4b2aa 100644 (file)
@@ -1,6 +1,16 @@
 #ifndef ISCSI_TARGET_ERL1_H
 #define ISCSI_TARGET_ERL1_H
 
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_datain_req;
+struct iscsi_ooo_cmdsn;
+struct iscsi_pdu;
+struct iscsi_session;
+
 extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
 extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
                        struct iscsi_cmd *, struct iscsi_datain_req *);
index e24f1c7c5862d4af2f0ae53efb3e981f153080db..faf9ae014b30443583555c1a49f089a3ad7d32c0 100644 (file)
@@ -17,6 +17,7 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <linux/slab.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index 63f2501f3fe08344ea000d3ae83e414fe4e361de..7965f1e865061ef0ec40ef63fe89d33a5e189379 100644 (file)
@@ -1,6 +1,13 @@
 #ifndef ISCSI_TARGET_ERL2_H
 #define ISCSI_TARGET_ERL2_H
 
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
 extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
 extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
 extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
index 15f79a2ca34ab6e17fd5fda6f68425b9af1809eb..450f51deb2a2ae18137ede36d4a3e8c188fd7352 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/string.h>
 #include <linux/kthread.h>
 #include <linux/idr.h>
+#include <linux/tcp.h>        /* TCP_NODELAY */
+#include <net/ipv6.h>         /* ipv6_addr_v4mapped() */
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index b597aa2c61a1c60d2794610796ac156c220e43fe..0e1fd6cedd54cb83ffc576654d5428d47e75efa9 100644 (file)
@@ -1,6 +1,13 @@
 #ifndef ISCSI_TARGET_LOGIN_H
 #define ISCSI_TARGET_LOGIN_H
 
+#include <linux/types.h>
+
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+struct sockaddr_storage;
+
 extern int iscsi_login_setup_crypto(struct iscsi_conn *);
 extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
 extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
index 89d34bd6d87f94519c26168741b74bcb6478061a..46388c9e08dad3e5de751d3280f8fc829683154d 100644 (file)
@@ -18,6 +18,8 @@
 
 #include <linux/ctype.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/sock.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
index f021cbd330e51e0c61d85fc4f909e502073f09e5..53438bfca4c66bee15a05ca56ae4d89686d6a05d 100644 (file)
@@ -4,6 +4,10 @@
 #define DECIMAL         0
 #define HEX             1
 
+struct iscsi_conn;
+struct iscsi_login;
+struct iscsi_np;
+
 extern void convert_null_to_semi(char *, int);
 extern int extract_param(const char *, const char *, unsigned int, char *,
                unsigned char *);
index 0c69a46a62ec7f9b082679e7a24f24fb0ffe321e..79cdf06ade48bf8d63336877b40af572778f574a 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef ISCSI_TARGET_NODEATTRIB_H
 #define ISCSI_TARGET_NODEATTRIB_H
 
+#include <linux/types.h>
+
+struct iscsi_node_acl;
+struct iscsi_portal_group;
+
 extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
                                              struct iscsi_portal_group *);
 extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
index 0efa80bb89628602598346c0647a958536acdf3b..e65bf78ceef3740fc1923c1b3ed446aa2996b82d 100644 (file)
@@ -17,7 +17,7 @@
  ******************************************************************************/
 
 #include <linux/slab.h>
-
+#include <linux/uio.h> /* struct kvec */
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
index a0751e3f0813429bd5c87c2ecab16780cfbfa1c7..9962ccf0ccd7d923d074923661a48e4b058e5400 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef ISCSI_PARAMETERS_H
 #define ISCSI_PARAMETERS_H
 
+#include <linux/types.h>
 #include <scsi/iscsi_proto.h>
 
 struct iscsi_extra_response {
@@ -23,6 +24,11 @@ struct iscsi_param {
        struct list_head p_list;
 } ____cacheline_aligned;
 
+struct iscsi_conn;
+struct iscsi_conn_ops;
+struct iscsi_param_list;
+struct iscsi_sess_ops;
+
 extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
 extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
 extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
index d5b153751a8d223c9edb21a01490748911a4756d..be1234362271b0b3f672c33a83b26f0407b630c8 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef ISCSI_SEQ_AND_PDU_LIST_H
 #define ISCSI_SEQ_AND_PDU_LIST_H
 
+#include <linux/types.h>
+#include <linux/cache.h>
+
 /* struct iscsi_pdu->status */
 #define DATAOUT_PDU_SENT                       1
 
@@ -78,6 +81,8 @@ struct iscsi_seq {
        u32             xfer_len;
 } ____cacheline_aligned;
 
+struct iscsi_cmd;
+
 extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
 extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
 extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
index 142e992cb097a63cb9a8efbd41c0a725ba719592..64cc5c07e47c2d301cfc66c14082a6a76458542f 100644 (file)
@@ -1,6 +1,12 @@
 #ifndef ISCSI_TARGET_TMR_H
 #define ISCSI_TARGET_TMR_H
 
+#include <linux/types.h>
+
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_tmr_req;
+
 extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
 extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
                        unsigned char *);
index 0814e5894a9616ffcc79fb0d0f086f718a971fd7..2e7e08dbda4807ed51c6e886b3399bc95199f3d5 100644 (file)
@@ -16,9 +16,9 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <linux/slab.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
-
 #include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
@@ -260,7 +260,6 @@ err_out:
                iscsi_release_param_list(tpg->param_list);
                tpg->param_list = NULL;
        }
-       kfree(tpg);
        return -ENOMEM;
 }
 
index 2da211920c186215e740d1aaa47f781999d1fb71..ceba298511677a5cf5ca49d784258f6c111da7dd 100644 (file)
@@ -1,6 +1,15 @@
 #ifndef ISCSI_TARGET_TPG_H
 #define ISCSI_TARGET_TPG_H
 
+#include <linux/types.h>
+
+struct iscsi_np;
+struct iscsi_session;
+struct iscsi_tiqn;
+struct iscsi_tpg_np;
+struct se_node_acl;
+struct sockaddr_storage;
+
 extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
 extern int iscsit_load_discovery_tpg(void);
 extern void iscsit_release_discovery_tpg(void);
index 08217d62fb0d6860e40bcb9fa4b2947e710b3bd4..c4eb141c6435983ea3493159f48cf6c3919a9f7c 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/module.h>
 #include <target/iscsi/iscsi_transport.h>
 
 static LIST_HEAD(g_transport_list);
index 1f38177207e0806b18641766fdd6849ae1555a9b..b5a1b4ccba124d4dbf60fd528ec05d3a7d0dbf32 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/list.h>
 #include <linux/percpu_ida.h>
+#include <net/ipv6.h>         /* ipv6_addr_equal() */
 #include <scsi/scsi_tcq.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
index 995f1cb29d0e08268acf9f3547494d23498dc87b..8ff08856516aba68394fc07661ec71b635c8b6a2 100644 (file)
@@ -1,8 +1,16 @@
 #ifndef ISCSI_TARGET_UTIL_H
 #define ISCSI_TARGET_UTIL_H
 
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h>        /* itt_t */
+
 #define MARKER_SIZE    8
 
+struct iscsi_cmd;
+struct iscsi_conn;
+struct iscsi_conn_recovery;
+struct iscsi_session;
+
 extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
 extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
 extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
index 4346462094a1af4e4ce778abd41a0f1da1d0a559..a8a230b4e6b532866becda3b04d70d2fee7c93c2 100644 (file)
@@ -1,3 +1,7 @@
+#include <linux/types.h>
+#include <linux/device.h>
+#include <target/target_core_base.h> /* struct se_cmd */
+
 #define TCM_LOOP_VERSION               "v2.1-rc2"
 #define TL_WWN_ADDR_LEN                        256
 #define TL_TPGS_PER_HBA                        32
index 58bb6ed181853b49370d9fda31f73e7bd24c7fda..e5c3e5f827d0b8f163bbe9f1e78c2da7cfc2bae5 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/string.h>
 #include <linux/configfs.h>
 #include <linux/ctype.h>
+#include <linux/delay.h>
 #include <linux/firewire.h>
 #include <linux/firewire-constants.h>
 #include <scsi/scsi_proto.h>
@@ -928,7 +929,7 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
        struct sbp_target_request *req;
        int tag;
 
-       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0)
                return ERR_PTR(-ENOMEM);
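
The second argument of percpu_ida_alloc() is a task state, not a gfp mask, so GFP_ATOMIC was never a meaningful value here. A short sketch of the two intended usages (hedged; based on the percpu_ida API in this kernel):

	/* never sleeps; returns a negative value when no tag is free */
	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);

	/* may block until another command releases a tag */
	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_UNINTERRUPTIBLE);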
 
index 4c82bbe19003d083979fac3139ab91e3a15a01a0..f5e330099bfca713f4cb12bd2dc77826fdad1b3b 100644 (file)
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/configfs.h>
+#include <linux/delay.h>
 #include <linux/export.h>
+#include <linux/fcntl.h>
 #include <linux/file.h>
+#include <linux/fs.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
index 9b250f9b33bfb830ff194e3b0bf028f9e0ff02fb..c69c11baf07f03ab6dae23a52bace4e956b613a2 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef TARGET_CORE_ALUA_H
 #define TARGET_CORE_ALUA_H
 
+#include <target/target_core_base.h>
+
 /*
  * INQUIRY response data, TPGS Field
  *
index a35a347ec357ad48626cc4083eca16588c29eb8b..54b36c9835be3ae2127cb1f447321eba73b824ac 100644 (file)
@@ -144,12 +144,12 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
                return -EINVAL;
        }
        if (!S_ISDIR(file_inode(fp)->i_mode)) {
-               filp_close(fp, 0);
+               filp_close(fp, NULL);
                mutex_unlock(&g_tf_lock);
                pr_err("db_root: not a directory: %s\n", db_root_stage);
                return -EINVAL;
        }
-       filp_close(fp, 0);
+       filp_close(fp, NULL);
 
        strncpy(db_root, db_root_stage, read_bytes);
 
index 6b423485c5d6b4f6e8e54a332f1dd62eda0325d9..1ebd13ef7bd333c5cbc488f7543eda58e29a2123 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/kthread.h>
 #include <linux/in.h>
 #include <linux/export.h>
+#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
index d545993df18be9ede3253861e24c25726d9a8e27..87aa376a1a1ae9f9119369725199d7bd5ba22a1e 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/falloc.h>
+#include <linux/uio.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
index 068966fce3089527fb7f14dd7bd3aa0fafdd1041..526595a072de899c618487b4edc909ad0fd64d91 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef TARGET_CORE_FILE_H
 #define TARGET_CORE_FILE_H
 
+#include <target/target_core_base.h>
+
 #define FD_VERSION             "4.0"
 
 #define FD_MAX_DEV_NAME                256
index 01c2afd815008d6cfa9d6b6d5490e1c39394db89..718d3fcd3e7cd8d8cacd7057ff85119a836725ca 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef TARGET_CORE_IBLOCK_H
 #define TARGET_CORE_IBLOCK_H
 
+#include <linux/atomic.h>
+#include <target/target_core_base.h>
+
 #define IBLOCK_VERSION         "4.0"
 
 #define IBLOCK_MAX_CDBS                16
index e2c970a9d61c32c7a95d034f889992a7560fdb64..9ab7090f7c839c6900cb30ddf7db1b8be4bc78cf 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef TARGET_CORE_INTERNAL_H
 #define TARGET_CORE_INTERNAL_H
 
+#include <linux/configfs.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 #define TARGET_CORE_NAME_MAX_LEN       64
 #define TARGET_FABRIC_NAME_SIZE                32
 
index 47463c99c3181ed8e133b2d39ba9362d0196541a..d761025144f9dc178cc43d4803b4c79b0147815b 100644 (file)
@@ -29,6 +29,8 @@
 #include <linux/list.h>
 #include <linux/vmalloc.h>
 #include <linux/file.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
@@ -253,8 +255,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
 
        if ((cmd->t_task_cdb[1] & 0x01) &&
            (cmd->t_task_cdb[1] & 0x02)) {
-               pr_err("LongIO and Obselete Bits set, returning"
-                               " ILLEGAL_REQUEST\n");
+               pr_err("LongIO and Obsolete Bits set, returning ILLEGAL_REQUEST\n");
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
        /*
index e3d26e9126a01fa860693d4ecb6852b0c7de3d36..847bd470339c7ab1e1d498a6ddf37be37c526d57 100644 (file)
@@ -1,5 +1,9 @@
 #ifndef TARGET_CORE_PR_H
 #define TARGET_CORE_PR_H
+
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 /*
  * PERSISTENT_RESERVE_OUT service action codes
  *
index 6d2007e35df65919f1687341499b26076ba25ed1..8a02fa47c7e8e907952f740b490f145f083e8459 100644 (file)
 #define PS_TIMEOUT_DISK                (15*HZ)
 #define PS_TIMEOUT_OTHER       (500*HZ)
 
-#include <linux/device.h>
-#include <linux/kref.h>
-#include <linux/kobject.h>
+#include <linux/cache.h>             /* ___cacheline_aligned */
+#include <target/target_core_base.h> /* struct se_device */
 
+struct block_device;
 struct scsi_device;
+struct Scsi_Host;
 
 struct pscsi_plugin_task {
        unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
index 24b36fd785f19a03d4dcd4507890d4ef850f023f..ddc216c9f1f63dcdea780b5be5edbf34d9cc93d4 100644 (file)
@@ -26,7 +26,9 @@
 
 #include <linux/string.h>
 #include <linux/parser.h>
+#include <linux/highmem.h>
 #include <linux/timer.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <scsi/scsi_proto.h>
index cc46a6a89b38e863a3d7b4c2f13207d251539fd2..91fc1a34791d909a1d68d265321caeaa833657db 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef TARGET_CORE_RD_H
 #define TARGET_CORE_RD_H
 
+#include <linux/module.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 #define RD_HBA_VERSION         "v4.0"
 #define RD_MCP_VERSION         "4.0"
 
index 04f616b3ba0a848a80d4a70c084c1b45d406c168..4879e70e2eefb68ddc229effbe4a9822f369ce3f 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/ratelimit.h>
 #include <linux/crc-t10dif.h>
+#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi_proto.h>
 #include <scsi/scsi_tcq.h>
index bd6e78ba153d68bd37b784ba7ebd52290932906c..97402856a8f0e3be40ae8eee5b0f74e74fdb2f9d 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef TARGET_CORE_UA_H
 #define TARGET_CORE_UA_H
 
+#include <target/target_core_base.h>
+
 /*
  * From spc4r17, Table D.1: ASC and ASCQ Assignement
  */
index 2b3c8564ace8154548349c6a71872f0b1aceeadb..8041710b697298ec7073c4e5910849bd1a154703 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/uio_driver.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
+#include <linux/highmem.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -537,7 +538,7 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
        struct se_device *se_dev = se_cmd->se_dev;
        struct tcmu_dev *udev = TCMU_DEV(se_dev);
        struct tcmu_cmd *tcmu_cmd;
-       int ret;
+       sense_reason_t ret;
 
        tcmu_cmd = tcmu_alloc_cmd(se_cmd);
        if (!tcmu_cmd)
@@ -685,8 +686,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
        target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
        cmd->se_cmd = NULL;
 
-       kmem_cache_free(tcmu_cmd_cache, cmd);
-
        return 0;
 }
 
index 094a1440eacb3dccdd9c35a678a2940c3e03216d..37d5caebffa6b593025a28b703a54a71e7d940d3 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/configfs.h>
+#include <linux/ratelimit.h>
 #include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
index 700a981c7b415264c40d70058cc3fc6c497b32ed..4d3d4dd060f28366ebd069abb603472ae0275d5b 100644 (file)
@@ -1,3 +1,5 @@
+#include <target/target_core_base.h>
+
 #define XCOPY_TARGET_DESC_LEN          32
 #define XCOPY_SEGMENT_DESC_LEN         28
 #define XCOPY_NAA_IEEE_REGEX_LEN       16
index e28209b99b59804de51663afe0a677c91745a827..11d27b93b41392aee08a4034c1e828927521defc 100644 (file)
@@ -17,6 +17,9 @@
 #ifndef __TCM_FC_H__
 #define __TCM_FC_H__
 
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 #define FT_VERSION "0.4"
 
 #define FT_NAMELEN 32          /* length of ASCII WWPNs including pad */
index 197f73386fac9ab45473de09db462ab5e4640d90..d2351139342f6200209078e769e04f5ea1eb2d1f 100644 (file)
@@ -1073,7 +1073,7 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
        struct usbg_cmd *cmd;
        int tag;
 
-       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0)
                return ERR_PTR(-ENOMEM);
 
index 8edf253484af981c0e0f6a9a931a71f85d721918..8c79e1a53af95ac12ca9a1f4e28b23e97c8d69f4 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1367,6 +1367,39 @@ out:
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
+{
+       struct kioctx *ioctx = NULL;
+       unsigned long ctx;
+       long ret;
+
+       ret = get_user(ctx, ctx32p);
+       if (unlikely(ret))
+               goto out;
+
+       ret = -EINVAL;
+       if (unlikely(ctx || nr_events == 0)) {
+               pr_debug("EINVAL: ctx %lu nr_events %u\n",
+                        ctx, nr_events);
+               goto out;
+       }
+
+       ioctx = ioctx_alloc(nr_events);
+       ret = PTR_ERR(ioctx);
+       if (!IS_ERR(ioctx)) {
+               /* truncating is ok because it's a user address */
+               ret = put_user((u32)ioctx->user_id, ctx32p);
+               if (ret)
+                       kill_ioctx(current->mm, ioctx, NULL);
+               percpu_ref_put(&ioctx->users);
+       }
+
+out:
+       return ret;
+}
+#endif
+
 /* sys_io_destroy:
  *     Destroy the aio_context specified.  May cancel any outstanding 
  *     AIOs and block on completion.  Will fail with -ENOSYS if not
@@ -1591,8 +1624,8 @@ out_put_req:
        return ret;
 }
 
-long do_io_submit(aio_context_t ctx_id, long nr,
-                 struct iocb __user *__user *iocbpp, bool compat)
+static long do_io_submit(aio_context_t ctx_id, long nr,
+                         struct iocb __user *__user *iocbpp, bool compat)
 {
        struct kioctx *ctx;
        long ret = 0;
@@ -1662,6 +1695,44 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
        return do_io_submit(ctx_id, nr, iocbpp, 0);
 }
 
+#ifdef CONFIG_COMPAT
+static inline long
+copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
+{
+       compat_uptr_t uptr;
+       int i;
+
+       for (i = 0; i < nr; ++i) {
+               if (get_user(uptr, ptr32 + i))
+                       return -EFAULT;
+               if (put_user(compat_ptr(uptr), ptr64 + i))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+#define MAX_AIO_SUBMITS        (PAGE_SIZE/sizeof(struct iocb *))
+
+COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
+                      int, nr, u32 __user *, iocb)
+{
+       struct iocb __user * __user *iocb64;
+       long ret;
+
+       if (unlikely(nr < 0))
+               return -EINVAL;
+
+       if (nr > MAX_AIO_SUBMITS)
+               nr = MAX_AIO_SUBMITS;
+
+       iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
+       ret = copy_iocb(nr, iocb, iocb64);
+       if (!ret)
+               ret = do_io_submit(ctx_id, nr, iocb64, 1);
+       return ret;
+}
+#endif
+
 /* lookup_kiocb
  *     Finds a given iocb for cancellation.
  */
@@ -1761,3 +1832,25 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
        }
        return ret;
 }
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
+                      compat_long_t, min_nr,
+                      compat_long_t, nr,
+                      struct io_event __user *, events,
+                      struct compat_timespec __user *, timeout)
+{
+       struct timespec t;
+       struct timespec __user *ut = NULL;
+
+       if (timeout) {
+               if (compat_get_timespec(&t, timeout))
+                       return -EFAULT;
+
+               ut = compat_alloc_user_space(sizeof(*ut));
+               if (copy_to_user(ut, &t, sizeof(t)))
+                       return -EFAULT;
+       }
+       return sys_io_getevents(ctx_id, min_nr, nr, events, ut);
+}
+#endif
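
Taken together, the three compat wrappers let a 32-bit process drive the AIO syscalls on a 64-bit kernel. A minimal user-space sketch exercising the same io_setup/io_submit/io_getevents sequence through raw syscalls (illustrative only; error handling trimmed, path is arbitrary):

	#include <linux/aio_abi.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <fcntl.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		aio_context_t ctx = 0;
		struct iocb cb, *cbs[1] = { &cb };
		struct io_event ev;
		char buf[512];
		int fd = open("/etc/hostname", O_RDONLY);

		if (fd < 0 || syscall(SYS_io_setup, 128, &ctx))
			return 1;

		memset(&cb, 0, sizeof(cb));
		cb.aio_fildes = fd;
		cb.aio_lio_opcode = IOCB_CMD_PREAD;
		cb.aio_buf = (__u64)(unsigned long)buf;
		cb.aio_nbytes = sizeof(buf);

		if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)
			return 1;
		if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
			printf("read %lld bytes\n", (long long)ev.res);
		return (int)syscall(SYS_io_destroy, ctx);
	}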
index c6bad51d8ec7b56d8027c019a98a656596911110..b914cfb03820aa7780e629dc047287df1ea56b08 100644 (file)
@@ -129,6 +129,7 @@ static inline befs_inode_addr
 blockno2iaddr(struct super_block *sb, befs_blocknr_t blockno)
 {
        befs_inode_addr iaddr;
+
        iaddr.allocation_group = blockno >> BEFS_SB(sb)->ag_shift;
        iaddr.start =
            blockno - (iaddr.allocation_group << BEFS_SB(sb)->ag_shift);
@@ -140,7 +141,7 @@ blockno2iaddr(struct super_block *sb, befs_blocknr_t blockno)
 static inline unsigned int
 befs_iaddrs_per_block(struct super_block *sb)
 {
-       return BEFS_SB(sb)->block_size / sizeof (befs_disk_inode_addr);
+       return BEFS_SB(sb)->block_size / sizeof(befs_disk_inode_addr);
 }
 
 #include "endian.h"
index eb557d9dc8be973ae82c89feb274275202080059..69c9d8cde95563b58dd31c5a5a7e41e9f02fb3c1 100644 (file)
@@ -55,12 +55,12 @@ enum super_flags {
 };
 
 #define BEFS_BYTEORDER_NATIVE 0x42494745
-#define BEFS_BYTEORDER_NATIVE_LE (__force fs32)cpu_to_le32(BEFS_BYTEORDER_NATIVE)
-#define BEFS_BYTEORDER_NATIVE_BE (__force fs32)cpu_to_be32(BEFS_BYTEORDER_NATIVE)
+#define BEFS_BYTEORDER_NATIVE_LE ((__force fs32)cpu_to_le32(BEFS_BYTEORDER_NATIVE))
+#define BEFS_BYTEORDER_NATIVE_BE ((__force fs32)cpu_to_be32(BEFS_BYTEORDER_NATIVE))
 
 #define BEFS_SUPER_MAGIC BEFS_SUPER_MAGIC1
-#define BEFS_SUPER_MAGIC1_LE (__force fs32)cpu_to_le32(BEFS_SUPER_MAGIC1)
-#define BEFS_SUPER_MAGIC1_BE (__force fs32)cpu_to_be32(BEFS_SUPER_MAGIC1)
+#define BEFS_SUPER_MAGIC1_LE ((__force fs32)cpu_to_le32(BEFS_SUPER_MAGIC1))
+#define BEFS_SUPER_MAGIC1_BE ((__force fs32)cpu_to_be32(BEFS_SUPER_MAGIC1))
 
 /*
  * Flags of inode
@@ -79,7 +79,7 @@ enum inode_flags {
        BEFS_INODE_WAS_WRITTEN = 0x00020000,
        BEFS_NO_TRANSACTION = 0x00040000,
 };
-/* 
+/*
  * On-Disk datastructures of BeFS
  */
 
@@ -139,7 +139,7 @@ typedef struct {
 
 } PACKED befs_super_block;
 
-/* 
+/*
  * Note: the indirect and dbl_indir block_runs may
  * be longer than one block!
  */
index 7e135ea73fddf65295a804502d7c3a57e906cffb..d509887c580ceedba9ab003cb390417aeab993e9 100644 (file)
@@ -12,8 +12,8 @@
  *
  * Dominic Giampaolo, author of "Practical File System
  * Design with the Be File System", for such a helpful book.
- * 
- * Marcus J. Ranum, author of the b+tree package in 
+ *
+ * Marcus J. Ranum, author of the b+tree package in
  * comp.sources.misc volume 10. This code is not copied from that
  * work, but it is partially based on it.
  *
  */
 
 /* Befs B+tree structure:
- * 
+ *
  * The first thing in the tree is the tree superblock. It tells you
  * all kinds of useful things about the tree, like where the rootnode
  * is located, and the size of the nodes (always 1024 with current version
  * of BeOS).
  *
  * The rest of the tree consists of a series of nodes. Nodes contain a header
- * (struct befs_btree_nodehead), the packed key data, an array of shorts 
+ * (struct befs_btree_nodehead), the packed key data, an array of shorts
  * containing the ending offsets for each of the keys, and an array of
- * befs_off_t values. In interior nodes, the keys are the ending keys for 
- * the childnode they point to, and the values are offsets into the 
- * datastream containing the tree. 
+ * befs_off_t values. In interior nodes, the keys are the ending keys for
+ * the childnode they point to, and the values are offsets into the
+ * datastream containing the tree.
  */
 
 /* Note:
- * 
- * The book states 2 confusing things about befs b+trees. First, 
+ *
+ * The book states 2 confusing things about befs b+trees. First,
  * it states that the overflow field of node headers is used by internal nodes
  * to point to another node that "effectively continues this one". Here is what
  * I believe that means. Each key in internal nodes points to another node that
- * contains key values less than itself. Inspection reveals that the last key 
- * in the internal node is not the last key in the index. Keys that are 
- * greater than the last key in the internal node go into the overflow node. 
+ * contains key values less than itself. Inspection reveals that the last key
+ * in the internal node is not the last key in the index. Keys that are
+ * greater than the last key in the internal node go into the overflow node.
  * I imagine there is a performance reason for this.
  *
- * Second, it states that the header of a btree node is sufficient to 
- * distinguish internal nodes from leaf nodes. Without saying exactly how. 
+ * Second, it states that the header of a btree node is sufficient to
+ * distinguish internal nodes from leaf nodes, without saying exactly how.
  * After figuring out the first, it becomes obvious that internal nodes have
  * overflow nodes and leafnodes do not.
  */
 
-/* 
+/*
  * Currently, this code is only good for directory B+trees.
  * In order to be used for other BFS indexes, it needs to be extended to handle
  * duplicate keys and non-string keytypes (int32, int64, float, double).
@@ -237,8 +237,8 @@ befs_bt_read_node(struct super_block *sb, const befs_data_stream *ds,
  * with @key (usually the disk block number of an inode).
  *
  * On failure, returns BEFS_ERR or BEFS_BT_NOT_FOUND.
- * 
- * Algorithm: 
+ *
+ * Algorithm:
  *   Read the superblock and rootnode of the b+tree.
  *   Drill down through the interior nodes using befs_find_key().
  *   Once at the correct leaf node, use befs_find_key() again to get the
@@ -402,12 +402,12 @@ befs_find_key(struct super_block *sb, struct befs_btree_node *node,
  *
  * Here's how it works: Key_no is the index of the key/value pair to
  * return in keybuf/value.
- * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is 
+ * Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is
  * the number of characters in the key (just a convenience).
  *
  * Algorithm:
  *   Get the first leafnode of the tree. See if the requested key is in that
- *   node. If not, follow the node->right link to the next leafnode. Repeat 
+ *   node. If not, follow the node->right link to the next leafnode. Repeat
  *   until the (key_no)th key is found or the tree is out of keys.
  */
 int
@@ -536,7 +536,7 @@ befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
  * @node_off: Pointer to offset of current node within datastream. Modified
  *             by the function.
  *
- * Helper function for btree traverse. Moves the current position to the 
+ * Helper function for btree traverse. Moves the current position to the
  * start of the first leaf node.
  *
  * Also checks for an empty tree. If there are no keys, returns BEFS_BT_EMPTY.
@@ -592,10 +592,10 @@ befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds,
 }
 
 /**
- * befs_leafnode - Determine if the btree node is a leaf node or an 
+ * befs_leafnode - Determine if the btree node is a leaf node or an
  * interior node
  * @node: Pointer to node structure to test
- * 
+ *
  * Return 1 if leaf, 0 if interior
  */
 static int
@@ -656,7 +656,7 @@ befs_bt_valarray(struct befs_btree_node *node)
  * @node: Pointer to the node structure to find the keydata array within
  *
  * Returns a pointer to the start of the keydata array
- * of the node pointed to by the node header 
+ * of the node pointed to by the node header
  */
 static char *
 befs_bt_keydata(struct befs_btree_node *node)
@@ -702,7 +702,7 @@ befs_bt_get_key(struct super_block *sb, struct befs_btree_node *node,
 
 /**
  * befs_compare_strings - compare two strings
- * @key1: pointer to the first key to be compared 
+ * @key1: pointer to the first key to be compared
  * @keylen1: length in bytes of key1
  * @key2: pointer to the second key to be compared
  * @keylen2: length in bytes of key2
index f2a8f637e9e07faf3faf7773737bdaea257ddf29..60c6c728e64e78168ad9c37d759948d6e82f596a 100644 (file)
@@ -1,13 +1,11 @@
 /*
  * btree.h
- * 
+ *
  */
 
-
 int befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
-                   const char *key, befs_off_t * value);
+                   const char *key, befs_off_t *value);
 
 int befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
                    loff_t key_no, size_t bufsize, char *keybuf,
-                   size_t * keysize, befs_off_t * value);
-
+                   size_t *keysize, befs_off_t *value);
index b4c7ba013c0d6e752296599d6b29597e5dd13512..720b3bc5c16a70bd664562bfe72abacffa8c7360 100644 (file)
@@ -84,13 +84,11 @@ befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
  *
 * Takes a file position and gives back a brun whose starting block
  * is block number fblock of the file.
- * 
+ *
  * Returns BEFS_OK or BEFS_ERR.
- * 
+ *
  * Calls specialized functions for each of the three possible
  * datastream regions.
- *
- * 2001-11-15 Will Dyson
  */
 int
 befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
@@ -120,7 +118,7 @@ befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
 
 /**
  * befs_read_lsmylink - read long symlink from datastream.
- * @sb: Filesystem superblock 
+ * @sb: Filesystem superblock
  * @ds: Datastream to read from
  * @buff: Buffer in which to place long symlink data
  * @len: Length of the long symlink in bytes
index 91ba8203d83f221278df23e8bb4ab5c3e9cb6d11..7ff9ff09ec6e70718b4b8bd398239922d1065f84 100644 (file)
@@ -5,10 +5,10 @@
 
 struct buffer_head *befs_read_datastream(struct super_block *sb,
                                         const befs_data_stream *ds,
-                                        befs_off_t pos, uint * off);
+                                        befs_off_t pos, uint *off);
 
 int befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
-                    befs_blocknr_t fblock, befs_block_run * run);
+                    befs_blocknr_t fblock, befs_block_run *run);
 
 size_t befs_read_lsymlink(struct super_block *sb, const befs_data_stream *data,
                          void *buff, befs_off_t len);
@@ -17,4 +17,3 @@ befs_blocknr_t befs_count_blocks(struct super_block *sb,
                          const befs_data_stream *ds);
 
 extern const befs_inode_addr BAD_IADDR;
-
index 85c13392e9e897dae2ee288f301c88c3067e8303..36656c86f50ec526d0a9f9c3b8ce76ec6098198a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  linux/fs/befs/debug.c
- * 
+ *
  * Copyright (C) 2001 Will Dyson (will_dyson at pobox.com)
  *
  * With help from the ntfs-tng driver by Anton Altparmakov
@@ -57,6 +57,7 @@ befs_debug(const struct super_block *sb, const char *fmt, ...)
 
        struct va_format vaf;
        va_list args;
+
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
@@ -67,7 +68,7 @@ befs_debug(const struct super_block *sb, const char *fmt, ...)
 }
 
 void
-befs_dump_inode(const struct super_block *sb, befs_inode * inode)
+befs_dump_inode(const struct super_block *sb, befs_inode *inode)
 {
 #ifdef CONFIG_BEFS_DEBUG
 
@@ -151,7 +152,7 @@ befs_dump_inode(const struct super_block *sb, befs_inode * inode)
  */
 
 void
-befs_dump_super_block(const struct super_block *sb, befs_super_block * sup)
+befs_dump_super_block(const struct super_block *sb, befs_super_block *sup)
 {
 #ifdef CONFIG_BEFS_DEBUG
 
@@ -202,7 +203,7 @@ befs_dump_super_block(const struct super_block *sb, befs_super_block * sup)
 #if 0
 /* unused */
 void
-befs_dump_small_data(const struct super_block *sb, befs_small_data * sd)
+befs_dump_small_data(const struct super_block *sb, befs_small_data *sd)
 {
 }
 
@@ -221,7 +222,8 @@ befs_dump_run(const struct super_block *sb, befs_disk_block_run run)
 #endif  /*  0  */
 
 void
-befs_dump_index_entry(const struct super_block *sb, befs_disk_btree_super * super)
+befs_dump_index_entry(const struct super_block *sb,
+                     befs_disk_btree_super *super)
 {
 #ifdef CONFIG_BEFS_DEBUG
 
@@ -242,7 +244,7 @@ befs_dump_index_entry(const struct super_block *sb, befs_disk_btree_super * supe
 }
 
 void
-befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead * node)
+befs_dump_index_node(const struct super_block *sb, befs_btree_nodehead *node)
 {
 #ifdef CONFIG_BEFS_DEBUG
 
index fa4b718de597394cf4b50be1fb877937d8f5935d..5367a6470a69fb59c0db37e321ff9d9c343d5519 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * inode.c
- * 
+ *
  * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
  */
 
 #include "inode.h"
 
 /*
-	Validates the correctness of the befs inode
-	Returns BEFS_OK if the inode should be used, otherwise
-	returns BEFS_BAD_INODE
-*/
+ * Validates the correctness of the befs inode
+ * Returns BEFS_OK if the inode should be used, otherwise
+ * returns BEFS_BAD_INODE
+ */
 int
-befs_check_inode(struct super_block *sb, befs_inode * raw_inode,
+befs_check_inode(struct super_block *sb, befs_inode *raw_inode,
                 befs_blocknr_t inode)
 {
        u32 magic1 = fs32_to_cpu(sb, raw_inode->magic1);
index 9dc7fd9b7570d63321c86b3088e5a975f7e8c419..2219e412f49bee9255241464701290c6d006fa58 100644 (file)
@@ -1,8 +1,7 @@
 /*
  * inode.h
- * 
+ *
  */
 
-int befs_check_inode(struct super_block *sb, befs_inode * raw_inode,
+int befs_check_inode(struct super_block *sb, befs_inode *raw_inode,
                     befs_blocknr_t inode);
-
index b4a558126ee1724b0d3bd833f68a1c201833af33..227cb86e07fe3a99afc45c1790a87671641eb5af 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com
  *
- * Based on portions of file.c and inode.c 
+ * Based on portions of file.c and inode.c
  * by Makoto Kato (m_kato@ga2.so-net.ne.jp)
  *
  * Many thanks to Dominic Giampaolo, author of Practical File System
@@ -19,8 +19,7 @@
 /*
  * Converts befs notion of disk addr to a disk offset and uses
  * linux kernel function sb_bread() to get the buffer containing
- * the offset. -Will Dyson
- *
+ * the offset.
  */
 
 struct buffer_head *
@@ -55,7 +54,7 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
        befs_debug(sb, "<--- %s", __func__);
        return bh;
 
-      error:
+error:
        befs_debug(sb, "<--- %s ERROR", __func__);
        return NULL;
 }
index 78d7bc6e60dee4d51fb1ac6325cca8b3929bd9c2..9b3e1967cb313f100a7ae96d9559f4ae6cea902d 100644 (file)
@@ -4,4 +4,3 @@
 
 struct buffer_head *befs_bread_iaddr(struct super_block *sb,
                                     befs_inode_addr iaddr);
-
index 647a276eba5654593739aafa9a6984ca967d8adf..19407165f4aad9719ef5f339fabc31bb2c22ef9e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/parser.h>
 #include <linux/namei.h>
 #include <linux/sched.h>
+#include <linux/exportfs.h>
 
 #include "befs.h"
 #include "btree.h"
@@ -37,7 +38,8 @@ static int befs_readdir(struct file *, struct dir_context *);
 static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 static int befs_readpage(struct file *file, struct page *page);
 static sector_t befs_bmap(struct address_space *mapping, sector_t block);
-static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int);
+static struct dentry *befs_lookup(struct inode *, struct dentry *,
+                                 unsigned int);
 static struct inode *befs_iget(struct super_block *, unsigned long);
 static struct inode *befs_alloc_inode(struct super_block *sb);
 static void befs_destroy_inode(struct inode *inode);
@@ -51,6 +53,10 @@ static void befs_put_super(struct super_block *);
 static int befs_remount(struct super_block *, int *, char *);
 static int befs_statfs(struct dentry *, struct kstatfs *);
 static int parse_options(char *, struct befs_mount_options *);
+static struct dentry *befs_fh_to_dentry(struct super_block *sb,
+                               struct fid *fid, int fh_len, int fh_type);
+static struct dentry *befs_fh_to_parent(struct super_block *sb,
+                               struct fid *fid, int fh_len, int fh_type);
 
 static const struct super_operations befs_sops = {
        .alloc_inode    = befs_alloc_inode,     /* allocate a new inode */
@@ -83,9 +89,14 @@ static const struct address_space_operations befs_symlink_aops = {
        .readpage       = befs_symlink_readpage,
 };
 
-/* 
+static const struct export_operations befs_export_operations = {
+       .fh_to_dentry   = befs_fh_to_dentry,
+       .fh_to_parent   = befs_fh_to_parent,
+};
+
+/*
  * Called by generic_file_read() to read a page of data
- * 
+ *
  * In turn, simply calls a generic block read function and
  * passes it the address of befs_get_block, for mapping file
  * positions to disk blocks.
@@ -102,15 +113,13 @@ befs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, befs_get_block);
 }
 
-/* 
- * Generic function to map a file position (block) to a 
+/*
+ * Generic function to map a file position (block) to a
  * disk offset (passed back in bh_result).
  *
  * Used by many higher level functions.
  *
  * Calls befs_fblock2brun() in datastream.c to do the real work.
- *
- * -WD 10-26-01
  */
 
 static int
@@ -269,15 +278,15 @@ befs_alloc_inode(struct super_block *sb)
        struct befs_inode_info *bi;
 
        bi = kmem_cache_alloc(befs_inode_cachep, GFP_KERNEL);
-        if (!bi)
-                return NULL;
-        return &bi->vfs_inode;
+       if (!bi)
+               return NULL;
+       return &bi->vfs_inode;
 }
 
 static void befs_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
-        kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
+       kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
 }
 
 static void befs_destroy_inode(struct inode *inode)
@@ -287,7 +296,7 @@ static void befs_destroy_inode(struct inode *inode)
 
 static void init_once(void *foo)
 {
-        struct befs_inode_info *bi = (struct befs_inode_info *) foo;
+       struct befs_inode_info *bi = (struct befs_inode_info *) foo;
 
        inode_init_once(&bi->vfs_inode);
 }
@@ -338,7 +347,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
        /*
         * set uid and gid.  But since current BeOS is single user OS, so
         * you can change by "uid" or "gid" options.
-        */   
+        */
 
        inode->i_uid = befs_sb->mount_opts.use_uid ?
                befs_sb->mount_opts.uid :
@@ -353,14 +362,14 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
         * BEFS's time is 64 bits, but current VFS is 32 bits...
         * BEFS don't have access time. Nor inode change time. VFS
         * doesn't have creation time.
-        * Also, the lower 16 bits of the last_modified_time and 
+        * Also, the lower 16 bits of the last_modified_time and
         * create_time are just a counter to help ensure uniqueness
         * for indexing purposes. (PFD, page 54)
         */
 
        inode->i_mtime.tv_sec =
            fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
-       inode->i_mtime.tv_nsec = 0;   /* lower 16 bits are not a time */        
+       inode->i_mtime.tv_nsec = 0;   /* lower 16 bits are not a time */
        inode->i_ctime = inode->i_mtime;
        inode->i_atime = inode->i_mtime;
 
@@ -414,10 +423,10 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
        unlock_new_inode(inode);
        return inode;
 
-      unacquire_bh:
+unacquire_bh:
        brelse(bh);
 
-      unacquire_none:
+unacquire_none:
        iget_failed(inode);
        befs_debug(sb, "<--- %s - Bad inode", __func__);
        return ERR_PTR(-EIO);
@@ -442,7 +451,7 @@ befs_init_inodecache(void)
 }
 
 /* Called at fs teardown.
- * 
+ *
  * Taken from NFS implementation by Al Viro.
  */
 static void
@@ -491,13 +500,10 @@ fail:
 }
 
 /*
- * UTF-8 to NLS charset  convert routine
- * 
+ * UTF-8 to NLS charset convert routine
  *
- * Changed 8/10/01 by Will Dyson. Now use uni2char() / char2uni() rather than
- * the nls tables directly
+ * Uses uni2char() / char2uni() rather than the nls tables directly
  */
-
 static int
 befs_utf2nls(struct super_block *sb, const char *in,
             int in_len, char **out, int *out_len)
@@ -521,9 +527,8 @@ befs_utf2nls(struct super_block *sb, const char *in,
        }
 
        *out = result = kmalloc(maxlen, GFP_NOFS);
-       if (!*out) {
+       if (!*out)
                return -ENOMEM;
-       }
 
        for (i = o = 0; i < in_len; i += utflen, o += unilen) {
 
@@ -546,7 +551,7 @@ befs_utf2nls(struct super_block *sb, const char *in,
 
        return o;
 
-      conv_err:
+conv_err:
        befs_error(sb, "Name using character set %s contains a character that "
                   "cannot be converted to unicode.", nls->charset);
        befs_debug(sb, "<--- %s", __func__);
@@ -561,18 +566,18 @@ befs_utf2nls(struct super_block *sb, const char *in,
  * @in_len: Length of input string in bytes
  * @out: The output string in UTF-8 format
  * @out_len: Length of the output buffer
- * 
+ *
  * Converts input string @in, which is in the format of the loaded NLS map,
  * into a utf8 string.
- * 
+ *
  * The destination string @out is allocated by this function and the caller is
  * responsible for freeing it with kfree()
- * 
+ *
  * On return, *@out_len is the length of @out in bytes.
  *
  * On success, the return value is the number of utf8 characters written to
  * the output buffer @out.
- *  
+ *
  * On Failure, a negative number coresponding to the error code is returned.
  */
 
@@ -585,9 +590,11 @@ befs_nls2utf(struct super_block *sb, const char *in,
        wchar_t uni;
        int unilen, utflen;
        char *result;
-       /* There're nls characters that will translate to 3-chars-wide UTF-8
-        * characters, a additional byte is needed to save the final \0
-        * in special cases */
+       /*
+        * There are nls characters that will translate to 3-chars-wide UTF-8
+        * characters, an additional byte is needed to save the final \0
+        * in special cases
+        */
        int maxlen = (3 * in_len) + 1;
 
        befs_debug(sb, "---> %s\n", __func__);
@@ -624,14 +631,41 @@ befs_nls2utf(struct super_block *sb, const char *in,
 
        return i;
 
-      conv_err:
-       befs_error(sb, "Name using charecter set %s contains a charecter that "
+conv_err:
+       befs_error(sb, "Name using character set %s contains a character that "
                   "cannot be converted to unicode.", nls->charset);
        befs_debug(sb, "<--- %s", __func__);
        kfree(result);
        return -EILSEQ;
 }
 
+static struct inode *befs_nfs_get_inode(struct super_block *sb, uint64_t ino,
+                                        uint32_t generation)
+{
+       /* No need to handle i_generation */
+       return befs_iget(sb, ino);
+}
+
+/*
+ * Map a NFS file handle to a corresponding dentry
+ */
+static struct dentry *befs_fh_to_dentry(struct super_block *sb,
+                               struct fid *fid, int fh_len, int fh_type)
+{
+       return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+                                   befs_nfs_get_inode);
+}
+
+/*
+ * Find the parent for a file specified by NFS handle
+ */
+static struct dentry *befs_fh_to_parent(struct super_block *sb,
+                               struct fid *fid, int fh_len, int fh_type)
+{
+       return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+                                   befs_nfs_get_inode);
+}
+
 enum {
        Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
 };
@@ -666,6 +700,7 @@ parse_options(char *options, struct befs_mount_options *opts)
 
        while ((p = strsep(&options, ",")) != NULL) {
                int token;
+
                if (!*p)
                        continue;
 
@@ -721,7 +756,7 @@ parse_options(char *options, struct befs_mount_options *opts)
 }
 
 /* This function has the responsibiltiy of getting the
- * filesystem ready for unmounting. 
+ * filesystem ready for unmounting.
  * Basically, we free everything that we allocated in
  * befs_read_inode
  */
@@ -782,8 +817,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
         * Linux 2.4.10 and later refuse to read blocks smaller than
         * the logical block size for the device. But we also need to read at
         * least 1k to get the second 512 bytes of the volume.
-        * -WD 10-26-01
-        */ 
+        */
        blocksize = sb_min_blocksize(sb, 1024);
        if (!blocksize) {
                if (!silent)
@@ -791,7 +825,8 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
                goto unacquire_priv_sbp;
        }
 
-       if (!(bh = sb_bread(sb, sb_block))) {
+       bh = sb_bread(sb, sb_block);
+       if (!bh) {
                if (!silent)
                        befs_error(sb, "unable to read superblock");
                goto unacquire_priv_sbp;
@@ -816,7 +851,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
 
        brelse(bh);
 
-       if( befs_sb->num_blocks > ~((sector_t)0) ) {
+       if (befs_sb->num_blocks > ~((sector_t)0)) {
                if (!silent)
                        befs_error(sb, "blocks count: %llu is larger than the host can use",
                                        befs_sb->num_blocks);
@@ -831,6 +866,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
        /* Set real blocksize of fs */
        sb_set_blocksize(sb, (ulong) befs_sb->block_size);
        sb->s_op = &befs_sops;
+       sb->s_export_op = &befs_export_operations;
        root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
        if (IS_ERR(root)) {
                ret = PTR_ERR(root);
@@ -861,16 +897,16 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        return 0;
-/*****************/
-      unacquire_bh:
+
+unacquire_bh:
        brelse(bh);
 
-      unacquire_priv_sbp:
+unacquire_priv_sbp:
        kfree(befs_sb->mount_opts.iocharset);
        kfree(sb->s_fs_info);
        sb->s_fs_info = NULL;
 
-      unacquire_none:
+unacquire_none:
        return ret;
 }
 
@@ -919,7 +955,7 @@ static struct file_system_type befs_fs_type = {
        .name           = "befs",
        .mount          = befs_mount,
        .kill_sb        = kill_block_super,
-       .fs_flags       = FS_REQUIRES_DEV,      
+       .fs_flags       = FS_REQUIRES_DEV,
 };
 MODULE_ALIAS_FS("befs");
 
@@ -956,9 +992,9 @@ exit_befs_fs(void)
 }
 
 /*
-Macros that typecheck the init and exit functions,
-ensures that they are called at init and cleanup,
-and eliminates warnings about unused functions.
-*/
+ * Macros that typecheck the init and exit functions,
+ * ensures that they are called at init and cleanup,
+ * and eliminates warnings about unused functions.
+ */
 module_init(init_befs_fs)
 module_exit(exit_befs_fs)
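
With befs_export_operations wired up, befs can be exported over NFS; the
same exportfs hooks also back the file-handle syscalls, so a quick way to
exercise befs_fh_to_dentry() from user space is a handle round-trip. A
hedged sketch — the mount point and file name are hypothetical, and
open_by_handle_at() needs CAP_DAC_READ_SEARCH:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            struct file_handle *fh;
            int mount_id, mfd, fd;

            fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
            fh->handle_bytes = MAX_HANDLE_SZ;

            /* encode a handle for an (assumed) file on the befs mount */
            if (name_to_handle_at(AT_FDCWD, "/mnt/befs/file", fh, &mount_id, 0))
                    perror("name_to_handle_at");

            /* decode it again — this is what lands in ->fh_to_dentry() */
            mfd = open("/mnt/befs", O_RDONLY | O_DIRECTORY);
            fd = open_by_handle_at(mfd, fh, O_RDONLY);
            if (fd < 0)
                    perror("open_by_handle_at");
            return 0;
    }
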
index dc4556376a2206ac43431bddb3815b60615bcb42..ec1df30a7e9ab859e25ca0ea384f0681fc24adcc 100644 (file)
@@ -2,7 +2,5 @@
  * super.h
  */
 
-int befs_load_sb(struct super_block *sb, befs_super_block * disk_sb);
-
+int befs_load_sb(struct super_block *sb, befs_super_block *disk_sb);
 int befs_check_sb(struct super_block *sb);
-
index 543b48c29ac3157eec076bf78b15190d3cd38316..3f4908c286988769cc20ae63aba326f4321616e5 100644 (file)
@@ -487,45 +487,6 @@ COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
        return compat_sys_fcntl64(fd, cmd, arg);
 }
 
-COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
-{
-       long ret;
-       aio_context_t ctx64;
-
-       mm_segment_t oldfs = get_fs();
-       if (unlikely(get_user(ctx64, ctx32p)))
-               return -EFAULT;
-
-       set_fs(KERNEL_DS);
-       /* The __user pointer cast is valid because of the set_fs() */
-       ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
-       set_fs(oldfs);
-       /* truncating is ok because it's a user address */
-       if (!ret)
-               ret = put_user((u32) ctx64, ctx32p);
-       return ret;
-}
-
-COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
-                      compat_long_t, min_nr,
-                      compat_long_t, nr,
-                      struct io_event __user *, events,
-                      struct compat_timespec __user *, timeout)
-{
-       struct timespec t;
-       struct timespec __user *ut = NULL;
-
-       if (timeout) {
-               if (compat_get_timespec(&t, timeout))
-                       return -EFAULT;
-
-               ut = compat_alloc_user_space(sizeof(*ut));
-               if (copy_to_user(ut, &t, sizeof(t)) )
-                       return -EFAULT;
-       } 
-       return sys_io_getevents(ctx_id, min_nr, nr, events, ut);
-}
-
 /* A write operation does a read from user space and vice versa */
 #define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
 
@@ -602,42 +563,6 @@ out:
        return ret;
 }
 
-static inline long
-copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
-{
-       compat_uptr_t uptr;
-       int i;
-
-       for (i = 0; i < nr; ++i) {
-               if (get_user(uptr, ptr32 + i))
-                       return -EFAULT;
-               if (put_user(compat_ptr(uptr), ptr64 + i))
-                       return -EFAULT;
-       }
-       return 0;
-}
-
-#define MAX_AIO_SUBMITS        (PAGE_SIZE/sizeof(struct iocb *))
-
-COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
-                      int, nr, u32 __user *, iocb)
-{
-       struct iocb __user * __user *iocb64; 
-       long ret;
-
-       if (unlikely(nr < 0))
-               return -EINVAL;
-
-       if (nr > MAX_AIO_SUBMITS)
-               nr = MAX_AIO_SUBMITS;
-       
-       iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
-       ret = copy_iocb(nr, iocb, iocb64);
-       if (!ret)
-               ret = do_io_submit(ctx_id, nr, iocb64, 1);
-       return ret;
-}
-
 struct compat_ncp_mount_data {
        compat_int_t version;
        compat_uint_t ncp_fd;
index 8112eacf10f3afe7243385e46e37625b4ea3d03e..eadbf5069c388ab5431a271a39e5814cbb50b0d0 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -19,7 +19,7 @@
  * current->executable is only used by the procfs.  This allows a dispatch
  * table to check for several different types  of binary formats.  We keep
  * trying until we recognize the file or we run out of supported binary
- * formats. 
+ * formats.
  */
 
 #include <linux/slab.h>
@@ -1268,6 +1268,13 @@ int flush_old_exec(struct linux_binprm * bprm)
        flush_thread();
        current->personality &= ~bprm->per_clear;
 
+       /*
+        * We have to apply CLOEXEC before we change whether the process is
+        * dumpable (in setup_new_exec) to avoid a race with a process in userspace
+        * trying to access the should-be-closed file descriptors of a process
+        * undergoing exec(2).
+        */
+       do_close_on_exec(current->files);
        return 0;
 
 out:
@@ -1330,7 +1337,6 @@ void setup_new_exec(struct linux_binprm * bprm)
           group */
        current->self_exec_id++;
        flush_signal_handlers(current, 0);
-       do_close_on_exec(current->files);
 }
 EXPORT_SYMBOL(setup_new_exec);
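
The hunks above close a race: close-on-exec descriptors are now flushed in
flush_old_exec(), before setup_new_exec() changes the dumpable flag, so
another process cannot reach should-be-closed descriptors through the
window in between. The flag being honored is the ordinary FD_CLOEXEC bit;
a minimal user-space sketch of setting it:

    #include <fcntl.h>

    /* mark fd close-on-exec so execve() drops it */
    static int set_cloexec(int fd)
    {
            int flags = fcntl(fd, F_GETFD);

            if (flags < 0)
                    return -1;
            return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }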
 
index f7e28f8ea04d2a629ae7c5ead8b84ca71913cc8a..b5b1259e064f8d9661110ba1f1f73d1a0ff19d51 100644 (file)
@@ -96,10 +96,6 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
        return &mountpoint_hashtable[tmp & mp_hash_mask];
 }
 
-/*
- * allocation is serialized by namespace_sem, but we need the spinlock to
- * serialize with freeing.
- */
 static int mnt_alloc_id(struct mount *mnt)
 {
        int res;
@@ -1034,6 +1030,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                if (IS_MNT_SLAVE(old))
                        list_add(&mnt->mnt_slave, &old->mnt_slave);
                mnt->mnt_master = old->mnt_master;
+       } else {
+               CLEAR_MNT_SHARED(mnt);
        }
        if (flag & CL_MAKE_SHARED)
                set_mnt_shared(mnt);
@@ -1828,9 +1826,7 @@ struct vfsmount *clone_private_mount(const struct path *path)
        if (IS_MNT_UNBINDABLE(old_mnt))
                return ERR_PTR(-EINVAL);
 
-       down_read(&namespace_sem);
        new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
-       up_read(&namespace_sem);
        if (IS_ERR(new_mnt))
                return ERR_CAST(new_mnt);
 
index cb22a9f9ae7e3694db1532a683ae34d4d313e787..fad81041f5ab6a60e23e53f45ae429bbde7a9470 100644 (file)
@@ -1273,8 +1273,8 @@ out_error:
  */
 static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
 {
-       int error;
        struct inode *inode = d_inode(dentry);
+       int error = 0;
 
        /*
         * I believe we can only get a negative dentry here in the case of a
@@ -1293,7 +1293,8 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
                return 0;
        }
 
-       error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+       if (nfs_mapping_need_revalidate_inode(inode))
+               error = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
        dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
                        __func__, inode->i_ino, error ? "invalid" : "valid");
        return !error;
@@ -2285,8 +2286,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
                if (cache == NULL)
                        goto out;
                /* Found an entry, is our attribute cache valid? */
-               if (!nfs_attribute_cache_expired(inode) &&
-                   !(nfsi->cache_validity & NFS_INO_INVALID_ATTR))
+               if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
                        break;
                err = -ECHILD;
                if (!may_block)
@@ -2334,12 +2334,12 @@ static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred,
                cache = NULL;
        if (cache == NULL)
                goto out;
-       err = nfs_revalidate_inode_rcu(NFS_SERVER(inode), inode);
-       if (err)
+       if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
                goto out;
        res->jiffies = cache->jiffies;
        res->cred = cache->cred;
        res->mask = cache->mask;
+       err = 0;
 out:
        rcu_read_unlock();
        return err;
@@ -2491,12 +2491,13 @@ EXPORT_SYMBOL_GPL(nfs_may_open);
 static int nfs_execute_ok(struct inode *inode, int mask)
 {
        struct nfs_server *server = NFS_SERVER(inode);
-       int ret;
+       int ret = 0;
 
-       if (mask & MAY_NOT_BLOCK)
-               ret = nfs_revalidate_inode_rcu(server, inode);
-       else
-               ret = nfs_revalidate_inode(server, inode);
+       if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) {
+               if (mask & MAY_NOT_BLOCK)
+                       return -ECHILD;
+               ret = __nfs_revalidate_inode(server, inode);
+       }
        if (ret == 0 && !execute_ok(inode))
                ret = -EACCES;
        return ret;
index 55208b9b3c110b1bf9ded64a90a93c196c0fa160..157cb43ce9dbef4bd1c190ae54da4a3ec07e780e 100644 (file)
@@ -101,21 +101,11 @@ EXPORT_SYMBOL_GPL(nfs_file_release);
 static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
 {
        struct nfs_server *server = NFS_SERVER(inode);
-       struct nfs_inode *nfsi = NFS_I(inode);
-       const unsigned long force_reval = NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
-       unsigned long cache_validity = nfsi->cache_validity;
-
-       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) &&
-           (cache_validity & force_reval) != force_reval)
-               goto out_noreval;
 
        if (filp->f_flags & O_DIRECT)
                goto force_reval;
-       if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
-               goto force_reval;
-       if (nfs_attribute_timeout(inode))
+       if (nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE))
                goto force_reval;
-out_noreval:
        return 0;
 force_reval:
        return __nfs_revalidate_inode(server, inode);
index a5589b791439af1c6f426cb95e2a0670edd2a30b..f956ca20a8a3595e36e6cae0e913dc90a47b1e22 100644 (file)
@@ -282,7 +282,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
                             s->nfs_client->cl_minorversion);
 
 out_test_devid:
-       if (filelayout_test_devid_unavailable(devid))
+       if (ret->ds_clp == NULL ||
+           filelayout_test_devid_unavailable(devid))
                ret = NULL;
 out:
        return ret;
index 9e111d07f66747b200051955d9c21997187d4002..45962fe5098c6ff9e87a1a23158409841ad02033 100644 (file)
@@ -1126,7 +1126,8 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
        case -EPIPE:
                dprintk("%s DS connection error %d\n", __func__,
                        task->tk_status);
-               nfs4_mark_deviceid_unavailable(devid);
+               nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+                               &devid->deviceid);
                rpc_wake_up(&tbl->slot_tbl_waitq);
                /* fall through */
        default:
@@ -1175,7 +1176,8 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
        default:
                dprintk("%s DS connection error %d\n", __func__,
                        task->tk_status);
-               nfs4_mark_deviceid_unavailable(devid);
+               nfs4_delete_deviceid(devid->ld, devid->nfs_client,
+                               &devid->deviceid);
        }
        /* FIXME: Need to prevent infinite looping here. */
        return -NFS4ERR_RESET_TO_PNFS;
index 3cc39d1c1206512b4b58b189f7bd39be20a4611a..e5a6f248697b369003e89ed526608d7cd2a296eb 100644 (file)
@@ -177,7 +177,7 @@ out_err:
 static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
                struct nfs4_deviceid_node *devid)
 {
-       nfs4_mark_deviceid_unavailable(devid);
+       nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid);
        if (!ff_layout_has_available_ds(lseg))
                pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
                                lseg);
index 5864146e05e6ad36d616593b10aeddeb8398eece..011e4f8c1e015d72f6ec7b485f27555200168457 100644 (file)
@@ -160,6 +160,43 @@ int nfs_sync_mapping(struct address_space *mapping)
        return ret;
 }
 
+static int nfs_attribute_timeout(struct inode *inode)
+{
+       struct nfs_inode *nfsi = NFS_I(inode);
+
+       return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
+}
+
+static bool nfs_check_cache_invalid_delegated(struct inode *inode, unsigned long flags)
+{
+       unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+
+       /* Special case for the pagecache or access cache */
+       if (flags == NFS_INO_REVAL_PAGECACHE &&
+           !(cache_validity & NFS_INO_REVAL_FORCED))
+               return false;
+       return (cache_validity & flags) != 0;
+}
+
+static bool nfs_check_cache_invalid_not_delegated(struct inode *inode, unsigned long flags)
+{
+       unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+
+       if ((cache_validity & flags) != 0)
+               return true;
+       if (nfs_attribute_timeout(inode))
+               return true;
+       return false;
+}
+
+bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
+{
+       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+               return nfs_check_cache_invalid_delegated(inode, flags);
+
+       return nfs_check_cache_invalid_not_delegated(inode, flags);
+}
+
 static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
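
nfs_attribute_timeout(), now private to inode.c, leans on
time_in_range_open() for a wraparound-safe jiffies comparison. A
self-contained user-space model of that idiom — the signed-difference
trick behind the kernel's time_after_eq()/time_before() macros; types
here are illustrative, not kernel types:

    #include <stdbool.h>
    #include <stdint.h>

    /* true iff a is in [b, c), safe across 32-bit counter wraparound */
    static bool tick_in_range_open(uint32_t a, uint32_t b, uint32_t c)
    {
            return (int32_t)(a - b) >= 0 &&  /* a >= b: time_after_eq */
                   (int32_t)(a - c) < 0;     /* a <  c: time_before   */
    }
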
@@ -795,6 +832,8 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
        if (!is_sync)
                return;
        inode = d_inode(ctx->dentry);
+       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+               return;
        nfsi = NFS_I(inode);
        if (inode->i_mapping->nrpages == 0)
                return;
@@ -1044,13 +1083,6 @@ out:
        return status;
 }
 
-int nfs_attribute_timeout(struct inode *inode)
-{
-       struct nfs_inode *nfsi = NFS_I(inode);
-
-       return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
-}
-
 int nfs_attribute_cache_expired(struct inode *inode)
 {
        if (nfs_have_delegated_attributes(inode))
@@ -1073,15 +1105,6 @@ int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
 }
 EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
 
-int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode)
-{
-       if (!(NFS_I(inode)->cache_validity &
-                       (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
-                       && !nfs_attribute_cache_expired(inode))
-               return NFS_STALE(inode) ? -ESTALE : 0;
-       return -ECHILD;
-}
-
 static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
@@ -1114,17 +1137,8 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
 
 bool nfs_mapping_need_revalidate_inode(struct inode *inode)
 {
-       unsigned long cache_validity = NFS_I(inode)->cache_validity;
-
-       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
-               const unsigned long force_reval =
-                       NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
-               return (cache_validity & force_reval) == force_reval;
-       }
-
-       return (cache_validity & NFS_INO_REVAL_PAGECACHE)
-               || nfs_attribute_timeout(inode)
-               || NFS_STALE(inode);
+       return nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE) ||
+               NFS_STALE(inode);
 }
 
 int nfs_revalidate_mapping_rcu(struct inode *inode)
@@ -1536,13 +1550,6 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr
 {
        unsigned long invalid = NFS_INO_INVALID_ATTR;
 
-       /*
-        * Don't revalidate the pagecache if we hold a delegation, but do
-        * force an attribute update
-        */
-       if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
-               invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
-
        if (S_ISDIR(inode->i_mode))
                invalid |= NFS_INO_INVALID_DATA;
        nfs_set_cache_invalid(inode, invalid);
index 6b79c2ca9b9a5eed783117d43b12006f6260f107..09ca5095c04e427c881785170aefe7fdf58e7621 100644 (file)
@@ -381,6 +381,7 @@ extern int nfs_drop_inode(struct inode *);
 extern void nfs_clear_inode(struct inode *);
 extern void nfs_evict_inode(struct inode *);
 void nfs_zap_acl_cache(struct inode *inode);
+extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
 extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
 extern int nfs_wait_atomic_killable(atomic_t *p);
 
index d33242c8d95d58a5366a4a57283005702852c29b..6dcbc5defb7a8dd670b63995eb553c379e47a0d4 100644 (file)
@@ -1089,8 +1089,15 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
 
        spin_lock(&dir->i_lock);
        nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-       if (!cinfo->atomic || cinfo->before != dir->i_version)
+       if (cinfo->atomic && cinfo->before == dir->i_version) {
+               nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
+               nfsi->attrtimeo_timestamp = jiffies;
+       } else {
                nfs_force_lookup_revalidate(dir);
+               if (cinfo->before != dir->i_version)
+                       nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
+                               NFS_INO_INVALID_ACL;
+       }
        dir->i_version = cinfo->after;
        nfsi->attr_gencount = nfs_inc_attr_generation_counter();
        nfs_fscache_invalidate(dir);
@@ -3115,6 +3122,16 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
                        res_stateid = &calldata->res.stateid;
                        renew_lease(server, calldata->timestamp);
                        break;
+               case -NFS4ERR_ACCESS:
+                       if (calldata->arg.bitmask != NULL) {
+                               calldata->arg.bitmask = NULL;
+                               calldata->res.fattr = NULL;
+                               task->tk_status = 0;
+                               rpc_restart_call_prepare(task);
+                               goto out_release;
+
+                       }
+                       break;
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_EXPIRED:
@@ -3140,7 +3157,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
                        res_stateid, calldata->arg.fmode);
 out_release:
        nfs_release_seqid(calldata->arg.seqid);
-       nfs_refresh_inode(calldata->inode, calldata->res.fattr);
+       nfs_refresh_inode(calldata->inode, &calldata->fattr);
        dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
 }
 
@@ -3193,9 +3210,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                goto out_wait;
        }
 
-       if (calldata->arg.fmode == 0) {
+       if (calldata->arg.fmode == 0)
                task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 
+       if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
                /* Close-to-open cache consistency revalidation */
                if (!nfs4_have_delegation(inode, FMODE_READ))
                        calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
@@ -3207,7 +3225,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                nfs4_map_atomic_open_share(NFS_SERVER(inode),
                                calldata->arg.fmode, 0);
 
-       nfs_fattr_init(calldata->res.fattr);
+       if (calldata->res.fattr == NULL)
+               calldata->arg.bitmask = NULL;
+       else if (calldata->arg.bitmask == NULL)
+               calldata->res.fattr = NULL;
        calldata->timestamp = jiffies;
        if (nfs4_setup_sequence(NFS_SERVER(inode),
                                &calldata->arg.seq_args,
@@ -3274,6 +3295,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
        calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
        if (IS_ERR(calldata->arg.seqid))
                goto out_free_calldata;
+       nfs_fattr_init(&calldata->fattr);
        calldata->arg.fmode = 0;
        calldata->lr.arg.ld_private = &calldata->lr.ld_private;
        calldata->res.fattr = &calldata->fattr;
@@ -5673,6 +5695,14 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        case -NFS4ERR_STALE_STATEID:
                task->tk_status = 0;
                break;
+       case -NFS4ERR_ACCESS:
+               if (data->args.bitmask) {
+                       data->args.bitmask = NULL;
+                       data->res.fattr = NULL;
+                       task->tk_status = 0;
+                       rpc_restart_call_prepare(task);
+                       return;
+               }
        default:
                if (nfs4_async_handle_error(task, data->res.server,
                                            NULL, NULL) == -EAGAIN) {
@@ -5692,6 +5722,7 @@ static void nfs4_delegreturn_release(void *calldata)
                if (data->lr.roc)
                        pnfs_roc_release(&data->lr.arg, &data->lr.res,
                                        data->res.lr_ret);
+               nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
                nfs_iput_and_deactive(inode);
        }
        kfree(calldata);
@@ -5780,10 +5811,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
        if (status != 0)
                goto out;
        status = data->rpc_status;
-       if (status == 0)
-               nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
-       else
-               nfs_refresh_inode(inode, &data->fattr);
 out:
        rpc_put_task(task);
        return status;
index 95baf7d340f04117ef4a123ac3da5972b07693a1..1d152f4470cd6f6b0bcc6f73fd572e331b18778b 100644 (file)
@@ -494,21 +494,18 @@ nfs4_alloc_state_owner(struct nfs_server *server,
 }
 
 static void
-nfs4_drop_state_owner(struct nfs4_state_owner *sp)
-{
-       struct rb_node *rb_node = &sp->so_server_node;
-
-       if (!RB_EMPTY_NODE(rb_node)) {
-               struct nfs_server *server = sp->so_server;
-               struct nfs_client *clp = server->nfs_client;
-
-               spin_lock(&clp->cl_lock);
-               if (!RB_EMPTY_NODE(rb_node)) {
-                       rb_erase(rb_node, &server->state_owners);
-                       RB_CLEAR_NODE(rb_node);
-               }
-               spin_unlock(&clp->cl_lock);
-       }
+nfs4_reset_state_owner(struct nfs4_state_owner *sp)
+{
+       /* This state_owner is no longer usable, but must
+        * remain in place so that state recovery can find it
+        * and the opens associated with it.
+        * It may also be used for new 'open' request to
+        * return a delegation to the server.
+        * So update the 'create_time' so that it looks like
+        * a new state_owner.  This will cause the server to
+        * request an OPEN_CONFIRM to start a new sequence.
+        */
+       sp->so_seqid.create_time = ktime_get();
 }
 
 static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
@@ -797,21 +794,33 @@ void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
 
 /*
  * Search the state->lock_states for an existing lock_owner
- * that is compatible with current->files
+ * that is compatible with either of the given owners.
+ * If the second is non-zero, then the first refers to a Posix-lock
+ * owner (current->files) and the second refers to a flock/OFD
+ * owner (struct file*).  In that case, prefer a match for the first
+ * owner.
+ * If both sorts of locks are held on the one file we cannot know
+ * which stateid was intended to be used, so a "correct" choice cannot
+ * be made.  Failing that, a "consistent" choice is preferable.  The
+ * consistent choice we make is to prefer the first owner, that of a
+ * Posix lock.
  */
 static struct nfs4_lock_state *
 __nfs4_find_lock_state(struct nfs4_state *state,
                       fl_owner_t fl_owner, fl_owner_t fl_owner2)
 {
-       struct nfs4_lock_state *pos;
+       struct nfs4_lock_state *pos, *ret = NULL;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
-               if (pos->ls_owner != fl_owner &&
-                   pos->ls_owner != fl_owner2)
-                       continue;
-               atomic_inc(&pos->ls_count);
-               return pos;
+               if (pos->ls_owner == fl_owner) {
+                       ret = pos;
+                       break;
+               }
+               if (pos->ls_owner == fl_owner2)
+                       ret = pos;
        }
-       return NULL;
+       if (ret)
+               atomic_inc(&ret->ls_count);
+       return ret;
 }
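
The rewritten search above prefers an exact match on the first (POSIX)
owner and only falls back to the flock/OFD owner when no POSIX match
turns up. A standalone C model of that two-key scan (illustrative types,
not the kernel's):

    #include <stddef.h>

    struct lock_state {
            void *owner;
            struct lock_state *next;
    };

    static struct lock_state *
    find_lock_state(struct lock_state *head, void *owner1, void *owner2)
    {
            struct lock_state *pos, *ret = NULL;

            for (pos = head; pos; pos = pos->next) {
                    if (pos->owner == owner1)
                            return pos;     /* preferred owner: stop here */
                    if (pos->owner == owner2)
                            ret = pos;      /* remember, keep scanning */
            }
            return ret;
    }
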
 
 /*
@@ -1101,7 +1110,7 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
 
        sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid);
        if (status == -NFS4ERR_BAD_SEQID)
-               nfs4_drop_state_owner(sp);
+               nfs4_reset_state_owner(sp);
        if (!nfs4_has_session(sp->so_server->nfs_client))
                nfs_increment_seqid(status, seqid);
 }
index 1af6268a7d8c4c71779d3b64f7ce087b431df723..e9255cb453e664c385c9a94969f69bc3514024b1 100644 (file)
@@ -502,11 +502,13 @@ static int nfs4_stat_to_errno(int);
                                (compound_encode_hdr_maxsz + \
                                 encode_sequence_maxsz + \
                                 encode_putfh_maxsz + \
+                                encode_layoutreturn_maxsz + \
                                 encode_open_downgrade_maxsz)
 #define NFS4_dec_open_downgrade_sz \
                                (compound_decode_hdr_maxsz + \
                                 decode_sequence_maxsz + \
                                 decode_putfh_maxsz + \
+                                decode_layoutreturn_maxsz + \
                                 decode_open_downgrade_maxsz)
 #define NFS4_enc_close_sz      (compound_encode_hdr_maxsz + \
                                 encode_sequence_maxsz + \
@@ -2277,9 +2279,9 @@ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
        encode_putfh(xdr, args->fh, &hdr);
        if (args->lr_args)
                encode_layoutreturn(xdr, args->lr_args, &hdr);
-       encode_close(xdr, args, &hdr);
        if (args->bitmask != NULL)
                encode_getfattr(xdr, args->bitmask, &hdr);
+       encode_close(xdr, args, &hdr);
        encode_nops(&hdr);
 }
 
@@ -2356,6 +2358,8 @@ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
        encode_compound_hdr(xdr, req, &hdr);
        encode_sequence(xdr, &args->seq_args, &hdr);
        encode_putfh(xdr, args->fh, &hdr);
+       if (args->lr_args)
+               encode_layoutreturn(xdr, args->lr_args, &hdr);
        encode_open_downgrade(xdr, args, &hdr);
        encode_nops(&hdr);
 }
@@ -2701,7 +2705,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
        encode_putfh(xdr, args->fhandle, &hdr);
        if (args->lr_args)
                encode_layoutreturn(xdr, args->lr_args, &hdr);
-       encode_getfattr(xdr, args->bitmask, &hdr);
+       if (args->bitmask)
+               encode_getfattr(xdr, args->bitmask, &hdr);
        encode_delegreturn(xdr, args->stateid, &hdr);
        encode_nops(&hdr);
 }
@@ -6151,6 +6156,12 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
        status = decode_putfh(xdr);
        if (status)
                goto out;
+       if (res->lr_res) {
+               status = decode_layoutreturn(xdr, res->lr_res);
+               res->lr_ret = status;
+               if (status)
+                       goto out;
+       }
        status = decode_open_downgrade(xdr, res);
 out:
        return status;
@@ -6484,16 +6495,12 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
                if (status)
                        goto out;
        }
+       if (res->fattr != NULL) {
+               status = decode_getfattr(xdr, res->fattr, res->server);
+               if (status != 0)
+                       goto out;
+       }
        status = decode_close(xdr, res);
-       if (status != 0)
-               goto out;
-       /*
-        * Note: Server may do delete on close for this file
-        *      in which case the getattr call will fail with
-        *      an ESTALE error. Shouldn't be a problem,
-        *      though, since fattr->valid will remain unset.
-        */
-       decode_getfattr(xdr, res->fattr, res->server);
 out:
        return status;
 }
@@ -6966,9 +6973,11 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
                if (status)
                        goto out;
        }
-       status = decode_getfattr(xdr, res->fattr, res->server);
-       if (status != 0)
-               goto out;
+       if (res->fattr) {
+               status = decode_getfattr(xdr, res->fattr, res->server);
+               if (status != 0)
+                       goto out;
+       }
        status = decode_delegreturn(xdr);
 out:
        return status;
index 896df7bdf85f6c5a92b1c206cb799afea454c1e9..59554f3adf2948a10dd945b5f8441c236f53e9f2 100644 (file)
@@ -1251,6 +1251,7 @@ bool pnfs_roc(struct inode *ino,
        nfs4_stateid stateid;
        enum pnfs_iomode iomode = 0;
        bool layoutreturn = false, roc = false;
+       bool skip_read = false;
 
        if (!nfs_have_layout(ino))
                return false;
@@ -1270,18 +1271,27 @@ retry:
        }
 
        /* no roc if we hold a delegation */
-       if (nfs4_check_delegation(ino, FMODE_READ))
-               goto out_noroc;
+       if (nfs4_check_delegation(ino, FMODE_READ)) {
+               if (nfs4_check_delegation(ino, FMODE_WRITE))
+                       goto out_noroc;
+               skip_read = true;
+       }
 
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
+               if (state == NULL)
+                       continue;
                /* Don't return layout if there is open file state */
-               if (state != NULL && state->state != 0)
+               if (state->state & FMODE_WRITE)
                        goto out_noroc;
+               if (state->state & FMODE_READ)
+                       skip_read = true;
        }
 
 
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
+               if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
+                       continue;
                /* If we are sending layoutreturn, invalidate all valid lsegs */
                if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
                        continue;
index d171d2c53f7f8928762acb9731d202063328967b..f8933cb53d682aaf4c7e22efb75a43d4c1c688ac 100644 (file)
@@ -4834,7 +4834,7 @@ int ocfs2_reflink_remap_range(struct file *file_in,
 
        ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
                        &len, is_dedupe);
-       if (ret || len == 0)
+       if (ret <= 0)
                goto out_unlock;
 
        /* Lock out changes to the allocation maps and remap. */
index 234a9ac49958ed978f67aad68d81fb75cf5717ce..06a793f4ae38739c3bb70b3c5a35d5adf6b8e3c1 100644 (file)
@@ -67,49 +67,47 @@ int get_dominating_id(struct mount *mnt, const struct path *root)
 
 static int do_make_slave(struct mount *mnt)
 {
-       struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
-       struct mount *slave_mnt;
+       struct mount *master, *slave_mnt;
 
-       /*
-        * slave 'mnt' to a peer mount that has the
-        * same root dentry. If none is available then
-        * slave it to anything that is available.
-        */
-       while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
-              peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;
-
-       if (peer_mnt == mnt) {
-               peer_mnt = next_peer(mnt);
-               if (peer_mnt == mnt)
-                       peer_mnt = NULL;
-       }
-       if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
-           list_empty(&mnt->mnt_share))
-               mnt_release_group_id(mnt);
-
-       list_del_init(&mnt->mnt_share);
-       mnt->mnt_group_id = 0;
-
-       if (peer_mnt)
-               master = peer_mnt;
-
-       if (master) {
-               list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
-                       slave_mnt->mnt_master = master;
-               list_move(&mnt->mnt_slave, &master->mnt_slave_list);
-               list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
-               INIT_LIST_HEAD(&mnt->mnt_slave_list);
+       if (list_empty(&mnt->mnt_share)) {
+               if (IS_MNT_SHARED(mnt)) {
+                       mnt_release_group_id(mnt);
+                       CLEAR_MNT_SHARED(mnt);
+               }
+               master = mnt->mnt_master;
+               if (!master) {
+                       struct list_head *p = &mnt->mnt_slave_list;
+                       while (!list_empty(p)) {
+                               slave_mnt = list_first_entry(p,
+                                               struct mount, mnt_slave);
+                               list_del_init(&slave_mnt->mnt_slave);
+                               slave_mnt->mnt_master = NULL;
+                       }
+                       return 0;
+               }
        } else {
-               struct list_head *p = &mnt->mnt_slave_list;
-               while (!list_empty(p)) {
-                        slave_mnt = list_first_entry(p,
-                                       struct mount, mnt_slave);
-                       list_del_init(&slave_mnt->mnt_slave);
-                       slave_mnt->mnt_master = NULL;
+               struct mount *m;
+               /*
+                * slave 'mnt' to a peer mount that has the
+                * same root dentry. If none is available then
+                * slave it to anything that is available.
+                */
+               for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) {
+                       if (m->mnt.mnt_root == mnt->mnt.mnt_root) {
+                               master = m;
+                               break;
+                       }
                }
+               list_del_init(&mnt->mnt_share);
+               mnt->mnt_group_id = 0;
+               CLEAR_MNT_SHARED(mnt);
        }
+       list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
+               slave_mnt->mnt_master = master;
+       list_move(&mnt->mnt_slave, &master->mnt_slave_list);
+       list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
+       INIT_LIST_HEAD(&mnt->mnt_slave_list);
        mnt->mnt_master = master;
-       CLEAR_MNT_SHARED(mnt);
        return 0;
 }
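
do_make_slave() is reached from user space through mount(2) propagation
changes. A minimal sketch of the trigger (the mount point is a
placeholder; requires CAP_SYS_ADMIN):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* equivalent of: mount --make-slave /mnt */
            if (mount(NULL, "/mnt", NULL, MS_SLAVE, NULL))
                    perror("mount(MS_SLAVE)");
            return 0;
    }
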
 
index da6de12b5c46d4a56e15e892f527153682ff6e30..7537b6b6b5a2dc80caa8774f8534ccc5e6b4fee2 100644 (file)
@@ -1669,6 +1669,9 @@ static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write)
  * Check that the two inodes are eligible for cloning, the ranges make
  * sense, and then flush all dirty data.  Caller must ensure that the
  * inodes have been locked against any other modifications.
+ *
+ * Returns: 0 for "nothing to clone", 1 for "something to clone", or
+ * the usual negative error code.
  */
 int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                               struct inode *inode_out, loff_t pos_out,
@@ -1695,17 +1698,15 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
 
        /* Are we going all the way to the end? */
        isize = i_size_read(inode_in);
-       if (isize == 0) {
-               *len = 0;
+       if (isize == 0)
                return 0;
-       }
 
        /* Zero length dedupe exits immediately; reflink goes to EOF. */
        if (*len == 0) {
-               if (is_dedupe) {
-                       *len = 0;
+               if (is_dedupe || pos_in == isize)
                        return 0;
-               }
+               if (pos_in > isize)
+                       return -EINVAL;
                *len = isize - pos_in;
        }
 
@@ -1769,7 +1770,7 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                        return -EBADE;
        }
 
-       return 0;
+       return 1;
 }
 EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
 
@@ -1955,6 +1956,9 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
                goto out;
        ret = 0;
 
+       if (off + len > i_size_read(src))
+               return -EINVAL;
+
        /* pre-format output fields to sane values */
        for (i = 0; i < count; i++) {
                same->info[i].bytes_deduped = 0ULL;
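
Two user-visible consequences of the hunks above: a clone request of
length 0 now means "reflink through EOF" (and cloning from beyond EOF is
rejected with EINVAL), and vfs_clone_file_prep_inodes() reports "nothing
to clone" as 0, so callers test ret <= 0. From user space the path in is
FICLONERANGE — a sketch; file names are placeholders and the filesystem
must support reflink:

    #include <fcntl.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    int main(void)
    {
            int src = open("src", O_RDONLY);
            int dst = open("dst", O_WRONLY | O_CREAT, 0644);
            struct file_clone_range fcr = {
                    .src_fd = src,
                    .src_offset = 0,
                    .src_length = 0,        /* 0 = clone through EOF */
                    .dest_offset = 0,
            };

            if (ioctl(dst, FICLONERANGE, &fcr))
                    perror("FICLONERANGE");
            return 0;
    }
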
index 368bfb92b115c0e99ce4c654f6fdecb6ec5a2763..a11f271800ef990987b85bc1df1614b2d549650a 100644 (file)
@@ -190,6 +190,13 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
         */
        m->version = file->f_version;
 
+       /*
+        * if request is to read from zero offset, reset iterator to first
+        * record as it might have been already advanced by previous requests
+        */
+       if (*ppos == 0)
+               m->index = 0;
+
        /* Don't assume *ppos is where we left it */
        if (unlikely(*ppos != m->read_pos)) {
                while ((err = traverse(m, *ppos)) == -EAGAIN)
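
The seq_file change guarantees that rewinding to offset 0 restarts
iteration from the first record even when a previous read advanced the
iterator. Observable from user space on any seq_file-backed file:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[256];
            int fd = open("/proc/self/mounts", O_RDONLY);

            read(fd, buf, sizeof(buf));     /* advances the iterator */
            lseek(fd, 0, SEEK_SET);         /* rewind to offset 0 ... */
            ssize_t n = read(fd, buf, sizeof(buf)); /* ... restarts at record 0 */
            printf("re-read %zd bytes from the top\n", n);
            close(fd);
            return 0;
    }
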
index 8ed7c9d8c0fbaf7ec81de0afa9ac3edafe8932aa..873d83104e79aed14a24c417f211e38ae4038122 100644 (file)
@@ -1087,7 +1087,13 @@ EXPORT_SYMBOL(do_splice_direct);
 
 static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
 {
-       while (pipe->nrbufs == pipe->buffers) {
+       for (;;) {
+               if (unlikely(!pipe->readers)) {
+                       send_sig(SIGPIPE, current, 0);
+                       return -EPIPE;
+               }
+               if (pipe->nrbufs != pipe->buffers)
+                       return 0;
                if (flags & SPLICE_F_NONBLOCK)
                        return -EAGAIN;
                if (signal_pending(current))
@@ -1096,7 +1102,6 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }
-       return 0;
 }
 
 static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
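
wait_for_space() now checks pipe->readers before sleeping, so a splice
into a pipe whose read end has gone away fails with EPIPE (or raises
SIGPIPE) instead of blocking forever. A user-space sketch of the
behavior (the input file is a placeholder):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int p[2];
            int fd = open("/etc/hostname", O_RDONLY);

            signal(SIGPIPE, SIG_IGN);       /* see -EPIPE, not the signal */
            pipe(p);
            close(p[0]);                    /* no readers left */
            if (splice(fd, NULL, p[1], NULL, 4096, 0) < 0)
                    perror("splice");       /* expected: Broken pipe */
            return 0;
    }
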
index 45ceb94e89e42a362d633a35d2861e2808910e1a..1bc0bd6a9848cb14064bb09bb810daaac267a4d9 100644 (file)
@@ -1191,7 +1191,7 @@ out:
        return err;
 }
 
-void ufs_truncate_blocks(struct inode *inode)
+static void ufs_truncate_blocks(struct inode *inode)
 {
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
              S_ISLNK(inode->i_mode)))
index aca2d4bd4303b07b41a86d0bbe1e43bb4fe88308..07593a362cd03d0fdae120122d6d1c5aba8d809b 100644 (file)
@@ -1161,7 +1161,7 @@ xfs_reflink_remap_range(
 
        ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
                        &len, is_dedupe);
-       if (ret || len == 0)
+       if (ret <= 0)
                goto out_unlock;
 
        trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
index d7d0f495a34e975d7c045efab5dd029d5f393687..303315b9693fc999022b192f91b4681505f3c571 100644 (file)
@@ -13,6 +13,8 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
 }
 #endif
 
+extern bool acpi_permanent_mmap;
+
 void __iomem *__ref
 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
index 5c7356adc10b5f96f0fc39f430ae4741fbda8ea1..f5e10dd8e86b712a4c0e97a206d36ff88fabe02f 100644 (file)
@@ -513,10 +513,12 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                             acpi_get_table(acpi_string signature, u32 instance,
                                            struct acpi_table_header
                                            **out_table))
+ACPI_EXTERNAL_RETURN_VOID(void acpi_put_table(struct acpi_table_header *table))
+
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-                            acpi_get_table_by_index(u32 table_index,
-                                                    struct acpi_table_header
-                                                    **out_table))
+                           acpi_get_table_by_index(u32 table_index,
+                                                   struct acpi_table_header
+                                                   **out_table))
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                             acpi_install_table_handler(acpi_table_handler
                                                        handler, void *context))
@@ -965,15 +967,6 @@ void acpi_terminate_debugger(void);
 /*
  * Divergences
  */
-ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-                           acpi_get_table_with_size(acpi_string signature,
-                                                    u32 instance,
-                                                    struct acpi_table_header
-                                                    **out_table,
-                                                    acpi_size *tbl_size))
-
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                            acpi_get_data_full(acpi_handle object,
                                               acpi_object_handler handler,
index c19700e2a2fe25d169a64180593438d9815c3f77..da5708caf8a12493de0e52376c27b7c0bffd1378 100644 (file)
@@ -371,6 +371,7 @@ struct acpi_table_desc {
        union acpi_name_union signature;
        acpi_owner_id owner_id;
        u8 flags;
+       u16 validation_count;
 };
 
 /* Masks for Flags field above */
index a5509d87230a4778de5566f8cfe7b705a1dd89f1..7dbb1141f546077ceec4c9fe39b077ae8e1ba379 100644 (file)
@@ -142,7 +142,6 @@ static inline void acpi_os_terminate_command_signals(void)
 /*
  * OSL interfaces added by Linux
  */
-void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
 
 #endif                         /* __KERNEL__ */
 
index 9eb42dbc5582ace99283629f0905861ac820c7d5..fdd0a343f45527ee2b5d4a787ad5388a6ce4e47a 100644 (file)
@@ -14,14 +14,9 @@ typedef int (kiocb_cancel_fn)(struct kiocb *);
 /* prototypes */
 #ifdef CONFIG_AIO
 extern void exit_aio(struct mm_struct *mm);
-extern long do_io_submit(aio_context_t ctx_id, long nr,
-                        struct iocb __user *__user *iocbpp, bool compat);
 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
 #else
 static inline void exit_aio(struct mm_struct *mm) { }
-static inline long do_io_submit(aio_context_t ctx_id, long nr,
-                               struct iocb __user * __user *iocbpp,
-                               bool compat) { return 0; }
 static inline void kiocb_set_cancel_fn(struct kiocb *req,
                                       kiocb_cancel_fn *cancel) { }
 #endif /* CONFIG_AIO */
index 286b2a2643833615633e82d7d9e667b66abbfaef..83695641bd5ec272551857c448cc9b4f354898b8 100644 (file)
@@ -288,7 +288,6 @@ enum blk_queue_state {
 struct blk_queue_tag {
        struct request **tag_index;     /* map of busy tags */
        unsigned long *tag_map;         /* bit map of free/busy tags */
-       int busy;                       /* current depth */
        int max_depth;                  /* what we will send to device */
        int real_max_depth;             /* what the array can hold */
        atomic_t refcnt;                /* map can be shared */
index a951fd10aaaad07cf7ab897615a4e3d84caf6a42..6a524bf6a06d112613075547c5b17a069da00077 100644 (file)
@@ -18,6 +18,7 @@ enum cache_type {
 
 /**
  * struct cacheinfo - represent a cache leaf node
+ * @id: This cache's id. It is unique among caches with the same (type, level).
  * @type: type of the cache - data, inst or unified
  * @level: represents the hierarchy in the multi-level cache
  * @coherency_line_size: size of each cache line usually representing
@@ -44,6 +45,7 @@ enum cache_type {
  * keeping, the remaining members form the core properties of the cache
  */
 struct cacheinfo {
+       unsigned int id;
        enum cache_type type;
        unsigned int level;
        unsigned int coherency_line_size;
@@ -61,6 +63,7 @@ struct cacheinfo {
 #define CACHE_WRITE_ALLOCATE   BIT(3)
 #define CACHE_ALLOCATE_POLICY_MASK     \
        (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+#define CACHE_ID               BIT(4)
 
        struct device_node *of_node;
        bool disable_sysfs;
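
The new id field gives each cache leaf a stable identifier among caches
of the same type and level, and the CACHE_ID attribute bit marks it
valid. When a platform populates it, the value is expected to surface in
the cacheinfo sysfs hierarchy — a hedged sketch, since the exact path and
availability depend on the architecture's cacheinfo support:

    #include <stdio.h>

    int main(void)
    {
            unsigned int id;
            FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index3/id", "r");

            if (f && fscanf(f, "%u", &id) == 1)
                    printf("L3 cache id: %u\n", id);
            if (f)
                    fclose(f);
            return 0;
    }
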
index 9a30b921f7401487cb6238b9bcb6e9f096e9a39f..2319b8c108e87b9e87c11cc4c9aa314d24eb0364 100644 (file)
 #ifndef _CONFIGFS_H_
 #define _CONFIGFS_H_
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-
-#include <linux/atomic.h>
+#include <linux/stat.h>   /* S_IRUGO */
+#include <linux/types.h>  /* ssize_t */
+#include <linux/list.h>   /* struct list_head */
+#include <linux/kref.h>   /* struct kref */
+#include <linux/mutex.h>  /* struct mutex */
 
 #define CONFIGFS_ITEM_NAME_LEN 20
 
index cb631973839a7ff7dd10a8426de10d3a30393e9a..f1da8c8dd473869897c3363f9e299bd28086c8d5 100644 (file)
@@ -340,10 +340,8 @@ extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
 extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
 extern int nfs_permission(struct inode *, int);
 extern int nfs_open(struct inode *, struct file *);
-extern int nfs_attribute_timeout(struct inode *inode);
 extern int nfs_attribute_cache_expired(struct inode *inode);
 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
-extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
 extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
index a440cf178191ee8b84c01068201abb5c846af031..4d1905245c7aa50df56acf0f77c77f3347c28c04 100644 (file)
@@ -1821,6 +1821,9 @@ struct task_struct {
        /* cg_list protected by css_set_lock and tsk->alloc_lock */
        struct list_head cg_list;
 #endif
+#ifdef CONFIG_INTEL_RDT_A
+       int closid;
+#endif
 #ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
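
With CONFIG_INTEL_RDT_A, each task records the class of service (CLOS) of the resctrl group it belongs to. On a context switch the architecture code makes that class current by writing the IA32_PQR_ASSOC MSR, roughly along these lines (a simplified sketch; the real code caches the last value written per CPU to avoid redundant MSR writes):

    #define MSR_IA32_PQR_ASSOC      0x0c8f

    /* Simplified sketch: bits 0-9 of PQR_ASSOC carry the RMID used
     * for monitoring, bits 32-63 the CLOS used for allocation. */
    static inline void rdt_sched_in_sketch(struct task_struct *next)
    {
    #ifdef CONFIG_INTEL_RDT_A
            wrmsr(MSR_IA32_PQR_ASSOC, 0 /* RMID */, next->closid);
    #endif
    }
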
index 931a47ba45718ad5c329b1085c7fac5319e7448f..1beab5532035dc2126405384d44457f183de2a90 100644 (file)
@@ -205,10 +205,12 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
 
        dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
        if (dev) {
-               ip4 = (struct in_device *)dev->ip_ptr;
-               if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
+               ip4 = in_dev_get(dev);
+               if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
                        ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
                                               (struct in6_addr *)gid);
+                       in_dev_put(ip4);
+               }
                dev_put(dev);
        }
 }
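
The old code peeked at dev->ip_ptr with no reference held, so the in_device could be freed under it during device teardown; in_dev_get() takes a reference that in_dev_put() later drops. The same result can be had without refcounting by staying inside an RCU read-side critical section, sketched here for comparison:

    rcu_read_lock();
    ip4 = __in_dev_get_rcu(dev);
    if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
            ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
                                   (struct in6_addr *)gid);
    rcu_read_unlock();
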
index 4ac24f5a3308a8c8bcec7c973433f1f714cda9b2..275581d483ddd90d97c550ee8bf44d705833ecf8 100644 (file)
@@ -1,12 +1,14 @@
 #ifndef ISCSI_TARGET_CORE_H
 #define ISCSI_TARGET_CORE_H
 
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
+#include <linux/dma-direction.h>     /* enum dma_data_direction */
+#include <linux/list.h>              /* struct list_head */
+#include <linux/socket.h>            /* struct sockaddr_storage */
+#include <linux/types.h>             /* u8 */
+#include <scsi/iscsi_proto.h>        /* itt_t */
+#include <target/target_core_base.h> /* struct se_cmd */
+
+struct sock;
 
 #define ISCSIT_VERSION                 "v4.1.0"
 #define ISCSI_MAX_DATASN_MISSING_COUNT 16
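
This hunk, like the configfs.h one above and the target_core ones below, is part of a header-dependency diet: heavyweight includes such as <net/sock.h> and <net/tcp.h> are replaced by the minimal headers defining the types actually used (each annotated with the symbol it supplies), and a type referenced only through a pointer gets a forward declaration instead of an include. The rule of thumb, in miniature:

    /* foo.h -- include-what-you-use in miniature (illustrative only) */
    #ifndef FOO_H
    #define FOO_H

    #include <linux/types.h>        /* u32: used by value, so the full
                                     * definition is required */

    struct sock;                    /* used only via a pointer, so a
                                     * forward declaration suffices */

    struct foo {
            u32 flags;
            struct sock *sk;        /* foo's layout doesn't depend on
                                     * struct sock's */
    };

    #endif /* FOO_H */
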
index e615bb485d0b3a79ea43e7494db956db17805ac0..c27dd471656dc2da4745516d47253b5d06285cf3 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef ISCSI_TARGET_STAT_H
 #define ISCSI_TARGET_STAT_H
 
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/socket.h>
+
 /*
  * For struct iscsi_tiqn->tiqn_wwn default groups
  */
index 40ac7cd801505db68a979a4fc4a22a6eb71019be..1277e9ba031818e22cd8720a4125f1c9ebf0cb12 100644 (file)
@@ -1,6 +1,6 @@
-#include <linux/module.h>
-#include <linux/list.h>
-#include "iscsi_target_core.h"
+#include "iscsi_target_core.h" /* struct iscsi_cmd */
+
+struct sockaddr_storage;
 
 struct iscsit_transport {
 #define ISCSIT_TRANSPORT_NAME  16
index f6f3bc52c1ac2e21611ba7be2a274c7cb442166d..b54b98dc2d4a77681dd3ecf883d75e062589ee8c 100644 (file)
@@ -1,8 +1,14 @@
 #ifndef TARGET_CORE_BACKEND_H
 #define TARGET_CORE_BACKEND_H
 
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 #define TRANSPORT_FLAG_PASSTHROUGH             1
 
+struct request_queue;
+struct scatterlist;
+
 struct target_backend_ops {
        char name[16];
        char inquiry_prod[16];
index 00558287936d9a0bcc2e386e8fa0b2bcb9af4f97..29e6858bb1648b636dcce48072f9fe43e5a8a884 100644 (file)
@@ -1,14 +1,10 @@
 #ifndef TARGET_CORE_BASE_H
 #define TARGET_CORE_BASE_H
 
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
-#include <linux/percpu_ida.h>
-#include <linux/t10-pi.h>
-#include <net/sock.h>
-#include <net/tcp.h>
+#include <linux/configfs.h>      /* struct config_group */
+#include <linux/dma-direction.h> /* enum dma_data_direction */
+#include <linux/percpu_ida.h>    /* struct percpu_ida */
+#include <linux/semaphore.h>     /* struct semaphore */
 
 #define TARGET_CORE_VERSION            "v5.0"
 
index 5cd6faa6e0d166ed07444cf5e3735e2626483205..358041bad1da0350b776d7ff174ab682b0bb82ef 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef TARGET_CORE_FABRIC_H
 #define TARGET_CORE_FABRIC_H
 
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
 struct target_core_fabric_ops {
        struct module *module;
        const char *name;
index 9bd559472c9280a6317e336f3a62471fd0aafa49..e230af2e68558fa8ed1778b2c154686f8b1e2481 100644 (file)
@@ -57,6 +57,7 @@
 #define CGROUP_SUPER_MAGIC     0x27e0eb
 #define CGROUP2_SUPER_MAGIC    0x63677270
 
+#define RDTGROUP_SUPER_MAGIC   0x7655821
 
 #define STACK_END_MAGIC                0x57AC6E9D
 
index 635482e60ca39f93e706f51dbccf7a19dfefe284..8acef8576ce9b211ea3ede4ccf63220466b40e22 100644 (file)
@@ -150,6 +150,9 @@ cond_syscall(sys_io_destroy);
 cond_syscall(sys_io_submit);
 cond_syscall(sys_io_cancel);
 cond_syscall(sys_io_getevents);
+cond_syscall(compat_sys_io_setup);
+cond_syscall(compat_sys_io_submit);
+cond_syscall(compat_sys_io_getevents);
 cond_syscall(sys_sysfs);
 cond_syscall(sys_syslog);
 cond_syscall(sys_process_vm_readv);
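
The compat AIO entry points are now built only with CONFIG_AIO, so sys_ni.c must supply the -ENOSYS fallback for configurations that compile them out. cond_syscall() does that by emitting a weak alias to sys_ni_syscall() which any strong definition overrides at link time; roughly, per <linux/linkage.h> of this era:

    /* Roughly the mechanism behind cond_syscall(): a weak symbol that
     * resolves to sys_ni_syscall() (-ENOSYS) unless a strong
     * definition of the syscall is linked in. */
    #define cond_syscall(x) asm(                            \
            ".weak " VMLINUX_SYMBOL_STR(x) "\n\t"           \
            ".set  " VMLINUX_SYMBOL_STR(x) ","              \
                   VMLINUX_SYMBOL_STR(sys_ni_syscall))
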
index 228892dabba6f5579478988b84ef007fa04bd661..25f57230380104f419257ea43c1cd3d2e31d7e65 100644 (file)
 }
 
 #define iterate_all_kinds(i, n, v, I, B, K) {                  \
-       size_t skip = i->iov_offset;                            \
-       if (unlikely(i->type & ITER_BVEC)) {                    \
-               struct bio_vec v;                               \
-               struct bvec_iter __bi;                          \
-               iterate_bvec(i, n, v, __bi, skip, (B))          \
-       } else if (unlikely(i->type & ITER_KVEC)) {             \
-               const struct kvec *kvec;                        \
-               struct kvec v;                                  \
-               iterate_kvec(i, n, v, kvec, skip, (K))          \
-       } else {                                                \
-               const struct iovec *iov;                        \
-               struct iovec v;                                 \
-               iterate_iovec(i, n, v, iov, skip, (I))          \
+       if (likely(n)) {                                        \
+               size_t skip = i->iov_offset;                    \
+               if (unlikely(i->type & ITER_BVEC)) {            \
+                       struct bio_vec v;                       \
+                       struct bvec_iter __bi;                  \
+                       iterate_bvec(i, n, v, __bi, skip, (B))  \
+               } else if (unlikely(i->type & ITER_KVEC)) {     \
+                       const struct kvec *kvec;                \
+                       struct kvec v;                          \
+                       iterate_kvec(i, n, v, kvec, skip, (K))  \
+               } else {                                        \
+                       const struct iovec *iov;                \
+                       struct iovec v;                         \
+                       iterate_iovec(i, n, v, iov, skip, (I))  \
+               }                                               \
        }                                                       \
 }
 
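Wrapping the dispatcher's body in `if (likely(n))` makes zero-length iteration a no-op for every iterator flavour in one place, which is what lets the later hunks drop their per-function size checks. The two copy_from_iter_full* changes below merely delete stray trailing backslashes: a backslash-newline splices source lines, so the old code still parsed as intended, but it read as if it were inside a macro. The guard pattern, in miniature (a sketch, not the kernel macro):

    /* Sketch of the pattern: one guard in the dispatch macro instead
     * of a size check in every caller. */
    #define dispatch_iter(i, n, BVEC_OP, KVEC_OP, IOVEC_OP) {       \
            if (likely(n)) {                                        \
                    if (unlikely((i)->type & ITER_BVEC))            \
                            BVEC_OP;                                \
                    else if (unlikely((i)->type & ITER_KVEC))       \
                            KVEC_OP;                                \
                    else                                            \
                            IOVEC_OP;                               \
            }                                                       \
    }
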
@@ -576,7 +578,7 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
                WARN_ON(1);
                return false;
        }
-       if (unlikely(i->count < bytes))                         \
+       if (unlikely(i->count < bytes))
                return false;
 
        iterate_all_kinds(i, bytes, v, ({
@@ -620,7 +622,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
                WARN_ON(1);
                return false;
        }
-       if (unlikely(i->count < bytes))                         \
+       if (unlikely(i->count < bytes))
                return false;
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
@@ -837,11 +839,8 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
        unsigned long res = 0;
        size_t size = i->count;
 
-       if (!size)
-               return 0;
-
        if (unlikely(i->type & ITER_PIPE)) {
-               if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
+               if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
                return size;
        }
@@ -856,10 +855,8 @@ EXPORT_SYMBOL(iov_iter_alignment);
 
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 {
-        unsigned long res = 0;
+       unsigned long res = 0;
        size_t size = i->count;
-       if (!size)
-               return 0;
 
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
@@ -874,7 +871,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
                );
-               return res;
+       return res;
 }
 EXPORT_SYMBOL(iov_iter_gap_alignment);
 
@@ -908,6 +905,9 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
        size_t capacity;
        int idx;
 
+       if (!maxsize)
+               return 0;
+
        if (!sanity(i))
                return -EFAULT;
 
@@ -926,9 +926,6 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
        if (maxsize > i->count)
                maxsize = i->count;
 
-       if (!maxsize)
-               return 0;
-
        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        iterate_all_kinds(i, maxsize, v, ({
@@ -975,6 +972,9 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
        int idx;
        int npages;
 
+       if (!maxsize)
+               return 0;
+
        if (!sanity(i))
                return -EFAULT;
 
@@ -1006,9 +1006,6 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
        if (maxsize > i->count)
                maxsize = i->count;
 
-       if (!maxsize)
-               return 0;
-
        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages_alloc(i, pages, maxsize, start);
        iterate_all_kinds(i, maxsize, v, ({
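
With the dispatcher tolerating n == 0, the explicit `if (!size)` / `if (!maxsize)` fast paths in iov_iter_alignment(), iov_iter_gap_alignment() and iov_iter_get_pages{,_alloc}() become redundant for the iterate-based paths. The pipe flavour is the exception: previously the zero-length check in the callers ran before pipe_get_pages{,_alloc}() were entered, so the guard moves down into those functions, ahead of the sanity() consistency check, to preserve that behaviour. A sketch of the resulting ordering (pipe_get_pages_sketch is illustrative, not the kernel function):

    /* Sketch: a zero-length request returns 0 before any pipe-state
     * checks, matching the behaviour the old caller-side guard gave. */
    static ssize_t pipe_get_pages_sketch(struct iov_iter *i, size_t maxsize)
    {
            if (!maxsize)
                    return 0;       /* before sanity(), not after */
            if (!sanity(i))
                    return -EFAULT;
            /* ... walk the pipe buffers ... */
            return maxsize;
    }
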
index 1d1ac51359e3abe9ff1e32ebaba24de04f2909e9..6fc2b8789a0bf677f2ca874dab6eaec96ef4ca3b 100644 (file)
@@ -1,4 +1,6 @@
 hostprogs-y    := genheaders
-HOST_EXTRACFLAGS += -Isecurity/selinux/include
+HOST_EXTRACFLAGS += \
+       -I$(srctree)/include/uapi -I$(srctree)/include \
+       -I$(srctree)/security/selinux/include
 
 always         := $(hostprogs-y)
index 539855ff31f977f32a1afbc16b35c9bfc2df6387..f4dd41f900d5ce8a672479139938feb8b96105f9 100644 (file)
@@ -1,3 +1,7 @@
+
+/* NOTE: we really do want to use the kernel headers here */
+#define __EXPORTED_HEADERS__
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
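
genheaders and mdp are host programs built against the kernel's own headers. Since the uapi header split, exported headers warn when pulled into a userspace build: include/uapi/linux/types.h carries a guard along the lines below, and defining __EXPORTED_HEADERS__ is how in-tree host tools opt in. The companion Makefile hunks add $(srctree)/include/uapi and $(srctree)/include to the host include path so these headers are found even in out-of-tree (O=) builds.

    /* Paraphrase of the guard in include/uapi/linux/types.h: */
    #ifndef __EXPORTED_HEADERS__
    #warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders"
    #endif
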
index dba7eff69a00962e99ca2e34ff4a4f47da59f58b..d6a83cafe59f46d35df53048005be42a1fca6c90 100644 (file)
@@ -1,5 +1,7 @@
 hostprogs-y    := mdp
-HOST_EXTRACFLAGS += -Isecurity/selinux/include
+HOST_EXTRACFLAGS += \
+       -I$(srctree)/include/uapi -I$(srctree)/include \
+       -I$(srctree)/security/selinux/include
 
 always         := $(hostprogs-y)
 clean-files    := policy.* file_contexts
index e10beb11b696e4f6d289e3c74a7dddf970b1b66b..c29fa4a6228d6f59f9346721d4569cb15002b3c6 100644 (file)
  * Authors: Serge E. Hallyn <serue@us.ibm.com>
  */
 
+
+/* NOTE: we really do want to use the kernel headers here */
+#define __EXPORTED_HEADERS__
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
index e2d4ad3a4b4c5e01a063747a4286e1b51c05f697..13ae49b0baa091f3ca9202fd51e3a20e46d9b6f4 100644 (file)
@@ -1,3 +1,5 @@
+#include <linux/capability.h>
+
 #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
     "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append"
 
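classmap.h spells out SELinux's capability permission names, which must track the kernel's capability numbering; including <linux/capability.h> directly, rather than relying on a transitive include the host programs no longer provide, keeps its compile-time sync check working, in the spirit of:

    /* Sketch of a compile-time sync check in the spirit of the one in
     * classmap.h: break the build when a capability is added without
     * updating the permission strings. */
    #if CAP_LAST_CAP > CAP_AUDIT_READ
    #error New capability defined, please update COMMON_CAP2_PERMS.
    #endif
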
index a2cdf3370afe75f5f5029f5fd78a4a32fa9cc1f7..15d1d5c63c3c40faa5f62316370ea9d8e711fa75 100644 (file)
@@ -384,9 +384,6 @@ static void snd_complete_urb(struct urb *urb)
        if (unlikely(atomic_read(&ep->chip->shutdown)))
                goto exit_clear;
 
-       if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
-               goto exit_clear;
-
        if (usb_pipeout(ep->pipe)) {
                retire_outbound_urb(ep, ctx);
                /* can be stopped during retire callback */
@@ -537,11 +534,6 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
                        alive, ep->ep_num);
        clear_bit(EP_FLAG_STOPPING, &ep->flags);
 
-       ep->data_subs = NULL;
-       ep->sync_slave = NULL;
-       ep->retire_data_urb = NULL;
-       ep->prepare_data_urb = NULL;
-
        return 0;
 }
 
@@ -1028,6 +1020,10 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
 
        if (--ep->use_count == 0) {
                deactivate_urbs(ep, false);
+               ep->data_subs = NULL;
+               ep->sync_slave = NULL;
+               ep->retire_data_urb = NULL;
+               ep->prepare_data_urb = NULL;
                set_bit(EP_FLAG_STOPPING, &ep->flags);
        }
 }
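
Two races in USB-audio stream shutdown are addressed together here. Dropping the EP_FLAG_RUNNING early-out lets snd_complete_urb() retire the final in-flight URBs instead of silently discarding their data, and the data_subs/sync_slave/retire/prepare callback pointers are now cleared only when the last user of the endpoint stops it, in snd_usb_endpoint_stop(), rather than in wait_clear_urbs(), which could run while another user still needed them. The ownership rule in miniature (a standalone sketch, not the driver code):

    #include <stdatomic.h>
    #include <stddef.h>

    struct ep {
            atomic_int use_count;
            void (*retire)(struct ep *);    /* consumer callback */
    };

    /* Only the caller that drops the last reference may clear the
     * callback, so a concurrent stop can't yank it from under a
     * still-active user. */
    static void ep_stop(struct ep *e)
    {
            if (atomic_fetch_sub_explicit(&e->use_count, 1,
                                          memory_order_acq_rel) == 1)
                    e->retire = NULL;
    }
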