Merge branch 's3-for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/amit/virtio-console
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 31 Mar 2012 22:11:39 +0000 (15:11 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 31 Mar 2012 22:11:39 +0000 (15:11 -0700)
Pull virtio S3 support patches from Amit Shah:
 "Turns out S3 is not different from S4 for virtio devices: the device
  is assumed to be reset, so the host and guest state are to be assumed
  to be out of sync upon resume.  We handle the S4 case with exactly the
  same scenario, so just point the suspend/resume routines to the
  freeze/restore ones.

  Once that is done, we also use the PM API's macro to initialise the
  sleep functions.

  A couple of cleanups are included: there's no need for special thaw
  processing in the balloon driver, so that's addressed in patches 1 and
  2.

  Testing: both S3 and S4 support have been tested with these patches,
  using a method similar to the one used earlier during S4 patch
  development: a guest is started with virtio-blk as the only disk, a
  virtio network card, a virtio-serial port and a virtio balloon device.
  Ping from guest to host, dd of /dev/zero to a file on the disk, and IO
  from the host on the virtio-serial port were all exercised at once
  while performing S4 and S3 (separately).  Everything continues to work
  fine after resume, and the virtio balloon values were verified by
  inflating and deflating the balloon."

Pulling from Amit, since Rusty is off getting married (and presumably
shaving people).

* 's3-for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/amit/virtio-console:
  virtio-pci: switch to PM ops macro to initialise PM functions
  virtio-pci: S3 support
  virtio-pci: drop restore_common()
  virtio: drop thaw PM operation
  virtio: balloon: Allow stats update after restore from S4
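
As the pull message notes, S3 support boils down to reusing the S4
freeze/restore callbacks and wiring them up through the PM core's macro.
A minimal sketch of that wiring, assuming driver callbacks named
virtio_pci_freeze() and virtio_pci_restore():

    #include <linux/pm.h>

    /*
     * Sketch only: point the S3 (suspend/resume) hooks at the same pair
     * of routines already used for S4 (freeze/thaw/poweroff/restore).
     * The callback names are assumptions for illustration.
     */
    #ifdef CONFIG_PM
    static const struct dev_pm_ops virtio_pci_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
    };
    #endif

The driver then points its struct pci_driver's .driver.pm at this table
instead of open-coding separate suspend/resume handlers.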

164 files changed:
Documentation/ABI/testing/sysfs-bus-event_source-devices-format [new file with mode: 0644]
Documentation/scsi/00-INDEX
Documentation/scsi/st.txt
Documentation/scsi/ufs.txt [new file with mode: 0644]
arch/blackfin/kernel/setup.c
arch/parisc/include/asm/futex.h
arch/parisc/kernel/smp.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p6.c
drivers/mtd/mtdchar.c
drivers/pci/pcie/aspm.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/atp870u.c
drivers/scsi/bfa/bfa.h
drivers/scsi/bfa/bfa_core.c
drivers/scsi/bfa/bfa_defs_svc.h
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/bfa/bfa_fcs_rport.c
drivers/scsi/bfa/bfa_ioc.c
drivers/scsi/bfa/bfa_ioc.h
drivers/scsi/bfa/bfa_ioc_ct.c
drivers/scsi/bfa/bfa_svc.c
drivers/scsi/bfa/bfa_svc.h
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/bfa/bfad_bsg.h
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bfa/bfi_ms.h
drivers/scsi/bfa/bfi_reg.h
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/lpfc/Makefile
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_scsi.h
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/qla4xxx/ql4_isr.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/st.c
drivers/scsi/st.h
drivers/scsi/ufs/Kconfig [new file with mode: 0644]
drivers/scsi/ufs/Makefile [new file with mode: 0644]
drivers/scsi/ufs/ufs.h [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c [new file with mode: 0644]
drivers/scsi/ufs/ufshci.h [new file with mode: 0644]
drivers/scsi/vmw_pvscsi.c
drivers/scsi/vmw_pvscsi.h
fs/aio.c
fs/ext2/ext2.h
fs/ext2/xattr_security.c
fs/ext2/xattr_trusted.c
fs/ext2/xip.c
fs/ext3/acl.c
fs/ext3/balloc.c
fs/ext3/bitmap.c
fs/ext3/dir.c
fs/ext3/ext3.h [new file with mode: 0644]
fs/ext3/ext3_jbd.c
fs/ext3/file.c
fs/ext3/fsync.c
fs/ext3/hash.c
fs/ext3/ialloc.c
fs/ext3/inode.c
fs/ext3/ioctl.c
fs/ext3/namei.c
fs/ext3/resize.c
fs/ext3/super.c
fs/ext3/symlink.c
fs/ext3/xattr.c
fs/ext3/xattr_security.c
fs/ext3/xattr_trusted.c
fs/ext3/xattr_user.c
fs/gfs2/file.c
fs/namei.c
fs/ocfs2/ioctl.c
fs/pstore/inode.c
include/linux/Kbuild
include/linux/ext2_fs.h
include/linux/ext2_fs_sb.h [deleted file]
include/linux/ext3_fs.h [deleted file]
include/linux/ext3_fs_i.h [deleted file]
include/linux/ext3_fs_sb.h [deleted file]
include/linux/ext3_jbd.h [deleted file]
include/linux/ftrace_event.h
include/linux/kernel.h
include/linux/mtio.h
include/linux/perf_event.h
include/linux/ring_buffer.h
include/scsi/iscsi_if.h
include/scsi/libfcoe.h
init/do_mounts_initrd.c
init/do_mounts_rd.c
kernel/events/core.c
kernel/sched/core.c
kernel/sched/sched.h
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_entries.h
kernel/trace/trace_export.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/selinuxfs.c
tools/perf/Documentation/perf-report.txt
tools/perf/Makefile
tools/perf/builtin-diff.c
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/config/feature-tests.mak
tools/perf/util/annotate.c
tools/perf/util/cache.h
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/gtk/browser.c [new file with mode: 0644]
tools/perf/util/gtk/gtk.h [new file with mode: 0644]
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/export.h [new file with mode: 0644]
tools/perf/util/include/linux/module.h [deleted file]
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l [new file with mode: 0644]
tools/perf/util/parse-events.y [new file with mode: 0644]
tools/perf/util/pmu.c [new file with mode: 0644]
tools/perf/util/pmu.h [new file with mode: 0644]
tools/perf/util/pmu.l [new file with mode: 0644]
tools/perf/util/pmu.y [new file with mode: 0644]
tools/perf/util/probe-finder.c
tools/perf/util/session.c
tools/perf/util/symbol.c
tools/perf/util/trace-event-parse.c
tools/perf/util/ui/browser.h
tools/perf/util/ui/browsers/hists.c
tools/perf/util/ui/keysyms.h
tools/perf/util/ui/util.c

diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-format b/Documentation/ABI/testing/sysfs-bus-event_source-devices-format
new file mode 100644 (file)
index 0000000..079afc7
--- /dev/null
@@ -0,0 +1,14 @@
+Where:         /sys/bus/event_source/devices/<dev>/format
+Date:          January 2012
+Kernel Version: 3.3
+Contact:       Jiri Olsa <jolsa@redhat.com>
+Description:
+               Attribute group to describe the magic bits that go into
+               perf_event_attr::config[012] for a particular pmu.
+               Each attribute of this group defines the 'hardware' bitmask
+               we want to export, so that userspace can deal with sane
+               name/value pairs.
+
+               Example: 'config1:1,6-10,44'
+               Defines contents of attribute that occupies bits 1,6-10,44 of
+               perf_event_attr::config1.
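
For illustration, a small userspace sketch that reads one such format
attribute; the PMU directory name ("cpu") and attribute name ("event")
below are assumptions for the example, not part of the ABI text above:

    #include <stdio.h>

    int main(void)
    {
            char spec[64];
            /* contents look like "config:0-7"; see the PMU_FORMAT_ATTR()
             * definitions added further down in this series */
            FILE *f = fopen("/sys/bus/event_source/devices/cpu/format/event",
                            "r");

            if (!f)
                    return 1;
            if (fgets(spec, sizeof(spec), f))
                    printf("'event' field spec: %s", spec);
            fclose(f);
            return 0;
    }
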
index b48ded55b555041bc638c0f003d6248217f60856..b7dd6502bec577a9830bad64000f537cebbe7b53 100644 (file)
@@ -94,3 +94,5 @@ sym53c8xx_2.txt
        - info on second generation driver for sym53c8xx based adapters
 tmscsim.txt
        - info on driver for AM53c974 based adapters
+ufs.txt
+       - info on Universal Flash Storage(UFS) and UFS host controller driver.
index 691ca292c24d751050bcf4bf726e900c0070aa09..685bf3582abe6104f367a76d48e03c5185bce911 100644 (file)
@@ -390,6 +390,10 @@ MTSETDRVBUFFER
             MT_ST_SYSV sets the SYSV semantics (mode)
             MT_ST_NOWAIT enables immediate mode (i.e., don't wait for
                the command to finish) for some commands (e.g., rewind)
+            MT_ST_NOWAIT_EOF enables immediate filemark mode (i.e. when
+               writing a filemark, don't wait for it to complete). Please
+               see the BASICS note about MTWEOFI with respect to the
+               possible dangers of writing immediate filemarks.
             MT_ST_SILI enables setting the SILI bit in SCSI commands when
                reading in variable block mode to enhance performance when
                reading blocks shorter than the byte count; set this only
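
A minimal userspace sketch of enabling this option through the
MTSETDRVBUFFER ioctl; the device path (/dev/nst0) is an assumption for
the example, and MT_ST_NOWAIT_EOF itself is the flag added by this
series:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/mtio.h>

    int main(void)
    {
            /* Set the MT_ST_NOWAIT_EOF boolean drive option. */
            struct mtop op = {
                    .mt_op    = MTSETDRVBUFFER,
                    .mt_count = MT_ST_SETBOOLEANS | MT_ST_NOWAIT_EOF,
            };
            int fd = open("/dev/nst0", O_RDWR);

            if (fd < 0 || ioctl(fd, MTIOCTOP, &op) < 0) {
                    perror("MT_ST_NOWAIT_EOF");
                    return 1;
            }
            close(fd);
            return 0;
    }
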
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt
new file mode 100644 (file)
index 0000000..41a6164
--- /dev/null
@@ -0,0 +1,133 @@
+                       Universal Flash Storage
+                       =======================
+
+
+Contents
+--------
+
+1. Overview
+2. UFS Architecture Overview
+  2.1 Application Layer
+  2.2 UFS Transport Protocol(UTP) layer
+  2.3 UFS Interconnect(UIC) Layer
+3. UFSHCD Overview
+  3.1 UFS controller initialization
+  3.2 UTP Transfer requests
+  3.3 UFS error handling
+  3.4 SCSI Error handling
+
+
+1. Overview
+-----------
+
+Universal Flash Storage(UFS) is a storage specification for flash devices.
+It aims to provide a universal storage interface for both
+embedded and removable flash memory based storage in mobile
+devices such as smart phones and tablet computers. The specification
+is defined by JEDEC Solid State Technology Association. UFS is based
+on MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
+physical layer and MIPI Unipro as the link layer.
+
+The main goals of UFS are to provide,
+ * Optimized performance:
+   For UFS version 1.0 and 1.1 the target performance is as follows,
+   Support for Gear1 is mandatory (rate A: 1248Mbps, rate B: 1457.6Mbps)
+   Support for Gear2 is optional (rate A: 2496Mbps, rate B: 2915.2Mbps)
+   Future version of the standard,
+   Gear3 (rate A: 4992Mbps, rate B: 5830.4Mbps)
+ * Low power consumption
+ * High random IOPs and low latency
+
+
+2. UFS Architecture Overview
+----------------------------
+
+UFS has a layered communication architecture which is based on the SCSI
+SAM-5 architectural model.
+
+The UFS communication architecture consists of the following layers,
+
+2.1 Application Layer
+
+  The Application layer is composed of UFS command set layer(UCS),
+  Task Manager and Device manager. The UFS interface is designed to be
+  protocol agnostic, however SCSI has been selected as a baseline
+  protocol for versions 1.0 and 1.1 of the UFS protocol layer.
+  UFS supports a subset of the SCSI commands defined by SPC-4 and SBC-3.
+  * UCS: It handles SCSI commands supported by UFS specification.
+  * Task manager: It handles task management functions defined by the
+     UFS which are meant for command queue control.
+  * Device manager: It handles device level operations and device
+     configuration operations. Device level operations mainly involve
+     device power management operations and commands to Interconnect
+     layers. Device level configurations involve handling of query
+     requests which are used to modify and retrieve configuration
+     information of the device.
+
+2.2 UFS Transport Protocol(UTP) layer
+
+  UTP layer provides services for
+  the higher layers through Service Access Points. UTP defines 3
+  service access points for higher layers.
+  * UDM_SAP: Device manager service access point is exposed to device
+    manager for device level operations. These device level operations
+    are done through query requests.
+  * UTP_CMD_SAP: Command service access point is exposed to UFS command
+    set layer(UCS) to transport commands.
+  * UTP_TM_SAP: Task management service access point is exposed to task
+    manager to transport task management functions.
+  UTP transports messages through UFS protocol information unit(UPIU).
+
+2.3 UFS Interconnect(UIC) Layer
+
+  UIC is the lowest layer of UFS layered architecture. It handles
+  connection between UFS host and UFS device. UIC consists of
+  MIPI UniPro and MIPI M-PHY. UIC provides 2 service access points
+  to upper layer,
+  * UIC_SAP: To transport UPIU between UFS host and UFS device.
+  * UIO_SAP: To issue commands to Unipro layers.
+
+
+3. UFSHCD Overview
+------------------
+
+The UFS host controller driver is based on Linux SCSI Framework.
+UFSHCD is a low level device driver which acts as an interface between
+SCSI Midlayer and PCIe based UFS host controllers.
+
+The current UFSHCD implementation supports the following functionality,
+
+3.1 UFS controller initialization
+
+  The initialization module brings UFS host controller to active state
+  and prepares the controller to transfer commands/response between
+  UFSHCD and UFS device.
+
+3.2 UTP Transfer requests
+
+  The transfer request handling module of UFSHCD receives SCSI commands
+  from the SCSI Midlayer, forms UPIUs and issues the UPIUs to the UFS
+  host controller. The module also decodes responses received from the
+  UFS host controller in the form of UPIUs and informs the SCSI
+  Midlayer of the status of the command.
+
+3.3 UFS error handling
+
+  Error handling module handles Host controller fatal errors,
+  Device fatal errors and UIC interconnect layer related errors.
+
+3.4 SCSI Error handling
+
+  This is done through UFSHCD SCSI error handling routines registered
+  with the SCSI Midlayer. Examples of error handling commands issued by
+  the SCSI Midlayer are Abort task, LUN reset and host reset. UFSHCD
+  routines to perform these tasks are registered with the
+  SCSI Midlayer through .eh_abort_handler, .eh_device_reset_handler and
+  .eh_host_reset_handler.
+
+In this version of UFSHCD, Query requests and power management
+functionality are not implemented.
+
+UFS Specifications can be found at,
+UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
+UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
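
For section 3.4 above, a minimal sketch of how a SCSI low-level driver
registers such error handlers with the SCSI Midlayer; the handler and
template names are illustrative assumptions, not UFSHCD's actual
symbols:

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    static int example_eh_abort(struct scsi_cmnd *cmd)
    {
            /* abort the outstanding command, report SUCCESS or FAILED */
            return SUCCESS;
    }

    static int example_eh_device_reset(struct scsi_cmnd *cmd)
    {
            /* issue a LUN reset for the device the command was queued to */
            return SUCCESS;
    }

    static int example_eh_host_reset(struct scsi_cmnd *cmd)
    {
            /* reset the whole host controller */
            return SUCCESS;
    }

    static struct scsi_host_template example_sht = {
            .name                    = "example-ufshcd",
            .eh_abort_handler        = example_eh_abort,
            .eh_device_reset_handler = example_eh_device_reset,
            .eh_host_reset_handler   = example_eh_host_reset,
    };
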
index 2aa01936850422a477c6da441d04d84408b2ef26..2ad747e909fb6bdedae841839383e8459f9e844e 100644 (file)
@@ -550,6 +550,7 @@ static __init void memory_setup(void)
 {
 #ifdef CONFIG_MTD_UCLINUX
        unsigned long mtd_phys = 0;
+       unsigned long n;
 #endif
        unsigned long max_mem;
 
@@ -593,9 +594,9 @@ static __init void memory_setup(void)
        mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
 
 # if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
-       if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
-               mtd_size =
-                   PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
+       n = ext2_image_size((void *)(mtd_phys + 0x400));
+       if (n)
+               mtd_size = PAGE_ALIGN(n * 1024);
 # endif
 
 # if defined(CONFIG_CRAMFS)
index 2388bdb3283283870016fc03448e8ac569011d57..49df14805a9b44bba6857ec4138fe62164e935ab 100644 (file)
@@ -8,6 +8,29 @@
 #include <asm/atomic.h>
 #include <asm/errno.h>
 
+/* The following has to match the LWS code in syscall.S.  We have
+   sixteen four-word locks. */
+
+static inline void
+_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
+{
+       extern u32 lws_lock_start[];
+       long index = ((long)uaddr & 0xf0) >> 2;
+       arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+       local_irq_save(*flags);
+       arch_spin_lock(s);
+}
+
+static inline void
+_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
+{
+       extern u32 lws_lock_start[];
+       long index = ((long)uaddr & 0xf0) >> 2;
+       arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
+       arch_spin_unlock(s);
+       local_irq_restore(*flags);
+}
+
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
@@ -26,7 +49,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 
        pagefault_disable();
 
-       _atomic_spin_lock_irqsave(uaddr, flags);
+       _futex_spin_lock_irqsave(uaddr, &flags);
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -71,7 +94,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
                ret = -ENOSYS;
        }
 
-       _atomic_spin_unlock_irqrestore(uaddr, flags);
+       _futex_spin_unlock_irqrestore(uaddr, &flags);
 
        pagefault_enable();
 
@@ -113,7 +136,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         * address. This should scale to a couple of CPUs.
         */
 
-       _atomic_spin_lock_irqsave(uaddr, flags);
+       _futex_spin_lock_irqsave(uaddr, &flags);
 
        ret = get_user(val, uaddr);
 
@@ -122,7 +145,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
        *uval = val;
 
-       _atomic_spin_unlock_irqrestore(uaddr, flags);
+       _futex_spin_unlock_irqrestore(uaddr, &flags);
 
        return ret;
 }
index 5006e8ea305183eb5998d6f273cd3ad15dd2df7c..0bb1d63907f88a4ac915c2398b9a154744d91265 100644 (file)
@@ -290,8 +290,7 @@ smp_cpu_init(int cpunum)
        mb();
 
        /* Well, support 2.4 linux scheme as well. */
-       if (cpu_isset(cpunum, cpu_online_map))
-       {
+       if (cpu_online(cpunum)) {
                extern void machine_halt(void); /* arch/parisc.../process.c */
 
                printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
index 40883ffe2da9261b4f34c2997d2b5e533df25648..bb8e03407e183fa24be61cf6c9432c7a7ffe68c9 100644 (file)
@@ -1313,6 +1313,11 @@ static void __init pmu_check_apic(void)
        pr_info("no hardware sampling interrupt available.\n");
 }
 
+static struct attribute_group x86_pmu_format_group = {
+       .name = "format",
+       .attrs = NULL,
+};
+
 static int __init init_hw_perf_events(void)
 {
        struct x86_pmu_quirk *quirk;
@@ -1387,6 +1392,7 @@ static int __init init_hw_perf_events(void)
        }
 
        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+       x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
@@ -1615,6 +1621,9 @@ static int x86_pmu_event_idx(struct perf_event *event)
 {
        int idx = event->hw.idx;
 
+       if (!x86_pmu.attr_rdpmc)
+               return 0;
+
        if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
                idx -= X86_PMC_IDX_FIXED;
                idx |= 1 << 30;
@@ -1667,6 +1676,7 @@ static struct attribute_group x86_pmu_attr_group = {
 
 static const struct attribute_group *x86_pmu_attr_groups[] = {
        &x86_pmu_attr_group,
+       &x86_pmu_format_group,
        NULL,
 };
 
@@ -1698,14 +1708,19 @@ static struct pmu pmu = {
        .flush_branch_stack     = x86_pmu_flush_branch_stack,
 };
 
-void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
+       userpg->cap_usr_time = 0;
+       userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
+       userpg->pmc_width = x86_pmu.cntval_bits;
+
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return;
 
        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                return;
 
+       userpg->cap_usr_time = 1;
        userpg->time_mult = this_cpu_read(cyc2ns);
        userpg->time_shift = CYC2NS_SCALE_FACTOR;
        userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
index 8484e77c211ea663790890cbc06929ad866adce6..6638aaf5449302c2ea2d5d03073f11bbcaf5b6ba 100644 (file)
@@ -339,6 +339,7 @@ struct x86_pmu {
         * sysfs attrs
         */
        int             attr_rdpmc;
+       struct attribute **format_attrs;
 
        /*
         * CPU Hotplug hooks
index dd002faff7a65156fc5dccec44b052331d0d5c81..95e7fe1c5f0bf57c3bf2537a1856b00f81425458 100644 (file)
@@ -404,6 +404,21 @@ static void amd_pmu_cpu_dead(int cpu)
        }
 }
 
+PMU_FORMAT_ATTR(event, "config:0-7,32-35");
+PMU_FORMAT_ATTR(umask, "config:8-15"   );
+PMU_FORMAT_ATTR(edge,  "config:18"     );
+PMU_FORMAT_ATTR(inv,   "config:23"     );
+PMU_FORMAT_ATTR(cmask, "config:24-31"  );
+
+static struct attribute *amd_format_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_inv.attr,
+       &format_attr_cmask.attr,
+       NULL,
+};
+
 static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
@@ -426,6 +441,8 @@ static __initconst const struct x86_pmu amd_pmu = {
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,
 
+       .format_attrs           = amd_format_attr,
+
        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
@@ -596,6 +613,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
        .cpu_dead               = amd_pmu_cpu_dead,
 #endif
        .cpu_starting           = amd_pmu_cpu_starting,
+       .format_attrs           = amd_format_attr,
 };
 
 __init int amd_pmu_init(void)
index 6a84e7f28f057665c718d858fa794ea32c65652c..26b3e2fef1047a86a7d546d4b7e8326b3ecbad33 100644 (file)
@@ -1431,6 +1431,24 @@ static void core_pmu_enable_all(int added)
        }
 }
 
+PMU_FORMAT_ATTR(event, "config:0-7"    );
+PMU_FORMAT_ATTR(umask, "config:8-15"   );
+PMU_FORMAT_ATTR(edge,  "config:18"     );
+PMU_FORMAT_ATTR(pc,    "config:19"     );
+PMU_FORMAT_ATTR(any,   "config:21"     ); /* v3 + */
+PMU_FORMAT_ATTR(inv,   "config:23"     );
+PMU_FORMAT_ATTR(cmask, "config:24-31"  );
+
+static struct attribute *intel_arch_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_pc.attr,
+       &format_attr_inv.attr,
+       &format_attr_cmask.attr,
+       NULL,
+};
+
 static __initconst const struct x86_pmu core_pmu = {
        .name                   = "core",
        .handle_irq             = x86_pmu_handle_irq,
@@ -1455,6 +1473,7 @@ static __initconst const struct x86_pmu core_pmu = {
        .put_event_constraints  = intel_put_event_constraints,
        .event_constraints      = intel_core_event_constraints,
        .guest_get_msrs         = core_guest_get_msrs,
+       .format_attrs           = intel_arch_formats_attr,
 };
 
 struct intel_shared_regs *allocate_shared_regs(int cpu)
@@ -1553,6 +1572,21 @@ static void intel_pmu_flush_branch_stack(void)
                intel_pmu_lbr_reset();
 }
 
+PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
+
+static struct attribute *intel_arch3_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_pc.attr,
+       &format_attr_any.attr,
+       &format_attr_inv.attr,
+       &format_attr_cmask.attr,
+
+       &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
+       NULL,
+};
+
 static __initconst const struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
@@ -1576,6 +1610,8 @@ static __initconst const struct x86_pmu intel_pmu = {
        .get_event_constraints  = intel_get_event_constraints,
        .put_event_constraints  = intel_put_event_constraints,
 
+       .format_attrs           = intel_arch3_formats_attr,
+
        .cpu_prepare            = intel_pmu_cpu_prepare,
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
index c7181befecde63e38a5691368a2d77c6de075b05..32bcfc7dd2300d2043245e22a2351dfa65615484 100644 (file)
@@ -87,6 +87,23 @@ static void p6_pmu_enable_event(struct perf_event *event)
        (void)checking_wrmsrl(hwc->config_base, val);
 }
 
+PMU_FORMAT_ATTR(event, "config:0-7"    );
+PMU_FORMAT_ATTR(umask, "config:8-15"   );
+PMU_FORMAT_ATTR(edge,  "config:18"     );
+PMU_FORMAT_ATTR(pc,    "config:19"     );
+PMU_FORMAT_ATTR(inv,   "config:23"     );
+PMU_FORMAT_ATTR(cmask, "config:24-31"  );
+
+static struct attribute *intel_p6_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       &format_attr_edge.attr,
+       &format_attr_pc.attr,
+       &format_attr_inv.attr,
+       &format_attr_cmask.attr,
+       NULL,
+};
+
 static __initconst const struct x86_pmu p6_pmu = {
        .name                   = "p6",
        .handle_irq             = x86_pmu_handle_irq,
@@ -115,6 +132,8 @@ static __initconst const struct x86_pmu p6_pmu = {
        .cntval_mask            = (1ULL << 32) - 1,
        .get_event_constraints  = x86_get_event_constraints,
        .event_constraints      = p6_event_constraints,
+
+       .format_attrs           = intel_p6_formats_attr,
 };
 
 __init int p6_pmu_init(void)
index 55d82321d307de4db3fa0158f9282f66d03bd4f6..94eb05b1afdfd2bcab367a6e6fa6dbab0667dca9 100644 (file)
@@ -39,7 +39,6 @@
 #include <asm/uaccess.h>
 
 static DEFINE_MUTEX(mtd_mutex);
-static struct vfsmount *mtd_inode_mnt __read_mostly;
 
 /*
  * Data structure to hold the pointer to the mtd device as well
@@ -75,7 +74,9 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
        return -EINVAL;
 }
 
-
+static int count;
+static struct vfsmount *mnt;
+static struct file_system_type mtd_inodefs_type;
 
 static int mtdchar_open(struct inode *inode, struct file *file)
 {
@@ -92,6 +93,10 @@ static int mtdchar_open(struct inode *inode, struct file *file)
        if ((file->f_mode & FMODE_WRITE) && (minor & 1))
                return -EACCES;
 
+       ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
+       if (ret)
+               return ret;
+
        mutex_lock(&mtd_mutex);
        mtd = get_mtd_device(NULL, devnum);
 
@@ -106,7 +111,7 @@ static int mtdchar_open(struct inode *inode, struct file *file)
                goto out;
        }
 
-       mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
+       mtd_ino = iget_locked(mnt->mnt_sb, devnum);
        if (!mtd_ino) {
                put_mtd_device(mtd);
                ret = -ENOMEM;
@@ -141,6 +146,7 @@ static int mtdchar_open(struct inode *inode, struct file *file)
 
 out:
        mutex_unlock(&mtd_mutex);
+       simple_release_fs(&mnt, &count);
        return ret;
 } /* mtdchar_open */
 
@@ -162,6 +168,7 @@ static int mtdchar_close(struct inode *inode, struct file *file)
        put_mtd_device(mtd);
        file->private_data = NULL;
        kfree(mfi);
+       simple_release_fs(&mnt, &count);
 
        return 0;
 } /* mtdchar_close */
@@ -1175,10 +1182,15 @@ static const struct file_operations mtd_fops = {
 #endif
 };
 
+static const struct super_operations mtd_ops = {
+       .drop_inode = generic_delete_inode,
+       .statfs = simple_statfs,
+};
+
 static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
                                int flags, const char *dev_name, void *data)
 {
-       return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
+       return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
 }
 
 static struct file_system_type mtd_inodefs_type = {
@@ -1187,26 +1199,6 @@ static struct file_system_type mtd_inodefs_type = {
        .kill_sb = kill_anon_super,
 };
 
-static void mtdchar_notify_add(struct mtd_info *mtd)
-{
-}
-
-static void mtdchar_notify_remove(struct mtd_info *mtd)
-{
-       struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
-
-       if (mtd_ino) {
-               /* Destroy the inode if it exists */
-               clear_nlink(mtd_ino);
-               iput(mtd_ino);
-       }
-}
-
-static struct mtd_notifier mtdchar_notifier = {
-       .add = mtdchar_notify_add,
-       .remove = mtdchar_notify_remove,
-};
-
 static int __init init_mtdchar(void)
 {
        int ret;
@@ -1224,19 +1216,8 @@ static int __init init_mtdchar(void)
                pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
                goto err_unregister_chdev;
        }
-
-       mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
-       if (IS_ERR(mtd_inode_mnt)) {
-               ret = PTR_ERR(mtd_inode_mnt);
-               pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
-               goto err_unregister_filesystem;
-       }
-       register_mtd_user(&mtdchar_notifier);
-
        return ret;
 
-err_unregister_filesystem:
-       unregister_filesystem(&mtd_inodefs_type);
 err_unregister_chdev:
        __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
        return ret;
@@ -1244,8 +1225,6 @@ err_unregister_chdev:
 
 static void __exit cleanup_mtdchar(void)
 {
-       unregister_mtd_user(&mtdchar_notifier);
-       kern_unmount(mtd_inode_mnt);
        unregister_filesystem(&mtd_inodefs_type);
        __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 }
index 4bdef24cd412ff75db214f0461ab98e908adccda..b500840a143b08ac72e8c045fea66135535676d5 100644 (file)
@@ -508,9 +508,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
        int pos;
        u32 reg32;
 
-       if (aspm_disabled)
-               return 0;
-
        /*
         * Some functions in a slot might not all be PCIe functions,
         * very strange. Disable ASPM for the whole slot
@@ -519,6 +516,16 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
                pos = pci_pcie_cap(child);
                if (!pos)
                        return -EINVAL;
+
+               /*
+                * If ASPM is disabled then we're not going to change
+                * the BIOS state. It's safe to continue even if it's a
+                * pre-1.1 device
+                */
+
+               if (aspm_disabled)
+                       continue;
+
                /*
                 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
                 * RBER bit to determine if a function is 1.1 version device
index a06e608789e390661e860605d6de3a29db0b7243..29684c8142b0e62fd814c352d23d955e1aee567e 100644 (file)
@@ -619,6 +619,7 @@ config SCSI_ARCMSR
 
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt2sas/Kconfig"
+source "drivers/scsi/ufs/Kconfig"
 
 config SCSI_HPTIOP
        tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
index ad24e065b1e553c4efd620e6aa161362cf1e40dc..8deedeaf5608d0623b6fda4a28926bc763a9bd03 100644 (file)
@@ -108,6 +108,7 @@ obj-$(CONFIG_MEGARAID_LEGACY)       += megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)  += megaraid/
 obj-$(CONFIG_MEGARAID_SAS)     += megaraid/
 obj-$(CONFIG_SCSI_MPT2SAS)     += mpt2sas/
+obj-$(CONFIG_SCSI_UFSHCD)      += ufs/
 obj-$(CONFIG_SCSI_ACARD)       += atp870u.o
 obj-$(CONFIG_SCSI_SUNESP)      += esp_scsi.o   sun_esp.o
 obj-$(CONFIG_SCSI_GDTH)                += gdth.o
index f29d5121d5ed2985af146a6416b0618293246cd8..68ce08552f699b6752cecfaa2c8a9174e8c7c634 100644 (file)
@@ -2582,7 +2582,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * this than via the PCI device table
         */
        if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) {
-               error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+               atpdev->chip_ver = pdev->revision;
                if (atpdev->chip_ver < 2)
                        goto err_eio;
        }
@@ -2601,7 +2601,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        base_io &= 0xfffffff8;
 
        if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) {
-               error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver);
+               atpdev->chip_ver = pdev->revision;
                pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803
 
                host_id = inb(base_io + 0x39);
index a796de9350541a4fcae375681bb911d1fc27ec00..4ad7e368bbc2505a0c0b86a637d6a718dd0a4dac 100644 (file)
@@ -225,9 +225,9 @@ struct bfa_faa_args_s {
 };
 
 struct bfa_iocfc_s {
+       bfa_fsm_t               fsm;
        struct bfa_s            *bfa;
        struct bfa_iocfc_cfg_s  cfg;
-       int                     action;
        u32             req_cq_pi[BFI_IOC_MAX_CQS];
        u32             rsp_cq_ci[BFI_IOC_MAX_CQS];
        u8              hw_qid[BFI_IOC_MAX_CQS];
@@ -236,7 +236,9 @@ struct bfa_iocfc_s {
        struct bfa_cb_qe_s      dis_hcb_qe;
        struct bfa_cb_qe_s      en_hcb_qe;
        struct bfa_cb_qe_s      stats_hcb_qe;
-       bfa_boolean_t           cfgdone;
+       bfa_boolean_t           submod_enabled;
+       bfa_boolean_t           cb_reqd;        /* Driver call back reqd */
+       bfa_status_t            op_status;      /* Status of bfa iocfc op */
 
        struct bfa_dma_s        cfg_info;
        struct bfi_iocfc_cfg_s *cfginfo;
@@ -341,8 +343,6 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
 void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
                                 u32 *end);
 void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
-wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
-wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
 int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
                                struct bfi_pbc_vport_s *pbc_vport);
 
@@ -428,7 +428,6 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
 
 void bfa_iocfc_enable(struct bfa_s *bfa);
 void bfa_iocfc_disable(struct bfa_s *bfa);
-void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)                \
        bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
 
index 4bd546bcc240740fdadd04fd435828b4dc94f4f6..456e5762977df90bc7984d98bdae383797ad674a 100644 (file)
@@ -199,14 +199,432 @@ enum {
 #define DEF_CFG_NUM_SBOOT_TGTS         16
 #define DEF_CFG_NUM_SBOOT_LUNS         16
 
+/*
+ * IOCFC state machine definitions/declarations
+ */
+bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
+                  struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
+                  struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, operational,
+                  struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_write,
+                  struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_failed,
+                  struct bfa_iocfc_s, enum iocfc_event);
+
 /*
  * forward declaration for IOC FC functions
  */
+static void bfa_iocfc_start_submod(struct bfa_s *bfa);
+static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
+static void bfa_iocfc_send_cfg(void *bfa_arg);
 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
 static void bfa_iocfc_disable_cbfn(void *bfa_arg);
 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
 static void bfa_iocfc_reset_cbfn(void *bfa_arg);
 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
+static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
+
+static void
+bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
+{
+}
+
+static void
+bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_INIT:
+       case IOCFC_E_ENABLE:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_IOC_ENABLED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+               break;
+       case IOCFC_E_IOC_FAILED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_dconf_modinit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_DCONF_DONE:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
+               break;
+       case IOCFC_E_IOC_FAILED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_CFG_DONE:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
+               break;
+       case IOCFC_E_IOC_FAILED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
+{
+       iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+       bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+                    bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_START:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+               break;
+       case IOCFC_E_STOP:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+               break;
+       case IOCFC_E_DISABLE:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+               break;
+       case IOCFC_E_IOC_FAILED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_fcport_init(iocfc->bfa);
+       bfa_iocfc_start_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_STOP:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+               break;
+       case IOCFC_E_DISABLE:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+               break;
+       case IOCFC_E_IOC_FAILED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_dconf_modexit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_DCONF_DONE:
+       case IOCFC_E_IOC_FAILED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_IOC_DISABLED:
+               bfa_isr_disable(iocfc->bfa);
+               bfa_iocfc_disable_submod(iocfc->bfa);
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+               iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+               bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
+                            bfa_iocfc_stop_cb, iocfc->bfa);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_IOC_ENABLED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+               break;
+       case IOCFC_E_IOC_FAILED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+
+               if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+                       break;
+
+               iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+               bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+                            bfa_iocfc_enable_cb, iocfc->bfa);
+               iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_CFG_DONE:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+               if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+                       break;
+
+               iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+               bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+                            bfa_iocfc_enable_cb, iocfc->bfa);
+               iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+               break;
+       case IOCFC_E_IOC_FAILED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+               if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+                       break;
+
+               iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+               bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+                            bfa_iocfc_enable_cb, iocfc->bfa);
+               iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_IOC_DISABLED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_isr_disable(iocfc->bfa);
+       bfa_iocfc_disable_submod(iocfc->bfa);
+       iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+       bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+                    bfa_iocfc_disable_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_STOP:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+               break;
+       case IOCFC_E_ENABLE:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_isr_disable(iocfc->bfa);
+       bfa_iocfc_disable_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_STOP:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+               break;
+       case IOCFC_E_DISABLE:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+               break;
+       case IOCFC_E_IOC_ENABLED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+               break;
+       case IOCFC_E_IOC_FAILED:
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
+
+static void
+bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+       bfa_isr_disable(iocfc->bfa);
+       iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+       bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+                    bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+       bfa_trc(iocfc->bfa, event);
+
+       switch (event) {
+       case IOCFC_E_STOP:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+               break;
+       case IOCFC_E_DISABLE:
+               bfa_ioc_disable(&iocfc->bfa->ioc);
+               break;
+       case IOCFC_E_IOC_ENABLED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+               break;
+       case IOCFC_E_IOC_DISABLED:
+               bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+               iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+               bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+                            bfa_iocfc_disable_cb, iocfc->bfa);
+               break;
+       case IOCFC_E_IOC_FAILED:
+               break;
+       default:
+               bfa_sm_fault(iocfc->bfa, event);
+               break;
+       }
+}
 
 /*
  * BFA Interrupt handling functions
@@ -231,16 +649,19 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
        }
 }
 
-static inline void
+bfa_boolean_t
 bfa_isr_rspq(struct bfa_s *bfa, int qid)
 {
        struct bfi_msg_s *m;
        u32     pi, ci;
        struct list_head *waitq;
+       bfa_boolean_t ret;
 
        ci = bfa_rspq_ci(bfa, qid);
        pi = bfa_rspq_pi(bfa, qid);
 
+       ret = (ci != pi);
+
        while (ci != pi) {
                m = bfa_rspq_elem(bfa, qid, ci);
                WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
@@ -260,6 +681,8 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
        waitq = bfa_reqq(bfa, qid);
        if (!list_empty(waitq))
                bfa_reqq_resume(bfa, qid);
+
+       return ret;
 }
 
 static inline void
@@ -320,6 +743,7 @@ bfa_intx(struct bfa_s *bfa)
 {
        u32 intr, qintr;
        int queue;
+       bfa_boolean_t rspq_comp = BFA_FALSE;
 
        intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
@@ -332,11 +756,12 @@ bfa_intx(struct bfa_s *bfa)
         */
        if (bfa->queue_process) {
                for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
-                       bfa_isr_rspq(bfa, queue);
+                       if (bfa_isr_rspq(bfa, queue))
+                               rspq_comp = BFA_TRUE;
        }
 
        if (!intr)
-               return BFA_TRUE;
+               return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;
 
        /*
         * CPE completion queue interrupt
@@ -525,11 +950,9 @@ bfa_iocfc_send_cfg(void *bfa_arg)
         * Enable interrupt coalescing if it is driver init path
         * and not ioc disable/enable path.
         */
-       if (!iocfc->cfgdone)
+       if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
                cfg_info->intr_attr.coalesce = BFA_TRUE;
 
-       iocfc->cfgdone = BFA_FALSE;
-
        /*
         * dma map IOC configuration itself
         */
@@ -549,8 +972,6 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        bfa->bfad = bfad;
        iocfc->bfa = bfa;
-       iocfc->action = BFA_IOCFC_ACT_NONE;
-
        iocfc->cfg = *cfg;
 
        /*
@@ -683,6 +1104,8 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
 
        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->start(bfa);
+
+       bfa->iocfc.submod_enabled = BFA_TRUE;
 }
 
 /*
@@ -693,8 +1116,13 @@ bfa_iocfc_disable_submod(struct bfa_s *bfa)
 {
        int             i;
 
+       if (bfa->iocfc.submod_enabled == BFA_FALSE)
+               return;
+
        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->iocdisable(bfa);
+
+       bfa->iocfc.submod_enabled = BFA_FALSE;
 }
 
 static void
@@ -702,15 +1130,8 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
 {
        struct bfa_s    *bfa = bfa_arg;
 
-       if (complete) {
-               if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
-                       bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
-               else
-                       bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
-       } else {
-               if (bfa->iocfc.cfgdone)
-                       bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
-       }
+       if (complete)
+               bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
 }
 
 static void
@@ -721,8 +1142,6 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
 
        if (compl)
                complete(&bfad->comp);
-       else
-               bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
 }
 
 static void
@@ -794,8 +1213,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
        fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
        fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);
 
-       iocfc->cfgdone = BFA_TRUE;
-
        /*
         * configure queue register offsets as learnt from firmware
         */
@@ -811,22 +1228,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
         */
        bfa_msix_queue_install(bfa);
 
-       /*
-        * Configuration is complete - initialize/start submodules
-        */
-       bfa_fcport_init(bfa);
-
-       if (iocfc->action == BFA_IOCFC_ACT_INIT) {
-               if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
-                       bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
-                               bfa_iocfc_init_cb, bfa);
-       } else {
-               if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
-                       bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
-                                       bfa_iocfc_enable_cb, bfa);
-               bfa_iocfc_start_submod(bfa);
+       if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
+               bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
+               bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
+               bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
        }
 }
+
 void
 bfa_iocfc_reset_queues(struct bfa_s *bfa)
 {
@@ -840,6 +1248,23 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
        }
 }
 
+/*
+ *     Process FAA pwwn msg from fw.
+ */
+static void
+bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
+{
+       struct bfa_iocfc_s              *iocfc   = &bfa->iocfc;
+       struct bfi_iocfc_cfgrsp_s       *cfgrsp  = iocfc->cfgrsp;
+
+       cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
+       cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
+
+       bfa->ioc.attr->pwwn = msg->pwwn;
+       bfa->ioc.attr->nwwn = msg->nwwn;
+       bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+}
+
 /* Fabric Assigned Address specific functions */
 
 /*
@@ -855,83 +1280,12 @@ bfa_faa_validate_request(struct bfa_s *bfa)
                if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
                        return BFA_STATUS_FEATURE_NOT_SUPPORTED;
        } else {
-               if (!bfa_ioc_is_acq_addr(&bfa->ioc))
-                       return BFA_STATUS_IOC_NON_OP;
+               return BFA_STATUS_IOC_NON_OP;
        }
 
        return BFA_STATUS_OK;
 }
 
-bfa_status_t
-bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
-{
-       struct bfi_faa_en_dis_s faa_enable_req;
-       struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
-       bfa_status_t            status;
-
-       iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
-       iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
-       status = bfa_faa_validate_request(bfa);
-       if (status != BFA_STATUS_OK)
-               return status;
-
-       if (iocfc->faa_args.busy == BFA_TRUE)
-               return BFA_STATUS_DEVBUSY;
-
-       if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
-               return BFA_STATUS_FAA_ENABLED;
-
-       if (bfa_fcport_is_trunk_enabled(bfa))
-               return BFA_STATUS_ERROR_TRUNK_ENABLED;
-
-       bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
-       iocfc->faa_args.busy = BFA_TRUE;
-
-       memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
-       bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
-               BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
-
-       bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
-                       sizeof(struct bfi_faa_en_dis_s));
-
-       return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
-               void *cbarg)
-{
-       struct bfi_faa_en_dis_s faa_disable_req;
-       struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
-       bfa_status_t            status;
-
-       iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
-       iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
-       status = bfa_faa_validate_request(bfa);
-       if (status != BFA_STATUS_OK)
-               return status;
-
-       if (iocfc->faa_args.busy == BFA_TRUE)
-               return BFA_STATUS_DEVBUSY;
-
-       if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
-               return BFA_STATUS_FAA_DISABLED;
-
-       bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
-       iocfc->faa_args.busy = BFA_TRUE;
-
-       memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
-       bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
-               BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
-
-       bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
-               sizeof(struct bfi_faa_en_dis_s));
-
-       return BFA_STATUS_OK;
-}
-
 bfa_status_t
 bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
                bfa_cb_iocfc_t cbfn, void *cbarg)
@@ -962,38 +1316,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
        return BFA_STATUS_OK;
 }
 
-/*
- *     FAA enable response
- */
-static void
-bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
-               struct bfi_faa_en_dis_rsp_s *rsp)
-{
-       void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
-       bfa_status_t    status = rsp->status;
-
-       WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
-       iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
-       iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
- *     FAA disable response
- */
-static void
-bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
-               struct bfi_faa_en_dis_rsp_s *rsp)
-{
-       void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
-       bfa_status_t    status = rsp->status;
-
-       WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
-       iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
-       iocfc->faa_args.busy = BFA_FALSE;
-}
-
 /*
  *     FAA query response
  */
@@ -1023,25 +1345,10 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
 {
        struct bfa_s    *bfa = bfa_arg;
 
-       if (status == BFA_STATUS_FAA_ACQ_ADDR) {
-               bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-                               bfa_iocfc_init_cb, bfa);
-               return;
-       }
-
-       if (status != BFA_STATUS_OK) {
-               bfa_isr_disable(bfa);
-               if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
-                       bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-                                    bfa_iocfc_init_cb, bfa);
-               else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
-                       bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
-                                       bfa_iocfc_enable_cb, bfa);
-               return;
-       }
-
-       bfa_iocfc_send_cfg(bfa);
-       bfa_dconf_modinit(bfa);
+       if (status == BFA_STATUS_OK)
+               bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
+       else
+               bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
 }
 
 /*
@@ -1052,17 +1359,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
 {
        struct bfa_s    *bfa = bfa_arg;
 
-       bfa_isr_disable(bfa);
-       bfa_iocfc_disable_submod(bfa);
-
-       if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
-               bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
-                            bfa);
-       else {
-               WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
-               bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
-                            bfa);
-       }
+       bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
 }
 
 /*
@@ -1074,13 +1371,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
        struct bfa_s    *bfa = bfa_arg;
 
        bfa->queue_process = BFA_FALSE;
-
-       bfa_isr_disable(bfa);
-       bfa_iocfc_disable_submod(bfa);
-
-       if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
-               bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
-                            bfa);
+       bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
 }
 
 /*
@@ -1095,7 +1386,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
        bfa_isr_enable(bfa);
 }
 
-
 /*
  * Query IOC memory requirement information.
  */
@@ -1171,6 +1461,12 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        INIT_LIST_HEAD(&bfa->comp_q);
        for (i = 0; i < BFI_IOC_MAX_CQS; i++)
                INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
+
+       bfa->iocfc.cb_reqd = BFA_FALSE;
+       bfa->iocfc.op_status = BFA_STATUS_OK;
+       bfa->iocfc.submod_enabled = BFA_FALSE;
+
+       bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
 }
 
 /*
@@ -1179,8 +1475,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 void
 bfa_iocfc_init(struct bfa_s *bfa)
 {
-       bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
-       bfa_ioc_enable(&bfa->ioc);
+       bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
 }
 
 /*
@@ -1190,8 +1485,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
 void
 bfa_iocfc_start(struct bfa_s *bfa)
 {
-       if (bfa->iocfc.cfgdone)
-               bfa_iocfc_start_submod(bfa);
+       bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
 }
 
 /*
@@ -1201,12 +1495,8 @@ bfa_iocfc_start(struct bfa_s *bfa)
 void
 bfa_iocfc_stop(struct bfa_s *bfa)
 {
-       bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
-
        bfa->queue_process = BFA_FALSE;
-       bfa_dconf_modexit(bfa);
-       if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
-               bfa_ioc_disable(&bfa->ioc);
+       bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
 }
 
 void
@@ -1226,13 +1516,9 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
        case BFI_IOCFC_I2H_UPDATEQ_RSP:
                iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
                break;
-       case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
-               bfa_faa_enable_reply(iocfc,
-                       (struct bfi_faa_en_dis_rsp_s *)msg);
-               break;
-       case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
-               bfa_faa_disable_reply(iocfc,
-                       (struct bfi_faa_en_dis_rsp_s *)msg);
+       case BFI_IOCFC_I2H_ADDR_MSG:
+               bfa_iocfc_process_faa_addr(bfa,
+                               (struct bfi_faa_addr_msg_s *)msg);
                break;
        case BFI_IOCFC_I2H_FAA_QUERY_RSP:
                bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
@@ -1306,8 +1592,8 @@ bfa_iocfc_enable(struct bfa_s *bfa)
 {
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Enable");
-       bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
-       bfa_ioc_enable(&bfa->ioc);
+       bfa->iocfc.cb_reqd = BFA_TRUE;
+       bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
 }
 
 void
@@ -1315,17 +1601,16 @@ bfa_iocfc_disable(struct bfa_s *bfa)
 {
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Disable");
-       bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
 
        bfa->queue_process = BFA_FALSE;
-       bfa_ioc_disable(&bfa->ioc);
+       bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
 }
 
-
 bfa_boolean_t
 bfa_iocfc_is_operational(struct bfa_s *bfa)
 {
-       return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
+       return bfa_ioc_is_operational(&bfa->ioc) &&
+               bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
 }
 
 /*
@@ -1567,16 +1852,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
        }
 }
 
-void
-bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
-{
-       if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
-               if (bfa->iocfc.cfgdone == BFA_TRUE)
-                       bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-                               bfa_iocfc_init_cb, bfa);
-       }
-}
-
 /*
  * Return the list of PCI vendor/device id lists supported by this
  * BFA instance.
index cb07c628b2f1856a3a26dadba052e2587edd3ae7..36756ce0e58f63dcb34736ef5834c7641bdcb323 100644 (file)
@@ -52,7 +52,7 @@ struct bfa_iocfc_fwcfg_s {
        u16             num_uf_bufs;    /*  unsolicited recv buffers    */
        u8              num_cqs;
        u8              fw_tick_res;    /*  FW clock resolution in ms */
-       u8              rsvd[2];
+       u8              rsvd[6];
 };
 #pragma pack()
 
index d4f951fe753eecb1fbccade0e96e5adde904b986..5d2a1307e5cea333a73356a56ff4a054af5009d2 100644 (file)
@@ -5717,6 +5717,8 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
 
        if (vport_drv->comp_del)
                complete(vport_drv->comp_del);
+       else
+               kfree(vport_drv);
 
        bfa_lps_delete(vport->lps);
 }
index 52628d5d3c9b0663486833db47eda3d449a60aa9..fe0463a1db0456993ac9c9de8de991d6e6ce9941 100644 (file)
@@ -2169,7 +2169,10 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
         * - MAX receive frame size
         */
        rport->cisc = plogi->csp.cisc;
-       rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+       if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz))
+               rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
+       else
+               rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz);
 
        bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
        bfa_trc(port->fcs, port->fabric->bb_credit);
index eca7ab78085bef2664d63d173e4a5510cdb61cfd..14e6284e48e4110b4c907b06dacb311c06c26f77 100644 (file)
@@ -88,7 +88,6 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
 static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
-static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
 static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
                                enum bfa_ioc_event_e event);
 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
@@ -97,7 +96,6 @@ static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 
-
 /*
  * IOC state machine definitions/declarations
  */
@@ -114,7 +112,6 @@ enum ioc_event {
        IOC_E_HWERROR           = 10,   /*  hardware error interrupt    */
        IOC_E_TIMEOUT           = 11,   /*  timeout                     */
        IOC_E_HWFAILED          = 12,   /*  PCI mapping failure notice  */
-       IOC_E_FWRSP_ACQ_ADDR    = 13,   /*  Acquiring address           */
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -127,7 +124,6 @@ bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
 
 static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -140,7 +136,6 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
        {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
-       {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
 };
 
 /*
@@ -371,17 +366,9 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                bfa_ioc_timer_stop(ioc);
-               bfa_ioc_check_attr_wwns(ioc);
-               bfa_ioc_hb_monitor(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;
 
-       case IOC_E_FWRSP_ACQ_ADDR:
-               bfa_ioc_timer_stop(ioc);
-               bfa_ioc_hb_monitor(ioc);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
-               break;
-
        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
@@ -406,51 +393,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
        }
 }
 
-/*
- * Acquiring address from fabric (entry function)
- */
-static void
-bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
-{
-}
-
-/*
- *     Acquiring address from the fabric
- */
-static void
-bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
-{
-       bfa_trc(ioc, event);
-
-       switch (event) {
-       case IOC_E_FWRSP_GETATTR:
-               bfa_ioc_check_attr_wwns(ioc);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
-               break;
-
-       case IOC_E_PFFAILED:
-       case IOC_E_HWERROR:
-               bfa_hb_timer_stop(ioc);
-       case IOC_E_HBFAIL:
-               ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
-               if (event != IOC_E_PFFAILED)
-                       bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
-               break;
-
-       case IOC_E_DISABLE:
-               bfa_hb_timer_stop(ioc);
-               bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
-               break;
-
-       case IOC_E_ENABLE:
-               break;
-
-       default:
-               bfa_sm_fault(ioc, event);
-       }
-}
-
 static void
 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
 {
@@ -458,6 +400,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
 
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+       bfa_ioc_hb_monitor(ioc);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
 }
@@ -738,26 +681,60 @@ static void
 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 {
        struct bfi_ioc_image_hdr_s      fwhdr;
-       u32     fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+       u32     r32, fwstate, pgnum, pgoff, loff = 0;
+       int     i;
+
+       /*
+        * Spin on init semaphore to serialize.
+        */
+       r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+       while (r32 & 0x1) {
+               udelay(20);
+               r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+       }
 
        /* h/w sem init */
-       if (fwstate == BFI_IOC_UNINIT)
+       fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+       if (fwstate == BFI_IOC_UNINIT) {
+               writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
+       }
 
        bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
 
-       if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+       if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+               writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
+       }
+
+       /*
+        * Clear fwver hdr
+        */
+       pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
+       pgoff = PSS_SMEM_PGOFF(loff);
+       writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
+
+       for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
+               bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
+               loff += sizeof(u32);
+       }
 
        bfa_trc(iocpf->ioc, fwstate);
-       bfa_trc(iocpf->ioc, fwhdr.exec);
+       bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
        writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
+       writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
 
        /*
-        * Try to lock and then unlock the semaphore.
+        * Unlock the hw semaphore. Should be here only once per boot.
         */
        readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
        writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+
+       /*
+        * unlock init semaphore.
+        */
+       writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
+
 sem_get:
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
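
The fwcheck entry hunk above serializes the one-time firmware-state cleanup across PCI functions by spinning on an init semaphore register and releasing it with a write of 1 on every exit path. A minimal sketch of that acquire/release idiom, assuming the same read-to-test, write-1-to-release semantics (the ioc_example_* name is made up and this is not part of the patch):

    #include <linux/io.h>
    #include <linux/delay.h>

    /* Illustrative sketch: run a one-time cleanup step under a hardware
     * init semaphore. Bit 0 is assumed to read as 1 while another
     * function holds the semaphore; writing 1 releases it.
     */
    static void ioc_example_serialized_cleanup(void __iomem *init_sem_reg)
    {
            /* acquire: poll until the busy bit clears */
            while (readl(init_sem_reg) & 0x1)
                    udelay(20);

            /* ... one-time cleanup (e.g. invalidating stale fw state) ... */

            /* release: writing 1 lets the other PCI function proceed */
            writel(1, init_sem_reg);
    }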
@@ -1707,11 +1684,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
        u32 i;
        u32 asicmode;
 
-       /*
-        * Initialize LMEM first before code download
-        */
-       bfa_ioc_lmem_init(ioc);
-
        bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
        fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
@@ -1999,6 +1971,12 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
        bfa_ioc_pll_init_asic(ioc);
 
        ioc->pllinit = BFA_TRUE;
+
+       /*
+        * Initialize LMEM
+        */
+       bfa_ioc_lmem_init(ioc);
+
        /*
         *  release semaphore.
         */
@@ -2122,10 +2100,6 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
                bfa_ioc_getattr_reply(ioc);
                break;
 
-       case BFI_IOC_I2H_ACQ_ADDR_REPLY:
-               bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
-               break;
-
        default:
                bfa_trc(ioc, msg->mh.msg_id);
                WARN_ON(1);
@@ -2415,15 +2389,6 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
-/*
- * Return TRUE if IOC is in acquiring address state
- */
-bfa_boolean_t
-bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
-{
-       return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
-}
-
 /*
  * return true if IOC firmware is different.
  */
@@ -2916,17 +2881,6 @@ bfa_ioc_recover(struct bfa_ioc_s *ioc)
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
-static void
-bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
-{
-       if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
-               return;
-       if (ioc->attr->nwwn == 0)
-               bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
-       if (ioc->attr->pwwn == 0)
-               bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
-}
-
 /*
  *  BFA IOC PF private functions
  */
@@ -4495,7 +4449,7 @@ bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
  */
 
 #define BFA_DIAG_MEMTEST_TOV   50000   /* memtest timeout in msec */
-#define BFA_DIAG_FWPING_TOV    1000    /* msec */
+#define CT2_BFA_DIAG_MEMTEST_TOV       (9*30*1000)  /* 4.5 min */
 
 /* IOC event handler */
 static void
@@ -4772,7 +4726,7 @@ diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
 }
 
 static void
-diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg)
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
 {
        bfa_trc(diag, diag->ledtest.lock);
        diag->ledtest.lock = BFA_FALSE;
@@ -4850,6 +4804,8 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
                u32 pattern, struct bfa_diag_memtest_result *result,
                bfa_cb_diag_t cbfn, void *cbarg)
 {
+       u32     memtest_tov;
+
        bfa_trc(diag, pattern);
 
        if (!bfa_ioc_adapter_is_disabled(diag->ioc))
@@ -4869,8 +4825,10 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
        /* download memtest code and take LPU0 out of reset */
        bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
 
+       memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
+                      CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
        bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
-                       bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
+                       bfa_diag_memtest_done, diag, memtest_tov);
        diag->timer_active = 1;
        return BFA_STATUS_OK;
 }
@@ -5641,24 +5599,27 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
        case BFA_DCONF_SM_INIT:
                if (dconf->min_cfg) {
                        bfa_trc(dconf->bfa, dconf->min_cfg);
+                       bfa_fsm_send_event(&dconf->bfa->iocfc,
+                                       IOCFC_E_DCONF_DONE);
                        return;
                }
                bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
-               dconf->flashdone = BFA_FALSE;
-               bfa_trc(dconf->bfa, dconf->flashdone);
+               bfa_timer_start(dconf->bfa, &dconf->timer,
+                       bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
                bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
                                        BFA_FLASH_PART_DRV, dconf->instance,
                                        dconf->dconf,
                                        sizeof(struct bfa_dconf_s), 0,
                                        bfa_dconf_init_cb, dconf->bfa);
                if (bfa_status != BFA_STATUS_OK) {
+                       bfa_timer_stop(&dconf->timer);
                        bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
                        bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
                        return;
                }
                break;
        case BFA_DCONF_SM_EXIT:
-               dconf->flashdone = BFA_TRUE;
+               bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
        case BFA_DCONF_SM_IOCDISABLE:
        case BFA_DCONF_SM_WR:
        case BFA_DCONF_SM_FLASH_COMP:
@@ -5679,15 +5640,20 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
 
        switch (event) {
        case BFA_DCONF_SM_FLASH_COMP:
+               bfa_timer_stop(&dconf->timer);
                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
                break;
        case BFA_DCONF_SM_TIMEOUT:
                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+               bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
                break;
        case BFA_DCONF_SM_EXIT:
-               dconf->flashdone = BFA_TRUE;
-               bfa_trc(dconf->bfa, dconf->flashdone);
+               bfa_timer_stop(&dconf->timer);
+               bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+               bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
+               break;
        case BFA_DCONF_SM_IOCDISABLE:
+               bfa_timer_stop(&dconf->timer);
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
                break;
        default:
@@ -5710,9 +5676,8 @@ bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
                break;
        case BFA_DCONF_SM_EXIT:
-               dconf->flashdone = BFA_TRUE;
-               bfa_trc(dconf->bfa, dconf->flashdone);
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+               bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
                break;
        case BFA_DCONF_SM_INIT:
        case BFA_DCONF_SM_IOCDISABLE:
@@ -5774,9 +5739,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
                bfa_timer_stop(&dconf->timer);
        case BFA_DCONF_SM_TIMEOUT:
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
-               dconf->flashdone = BFA_TRUE;
-               bfa_trc(dconf->bfa, dconf->flashdone);
-               bfa_ioc_disable(&dconf->bfa->ioc);
+               bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
                break;
        default:
                bfa_sm_fault(dconf->bfa, event);
@@ -5823,8 +5786,8 @@ bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
                break;
        case BFA_DCONF_SM_EXIT:
-               dconf->flashdone = BFA_TRUE;
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+               bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
                break;
        case BFA_DCONF_SM_IOCDISABLE:
                break;
@@ -5865,11 +5828,6 @@ bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        if (cfg->drvcfg.min_cfg) {
                bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
                dconf->min_cfg = BFA_TRUE;
-               /*
-                * Set the flashdone flag to TRUE explicitly as no flash
-                * write will happen in min_cfg mode.
-                */
-               dconf->flashdone = BFA_TRUE;
        } else {
                dconf->min_cfg = BFA_FALSE;
                bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
@@ -5885,9 +5843,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
        struct bfa_s *bfa = arg;
        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
 
-       dconf->flashdone = BFA_TRUE;
-       bfa_trc(bfa, dconf->flashdone);
-       bfa_iocfc_cb_dconf_modinit(bfa, status);
+       bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
        if (status == BFA_STATUS_OK) {
                bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
                if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5895,7 +5851,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
                if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
                        dconf->dconf->hdr.version = BFI_DCONF_VERSION;
        }
-       bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+       bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
 }
 
 void
@@ -5977,7 +5933,5 @@ void
 bfa_dconf_modexit(struct bfa_s *bfa)
 {
        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
-       BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
-       bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
        bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
 }
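
The IOCFC/dconf hunks above drop the flashdone flag and the iocfc.action bookkeeping in favour of explicit events (IOCFC_E_DCONF_DONE, IOCFC_E_IOC_FAILED, and so on) sent into the new IOCFC state machine. As a rough illustration of the function-pointer FSM convention this relies on (a generic sketch, not the driver's actual bfa_fsm_send_event()/bfa_fsm_set_state() helpers), an event is simply a call through the current state handler:

    /* Generic sketch of a function-pointer state machine of the kind used
     * here: the current state is a handler, and "sending an event" just
     * invokes it. All names below are illustrative, not driver symbols.
     */
    struct example_fsm;
    typedef void (*example_state_fn)(struct example_fsm *fsm, int event);

    struct example_fsm {
            example_state_fn sm;            /* current state handler */
    };

    enum { EX_E_DCONF_DONE = 1, EX_E_IOC_FAILED = 2 };

    static void example_sm_operational(struct example_fsm *fsm, int event)
    {
            /* handle events valid in the operational state ... */
    }

    static void example_sm_initing(struct example_fsm *fsm, int event)
    {
            switch (event) {
            case EX_E_DCONF_DONE:           /* flash read finished */
                    fsm->sm = example_sm_operational;
                    break;
            case EX_E_IOC_FAILED:           /* would move to a failed state */
            default:
                    break;
            }
    }

    static inline void example_fsm_send_event(struct example_fsm *fsm, int event)
    {
            fsm->sm(fsm, event);            /* dispatch through current state */
    }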
index 546d46b371017102136b0d5b87d49347b424755c..1a99d4b5b50feec22d97c2e87430c317589877e5 100644 (file)
@@ -372,6 +372,22 @@ struct bfa_cb_qe_s {
        void            *cbarg;
 };
 
+/*
+ * IOCFC state machine definitions/declarations
+ */
+enum iocfc_event {
+       IOCFC_E_INIT            = 1,    /* IOCFC init request           */
+       IOCFC_E_START           = 2,    /* IOCFC mod start request      */
+       IOCFC_E_STOP            = 3,    /* IOCFC stop request           */
+       IOCFC_E_ENABLE          = 4,    /* IOCFC enable request         */
+       IOCFC_E_DISABLE         = 5,    /* IOCFC disable request        */
+       IOCFC_E_IOC_ENABLED     = 6,    /* IOC enabled message          */
+       IOCFC_E_IOC_DISABLED    = 7,    /* IOC disabled message         */
+       IOCFC_E_IOC_FAILED      = 8,    /* failure notice by IOC sm     */
+       IOCFC_E_DCONF_DONE      = 9,    /* dconf read/write done        */
+       IOCFC_E_CFG_DONE        = 10,   /* IOCFC config complete        */
+};
+
 /*
  * ASIC block configuration related
  */
@@ -706,7 +722,6 @@ struct bfa_dconf_s {
 struct bfa_dconf_mod_s {
        bfa_sm_t                sm;
        u8                      instance;
-       bfa_boolean_t           flashdone;
        bfa_boolean_t           read_data_valid;
        bfa_boolean_t           min_cfg;
        struct bfa_timer_s      timer;
index d1b8f0caaa79ed3f301d6c8b509339fee76d0169..2eb0c6a2938d68a9276c4f19039445cc3ec93fdd 100644 (file)
@@ -786,17 +786,73 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
 }
 
 #define CT2_NFC_MAX_DELAY      1000
+#define CT2_NFC_VER_VALID      0x143
+#define BFA_IOC_PLL_POLL       1000000
+
+static bfa_boolean_t
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+       u32     r32;
+
+       r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+       if (r32 & __NFC_CONTROLLER_HALTED)
+               return BFA_TRUE;
+
+       return BFA_FALSE;
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+       u32     r32;
+       int i;
+
+       writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+       for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+               r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+               if (!(r32 & __NFC_CONTROLLER_HALTED))
+                       return;
+               udelay(1000);
+       }
+       WARN_ON(1);
+}
+
 bfa_status_t
 bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 {
-       u32     wgn, r32;
-       int i;
+       u32 wgn, r32, nfc_ver, i;
 
-       /*
-        * Initialize PLL if not already done by NFC
-        */
        wgn = readl(rb + CT2_WGN_STATUS);
-       if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+       nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+       if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
+           (nfc_ver >= CT2_NFC_VER_VALID)) {
+               if (bfa_ioc_ct2_nfc_halted(rb))
+                       bfa_ioc_ct2_nfc_resume(rb);
+
+               writel(__RESET_AND_START_SCLK_LCLK_PLLS,
+                      rb + CT2_CSI_FW_CTL_SET_REG);
+
+               for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+                       r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+                       if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
+                               break;
+               }
+
+               WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+
+               for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+                       r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+                       if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
+                               break;
+               }
+
+               WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+               udelay(1000);
+
+               r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+               WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+       } else {
                writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
                for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -804,57 +860,62 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
                                break;
                        udelay(1000);
                }
-       }
 
-       /*
-        * Mask the interrupts and clear any
-        * pending interrupts.
-        */
-       writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
-       writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
-
-       r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-       if (r32 == 1) {
-               writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
-               readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+               bfa_ioc_ct2_mac_reset(rb);
+               bfa_ioc_ct2_sclk_init(rb);
+               bfa_ioc_ct2_lclk_init(rb);
+
+               /*
+                * release soft reset on s_clk & l_clk
+                */
+               r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
+               writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+                      (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+               /*
+                * release soft reset on s_clk & l_clk
+                */
+               r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+               writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+                     (rb + CT2_APP_PLL_LCLK_CTL_REG));
        }
-       r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-       if (r32 == 1) {
-               writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
-               readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-       }
-
-       bfa_ioc_ct2_mac_reset(rb);
-       bfa_ioc_ct2_sclk_init(rb);
-       bfa_ioc_ct2_lclk_init(rb);
-
-       /*
-        * release soft reset on s_clk & l_clk
-        */
-       r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
-       writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
-               (rb + CT2_APP_PLL_SCLK_CTL_REG));
-
-       /*
-        * release soft reset on s_clk & l_clk
-        */
-       r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
-       writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
-               (rb + CT2_APP_PLL_LCLK_CTL_REG));
 
        /*
         * Announce flash device presence, if flash was corrupted.
         */
        if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
-               r32 = readl((rb + PSS_GPIO_OUT_REG));
+               r32 = readl(rb + PSS_GPIO_OUT_REG);
                writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
-               r32 = readl((rb + PSS_GPIO_OE_REG));
+               r32 = readl(rb + PSS_GPIO_OE_REG);
                writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
        }
 
+       /*
+        * Mask the interrupts and clear any
+        * pending interrupts.
+        */
+       writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+       writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+       /* For first time initialization, no need to clear interrupts */
+       r32 = readl(rb + HOST_SEM5_REG);
+       if (r32 & 0x1) {
+               r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
+               if (r32 == 1) {
+                       writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
+                       readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+               }
+               r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+               if (r32 == 1) {
+                       writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
+                       readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
+               }
+       }
+
        bfa_ioc_ct2_mem_init(rb);
 
-       writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
-       writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+       writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
+       writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
+
        return BFA_STATUS_OK;
 }
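
The CT2 PLL init hunk above introduces bounded polling loops (BFA_IOC_PLL_POLL iterations) on CT2_APP_PLL_LCLK_CTL_REG, with a WARN_ON() if the expected bit state never shows up. A compact sketch of that bounded-poll idiom, with made-up example_* names and a poll budget assumed to mirror BFA_IOC_PLL_POLL (not part of the patch):

    #include <linux/io.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    #define EXAMPLE_POLL_MAX 1000000        /* assumed to mirror BFA_IOC_PLL_POLL */

    /* Illustrative sketch: poll a register until a bit reaches the wanted
     * state, give up after a fixed iteration budget, and flag the surprise.
     */
    static u32 example_poll_bit(void __iomem *reg, u32 mask, bool want_set)
    {
            u32 r32 = 0;
            int i;

            for (i = 0; i < EXAMPLE_POLL_MAX; i++) {
                    r32 = readl(reg);
                    if (!!(r32 & mask) == want_set)
                            break;
            }
            WARN_ON(!!(r32 & mask) != want_set);    /* budget exhausted */
            return r32;
    }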
index aa8a0eaf91f9c6b7a2e846dc51048726dd10c5a2..2e856e6710f7d10b2e7083771cdac865bfd15404 100644 (file)
@@ -1280,6 +1280,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
        switch (event) {
        case BFA_LPS_SM_RESUME:
                bfa_sm_set_state(lps, bfa_lps_sm_login);
+               bfa_lps_send_login(lps);
                break;
 
        case BFA_LPS_SM_OFFLINE:
@@ -1578,7 +1579,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
                break;
 
        case BFA_STATUS_VPORT_MAX:
-               if (!rsp->ext_status)
+               if (rsp->ext_status)
                        bfa_lps_no_res(lps, rsp->ext_status);
                break;
 
@@ -3083,33 +3084,6 @@ bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
        bfa_trc(fcport->bfa, fcport->nwwn);
 }
 
-static void
-bfa_fcport_send_txcredit(void *port_cbarg)
-{
-
-       struct bfa_fcport_s *fcport = port_cbarg;
-       struct bfi_fcport_set_svc_params_req_s *m;
-
-       /*
-        * check for room in queue to send request now
-        */
-       m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
-       if (!m) {
-               bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
-               return;
-       }
-
-       bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
-                       bfa_fn_lpu(fcport->bfa));
-       m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
-       m->bb_scn = fcport->cfg.bb_scn;
-
-       /*
-        * queue I/O message to firmware
-        */
-       bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
-}
-
 static void
 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
        struct bfa_qos_stats_s *s)
@@ -3602,26 +3576,24 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
                return BFA_STATUS_UNSUPP_SPEED;
        }
 
-       /* For Mezz card, port speed entered needs to be checked */
-       if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
-               if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
-                       /* For CT2, 1G is not supported */
-                       if ((speed == BFA_PORT_SPEED_1GBPS) &&
-                           (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
-                               return BFA_STATUS_UNSUPP_SPEED;
+       /* Port speed entered needs to be checked */
+       if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+               /* For CT2, 1G is not supported */
+               if ((speed == BFA_PORT_SPEED_1GBPS) &&
+                   (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+                       return BFA_STATUS_UNSUPP_SPEED;
 
-                       /* Already checked for Auto Speed and Max Speed supp */
-                       if (!(speed == BFA_PORT_SPEED_1GBPS ||
-                             speed == BFA_PORT_SPEED_2GBPS ||
-                             speed == BFA_PORT_SPEED_4GBPS ||
-                             speed == BFA_PORT_SPEED_8GBPS ||
-                             speed == BFA_PORT_SPEED_16GBPS ||
-                             speed == BFA_PORT_SPEED_AUTO))
-                               return BFA_STATUS_UNSUPP_SPEED;
-               } else {
-                       if (speed != BFA_PORT_SPEED_10GBPS)
-                               return BFA_STATUS_UNSUPP_SPEED;
-               }
+               /* Already checked for Auto Speed and Max Speed supp */
+               if (!(speed == BFA_PORT_SPEED_1GBPS ||
+                     speed == BFA_PORT_SPEED_2GBPS ||
+                     speed == BFA_PORT_SPEED_4GBPS ||
+                     speed == BFA_PORT_SPEED_8GBPS ||
+                     speed == BFA_PORT_SPEED_16GBPS ||
+                     speed == BFA_PORT_SPEED_AUTO))
+                       return BFA_STATUS_UNSUPP_SPEED;
+       } else {
+               if (speed != BFA_PORT_SPEED_10GBPS)
+                       return BFA_STATUS_UNSUPP_SPEED;
        }
 
        fcport->cfg.speed = speed;
@@ -3765,7 +3737,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
        fcport->cfg.bb_scn = bb_scn;
        if (bb_scn)
                fcport->bbsc_op_state = BFA_TRUE;
-       bfa_fcport_send_txcredit(fcport);
 }
 
 /*
@@ -3825,8 +3796,6 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
                        attr->port_state = BFA_PORT_ST_IOCDIS;
                else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
                        attr->port_state = BFA_PORT_ST_FWMISMATCH;
-               else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
-                       attr->port_state = BFA_PORT_ST_ACQ_ADDR;
        }
 
        /* FCoE vlan */
index b52cbb6bcd5a3b6b4c7623753df79630892686ea..f300675646395b8702d8879cde778cddb08755e1 100644 (file)
@@ -663,10 +663,6 @@ void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
 
 /* FAA specific APIs */
-bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
-                       bfa_cb_iocfc_t cbfn, void *cbarg);
-bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
-                       bfa_cb_iocfc_t cbfn, void *cbarg);
 bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
                        bfa_cb_iocfc_t cbfn, void *cbarg);
 
index 1938fe0473e99b9aa24a5ee6e50a4e6fe9e9ac4d..7b1ecd2b3ffe8b382689429c6e91d01ae04e45a1 100644 (file)
@@ -442,6 +442,43 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
        return status;
 }
 
+int
+bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+       struct bfad_im_port_s *im_port =
+                       (struct bfad_im_port_s *) shost->hostdata[0];
+       struct bfad_s *bfad = im_port->bfad;
+       struct bfad_hal_comp fcomp;
+       unsigned long flags;
+       uint32_t status;
+
+       init_completion(&fcomp.comp);
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       status = bfa_port_disable(&bfad->bfa.modules.port,
+                                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+       if (status != BFA_STATUS_OK)
+               return -EIO;
+
+       wait_for_completion(&fcomp.comp);
+       if (fcomp.status != BFA_STATUS_OK)
+               return -EIO;
+
+       spin_lock_irqsave(&bfad->bfad_lock, flags);
+       status = bfa_port_enable(&bfad->bfa.modules.port,
+                                       bfad_hcb_comp, &fcomp);
+       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+       if (status != BFA_STATUS_OK)
+               return -EIO;
+
+       wait_for_completion(&fcomp.comp);
+       if (fcomp.status != BFA_STATUS_OK)
+               return -EIO;
+
+       return 0;
+}
+
 static int
 bfad_im_vport_delete(struct fc_vport *fc_vport)
 {
@@ -457,8 +494,11 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
        unsigned long flags;
        struct completion fcomp;
 
-       if (im_port->flags & BFAD_PORT_DELETE)
-               goto free_scsi_host;
+       if (im_port->flags & BFAD_PORT_DELETE) {
+               bfad_scsi_host_free(bfad, im_port);
+               list_del(&vport->list_entry);
+               return 0;
+       }
 
        port = im_port->port;
 
@@ -489,7 +529,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
 
        wait_for_completion(vport->comp_del);
 
-free_scsi_host:
        bfad_scsi_host_free(bfad, im_port);
        list_del(&vport->list_entry);
        kfree(vport);
@@ -579,7 +618,7 @@ struct fc_function_template bfad_im_fc_function_template = {
        .show_rport_dev_loss_tmo = 1,
        .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
        .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
-
+       .issue_fc_host_lip = bfad_im_issue_fc_host_lip,
        .vport_create = bfad_im_vport_create,
        .vport_delete = bfad_im_vport_delete,
        .vport_disable = bfad_im_vport_disable,
index 8005c6c5a080efa3b9e2b2b2b6403a1323cee9e1..e1f4b10df42aaf14cb9d069c9ddc372393c473c3 100644 (file)
@@ -1287,50 +1287,6 @@ out:
        return 0;
 }
 
-int
-bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
-{
-       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
-       unsigned long   flags;
-       struct bfad_hal_comp    fcomp;
-
-       init_completion(&fcomp.comp);
-       iocmd->status = BFA_STATUS_OK;
-       spin_lock_irqsave(&bfad->bfad_lock, flags);
-       iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
-       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
-       if (iocmd->status != BFA_STATUS_OK)
-               goto out;
-
-       wait_for_completion(&fcomp.comp);
-       iocmd->status = fcomp.status;
-out:
-       return 0;
-}
-
-int
-bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
-{
-       struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
-       unsigned long   flags;
-       struct bfad_hal_comp    fcomp;
-
-       init_completion(&fcomp.comp);
-       iocmd->status = BFA_STATUS_OK;
-       spin_lock_irqsave(&bfad->bfad_lock, flags);
-       iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
-       spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
-       if (iocmd->status != BFA_STATUS_OK)
-               goto out;
-
-       wait_for_completion(&fcomp.comp);
-       iocmd->status = fcomp.status;
-out:
-       return 0;
-}
-
 int
 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
 {
@@ -1918,6 +1874,7 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
        struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
        void    *iocmd_bufptr;
        unsigned long   flags;
+       u32 offset;
 
        if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
                        BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
@@ -1935,8 +1892,10 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
 
        iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
        spin_lock_irqsave(&bfad->bfad_lock, flags);
+       offset = iocmd->offset;
        iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
-                               (u32 *)&iocmd->offset, &iocmd->bufsz);
+                               &offset, &iocmd->bufsz);
+       iocmd->offset = offset;
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 out:
        return 0;
@@ -2633,12 +2592,6 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
        case IOCMD_FLASH_DISABLE_OPTROM:
                rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
                break;
-       case IOCMD_FAA_ENABLE:
-               rc = bfad_iocmd_faa_enable(bfad, iocmd);
-               break;
-       case IOCMD_FAA_DISABLE:
-               rc = bfad_iocmd_faa_disable(bfad, iocmd);
-               break;
        case IOCMD_FAA_QUERY:
                rc = bfad_iocmd_faa_query(bfad, iocmd);
                break;
@@ -2809,9 +2762,16 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
        struct bfad_im_port_s *im_port =
                        (struct bfad_im_port_s *) job->shost->hostdata[0];
        struct bfad_s *bfad = im_port->bfad;
+       struct request_queue *request_q = job->req->q;
        void *payload_kbuf;
        int rc = -EINVAL;
 
+       /*
+        * Set the BSG device request_queue size to 256 to support
+        * payloads larger than 512*1024K bytes.
+        */
+       blk_queue_max_segments(request_q, 256);
+
        /* Allocate a temp buffer to hold the passed in user space command */
        payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
        if (!payload_kbuf) {
index e859adb9aa9e807ae779e2ddb5444ec31bc7400b..17ad67283130d1f1528ade5e97893766e6534eac 100644 (file)
@@ -83,8 +83,6 @@ enum {
        IOCMD_PORT_CFG_MODE,
        IOCMD_FLASH_ENABLE_OPTROM,
        IOCMD_FLASH_DISABLE_OPTROM,
-       IOCMD_FAA_ENABLE,
-       IOCMD_FAA_DISABLE,
        IOCMD_FAA_QUERY,
        IOCMD_CEE_GET_ATTR,
        IOCMD_CEE_GET_STATS,
index dc5b9d99c4505f1356f7e0bed16d05bd992285ef..7f74f1d19124a95ea4f39f4c1110bb4fbf93e50c 100644 (file)
@@ -56,7 +56,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.0.2.2"
+#define BFAD_DRIVER_VERSION    "3.0.23.0"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
index 0d9f1fb50db0c0ae74ea3e5e7a529b0594f436a9..d4220e13cafa09a4d8e90193e75514f691f596be 100644 (file)
@@ -28,17 +28,15 @@ enum bfi_iocfc_h2i_msgs {
        BFI_IOCFC_H2I_CFG_REQ           = 1,
        BFI_IOCFC_H2I_SET_INTR_REQ      = 2,
        BFI_IOCFC_H2I_UPDATEQ_REQ       = 3,
-       BFI_IOCFC_H2I_FAA_ENABLE_REQ    = 4,
-       BFI_IOCFC_H2I_FAA_DISABLE_REQ   = 5,
-       BFI_IOCFC_H2I_FAA_QUERY_REQ     = 6,
+       BFI_IOCFC_H2I_FAA_QUERY_REQ     = 4,
+       BFI_IOCFC_H2I_ADDR_REQ          = 5,
 };
 
 enum bfi_iocfc_i2h_msgs {
        BFI_IOCFC_I2H_CFG_REPLY         = BFA_I2HM(1),
        BFI_IOCFC_I2H_UPDATEQ_RSP       = BFA_I2HM(3),
-       BFI_IOCFC_I2H_FAA_ENABLE_RSP    = BFA_I2HM(4),
-       BFI_IOCFC_I2H_FAA_DISABLE_RSP   = BFA_I2HM(5),
-       BFI_IOCFC_I2H_FAA_QUERY_RSP     = BFA_I2HM(6),
+       BFI_IOCFC_I2H_FAA_QUERY_RSP     = BFA_I2HM(4),
+       BFI_IOCFC_I2H_ADDR_MSG          = BFA_I2HM(5),
 };
 
 struct bfi_iocfc_cfg_s {
@@ -184,6 +182,13 @@ struct bfi_faa_en_dis_s {
        struct bfi_mhdr_s mh;   /* common msg header    */
 };
 
+struct bfi_faa_addr_msg_s {
+       struct  bfi_mhdr_s mh;  /* common msg header    */
+       u8      rsvd[4];
+       wwn_t   pwwn;           /* Fabric acquired PWWN */
+       wwn_t   nwwn;           /* Fabric acquired NWWN */
+};
+
 /*
  * BFI_IOCFC_H2I_FAA_QUERY_REQ message
  */
index d892064b64a8b42f848e3d8b5a134348075215b3..ed5f159e18671383569522922d6cb9134849c0e2 100644 (file)
@@ -335,11 +335,17 @@ enum {
 #define __PMM_1T_PNDB_P                        0x00000002
 #define CT2_PMM_1T_CONTROL_REG_P1      0x00023c1c
 #define CT2_WGN_STATUS                 0x00014990
+#define __A2T_AHB_LOAD                 0x00000800
 #define __WGN_READY                    0x00000400
 #define __GLBL_PF_VF_CFG_RDY           0x00000200
+#define CT2_NFC_CSR_CLR_REG            0x00027420
 #define CT2_NFC_CSR_SET_REG            0x00027424
 #define __HALT_NFC_CONTROLLER          0x00000002
 #define __NFC_CONTROLLER_HALTED                0x00001000
+#define CT2_RSC_GPR15_REG              0x0002765c
+#define CT2_CSI_FW_CTL_REG             0x00027080
+#define CT2_CSI_FW_CTL_SET_REG         0x00027088
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
 
 #define CT2_CSI_MAC0_CONTROL_REG       0x000270d0
 #define __CSI_MAC_RESET                        0x00000010
index abd72a01856d514ddd0918730e59d998edd9f487..c1c6a92a0b989737c9f8a15b81e8e24cd86b758e 100644 (file)
@@ -439,13 +439,13 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
        fr->fr_dev = lport;
 
        bg = &bnx2fc_global;
-       spin_lock_bh(&bg->fcoe_rx_list.lock);
+       spin_lock(&bg->fcoe_rx_list.lock);
 
        __skb_queue_tail(&bg->fcoe_rx_list, skb);
        if (bg->fcoe_rx_list.qlen == 1)
                wake_up_process(bg->thread);
 
-       spin_unlock_bh(&bg->fcoe_rx_list.lock);
+       spin_unlock(&bg->fcoe_rx_list.lock);
 
        return 0;
 err:
index ae7d15c44e2aec40f97e0ad8dd9b4d4716141c73..335e85192807a4b75e7aa229cc2b2b6f396e0b9b 100644 (file)
@@ -1436,7 +1436,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
                goto err;
 
        fps = &per_cpu(fcoe_percpu, cpu);
-       spin_lock_bh(&fps->fcoe_rx_list.lock);
+       spin_lock(&fps->fcoe_rx_list.lock);
        if (unlikely(!fps->thread)) {
                /*
                 * The targeted CPU is not ready, let's target
@@ -1447,12 +1447,12 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
                                "ready for incoming skb- using first online "
                                "CPU.\n");
 
-               spin_unlock_bh(&fps->fcoe_rx_list.lock);
+               spin_unlock(&fps->fcoe_rx_list.lock);
                cpu = cpumask_first(cpu_online_mask);
                fps = &per_cpu(fcoe_percpu, cpu);
-               spin_lock_bh(&fps->fcoe_rx_list.lock);
+               spin_lock(&fps->fcoe_rx_list.lock);
                if (!fps->thread) {
-                       spin_unlock_bh(&fps->fcoe_rx_list.lock);
+                       spin_unlock(&fps->fcoe_rx_list.lock);
                        goto err;
                }
        }
@@ -1463,24 +1463,17 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
         * so we're free to queue skbs into its queue.
         */
 
-       /* If this is a SCSI-FCP frame, and this is already executing on the
-        * correct CPU, and the queue for this CPU is empty, then go ahead
-        * and process the frame directly in the softirq context.
-        * This lets us process completions without context switching from the
-        * NET_RX softirq, to our receive processing thread, and then back to
-        * BLOCK softirq context.
+       /*
+        * Note: We used to have a set of conditions under which we would
+        * call fcoe_recv_frame directly, rather than queuing to the rx list
+        * as it could save a few cycles, but doing so is prohibited, as
+        * fcoe_recv_frame has several paths that may sleep, which is forbidden
+        * in softirq context.
         */
-       if (fh->fh_type == FC_TYPE_FCP &&
-           cpu == smp_processor_id() &&
-           skb_queue_empty(&fps->fcoe_rx_list)) {
-               spin_unlock_bh(&fps->fcoe_rx_list.lock);
-               fcoe_recv_frame(skb);
-       } else {
-               __skb_queue_tail(&fps->fcoe_rx_list, skb);
-               if (fps->fcoe_rx_list.qlen == 1)
-                       wake_up_process(fps->thread);
-               spin_unlock_bh(&fps->fcoe_rx_list.lock);
-       }
+       __skb_queue_tail(&fps->fcoe_rx_list, skb);
+       if (fps->thread->state == TASK_INTERRUPTIBLE)
+               wake_up_process(fps->thread);
+       spin_unlock(&fps->fcoe_rx_list.lock);
 
        return 0;
 err:
@@ -1797,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg)
 {
        struct fcoe_percpu_s *p = arg;
        struct sk_buff *skb;
+       struct sk_buff_head tmp;
+
+       skb_queue_head_init(&tmp);
 
        set_user_nice(current, -20);
 
        while (!kthread_should_stop()) {
 
                spin_lock_bh(&p->fcoe_rx_list.lock);
-               while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+               skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+               spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+               while ((skb = __skb_dequeue(&tmp)) != NULL)
+                       fcoe_recv_frame(skb);
+
+               spin_lock_bh(&p->fcoe_rx_list.lock);
+               if (!skb_queue_len(&p->fcoe_rx_list)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_bh(&p->fcoe_rx_list.lock);
                        schedule();
                        set_current_state(TASK_RUNNING);
-                       if (kthread_should_stop())
-                               return 0;
-                       spin_lock_bh(&p->fcoe_rx_list.lock);
-               }
-               spin_unlock_bh(&p->fcoe_rx_list.lock);
-               fcoe_recv_frame(skb);
+               } else
+                       spin_unlock_bh(&p->fcoe_rx_list.lock);
        }
        return 0;
 }
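
The receive-thread hunk above stops dequeuing one skb at a time under fcoe_rx_list.lock and instead splices everything pending onto a private list, then processes it with the lock dropped (fcoe_recv_frame may sleep). A minimal sketch of that splice-then-drain pattern using the standard skbuff queue helpers (the example_* names are made up; this is not the patch's code):

    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    /* Illustrative sketch: move everything queued so far onto a local
     * list while holding the lock, then process it without the lock.
     */
    static void example_drain_rx_list(struct sk_buff_head *rx_list,
                                      void (*handle)(struct sk_buff *))
    {
            struct sk_buff_head tmp;
            struct sk_buff *skb;

            skb_queue_head_init(&tmp);

            spin_lock_bh(&rx_list->lock);
            skb_queue_splice_init(rx_list, &tmp);   /* rx_list is now empty */
            spin_unlock_bh(&rx_list->lock);

            while ((skb = __skb_dequeue(&tmp)) != NULL)
                    handle(skb);                    /* may sleep; lock not held */
    }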
@@ -2187,8 +2186,12 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        /* start FIP Discovery and FLOGI */
        lport->boot_time = jiffies;
        fc_fabric_login(lport);
-       if (!fcoe_link_ok(lport))
+       if (!fcoe_link_ok(lport)) {
+               rtnl_unlock();
                fcoe_ctlr_link_up(&fcoe->ctlr);
+               mutex_unlock(&fcoe_config_mutex);
+               return rc;
+       }
 
 out_nodev:
        rtnl_unlock();
@@ -2261,31 +2264,14 @@ static int fcoe_link_ok(struct fc_lport *lport)
 static void fcoe_percpu_clean(struct fc_lport *lport)
 {
        struct fcoe_percpu_s *pp;
-       struct fcoe_rcv_info *fr;
-       struct sk_buff_head *list;
-       struct sk_buff *skb, *next;
-       struct sk_buff *head;
+       struct sk_buff *skb;
        unsigned int cpu;
 
        for_each_possible_cpu(cpu) {
                pp = &per_cpu(fcoe_percpu, cpu);
-               spin_lock_bh(&pp->fcoe_rx_list.lock);
-               list = &pp->fcoe_rx_list;
-               head = list->next;
-               for (skb = head; skb != (struct sk_buff *)list;
-                    skb = next) {
-                       next = skb->next;
-                       fr = fcoe_dev_from_skb(skb);
-                       if (fr->fr_dev == lport) {
-                               __skb_unlink(skb, list);
-                               kfree_skb(skb);
-                       }
-               }
 
-               if (!pp->thread || !cpu_online(cpu)) {
-                       spin_unlock_bh(&pp->fcoe_rx_list.lock);
+               if (!pp->thread || !cpu_online(cpu))
                        continue;
-               }
 
                skb = dev_alloc_skb(0);
                if (!skb) {
@@ -2294,6 +2280,7 @@ static void fcoe_percpu_clean(struct fc_lport *lport)
                }
                skb->destructor = fcoe_percpu_flush_done;
 
+               spin_lock_bh(&pp->fcoe_rx_list.lock);
                __skb_queue_tail(&pp->fcoe_rx_list, skb);
                if (pp->fcoe_rx_list.qlen == 1)
                        wake_up_process(pp->thread);
index e7522dcc296eb8bb9c425da842f2a159d05862f2..249a106888d9309ab44ff0f4d2cc82d1b0ff2dc8 100644 (file)
@@ -242,7 +242,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
                printk(KERN_INFO "libfcoe: host%d: FIP selected "
                       "Fibre-Channel Forwarder MAC %pM\n",
                       fip->lp->host->host_no, sel->fcf_mac);
-               memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
+               memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN);
                fip->map_dest = 0;
        }
 unlock:
@@ -824,6 +824,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
                        memcpy(fcf->fcf_mac,
                               ((struct fip_mac_desc *)desc)->fd_mac,
                               ETH_ALEN);
+                       memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN);
                        if (!is_valid_ether_addr(fcf->fcf_mac)) {
                                LIBFCOE_FIP_DBG(fip,
                                        "Invalid MAC addr %pM in FIP adv\n",
@@ -1013,6 +1014,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
        struct fip_desc *desc;
        struct fip_encaps *els;
        struct fcoe_dev_stats *stats;
+       struct fcoe_fcf *sel;
        enum fip_desc_type els_dtype = 0;
        u8 els_op;
        u8 sub;
@@ -1040,7 +1042,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
                        goto drop;
                /* Drop ELS if there are duplicate critical descriptors */
                if (desc->fip_dtype < 32) {
-                       if (desc_mask & 1U << desc->fip_dtype) {
+                       if ((desc->fip_dtype != FIP_DT_MAC) &&
+                           (desc_mask & 1U << desc->fip_dtype)) {
                                LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
                                                "Descriptors in FIP ELS\n");
                                goto drop;
@@ -1049,17 +1052,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
                }
                switch (desc->fip_dtype) {
                case FIP_DT_MAC:
+                       sel = fip->sel_fcf;
                        if (desc_cnt == 1) {
                                LIBFCOE_FIP_DBG(fip, "FIP descriptors "
                                                "received out of order\n");
                                goto drop;
                        }
+                       /*
+                        * with the first MAC (granted_mac) being the FPMA, and the
+                        * second one (fcoe_mac) used as the destination address
+                        * second one(fcoe_mac) is used as destination address
+                        * for sending/receiving FCoE packets. FIP traffic is
+                        * sent using fip_mac. For regular switches, both
+                        * fip_mac and fcoe_mac would be the same.
+                        */
+                       if (desc_cnt == 2)
+                               memcpy(granted_mac,
+                                      ((struct fip_mac_desc *)desc)->fd_mac,
+                                      ETH_ALEN);
 
                        if (dlen != sizeof(struct fip_mac_desc))
                                goto len_err;
-                       memcpy(granted_mac,
-                              ((struct fip_mac_desc *)desc)->fd_mac,
-                              ETH_ALEN);
+
+                       if ((desc_cnt == 3) && (sel))
+                               memcpy(sel->fcoe_mac,
+                                      ((struct fip_mac_desc *)desc)->fd_mac,
+                                      ETH_ALEN);
                        break;
                case FIP_DT_FLOGI:
                case FIP_DT_FDISC:
@@ -1273,11 +1291,6 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
                 * No Vx_Port description. Clear all NPIV ports,
                 * followed by physical port
                 */
-               mutex_lock(&lport->lp_mutex);
-               list_for_each_entry(vn_port, &lport->vports, list)
-                       fc_lport_reset(vn_port);
-               mutex_unlock(&lport->lp_mutex);
-
                mutex_lock(&fip->ctlr_mutex);
                per_cpu_ptr(lport->dev_stats,
                            get_cpu())->VLinkFailureCount++;
@@ -1285,6 +1298,11 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
                fcoe_ctlr_reset(fip);
                mutex_unlock(&fip->ctlr_mutex);
 
+               mutex_lock(&lport->lp_mutex);
+               list_for_each_entry(vn_port, &lport->vports, list)
+                       fc_lport_reset(vn_port);
+               mutex_unlock(&lport->lp_mutex);
+
                fc_lport_reset(fip->lp);
                fcoe_ctlr_solicit(fip, NULL);
        } else {
index cdfe5a16de2aefb3cf9a1a42d035bb0b958893e6..e002cd466e9a916d24d6064012d0488c854cd63e 100644 (file)
@@ -104,7 +104,9 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
+               .max_cmds = 100,
                .cache_line_size = 0x20,
+               .clear_isr = 1,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
@@ -126,7 +128,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
+               .max_cmds = 100,
                .cache_line_size = 0x20,
+               .clear_isr = 1,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
@@ -148,7 +152,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        },
        { /* CRoC */
                .mailbox = 0x00044,
+               .max_cmds = 1000,
                .cache_line_size = 0x20,
+               .clear_isr = 0,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
@@ -847,8 +853,6 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
 
        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 
-       mb();
-
        ipr_send_command(ipr_cmd);
 }
 
@@ -982,8 +986,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
 
                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
 
-               mb();
-
                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@@ -4339,8 +4341,7 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
 
        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
                if ((res->bus == starget->channel) &&
-                   (res->target == starget->id) &&
-                   (res->lun == 0)) {
+                   (res->target == starget->id)) {
                        return res;
                }
        }
@@ -4414,12 +4415,14 @@ static void ipr_target_destroy(struct scsi_target *starget)
        struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
 
        if (ioa_cfg->sis64) {
-               if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
-                       clear_bit(starget->id, ioa_cfg->array_ids);
-               else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
-                       clear_bit(starget->id, ioa_cfg->vset_ids);
-               else if (starget->channel == 0)
-                       clear_bit(starget->id, ioa_cfg->target_ids);
+               if (!ipr_find_starget(starget)) {
+                       if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
+                               clear_bit(starget->id, ioa_cfg->array_ids);
+                       else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
+                               clear_bit(starget->id, ioa_cfg->vset_ids);
+                       else if (starget->channel == 0)
+                               clear_bit(starget->id, ioa_cfg->target_ids);
+               }
        }
 
        if (sata_port) {
@@ -5048,12 +5051,14 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
                del_timer(&ioa_cfg->reset_cmd->timer);
                ipr_reset_ioa_job(ioa_cfg->reset_cmd);
        } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
-               if (ipr_debug && printk_ratelimit())
-                       dev_err(&ioa_cfg->pdev->dev,
-                               "Spurious interrupt detected. 0x%08X\n", int_reg);
-               writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
-               int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
-               return IRQ_NONE;
+               if (ioa_cfg->clear_isr) {
+                       if (ipr_debug && printk_ratelimit())
+                               dev_err(&ioa_cfg->pdev->dev,
+                                       "Spurious interrupt detected. 0x%08X\n", int_reg);
+                       writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+                       int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
+                       return IRQ_NONE;
+               }
        } else {
                if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
                        ioa_cfg->ioa_unit_checked = 1;
@@ -5153,6 +5158,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
                        }
                }
 
+               if (ipr_cmd && !ioa_cfg->clear_isr)
+                       break;
+
                if (ipr_cmd != NULL) {
                        /* Clear the PCI interrupt */
                        num_hrrq = 0;
@@ -5854,14 +5862,12 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
                        rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
        }
 
-       if (likely(rc == 0)) {
-               mb();
-               ipr_send_command(ipr_cmd);
-       } else {
-                list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
-                return SCSI_MLQUEUE_HOST_BUSY;
+       if (unlikely(rc != 0)) {
+               list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               return SCSI_MLQUEUE_HOST_BUSY;
        }
 
+       ipr_send_command(ipr_cmd);
        return 0;
 }
 
@@ -6239,8 +6245,6 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
                return AC_ERR_INVALID;
        }
 
-       mb();
-
        ipr_send_command(ipr_cmd);
 
        return 0;
@@ -8277,6 +8281,10 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
        if (ioa_cfg->ipr_cmd_pool)
                pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
 
+       kfree(ioa_cfg->ipr_cmnd_list);
+       kfree(ioa_cfg->ipr_cmnd_list_dma);
+       ioa_cfg->ipr_cmnd_list = NULL;
+       ioa_cfg->ipr_cmnd_list_dma = NULL;
        ioa_cfg->ipr_cmd_pool = NULL;
 }
 
@@ -8352,11 +8360,19 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
        int i;
 
        ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
-                                                sizeof(struct ipr_cmnd), 16, 0);
+                                                sizeof(struct ipr_cmnd), 512, 0);
 
        if (!ioa_cfg->ipr_cmd_pool)
                return -ENOMEM;
 
+       ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
+       ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
+
+       if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
+               ipr_free_cmd_blks(ioa_cfg);
+               return -ENOMEM;
+       }
+
        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
                ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
@@ -8584,6 +8600,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
        host->max_channel = IPR_MAX_BUS_TO_SCAN;
        host->unique_id = host->host_no;
        host->max_cmd_len = IPR_MAX_CDB_LEN;
+       host->can_queue = ioa_cfg->max_cmds;
        pci_set_drvdata(pdev, ioa_cfg);
 
        p = &ioa_cfg->chip_cfg->regs;
@@ -8768,6 +8785,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
        /* set SIS 32 or SIS 64 */
        ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
        ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
+       ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
+       ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
 
        if (ipr_transop_timeout)
                ioa_cfg->transop_timeout = ipr_transop_timeout;
index f94eaee2ff16b626ae516a718fced5d76979cd95..153b8bd91d1ef825952ec70fec861fea0e8d5fea 100644 (file)
@@ -38,8 +38,8 @@
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.5.2"
-#define IPR_DRIVER_DATE "(April 27, 2011)"
+#define IPR_DRIVER_VERSION "2.5.3"
+#define IPR_DRIVER_DATE "(March 10, 2012)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -53,7 +53,7 @@
  * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
  *     ops the mid-layer can send to the adapter.
  */
-#define IPR_NUM_BASE_CMD_BLKS                          100
+#define IPR_NUM_BASE_CMD_BLKS                  (ioa_cfg->max_cmds)
 
 #define PCI_DEVICE_ID_IBM_OBSIDIAN_E   0x0339
 
 #define IPR_NUM_INTERNAL_CMD_BLKS      (IPR_NUM_HCAMS + \
                                      ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
 
-#define IPR_MAX_COMMANDS               IPR_NUM_BASE_CMD_BLKS
+#define IPR_MAX_COMMANDS               100
 #define IPR_NUM_CMD_BLKS               (IPR_NUM_BASE_CMD_BLKS + \
                                                IPR_NUM_INTERNAL_CMD_BLKS)
 
@@ -1305,7 +1305,9 @@ struct ipr_interrupts {
 
 struct ipr_chip_cfg_t {
        u32 mailbox;
+       u16 max_cmds;
        u8 cache_line_size;
+       u8 clear_isr;
        struct ipr_interrupt_offsets regs;
 };
 
@@ -1388,6 +1390,7 @@ struct ipr_ioa_cfg {
        u8 sis64:1;
        u8 dump_timeout:1;
        u8 cfg_locked:1;
+       u8 clear_isr:1;
 
        u8 revid;
 
@@ -1501,8 +1504,9 @@ struct ipr_ioa_cfg {
        struct ata_host ata_host;
        char ipr_cmd_label[8];
 #define IPR_CMD_LABEL          "ipr_cmd"
-       struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
-       dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
+       u32 max_cmds;
+       struct ipr_cmnd **ipr_cmnd_list;
+       dma_addr_t *ipr_cmnd_list_dma;
 }; /* struct ipr_ioa_cfg */
 
 struct ipr_cmnd {
index 630291f018262a3143645afe911ea97be1365f55..aceffadb21c79d347fdc7f138bce81afaf2394c9 100644 (file)
@@ -2263,7 +2263,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
        mp->class = class;
        /* adjust em exch xid range for offload */
        mp->min_xid = min_xid;
-       mp->max_xid = max_xid;
+
+       /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
+       pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
+               sizeof(struct fc_exch *);
+       if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
+               mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
+                       min_xid - 1;
+       } else {
+               mp->max_xid = max_xid;
+               pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
+                       (fc_cpu_mask + 1);
+       }
 
        mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
        if (!mp->ep_pool)
@@ -2274,7 +2285,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
         * divided across all cpus. The exch pointers array memory is
         * allocated for exch range per pool.
         */
-       pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
        mp->pool_max_index = pool_exch_range - 1;
 
        /*
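
The fc_exch_mgr_alloc() change above clamps the XID range so that each per-CPU exchange pool's pointer array still fits into a single PCPU_MIN_UNIT_SIZE percpu chunk. A standalone sketch of that arithmetic, assuming a 32 KB percpu unit and using a plain size in place of sizeof(struct fc_exch_pool):

#include <stdio.h>

#define PCPU_MIN_UNIT_SIZE      (32 * 1024)     /* assumption: 32 KB percpu unit */

/* Clamp [min_xid, max_xid] so each per-CPU pool's exch pointer array fits in
 * one percpu unit. pool_hdr_size stands in for sizeof(struct fc_exch_pool),
 * nr_pools for fc_cpu_mask + 1. Returns the effective max_xid. */
static unsigned int fc_clamp_xid_range(unsigned int min_xid,
                                       unsigned int max_xid,
                                       unsigned int nr_pools,
                                       size_t pool_hdr_size)
{
        size_t pool_exch_range = (PCPU_MIN_UNIT_SIZE - pool_hdr_size) /
                                 sizeof(void *);

        if ((max_xid - min_xid + 1) / nr_pools > pool_exch_range)
                return pool_exch_range * nr_pools + min_xid - 1;
        return max_xid;
}

int main(void)
{
        /* e.g. 4 pools, XIDs 0x0100..0xffff, 64-byte pool header */
        printf("max_xid clamped to 0x%x\n",
               fc_clamp_xid_range(0x0100, 0xffff, 4, 64));
        return 0;
}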
index bd5d31d022d911f245f87322ea84dbe01f7b2c93..ef9560dff295f9252bf9ccdf3a4d9d2de61d1852 100644 (file)
@@ -1743,8 +1743,16 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
        mfs = ntohs(flp->fl_csp.sp_bb_data) &
                FC_SP_BB_DATA_MASK;
        if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
-           mfs < lport->mfs)
+           mfs <= lport->mfs) {
                lport->mfs = mfs;
+               fc_host_maxframe_size(lport->host) = mfs;
+       } else {
+               FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+                            "lport->mfs:%hu\n", mfs, lport->mfs);
+               fc_lport_error(lport, fp);
+               goto err;
+       }
+
        csp_flags = ntohs(flp->fl_csp.sp_features);
        r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
        e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
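
A small sketch of the stricter MFS handling above: the FLOGI responder's maximum frame size is accepted only when it is at least FC_SP_MIN_MAX_PAYLOAD and no larger than the value the local port already advertised; otherwise the response is treated as an error. The 256-byte minimum and the bool return convention here are assumptions for illustration.

#include <stdio.h>
#include <stdbool.h>

#define FC_SP_MIN_MAX_PAYLOAD   256     /* assumption: minimum legal MFS */

/* Returns true and updates *lport_mfs if the peer's mfs is acceptable. */
static bool fc_lport_accept_mfs(unsigned short mfs, unsigned short *lport_mfs)
{
        if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs <= *lport_mfs) {
                *lport_mfs = mfs;       /* negotiate down to the peer's value */
                return true;
        }
        return false;                   /* bad MFS: caller fails the FLOGI */
}

int main(void)
{
        unsigned short mfs = 2048;

        printf("accept 2112? %d (mfs now %hu)\n",
               fc_lport_accept_mfs(2112, &mfs), mfs);   /* rejected: > local */
        printf("accept 1024? %d (mfs now %hu)\n",
               fc_lport_accept_mfs(1024, &mfs), mfs);   /* accepted */
        return 0;
}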
index 88928f00aa2db3dc1896f93f1b9c261be77e629d..fe5d396aca73b7b6bc13e0d8fd3199f9759b7a82 100644 (file)
@@ -1,7 +1,7 @@
 #/*******************************************************************
 # * This file is part of the Emulex Linux Device Driver for         *
 # * Fibre Channel Host Bus Adapters.                                *
-# * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+# * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 # * EMULEX and SLI are trademarks of Emulex.                        *
 # * www.emulex.com                                                  *
 # *                                                                 *
@@ -22,6 +22,8 @@
 ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
 ccflags-$(GCOV) += -O0
 
+ccflags-y += -Werror
+
 obj-$(CONFIG_SCSI_LPFC) := lpfc.o
 
 lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
index 5fc044ff656eb75f85bf61ec2666aa2465c35f02..3a1ffdd6d831ebfdd80fdb87b5b3dc15b0444cff 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -840,6 +840,8 @@ struct lpfc_hba {
        struct dentry *debug_dumpData;   /* BlockGuard BPL */
        struct dentry *debug_dumpDif;    /* BlockGuard BPL */
        struct dentry *debug_InjErrLBA;  /* LBA to inject errors at */
+       struct dentry *debug_InjErrNPortID;  /* NPortID to inject errors at */
+       struct dentry *debug_InjErrWWPN;  /* WWPN to inject errors at */
        struct dentry *debug_writeGuard; /* inject write guard_tag errors */
        struct dentry *debug_writeApp;   /* inject write app_tag errors */
        struct dentry *debug_writeRef;   /* inject write ref_tag errors */
@@ -854,6 +856,8 @@ struct lpfc_hba {
        uint32_t lpfc_injerr_rgrd_cnt;
        uint32_t lpfc_injerr_rapp_cnt;
        uint32_t lpfc_injerr_rref_cnt;
+       uint32_t lpfc_injerr_nportid;
+       struct lpfc_name lpfc_injerr_wwpn;
        sector_t lpfc_injerr_lba;
 #define LPFC_INJERR_LBA_OFF    (sector_t)(-1)
 
@@ -908,6 +912,8 @@ struct lpfc_hba {
        atomic_t fast_event_count;
        uint32_t fcoe_eventtag;
        uint32_t fcoe_eventtag_at_fcf_scan;
+       uint32_t fcoe_cvl_eventtag;
+       uint32_t fcoe_cvl_eventtag_attn;
        struct lpfc_fcf fcf;
        uint8_t fc_map[3];
        uint8_t valid_vlan;
index 296ad5bc42400692116686ac764570a350181896..5eb2bc11618368ca9b942b9f0fbcb3eeee103fd6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2575,7 +2575,7 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
 # objects that have been registered with the nameserver after login.
 */
-LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1,
+LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
                  "Deregister nameserver objects before LOGO");
 
 /*
index 22e17be04d8af228b33efef2009f2a9cfc7a7471..5bdf2eecb1782dd4c33f863607d045ed27535fe9 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2007-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -1010,25 +1010,35 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
 {
        struct dentry *dent = file->f_dentry;
        struct lpfc_hba *phba = file->private_data;
-       char cbuf[16];
+       char cbuf[32];
+       uint64_t tmp = 0;
        int cnt = 0;
 
        if (dent == phba->debug_writeGuard)
-               cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
        else if (dent == phba->debug_writeApp)
-               cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
+               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
        else if (dent == phba->debug_writeRef)
-               cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
+               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
        else if (dent == phba->debug_readGuard)
-               cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
        else if (dent == phba->debug_readApp)
-               cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
+               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
        else if (dent == phba->debug_readRef)
-               cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt);
-       else if (dent == phba->debug_InjErrLBA)
-               cnt = snprintf(cbuf, 16, "0x%lx\n",
-                                (unsigned long) phba->lpfc_injerr_lba);
-       else
+               cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+       else if (dent == phba->debug_InjErrNPortID)
+               cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+       else if (dent == phba->debug_InjErrWWPN) {
+               memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
+               tmp = cpu_to_be64(tmp);
+               cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+       } else if (dent == phba->debug_InjErrLBA) {
+               if (phba->lpfc_injerr_lba == (sector_t)(-1))
+                       cnt = snprintf(cbuf, 32, "off\n");
+               else
+                       cnt = snprintf(cbuf, 32, "0x%llx\n",
+                                (uint64_t) phba->lpfc_injerr_lba);
+       } else
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                         "0547 Unknown debugfs error injection entry\n");
 
@@ -1042,7 +1052,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
        struct dentry *dent = file->f_dentry;
        struct lpfc_hba *phba = file->private_data;
        char dstbuf[32];
-       unsigned long tmp;
+       uint64_t tmp = 0;
        int size;
 
        memset(dstbuf, 0, 32);
@@ -1050,7 +1060,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
        if (copy_from_user(dstbuf, buf, size))
                return 0;
 
-       if (strict_strtoul(dstbuf, 0, &tmp))
+       if (dent == phba->debug_InjErrLBA) {
+               if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && (dstbuf[2] == 'f'))
+                       tmp = (uint64_t)(-1);
+       }
+
+       if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
                return 0;
 
        if (dent == phba->debug_writeGuard)
@@ -1067,7 +1082,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
                phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
        else if (dent == phba->debug_InjErrLBA)
                phba->lpfc_injerr_lba = (sector_t)tmp;
-       else
+       else if (dent == phba->debug_InjErrNPortID)
+               phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
+       else if (dent == phba->debug_InjErrWWPN) {
+               tmp = cpu_to_be64(tmp);
+               memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
+       } else
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                         "0548 Unknown debugfs error injection entry\n");
 
@@ -3949,6 +3969,28 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                }
                phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
 
+               snprintf(name, sizeof(name), "InjErrNPortID");
+               phba->debug_InjErrNPortID =
+                       debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+                       phba->hba_debugfs_root,
+                       phba, &lpfc_debugfs_op_dif_err);
+               if (!phba->debug_InjErrNPortID) {
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                               "0809 Cannot create debugfs InjErrNPortID\n");
+                       goto debug_failed;
+               }
+
+               snprintf(name, sizeof(name), "InjErrWWPN");
+               phba->debug_InjErrWWPN =
+                       debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+                       phba->hba_debugfs_root,
+                       phba, &lpfc_debugfs_op_dif_err);
+               if (!phba->debug_InjErrWWPN) {
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+                               "0810 Cannot create debugfs InjErrWWPN\n");
+                       goto debug_failed;
+               }
+
                snprintf(name, sizeof(name), "writeGuardInjErr");
                phba->debug_writeGuard =
                        debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -4321,6 +4363,14 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
                        debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
                        phba->debug_InjErrLBA = NULL;
                }
+               if (phba->debug_InjErrNPortID) {         /* InjErrNPortID */
+                       debugfs_remove(phba->debug_InjErrNPortID);
+                       phba->debug_InjErrNPortID = NULL;
+               }
+               if (phba->debug_InjErrWWPN) {
+                       debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
+                       phba->debug_InjErrWWPN = NULL;
+               }
                if (phba->debug_writeGuard) {
                        debugfs_remove(phba->debug_writeGuard); /* writeGuard */
                        phba->debug_writeGuard = NULL;
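
A hedged userspace sketch of the parsing step the dif_err write handler now performs: the literal string "off" disables LBA-based injection (mapped to an all-ones sector value), while anything else is parsed as a 64-bit number; strtoull stands in for kstrtoull, and the helper name is hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Parse a debugfs write: "off" maps to all-ones (injection disabled),
 * anything else is taken as a 0x-prefixed or decimal 64-bit value.
 * Returns 0 on success, -1 if nothing could be parsed. */
static int parse_injerr_value(const char *buf, uint64_t *out)
{
        char *end;

        if (strncmp(buf, "off", 3) == 0) {
                *out = (uint64_t)-1;    /* LPFC_INJERR_LBA_OFF equivalent */
                return 0;
        }
        *out = strtoull(buf, &end, 0);  /* base 0: accepts 0x... or decimal */
        return (end == buf) ? -1 : 0;
}

int main(void)
{
        uint64_t v;

        parse_injerr_value("0x1234", &v);
        printf("lba = 0x%llx\n", (unsigned long long)v);
        parse_injerr_value("off", &v);
        printf("lba = 0x%llx (off)\n", (unsigned long long)v);
        return 0;
}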
index 8db2fb3b45ec13a40fd36ad3aa4c48cf9659b37c..3407b39e0a3f82bfaf8239864ea1b764b2b386d6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -925,9 +925,17 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 * due to new FCF discovery
                 */
                if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
-                   (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
-                   !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-                    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
+                   (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+                       if (phba->link_state < LPFC_LINK_UP)
+                               goto stop_rr_fcf_flogi;
+                       if ((phba->fcoe_cvl_eventtag_attn ==
+                            phba->fcoe_cvl_eventtag) &&
+                           (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+                           (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+                               goto stop_rr_fcf_flogi;
+                       else
+                               phba->fcoe_cvl_eventtag_attn =
+                                       phba->fcoe_cvl_eventtag;
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
                                        "2611 FLOGI failed on FCF (x%x), "
                                        "status:x%x/x%x, tmo:x%x, perform "
@@ -943,6 +951,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                goto out;
                }
 
+stop_rr_fcf_flogi:
                /* FLOGI failure */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
index 343d87ba4df8f2e58cb2685c02bd86909d64e793..b507536dc5b569262f33b04df00cab96f48c4ab5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2843,7 +2843,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        struct lpfc_vport *vport = mboxq->vport;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-       if (mboxq->u.mb.mbxStatus) {
+       /*
+        * VFI not supported for interface type 0, so ignore any mailbox
+        * error (except VFI in use) and continue with the discovery.
+        */
+       if (mboxq->u.mb.mbxStatus &&
+           (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+                       LPFC_SLI_INTF_IF_TYPE_0) &&
+           mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "2018 REG_VFI mbxStatus error x%x "
                         "HBA state x%x\n",
@@ -5673,14 +5680,13 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
                                ret = 1;
                                spin_unlock_irq(shost->host_lock);
                                goto out;
-                       } else {
+                       } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+                               ret = 1;
                                lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                                       "2624 RPI %x DID %x flg %x still "
-                                       "logged in\n",
-                                       ndlp->nlp_rpi, ndlp->nlp_DID,
-                                       ndlp->nlp_flag);
-                               if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
-                                       ret = 1;
+                                               "2624 RPI %x DID %x flag %x "
+                                               "still logged in\n",
+                                               ndlp->nlp_rpi, ndlp->nlp_DID,
+                                               ndlp->nlp_flag);
                        }
                }
                spin_unlock_irq(shost->host_lock);
index 9e2b9b227e1a2b732b39ee6cb424d421c6c03230..91f09761bd328136142fd3a37bd737789c6cee47 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -338,6 +338,12 @@ struct lpfc_cqe {
 #define CQE_CODE_XRI_ABORTED           0x5
 #define CQE_CODE_RECEIVE_V1            0x9
 
+/*
+ * Define mask value for xri_aborted and wcqe completed CQE extended status.
+ * Currently, extended status is limited to 9 bits (0x0 -> 0x103).
+ */
+#define WCQE_PARAM_MASK                0x1FF
+
 /* completion queue entry for wqe completions */
 struct lpfc_wcqe_complete {
        uint32_t word0;
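
As a quick illustration of the new mask, extracting the 9-bit extended status from a completion parameter word might look like the following; the example word value is an assumption, not taken from hardware documentation.

#include <stdio.h>
#include <stdint.h>

#define WCQE_PARAM_MASK 0x1FF   /* low 9 bits carry the extended status */

int main(void)
{
        uint32_t wcqe_parameter = 0x0003A102;   /* example CQE parameter word */
        uint32_t ext_status = wcqe_parameter & WCQE_PARAM_MASK;

        printf("extended status 0x%03x\n", ext_status); /* prints 0x102 */
        return 0;
}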
index b38f99f3be32b9e6e8bff87809b52d704e766070..9598fdcb08ab0ee50b147574d05406eee286376e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -2704,16 +2704,14 @@ lpfc_offline_prep(struct lpfc_hba * phba)
                                }
                                spin_lock_irq(shost->host_lock);
                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-
+                               spin_unlock_irq(shost->host_lock);
                                /*
                                 * Whenever an SLI4 port goes offline, free the
-                                * RPI.  A new RPI when the adapter port comes
-                                * back online.
+                                * RPI. Get a new RPI when the adapter port
+                                * comes back online.
                                 */
                                if (phba->sli_rev == LPFC_SLI_REV4)
                                        lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
-
-                               spin_unlock_irq(shost->host_lock);
                                lpfc_unreg_rpi(vports[i], ndlp);
                        }
                }
@@ -2786,9 +2784,13 @@ lpfc_scsi_buf_update(struct lpfc_hba *phba)
 
        spin_lock_irq(&phba->hbalock);
        spin_lock(&phba->scsi_buf_list_lock);
-       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
                sb->cur_iocbq.sli4_xritag =
                        phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+               set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
+               phba->sli4_hba.max_cfg_param.xri_used++;
+               phba->sli4_hba.xri_count++;
+       }
        spin_unlock(&phba->scsi_buf_list_lock);
        spin_unlock_irq(&phba->hbalock);
        return 0;
@@ -3723,6 +3725,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                break;
 
        case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
+               phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2549 FCF (x%x) disconnected from network, "
                        "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -3784,6 +3787,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                }
                break;
        case LPFC_FIP_EVENT_TYPE_CVL:
+               phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2718 Clear Virtual Link Received for VPI 0x%x"
                        " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -5226,8 +5230,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
         * rpi is normalized to a zero base because the physical rpi is
         * port based.
         */
-       curr_rpi_range = phba->sli4_hba.next_rpi -
-               phba->sli4_hba.max_cfg_param.rpi_base;
+       curr_rpi_range = phba->sli4_hba.next_rpi;
        spin_unlock_irq(&phba->hbalock);
 
        /*
@@ -5818,10 +5821,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
                                        readl(phba->sli4_hba.u.if_type2.
                                              ERR2regaddr);
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2888 Port Error Detected "
-                                       "during POST: "
-                                       "port status reg 0x%x, "
-                                       "port_smphr reg 0x%x, "
+                                       "2888 Unrecoverable port error "
+                                       "following POST: port status reg "
+                                       "0x%x, port_smphr reg 0x%x, "
                                        "error 1=0x%x, error 2=0x%x\n",
                                        reg_data.word0,
                                        portsmphr_reg.word0,
@@ -6142,7 +6144,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
                phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
                phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
                phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
-               phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
                phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
                                (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
                phba->max_vports = phba->max_vpi;
@@ -7231,6 +7232,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
        uint32_t rdy_chk, num_resets = 0, reset_again = 0;
        union lpfc_sli4_cfg_shdr *shdr;
        struct lpfc_register reg_data;
+       uint16_t devid;
 
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
        switch (if_type) {
@@ -7277,7 +7279,9 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
                               LPFC_SLIPORT_INIT_PORT);
                        writel(reg_data.word0, phba->sli4_hba.u.if_type2.
                               CTRLregaddr);
-
+                       /* flush */
+                       pci_read_config_word(phba->pcidev,
+                                            PCI_DEVICE_ID, &devid);
                        /*
                         * Poll the Port Status Register and wait for RDY for
                         * up to 10 seconds.  If the port doesn't respond, treat
@@ -7315,11 +7319,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
                                phba->work_status[1] = readl(
                                        phba->sli4_hba.u.if_type2.ERR2regaddr);
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "2890 Port Error Detected "
-                                       "during Port Reset: "
-                                       "port status reg 0x%x, "
+                                       "2890 Port error detected during port "
+                                       "reset(%d): port status reg 0x%x, "
                                        "error 1=0x%x, error 2=0x%x\n",
-                                       reg_data.word0,
+                                       num_resets, reg_data.word0,
                                        phba->work_status[0],
                                        phba->work_status[1]);
                                rc = -ENODEV;
index 7b6b2aa5795aba306e54830c0224a5fd3c8048be..15ca2a9a0cdd3122850fac7fbd3ca2991949a31b 100644 (file)
@@ -1,7 +1,7 @@
  /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -440,11 +440,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                spin_unlock_irq(shost->host_lock);
                stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
                stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
-               lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+               rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
                        ndlp, mbox);
+               if (rc)
+                       mempool_free(mbox, phba->mbox_mem_pool);
                return 1;
        }
-       lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+       if (rc)
+               mempool_free(mbox, phba->mbox_mem_pool);
        return 1;
 out:
        stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
index efc055b6bac4fe6fffb8432c4004ea2cbc9ac5aa..88f3a83dbd2eaf45a36d08a0c92b8fe186158ea3 100644 (file)
@@ -39,8 +39,8 @@
 #include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
-#include "lpfc_scsi.h"
 #include "lpfc.h"
+#include "lpfc_scsi.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
 #include "lpfc_vport.h"
 int _dump_buf_done;
 
 static char *dif_op_str[] = {
-       "SCSI_PROT_NORMAL",
-       "SCSI_PROT_READ_INSERT",
-       "SCSI_PROT_WRITE_STRIP",
-       "SCSI_PROT_READ_STRIP",
-       "SCSI_PROT_WRITE_INSERT",
-       "SCSI_PROT_READ_PASS",
-       "SCSI_PROT_WRITE_PASS",
+       "PROT_NORMAL",
+       "PROT_READ_INSERT",
+       "PROT_WRITE_STRIP",
+       "PROT_READ_STRIP",
+       "PROT_WRITE_INSERT",
+       "PROT_READ_PASS",
+       "PROT_WRITE_PASS",
+};
+
+static char *dif_grd_str[] = {
+       "NO_GUARD",
+       "DIF_CRC",
+       "DIX_IP",
 };
 
 struct scsi_dif_tuple {
@@ -1281,10 +1287,14 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 
-#define BG_ERR_INIT    1
-#define BG_ERR_TGT     2
-#define BG_ERR_SWAP    3
-#define BG_ERR_CHECK   4
+/* Return if error injection is detected by Initiator */
+#define BG_ERR_INIT    0x1
+/* Return if error injection is detected by Target */
+#define BG_ERR_TGT     0x2
+/* Return if swapping CSUM<-->CRC is required for error injection */
+#define BG_ERR_SWAP    0x10
+/* Return if disabling Guard/Ref/App checking is required for error injection */
+#define BG_ERR_CHECK   0x20
 
 /**
  * lpfc_bg_err_inject - Determine if we should inject an error
@@ -1294,10 +1304,7 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
  * @apptag: (out) BlockGuard application tag for transmitted data
  * @new_guard (in) Value to replace CRC with if needed
  *
- * Returns (1) if error injection is detected by Initiator
- * Returns (2) if error injection is detected by Target
- * Returns (3) if swapping CSUM->CRC is required for error injection
- * Returns (4) disabling Guard/Ref/App checking is required for error injection
+ * Returns BG_ERR_* bit mask or 0 if request ignored
  **/
 static int
 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
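
Because lpfc_bg_err_inject() now returns a bit mask instead of a single enumerated code, callers test individual BG_ERR_* flags rather than switching on the value. A small sketch of that pattern; the flag values mirror the defines above, while the caller logic is purely illustrative.

#include <stdio.h>

#define BG_ERR_INIT     0x1     /* error injected on the initiator side */
#define BG_ERR_TGT      0x2     /* error meant to be detected by the target */
#define BG_ERR_SWAP     0x10    /* swap CSUM<->CRC for the injection */
#define BG_ERR_CHECK    0x20    /* disable guard/ref/app checking */

static void handle_injection(int rc)
{
        if (!rc)
                return;                         /* nothing injected */
        if (rc & BG_ERR_TGT)
                printf("expect the target to flag this I/O\n");
        if (rc & BG_ERR_CHECK)
                printf("disable guard/ref/app checking on this WQE\n");
        if (rc & BG_ERR_SWAP)
                printf("swap checksum for CRC before sending\n");
}

int main(void)
{
        handle_injection(BG_ERR_TGT | BG_ERR_CHECK);    /* e.g. WRITE_PASS case */
        return 0;
}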
@@ -1305,7 +1312,10 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 {
        struct scatterlist *sgpe; /* s/g prot entry */
        struct scatterlist *sgde; /* s/g data entry */
+       struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct scsi_dif_tuple *src = NULL;
+       struct lpfc_nodelist *ndlp;
+       struct lpfc_rport_data *rdata;
        uint32_t op = scsi_get_prot_op(sc);
        uint32_t blksize;
        uint32_t numblks;
@@ -1318,8 +1328,9 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        sgpe = scsi_prot_sglist(sc);
        sgde = scsi_sglist(sc);
-
        lba = scsi_get_lba(sc);
+
+       /* First check if we need to match the LBA */
        if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
                blksize = lpfc_cmd_blksize(sc);
                numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
@@ -1334,66 +1345,123 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                                sizeof(struct scsi_dif_tuple);
                        if (numblks < blockoff)
                                blockoff = numblks;
-                       src = (struct scsi_dif_tuple *)sg_virt(sgpe);
-                       src += blockoff;
                }
        }
 
+       /* Next check if we need to match the remote NPortID or WWPN */
+       rdata = sc->device->hostdata;
+       if (rdata && rdata->pnode) {
+               ndlp = rdata->pnode;
+
+               /* Make sure we have the right NPortID if one is specified */
+               if (phba->lpfc_injerr_nportid  &&
+                       (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
+                       return 0;
+
+               /*
+                * Make sure we have the right WWPN if one is specified.
+                * wwn[0] should be a non-zero NAA in a good WWPN.
+                */
+               if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
+                       (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
+                               sizeof(struct lpfc_name)) != 0))
+                       return 0;
+       }
+
+       /* Setup a ptr to the protection data if the SCSI host provides it */
+       if (sgpe) {
+               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+               src += blockoff;
+               lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+       }
+
        /* Should we change the Reference Tag */
        if (reftag) {
                if (phba->lpfc_injerr_wref_cnt) {
                        switch (op) {
                        case SCSI_PROT_WRITE_PASS:
-                               if (blockoff && src) {
-                                       /* Insert error in middle of the IO */
+                               if (src) {
+                                       /*
+                                        * For WRITE_PASS, force the error
+                                        * to be sent on the wire. It should
+                                        * be detected by the Target.
+                                        * If blockoff != 0, the error will be
+                                        * inserted in the middle of the IO.
+                                        */
 
                                        lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9076 BLKGRD: Injecting reftag error: "
                                        "write lba x%lx + x%x oldrefTag x%x\n",
                                        (unsigned long)lba, blockoff,
-                                       src->ref_tag);
+                                       be32_to_cpu(src->ref_tag));
 
                                        /*
-                                        * NOTE, this will change ref tag in
-                                        * the memory location forever!
+                                        * Save the old ref_tag so we can
+                                        * restore it on completion.
                                         */
-                                       src->ref_tag = 0xDEADBEEF;
+                                       if (lpfc_cmd) {
+                                               lpfc_cmd->prot_data_type =
+                                                       LPFC_INJERR_REFTAG;
+                                               lpfc_cmd->prot_data_segment =
+                                                       src;
+                                               lpfc_cmd->prot_data =
+                                                       src->ref_tag;
+                                       }
+                                       src->ref_tag = cpu_to_be32(0xDEADBEEF);
                                        phba->lpfc_injerr_wref_cnt--;
-                                       phba->lpfc_injerr_lba =
-                                               LPFC_INJERR_LBA_OFF;
-                                       rc = BG_ERR_CHECK;
+                                       if (phba->lpfc_injerr_wref_cnt == 0) {
+                                               phba->lpfc_injerr_nportid = 0;
+                                               phba->lpfc_injerr_lba =
+                                                       LPFC_INJERR_LBA_OFF;
+                                               memset(&phba->lpfc_injerr_wwpn,
+                                                 0, sizeof(struct lpfc_name));
+                                       }
+                                       rc = BG_ERR_TGT | BG_ERR_CHECK;
+
                                        break;
                                }
                                /* Drop thru */
-                       case SCSI_PROT_WRITE_STRIP:
+                       case SCSI_PROT_WRITE_INSERT:
                                /*
-                                * For WRITE_STRIP and WRITE_PASS,
-                                * force the error on data
-                                * being copied from SLI-Host to SLI-Port.
+                                * For WRITE_INSERT, force the error
+                                * to be sent on the wire. It should be
+                                * detected by the Target.
                                 */
+                               /* DEADBEEF will be the reftag on the wire */
                                *reftag = 0xDEADBEEF;
                                phba->lpfc_injerr_wref_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-                               rc = BG_ERR_INIT;
+                               if (phba->lpfc_injerr_wref_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                       LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
+                               rc = BG_ERR_TGT | BG_ERR_CHECK;
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9077 BLKGRD: Injecting reftag error: "
+                                       "9078 BLKGRD: Injecting reftag error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
-                       case SCSI_PROT_WRITE_INSERT:
+                       case SCSI_PROT_WRITE_STRIP:
                                /*
-                                * For WRITE_INSERT, force the
-                                * error to be sent on the wire. It should be
-                                * detected by the Target.
+                                * For WRITE_STRIP and WRITE_PASS,
+                                * force the error on data
+                                * being copied from SLI-Host to SLI-Port.
                                 */
-                               /* DEADBEEF will be the reftag on the wire */
                                *reftag = 0xDEADBEEF;
                                phba->lpfc_injerr_wref_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-                               rc = BG_ERR_TGT;
+                               if (phba->lpfc_injerr_wref_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                               LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
+                               rc = BG_ERR_INIT;
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9078 BLKGRD: Injecting reftag error: "
+                                       "9077 BLKGRD: Injecting reftag error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        }
@@ -1401,11 +1469,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                if (phba->lpfc_injerr_rref_cnt) {
                        switch (op) {
                        case SCSI_PROT_READ_INSERT:
-                               /*
-                                * For READ_INSERT, it doesn't make sense
-                                * to change the reftag.
-                                */
-                               break;
                        case SCSI_PROT_READ_STRIP:
                        case SCSI_PROT_READ_PASS:
                                /*
@@ -1415,7 +1478,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                                 */
                                *reftag = 0xDEADBEEF;
                                phba->lpfc_injerr_rref_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+                               if (phba->lpfc_injerr_rref_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                               LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
                                rc = BG_ERR_INIT;
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1431,56 +1500,87 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                if (phba->lpfc_injerr_wapp_cnt) {
                        switch (op) {
                        case SCSI_PROT_WRITE_PASS:
-                               if (blockoff && src) {
-                                       /* Insert error in middle of the IO */
+                               if (src) {
+                                       /*
+                                        * For WRITE_PASS, force the error
+                                        * to be sent on the wire. It should
+                                        * be detected by the Target.
+                                        * If blockoff != 0, the error will be
+                                        * inserted in the middle of the IO.
+                                        */
 
                                        lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9080 BLKGRD: Injecting apptag error: "
                                        "write lba x%lx + x%x oldappTag x%x\n",
                                        (unsigned long)lba, blockoff,
-                                       src->app_tag);
+                                       be16_to_cpu(src->app_tag));
 
                                        /*
-                                        * NOTE, this will change app tag in
-                                        * the memory location forever!
+                                        * Save the old app_tag so we can
+                                        * restore it on completion.
                                         */
-                                       src->app_tag = 0xDEAD;
+                                       if (lpfc_cmd) {
+                                               lpfc_cmd->prot_data_type =
+                                                       LPFC_INJERR_APPTAG;
+                                               lpfc_cmd->prot_data_segment =
+                                                       src;
+                                               lpfc_cmd->prot_data =
+                                                       src->app_tag;
+                                       }
+                                       src->app_tag = cpu_to_be16(0xDEAD);
                                        phba->lpfc_injerr_wapp_cnt--;
-                                       phba->lpfc_injerr_lba =
-                                               LPFC_INJERR_LBA_OFF;
-                                       rc = BG_ERR_CHECK;
+                                       if (phba->lpfc_injerr_wapp_cnt == 0) {
+                                               phba->lpfc_injerr_nportid = 0;
+                                               phba->lpfc_injerr_lba =
+                                                       LPFC_INJERR_LBA_OFF;
+                                               memset(&phba->lpfc_injerr_wwpn,
+                                                 0, sizeof(struct lpfc_name));
+                                       }
+                                       rc = BG_ERR_TGT | BG_ERR_CHECK;
                                        break;
                                }
                                /* Drop thru */
-                       case SCSI_PROT_WRITE_STRIP:
+                       case SCSI_PROT_WRITE_INSERT:
                                /*
-                                * For WRITE_STRIP and WRITE_PASS,
-                                * force the error on data
-                                * being copied from SLI-Host to SLI-Port.
+                                * For WRITE_INSERT, force the
+                                * error to be sent on the wire. It should be
+                                * detected by the Target.
                                 */
+                               /* DEAD will be the apptag on the wire */
                                *apptag = 0xDEAD;
                                phba->lpfc_injerr_wapp_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-                               rc = BG_ERR_INIT;
+                               if (phba->lpfc_injerr_wapp_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                               LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
+                               rc = BG_ERR_TGT | BG_ERR_CHECK;
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "0812 BLKGRD: Injecting apptag error: "
+                                       "0813 BLKGRD: Injecting apptag error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
-                       case SCSI_PROT_WRITE_INSERT:
+                       case SCSI_PROT_WRITE_STRIP:
                                /*
-                                * For WRITE_INSERT, force the
-                                * error to be sent on the wire. It should be
-                                * detected by the Target.
+                                * For WRITE_STRIP and WRITE_PASS,
+                                * force the error on data
+                                * being copied from SLI-Host to SLI-Port.
                                 */
-                               /* DEAD will be the apptag on the wire */
                                *apptag = 0xDEAD;
                                phba->lpfc_injerr_wapp_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-                               rc = BG_ERR_TGT;
+                               if (phba->lpfc_injerr_wapp_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                               LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
+                               rc = BG_ERR_INIT;
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "0813 BLKGRD: Injecting apptag error: "
+                                       "0812 BLKGRD: Injecting apptag error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        }
@@ -1488,11 +1588,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                if (phba->lpfc_injerr_rapp_cnt) {
                        switch (op) {
                        case SCSI_PROT_READ_INSERT:
-                               /*
-                                * For READ_INSERT, it doesn't make sense
-                                * to change the apptag.
-                                */
-                               break;
                        case SCSI_PROT_READ_STRIP:
                        case SCSI_PROT_READ_PASS:
                                /*
@@ -1502,7 +1597,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                                 */
                                *apptag = 0xDEAD;
                                phba->lpfc_injerr_rapp_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+                               if (phba->lpfc_injerr_rapp_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                               LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
                                rc = BG_ERR_INIT;
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1519,57 +1620,51 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                if (phba->lpfc_injerr_wgrd_cnt) {
                        switch (op) {
                        case SCSI_PROT_WRITE_PASS:
-                               if (blockoff && src) {
-                                       /* Insert error in middle of the IO */
-
-                                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "0815 BLKGRD: Injecting guard error: "
-                                       "write lba x%lx + x%x oldgrdTag x%x\n",
-                                       (unsigned long)lba, blockoff,
-                                       src->guard_tag);
-
-                                       /*
-                                        * NOTE, this will change guard tag in
-                                        * the memory location forever!
-                                        */
-                                       src->guard_tag = 0xDEAD;
-                                       phba->lpfc_injerr_wgrd_cnt--;
-                                       phba->lpfc_injerr_lba =
-                                               LPFC_INJERR_LBA_OFF;
-                                       rc = BG_ERR_CHECK;
-                                       break;
-                               }
+                               rc = BG_ERR_CHECK;
                                /* Drop thru */
-                       case SCSI_PROT_WRITE_STRIP:
+
+                       case SCSI_PROT_WRITE_INSERT:
                                /*
-                                * For WRITE_STRIP and WRITE_PASS,
-                                * force the error on data
-                                * being copied from SLI-Host to SLI-Port.
+                                * For WRITE_INSERT, force the
+                                * error to be sent on the wire. It should be
+                                * detected by the Target.
                                 */
                                phba->lpfc_injerr_wgrd_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+                               if (phba->lpfc_injerr_wgrd_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                               LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
 
-                               rc = BG_ERR_SWAP;
+                               rc |= BG_ERR_TGT | BG_ERR_SWAP;
                                /* Signals the caller to swap CRC->CSUM */
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "0816 BLKGRD: Injecting guard error: "
+                                       "0817 BLKGRD: Injecting guard error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
-                       case SCSI_PROT_WRITE_INSERT:
+                       case SCSI_PROT_WRITE_STRIP:
                                /*
-                                * For WRITE_INSERT, force the
-                                * error to be sent on the wire. It should be
-                                * detected by the Target.
+                                * For WRITE_STRIP and WRITE_PASS,
+                                * force the error on data
+                                * being copied from SLI-Host to SLI-Port.
                                 */
                                phba->lpfc_injerr_wgrd_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+                               if (phba->lpfc_injerr_wgrd_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                               LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
 
-                               rc = BG_ERR_SWAP;
+                               rc = BG_ERR_INIT | BG_ERR_SWAP;
                                /* Signals the caller to swap CRC->CSUM */
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "0817 BLKGRD: Injecting guard error: "
+                                       "0816 BLKGRD: Injecting guard error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        }
@@ -1577,11 +1672,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                if (phba->lpfc_injerr_rgrd_cnt) {
                        switch (op) {
                        case SCSI_PROT_READ_INSERT:
-                               /*
-                                * For READ_INSERT, it doesn't make sense
-                                * to change the guard tag.
-                                */
-                               break;
                        case SCSI_PROT_READ_STRIP:
                        case SCSI_PROT_READ_PASS:
                                /*
@@ -1589,11 +1679,16 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                                 * error on data being read off the wire. It
                                 * should force an IO error to the driver.
                                 */
-                               *apptag = 0xDEAD;
                                phba->lpfc_injerr_rgrd_cnt--;
-                               phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+                               if (phba->lpfc_injerr_rgrd_cnt == 0) {
+                                       phba->lpfc_injerr_nportid = 0;
+                                       phba->lpfc_injerr_lba =
+                                               LPFC_INJERR_LBA_OFF;
+                                       memset(&phba->lpfc_injerr_wwpn,
+                                               0, sizeof(struct lpfc_name));
+                               }
 
-                               rc = BG_ERR_SWAP;
+                               rc = BG_ERR_INIT | BG_ERR_SWAP;
                                /* Signals the caller to swap CRC->CSUM */
 
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1629,20 +1724,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
-                       *txop = BG_OP_IN_CSUM_OUT_NODIF;
                        *rxop = BG_OP_IN_NODIF_OUT_CSUM;
+                       *txop = BG_OP_IN_CSUM_OUT_NODIF;
                        break;
 
                case SCSI_PROT_READ_STRIP:
                case SCSI_PROT_WRITE_INSERT:
-                       *txop = BG_OP_IN_NODIF_OUT_CRC;
                        *rxop = BG_OP_IN_CRC_OUT_NODIF;
+                       *txop = BG_OP_IN_NODIF_OUT_CRC;
                        break;
 
                case SCSI_PROT_READ_PASS:
                case SCSI_PROT_WRITE_PASS:
-                       *txop = BG_OP_IN_CSUM_OUT_CRC;
                        *rxop = BG_OP_IN_CRC_OUT_CSUM;
+                       *txop = BG_OP_IN_CSUM_OUT_CRC;
                        break;
 
                case SCSI_PROT_NORMAL:
@@ -1658,20 +1753,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_STRIP:
                case SCSI_PROT_WRITE_INSERT:
-                       *txop = BG_OP_IN_NODIF_OUT_CRC;
                        *rxop = BG_OP_IN_CRC_OUT_NODIF;
+                       *txop = BG_OP_IN_NODIF_OUT_CRC;
                        break;
 
                case SCSI_PROT_READ_PASS:
                case SCSI_PROT_WRITE_PASS:
-                       *txop = BG_OP_IN_CRC_OUT_CRC;
                        *rxop = BG_OP_IN_CRC_OUT_CRC;
+                       *txop = BG_OP_IN_CRC_OUT_CRC;
                        break;
 
                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
-                       *txop = BG_OP_IN_CRC_OUT_NODIF;
                        *rxop = BG_OP_IN_NODIF_OUT_CRC;
+                       *txop = BG_OP_IN_CRC_OUT_NODIF;
                        break;
 
                case SCSI_PROT_NORMAL:
@@ -1710,20 +1805,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
-                       *txop = BG_OP_IN_CRC_OUT_NODIF;
                        *rxop = BG_OP_IN_NODIF_OUT_CRC;
+                       *txop = BG_OP_IN_CRC_OUT_NODIF;
                        break;
 
                case SCSI_PROT_READ_STRIP:
                case SCSI_PROT_WRITE_INSERT:
-                       *txop = BG_OP_IN_NODIF_OUT_CSUM;
                        *rxop = BG_OP_IN_CSUM_OUT_NODIF;
+                       *txop = BG_OP_IN_NODIF_OUT_CSUM;
                        break;
 
                case SCSI_PROT_READ_PASS:
                case SCSI_PROT_WRITE_PASS:
-                       *txop = BG_OP_IN_CRC_OUT_CRC;
-                       *rxop = BG_OP_IN_CRC_OUT_CRC;
+                       *rxop = BG_OP_IN_CSUM_OUT_CRC;
+                       *txop = BG_OP_IN_CRC_OUT_CSUM;
                        break;
 
                case SCSI_PROT_NORMAL:
@@ -1735,20 +1830,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_STRIP:
                case SCSI_PROT_WRITE_INSERT:
-                       *txop = BG_OP_IN_NODIF_OUT_CSUM;
                        *rxop = BG_OP_IN_CSUM_OUT_NODIF;
+                       *txop = BG_OP_IN_NODIF_OUT_CSUM;
                        break;
 
                case SCSI_PROT_READ_PASS:
                case SCSI_PROT_WRITE_PASS:
-                       *txop = BG_OP_IN_CSUM_OUT_CRC;
-                       *rxop = BG_OP_IN_CRC_OUT_CSUM;
+                       *rxop = BG_OP_IN_CSUM_OUT_CSUM;
+                       *txop = BG_OP_IN_CSUM_OUT_CSUM;
                        break;
 
                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
-                       *txop = BG_OP_IN_CSUM_OUT_NODIF;
                        *rxop = BG_OP_IN_NODIF_OUT_CSUM;
+                       *txop = BG_OP_IN_CSUM_OUT_NODIF;
                        break;
 
                case SCSI_PROT_NORMAL:
@@ -1817,11 +1912,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-       rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+       rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
        if (rc) {
-               if (rc == BG_ERR_SWAP)
+               if (rc & BG_ERR_SWAP)
                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
-               if (rc == BG_ERR_CHECK)
+               if (rc & BG_ERR_CHECK)
                        checking = 0;
        }
 #endif
@@ -1964,11 +2059,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-       rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+       rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
        if (rc) {
-               if (rc == BG_ERR_SWAP)
+               if (rc & BG_ERR_SWAP)
                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
-               if (rc == BG_ERR_CHECK)
+               if (rc & BG_ERR_CHECK)
                        checking = 0;
        }
 #endif
@@ -2172,11 +2267,11 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-       rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+       rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
        if (rc) {
-               if (rc == BG_ERR_SWAP)
+               if (rc & BG_ERR_SWAP)
                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
-               if (rc == BG_ERR_CHECK)
+               if (rc & BG_ERR_CHECK)
                        checking = 0;
        }
 #endif
@@ -2312,11 +2407,11 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-       rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+       rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
        if (rc) {
-               if (rc == BG_ERR_SWAP)
+               if (rc & BG_ERR_SWAP)
                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
-               if (rc == BG_ERR_CHECK)
+               if (rc & BG_ERR_CHECK)
                        checking = 0;
        }
 #endif
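A note on the hunks above: lpfc_bg_err_inject() now returns a bitmask built from the BG_ERR_* flags (combined with |, e.g. BG_ERR_TGT | BG_ERR_CHECK), so its callers switch from equality tests to bit tests. The minimal stand-alone C sketch below uses made-up flag values (the real definitions live in the lpfc headers) to show why "rc == BG_ERR_CHECK" silently stops matching once results are OR-ed together, while "rc & BG_ERR_CHECK" still does.

    #include <stdio.h>

    /* Illustrative values only; the real BG_ERR_* definitions are in lpfc. */
    #define BG_ERR_INIT   0x01
    #define BG_ERR_TGT    0x02
    #define BG_ERR_SWAP   0x10
    #define BG_ERR_CHECK  0x20

    int main(void)
    {
            int rc = BG_ERR_TGT | BG_ERR_CHECK;     /* combined result, as in the patch */

            if (rc == BG_ERR_CHECK)                 /* old-style equality: never true here */
                    printf("equality test saw BG_ERR_CHECK\n");
            if (rc & BG_ERR_CHECK)                  /* bit test: still matches */
                    printf("bit test saw BG_ERR_CHECK\n");
            return 0;
    }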
@@ -2788,7 +2883,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                /* No error was reported - problem in FW? */
                cmd->result = ScsiResult(DID_ERROR, 0);
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9057 BLKGRD: no errors reported!\n");
+                       "9057 BLKGRD: Unknown error reported!\n");
        }
 
 out:
@@ -3460,6 +3555,37 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
        /* pick up SLI4 exhange busy status from HBA */
        lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
 
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       if (lpfc_cmd->prot_data_type) {
+               struct scsi_dif_tuple *src = NULL;
+
+               src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+               /*
+                * Used to restore any changes to protection
+                * data for error injection.
+                */
+               switch (lpfc_cmd->prot_data_type) {
+               case LPFC_INJERR_REFTAG:
+                       src->ref_tag =
+                               lpfc_cmd->prot_data;
+                       break;
+               case LPFC_INJERR_APPTAG:
+                       src->app_tag =
+                               (uint16_t)lpfc_cmd->prot_data;
+                       break;
+               case LPFC_INJERR_GUARD:
+                       src->guard_tag =
+                               (uint16_t)lpfc_cmd->prot_data;
+                       break;
+               default:
+                       break;
+               }
+
+               lpfc_cmd->prot_data = 0;
+               lpfc_cmd->prot_data_type = 0;
+               lpfc_cmd->prot_data_segment = NULL;
+       }
+#endif
        if (pnode && NLP_CHK_NODE_ACT(pnode))
                atomic_dec(&pnode->cmd_pending);
 
@@ -4061,15 +4187,6 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
                cmnd->result = err;
                goto out_fail_command;
        }
-       /*
-        * Do not let the mid-layer retry I/O too fast. If an I/O is retried
-        * without waiting a bit then indicate that the device is busy.
-        */
-       if (cmnd->retries &&
-           time_before(jiffies, (cmnd->jiffies_at_alloc +
-                                 msecs_to_jiffies(LPFC_RETRY_PAUSE *
-                                                  cmnd->retries))))
-               return SCSI_MLQUEUE_DEVICE_BUSY;
        ndlp = rdata->pnode;
 
        if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
@@ -4119,63 +4236,48 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
        if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
                if (vport->phba->cfg_enable_bg) {
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                               "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
-                               "str=%s\n",
-                               cmnd->cmnd[0], scsi_get_prot_op(cmnd),
-                               dif_op_str[scsi_get_prot_op(cmnd)]);
-                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                               "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
-                               "%02x %02x %02x %02x %02x\n",
-                               cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
-                               cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
-                               cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
-                               cmnd->cmnd[9]);
+                               "9033 BLKGRD: rcvd protected cmd:%02x op=%s "
+                               "guard=%s\n", cmnd->cmnd[0],
+                               dif_op_str[scsi_get_prot_op(cmnd)],
+                               dif_grd_str[scsi_host_get_guard(shost)]);
                        if (cmnd->cmnd[0] == READ_10)
                                lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9035 BLKGRD: READ @ sector %llu, "
-                                       "count %u\n",
+                                       "cnt %u, rpt %d\n",
                                        (unsigned long long)scsi_get_lba(cmnd),
-                                       blk_rq_sectors(cmnd->request));
+                                       blk_rq_sectors(cmnd->request),
+                                       (cmnd->cmnd[1]>>5));
                        else if (cmnd->cmnd[0] == WRITE_10)
                                lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9036 BLKGRD: WRITE @ sector %llu, "
-                                       "count %u cmd=%p\n",
+                                       "cnt %u, wpt %d\n",
                                        (unsigned long long)scsi_get_lba(cmnd),
                                        blk_rq_sectors(cmnd->request),
-                                       cmnd);
+                                       (cmnd->cmnd[1]>>5));
                }
 
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
        } else {
                if (vport->phba->cfg_enable_bg) {
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                                       "9038 BLKGRD: rcvd unprotected cmd:"
-                                       "%02x op:%02x str=%s\n",
-                                       cmnd->cmnd[0], scsi_get_prot_op(cmnd),
-                                       dif_op_str[scsi_get_prot_op(cmnd)]);
-                               lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                                       "9039 BLKGRD: CDB: %02x %02x %02x "
-                                       "%02x %02x %02x %02x %02x %02x %02x\n",
-                                       cmnd->cmnd[0], cmnd->cmnd[1],
-                                       cmnd->cmnd[2], cmnd->cmnd[3],
-                                       cmnd->cmnd[4], cmnd->cmnd[5],
-                                       cmnd->cmnd[6], cmnd->cmnd[7],
-                                       cmnd->cmnd[8], cmnd->cmnd[9]);
+                               "9038 BLKGRD: rcvd unprotected cmd:"
+                               "%02x op=%s guard=%s\n", cmnd->cmnd[0],
+                               dif_op_str[scsi_get_prot_op(cmnd)],
+                               dif_grd_str[scsi_host_get_guard(shost)]);
                        if (cmnd->cmnd[0] == READ_10)
                                lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9040 dbg: READ @ sector %llu, "
-                                       "count %u\n",
+                                       "cnt %u, rpt %d\n",
                                        (unsigned long long)scsi_get_lba(cmnd),
-                                        blk_rq_sectors(cmnd->request));
+                                        blk_rq_sectors(cmnd->request),
+                                       (cmnd->cmnd[1]>>5));
                        else if (cmnd->cmnd[0] == WRITE_10)
                                lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                                        "9041 dbg: WRITE @ sector %llu, "
-                                        "count %u cmd=%p\n",
-                                        (unsigned long long)scsi_get_lba(cmnd),
-                                        blk_rq_sectors(cmnd->request), cmnd);
-                       else
-                               lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-                                        "9042 dbg: parser not implemented\n");
+                                       "9041 dbg: WRITE @ sector %llu, "
+                                       "cnt %u, wpt %d\n",
+                                       (unsigned long long)scsi_get_lba(cmnd),
+                                       blk_rq_sectors(cmnd->request),
+                                       (cmnd->cmnd[1]>>5));
                }
                err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
        }
index 9075a08cf78155af9ee0a635453bbd6ff9512f31..21a2ffe67eacf998089d527b736ac8f63a634ffa 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -150,9 +150,18 @@ struct lpfc_scsi_buf {
        struct lpfc_iocbq cur_iocbq;
        wait_queue_head_t *waitq;
        unsigned long start_time;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+       /* Used to restore any changes to protection data for error injection */
+       void *prot_data_segment;
+       uint32_t prot_data;
+       uint32_t prot_data_type;
+#define        LPFC_INJERR_REFTAG      1
+#define        LPFC_INJERR_APPTAG      2
+#define        LPFC_INJERR_GUARD       3
+#endif
 };
 
 #define LPFC_SCSI_DMA_EXT_SIZE 264
 #define LPFC_BPL_SIZE          1024
-#define LPFC_RETRY_PAUSE       300
 #define MDAC_DIRECT_CMD                  0x22
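The prot_data* fields added to struct lpfc_scsi_buf above give the completion handler a place to undo the deliberate tag corruption done at submit time: the injection path records which tag was touched and its original value, and the lpfc_scsi_cmd_iocb_cmpl() hunk earlier writes it back once the I/O completes. A rough sketch of that save/restore pairing follows; the type and helper names are hypothetical, only the LPFC_INJERR_APPTAG idea is taken from the patch.

    /* Sketch only: hypothetical types showing the save/restore pairing. */
    struct dif_tuple { unsigned short guard_tag, app_tag; unsigned int ref_tag; };

    struct io_buf {
            void *prot_data_segment;      /* tuple that was modified       */
            unsigned int prot_data;       /* original value to restore     */
            unsigned int prot_data_type;  /* which field was touched       */
    };

    #define INJERR_APPTAG 2               /* mirrors LPFC_INJERR_APPTAG    */

    static void inject_apptag(struct io_buf *buf, struct dif_tuple *src)
    {
            buf->prot_data_type    = INJERR_APPTAG;
            buf->prot_data_segment = src;
            buf->prot_data         = src->app_tag;  /* remember the good value */
            src->app_tag           = 0xDEAD;        /* corrupt it on purpose   */
    }

    static void restore_on_completion(struct io_buf *buf)
    {
            struct dif_tuple *src = buf->prot_data_segment;

            if (buf->prot_data_type == INJERR_APPTAG)
                    src->app_tag = (unsigned short)buf->prot_data;
            buf->prot_data = 0;
            buf->prot_data_type = 0;
            buf->prot_data_segment = NULL;
    }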
index e0e4d8d18244a9402ccfa5647bd07bdde8ca8576..dbaf5b963bff2f0cd07c028ddedd5b1ff0bc0bcc 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -5578,8 +5578,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
                for (i = 0; i < count; i++)
                        phba->sli4_hba.rpi_ids[i] = base + i;
 
-               lpfc_sli4_node_prep(phba);
-
                /* VPIs. */
                count = phba->sli4_hba.max_cfg_param.max_vpi;
                base = phba->sli4_hba.max_cfg_param.vpi_base;
@@ -5613,6 +5611,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
                        rc = -ENOMEM;
                        goto free_vpi_ids;
                }
+               phba->sli4_hba.max_cfg_param.xri_used = 0;
+               phba->sli4_hba.xri_count = 0;
                phba->sli4_hba.xri_ids = kzalloc(count *
                                                 sizeof(uint16_t),
                                                 GFP_KERNEL);
@@ -6147,6 +6147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                rc = -ENODEV;
                goto out_free_mbox;
        }
+       lpfc_sli4_node_prep(phba);
 
        /* Create all the SLI4 queues */
        rc = lpfc_sli4_queue_create(phba);
@@ -7251,11 +7252,13 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
 
 out_not_finished:
        spin_lock_irqsave(&phba->hbalock, iflags);
-       mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
-       __lpfc_mbox_cmpl_put(phba, mboxq);
-       /* Release the token */
-       psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-       phba->sli.mbox_active = NULL;
+       if (phba->sli.mbox_active) {
+               mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+               __lpfc_mbox_cmpl_put(phba, mboxq);
+               /* Release the token */
+               psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+               phba->sli.mbox_active = NULL;
+       }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
 
        return MBX_NOT_FINISHED;
@@ -7743,6 +7746,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                        if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
                                *pcmd == ELS_CMD_SCR ||
                                *pcmd == ELS_CMD_FDISC ||
+                               *pcmd == ELS_CMD_LOGO ||
                                *pcmd == ELS_CMD_PLOGI)) {
                                bf_set(els_req64_sp, &wqe->els_req, 1);
                                bf_set(els_req64_sid, &wqe->els_req,
@@ -8385,6 +8389,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
                           struct sli4_wcqe_xri_aborted *axri)
 {
        struct lpfc_vport *vport;
+       uint32_t ext_status = 0;
 
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -8396,12 +8401,20 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
        vport = ndlp->vport;
        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                        "3116 Port generated FCP XRI ABORT event on "
-                       "vpi %d rpi %d xri x%x status 0x%x\n",
+                       "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
                        ndlp->vport->vpi, ndlp->nlp_rpi,
                        bf_get(lpfc_wcqe_xa_xri, axri),
-                       bf_get(lpfc_wcqe_xa_status, axri));
+                       bf_get(lpfc_wcqe_xa_status, axri),
+                       axri->parameter);
 
-       if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
+       /*
+        * Catch the ABTS protocol failure case.  Older OCe FW releases returned
+        * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
+        * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
+        */
+       ext_status = axri->parameter & WCQE_PARAM_MASK;
+       if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
+           ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
                lpfc_sli_abts_recover_port(vport, ndlp);
 }
 
@@ -9807,12 +9820,11 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
        unsigned long timeout;
 
        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
-       spin_unlock_irq(&phba->hbalock);
 
        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
-               spin_lock_irq(&phba->hbalock);
                /* Determine how long we might wait for the active mailbox
                 * command to be gracefully completed by firmware.
                 */
@@ -9831,7 +9843,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
                                 */
                                break;
                }
-       }
+       } else
+               spin_unlock_irq(&phba->hbalock);
+
        lpfc_sli_mbox_sys_flush(phba);
 }
 
@@ -13272,7 +13286,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
        LPFC_MBOXQ_t *mbox;
        uint32_t reqlen, alloclen, index;
        uint32_t mbox_tmo;
-       uint16_t rsrc_start, rsrc_size, els_xri_cnt;
+       uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
        uint16_t xritag_start = 0, lxri = 0;
        struct lpfc_rsrc_blks *rsrc_blk;
        int cnt, ttl_cnt, rc = 0;
@@ -13294,6 +13308,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
 
        cnt = 0;
        ttl_cnt = 0;
+       post_els_xri_cnt = els_xri_cnt;
        list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
                            list) {
                rsrc_start = rsrc_blk->rsrc_start;
@@ -13303,11 +13318,12 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
                                "3014 Working ELS Extent start %d, cnt %d\n",
                                rsrc_start, rsrc_size);
 
-               loop_cnt = min(els_xri_cnt, rsrc_size);
-               if (ttl_cnt + loop_cnt >= els_xri_cnt) {
-                       loop_cnt = els_xri_cnt - ttl_cnt;
-                       ttl_cnt = els_xri_cnt;
-               }
+               loop_cnt = min(post_els_xri_cnt, rsrc_size);
+               if (loop_cnt < post_els_xri_cnt) {
+                       post_els_xri_cnt -= loop_cnt;
+                       ttl_cnt += loop_cnt;
+               } else
+                       ttl_cnt += post_els_xri_cnt;
 
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!mbox)
@@ -14203,15 +14219,14 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
                 * field and RX_ID from ABTS for RX_ID field.
                 */
                bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
-               bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
        } else {
                /* ABTS sent by initiator to CT exchange, construction
                 * of BA_ACC will need to allocate a new XRI as for the
-                * XRI_TAG and RX_ID fields.
+                * XRI_TAG field.
                 */
                bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
-               bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
        }
+       bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
        bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
 
        /* Xmit CT abts response on exchange <xid> */
@@ -15042,6 +15057,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
        LPFC_MBOXQ_t *mboxq;
 
        phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
+       phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
index f2a2602e5c3528d8504f9033630be2403019c017..25cefc254b76409b414caed817cd9980c4fc786c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.29"
+#define LPFC_DRIVER_VERSION "8.3.30"
 #define LPFC_DRIVER_NAME               "lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME    "lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME    "lpfc:fp"
index 5e69f468535f243d200aaf1a6af31f4c8b9205cb..8a59a772fdf22841ead34c937dd92e4ccb20f749 100644 (file)
@@ -657,7 +657,7 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
                return;
 
        /* eat the loginfos associated with task aborts */
-       if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
+       if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
            0x31140000 || log_info == 0x31130000))
                return;
 
@@ -2060,12 +2060,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
 {
        int i = 0;
        char desc[16];
-       u8 revision;
        u32 iounit_pg1_flags;
        u32 bios_version;
 
        bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
-       pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
        strncpy(desc, ioc->manu_pg0.ChipName, 16);
        printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
           "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
@@ -2074,7 +2072,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
           (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
           (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
           ioc->facts.FWVersion.Word & 0x000000FF,
-          revision,
+          ioc->pdev->revision,
           (bios_version & 0xFF000000) >> 24,
           (bios_version & 0x00FF0000) >> 16,
           (bios_version & 0x0000FF00) >> 8,
index 7fceb899029ed990b7b19dde1c22491ad29a4dc5..3b9a28efea8281fe85b5c5c1e4757da60f5cf061 100644 (file)
@@ -1026,7 +1026,6 @@ _ctl_getiocinfo(void __user *arg)
 {
        struct mpt2_ioctl_iocinfo karg;
        struct MPT2SAS_ADAPTER *ioc;
-       u8 revision;
 
        if (copy_from_user(&karg, arg, sizeof(karg))) {
                printk(KERN_ERR "failure at %s:%d/%s()!\n",
@@ -1046,8 +1045,7 @@ _ctl_getiocinfo(void __user *arg)
                karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
        if (ioc->pfacts)
                karg.port_number = ioc->pfacts[0].PortNumber;
-       pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
-       karg.hw_rev = revision;
+       karg.hw_rev = ioc->pdev->revision;
        karg.pci_id = ioc->pdev->device;
        karg.subsystem_device = ioc->pdev->subsystem_device;
        karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
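Both mpt2sas hunks (and a qla4xxx hunk further down) drop an explicit config-space read of the revision ID in favour of the value the PCI core caches at enumeration time in pci_dev->revision. A minimal sketch of the difference, using a hypothetical probe routine:

    #include <linux/pci.h>

    /* Hypothetical probe: the cached field makes the config read redundant. */
    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            u8 rev;

            /* Older pattern: fetch a value the core has already read. */
            pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);

            /* Preferred: pdev->revision was filled in when the device was scanned. */
            dev_info(&pdev->dev, "revision 0x%02x (cached 0x%02x)\n",
                     rev, pdev->revision);
            return 0;
    }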
index 3619f6eeeeda2b44731d80c6e8bd6c063c434911..9d82ee5c10de657572df223a11beb123709b2be7 100644 (file)
@@ -2093,6 +2093,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        struct ata_task_resp *resp ;
        u32 *sata_resp;
        struct pm8001_device *pm8001_dev;
+       unsigned long flags;
 
        psataPayload = (struct sata_completion_resp *)(piomb + 4);
        status = le32_to_cpu(psataPayload->status);
@@ -2382,26 +2383,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                ts->stat = SAS_DEV_NO_RESPONSE;
                break;
        }
-       spin_lock_irq(&t->task_state_lock);
+       spin_lock_irqsave(&t->task_state_lock, flags);
        t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
        t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
        t->task_state_flags |= SAS_TASK_STATE_DONE;
        if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-               spin_unlock_irq(&t->task_state_lock);
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
                PM8001_FAIL_DBG(pm8001_ha,
                        pm8001_printk("task 0x%p done with io_status 0x%x"
                        " resp 0x%x stat 0x%x but aborted by upper layer!\n",
                        t, status, ts->resp, ts->stat));
                pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
        } else if (t->uldd_task) {
-               spin_unlock_irq(&t->task_state_lock);
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
                pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
                mb();/* ditto */
                spin_unlock_irq(&pm8001_ha->lock);
                t->task_done(t);
                spin_lock_irq(&pm8001_ha->lock);
        } else if (!t->uldd_task) {
-               spin_unlock_irq(&t->task_state_lock);
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
                pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
                mb();/*ditto*/
                spin_unlock_irq(&pm8001_ha->lock);
@@ -2423,6 +2424,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        u32 tag = le32_to_cpu(psataPayload->tag);
        u32 port_id = le32_to_cpu(psataPayload->port_id);
        u32 dev_id = le32_to_cpu(psataPayload->device_id);
+       unsigned long flags;
 
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
@@ -2593,26 +2595,26 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
                ts->stat = SAS_OPEN_TO;
                break;
        }
-       spin_lock_irq(&t->task_state_lock);
+       spin_lock_irqsave(&t->task_state_lock, flags);
        t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
        t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
        t->task_state_flags |= SAS_TASK_STATE_DONE;
        if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-               spin_unlock_irq(&t->task_state_lock);
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
                PM8001_FAIL_DBG(pm8001_ha,
                        pm8001_printk("task 0x%p done with io_status 0x%x"
                        " resp 0x%x stat 0x%x but aborted by upper layer!\n",
                        t, event, ts->resp, ts->stat));
                pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
        } else if (t->uldd_task) {
-               spin_unlock_irq(&t->task_state_lock);
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
                pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
                mb();/* ditto */
                spin_unlock_irq(&pm8001_ha->lock);
                t->task_done(t);
                spin_lock_irq(&pm8001_ha->lock);
        } else if (!t->uldd_task) {
-               spin_unlock_irq(&t->task_state_lock);
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
                pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
                mb();/*ditto*/
                spin_unlock_irq(&pm8001_ha->lock);
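The pm8001 changes above replace spin_lock_irq()/spin_unlock_irq() on the task-state lock with the irqsave variants. These completion handlers can run with interrupts already disabled (they are reached under the driver's main lock), and an unconditional spin_unlock_irq() would re-enable interrupts behind the caller's back; saving the previous state in a local flags word avoids that. A minimal sketch of the pattern, with a hypothetical helper:

    #include <linux/spinlock.h>

    /* Sketch: preserve the caller's IRQ state around a nested lock. */
    static void set_state_bits(spinlock_t *lock, unsigned int *state, unsigned int bits)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);       /* safe whether IRQs were on or off */
            *state |= bits;
            spin_unlock_irqrestore(lock, flags);  /* restore the previous IRQ state   */
    }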
index 7c9f28b7da7283c33741d6f6ae6377e7b71ead79..fc542a9bb106231ac9f4991388b2f9ee1e3d0d46 100644 (file)
@@ -431,9 +431,9 @@ static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
                                  mbox_sts_entry->out_mbox[6]));
 
                if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
-                       status = QLA_SUCCESS;
+                       status = ISCSI_PING_SUCCESS;
                else
-                       status = QLA_ERROR;
+                       status = mbox_sts_entry->out_mbox[6];
 
                data_size = sizeof(mbox_sts_entry->out_mbox);
 
index 3d9419460e0c5742177b290c6d31bbe0790a2212..ee47820c30a6591824cfa42abff426fb09114e7b 100644 (file)
@@ -834,7 +834,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
 {
        struct scsi_qla_host *ha = to_qla_host(shost);
-       struct iscsi_cls_host *ihost = shost_priv(shost);
+       struct iscsi_cls_host *ihost = shost->shost_data;
        uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
 
        qla4xxx_get_firmware_state(ha);
@@ -859,7 +859,7 @@ static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
 {
        struct scsi_qla_host *ha = to_qla_host(shost);
-       struct iscsi_cls_host *ihost = shost_priv(shost);
+       struct iscsi_cls_host *ihost = shost->shost_data;
        uint32_t state = ISCSI_PORT_STATE_DOWN;
 
        if (test_bit(AF_LINK_UP, &ha->flags))
@@ -3445,7 +3445,6 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
 {
        int status = 0;
-       uint8_t revision_id;
        unsigned long mem_base, mem_len, db_base, db_len;
        struct pci_dev *pdev = ha->pdev;
 
@@ -3457,10 +3456,9 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
                goto iospace_error_exit;
        }
 
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
        DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
-           __func__, revision_id));
-       ha->revision_id = revision_id;
+           __func__, pdev->revision));
+       ha->revision_id = pdev->revision;
 
        /* remap phys address */
        mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
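The qla4xxx hunks above also switch from shost_priv() to shost->shost_data when looking up the iSCSI class host. shost_priv() returns the LLD's private hostdata area (here the scsi_qla_host), while the transport class keeps its per-host structure in shost_data, sized via the transport template; reading the wrong area would report bogus port speed and state. A small sketch of the two accessors (hypothetical function, standard Scsi_Host layout assumed):

    #include <linux/printk.h>
    #include <scsi/scsi_host.h>

    /* Sketch: the two per-host data areas hanging off a Scsi_Host. */
    static void show_host_areas(struct Scsi_Host *shost)
    {
            void *lld_priv   = shost_priv(shost);   /* LLD hostdata, sized by the host template  */
            void *xport_data = shost->shost_data;   /* transport-class data, e.g. iscsi_cls_host */

            pr_info("hostdata=%p shost_data=%p\n", lld_priv, xport_data);
    }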
index ede9af9441417e6740aff7d68d4bf8aa0a48f45f..97b30c108e365f6d22e93fc4ca826b6eb46c5e34 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k15"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
index 591856131c4e14b96cf537a1d4e9c05bc235c6ee..182d5a57ab7468a6ddf5c6bfb684bf1396cb7f7b 100644 (file)
@@ -101,6 +101,7 @@ static const char * scsi_debug_version_date = "20100324";
 #define DEF_LBPU 0
 #define DEF_LBPWS 0
 #define DEF_LBPWS10 0
+#define DEF_LBPRZ 1
 #define DEF_LOWEST_ALIGNED 0
 #define DEF_NO_LUN_0   0
 #define DEF_NUM_PARTS   0
@@ -186,6 +187,7 @@ static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
 static unsigned int scsi_debug_lbpu = DEF_LBPU;
 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
+static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
@@ -775,10 +777,10 @@ static int inquiry_evpd_b1(unsigned char *arr)
        return 0x3c;
 }
 
-/* Thin provisioning VPD page (SBC-3) */
+/* Logical block provisioning VPD page (SBC-3) */
 static int inquiry_evpd_b2(unsigned char *arr)
 {
-       memset(arr, 0, 0x8);
+       memset(arr, 0, 0x4);
        arr[0] = 0;                     /* threshold exponent */
 
        if (scsi_debug_lbpu)
@@ -790,7 +792,10 @@ static int inquiry_evpd_b2(unsigned char *arr)
        if (scsi_debug_lbpws10)
                arr[1] |= 1 << 5;
 
-       return 0x8;
+       if (scsi_debug_lbprz)
+               arr[1] |= 1 << 2;
+
+       return 0x4;
 }
 
 #define SDEBUG_LONG_INQ_SZ 96
@@ -1071,8 +1076,11 @@ static int resp_readcap16(struct scsi_cmnd * scp,
        arr[13] = scsi_debug_physblk_exp & 0xf;
        arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
 
-       if (scsi_debug_lbp())
+       if (scsi_debug_lbp()) {
                arr[14] |= 0x80; /* LBPME */
+               if (scsi_debug_lbprz)
+                       arr[14] |= 0x40; /* LBPRZ */
+       }
 
        arr[15] = scsi_debug_lowest_aligned & 0xff;
 
@@ -2046,10 +2054,13 @@ static void unmap_region(sector_t lba, unsigned int len)
                block = lba + alignment;
                rem = do_div(block, granularity);
 
-               if (rem == 0 && lba + granularity <= end &&
-                   block < map_size)
+               if (rem == 0 && lba + granularity <= end && block < map_size) {
                        clear_bit(block, map_storep);
-
+                       if (scsi_debug_lbprz)
+                               memset(fake_storep +
+                                      block * scsi_debug_sector_size, 0,
+                                      scsi_debug_sector_size);
+               }
                lba += granularity - rem;
        }
 }
@@ -2731,6 +2742,7 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
+module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
@@ -2772,6 +2784,7 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
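The new scsi_debug lbprz parameter models the SBC-3 LBPRZ bit: when it is set (and reported via READ CAPACITY(16), as in the resp_readcap16 hunk), logical blocks that have been unmapped must read back as zeros, which is why unmap_region() now clears the backing store as well as the provisioning bitmap. A toy stand-alone C sketch of that invariant, with made-up sizes:

    #include <string.h>

    #define SECTOR_SIZE 512
    #define NUM_SECTORS 8

    static unsigned char store[NUM_SECTORS][SECTOR_SIZE];
    static unsigned char mapped[NUM_SECTORS];
    static int lbprz = 1;                   /* mirrors the new module parameter */

    /* Unmap one sector; with LBPRZ set the data must be zeroed too,
     * so a later read of the unmapped block returns zeros. */
    static void unmap_sector(unsigned int lba)
    {
            mapped[lba] = 0;
            if (lbprz)
                    memset(store[lba], 0, SECTOR_SIZE);
    }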
index fac31730addfd986990055c0baa8ef092fe9b797..1cf640e575da4567fb22516bc5eff3cf3ff0efe9 100644 (file)
@@ -1486,7 +1486,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
        struct iscsi_uevent *ev;
        int len = NLMSG_SPACE(sizeof(*ev) + data_size);
 
-       skb = alloc_skb(len, GFP_KERNEL);
+       skb = alloc_skb(len, GFP_NOIO);
        if (!skb) {
                printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n",
                       host_no, code);
@@ -1504,7 +1504,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
        if (data_size)
                memcpy((char *)ev + sizeof(*ev), data, data_size);
 
-       iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+       iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
 }
 EXPORT_SYMBOL_GPL(iscsi_post_host_event);
 
@@ -1517,7 +1517,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
        struct iscsi_uevent *ev;
        int len = NLMSG_SPACE(sizeof(*ev) + data_size);
 
-       skb = alloc_skb(len, GFP_KERNEL);
+       skb = alloc_skb(len, GFP_NOIO);
        if (!skb) {
                printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
                return;
@@ -1533,7 +1533,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
        ev->r.ping_comp.data_size = data_size;
        memcpy((char *)ev + sizeof(*ev), data, data_size);
 
-       iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+       iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
 }
 EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
 
index 09e3df42a4024925a69836d7c2a0d3a3149815e9..5ba5c2a9e8e987ffe95da22a179d42b18590570e 100644 (file)
@@ -664,7 +664,7 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
 }
 
 /**
- *     sd_init_command - build a scsi (read or write) command from
+ *     sd_prep_fn - build a scsi (read or write) command from
  *     information in the request structure.
  *     @SCpnt: pointer to mid-level's per scsi command structure that
  *     contains request and into which the scsi command is written
@@ -711,7 +711,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
        ret = BLKPREP_KILL;
 
        SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
-                                       "sd_init_command: block=%llu, "
+                                       "sd_prep_fn: block=%llu, "
                                        "count=%d\n",
                                        (unsigned long long)block,
                                        this_count));
@@ -1212,9 +1212,14 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
        retval = -ENODEV;
 
        if (scsi_block_when_processing_errors(sdp)) {
+               retval = scsi_autopm_get_device(sdp);
+               if (retval)
+                       goto out;
+
                sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
                retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
                                              sshdr);
+               scsi_autopm_put_device(sdp);
        }
 
        /* failed to execute TUR, assume media not present */
@@ -2644,8 +2649,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
  *     (e.g. /dev/sda). More precisely it is the block device major 
  *     and minor number that is chosen here.
  *
- *     Assume sd_attach is not re-entrant (for time being)
- *     Also think about sd_attach() and sd_remove() running coincidentally.
+ *     Assume sd_probe is not re-entrant (for time being)
+ *     Also think about sd_probe() and sd_remove() running coincidentally.
  **/
 static int sd_probe(struct device *dev)
 {
@@ -2660,7 +2665,7 @@ static int sd_probe(struct device *dev)
                goto out;
 
        SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
-                                       "sd_attach\n"));
+                                       "sd_probe\n"));
 
        error = -ENOMEM;
        sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
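The sd_check_events() hunk above brackets the TEST UNIT READY with scsi_autopm_get_device()/scsi_autopm_put_device(), so a runtime-suspended disk is resumed before it is polled for media and may suspend again afterwards. A rough sketch of the get/issue/put pattern; the helper names are hypothetical:

    #include <scsi/scsi_device.h>

    /* Hypothetical stand-in for the actual command, e.g. a TEST UNIT READY. */
    static int poll_command(struct scsi_device *sdev)
    {
            return 0;
    }

    /* Sketch: wake a runtime-suspended device around an ad-hoc command. */
    static int poll_device(struct scsi_device *sdev)
    {
            int ret = scsi_autopm_get_device(sdev);  /* resume + take a PM reference */

            if (ret)
                    return ret;
            ret = poll_command(sdev);
            scsi_autopm_put_device(sdev);            /* drop the reference again */
            return ret;
    }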
index a15f691f9d3423e270f17b1267e81ba1b105f252..e41998cb098ebbea2a6243bda198cc463f857a35 100644 (file)
@@ -1105,6 +1105,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
                                     STp->drv_buffer));
                }
                STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
+               if (!STp->drv_buffer && STp->immediate_filemark) {
+                       printk(KERN_WARNING
+                           "%s: non-buffered tape: disabling writing immediate filemarks\n",
+                           name);
+                       STp->immediate_filemark = 0;
+               }
        }
        st_release_request(SRpnt);
        SRpnt = NULL;
@@ -1313,6 +1319,8 @@ static int st_flush(struct file *filp, fl_owner_t id)
 
                memset(cmd, 0, MAX_COMMAND_SIZE);
                cmd[0] = WRITE_FILEMARKS;
+               if (STp->immediate_filemark)
+                       cmd[1] = 1;
                cmd[4] = 1 + STp->two_fm;
 
                SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
@@ -2180,8 +2188,9 @@ static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char
                       name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
                       STp->scsi2_logical);
                printk(KERN_INFO
-                      "%s:    sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate,
-                       STp->sili);
+                      "%s:    sysv: %d nowait: %d sili: %d nowait_filemark: %d\n",
+                      name, STm->sysv, STp->immediate, STp->sili,
+                      STp->immediate_filemark);
                printk(KERN_INFO "%s:    debugging: %d\n",
                       name, debugging);
        }
@@ -2223,6 +2232,7 @@ static int st_set_options(struct scsi_tape *STp, long options)
                        STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
                STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
                STp->immediate = (options & MT_ST_NOWAIT) != 0;
+               STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0;
                STm->sysv = (options & MT_ST_SYSV) != 0;
                STp->sili = (options & MT_ST_SILI) != 0;
                DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
@@ -2254,6 +2264,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
                        STp->scsi2_logical = value;
                if ((options & MT_ST_NOWAIT) != 0)
                        STp->immediate = value;
+               if ((options & MT_ST_NOWAIT_EOF) != 0)
+                       STp->immediate_filemark = value;
                if ((options & MT_ST_SYSV) != 0)
                        STm->sysv = value;
                if ((options & MT_ST_SILI) != 0)
@@ -2713,7 +2725,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
                cmd[0] = WRITE_FILEMARKS;
                if (cmd_in == MTWSM)
                        cmd[1] = 2;
-               if (cmd_in == MTWEOFI)
+               if (cmd_in == MTWEOFI ||
+                   (cmd_in == MTWEOF && STp->immediate_filemark))
                        cmd[1] |= 1;
                cmd[2] = (arg >> 16);
                cmd[3] = (arg >> 8);
@@ -4092,6 +4105,7 @@ static int st_probe(struct device *dev)
        tpnt->scsi2_logical = ST_SCSI2LOGICAL;
        tpnt->sili = ST_SILI;
        tpnt->immediate = ST_NOWAIT;
+       tpnt->immediate_filemark = 0;
        tpnt->default_drvbuffer = 0xff;         /* No forced buffering */
        tpnt->partition = 0;
        tpnt->new_partition = 0;
@@ -4477,6 +4491,7 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
        options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0;
        options |= STm->sysv ? MT_ST_SYSV : 0;
        options |= STp->immediate ? MT_ST_NOWAIT : 0;
+       options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0;
        options |= STp->sili ? MT_ST_SILI : 0;
 
        l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
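The st changes above add an MT_ST_NOWAIT_EOF option: when it is set (and the drive is buffered), MTWEOF and the filemark written by close() use the immediate bit, like MTWEOFI, so the caller does not wait for the marks to reach the medium. From user space the option would be toggled through the usual MTSETDRVBUFFER path; a minimal sketch, assuming the new MT_ST_NOWAIT_EOF bit is present in the installed mtio.h and that the tape is /dev/nst0:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mtio.h>

    int main(void)
    {
            struct mtop op = {
                    .mt_op    = MTSETDRVBUFFER,
                    /* MT_ST_NOWAIT_EOF is assumed to come from the updated mtio.h */
                    .mt_count = MT_ST_SETBOOLEANS | MT_ST_NOWAIT_EOF,
            };
            int fd = open("/dev/nst0", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, MTIOCTOP, &op) < 0) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }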
index f91a67c6d9686c07646538e3dbd5745a6ca7fefe..ea35632b986c555f17281e8dcf944b26ddbcc761 100644 (file)
@@ -120,6 +120,7 @@ struct scsi_tape {
        unsigned char c_algo;                   /* compression algorithm */
        unsigned char pos_unknown;                      /* after reset position unknown */
        unsigned char sili;                     /* use SILI when reading in variable b mode */
+       unsigned char immediate_filemark;       /* write filemark immediately */
        int tape_type;
        int long_timeout;       /* timeout for commands known to take long time */
 
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
new file mode 100644 (file)
index 0000000..8f27f9d
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# Kernel configuration file for the UFS Host Controller
+#
+# This code is based on drivers/scsi/ufs/Kconfig
+# Copyright (C) 2011 Samsung India Software Operations
+#
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+# USA.
+
+config SCSI_UFSHCD
+       tristate "Universal Flash Storage host controller driver"
+       depends on PCI && SCSI
+       ---help---
+       This is a generic driver which supports PCIe UFS Host controllers.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
new file mode 100644 (file)
index 0000000..adf7895
--- /dev/null
@@ -0,0 +1,2 @@
+# UFSHCD makefile
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
new file mode 100644 (file)
index 0000000..b207529
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufs.h
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#ifndef _UFS_H
+#define _UFS_H
+
+#define MAX_CDB_SIZE   16
+
+#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
+                       (((byte3) << 24) | ((byte2) << 16) |\
+                        ((byte1) << 8) | (byte0))
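+/*
+ * Illustrative example (arbitrary values, not taken from the driver):
+ * UPIU_HEADER_DWORD(0x01, 0x40, 2, 5) evaluates to 0x01400205; callers
+ * such as ufshcd_compose_upiu() wrap the result in cpu_to_be32() before
+ * storing it in the UPIU header.
+ */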
+
+/*
+ * UFS Protocol Information Unit related definitions
+ */
+
+/* Task management functions */
+enum {
+       UFS_ABORT_TASK          = 0x01,
+       UFS_ABORT_TASK_SET      = 0x02,
+       UFS_CLEAR_TASK_SET      = 0x04,
+       UFS_LOGICAL_RESET       = 0x08,
+       UFS_QUERY_TASK          = 0x80,
+       UFS_QUERY_TASK_SET      = 0x81,
+};
+
+/* UTP UPIU Transaction Codes Initiator to Target */
+enum {
+       UPIU_TRANSACTION_NOP_OUT        = 0x00,
+       UPIU_TRANSACTION_COMMAND        = 0x01,
+       UPIU_TRANSACTION_DATA_OUT       = 0x02,
+       UPIU_TRANSACTION_TASK_REQ       = 0x04,
+       UPIU_TRANSACTION_QUERY_REQ      = 0x26,
+};
+
+/* UTP UPIU Transaction Codes Target to Initiator */
+enum {
+       UPIU_TRANSACTION_NOP_IN         = 0x20,
+       UPIU_TRANSACTION_RESPONSE       = 0x21,
+       UPIU_TRANSACTION_DATA_IN        = 0x22,
+       UPIU_TRANSACTION_TASK_RSP       = 0x24,
+       UPIU_TRANSACTION_READY_XFER     = 0x31,
+       UPIU_TRANSACTION_QUERY_RSP      = 0x36,
+};
+
+/* UPIU Read/Write flags */
+enum {
+       UPIU_CMD_FLAGS_NONE     = 0x00,
+       UPIU_CMD_FLAGS_WRITE    = 0x20,
+       UPIU_CMD_FLAGS_READ     = 0x40,
+};
+
+/* UPIU Task Attributes */
+enum {
+       UPIU_TASK_ATTR_SIMPLE   = 0x00,
+       UPIU_TASK_ATTR_ORDERED  = 0x01,
+       UPIU_TASK_ATTR_HEADQ    = 0x02,
+       UPIU_TASK_ATTR_ACA      = 0x03,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum {
+       UPIU_QUERY_OPCODE_NOP           = 0x0,
+       UPIU_QUERY_OPCODE_READ_DESC     = 0x1,
+       UPIU_QUERY_OPCODE_WRITE_DESC    = 0x2,
+       UPIU_QUERY_OPCODE_READ_ATTR     = 0x3,
+       UPIU_QUERY_OPCODE_WRITE_ATTR    = 0x4,
+       UPIU_QUERY_OPCODE_READ_FLAG     = 0x5,
+       UPIU_QUERY_OPCODE_SET_FLAG      = 0x6,
+       UPIU_QUERY_OPCODE_CLEAR_FLAG    = 0x7,
+       UPIU_QUERY_OPCODE_TOGGLE_FLAG   = 0x8,
+};
+
+/* UTP Transfer Request Command Type (CT) */
+enum {
+       UPIU_COMMAND_SET_TYPE_SCSI      = 0x0,
+       UPIU_COMMAND_SET_TYPE_UFS       = 0x1,
+       UPIU_COMMAND_SET_TYPE_QUERY     = 0x2,
+};
+
+enum {
+       MASK_SCSI_STATUS        = 0xFF,
+       MASK_TASK_RESPONSE      = 0xFF00,
+       MASK_RSP_UPIU_RESULT    = 0xFFFF,
+};
+
+/* Task management service response */
+enum {
+       UPIU_TASK_MANAGEMENT_FUNC_COMPL         = 0x00,
+       UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
+       UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED     = 0x08,
+       UPIU_TASK_MANAGEMENT_FUNC_FAILED        = 0x05,
+       UPIU_INCORRECT_LOGICAL_UNIT_NO          = 0x09,
+};
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+       u32 dword_0;
+       u32 dword_1;
+       u32 dword_2;
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @data_transfer_len: Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+       struct utp_upiu_header header;
+       u32 exp_data_transfer_len;
+       u8 cdb[MAX_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_rsp - Response UPIU structure
+ * @header: UPIU header DW-0 to DW-2
+ * @residual_transfer_count: Residual transfer count DW-3
+ * @reserved: Reserved double words DW-4 to DW-7
+ * @sense_data_len: Sense data length DW-8 U16
+ * @sense_data: Sense data field DW-8 to DW-12
+ */
+struct utp_upiu_rsp {
+       struct utp_upiu_header header;
+       u32 residual_transfer_count;
+       u32 reserved[4];
+       u16 sense_data_len;
+       u8 sense_data[18];
+};
+
+/**
+ * struct utp_upiu_task_req - Task request UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @input_param1: Input parameter 1 DW-3
+ * @input_param2: Input parameter 2 DW-4
+ * @input_param3: Input parameter 3 DW-5
+ * @reserved: Reserved double words DW-6 to DW-7
+ */
+struct utp_upiu_task_req {
+       struct utp_upiu_header header;
+       u32 input_param1;
+       u32 input_param2;
+       u32 input_param3;
+       u32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_task_rsp - Task Management Response UPIU structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @output_param1: Output parameter 1 DW-3
+ * @output_param2: Output parameter 2 DW-4
+ * @reserved: Reserved double words DW-5 to DW-7
+ */
+struct utp_upiu_task_rsp {
+       struct utp_upiu_header header;
+       u32 output_param1;
+       u32 output_param2;
+       u32 reserved[3];
+};
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
new file mode 100644 (file)
index 0000000..52b96e8
--- /dev/null
@@ -0,0 +1,1978 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.c
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.1"
+
+enum {
+       UFSHCD_MAX_CHANNEL      = 0,
+       UFSHCD_MAX_ID           = 1,
+       UFSHCD_MAX_LUNS         = 8,
+       UFSHCD_CMD_PER_LUN      = 32,
+       UFSHCD_CAN_QUEUE        = 32,
+};
+
+/* UFSHCD states */
+enum {
+       UFSHCD_STATE_OPERATIONAL,
+       UFSHCD_STATE_RESET,
+       UFSHCD_STATE_ERROR,
+};
+
+/* Interrupt configuration options */
+enum {
+       UFSHCD_INT_DISABLE,
+       UFSHCD_INT_ENABLE,
+       UFSHCD_INT_CLEAR,
+};
+
+/* Interrupt aggregation options */
+enum {
+       INT_AGGR_RESET,
+       INT_AGGR_CONFIG,
+};
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+       u32 command;
+       u32 argument1;
+       u32 argument2;
+       u32 argument3;
+       int cmd_active;
+       int result;
+};
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @pdev: PCI device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+       void __iomem *mmio_base;
+
+       /* Virtual memory reference */
+       struct utp_transfer_cmd_desc *ucdl_base_addr;
+       struct utp_transfer_req_desc *utrdl_base_addr;
+       struct utp_task_req_desc *utmrdl_base_addr;
+
+       /* DMA memory reference */
+       dma_addr_t ucdl_dma_addr;
+       dma_addr_t utrdl_dma_addr;
+       dma_addr_t utmrdl_dma_addr;
+
+       struct Scsi_Host *host;
+       struct pci_dev *pdev;
+
+       struct ufshcd_lrb *lrb;
+
+       unsigned long outstanding_tasks;
+       unsigned long outstanding_reqs;
+
+       u32 capabilities;
+       int nutrs;
+       int nutmrs;
+       u32 ufs_version;
+
+       struct uic_command active_uic_cmd;
+       wait_queue_head_t ufshcd_tm_wait_queue;
+       unsigned long tm_condition;
+
+       u32 ufshcd_state;
+       u32 int_enable_mask;
+
+       /* Work Queues */
+       struct work_struct uic_workq;
+       struct work_struct feh_workq;
+
+       /* HBA Errors */
+       u32 errors;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+       struct utp_transfer_req_desc *utr_descriptor_ptr;
+       struct utp_upiu_cmd *ucd_cmd_ptr;
+       struct utp_upiu_rsp *ucd_rsp_ptr;
+       struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+       struct scsi_cmnd *cmd;
+       u8 *sense_buffer;
+       unsigned int sense_bufflen;
+       int scsi_status;
+
+       int command_type;
+       int task_tag;
+       unsigned int lun;
+};
+
+/**
+ * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
+ * @hba: Pointer to adapter instance
+ *
+ * Returns UFSHCI version supported by the controller
+ */
+static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
+{
+       return readl(hba->mmio_base + REG_UFS_VERSION);
+}
+
+/**
+ * ufshcd_is_device_present - Check if any device is connected to
+ *                           the host controller
+ * @reg_hcs: host controller status register value
+ *
+ * Returns 0 if device present, non-zero if no device detected
+ */
+static inline int ufshcd_is_device_present(u32 reg_hcs)
+{
+       return (DEVICE_PRESENT & reg_hcs) ? 0 : -1;
+}
+
+/**
+ * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
+ * @lrbp: pointer to local command reference block
+ *
+ * This function is used to get the OCS field from UTRD
+ * Returns the OCS field in the UTRD
+ */
+static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+{
+       return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
+ * @task_req_descp: pointer to utp_task_req_desc structure
+ *
+ * This function is used to get the OCS field from UTMRD
+ * Returns the OCS field in the UTMRD
+ */
+static inline int
+ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
+{
+       return task_req_descp->header.dword_2 & MASK_OCS;
+}
+
+/**
+ * ufshcd_get_tm_free_slot - get a free slot for task management request
+ * @hba: per adapter instance
+ *
+ * Returns the number of the first free slot, or the maximum number of
+ * task management request slots if the queue is full
+ */
+static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
+{
+       return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
+}
+
+/**
+ * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * @hba: per adapter instance
+ * @pos: position of the bit to be cleared
+ */
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+{
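+       /*
+        * Write 0 only to the bit being cleared; the 1s written to the
+        * remaining positions leave those UTRLCLR slots untouched.
+        */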
+       writel(~(1 << pos),
+               (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR));
+}
+
+/**
+ * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
+ * @reg: Register value of host controller status
+ *
+ * Returns integer, 0 on Success and positive value if failed
+ */
+static inline int ufshcd_get_lists_status(u32 reg)
+{
+       /*
+        * The mask 0xFF is for the following HCS register bits
+        * Bit          Description
+        *  0           Device Present
+        *  1           UTRLRDY
+        *  2           UTMRLRDY
+        *  3           UCRDY
+        *  4           HEI
+        *  5           DEI
+        * 6-7          reserved
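+        *
+        * Worked example (hypothetical value): reg = 0x0F means DP,
+        * UTRLRDY, UTMRLRDY and UCRDY are set while HEI/DEI are clear,
+        * so ((0x0F & 0xFF) >> 1) ^ 0x07 == 0x07 ^ 0x07 == 0 (ready).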
+        */
+       return (((reg) & (0xFF)) >> 1) ^ (0x07);
+}
+
+/**
+ * ufshcd_get_uic_cmd_result - Get the UIC command result
+ * @hba: Pointer to adapter instance
+ *
+ * This function gets the result of UIC command completion
+ * Returns 0 on success, non zero value on error
+ */
+static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
+{
+       return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) &
+              MASK_UIC_COMMAND_RESULT;
+}
+
+/**
+ * ufshcd_free_hba_memory - Free allocated memory for LRB, request
+ *                         and task lists
+ * @hba: Pointer to adapter instance
+ */
+static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
+{
+       size_t utmrdl_size, utrdl_size, ucdl_size;
+
+       kfree(hba->lrb);
+
+       if (hba->utmrdl_base_addr) {
+               utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+               dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+                                 hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
+       }
+
+       if (hba->utrdl_base_addr) {
+               utrdl_size =
+               (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+               dma_free_coherent(&hba->pdev->dev, utrdl_size,
+                                 hba->utrdl_base_addr, hba->utrdl_dma_addr);
+       }
+
+       if (hba->ucdl_base_addr) {
+               ucdl_size =
+               (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+               dma_free_coherent(&hba->pdev->dev, ucdl_size,
+                                 hba->ucdl_base_addr, hba->ucdl_dma_addr);
+       }
+}
+
+/**
+ * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function checks the response UPIU for valid transaction type in
+ * response field
+ * Returns 0 on success, non-zero on failure
+ */
+static inline int
+ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+       return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
+                UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
+}
+
+/**
+ * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * This function gets the response status and scsi_status from response UPIU
+ * Returns the response result code.
+ */
+static inline int
+ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+       return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
+}
+
+/**
+ * ufshcd_config_int_aggr - Configure interrupt aggregation values.
+ *             Currently there is no use case where we want to configure
+ *             interrupt aggregation dynamically. So to configure interrupt
+ *             aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
+ *             INT_AGGR_TIMEOUT_VALUE are used.
+ * @hba: per adapter instance
+ * @option: Interrupt aggregation option
+ */
+static inline void
+ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
+{
+       switch (option) {
+       case INT_AGGR_RESET:
+               writel((INT_AGGR_ENABLE |
+                       INT_AGGR_COUNTER_AND_TIMER_RESET),
+                       (hba->mmio_base +
+                        REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
+               break;
+       case INT_AGGR_CONFIG:
+               writel((INT_AGGR_ENABLE |
+                       INT_AGGR_PARAM_WRITE |
+                       INT_AGGR_COUNTER_THRESHOLD_VALUE |
+                       INT_AGGR_TIMEOUT_VALUE),
+                       (hba->mmio_base +
+                        REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
+               break;
+       }
+}
+
+/**
+ * ufshcd_enable_run_stop_reg - Enable run-stop registers
+ *                     When the run-stop registers are set to 1, it indicates
+ *                     to the host controller that it can process requests
+ * @hba: per adapter instance
+ */
+static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
+{
+       writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT,
+              (hba->mmio_base +
+               REG_UTP_TASK_REQ_LIST_RUN_STOP));
+       writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
+              (hba->mmio_base +
+               REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
+}
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+       writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+/**
+ * ufshcd_hba_start - Start controller initialization sequence
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_start(struct ufs_hba *hba)
+{
+       writel(CONTROLLER_ENABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+/**
+ * ufshcd_is_hba_active - Get controller state
+ * @hba: per adapter instance
+ *
+ * Returns zero if controller is active, 1 otherwise
+ */
+static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+{
+       return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+}
+
+/**
+ * ufshcd_send_command - Send SCSI or device management commands
+ * @hba: per adapter instance
+ * @task_tag: Task tag of the command
+ */
+static inline
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+{
+       __set_bit(task_tag, &hba->outstanding_reqs);
+       writel((1 << task_tag),
+              (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL));
+}
+
+/**
+ * ufshcd_copy_sense_data - Copy sense data in case of check condition
+ * @lrbp: pointer to local reference block
+ */
+static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
+{
+       int len;
+       if (lrbp->sense_buffer) {
+               len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
+               memcpy(lrbp->sense_buffer,
+                       lrbp->ucd_rsp_ptr->sense_data,
+                       min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+       }
+}
+
+/**
+ * ufshcd_hba_capabilities - Read controller capabilities
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
+{
+       hba->capabilities =
+               readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES);
+
+       /* nutrs and nutmrs are 0 based values */
+       hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
+       hba->nutmrs =
+       ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+}
+
+/**
+ * ufshcd_send_uic_command - Send UIC commands to unipro layers
+ * @hba: per adapter instance
+ * @uic_cmnd: UIC command
+ */
+static inline void
+ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
+{
+       /* Write Args */
+       writel(uic_cmnd->argument1,
+             (hba->mmio_base + REG_UIC_COMMAND_ARG_1));
+       writel(uic_cmnd->argument2,
+             (hba->mmio_base + REG_UIC_COMMAND_ARG_2));
+       writel(uic_cmnd->argument3,
+             (hba->mmio_base + REG_UIC_COMMAND_ARG_3));
+
+       /* Write UIC Cmd */
+       writel((uic_cmnd->command & COMMAND_OPCODE_MASK),
+              (hba->mmio_base + REG_UIC_COMMAND));
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+{
+       struct ufshcd_sg_entry *prd_table;
+       struct scatterlist *sg;
+       struct scsi_cmnd *cmd;
+       int sg_segments;
+       int i;
+
+       cmd = lrbp->cmd;
+       sg_segments = scsi_dma_map(cmd);
+       if (sg_segments < 0)
+               return sg_segments;
+
+       if (sg_segments) {
+               lrbp->utr_descriptor_ptr->prd_table_length =
+                                       cpu_to_le16((u16) (sg_segments));
+
+               prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+
+               scsi_for_each_sg(cmd, sg, sg_segments, i) {
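+                       /* size field is programmed as (byte count - 1) */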
+                       prd_table[i].size  =
+                               cpu_to_le32(((u32) sg_dma_len(sg))-1);
+                       prd_table[i].base_addr =
+                               cpu_to_le32(lower_32_bits(sg->dma_address));
+                       prd_table[i].upper_addr =
+                               cpu_to_le32(upper_32_bits(sg->dma_address));
+               }
+       } else {
+               lrbp->utr_descriptor_ptr->prd_table_length = 0;
+       }
+
+       return 0;
+}
+
+/**
+ * ufshcd_int_config - enable/disable interrupts
+ * @hba: per adapter instance
+ * @option: interrupt option
+ */
+static void ufshcd_int_config(struct ufs_hba *hba, u32 option)
+{
+       switch (option) {
+       case UFSHCD_INT_ENABLE:
+               writel(hba->int_enable_mask,
+                     (hba->mmio_base + REG_INTERRUPT_ENABLE));
+               break;
+       case UFSHCD_INT_DISABLE:
+               if (hba->ufs_version == UFSHCI_VERSION_10)
+                       writel(INTERRUPT_DISABLE_MASK_10,
+                             (hba->mmio_base + REG_INTERRUPT_ENABLE));
+               else
+                       writel(INTERRUPT_DISABLE_MASK_11,
+                              (hba->mmio_base + REG_INTERRUPT_ENABLE));
+               break;
+       }
+}
+
+/**
+ * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
+ * @lrbp: pointer to local reference block
+ */
+static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
+{
+       struct utp_transfer_req_desc *req_desc;
+       struct utp_upiu_cmd *ucd_cmd_ptr;
+       u32 data_direction;
+       u32 upiu_flags;
+
+       ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
+       req_desc = lrbp->utr_descriptor_ptr;
+
+       switch (lrbp->command_type) {
+       case UTP_CMD_TYPE_SCSI:
+               if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
+                       data_direction = UTP_DEVICE_TO_HOST;
+                       upiu_flags = UPIU_CMD_FLAGS_READ;
+               } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
+                       data_direction = UTP_HOST_TO_DEVICE;
+                       upiu_flags = UPIU_CMD_FLAGS_WRITE;
+               } else {
+                       data_direction = UTP_NO_DATA_TRANSFER;
+                       upiu_flags = UPIU_CMD_FLAGS_NONE;
+               }
+
+               /* Transfer request descriptor header fields */
+               req_desc->header.dword_0 =
+                       cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
+
+               /*
+                * assigning invalid value for command status. Controller
+                * updates OCS on command completion, with the command
+                * status
+                */
+               req_desc->header.dword_2 =
+                       cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+               /* command descriptor fields */
+               ucd_cmd_ptr->header.dword_0 =
+                       cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
+                                                     upiu_flags,
+                                                     lrbp->lun,
+                                                     lrbp->task_tag));
+               ucd_cmd_ptr->header.dword_1 =
+                       cpu_to_be32(
+                               UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
+                                                 0,
+                                                 0,
+                                                 0));
+
+               /* Total EHS length and Data segment length will be zero */
+               ucd_cmd_ptr->header.dword_2 = 0;
+
+               ucd_cmd_ptr->exp_data_transfer_len =
+                       cpu_to_be32(lrbp->cmd->transfersize);
+
+               memcpy(ucd_cmd_ptr->cdb,
+                      lrbp->cmd->cmnd,
+                      (min_t(unsigned short,
+                             lrbp->cmd->cmd_len,
+                             MAX_CDB_SIZE)));
+               break;
+       case UTP_CMD_TYPE_DEV_MANAGE:
+               /* For query function implementation */
+               break;
+       case UTP_CMD_TYPE_UFS:
+               /* For UFS native command implementation */
+               break;
+       } /* end of switch */
+}
+
+/**
+ * ufshcd_queuecommand - main entry point for SCSI requests
+ * @host: SCSI host pointer
+ * @cmd: command from SCSI Midlayer
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+       struct ufshcd_lrb *lrbp;
+       struct ufs_hba *hba;
+       unsigned long flags;
+       int tag;
+       int err = 0;
+
+       hba = shost_priv(host);
+
+       tag = cmd->request->tag;
+
+       if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+               err = SCSI_MLQUEUE_HOST_BUSY;
+               goto out;
+       }
+
+       lrbp = &hba->lrb[tag];
+
+       lrbp->cmd = cmd;
+       lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+       lrbp->sense_buffer = cmd->sense_buffer;
+       lrbp->task_tag = tag;
+       lrbp->lun = cmd->device->lun;
+
+       lrbp->command_type = UTP_CMD_TYPE_SCSI;
+
+       /* form UPIU before issuing the command */
+       ufshcd_compose_upiu(lrbp);
+       err = ufshcd_map_sg(lrbp);
+       if (err)
+               goto out;
+
+       /* issue command to the controller */
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       ufshcd_send_command(hba, tag);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+       return err;
+}
+
+/**
+ * ufshcd_memory_alloc - allocate memory for host memory space data structures
+ * @hba: per adapter instance
+ *
+ * 1. Allocate DMA memory for Command Descriptor array
+ *     Each command descriptor consist of Command UPIU, Response UPIU and PRDT
+ * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
+ * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
+ *     (UTMRDL)
+ * 4. Allocate memory for local reference block(lrb).
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_memory_alloc(struct ufs_hba *hba)
+{
+       size_t utmrdl_size, utrdl_size, ucdl_size;
+
+       /* Allocate memory for UTP command descriptors */
+       ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+       hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+                                                ucdl_size,
+                                                &hba->ucdl_dma_addr,
+                                                GFP_KERNEL);
+
+       /*
+        * UFSHCI requires UTP command descriptor to be 128 byte aligned.
+        * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
+        * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
+        * be aligned to 128 bytes as well
+        */
+       if (!hba->ucdl_base_addr ||
+           WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+               dev_err(&hba->pdev->dev,
+                       "Command Descriptor Memory allocation failed\n");
+               goto out;
+       }
+
+       /*
+        * Allocate memory for UTP Transfer descriptors
+        * UFSHCI requires 1024 byte alignment of UTRD
+        */
+       utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
+       hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+                                                 utrdl_size,
+                                                 &hba->utrdl_dma_addr,
+                                                 GFP_KERNEL);
+       if (!hba->utrdl_base_addr ||
+           WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+               dev_err(&hba->pdev->dev,
+                       "Transfer Descriptor Memory allocation failed\n");
+               goto out;
+       }
+
+       /*
+        * Allocate memory for UTP Task Management descriptors
+        * UFSHCI requires 1024 byte alignment of UTMRD
+        */
+       utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
+       hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+                                                  utmrdl_size,
+                                                  &hba->utmrdl_dma_addr,
+                                                  GFP_KERNEL);
+       if (!hba->utmrdl_base_addr ||
+           WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+               dev_err(&hba->pdev->dev,
+               "Task Management Descriptor Memory allocation failed\n");
+               goto out;
+       }
+
+       /* Allocate memory for local reference block */
+       hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
+       if (!hba->lrb) {
+               dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+               goto out;
+       }
+       return 0;
+out:
+       ufshcd_free_hba_memory(hba);
+       return -ENOMEM;
+}
+
+/**
+ * ufshcd_host_memory_configure - configure local reference block with
+ *                             memory offsets
+ * @hba: per adapter instance
+ *
+ * Configure Host memory space
+ * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
+ * address.
+ * 2. Update each UTRD with Response UPIU offset, Response UPIU length
+ * and PRDT offset.
+ * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
+ * into local reference block.
+ */
+static void ufshcd_host_memory_configure(struct ufs_hba *hba)
+{
+       struct utp_transfer_cmd_desc *cmd_descp;
+       struct utp_transfer_req_desc *utrdlp;
+       dma_addr_t cmd_desc_dma_addr;
+       dma_addr_t cmd_desc_element_addr;
+       u16 response_offset;
+       u16 prdt_offset;
+       int cmd_desc_size;
+       int i;
+
+       utrdlp = hba->utrdl_base_addr;
+       cmd_descp = hba->ucdl_base_addr;
+
+       response_offset =
+               offsetof(struct utp_transfer_cmd_desc, response_upiu);
+       prdt_offset =
+               offsetof(struct utp_transfer_cmd_desc, prd_table);
+
+       cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+       cmd_desc_dma_addr = hba->ucdl_dma_addr;
+
+       for (i = 0; i < hba->nutrs; i++) {
+               /* Configure UTRD with command descriptor base address */
+               cmd_desc_element_addr =
+                               (cmd_desc_dma_addr + (cmd_desc_size * i));
+               utrdlp[i].command_desc_base_addr_lo =
+                               cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
+               utrdlp[i].command_desc_base_addr_hi =
+                               cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
+
+               /* Response UPIU and PRDT offsets are in double words, hence >> 2 */
+               utrdlp[i].response_upiu_offset =
+                               cpu_to_le16((response_offset >> 2));
+               utrdlp[i].prd_table_offset =
+                               cpu_to_le16((prdt_offset >> 2));
+               utrdlp[i].response_upiu_length =
+                               cpu_to_le16(ALIGNED_UPIU_SIZE);
+
+               hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+               hba->lrb[i].ucd_cmd_ptr =
+                       (struct utp_upiu_cmd *)(cmd_descp + i);
+               hba->lrb[i].ucd_rsp_ptr =
+                       (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+               hba->lrb[i].ucd_prdt_ptr =
+                       (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+       }
+}
+
+/**
+ * ufshcd_dme_link_startup - Notify Unipro to perform link startup
+ * @hba: per adapter instance
+ *
+ * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
+ * in order to initialize the Unipro link startup procedure.
+ * Once the Unipro links are up, the device connected to the controller
+ * is detected.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dme_link_startup(struct ufs_hba *hba)
+{
+       struct uic_command *uic_cmd;
+       unsigned long flags;
+
+       /* check if controller is ready to accept UIC commands */
+       if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
+           UIC_COMMAND_READY) == 0x0) {
+               dev_err(&hba->pdev->dev,
+                       "Controller not ready to accept UIC commands\n");
+               return -EIO;
+       }
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+
+       /* form UIC command */
+       uic_cmd = &hba->active_uic_cmd;
+       uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
+       uic_cmd->argument1 = 0;
+       uic_cmd->argument2 = 0;
+       uic_cmd->argument3 = 0;
+
+       /* enable UIC related interrupts */
+       hba->int_enable_mask |= UIC_COMMAND_COMPL;
+       ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+
+       /* sending UIC commands to controller */
+       ufshcd_send_uic_command(hba, uic_cmd);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return 0;
+}
+
+/**
+ * ufshcd_make_hba_operational - Make UFS controller operational
+ * @hba: per adapter instance
+ *
+ * To bring UFS host controller to operational state,
+ * 1. Check if device is present
+ * 2. Configure run-stop-registers
+ * 3. Enable required interrupts
+ * 4. Configure interrupt aggregation
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_make_hba_operational(struct ufs_hba *hba)
+{
+       int err = 0;
+       u32 reg;
+
+       /* check if device present */
+       reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
+       if (ufshcd_is_device_present(reg)) {
+               dev_err(&hba->pdev->dev, "cc: Device not present\n");
+               err = -ENXIO;
+               goto out;
+       }
+
+       /*
+        * UCRDY, UTMRLDY and UTRLRDY bits must be 1
+        * DEI, HEI bits must be 0
+        */
+       if (!(ufshcd_get_lists_status(reg))) {
+               ufshcd_enable_run_stop_reg(hba);
+       } else {
+               dev_err(&hba->pdev->dev,
+                       "Host controller not ready to process requests\n");
+               err = -EIO;
+               goto out;
+       }
+
+       /* Enable required interrupts */
+       hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
+                                UIC_ERROR |
+                                UTP_TASK_REQ_COMPL |
+                                DEVICE_FATAL_ERROR |
+                                CONTROLLER_FATAL_ERROR |
+                                SYSTEM_BUS_FATAL_ERROR);
+       ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+
+       /* Configure interrupt aggregation */
+       ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
+
+       if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+               scsi_unblock_requests(hba->host);
+
+       hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+       scsi_scan_host(hba->host);
+out:
+       return err;
+}
+
+/**
+ * ufshcd_hba_enable - initialize the controller
+ * @hba: per adapter instance
+ *
+ * The controller resets itself and controller firmware initialization
+ * sequence kicks off. When controller is ready it will set
+ * the Host Controller Enable bit to 1.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_hba_enable(struct ufs_hba *hba)
+{
+       int retry;
+
+       /*
+        * The msleep(1) and msleep(5) calls in this function may sleep for
+        * up to ~20 ms because of timer granularity, but they were necessary
+        * to put the UFS FPGA into reset mode during development and testing
+        * of this driver. msleep can be changed to mdelay and the retry count
+        * can be reduced based on the controller.
+        */
+       if (!ufshcd_is_hba_active(hba)) {
+
+               /* change controller state to "reset state" */
+               ufshcd_hba_stop(hba);
+
+               /*
+                * This delay is based on the testing done with UFS host
+                * controller FPGA. The delay can be changed based on the
+                * host controller used.
+                */
+               msleep(5);
+       }
+
+       /* start controller initialization sequence */
+       ufshcd_hba_start(hba);
+
+       /*
+        * To initialize a UFS host controller HCE bit must be set to 1.
+        * During initialization the HCE bit value changes from 1->0->1.
+        * When the host controller completes initialization sequence
+        * it sets the value of HCE bit to 1. The same HCE bit is read back
+        * to check if the controller has completed initialization sequence.
+        * So without this delay, the value HCE = 1 set by the previous
+        * instruction might simply be read back.
+        * This delay can be changed based on the controller.
+        */
+       msleep(1);
+
+       /* wait for the host controller to complete initialization */
+       retry = 10;
+       while (ufshcd_is_hba_active(hba)) {
+               if (retry) {
+                       retry--;
+               } else {
+                       dev_err(&hba->pdev->dev,
+                               "Controller enable failed\n");
+                       return -EIO;
+               }
+               msleep(5);
+       }
+       return 0;
+}
+
+/**
+ * ufshcd_initialize_hba - start the initialization process
+ * @hba: per adapter instance
+ *
+ * 1. Enable the controller via ufshcd_hba_enable.
+ * 2. Program the Transfer Request List Address with the starting address of
+ * UTRDL.
+ * 3. Program the Task Management Request List Address with starting address
+ * of UTMRDL.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int ufshcd_initialize_hba(struct ufs_hba *hba)
+{
+       if (ufshcd_hba_enable(hba))
+               return -EIO;
+
+       /* Configure UTRL and UTMRL base address registers */
+       writel(lower_32_bits(hba->utrdl_dma_addr),
+              (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
+       writel(upper_32_bits(hba->utrdl_dma_addr),
+              (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
+       writel(lower_32_bits(hba->utmrdl_dma_addr),
+              (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
+       writel(upper_32_bits(hba->utmrdl_dma_addr),
+              (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
+
+       /* Initialize unipro link startup procedure */
+       return ufshcd_dme_link_startup(hba);
+}
+
+/**
+ * ufshcd_do_reset - reset the host controller
+ * @hba: per adapter instance
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_do_reset(struct ufs_hba *hba)
+{
+       struct ufshcd_lrb *lrbp;
+       unsigned long flags;
+       int tag;
+
+       /* block commands from midlayer */
+       scsi_block_requests(hba->host);
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->ufshcd_state = UFSHCD_STATE_RESET;
+
+       /* send controller to reset state */
+       ufshcd_hba_stop(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /* abort outstanding commands */
+       for (tag = 0; tag < hba->nutrs; tag++) {
+               if (test_bit(tag, &hba->outstanding_reqs)) {
+                       lrbp = &hba->lrb[tag];
+                       scsi_dma_unmap(lrbp->cmd);
+                       lrbp->cmd->result = DID_RESET << 16;
+                       lrbp->cmd->scsi_done(lrbp->cmd);
+                       lrbp->cmd = NULL;
+               }
+       }
+
+       /* clear outstanding request/task bit maps */
+       hba->outstanding_reqs = 0;
+       hba->outstanding_tasks = 0;
+
+       /* start the initialization process */
+       if (ufshcd_initialize_hba(hba)) {
+               dev_err(&hba->pdev->dev,
+                       "Reset: Controller initialization failed\n");
+               return FAILED;
+       }
+       return SUCCESS;
+}
+
+/**
+ * ufshcd_slave_alloc - handle initial SCSI device configurations
+ * @sdev: pointer to SCSI device
+ *
+ * Returns success
+ */
+static int ufshcd_slave_alloc(struct scsi_device *sdev)
+{
+       struct ufs_hba *hba;
+
+       hba = shost_priv(sdev->host);
+       sdev->tagged_supported = 1;
+
+       /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
+       sdev->use_10_for_ms = 1;
+       scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
+
+       /*
+        * Inform SCSI Midlayer that the LUN queue depth is the same as the
+        * controller queue depth. If a LUN queue depth is less than the
+        * controller queue depth and if the LUN reports
+        * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
+        * with scsi_adjust_queue_depth.
+        */
+       scsi_activate_tcq(sdev, hba->nutrs);
+       return 0;
+}
+
+/**
+ * ufshcd_slave_destroy - remove SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static void ufshcd_slave_destroy(struct scsi_device *sdev)
+{
+       struct ufs_hba *hba;
+
+       hba = shost_priv(sdev->host);
+       scsi_deactivate_tcq(sdev, hba->nutrs);
+}
+
+/**
+ * ufshcd_task_req_compl - handle task management request completion
+ * @hba: per adapter instance
+ * @index: index of the completed request
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
+{
+       struct utp_task_req_desc *task_req_descp;
+       struct utp_upiu_task_rsp *task_rsp_upiup;
+       unsigned long flags;
+       int ocs_value;
+       int task_result;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+
+       /* Clear completed tasks from outstanding_tasks */
+       __clear_bit(index, &hba->outstanding_tasks);
+
+       task_req_descp = hba->utmrdl_base_addr;
+       ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
+
+       if (ocs_value == OCS_SUCCESS) {
+               task_rsp_upiup = (struct utp_upiu_task_rsp *)
+                               task_req_descp[index].task_rsp_upiu;
+               task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
+               task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
+
+               if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
+                   task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
+                       task_result = FAILED;
+       } else {
+               task_result = FAILED;
+               dev_err(&hba->pdev->dev,
+                       "trc: Invalid ocs = %x\n", ocs_value);
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return task_result;
+}
+
+/**
+ * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
+ *                           SAM_STAT_TASK_SET_FULL SCSI command status.
+ * @cmd: pointer to SCSI command
+ */
+static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
+{
+       struct ufs_hba *hba;
+       int i;
+       int lun_qdepth = 0;
+
+       hba = shost_priv(cmd->device->host);
+
+       /*
+        * LUN queue depth can be obtained by counting outstanding commands
+        * on the LUN.
+        */
+       for (i = 0; i < hba->nutrs; i++) {
+               if (test_bit(i, &hba->outstanding_reqs)) {
+
+                       /*
+                        * Check if the outstanding command belongs
+                        * to the LUN which reported SAM_STAT_TASK_SET_FULL.
+                        */
+                       if (cmd->device->lun == hba->lrb[i].lun)
+                               lun_qdepth++;
+               }
+       }
+
+       /*
+        * LUN queue depth will be total outstanding commands, except the
+        * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
+        */
+       scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
+}
+
+/**
+ * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
+ * @lrb: pointer to local reference block of completed command
+ * @scsi_status: SCSI command status
+ *
+ * Returns value based on SCSI command status
+ */
+static inline int
+ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
+{
+       int result = 0;
+
+       switch (scsi_status) {
+       case SAM_STAT_GOOD:
+               result |= DID_OK << 16 |
+                         COMMAND_COMPLETE << 8 |
+                         SAM_STAT_GOOD;
+               break;
+       case SAM_STAT_CHECK_CONDITION:
+               result |= DID_OK << 16 |
+                         COMMAND_COMPLETE << 8 |
+                         SAM_STAT_CHECK_CONDITION;
+               ufshcd_copy_sense_data(lrbp);
+               break;
+       case SAM_STAT_BUSY:
+               result |= SAM_STAT_BUSY;
+               break;
+       case SAM_STAT_TASK_SET_FULL:
+
+               /*
+                * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
+                * depth needs to be adjusted to the exact number of
+                * outstanding commands the LUN can handle at any given time.
+                */
+               ufshcd_adjust_lun_qdepth(lrbp->cmd);
+               result |= SAM_STAT_TASK_SET_FULL;
+               break;
+       case SAM_STAT_TASK_ABORTED:
+               result |= SAM_STAT_TASK_ABORTED;
+               break;
+       default:
+               result |= DID_ERROR << 16;
+               break;
+       } /* end of switch */
+
+       return result;
+}
+
+/**
+ * ufshcd_transfer_rsp_status - Get overall status of the response
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block of completed command
+ *
+ * Returns result of the command to notify SCSI midlayer
+ */
+static inline int
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+       int result = 0;
+       int scsi_status;
+       int ocs;
+
+       /* overall command status of utrd */
+       ocs = ufshcd_get_tr_ocs(lrbp);
+
+       switch (ocs) {
+       case OCS_SUCCESS:
+
+               /* check if the returned transfer response is valid */
+               result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
+               if (result) {
+                       dev_err(&hba->pdev->dev,
+                               "Invalid response = %x\n", result);
+                       break;
+               }
+
+               /*
+                * get the response UPIU result to extract
+                * the SCSI command status
+                */
+               result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+               /*
+                * get the result based on SCSI status response
+                * to notify the SCSI midlayer of the command status
+                */
+               scsi_status = result & MASK_SCSI_STATUS;
+               result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+               break;
+       case OCS_ABORTED:
+               result |= DID_ABORT << 16;
+               break;
+       case OCS_INVALID_CMD_TABLE_ATTR:
+       case OCS_INVALID_PRDT_ATTR:
+       case OCS_MISMATCH_DATA_BUF_SIZE:
+       case OCS_MISMATCH_RESP_UPIU_SIZE:
+       case OCS_PEER_COMM_FAILURE:
+       case OCS_FATAL_ERROR:
+       default:
+               result |= DID_ERROR << 16;
+               dev_err(&hba->pdev->dev,
+               "OCS error from controller = %x\n", ocs);
+               break;
+       } /* end of switch */
+
+       return result;
+}
+
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+       struct ufshcd_lrb *lrb;
+       unsigned long completed_reqs;
+       u32 tr_doorbell;
+       int result;
+       int index;
+
+       lrb = hba->lrb;
+       tr_doorbell =
+               readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL);
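+       /*
+        * A doorbell bit is cleared by the controller when the request
+        * completes, so XOR-ing the doorbell with outstanding_reqs gives
+        * the set of requests that have completed.
+        */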
+       completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+       for (index = 0; index < hba->nutrs; index++) {
+               if (test_bit(index, &completed_reqs)) {
+
+                       result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
+
+                       if (lrb[index].cmd) {
+                               scsi_dma_unmap(lrb[index].cmd);
+                               lrb[index].cmd->result = result;
+                               lrb[index].cmd->scsi_done(lrb[index].cmd);
+
+                               /* Mark completed command as NULL in LRB */
+                               lrb[index].cmd = NULL;
+                       }
+               } /* end of if */
+       } /* end of for */
+
+       /* clear corresponding bits of completed commands */
+       hba->outstanding_reqs ^= completed_reqs;
+
+       /* Reset interrupt aggregation counters */
+       ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+}
+
+/**
+ * ufshcd_uic_cc_handler - handle UIC command completion
+ * @work: pointer to a work queue structure
+ */
+static void ufshcd_uic_cc_handler(struct work_struct *work)
+{
+       struct ufs_hba *hba;
+
+       hba = container_of(work, struct ufs_hba, uic_workq);
+
+       if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
+           !(ufshcd_get_uic_cmd_result(hba))) {
+
+               if (ufshcd_make_hba_operational(hba))
+                       dev_err(&hba->pdev->dev,
+                               "cc: hba not in operational state\n");
+               return;
+       }
+}
+
+/**
+ * ufshcd_fatal_err_handler - handle fatal errors
+ * @work: pointer to a work queue structure
+ */
+static void ufshcd_fatal_err_handler(struct work_struct *work)
+{
+       struct ufs_hba *hba;
+       hba = container_of(work, struct ufs_hba, feh_workq);
+
+       /* check if reset is already in progress */
+       if (hba->ufshcd_state != UFSHCD_STATE_RESET)
+               ufshcd_do_reset(hba);
+}
+
+/**
+ * ufshcd_err_handler - Check for fatal errors
+ * @hba: per adapter instance
+ */
+static void ufshcd_err_handler(struct ufs_hba *hba)
+{
+       u32 reg;
+
+       if (hba->errors & INT_FATAL_ERRORS)
+               goto fatal_eh;
+
+       if (hba->errors & UIC_ERROR) {
+
+               reg = readl(hba->mmio_base +
+                           REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+               if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+                       goto fatal_eh;
+       }
+       return;
+fatal_eh:
+       hba->ufshcd_state = UFSHCD_STATE_ERROR;
+       schedule_work(&hba->feh_workq);
+}
+
+/**
+ * ufshcd_tmc_handler - handle task management function completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_tmc_handler(struct ufs_hba *hba)
+{
+       u32 tm_doorbell;
+
+       tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL);
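+       /* tasks issued but no longer pending in the doorbell have completed */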
+       hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
+       wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
+}
+
+/**
+ * ufshcd_sl_intr - Interrupt service routine
+ * @hba: per adapter instance
+ * @intr_status: contains interrupts generated by the controller
+ */
+static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+{
+       hba->errors = UFSHCD_ERROR_MASK & intr_status;
+       if (hba->errors)
+               ufshcd_err_handler(hba);
+
+       if (intr_status & UIC_COMMAND_COMPL)
+               schedule_work(&hba->uic_workq);
+
+       if (intr_status & UTP_TASK_REQ_COMPL)
+               ufshcd_tmc_handler(hba);
+
+       if (intr_status & UTP_TRANSFER_REQ_COMPL)
+               ufshcd_transfer_req_compl(hba);
+}
+
+/**
+ * ufshcd_intr - Main interrupt service routine
+ * @irq: irq number
+ * @__hba: pointer to adapter instance
+ *
+ * Returns IRQ_HANDLED if a valid interrupt was serviced,
+ *        IRQ_NONE otherwise
+ */
+static irqreturn_t ufshcd_intr(int irq, void *__hba)
+{
+       u32 intr_status;
+       irqreturn_t retval = IRQ_NONE;
+       struct ufs_hba *hba = __hba;
+
+       spin_lock(hba->host->host_lock);
+       intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS);
+
+       if (intr_status) {
+               ufshcd_sl_intr(hba, intr_status);
+
+               /* If UFSHCI 1.0 then clear interrupt status register */
+               if (hba->ufs_version == UFSHCI_VERSION_10)
+                       writel(intr_status,
+                              (hba->mmio_base + REG_INTERRUPT_STATUS));
+               retval = IRQ_HANDLED;
+       }
+       spin_unlock(hba->host->host_lock);
+       return retval;
+}
+
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ * @tm_function: task management function opcode
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int
+ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+                   struct ufshcd_lrb *lrbp,
+                   u8 tm_function)
+{
+       struct utp_task_req_desc *task_req_descp;
+       struct utp_upiu_task_req *task_req_upiup;
+       struct Scsi_Host *host;
+       unsigned long flags;
+       int free_slot = 0;
+       int err;
+
+       host = hba->host;
+
+       spin_lock_irqsave(host->host_lock, flags);
+
+       /* If task management queue is full */
+       free_slot = ufshcd_get_tm_free_slot(hba);
+       if (free_slot >= hba->nutmrs) {
+               spin_unlock_irqrestore(host->host_lock, flags);
+               dev_err(&hba->pdev->dev, "Task management queue full\n");
+               err = FAILED;
+               goto out;
+       }
+
+       task_req_descp = hba->utmrdl_base_addr;
+       task_req_descp += free_slot;
+
+       /* Configure task request descriptor */
+       task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+       task_req_descp->header.dword_2 =
+                       cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+       /* Configure task request UPIU */
+       task_req_upiup =
+               (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+       task_req_upiup->header.dword_0 =
+               cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+                                             lrbp->lun, lrbp->task_tag));
+       task_req_upiup->header.dword_1 =
+               cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
+
+       task_req_upiup->input_param1 = lrbp->lun;
+       task_req_upiup->input_param1 =
+               cpu_to_be32(task_req_upiup->input_param1);
+       task_req_upiup->input_param2 = lrbp->task_tag;
+       task_req_upiup->input_param2 =
+               cpu_to_be32(task_req_upiup->input_param2);
+
+       /* send command to the controller */
+       __set_bit(free_slot, &hba->outstanding_tasks);
+       writel((1 << free_slot),
+              (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL));
+
+       spin_unlock_irqrestore(host->host_lock, flags);
+
+       /* wait until the task management command is completed */
+       err = wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
+                       (test_bit(free_slot, &hba->tm_condition) != 0),
+                       60 * HZ);
+       if (!err) {
+               dev_err(&hba->pdev->dev,
+                       "Task management command timed-out\n");
+               err = FAILED;
+               goto out;
+       }
+       clear_bit(free_slot, &hba->tm_condition);
+       return ufshcd_task_req_compl(hba, free_slot);
+out:
+       return err;
+}
+
+/**
+ * ufshcd_device_reset - reset device and abort all the pending commands
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+{
+       struct Scsi_Host *host;
+       struct ufs_hba *hba;
+       unsigned int tag;
+       u32 pos;
+       int err;
+
+       host = cmd->device->host;
+       hba = shost_priv(host);
+       tag = cmd->request->tag;
+
+       err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
+       if (err)
+               goto out;
+
+       for (pos = 0; pos < hba->nutrs; pos++) {
+               if (test_bit(pos, &hba->outstanding_reqs) &&
+                   (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
+
+                       /* clear the respective UTRLCLR register bit */
+                       ufshcd_utrl_clear(hba, pos);
+
+                       clear_bit(pos, &hba->outstanding_reqs);
+
+                       if (hba->lrb[pos].cmd) {
+                               scsi_dma_unmap(hba->lrb[pos].cmd);
+                               hba->lrb[pos].cmd->result =
+                                               DID_ABORT << 16;
+                               hba->lrb[pos].cmd->scsi_done(hba->lrb[pos].cmd);
+                               hba->lrb[pos].cmd = NULL;
+                       }
+               }
+       } /* end of for */
+out:
+       return err;
+}
+
+/**
+ * ufshcd_host_reset - Main reset function registered with scsi layer
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_host_reset(struct scsi_cmnd *cmd)
+{
+       struct ufs_hba *hba;
+
+       hba = shost_priv(cmd->device->host);
+
+       if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+               return SUCCESS;
+
+       return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED;
+}
+
+/**
+ * ufshcd_abort - abort a specific command
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_abort(struct scsi_cmnd *cmd)
+{
+       struct Scsi_Host *host;
+       struct ufs_hba *hba;
+       unsigned long flags;
+       unsigned int tag;
+       int err;
+
+       host = cmd->device->host;
+       hba = shost_priv(host);
+       tag = cmd->request->tag;
+
+       spin_lock_irqsave(host->host_lock, flags);
+
+       /* check if command is still pending */
+       if (!(test_bit(tag, &hba->outstanding_reqs))) {
+               err = FAILED;
+               spin_unlock_irqrestore(host->host_lock, flags);
+               goto out;
+       }
+       spin_unlock_irqrestore(host->host_lock, flags);
+
+       err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
+       if (err)
+               goto out;
+
+       scsi_dma_unmap(cmd);
+
+       spin_lock_irqsave(host->host_lock, flags);
+
+       /* clear the respective UTRLCLR register bit */
+       ufshcd_utrl_clear(hba, tag);
+
+       __clear_bit(tag, &hba->outstanding_reqs);
+       hba->lrb[tag].cmd = NULL;
+       spin_unlock_irqrestore(host->host_lock, flags);
+out:
+       return err;
+}
+
+static struct scsi_host_template ufshcd_driver_template = {
+       .module                 = THIS_MODULE,
+       .name                   = UFSHCD,
+       .proc_name              = UFSHCD,
+       .queuecommand           = ufshcd_queuecommand,
+       .slave_alloc            = ufshcd_slave_alloc,
+       .slave_destroy          = ufshcd_slave_destroy,
+       .eh_abort_handler       = ufshcd_abort,
+       .eh_device_reset_handler = ufshcd_device_reset,
+       .eh_host_reset_handler  = ufshcd_host_reset,
+       .this_id                = -1,
+       .sg_tablesize           = SG_ALL,
+       .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
+       .can_queue              = UFSHCD_CAN_QUEUE,
+};
+
+/**
+ * ufshcd_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_shutdown(struct pci_dev *pdev)
+{
+       ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       /*
+        * TODO:
+        * 1. Block SCSI requests from SCSI midlayer
+        * 2. Change the internal driver state to non operational
+        * 3. Set UTRLRSR and UTMRLRSR bits to zero
+        * 4. Wait until outstanding commands are completed
+        * 5. Set HCE to zero to send the UFS host controller to reset state
+        */
+
+       return -ENOSYS;
+}
+
+/**
+ * ufshcd_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_resume(struct pci_dev *pdev)
+{
+       /*
+        * TODO:
+        * 1. Set HCE to 1, to start the UFS host controller
+        * initialization process
+        * 2. Set UTRLRSR and UTMRLRSR bits to 1
+        * 3. Change the internal driver state to operational
+        * 4. Unblock SCSI requests from SCSI midlayer
+        */
+
+       return -ENOSYS;
+}
+#endif /* CONFIG_PM */
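
A minimal sketch of how the suspend TODO above could be filled in, reusing only names that already appear in this patch (ufshcd_hba_stop(), UFSHCD_STATE_RESET, the UTRLRSR/UTMRLRSR run-stop registers from ufshci.h) plus the stock scsi_block_requests() and msleep() kernel helpers; the function name ufshcd_suspend_sketch is hypothetical, this is not the driver's implementation, and the resume path would mirror these steps in reverse:

static int ufshcd_suspend_sketch(struct ufs_hba *hba)
{
        /* 1. block further SCSI requests from the midlayer */
        scsi_block_requests(hba->host);

        /* 2. mark the driver state non-operational */
        hba->ufshcd_state = UFSHCD_STATE_RESET;

        /* 3. set UTRLRSR and UTMRLRSR to zero to stop the request lists */
        writel(0, hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
        writel(0, hba->mmio_base + REG_UTP_TASK_REQ_LIST_RUN_STOP);

        /* 4. wait until outstanding commands have drained (simple polling) */
        while (hba->outstanding_reqs || hba->outstanding_tasks)
                msleep(10);

        /* 5. set HCE to zero to put the host controller into reset */
        ufshcd_hba_stop(hba);

        return 0;
}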
+
+/**
+ * ufshcd_hba_free - free allocated memory for
+ *                     host memory space data structures
+ * @hba: per adapter instance
+ */
+static void ufshcd_hba_free(struct ufs_hba *hba)
+{
+       iounmap(hba->mmio_base);
+       ufshcd_free_hba_memory(hba);
+       pci_release_regions(hba->pdev);
+}
+
+/**
+ * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ *             data structure memory
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_remove(struct pci_dev *pdev)
+{
+       struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+       /* disable interrupts */
+       ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
+       free_irq(pdev->irq, hba);
+
+       ufshcd_hba_stop(hba);
+       ufshcd_hba_free(hba);
+
+       scsi_remove_host(hba->host);
+       scsi_host_put(hba->host);
+       pci_set_drvdata(pdev, NULL);
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ *                      addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
+       int err;
+       u64 dma_mask;
+
+       /*
+        * If controller supports 64 bit addressing mode, then set the DMA
+        * mask to 64-bit, else set the DMA mask to 32-bit
+        */
+       if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
+               dma_mask = DMA_BIT_MASK(64);
+       else
+               dma_mask = DMA_BIT_MASK(32);
+
+       err = pci_set_dma_mask(hba->pdev, dma_mask);
+       if (err)
+               return err;
+
+       err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
+
+       return err;
+}
+
+/**
+ * ufshcd_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int __devinit
+ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct Scsi_Host *host;
+       struct ufs_hba *hba;
+       int err;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "pci_enable_device failed\n");
+               goto out_error;
+       }
+
+       pci_set_master(pdev);
+
+       host = scsi_host_alloc(&ufshcd_driver_template,
+                               sizeof(struct ufs_hba));
+       if (!host) {
+               dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+               err = -ENOMEM;
+               goto out_disable;
+       }
+       hba = shost_priv(host);
+
+       err = pci_request_regions(pdev, UFSHCD);
+       if (err < 0) {
+               dev_err(&pdev->dev, "request regions failed\n");
+               goto out_disable;
+       }
+
+       hba->mmio_base = pci_ioremap_bar(pdev, 0);
+       if (!hba->mmio_base) {
+               dev_err(&pdev->dev, "memory map failed\n");
+               err = -ENOMEM;
+               goto out_release_regions;
+       }
+
+       hba->host = host;
+       hba->pdev = pdev;
+
+       /* Read capabilities registers */
+       ufshcd_hba_capabilities(hba);
+
+       /* Get UFS version supported by the controller */
+       hba->ufs_version = ufshcd_get_ufs_version(hba);
+
+       err = ufshcd_set_dma_mask(hba);
+       if (err) {
+               dev_err(&pdev->dev, "set dma mask failed\n");
+               goto out_iounmap;
+       }
+
+       /* Allocate memory for host memory space */
+       err = ufshcd_memory_alloc(hba);
+       if (err) {
+               dev_err(&pdev->dev, "Memory allocation failed\n");
+               goto out_iounmap;
+       }
+
+       /* Configure LRB */
+       ufshcd_host_memory_configure(hba);
+
+       host->can_queue = hba->nutrs;
+       host->cmd_per_lun = hba->nutrs;
+       host->max_id = UFSHCD_MAX_ID;
+       host->max_lun = UFSHCD_MAX_LUNS;
+       host->max_channel = UFSHCD_MAX_CHANNEL;
+       host->unique_id = host->host_no;
+       host->max_cmd_len = MAX_CDB_SIZE;
+
+       /* Initialize wait queue for task management */
+       init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
+
+       /* Initialize work queues */
+       INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
+       INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+
+       /* IRQ registration */
+       err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+       if (err) {
+               dev_err(&pdev->dev, "request irq failed\n");
+               goto out_lrb_free;
+       }
+
+       /* Enable SCSI tag mapping */
+       err = scsi_init_shared_tag_map(host, host->can_queue);
+       if (err) {
+               dev_err(&pdev->dev, "init shared queue failed\n");
+               goto out_free_irq;
+       }
+
+       pci_set_drvdata(pdev, hba);
+
+       err = scsi_add_host(host, &pdev->dev);
+       if (err) {
+               dev_err(&pdev->dev, "scsi_add_host failed\n");
+               goto out_free_irq;
+       }
+
+       /* Initialization routine */
+       err = ufshcd_initialize_hba(hba);
+       if (err) {
+               dev_err(&pdev->dev, "Initialization failed\n");
+               goto out_free_irq;
+       }
+
+       return 0;
+
+out_free_irq:
+       free_irq(pdev->irq, hba);
+out_lrb_free:
+       ufshcd_free_hba_memory(hba);
+out_iounmap:
+       iounmap(hba->mmio_base);
+out_release_regions:
+       pci_release_regions(pdev);
+out_disable:
+       scsi_host_put(host);
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+out_error:
+       return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+       { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+       { }     /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+       .name = UFSHCD,
+       .id_table = ufshcd_pci_tbl,
+       .probe = ufshcd_probe,
+       .remove = __devexit_p(ufshcd_remove),
+       .shutdown = ufshcd_shutdown,
+#ifdef CONFIG_PM
+       .suspend = ufshcd_suspend,
+       .resume = ufshcd_resume,
+#endif
+};
+
+/**
+ * ufshcd_init - Driver registration routine
+ */
+static int __init ufshcd_init(void)
+{
+       return pci_register_driver(&ufshcd_pci_driver);
+}
+module_init(ufshcd_init);
+
+/**
+ * ufshcd_exit - Driver exit clean-up routine
+ */
+static void __exit ufshcd_exit(void)
+{
+       pci_unregister_driver(&ufshcd_pci_driver);
+}
+module_exit(ufshcd_exit);
+
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
+             "Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
new file mode 100644 (file)
index 0000000..6e3510f
--- /dev/null
@@ -0,0 +1,376 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshci.h
+ * Copyright (C) 2011-2012 Samsung India Software Operations
+ *
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */
+
+#ifndef _UFSHCI_H
+#define _UFSHCI_H
+
+enum {
+       TASK_REQ_UPIU_SIZE_DWORDS       = 8,
+       TASK_RSP_UPIU_SIZE_DWORDS       = 8,
+       ALIGNED_UPIU_SIZE               = 128,
+};
+
+/* UFSHCI Registers */
+enum {
+       REG_CONTROLLER_CAPABILITIES             = 0x00,
+       REG_UFS_VERSION                         = 0x08,
+       REG_CONTROLLER_DEV_ID                   = 0x10,
+       REG_CONTROLLER_PROD_ID                  = 0x14,
+       REG_INTERRUPT_STATUS                    = 0x20,
+       REG_INTERRUPT_ENABLE                    = 0x24,
+       REG_CONTROLLER_STATUS                   = 0x30,
+       REG_CONTROLLER_ENABLE                   = 0x34,
+       REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER    = 0x38,
+       REG_UIC_ERROR_CODE_DATA_LINK_LAYER      = 0x3C,
+       REG_UIC_ERROR_CODE_NETWORK_LAYER        = 0x40,
+       REG_UIC_ERROR_CODE_TRANSPORT_LAYER      = 0x44,
+       REG_UIC_ERROR_CODE_DME                  = 0x48,
+       REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL    = 0x4C,
+       REG_UTP_TRANSFER_REQ_LIST_BASE_L        = 0x50,
+       REG_UTP_TRANSFER_REQ_LIST_BASE_H        = 0x54,
+       REG_UTP_TRANSFER_REQ_DOOR_BELL          = 0x58,
+       REG_UTP_TRANSFER_REQ_LIST_CLEAR         = 0x5C,
+       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP      = 0x60,
+       REG_UTP_TASK_REQ_LIST_BASE_L            = 0x70,
+       REG_UTP_TASK_REQ_LIST_BASE_H            = 0x74,
+       REG_UTP_TASK_REQ_DOOR_BELL              = 0x78,
+       REG_UTP_TASK_REQ_LIST_CLEAR             = 0x7C,
+       REG_UTP_TASK_REQ_LIST_RUN_STOP          = 0x80,
+       REG_UIC_COMMAND                         = 0x90,
+       REG_UIC_COMMAND_ARG_1                   = 0x94,
+       REG_UIC_COMMAND_ARG_2                   = 0x98,
+       REG_UIC_COMMAND_ARG_3                   = 0x9C,
+};
+
+/* Controller capability masks */
+enum {
+       MASK_TRANSFER_REQUESTS_SLOTS            = 0x0000001F,
+       MASK_TASK_MANAGEMENT_REQUEST_SLOTS      = 0x00070000,
+       MASK_64_ADDRESSING_SUPPORT              = 0x01000000,
+       MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
+       MASK_UIC_DME_TEST_MODE_SUPPORT          = 0x04000000,
+};
+
+/* UFS Version 08h */
+#define MINOR_VERSION_NUM_MASK         UFS_MASK(0xFFFF, 0)
+#define MAJOR_VERSION_NUM_MASK         UFS_MASK(0xFFFF, 16)
+
+/* Controller UFSHCI version */
+enum {
+       UFSHCI_VERSION_10 = 0x00010000,
+       UFSHCI_VERSION_11 = 0x00010100,
+};
+
+/*
+ * HCDDID - Host Controller Identification Descriptor
+ *       - Device ID and Device Class 10h
+ */
+#define DEVICE_CLASS   UFS_MASK(0xFFFF, 0)
+#define DEVICE_ID      UFS_MASK(0xFF, 24)
+
+/*
+ * HCPMID - Host Controller Identification Descriptor
+ *       - Product/Manufacturer ID  14h
+ */
+#define MANUFACTURE_ID_MASK    UFS_MASK(0xFFFF, 0)
+#define PRODUCT_ID_MASK                UFS_MASK(0xFFFF, 16)
+
+#define UFS_BIT(x)     (1L << (x))
+
+#define UTP_TRANSFER_REQ_COMPL                 UFS_BIT(0)
+#define UIC_DME_END_PT_RESET                   UFS_BIT(1)
+#define UIC_ERROR                              UFS_BIT(2)
+#define UIC_TEST_MODE                          UFS_BIT(3)
+#define UIC_POWER_MODE                         UFS_BIT(4)
+#define UIC_HIBERNATE_EXIT                     UFS_BIT(5)
+#define UIC_HIBERNATE_ENTER                    UFS_BIT(6)
+#define UIC_LINK_LOST                          UFS_BIT(7)
+#define UIC_LINK_STARTUP                       UFS_BIT(8)
+#define UTP_TASK_REQ_COMPL                     UFS_BIT(9)
+#define UIC_COMMAND_COMPL                      UFS_BIT(10)
+#define DEVICE_FATAL_ERROR                     UFS_BIT(11)
+#define CONTROLLER_FATAL_ERROR                 UFS_BIT(16)
+#define SYSTEM_BUS_FATAL_ERROR                 UFS_BIT(17)
+
+#define UFSHCD_ERROR_MASK      (UIC_ERROR |\
+                               DEVICE_FATAL_ERROR |\
+                               CONTROLLER_FATAL_ERROR |\
+                               SYSTEM_BUS_FATAL_ERROR)
+
+#define INT_FATAL_ERRORS       (DEVICE_FATAL_ERROR |\
+                               CONTROLLER_FATAL_ERROR |\
+                               SYSTEM_BUS_FATAL_ERROR)
+
+/* HCS - Host Controller Status 30h */
+#define DEVICE_PRESENT                         UFS_BIT(0)
+#define UTP_TRANSFER_REQ_LIST_READY            UFS_BIT(1)
+#define UTP_TASK_REQ_LIST_READY                        UFS_BIT(2)
+#define UIC_COMMAND_READY                      UFS_BIT(3)
+#define HOST_ERROR_INDICATOR                   UFS_BIT(4)
+#define DEVICE_ERROR_INDICATOR                 UFS_BIT(5)
+#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK  UFS_MASK(0x7, 8)
+
+/* HCE - Host Controller Enable 34h */
+#define CONTROLLER_ENABLE      UFS_BIT(0)
+#define CONTROLLER_DISABLE     0x0
+
+/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
+#define UIC_PHY_ADAPTER_LAYER_ERROR                    UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK          0x1F
+
+/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
+#define UIC_DATA_LINK_LAYER_ERROR              UFS_BIT(31)
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK    0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT      0x2000
+
+/* UECN - Host UIC Error Code Network Layer 40h */
+#define UIC_NETWORK_LAYER_ERROR                        UFS_BIT(31)
+#define UIC_NETWORK_LAYER_ERROR_CODE_MASK      0x7
+
+/* UECT - Host UIC Error Code Transport Layer 44h */
+#define UIC_TRANSPORT_LAYER_ERROR              UFS_BIT(31)
+#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK    0x7F
+
+/* UECDME - Host UIC Error Code DME 48h */
+#define UIC_DME_ERROR                  UFS_BIT(31)
+#define UIC_DME_ERROR_CODE_MASK                0x1
+
+#define INT_AGGR_TIMEOUT_VAL_MASK              0xFF
+#define INT_AGGR_COUNTER_THRESHOLD_MASK                UFS_MASK(0x1F, 8)
+#define INT_AGGR_COUNTER_AND_TIMER_RESET       UFS_BIT(16)
+#define INT_AGGR_STATUS_BIT                    UFS_BIT(20)
+#define INT_AGGR_PARAM_WRITE                   UFS_BIT(24)
+#define INT_AGGR_ENABLE                                UFS_BIT(31)
+
+/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT     UFS_BIT(0)
+
+/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT         UFS_BIT(0)
+
+/* UICCMD - UIC Command */
+#define COMMAND_OPCODE_MASK            0xFF
+#define GEN_SELECTOR_INDEX_MASK                0xFFFF
+
+#define MIB_ATTRIBUTE_MASK             UFS_MASK(0xFFFF, 16)
+#define RESET_LEVEL                    0xFF
+
+#define ATTR_SET_TYPE_MASK             UFS_MASK(0xFF, 16)
+#define CONFIG_RESULT_CODE_MASK                0xFF
+#define GENERIC_ERROR_CODE_MASK                0xFF
+
+/* UIC Commands */
+enum {
+       UIC_CMD_DME_GET                 = 0x01,
+       UIC_CMD_DME_SET                 = 0x02,
+       UIC_CMD_DME_PEER_GET            = 0x03,
+       UIC_CMD_DME_PEER_SET            = 0x04,
+       UIC_CMD_DME_POWERON             = 0x10,
+       UIC_CMD_DME_POWEROFF            = 0x11,
+       UIC_CMD_DME_ENABLE              = 0x12,
+       UIC_CMD_DME_RESET               = 0x14,
+       UIC_CMD_DME_END_PT_RST          = 0x15,
+       UIC_CMD_DME_LINK_STARTUP        = 0x16,
+       UIC_CMD_DME_HIBER_ENTER         = 0x17,
+       UIC_CMD_DME_HIBER_EXIT          = 0x18,
+       UIC_CMD_DME_TEST_MODE           = 0x1A,
+};
+
+/* UIC Config result code / Generic error code */
+enum {
+       UIC_CMD_RESULT_SUCCESS                  = 0x00,
+       UIC_CMD_RESULT_INVALID_ATTR             = 0x01,
+       UIC_CMD_RESULT_FAILURE                  = 0x01,
+       UIC_CMD_RESULT_INVALID_ATTR_VALUE       = 0x02,
+       UIC_CMD_RESULT_READ_ONLY_ATTR           = 0x03,
+       UIC_CMD_RESULT_WRITE_ONLY_ATTR          = 0x04,
+       UIC_CMD_RESULT_BAD_INDEX                = 0x05,
+       UIC_CMD_RESULT_LOCKED_ATTR              = 0x06,
+       UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX   = 0x07,
+       UIC_CMD_RESULT_PEER_COMM_FAILURE        = 0x08,
+       UIC_CMD_RESULT_BUSY                     = 0x09,
+       UIC_CMD_RESULT_DME_FAILURE              = 0x0A,
+};
+
+#define MASK_UIC_COMMAND_RESULT                        0xFF
+
+#define INT_AGGR_COUNTER_THRESHOLD_VALUE       (0x1F << 8)
+#define INT_AGGR_TIMEOUT_VALUE                 (0x02)
+
+/* Interrupt disable masks */
+enum {
+       /* Interrupt disable mask for UFSHCI v1.0 */
+       INTERRUPT_DISABLE_MASK_10       = 0xFFFF,
+
+       /* Interrupt disable mask for UFSHCI v1.1 */
+       INTERRUPT_DISABLE_MASK_11       = 0x0,
+};
+
+/*
+ * Request Descriptor Definitions
+ */
+
+/* Transfer request command type */
+enum {
+       UTP_CMD_TYPE_SCSI               = 0x0,
+       UTP_CMD_TYPE_UFS                = 0x1,
+       UTP_CMD_TYPE_DEV_MANAGE         = 0x2,
+};
+
+enum {
+       UTP_SCSI_COMMAND                = 0x00000000,
+       UTP_NATIVE_UFS_COMMAND          = 0x10000000,
+       UTP_DEVICE_MANAGEMENT_FUNCTION  = 0x20000000,
+       UTP_REQ_DESC_INT_CMD            = 0x01000000,
+};
+
+/* UTP Transfer Request Data Direction (DD) */
+enum {
+       UTP_NO_DATA_TRANSFER    = 0x00000000,
+       UTP_HOST_TO_DEVICE      = 0x02000000,
+       UTP_DEVICE_TO_HOST      = 0x04000000,
+};
+
+/* Overall command status values */
+enum {
+       OCS_SUCCESS                     = 0x0,
+       OCS_INVALID_CMD_TABLE_ATTR      = 0x1,
+       OCS_INVALID_PRDT_ATTR           = 0x2,
+       OCS_MISMATCH_DATA_BUF_SIZE      = 0x3,
+       OCS_MISMATCH_RESP_UPIU_SIZE     = 0x4,
+       OCS_PEER_COMM_FAILURE           = 0x5,
+       OCS_ABORTED                     = 0x6,
+       OCS_FATAL_ERROR                 = 0x7,
+       OCS_INVALID_COMMAND_STATUS      = 0x0F,
+       MASK_OCS                        = 0x0F,
+};
+
+/**
+ * struct ufshcd_sg_entry - UFSHCI PRD Entry
+ * @base_addr: Lower 32bit physical address DW-0
+ * @upper_addr: Upper 32bit physical address DW-1
+ * @reserved: Reserved for future use DW-2
+ * @size: size of physical segment DW-3
+ */
+struct ufshcd_sg_entry {
+       u32    base_addr;
+       u32    upper_addr;
+       u32    reserved;
+       u32    size;
+};
+
+/**
+ * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * @command_upiu: Command UPIU Frame address
+ * @response_upiu: Response UPIU Frame address
+ * @prd_table: Physical Region Descriptor
+ */
+struct utp_transfer_cmd_desc {
+       u8 command_upiu[ALIGNED_UPIU_SIZE];
+       u8 response_upiu[ALIGNED_UPIU_SIZE];
+       struct ufshcd_sg_entry    prd_table[SG_ALL];
+};
+
+/**
+ * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
+ * @dword_0: Descriptor Header DW0
+ * @dword_1: Descriptor Header DW1
+ * @dword_2: Descriptor Header DW2
+ * @dword_3: Descriptor Header DW3
+ */
+struct request_desc_header {
+       u32 dword_0;
+       u32 dword_1;
+       u32 dword_2;
+       u32 dword_3;
+};
+
+/**
+ * struct utp_transfer_req_desc - UTRD structure
+ * @header: UTRD header DW-0 to DW-3
+ * @command_desc_base_addr_lo: UCD base address low DW-4
+ * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @response_upiu_length: response UPIU length DW-6
+ * @response_upiu_offset: response UPIU offset DW-6
+ * @prd_table_length: Physical region descriptor length DW-7
+ * @prd_table_offset: Physical region descriptor offset DW-7
+ */
+struct utp_transfer_req_desc {
+
+       /* DW 0-3 */
+       struct request_desc_header header;
+
+       /* DW 4-5*/
+       u32  command_desc_base_addr_lo;
+       u32  command_desc_base_addr_hi;
+
+       /* DW 6 */
+       u16  response_upiu_length;
+       u16  response_upiu_offset;
+
+       /* DW 7 */
+       u16  prd_table_length;
+       u16  prd_table_offset;
+};
+
+/**
+ * struct utp_task_req_desc - UTMRD structure
+ * @header: UTMRD header DW-0 to DW-3
+ * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
+ * @task_rsp_upiu: Pointer to task response UPIU DW-12 to DW-19
+ */
+struct utp_task_req_desc {
+
+       /* DW 0-3 */
+       struct request_desc_header header;
+
+       /* DW 4-11 */
+       u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+
+       /* DW 12-19 */
+       u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+};
+
+#endif /* End of Header */
index 7264116185d590eba8ed6154552c0f17b3ad109a..4411d42244011ffa2d562db19d7a7e654f2e7fa4 100644 (file)
@@ -17,7 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
  *
  */
 
@@ -1178,11 +1178,67 @@ static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
        return 0;
 }
 
+/*
+ * Query the device, fetch the config info and return the
+ * maximum number of targets on the adapter.  On any failure,
+ * return the default of 16.
+ */
+static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
+{
+       struct PVSCSICmdDescConfigCmd cmd;
+       struct PVSCSIConfigPageHeader *header;
+       struct device *dev;
+       dma_addr_t configPagePA;
+       void *config_page;
+       u32 numPhys = 16;
+
+       dev = pvscsi_dev(adapter);
+       config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
+                                          &configPagePA);
+       if (!config_page) {
+               dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
+               goto exit;
+       }
+       BUG_ON(configPagePA & ~PAGE_MASK);
+
+       /* Fetch config info from the device. */
+       cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
+       cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
+       cmd.cmpAddr = configPagePA;
+       cmd._pad = 0;
+
+       /*
+        * Mark the completion page header with error values. If the device
+        * completes the command successfully, it sets the status values to
+        * indicate success.
+        */
+       header = config_page;
+       memset(header, 0, sizeof *header);
+       header->hostStatus = BTSTAT_INVPARAM;
+       header->scsiStatus = SDSTAT_CHECK;
+
+       pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
+
+       if (header->hostStatus == BTSTAT_SUCCESS &&
+           header->scsiStatus == SDSTAT_GOOD) {
+               struct PVSCSIConfigPageController *config;
+
+               config = config_page;
+               numPhys = config->numPhys;
+       } else
+               dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
+                        header->hostStatus, header->scsiStatus);
+       pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+exit:
+       return numPhys;
+}
+
 static int __devinit pvscsi_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
        struct pvscsi_adapter *adapter;
        struct Scsi_Host *host;
+       struct device *dev;
        unsigned int i;
        unsigned long flags = 0;
        int error;
@@ -1271,6 +1327,13 @@ static int __devinit pvscsi_probe(struct pci_dev *pdev,
                goto out_release_resources;
        }
 
+       /*
+        * Ask the device for max number of targets.
+        */
+       host->max_id = pvscsi_get_max_targets(adapter);
+       dev = pvscsi_dev(adapter);
+       dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
+
        /*
         * From this point on we should reset the adapter if anything goes
         * wrong.
index 62e36e75715e310a98ca219cb06f24fe3302b5d5..3546e8662e30af8f85e24f896cf1d2fb4fc2ef3f 100644 (file)
@@ -17,7 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * Maintained by: Alok N Kataria <akataria@vmware.com>
+ * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
  *
  */
 
@@ -26,7 +26,7 @@
 
 #include <linux/types.h>
 
-#define PVSCSI_DRIVER_VERSION_STRING   "1.0.1.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING   "1.0.2.0-k"
 
 #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
 
  * host adapter status/error codes
  */
 enum HostBusAdapterStatus {
-   BTSTAT_SUCCESS       = 0x00,  /* CCB complete normally with no errors */
-   BTSTAT_LINKED_COMMAND_COMPLETED           = 0x0a,
-   BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
-   BTSTAT_DATA_UNDERRUN = 0x0c,
-   BTSTAT_SELTIMEO      = 0x11,  /* SCSI selection timeout */
-   BTSTAT_DATARUN       = 0x12,  /* data overrun/underrun */
-   BTSTAT_BUSFREE       = 0x13,  /* unexpected bus free */
-   BTSTAT_INVPHASE      = 0x14,  /* invalid bus phase or sequence requested by target */
-   BTSTAT_LUNMISMATCH   = 0x17,  /* linked CCB has different LUN from first CCB */
-   BTSTAT_SENSFAILED    = 0x1b,  /* auto request sense failed */
-   BTSTAT_TAGREJECT     = 0x1c,  /* SCSI II tagged queueing message rejected by target */
-   BTSTAT_BADMSG        = 0x1d,  /* unsupported message received by the host adapter */
-   BTSTAT_HAHARDWARE    = 0x20,  /* host adapter hardware failed */
-   BTSTAT_NORESPONSE    = 0x21,  /* target did not respond to SCSI ATN, sent a SCSI RST */
-   BTSTAT_SENTRST       = 0x22,  /* host adapter asserted a SCSI RST */
-   BTSTAT_RECVRST       = 0x23,  /* other SCSI devices asserted a SCSI RST */
-   BTSTAT_DISCONNECT    = 0x24,  /* target device reconnected improperly (w/o tag) */
-   BTSTAT_BUSRESET      = 0x25,  /* host adapter issued BUS device reset */
-   BTSTAT_ABORTQUEUE    = 0x26,  /* abort queue generated */
-   BTSTAT_HASOFTWARE    = 0x27,  /* host adapter software error */
-   BTSTAT_HATIMEOUT     = 0x30,  /* host adapter hardware timeout error */
-   BTSTAT_SCSIPARITY    = 0x34,  /* SCSI parity error detected */
+       BTSTAT_SUCCESS       = 0x00,  /* CCB complete normally with no errors */
+       BTSTAT_LINKED_COMMAND_COMPLETED           = 0x0a,
+       BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
+       BTSTAT_DATA_UNDERRUN = 0x0c,
+       BTSTAT_SELTIMEO      = 0x11,  /* SCSI selection timeout */
+       BTSTAT_DATARUN       = 0x12,  /* data overrun/underrun */
+       BTSTAT_BUSFREE       = 0x13,  /* unexpected bus free */
+       BTSTAT_INVPHASE      = 0x14,  /* invalid bus phase or sequence
+                                      * requested by target */
+       BTSTAT_LUNMISMATCH   = 0x17,  /* linked CCB has different LUN from
+                                      * first CCB */
+       BTSTAT_INVPARAM      = 0x1a,  /* invalid parameter in CCB or segment
+                                      * list */
+       BTSTAT_SENSFAILED    = 0x1b,  /* auto request sense failed */
+       BTSTAT_TAGREJECT     = 0x1c,  /* SCSI II tagged queueing message
+                                      * rejected by target */
+       BTSTAT_BADMSG        = 0x1d,  /* unsupported message received by the
+                                      * host adapter */
+       BTSTAT_HAHARDWARE    = 0x20,  /* host adapter hardware failed */
+       BTSTAT_NORESPONSE    = 0x21,  /* target did not respond to SCSI ATN,
+                                      * sent a SCSI RST */
+       BTSTAT_SENTRST       = 0x22,  /* host adapter asserted a SCSI RST */
+       BTSTAT_RECVRST       = 0x23,  /* other SCSI devices asserted a SCSI
+                                      * RST */
+       BTSTAT_DISCONNECT    = 0x24,  /* target device reconnected improperly
+                                      * (w/o tag) */
+       BTSTAT_BUSRESET      = 0x25,  /* host adapter issued BUS device reset */
+       BTSTAT_ABORTQUEUE    = 0x26,  /* abort queue generated */
+       BTSTAT_HASOFTWARE    = 0x27,  /* host adapter software error */
+       BTSTAT_HATIMEOUT     = 0x30,  /* host adapter hardware timeout error */
+       BTSTAT_SCSIPARITY    = 0x34,  /* SCSI parity error detected */
+};
+
+/*
+ * SCSI device status values.
+ */
+enum ScsiDeviceStatus {
+       SDSTAT_GOOD  = 0x00, /* No errors. */
+       SDSTAT_CHECK = 0x02, /* Check condition. */
 };
 
 /*
@@ -113,6 +130,29 @@ struct PVSCSICmdDescResetDevice {
        u8      lun[8];
 } __packed;
 
+/*
+ * Command descriptor for PVSCSI_CMD_CONFIG --
+ */
+
+struct PVSCSICmdDescConfigCmd {
+       u64 cmpAddr;
+       u64 configPageAddress;
+       u32 configPageNum;
+       u32 _pad;
+} __packed;
+
+enum PVSCSIConfigPageType {
+       PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
+       PVSCSI_CONFIG_PAGE_PHY        = 0x1959,
+       PVSCSI_CONFIG_PAGE_DEVICE     = 0x195a,
+};
+
+enum PVSCSIConfigPageAddressType {
+       PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120,
+       PVSCSI_CONFIG_BUSTARGET_ADDRESS  = 0x2121,
+       PVSCSI_CONFIG_PHY_ADDRESS        = 0x2122,
+};
+
 /*
  * Command descriptor for PVSCSI_CMD_ABORT_CMD --
  *
@@ -332,6 +372,27 @@ struct PVSCSIRingCmpDesc {
        u32     _pad[2];
 } __packed;
 
+struct PVSCSIConfigPageHeader {
+       u32 pageNum;
+       u16 numDwords;
+       u16 hostStatus;
+       u16 scsiStatus;
+       u16 reserved[3];
+} __packed;
+
+struct PVSCSIConfigPageController {
+       struct PVSCSIConfigPageHeader header;
+       u64 nodeWWN; /* Device name as defined in the SAS spec. */
+       u16 manufacturer[64];
+       u16 serialNumber[64];
+       u16 opromVersion[32];
+       u16 hwVersion[32];
+       u16 firmwareVersion[32];
+       u32 numPhys;
+       u8  useConsecutivePhyWWNs;
+       u8  reserved[3];
+} __packed;
+
 /*
  * Interrupt status / IRQ bits.
  */
index 4f71627264fd9276d6acb3b94f3ee5316372a394..da887604dfc51929cd4cc17b10a943645b4618ef 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -305,15 +305,18 @@ out_freectx:
        return ERR_PTR(err);
 }
 
-/* aio_cancel_all
+/* kill_ctx
  *     Cancels all outstanding aio requests on an aio context.  Used 
  *     when the processes owning a context have all exited to encourage 
  *     the rapid destruction of the kioctx.
  */
-static void aio_cancel_all(struct kioctx *ctx)
+static void kill_ctx(struct kioctx *ctx)
 {
        int (*cancel)(struct kiocb *, struct io_event *);
+       struct task_struct *tsk = current;
+       DECLARE_WAITQUEUE(wait, tsk);
        struct io_event res;
+
        spin_lock_irq(&ctx->ctx_lock);
        ctx->dead = 1;
        while (!list_empty(&ctx->active_reqs)) {
@@ -329,15 +332,7 @@ static void aio_cancel_all(struct kioctx *ctx)
                        spin_lock_irq(&ctx->ctx_lock);
                }
        }
-       spin_unlock_irq(&ctx->ctx_lock);
-}
-
-static void wait_for_all_aios(struct kioctx *ctx)
-{
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
 
-       spin_lock_irq(&ctx->ctx_lock);
        if (!ctx->reqs_active)
                goto out;
 
@@ -387,9 +382,7 @@ void exit_aio(struct mm_struct *mm)
                ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
                hlist_del_rcu(&ctx->list);
 
-               aio_cancel_all(ctx);
-
-               wait_for_all_aios(ctx);
+               kill_ctx(ctx);
 
                if (1 != atomic_read(&ctx->users))
                        printk(KERN_DEBUG
@@ -1269,8 +1262,7 @@ static void io_destroy(struct kioctx *ioctx)
        if (likely(!was_dead))
                put_ioctx(ioctx);       /* twice for the list */
 
-       aio_cancel_all(ioctx);
-       wait_for_all_aios(ioctx);
+       kill_ctx(ioctx);
 
        /*
         * Wake up any waiters.  The setting of ctx->dead must be seen
@@ -1278,7 +1270,6 @@ static void io_destroy(struct kioctx *ioctx)
         * locking done by the above calls to ensure this consistency.
         */
        wake_up_all(&ioctx->wait);
-       put_ioctx(ioctx);       /* once for the lookup */
 }
 
 /* sys_io_setup:
@@ -1315,11 +1306,9 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        ret = PTR_ERR(ioctx);
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
-               if (!ret) {
-                       put_ioctx(ioctx);
-                       return 0;
-               }
-               io_destroy(ioctx);
+               if (ret)
+                       io_destroy(ioctx);
+               put_ioctx(ioctx);
        }
 
 out:
@@ -1337,6 +1326,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
                io_destroy(ioctx);
+               put_ioctx(ioctx);
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
index 75ad433c66913ae828784e68438c14309f035a0e..0b2b4db5bdcd026c928377d9e93d30715ea323a0 100644 (file)
@@ -1,5 +1,636 @@
+/*
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/include/linux/minix_fs.h
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
 #include <linux/fs.h>
 #include <linux/ext2_fs.h>
+#include <linux/blockgroup_lock.h>
+#include <linux/percpu_counter.h>
+#include <linux/rbtree.h>
+
+/* XXX Here for now... not interested in restructuring headers JUST now */
+
+/* data type for block offset of block group */
+typedef int ext2_grpblk_t;
+
+/* data type for filesystem-wide blocks number */
+typedef unsigned long ext2_fsblk_t;
+
+#define E2FSBLK "%lu"
+
+struct ext2_reserve_window {
+       ext2_fsblk_t            _rsv_start;     /* First byte reserved */
+       ext2_fsblk_t            _rsv_end;       /* Last byte reserved or 0 */
+};
+
+struct ext2_reserve_window_node {
+       struct rb_node          rsv_node;
+       __u32                   rsv_goal_size;
+       __u32                   rsv_alloc_hit;
+       struct ext2_reserve_window      rsv_window;
+};
+
+struct ext2_block_alloc_info {
+       /* information about reservation window */
+       struct ext2_reserve_window_node rsv_window_node;
+       /*
+        * Was i_next_alloc_block in ext2_inode_info.  This is the logical
+        * (file-relative) number of the
+        * most-recently-allocated block in this file.
+        * We use this for detecting linearly ascending allocation requests.
+        */
+       __u32                   last_alloc_logical_block;
+       /*
+        * Was i_next_alloc_goal in ext2_inode_info.  This is the *physical*
+        * companion to i_next_alloc_block.  It is the physical block number
+        * of the block which was most recently allocated to this file.  This
+        * gives us the goal (target) for the next allocation when we detect
+        * linearly ascending requests.
+        */
+       ext2_fsblk_t            last_alloc_physical_block;
+};
+
+#define rsv_start rsv_window._rsv_start
+#define rsv_end rsv_window._rsv_end
+
+/*
+ * second extended-fs super-block data in memory
+ */
+struct ext2_sb_info {
+       unsigned long s_frag_size;      /* Size of a fragment in bytes */
+       unsigned long s_frags_per_block;/* Number of fragments per block */
+       unsigned long s_inodes_per_block;/* Number of inodes per block */
+       unsigned long s_frags_per_group;/* Number of fragments in a group */
+       unsigned long s_blocks_per_group;/* Number of blocks in a group */
+       unsigned long s_inodes_per_group;/* Number of inodes in a group */
+       unsigned long s_itb_per_group;  /* Number of inode table blocks per group */
+       unsigned long s_gdb_count;      /* Number of group descriptor blocks */
+       unsigned long s_desc_per_block; /* Number of group descriptors per block */
+       unsigned long s_groups_count;   /* Number of groups in the fs */
+       unsigned long s_overhead_last;  /* Last calculated overhead */
+       unsigned long s_blocks_last;    /* Last seen block count */
+       struct buffer_head * s_sbh;     /* Buffer containing the super block */
+       struct ext2_super_block * s_es; /* Pointer to the super block in the buffer */
+       struct buffer_head ** s_group_desc;
+       unsigned long  s_mount_opt;
+       unsigned long s_sb_block;
+       uid_t s_resuid;
+       gid_t s_resgid;
+       unsigned short s_mount_state;
+       unsigned short s_pad;
+       int s_addr_per_block_bits;
+       int s_desc_per_block_bits;
+       int s_inode_size;
+       int s_first_ino;
+       spinlock_t s_next_gen_lock;
+       u32 s_next_generation;
+       unsigned long s_dir_count;
+       u8 *s_debts;
+       struct percpu_counter s_freeblocks_counter;
+       struct percpu_counter s_freeinodes_counter;
+       struct percpu_counter s_dirs_counter;
+       struct blockgroup_lock *s_blockgroup_lock;
+       /* root of the per fs reservation window tree */
+       spinlock_t s_rsv_window_lock;
+       struct rb_root s_rsv_window_root;
+       struct ext2_reserve_window_node s_rsv_window_head;
+       /*
+        * s_lock protects against concurrent modifications of s_mount_state,
+        * s_blocks_last, s_overhead_last and the content of superblock's
+        * buffer pointed to by sbi->s_es.
+        *
+        * Note: It is used in ext2_show_options() to provide a consistent view
+        * of the mount options.
+        */
+       spinlock_t s_lock;
+};
+
+static inline spinlock_t *
+sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
+{
+       return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
+}
+
+/*
+ * Define EXT2FS_DEBUG to produce debug messages
+ */
+#undef EXT2FS_DEBUG
+
+/*
+ * Define EXT2_RESERVATION to reserve data blocks for expanding files
+ */
+#define EXT2_DEFAULT_RESERVE_BLOCKS     8
+/* max window size: 1024 (direct blocks) + 3 ([t,d]indirect blocks) */
+#define EXT2_MAX_RESERVE_BLOCKS         1027
+#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
+/*
+ * The second extended file system version
+ */
+#define EXT2FS_DATE            "95/08/09"
+#define EXT2FS_VERSION         "0.5b"
+
+/*
+ * Debug code
+ */
+#ifdef EXT2FS_DEBUG
+#      define ext2_debug(f, a...)      { \
+                                       printk ("EXT2-fs DEBUG (%s, %d): %s:", \
+                                               __FILE__, __LINE__, __func__); \
+                                       printk (f, ## a); \
+                                       }
+#else
+#      define ext2_debug(f, a...)      /**/
+#endif
+
+/*
+ * Special inode numbers
+ */
+#define        EXT2_BAD_INO             1      /* Bad blocks inode */
+#define EXT2_ROOT_INO           2      /* Root inode */
+#define EXT2_BOOT_LOADER_INO    5      /* Boot loader inode */
+#define EXT2_UNDEL_DIR_INO      6      /* Undelete directory inode */
+
+/* First non-reserved inode for old ext2 filesystems */
+#define EXT2_GOOD_OLD_FIRST_INO        11
+
+static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
+{
+       return sb->s_fs_info;
+}
+
+/*
+ * Macro-instructions used to manage several block sizes
+ */
+#define EXT2_MIN_BLOCK_SIZE            1024
+#define        EXT2_MAX_BLOCK_SIZE             4096
+#define EXT2_MIN_BLOCK_LOG_SIZE                  10
+#define EXT2_BLOCK_SIZE(s)             ((s)->s_blocksize)
+#define        EXT2_ADDR_PER_BLOCK(s)          (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
+#define EXT2_BLOCK_SIZE_BITS(s)                ((s)->s_blocksize_bits)
+#define        EXT2_ADDR_PER_BLOCK_BITS(s)     (EXT2_SB(s)->s_addr_per_block_bits)
+#define EXT2_INODE_SIZE(s)             (EXT2_SB(s)->s_inode_size)
+#define EXT2_FIRST_INO(s)              (EXT2_SB(s)->s_first_ino)
+
+/*
+ * Macro-instructions used to manage fragments
+ */
+#define EXT2_MIN_FRAG_SIZE             1024
+#define        EXT2_MAX_FRAG_SIZE              4096
+#define EXT2_MIN_FRAG_LOG_SIZE           10
+#define EXT2_FRAG_SIZE(s)              (EXT2_SB(s)->s_frag_size)
+#define EXT2_FRAGS_PER_BLOCK(s)                (EXT2_SB(s)->s_frags_per_block)
+
+/*
+ * Structure of a blocks group descriptor
+ */
+struct ext2_group_desc
+{
+       __le32  bg_block_bitmap;                /* Blocks bitmap block */
+       __le32  bg_inode_bitmap;                /* Inodes bitmap block */
+       __le32  bg_inode_table;         /* Inodes table block */
+       __le16  bg_free_blocks_count;   /* Free blocks count */
+       __le16  bg_free_inodes_count;   /* Free inodes count */
+       __le16  bg_used_dirs_count;     /* Directories count */
+       __le16  bg_pad;
+       __le32  bg_reserved[3];
+};
+
+/*
+ * Macro-instructions used to manage group descriptors
+ */
+#define EXT2_BLOCKS_PER_GROUP(s)       (EXT2_SB(s)->s_blocks_per_group)
+#define EXT2_DESC_PER_BLOCK(s)         (EXT2_SB(s)->s_desc_per_block)
+#define EXT2_INODES_PER_GROUP(s)       (EXT2_SB(s)->s_inodes_per_group)
+#define EXT2_DESC_PER_BLOCK_BITS(s)    (EXT2_SB(s)->s_desc_per_block_bits)
+
+/*
+ * Constants relative to the data blocks
+ */
+#define        EXT2_NDIR_BLOCKS                12
+#define        EXT2_IND_BLOCK                  EXT2_NDIR_BLOCKS
+#define        EXT2_DIND_BLOCK                 (EXT2_IND_BLOCK + 1)
+#define        EXT2_TIND_BLOCK                 (EXT2_DIND_BLOCK + 1)
+#define        EXT2_N_BLOCKS                   (EXT2_TIND_BLOCK + 1)
+
+/*
+ * Inode flags (GETFLAGS/SETFLAGS)
+ */
+#define        EXT2_SECRM_FL                   FS_SECRM_FL     /* Secure deletion */
+#define        EXT2_UNRM_FL                    FS_UNRM_FL      /* Undelete */
+#define        EXT2_COMPR_FL                   FS_COMPR_FL     /* Compress file */
+#define EXT2_SYNC_FL                   FS_SYNC_FL      /* Synchronous updates */
+#define EXT2_IMMUTABLE_FL              FS_IMMUTABLE_FL /* Immutable file */
+#define EXT2_APPEND_FL                 FS_APPEND_FL    /* writes to file may only append */
+#define EXT2_NODUMP_FL                 FS_NODUMP_FL    /* do not dump file */
+#define EXT2_NOATIME_FL                        FS_NOATIME_FL   /* do not update atime */
+/* Reserved for compression usage... */
+#define EXT2_DIRTY_FL                  FS_DIRTY_FL
+#define EXT2_COMPRBLK_FL               FS_COMPRBLK_FL  /* One or more compressed clusters */
+#define EXT2_NOCOMP_FL                 FS_NOCOMP_FL    /* Don't compress */
+#define EXT2_ECOMPR_FL                 FS_ECOMPR_FL    /* Compression error */
+/* End compression flags --- maybe not all used */     
+#define EXT2_BTREE_FL                  FS_BTREE_FL     /* btree format dir */
+#define EXT2_INDEX_FL                  FS_INDEX_FL     /* hash-indexed directory */
+#define EXT2_IMAGIC_FL                 FS_IMAGIC_FL    /* AFS directory */
+#define EXT2_JOURNAL_DATA_FL           FS_JOURNAL_DATA_FL /* Reserved for ext3 */
+#define EXT2_NOTAIL_FL                 FS_NOTAIL_FL    /* file tail should not be merged */
+#define EXT2_DIRSYNC_FL                        FS_DIRSYNC_FL   /* dirsync behaviour (directories only) */
+#define EXT2_TOPDIR_FL                 FS_TOPDIR_FL    /* Top of directory hierarchies*/
+#define EXT2_RESERVED_FL               FS_RESERVED_FL  /* reserved for ext2 lib */
+
+#define EXT2_FL_USER_VISIBLE           FS_FL_USER_VISIBLE      /* User visible flags */
+#define EXT2_FL_USER_MODIFIABLE                FS_FL_USER_MODIFIABLE   /* User modifiable flags */
+
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
+                          EXT2_SYNC_FL | EXT2_NODUMP_FL |\
+                          EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
+                          EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
+                          EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
+{
+       if (S_ISDIR(mode))
+               return flags;
+       else if (S_ISREG(mode))
+               return flags & EXT2_REG_FLMASK;
+       else
+               return flags & EXT2_OTHER_FLMASK;
+}
+
+/*
+ * ioctl commands
+ */
+#define        EXT2_IOC_GETFLAGS               FS_IOC_GETFLAGS
+#define        EXT2_IOC_SETFLAGS               FS_IOC_SETFLAGS
+#define        EXT2_IOC_GETVERSION             FS_IOC_GETVERSION
+#define        EXT2_IOC_SETVERSION             FS_IOC_SETVERSION
+#define        EXT2_IOC_GETRSVSZ               _IOR('f', 5, long)
+#define        EXT2_IOC_SETRSVSZ               _IOW('f', 6, long)
+
+/*
+ * ioctl commands in 32 bit emulation
+ */
+#define EXT2_IOC32_GETFLAGS            FS_IOC32_GETFLAGS
+#define EXT2_IOC32_SETFLAGS            FS_IOC32_SETFLAGS
+#define EXT2_IOC32_GETVERSION          FS_IOC32_GETVERSION
+#define EXT2_IOC32_SETVERSION          FS_IOC32_SETVERSION
+
+/*
+ * Structure of an inode on the disk
+ */
+struct ext2_inode {
+       __le16  i_mode;         /* File mode */
+       __le16  i_uid;          /* Low 16 bits of Owner Uid */
+       __le32  i_size;         /* Size in bytes */
+       __le32  i_atime;        /* Access time */
+       __le32  i_ctime;        /* Creation time */
+       __le32  i_mtime;        /* Modification time */
+       __le32  i_dtime;        /* Deletion Time */
+       __le16  i_gid;          /* Low 16 bits of Group Id */
+       __le16  i_links_count;  /* Links count */
+       __le32  i_blocks;       /* Blocks count */
+       __le32  i_flags;        /* File flags */
+       union {
+               struct {
+                       __le32  l_i_reserved1;
+               } linux1;
+               struct {
+                       __le32  h_i_translator;
+               } hurd1;
+               struct {
+                       __le32  m_i_reserved1;
+               } masix1;
+       } osd1;                         /* OS dependent 1 */
+       __le32  i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
+       __le32  i_generation;   /* File version (for NFS) */
+       __le32  i_file_acl;     /* File ACL */
+       __le32  i_dir_acl;      /* Directory ACL */
+       __le32  i_faddr;        /* Fragment address */
+       union {
+               struct {
+                       __u8    l_i_frag;       /* Fragment number */
+                       __u8    l_i_fsize;      /* Fragment size */
+                       __u16   i_pad1;
+                       __le16  l_i_uid_high;   /* these 2 fields    */
+                       __le16  l_i_gid_high;   /* were reserved2[0] */
+                       __u32   l_i_reserved2;
+               } linux2;
+               struct {
+                       __u8    h_i_frag;       /* Fragment number */
+                       __u8    h_i_fsize;      /* Fragment size */
+                       __le16  h_i_mode_high;
+                       __le16  h_i_uid_high;
+                       __le16  h_i_gid_high;
+                       __le32  h_i_author;
+               } hurd2;
+               struct {
+                       __u8    m_i_frag;       /* Fragment number */
+                       __u8    m_i_fsize;      /* Fragment size */
+                       __u16   m_pad1;
+                       __u32   m_i_reserved2[2];
+               } masix2;
+       } osd2;                         /* OS dependent 2 */
+};
+
+#define i_size_high    i_dir_acl
+
+#define i_reserved1    osd1.linux1.l_i_reserved1
+#define i_frag         osd2.linux2.l_i_frag
+#define i_fsize                osd2.linux2.l_i_fsize
+#define i_uid_low      i_uid
+#define i_gid_low      i_gid
+#define i_uid_high     osd2.linux2.l_i_uid_high
+#define i_gid_high     osd2.linux2.l_i_gid_high
+#define i_reserved2    osd2.linux2.l_i_reserved2
+
+/*
+ * File system states
+ */
+#define        EXT2_VALID_FS                   0x0001  /* Unmounted cleanly */
+#define        EXT2_ERROR_FS                   0x0002  /* Errors detected */
+
+/*
+ * Mount flags
+ */
+#define EXT2_MOUNT_CHECK               0x000001  /* Do mount-time checks */
+#define EXT2_MOUNT_OLDALLOC            0x000002  /* Don't use the new Orlov allocator */
+#define EXT2_MOUNT_GRPID               0x000004  /* Create files with directory's group */
+#define EXT2_MOUNT_DEBUG               0x000008  /* Some debugging messages */
+#define EXT2_MOUNT_ERRORS_CONT         0x000010  /* Continue on errors */
+#define EXT2_MOUNT_ERRORS_RO           0x000020  /* Remount fs ro on errors */
+#define EXT2_MOUNT_ERRORS_PANIC                0x000040  /* Panic on errors */
+#define EXT2_MOUNT_MINIX_DF            0x000080  /* Mimics the Minix statfs */
+#define EXT2_MOUNT_NOBH                        0x000100  /* No buffer_heads */
+#define EXT2_MOUNT_NO_UID32            0x000200  /* Disable 32-bit UIDs */
+#define EXT2_MOUNT_XATTR_USER          0x004000  /* Extended user attributes */
+#define EXT2_MOUNT_POSIX_ACL           0x008000  /* POSIX Access Control Lists */
+#define EXT2_MOUNT_XIP                 0x010000  /* Execute in place */
+#define EXT2_MOUNT_USRQUOTA            0x020000  /* user quota */
+#define EXT2_MOUNT_GRPQUOTA            0x040000  /* group quota */
+#define EXT2_MOUNT_RESERVATION         0x080000  /* Preallocation */
+
+
+#define clear_opt(o, opt)              o &= ~EXT2_MOUNT_##opt
+#define set_opt(o, opt)                        o |= EXT2_MOUNT_##opt
+#define test_opt(sb, opt)              (EXT2_SB(sb)->s_mount_opt & \
+                                        EXT2_MOUNT_##opt)
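
As an aside, set_opt()/clear_opt() rely on token pasting to turn a short option name into the full EXT2_MOUNT_* constant. A minimal standalone sketch of the same pattern (the demo_* macros are invented; the values are copied from the table above):

#include <stdio.h>

#define EXT2_MOUNT_GRPID	0x000004
#define EXT2_MOUNT_ERRORS_RO	0x000020

#define demo_clear_opt(o, opt)	((o) &= ~EXT2_MOUNT_##opt)
#define demo_set_opt(o, opt)	((o) |= EXT2_MOUNT_##opt)

int main(void)
{
	unsigned long mount_opt = 0;

	demo_set_opt(mount_opt, GRPID);		/* now 0x000004 */
	demo_set_opt(mount_opt, ERRORS_RO);	/* now 0x000024 */
	demo_clear_opt(mount_opt, GRPID);	/* now 0x000020 */
	printf("s_mount_opt = %#lx\n", mount_opt);
	return 0;
}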
+/*
+ * Maximal mount counts between two filesystem checks
+ */
+#define EXT2_DFL_MAX_MNT_COUNT         20      /* Allow 20 mounts */
+#define EXT2_DFL_CHECKINTERVAL         0       /* Don't use interval check */
+
+/*
+ * Behaviour when detecting errors
+ */
+#define EXT2_ERRORS_CONTINUE           1       /* Continue execution */
+#define EXT2_ERRORS_RO                 2       /* Remount fs read-only */
+#define EXT2_ERRORS_PANIC              3       /* Panic */
+#define EXT2_ERRORS_DEFAULT            EXT2_ERRORS_CONTINUE
+
+/*
+ * Structure of the super block
+ */
+struct ext2_super_block {
+       __le32  s_inodes_count;         /* Inodes count */
+       __le32  s_blocks_count;         /* Blocks count */
+       __le32  s_r_blocks_count;       /* Reserved blocks count */
+       __le32  s_free_blocks_count;    /* Free blocks count */
+       __le32  s_free_inodes_count;    /* Free inodes count */
+       __le32  s_first_data_block;     /* First Data Block */
+       __le32  s_log_block_size;       /* Block size */
+       __le32  s_log_frag_size;        /* Fragment size */
+       __le32  s_blocks_per_group;     /* # Blocks per group */
+       __le32  s_frags_per_group;      /* # Fragments per group */
+       __le32  s_inodes_per_group;     /* # Inodes per group */
+       __le32  s_mtime;                /* Mount time */
+       __le32  s_wtime;                /* Write time */
+       __le16  s_mnt_count;            /* Mount count */
+       __le16  s_max_mnt_count;        /* Maximal mount count */
+       __le16  s_magic;                /* Magic signature */
+       __le16  s_state;                /* File system state */
+       __le16  s_errors;               /* Behaviour when detecting errors */
+       __le16  s_minor_rev_level;      /* minor revision level */
+       __le32  s_lastcheck;            /* time of last check */
+       __le32  s_checkinterval;        /* max. time between checks */
+       __le32  s_creator_os;           /* OS */
+       __le32  s_rev_level;            /* Revision level */
+       __le16  s_def_resuid;           /* Default uid for reserved blocks */
+       __le16  s_def_resgid;           /* Default gid for reserved blocks */
+       /*
+        * These fields are for EXT2_DYNAMIC_REV superblocks only.
+        *
+        * Note: the difference between the compatible feature set and
+        * the incompatible feature set is that if there is a bit set
+        * in the incompatible feature set that the kernel doesn't
+        * know about, it should refuse to mount the filesystem.
+        * 
+        * e2fsck's requirements are more strict; if it doesn't know
+        * about a feature in either the compatible or incompatible
+        * feature set, it must abort and not try to meddle with
+        * things it doesn't understand...
+        */
+       __le32  s_first_ino;            /* First non-reserved inode */
+       __le16   s_inode_size;          /* size of inode structure */
+       __le16  s_block_group_nr;       /* block group # of this superblock */
+       __le32  s_feature_compat;       /* compatible feature set */
+       __le32  s_feature_incompat;     /* incompatible feature set */
+       __le32  s_feature_ro_compat;    /* readonly-compatible feature set */
+       __u8    s_uuid[16];             /* 128-bit uuid for volume */
+       char    s_volume_name[16];      /* volume name */
+       char    s_last_mounted[64];     /* directory where last mounted */
+       __le32  s_algorithm_usage_bitmap; /* For compression */
+       /*
+        * Performance hints.  Directory preallocation should only
+        * happen if the EXT2_COMPAT_PREALLOC flag is on.
+        */
+       __u8    s_prealloc_blocks;      /* Nr of blocks to try to preallocate*/
+       __u8    s_prealloc_dir_blocks;  /* Nr to preallocate for dirs */
+       __u16   s_padding1;
+       /*
+        * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
+        */
+       __u8    s_journal_uuid[16];     /* uuid of journal superblock */
+       __u32   s_journal_inum;         /* inode number of journal file */
+       __u32   s_journal_dev;          /* device number of journal file */
+       __u32   s_last_orphan;          /* start of list of inodes to delete */
+       __u32   s_hash_seed[4];         /* HTREE hash seed */
+       __u8    s_def_hash_version;     /* Default hash version to use */
+       __u8    s_reserved_char_pad;
+       __u16   s_reserved_word_pad;
+       __le32  s_default_mount_opts;
+       __le32  s_first_meta_bg;        /* First metablock block group */
+       __u32   s_reserved[190];        /* Padding to the end of the block */
+};
+
+/*
+ * Codes for operating systems
+ */
+#define EXT2_OS_LINUX          0
+#define EXT2_OS_HURD           1
+#define EXT2_OS_MASIX          2
+#define EXT2_OS_FREEBSD                3
+#define EXT2_OS_LITES          4
+
+/*
+ * Revision levels
+ */
+#define EXT2_GOOD_OLD_REV      0       /* The good old (original) format */
+#define EXT2_DYNAMIC_REV       1       /* V2 format w/ dynamic inode sizes */
+
+#define EXT2_CURRENT_REV       EXT2_GOOD_OLD_REV
+#define EXT2_MAX_SUPP_REV      EXT2_DYNAMIC_REV
+
+#define EXT2_GOOD_OLD_INODE_SIZE 128
+
+/*
+ * Feature set definitions
+ */
+
+#define EXT2_HAS_COMPAT_FEATURE(sb,mask)                       \
+       ( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
+#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask)                    \
+       ( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
+#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask)                     \
+       ( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
+#define EXT2_SET_COMPAT_FEATURE(sb,mask)                       \
+       EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
+#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask)                    \
+       EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
+#define EXT2_SET_INCOMPAT_FEATURE(sb,mask)                     \
+       EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
+#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask)                     \
+       EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
+#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask)                  \
+       EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
+#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask)                   \
+       EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
+
+#define EXT2_FEATURE_COMPAT_DIR_PREALLOC       0x0001
+#define EXT2_FEATURE_COMPAT_IMAGIC_INODES      0x0002
+#define EXT3_FEATURE_COMPAT_HAS_JOURNAL                0x0004
+#define EXT2_FEATURE_COMPAT_EXT_ATTR           0x0008
+#define EXT2_FEATURE_COMPAT_RESIZE_INO         0x0010
+#define EXT2_FEATURE_COMPAT_DIR_INDEX          0x0020
+#define EXT2_FEATURE_COMPAT_ANY                        0xffffffff
+
+#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER    0x0001
+#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE      0x0002
+#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR       0x0004
+#define EXT2_FEATURE_RO_COMPAT_ANY             0xffffffff
+
+#define EXT2_FEATURE_INCOMPAT_COMPRESSION      0x0001
+#define EXT2_FEATURE_INCOMPAT_FILETYPE         0x0002
+#define EXT3_FEATURE_INCOMPAT_RECOVER          0x0004
+#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV      0x0008
+#define EXT2_FEATURE_INCOMPAT_META_BG          0x0010
+#define EXT2_FEATURE_INCOMPAT_ANY              0xffffffff
+
+#define EXT2_FEATURE_COMPAT_SUPP       EXT2_FEATURE_COMPAT_EXT_ATTR
+#define EXT2_FEATURE_INCOMPAT_SUPP     (EXT2_FEATURE_INCOMPAT_FILETYPE| \
+                                        EXT2_FEATURE_INCOMPAT_META_BG)
+#define EXT2_FEATURE_RO_COMPAT_SUPP    (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
+                                        EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
+#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED     ~EXT2_FEATURE_RO_COMPAT_SUPP
+#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED      ~EXT2_FEATURE_INCOMPAT_SUPP
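
The *_UNSUPPORTED masks encode the policy described in the superblock comment above: any incompat bit the kernel does not know about must cause the mount to fail. A hedged, standalone sketch of that check (can_mount() is an invented helper, not the kernel's mount path):

#include <stdint.h>
#include <stdio.h>

#define EXT2_FEATURE_INCOMPAT_FILETYPE	0x0002
#define EXT2_FEATURE_INCOMPAT_META_BG	0x0010
#define EXT2_FEATURE_INCOMPAT_SUPP	(EXT2_FEATURE_INCOMPAT_FILETYPE | \
					 EXT2_FEATURE_INCOMPAT_META_BG)

static int can_mount(uint32_t s_feature_incompat)
{
	uint32_t unsupported = s_feature_incompat & ~EXT2_FEATURE_INCOMPAT_SUPP;

	if (unsupported) {
		fprintf(stderr, "unsupported incompat features %#x\n",
			(unsigned)unsupported);
		return 0;	/* refuse the mount */
	}
	return 1;
}

int main(void)
{
	printf("%d\n", can_mount(0x0002));	/* FILETYPE only    -> 1 */
	printf("%d\n", can_mount(0x0042));	/* unknown bit 0x40 -> 0 */
	return 0;
}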
+
+/*
+ * Default values for user and/or group using reserved blocks
+ */
+#define        EXT2_DEF_RESUID         0
+#define        EXT2_DEF_RESGID         0
+
+/*
+ * Default mount options
+ */
+#define EXT2_DEFM_DEBUG                0x0001
+#define EXT2_DEFM_BSDGROUPS    0x0002
+#define EXT2_DEFM_XATTR_USER   0x0004
+#define EXT2_DEFM_ACL          0x0008
+#define EXT2_DEFM_UID16                0x0010
+    /* Not used by ext2, but reserved for use by ext3 */
+#define EXT3_DEFM_JMODE                0x0060 
+#define EXT3_DEFM_JMODE_DATA   0x0020
+#define EXT3_DEFM_JMODE_ORDERED        0x0040
+#define EXT3_DEFM_JMODE_WBACK  0x0060
+
+/*
+ * Structure of a directory entry
+ */
+
+struct ext2_dir_entry {
+       __le32  inode;                  /* Inode number */
+       __le16  rec_len;                /* Directory entry length */
+       __le16  name_len;               /* Name length */
+       char    name[];                 /* File name, up to EXT2_NAME_LEN */
+};
+
+/*
+ * The new version of the directory entry.  Since EXT2 structures are
+ * stored in intel byte order, and the name_len field could never be
+ * bigger than 255 chars, it's safe to reclaim the extra byte for the
+ * file_type field.
+ */
+struct ext2_dir_entry_2 {
+       __le32  inode;                  /* Inode number */
+       __le16  rec_len;                /* Directory entry length */
+       __u8    name_len;               /* Name length */
+       __u8    file_type;
+       char    name[];                 /* File name, up to EXT2_NAME_LEN */
+};
+
+/*
+ * Ext2 directory file types.  Only the low 3 bits are used.  The
+ * other bits are reserved for now.
+ */
+enum {
+       EXT2_FT_UNKNOWN         = 0,
+       EXT2_FT_REG_FILE        = 1,
+       EXT2_FT_DIR             = 2,
+       EXT2_FT_CHRDEV          = 3,
+       EXT2_FT_BLKDEV          = 4,
+       EXT2_FT_FIFO            = 5,
+       EXT2_FT_SOCK            = 6,
+       EXT2_FT_SYMLINK         = 7,
+       EXT2_FT_MAX
+};
+
+/*
+ * EXT2_DIR_PAD defines the directory entries boundaries
+ *
+ * NOTE: It must be a multiple of 4
+ */
+#define EXT2_DIR_PAD                   4
+#define EXT2_DIR_ROUND                         (EXT2_DIR_PAD - 1)
+#define EXT2_DIR_REC_LEN(name_len)     (((name_len) + 8 + EXT2_DIR_ROUND) & \
+                                        ~EXT2_DIR_ROUND)
+#define EXT2_MAX_REC_LEN               ((1<<16)-1)
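
For reference, EXT2_DIR_REC_LEN() adds the 8 fixed header bytes of a directory entry (inode, rec_len, name_len/file_type) to the name length and rounds up to a multiple of EXT2_DIR_PAD. A standalone worked example:

#include <stdio.h>

#define EXT2_DIR_PAD	4
#define EXT2_DIR_ROUND	(EXT2_DIR_PAD - 1)
#define EXT2_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT2_DIR_ROUND) & \
					 ~EXT2_DIR_ROUND)

int main(void)
{
	/* "a" -> 12, "hello" -> 16, a 255-character name -> 264 */
	printf("%d %d %d\n", EXT2_DIR_REC_LEN(1), EXT2_DIR_REC_LEN(5),
	       EXT2_DIR_REC_LEN(255));
	return 0;
}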
+
+static inline void verify_offsets(void)
+{
+#define A(x,y) BUILD_BUG_ON(x != offsetof(struct ext2_super_block, y));
+       A(EXT2_SB_MAGIC_OFFSET, s_magic);
+       A(EXT2_SB_BLOCKS_OFFSET, s_blocks_count);
+       A(EXT2_SB_BSIZE_OFFSET, s_log_block_size);
+#undef A
+}
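
verify_offsets() turns a wrong on-disk superblock layout into a build failure via BUILD_BUG_ON(). The same idea in standalone C11, using _Static_assert with an invented, much-shortened demo_sb layout:

#include <stddef.h>
#include <stdint.h>

struct demo_sb {			/* invented layout for illustration */
	uint32_t s_inodes_count;
	uint32_t s_blocks_count;
	uint16_t s_magic;
};

#define DEMO_SB_MAGIC_OFFSET 8		/* expected on-disk offset */

/* A wrong layout becomes a compile error, like BUILD_BUG_ON() above. */
_Static_assert(offsetof(struct demo_sb, s_magic) == DEMO_SB_MAGIC_OFFSET,
	       "on-disk superblock layout changed");

int main(void)
{
	return 0;
}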
 
 /*
  * ext2 mount options
index be7a8d02c9a7e40cea775bd9cdc7eb8e56c0d00e..cfedb2cb0d8c461e7283396d17ab0fd84d919423 100644 (file)
@@ -3,10 +3,7 @@
  * Handler for storing security labels as extended attributes.
  */
 
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext2_fs.h>
+#include "ext2.h"
 #include <linux/security.h>
 #include "xattr.h"
 
index 2989467d3595c0ff8a8349863910bff43a073273..7e192574c0013e6dbdfc591dc0395d4ac12f4886 100644 (file)
@@ -5,10 +5,7 @@
  * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
  */
 
-#include <linux/string.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext2_fs.h>
+#include "ext2.h"
 #include "xattr.h"
 
 static size_t
index 322a56b2dfb1a190382a68e9844d3425339ab570..1c3312858fcf42703b7bb1c6be52f24c9b1a5b47 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/fs.h>
 #include <linux/genhd.h>
 #include <linux/buffer_head.h>
-#include <linux/ext2_fs_sb.h>
-#include <linux/ext2_fs.h>
 #include <linux/blkdev.h>
 #include "ext2.h"
 #include "xip.h"
index 3091f62e55b680ae567817c50a6bda3161753895..c76832c8d19229d6c868878ca3f7250394f10f72 100644 (file)
@@ -4,13 +4,7 @@
  * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
  */
 
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 
index 1e036b79384c214f517c052de529932069d39dd0..baac1b129fba9642dcf5eed242a5b6f82a8b2128 100644 (file)
  *        David S. Miller (davem@caip.rutgers.edu), 1995
  */
 
-#include <linux/time.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
 #include <linux/quotaops.h>
-#include <linux/buffer_head.h>
 #include <linux/blkdev.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
 
 /*
  * balloc.c contains the blocks allocation and deallocation routines
index 6afc39d80253cf4560ce69b9a4b1983d1df3871b..909d13e265603dd24e3e1c2d143e87f7beda4202 100644 (file)
@@ -7,9 +7,7 @@
  * Universite Pierre et Marie Curie (Paris VI)
  */
 
-#include <linux/buffer_head.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 
 #ifdef EXT3FS_DEBUG
 
index 34f0a072b9350a716dd676ef8c506580f9a0e6fb..cc761ad8fa571541ae6aede1924b9178a342ab5f 100644 (file)
  *
  */
 
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/buffer_head.h>
-#include <linux/slab.h>
-#include <linux/rbtree.h>
+#include "ext3.h"
 
 static unsigned char ext3_filetype_table[] = {
        DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
new file mode 100644 (file)
index 0000000..b6515fd
--- /dev/null
@@ -0,0 +1,1322 @@
+/*
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
+ *
+ * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/include/linux/minix_fs.h
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+#include <linux/magic.h>
+#include <linux/bug.h>
+#include <linux/blockgroup_lock.h>
+
+/*
+ * The second extended filesystem constants/structures
+ */
+
+/*
+ * Define EXT3FS_DEBUG to produce debug messages
+ */
+#undef EXT3FS_DEBUG
+
+/*
+ * Define EXT3_RESERVATION to reserve data blocks for expanding files
+ */
+#define EXT3_DEFAULT_RESERVE_BLOCKS     8
+/* max window size: 1024 (direct blocks) + 3 ([t,d]indirect blocks) */
+#define EXT3_MAX_RESERVE_BLOCKS         1027
+#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
+
+/*
+ * Debug code
+ */
+#ifdef EXT3FS_DEBUG
+#define ext3_debug(f, a...)                                            \
+       do {                                                            \
+               printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:",       \
+                       __FILE__, __LINE__, __func__);          \
+               printk (KERN_DEBUG f, ## a);                            \
+       } while (0)
+#else
+#define ext3_debug(f, a...)    do {} while (0)
+#endif
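
The do/while (0) wrapper is what lets ext3_debug() behave like a single statement, for example inside an un-braced if/else. A userspace mock of the same pattern (printf stands in for printk; the demo_* names are invented):

#include <stdio.h>

#define DEMO_DEBUG 1

#if DEMO_DEBUG
#define demo_debug(f, a...)						\
	do {								\
		printf("DEMO-fs DEBUG (%s, %d): %s: ",			\
		       __FILE__, __LINE__, __func__);			\
		printf(f, ## a);					\
	} while (0)
#else
#define demo_debug(f, a...)	do {} while (0)
#endif

int main(void)
{
	int nblocks = 3;

	/* Safe in an un-braced if/else only because of the do/while (0). */
	if (nblocks)
		demo_debug("freeing %d blocks\n", nblocks);
	else
		demo_debug("nothing to free\n");
	return 0;
}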
+
+/*
+ * Special inodes numbers
+ */
+#define        EXT3_BAD_INO             1      /* Bad blocks inode */
+#define EXT3_ROOT_INO           2      /* Root inode */
+#define EXT3_BOOT_LOADER_INO    5      /* Boot loader inode */
+#define EXT3_UNDEL_DIR_INO      6      /* Undelete directory inode */
+#define EXT3_RESIZE_INO                 7      /* Reserved group descriptors inode */
+#define EXT3_JOURNAL_INO        8      /* Journal inode */
+
+/* First non-reserved inode for old ext3 filesystems */
+#define EXT3_GOOD_OLD_FIRST_INO        11
+
+/*
+ * Maximal count of links to a file
+ */
+#define EXT3_LINK_MAX          32000
+
+/*
+ * Macro-instructions used to manage several block sizes
+ */
+#define EXT3_MIN_BLOCK_SIZE            1024
+#define        EXT3_MAX_BLOCK_SIZE             65536
+#define EXT3_MIN_BLOCK_LOG_SIZE                10
+#define EXT3_BLOCK_SIZE(s)             ((s)->s_blocksize)
+#define        EXT3_ADDR_PER_BLOCK(s)          (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
+#define EXT3_BLOCK_SIZE_BITS(s)        ((s)->s_blocksize_bits)
+#define        EXT3_ADDR_PER_BLOCK_BITS(s)     (EXT3_SB(s)->s_addr_per_block_bits)
+#define EXT3_INODE_SIZE(s)             (EXT3_SB(s)->s_inode_size)
+#define EXT3_FIRST_INO(s)              (EXT3_SB(s)->s_first_ino)
+
+/*
+ * Macro-instructions used to manage fragments
+ */
+#define EXT3_MIN_FRAG_SIZE             1024
+#define        EXT3_MAX_FRAG_SIZE              4096
+#define EXT3_MIN_FRAG_LOG_SIZE           10
+#define EXT3_FRAG_SIZE(s)              (EXT3_SB(s)->s_frag_size)
+#define EXT3_FRAGS_PER_BLOCK(s)                (EXT3_SB(s)->s_frags_per_block)
+
+/*
+ * Structure of a blocks group descriptor
+ */
+struct ext3_group_desc
+{
+       __le32  bg_block_bitmap;                /* Blocks bitmap block */
+       __le32  bg_inode_bitmap;                /* Inodes bitmap block */
+       __le32  bg_inode_table;         /* Inodes table block */
+       __le16  bg_free_blocks_count;   /* Free blocks count */
+       __le16  bg_free_inodes_count;   /* Free inodes count */
+       __le16  bg_used_dirs_count;     /* Directories count */
+       __u16   bg_pad;
+       __le32  bg_reserved[3];
+};
+
+/*
+ * Macro-instructions used to manage group descriptors
+ */
+#define EXT3_BLOCKS_PER_GROUP(s)       (EXT3_SB(s)->s_blocks_per_group)
+#define EXT3_DESC_PER_BLOCK(s)         (EXT3_SB(s)->s_desc_per_block)
+#define EXT3_INODES_PER_GROUP(s)       (EXT3_SB(s)->s_inodes_per_group)
+#define EXT3_DESC_PER_BLOCK_BITS(s)    (EXT3_SB(s)->s_desc_per_block_bits)
+
+/*
+ * Constants relative to the data blocks
+ */
+#define        EXT3_NDIR_BLOCKS                12
+#define        EXT3_IND_BLOCK                  EXT3_NDIR_BLOCKS
+#define        EXT3_DIND_BLOCK                 (EXT3_IND_BLOCK + 1)
+#define        EXT3_TIND_BLOCK                 (EXT3_DIND_BLOCK + 1)
+#define        EXT3_N_BLOCKS                   (EXT3_TIND_BLOCK + 1)
+
+/*
+ * Inode flags
+ */
+#define        EXT3_SECRM_FL                   0x00000001 /* Secure deletion */
+#define        EXT3_UNRM_FL                    0x00000002 /* Undelete */
+#define        EXT3_COMPR_FL                   0x00000004 /* Compress file */
+#define EXT3_SYNC_FL                   0x00000008 /* Synchronous updates */
+#define EXT3_IMMUTABLE_FL              0x00000010 /* Immutable file */
+#define EXT3_APPEND_FL                 0x00000020 /* writes to file may only append */
+#define EXT3_NODUMP_FL                 0x00000040 /* do not dump file */
+#define EXT3_NOATIME_FL                        0x00000080 /* do not update atime */
+/* Reserved for compression usage... */
+#define EXT3_DIRTY_FL                  0x00000100
+#define EXT3_COMPRBLK_FL               0x00000200 /* One or more compressed clusters */
+#define EXT3_NOCOMPR_FL                        0x00000400 /* Don't compress */
+#define EXT3_ECOMPR_FL                 0x00000800 /* Compression error */
+/* End compression flags --- maybe not all used */
+#define EXT3_INDEX_FL                  0x00001000 /* hash-indexed directory */
+#define EXT3_IMAGIC_FL                 0x00002000 /* AFS directory */
+#define EXT3_JOURNAL_DATA_FL           0x00004000 /* file data should be journaled */
+#define EXT3_NOTAIL_FL                 0x00008000 /* file tail should not be merged */
+#define EXT3_DIRSYNC_FL                        0x00010000 /* dirsync behaviour (directories only) */
+#define EXT3_TOPDIR_FL                 0x00020000 /* Top of directory hierarchies*/
+#define EXT3_RESERVED_FL               0x80000000 /* reserved for ext3 lib */
+
+#define EXT3_FL_USER_VISIBLE           0x0003DFFF /* User visible flags */
+#define EXT3_FL_USER_MODIFIABLE                0x000380FF /* User modifiable flags */
+
+/* Flags that should be inherited by new inodes from their parent. */
+#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
+                          EXT3_SYNC_FL | EXT3_NODUMP_FL |\
+                          EXT3_NOATIME_FL | EXT3_COMPRBLK_FL |\
+                          EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
+                          EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
+
+/* Flags that are appropriate for regular files (all but dir-specific ones). */
+#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
+
+/* Flags that are appropriate for non-directories/regular files. */
+#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
+
+/* Mask out flags that are inappropriate for the given type of inode. */
+static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
+{
+       if (S_ISDIR(mode))
+               return flags;
+       else if (S_ISREG(mode))
+               return flags & EXT3_REG_FLMASK;
+       else
+               return flags & EXT3_OTHER_FLMASK;
+}
+
+/* Used to pass group descriptor data when online resize is done */
+struct ext3_new_group_input {
+       __u32 group;            /* Group number for this data */
+       __u32 block_bitmap;     /* Absolute block number of block bitmap */
+       __u32 inode_bitmap;     /* Absolute block number of inode bitmap */
+       __u32 inode_table;      /* Absolute block number of inode table start */
+       __u32 blocks_count;     /* Total number of blocks in this group */
+       __u16 reserved_blocks;  /* Number of reserved blocks in this group */
+       __u16 unused;
+};
+
+/* The struct ext3_new_group_input in kernel space, with free_blocks_count */
+struct ext3_new_group_data {
+       __u32 group;
+       __u32 block_bitmap;
+       __u32 inode_bitmap;
+       __u32 inode_table;
+       __u32 blocks_count;
+       __u16 reserved_blocks;
+       __u16 unused;
+       __u32 free_blocks_count;
+};
+
+
+/*
+ * ioctl commands
+ */
+#define        EXT3_IOC_GETFLAGS               FS_IOC_GETFLAGS
+#define        EXT3_IOC_SETFLAGS               FS_IOC_SETFLAGS
+#define        EXT3_IOC_GETVERSION             _IOR('f', 3, long)
+#define        EXT3_IOC_SETVERSION             _IOW('f', 4, long)
+#define EXT3_IOC_GROUP_EXTEND          _IOW('f', 7, unsigned long)
+#define EXT3_IOC_GROUP_ADD             _IOW('f', 8,struct ext3_new_group_input)
+#define        EXT3_IOC_GETVERSION_OLD         FS_IOC_GETVERSION
+#define        EXT3_IOC_SETVERSION_OLD         FS_IOC_SETVERSION
+#ifdef CONFIG_JBD_DEBUG
+#define EXT3_IOC_WAIT_FOR_READONLY     _IOR('f', 99, long)
+#endif
+#define EXT3_IOC_GETRSVSZ              _IOR('f', 5, long)
+#define EXT3_IOC_SETRSVSZ              _IOW('f', 6, long)
+
+/*
+ * ioctl commands in 32 bit emulation
+ */
+#define EXT3_IOC32_GETFLAGS            FS_IOC32_GETFLAGS
+#define EXT3_IOC32_SETFLAGS            FS_IOC32_SETFLAGS
+#define EXT3_IOC32_GETVERSION          _IOR('f', 3, int)
+#define EXT3_IOC32_SETVERSION          _IOW('f', 4, int)
+#define EXT3_IOC32_GETRSVSZ            _IOR('f', 5, int)
+#define EXT3_IOC32_SETRSVSZ            _IOW('f', 6, int)
+#define EXT3_IOC32_GROUP_EXTEND                _IOW('f', 7, unsigned int)
+#ifdef CONFIG_JBD_DEBUG
+#define EXT3_IOC32_WAIT_FOR_READONLY   _IOR('f', 99, int)
+#endif
+#define EXT3_IOC32_GETVERSION_OLD      FS_IOC32_GETVERSION
+#define EXT3_IOC32_SETVERSION_OLD      FS_IOC32_SETVERSION
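
Since EXT3_IOC_GETFLAGS aliases the generic FS_IOC_GETFLAGS number, the inode flags of a file on ext3 can be read from userspace through the standard <linux/fs.h> interface. A minimal example (error handling kept short; "somefile" is a placeholder path):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	long flags = 0;
	int fd = open("somefile", O_RDONLY);	/* placeholder path */

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("FS_IOC_GETFLAGS");
		return 1;
	}
	printf("inode flags: %#lx\n", flags);
	return 0;
}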
+
+
+/*
+ *  Mount options
+ */
+struct ext3_mount_options {
+       unsigned long s_mount_opt;
+       uid_t s_resuid;
+       gid_t s_resgid;
+       unsigned long s_commit_interval;
+#ifdef CONFIG_QUOTA
+       int s_jquota_fmt;
+       char *s_qf_names[MAXQUOTAS];
+#endif
+};
+
+/*
+ * Structure of an inode on the disk
+ */
+struct ext3_inode {
+       __le16  i_mode;         /* File mode */
+       __le16  i_uid;          /* Low 16 bits of Owner Uid */
+       __le32  i_size;         /* Size in bytes */
+       __le32  i_atime;        /* Access time */
+       __le32  i_ctime;        /* Creation time */
+       __le32  i_mtime;        /* Modification time */
+       __le32  i_dtime;        /* Deletion Time */
+       __le16  i_gid;          /* Low 16 bits of Group Id */
+       __le16  i_links_count;  /* Links count */
+       __le32  i_blocks;       /* Blocks count */
+       __le32  i_flags;        /* File flags */
+       union {
+               struct {
+                       __u32  l_i_reserved1;
+               } linux1;
+               struct {
+                       __u32  h_i_translator;
+               } hurd1;
+               struct {
+                       __u32  m_i_reserved1;
+               } masix1;
+       } osd1;                         /* OS dependent 1 */
+       __le32  i_block[EXT3_N_BLOCKS];/* Pointers to blocks */
+       __le32  i_generation;   /* File version (for NFS) */
+       __le32  i_file_acl;     /* File ACL */
+       __le32  i_dir_acl;      /* Directory ACL */
+       __le32  i_faddr;        /* Fragment address */
+       union {
+               struct {
+                       __u8    l_i_frag;       /* Fragment number */
+                       __u8    l_i_fsize;      /* Fragment size */
+                       __u16   i_pad1;
+                       __le16  l_i_uid_high;   /* these 2 fields    */
+                       __le16  l_i_gid_high;   /* were reserved2[0] */
+                       __u32   l_i_reserved2;
+               } linux2;
+               struct {
+                       __u8    h_i_frag;       /* Fragment number */
+                       __u8    h_i_fsize;      /* Fragment size */
+                       __u16   h_i_mode_high;
+                       __u16   h_i_uid_high;
+                       __u16   h_i_gid_high;
+                       __u32   h_i_author;
+               } hurd2;
+               struct {
+                       __u8    m_i_frag;       /* Fragment number */
+                       __u8    m_i_fsize;      /* Fragment size */
+                       __u16   m_pad1;
+                       __u32   m_i_reserved2[2];
+               } masix2;
+       } osd2;                         /* OS dependent 2 */
+       __le16  i_extra_isize;
+       __le16  i_pad1;
+};
+
+#define i_size_high    i_dir_acl
+
+#define i_reserved1    osd1.linux1.l_i_reserved1
+#define i_frag         osd2.linux2.l_i_frag
+#define i_fsize                osd2.linux2.l_i_fsize
+#define i_uid_low      i_uid
+#define i_gid_low      i_gid
+#define i_uid_high     osd2.linux2.l_i_uid_high
+#define i_gid_high     osd2.linux2.l_i_gid_high
+#define i_reserved2    osd2.linux2.l_i_reserved2
+
+/*
+ * File system states
+ */
+#define        EXT3_VALID_FS                   0x0001  /* Unmounted cleanly */
+#define        EXT3_ERROR_FS                   0x0002  /* Errors detected */
+#define        EXT3_ORPHAN_FS                  0x0004  /* Orphans being recovered */
+
+/*
+ * Misc. filesystem flags
+ */
+#define EXT2_FLAGS_SIGNED_HASH         0x0001  /* Signed dirhash in use */
+#define EXT2_FLAGS_UNSIGNED_HASH       0x0002  /* Unsigned dirhash in use */
+#define EXT2_FLAGS_TEST_FILESYS                0x0004  /* to test development code */
+
+/*
+ * Mount flags
+ */
+#define EXT3_MOUNT_CHECK               0x00001 /* Do mount-time checks */
+/* EXT3_MOUNT_OLDALLOC was there */
+#define EXT3_MOUNT_GRPID               0x00004 /* Create files with directory's group */
+#define EXT3_MOUNT_DEBUG               0x00008 /* Some debugging messages */
+#define EXT3_MOUNT_ERRORS_CONT         0x00010 /* Continue on errors */
+#define EXT3_MOUNT_ERRORS_RO           0x00020 /* Remount fs ro on errors */
+#define EXT3_MOUNT_ERRORS_PANIC                0x00040 /* Panic on errors */
+#define EXT3_MOUNT_MINIX_DF            0x00080 /* Mimics the Minix statfs */
+#define EXT3_MOUNT_NOLOAD              0x00100 /* Don't use existing journal*/
+#define EXT3_MOUNT_ABORT               0x00200 /* Fatal error detected */
+#define EXT3_MOUNT_DATA_FLAGS          0x00C00 /* Mode for data writes: */
+#define EXT3_MOUNT_JOURNAL_DATA                0x00400 /* Write data to journal */
+#define EXT3_MOUNT_ORDERED_DATA                0x00800 /* Flush data before commit */
+#define EXT3_MOUNT_WRITEBACK_DATA      0x00C00 /* No data ordering */
+#define EXT3_MOUNT_UPDATE_JOURNAL      0x01000 /* Update the journal format */
+#define EXT3_MOUNT_NO_UID32            0x02000  /* Disable 32-bit UIDs */
+#define EXT3_MOUNT_XATTR_USER          0x04000 /* Extended user attributes */
+#define EXT3_MOUNT_POSIX_ACL           0x08000 /* POSIX Access Control Lists */
+#define EXT3_MOUNT_RESERVATION         0x10000 /* Preallocation */
+#define EXT3_MOUNT_BARRIER             0x20000 /* Use block barriers */
+#define EXT3_MOUNT_QUOTA               0x80000 /* Some quota option set */
+#define EXT3_MOUNT_USRQUOTA            0x100000 /* "old" user quota */
+#define EXT3_MOUNT_GRPQUOTA            0x200000 /* "old" group quota */
+#define EXT3_MOUNT_DATA_ERR_ABORT      0x400000 /* Abort on file data write
+                                                 * error in ordered mode */
+
+/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+#ifndef _LINUX_EXT2_FS_H
+#define clear_opt(o, opt)              o &= ~EXT3_MOUNT_##opt
+#define set_opt(o, opt)                        o |= EXT3_MOUNT_##opt
+#define test_opt(sb, opt)              (EXT3_SB(sb)->s_mount_opt & \
+                                        EXT3_MOUNT_##opt)
+#else
+#define EXT2_MOUNT_NOLOAD              EXT3_MOUNT_NOLOAD
+#define EXT2_MOUNT_ABORT               EXT3_MOUNT_ABORT
+#define EXT2_MOUNT_DATA_FLAGS          EXT3_MOUNT_DATA_FLAGS
+#endif
+
+#define ext3_set_bit                   __set_bit_le
+#define ext3_set_bit_atomic            ext2_set_bit_atomic
+#define ext3_clear_bit                 __clear_bit_le
+#define ext3_clear_bit_atomic          ext2_clear_bit_atomic
+#define ext3_test_bit                  test_bit_le
+#define ext3_find_next_zero_bit                find_next_zero_bit_le
+
+/*
+ * Maximal mount counts between two filesystem checks
+ */
+#define EXT3_DFL_MAX_MNT_COUNT         20      /* Allow 20 mounts */
+#define EXT3_DFL_CHECKINTERVAL         0       /* Don't use interval check */
+
+/*
+ * Behaviour when detecting errors
+ */
+#define EXT3_ERRORS_CONTINUE           1       /* Continue execution */
+#define EXT3_ERRORS_RO                 2       /* Remount fs read-only */
+#define EXT3_ERRORS_PANIC              3       /* Panic */
+#define EXT3_ERRORS_DEFAULT            EXT3_ERRORS_CONTINUE
+
+/*
+ * Structure of the super block
+ */
+struct ext3_super_block {
+/*00*/ __le32  s_inodes_count;         /* Inodes count */
+       __le32  s_blocks_count;         /* Blocks count */
+       __le32  s_r_blocks_count;       /* Reserved blocks count */
+       __le32  s_free_blocks_count;    /* Free blocks count */
+/*10*/ __le32  s_free_inodes_count;    /* Free inodes count */
+       __le32  s_first_data_block;     /* First Data Block */
+       __le32  s_log_block_size;       /* Block size */
+       __le32  s_log_frag_size;        /* Fragment size */
+/*20*/ __le32  s_blocks_per_group;     /* # Blocks per group */
+       __le32  s_frags_per_group;      /* # Fragments per group */
+       __le32  s_inodes_per_group;     /* # Inodes per group */
+       __le32  s_mtime;                /* Mount time */
+/*30*/ __le32  s_wtime;                /* Write time */
+       __le16  s_mnt_count;            /* Mount count */
+       __le16  s_max_mnt_count;        /* Maximal mount count */
+       __le16  s_magic;                /* Magic signature */
+       __le16  s_state;                /* File system state */
+       __le16  s_errors;               /* Behaviour when detecting errors */
+       __le16  s_minor_rev_level;      /* minor revision level */
+/*40*/ __le32  s_lastcheck;            /* time of last check */
+       __le32  s_checkinterval;        /* max. time between checks */
+       __le32  s_creator_os;           /* OS */
+       __le32  s_rev_level;            /* Revision level */
+/*50*/ __le16  s_def_resuid;           /* Default uid for reserved blocks */
+       __le16  s_def_resgid;           /* Default gid for reserved blocks */
+       /*
+        * These fields are for EXT3_DYNAMIC_REV superblocks only.
+        *
+        * Note: the difference between the compatible feature set and
+        * the incompatible feature set is that if there is a bit set
+        * in the incompatible feature set that the kernel doesn't
+        * know about, it should refuse to mount the filesystem.
+        *
+        * e2fsck's requirements are more strict; if it doesn't know
+        * about a feature in either the compatible or incompatible
+        * feature set, it must abort and not try to meddle with
+        * things it doesn't understand...
+        */
+       __le32  s_first_ino;            /* First non-reserved inode */
+       __le16   s_inode_size;          /* size of inode structure */
+       __le16  s_block_group_nr;       /* block group # of this superblock */
+       __le32  s_feature_compat;       /* compatible feature set */
+/*60*/ __le32  s_feature_incompat;     /* incompatible feature set */
+       __le32  s_feature_ro_compat;    /* readonly-compatible feature set */
+/*68*/ __u8    s_uuid[16];             /* 128-bit uuid for volume */
+/*78*/ char    s_volume_name[16];      /* volume name */
+/*88*/ char    s_last_mounted[64];     /* directory where last mounted */
+/*C8*/ __le32  s_algorithm_usage_bitmap; /* For compression */
+       /*
+        * Performance hints.  Directory preallocation should only
+        * happen if the EXT3_FEATURE_COMPAT_DIR_PREALLOC flag is on.
+        */
+       __u8    s_prealloc_blocks;      /* Nr of blocks to try to preallocate*/
+       __u8    s_prealloc_dir_blocks;  /* Nr to preallocate for dirs */
+       __le16  s_reserved_gdt_blocks;  /* Per group desc for online growth */
+       /*
+        * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
+        */
+/*D0*/ __u8    s_journal_uuid[16];     /* uuid of journal superblock */
+/*E0*/ __le32  s_journal_inum;         /* inode number of journal file */
+       __le32  s_journal_dev;          /* device number of journal file */
+       __le32  s_last_orphan;          /* start of list of inodes to delete */
+       __le32  s_hash_seed[4];         /* HTREE hash seed */
+       __u8    s_def_hash_version;     /* Default hash version to use */
+       __u8    s_reserved_char_pad;
+       __u16   s_reserved_word_pad;
+       __le32  s_default_mount_opts;
+       __le32  s_first_meta_bg;        /* First metablock block group */
+       __le32  s_mkfs_time;            /* When the filesystem was created */
+       __le32  s_jnl_blocks[17];       /* Backup of the journal inode */
+       /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
+/*150*/        __le32  s_blocks_count_hi;      /* Blocks count */
+       __le32  s_r_blocks_count_hi;    /* Reserved blocks count */
+       __le32  s_free_blocks_count_hi; /* Free blocks count */
+       __le16  s_min_extra_isize;      /* All inodes have at least # bytes */
+       __le16  s_want_extra_isize;     /* New inodes should reserve # bytes */
+       __le32  s_flags;                /* Miscellaneous flags */
+       __le16  s_raid_stride;          /* RAID stride */
+       __le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
+       __le64  s_mmp_block;            /* Block for multi-mount protection */
+       __le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
+       __u8    s_log_groups_per_flex;  /* FLEX_BG group size */
+       __u8    s_reserved_char_pad2;
+       __le16  s_reserved_pad;
+       __u32   s_reserved[162];        /* Padding to the end of the block */
+};
+
+/* data type for block offset of block group */
+typedef int ext3_grpblk_t;
+
+/* data type for filesystem-wide blocks number */
+typedef unsigned long ext3_fsblk_t;
+
+#define E3FSBLK "%lu"
+
+struct ext3_reserve_window {
+       ext3_fsblk_t    _rsv_start;     /* First byte reserved */
+       ext3_fsblk_t    _rsv_end;       /* Last byte reserved or 0 */
+};
+
+struct ext3_reserve_window_node {
+       struct rb_node          rsv_node;
+       __u32                   rsv_goal_size;
+       __u32                   rsv_alloc_hit;
+       struct ext3_reserve_window      rsv_window;
+};
+
+struct ext3_block_alloc_info {
+       /* information about reservation window */
+       struct ext3_reserve_window_node rsv_window_node;
+       /*
+        * Was i_next_alloc_block in ext3_inode_info:
+        * the logical (file-relative) number of the
+        * most-recently-allocated block in this file.
+        * We use this for detecting linearly ascending allocation requests.
+        */
+       __u32                   last_alloc_logical_block;
+       /*
+        * Was i_next_alloc_goal in ext3_inode_info:
+        * the *physical* companion to i_next_alloc_block.
+        * It is the physical block number of the block which was most recently
+        * allocated to this file.  This gives us the goal (target) for the next
+        * allocation when we detect linearly ascending requests.
+        */
+       ext3_fsblk_t            last_alloc_physical_block;
+};
+
+#define rsv_start rsv_window._rsv_start
+#define rsv_end rsv_window._rsv_end
+
+/*
+ * third extended file system inode data in memory
+ */
+struct ext3_inode_info {
+       __le32  i_data[15];     /* unconverted */
+       __u32   i_flags;
+#ifdef EXT3_FRAGMENTS
+       __u32   i_faddr;
+       __u8    i_frag_no;
+       __u8    i_frag_size;
+#endif
+       ext3_fsblk_t    i_file_acl;
+       __u32   i_dir_acl;
+       __u32   i_dtime;
+
+       /*
+        * i_block_group is the number of the block group which contains
+        * this file's inode.  Constant across the lifetime of the inode,
+        * it is used for making block allocation decisions - we try to
+        * place a file's data blocks near its inode block, and new inodes
+        * near to their parent directory's inode.
+        */
+       __u32   i_block_group;
+       unsigned long   i_state_flags;  /* Dynamic state flags for ext3 */
+
+       /* block reservation info */
+       struct ext3_block_alloc_info *i_block_alloc_info;
+
+       __u32   i_dir_start_lookup;
+#ifdef CONFIG_EXT3_FS_XATTR
+       /*
+        * Extended attributes can be read independently of the main file
+        * data. Taking i_mutex even when reading would cause contention
+        * between readers of EAs and writers of regular file data, so
+        * instead we synchronize on xattr_sem when reading or changing
+        * EAs.
+        */
+       struct rw_semaphore xattr_sem;
+#endif
+
+       struct list_head i_orphan;      /* unlinked but open inodes */
+
+       /*
+        * i_disksize keeps track of what the inode size is ON DISK, not
+        * in memory.  During truncate, i_size is set to the new size by
+        * the VFS prior to calling ext3_truncate(), but the filesystem won't
+        * set i_disksize to 0 until the truncate is actually under way.
+        *
+        * The intent is that i_disksize always represents the blocks which
+        * are used by this file.  This allows recovery to restart truncate
+        * on orphans if we crash during truncate.  We actually write i_disksize
+        * into the on-disk inode when writing inodes out, instead of i_size.
+        *
+        * The only time when i_disksize and i_size may be different is when
+        * a truncate is in progress.  The only things which change i_disksize
+        * are ext3_get_block (growth) and ext3_truncate (shrinkth).
+        */
+       loff_t  i_disksize;
+
+       /* on-disk additional length */
+       __u16 i_extra_isize;
+
+       /*
+        * truncate_mutex is for serialising ext3_truncate() against
+        * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
+        * data tree are chopped off during truncate. We can't do that in
+        * ext3 because whenever we perform intermediate commits during
+        * truncate, the inode and all the metadata blocks *must* be in a
+        * consistent state which allows truncation of the orphans to restart
+        * during recovery.  Hence we must fix the get_block-vs-truncate race
+        * by other means, so we have truncate_mutex.
+        */
+       struct mutex truncate_mutex;
+
+       /*
+        * Transactions that contain inode's metadata needed to complete
+        * fsync and fdatasync, respectively.
+        */
+       atomic_t i_sync_tid;
+       atomic_t i_datasync_tid;
+
+       struct inode vfs_inode;
+};
+
+/*
+ * third extended-fs super-block data in memory
+ */
+struct ext3_sb_info {
+       unsigned long s_frag_size;      /* Size of a fragment in bytes */
+       unsigned long s_frags_per_block;/* Number of fragments per block */
+       unsigned long s_inodes_per_block;/* Number of inodes per block */
+       unsigned long s_frags_per_group;/* Number of fragments in a group */
+       unsigned long s_blocks_per_group;/* Number of blocks in a group */
+       unsigned long s_inodes_per_group;/* Number of inodes in a group */
+       unsigned long s_itb_per_group;  /* Number of inode table blocks per group */
+       unsigned long s_gdb_count;      /* Number of group descriptor blocks */
+       unsigned long s_desc_per_block; /* Number of group descriptors per block */
+       unsigned long s_groups_count;   /* Number of groups in the fs */
+       unsigned long s_overhead_last;  /* Last calculated overhead */
+       unsigned long s_blocks_last;    /* Last seen block count */
+       struct buffer_head * s_sbh;     /* Buffer containing the super block */
+       struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */
+       struct buffer_head ** s_group_desc;
+       unsigned long  s_mount_opt;
+       ext3_fsblk_t s_sb_block;
+       uid_t s_resuid;
+       gid_t s_resgid;
+       unsigned short s_mount_state;
+       unsigned short s_pad;
+       int s_addr_per_block_bits;
+       int s_desc_per_block_bits;
+       int s_inode_size;
+       int s_first_ino;
+       spinlock_t s_next_gen_lock;
+       u32 s_next_generation;
+       u32 s_hash_seed[4];
+       int s_def_hash_version;
+       int s_hash_unsigned;    /* 3 if hash should be unsigned, 0 if not */
+       struct percpu_counter s_freeblocks_counter;
+       struct percpu_counter s_freeinodes_counter;
+       struct percpu_counter s_dirs_counter;
+       struct blockgroup_lock *s_blockgroup_lock;
+
+       /* root of the per fs reservation window tree */
+       spinlock_t s_rsv_window_lock;
+       struct rb_root s_rsv_window_root;
+       struct ext3_reserve_window_node s_rsv_window_head;
+
+       /* Journaling */
+       struct inode * s_journal_inode;
+       struct journal_s * s_journal;
+       struct list_head s_orphan;
+       struct mutex s_orphan_lock;
+       struct mutex s_resize_lock;
+       unsigned long s_commit_interval;
+       struct block_device *journal_bdev;
+#ifdef CONFIG_QUOTA
+       char *s_qf_names[MAXQUOTAS];            /* Names of quota files with journalled quota */
+       int s_jquota_fmt;                       /* Format of quota to use */
+#endif
+};
+
+static inline spinlock_t *
+sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
+{
+       return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
+}
+
+static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
+{
+       return sb->s_fs_info;
+}
+static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
+{
+       return container_of(inode, struct ext3_inode_info, vfs_inode);
+}
+
+static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
+{
+       return ino == EXT3_ROOT_INO ||
+               ino == EXT3_JOURNAL_INO ||
+               ino == EXT3_RESIZE_INO ||
+               (ino >= EXT3_FIRST_INO(sb) &&
+                ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
+}
+
+/*
+ * Inode dynamic state flags
+ */
+enum {
+       EXT3_STATE_JDATA,               /* journaled data exists */
+       EXT3_STATE_NEW,                 /* inode is newly created */
+       EXT3_STATE_XATTR,               /* has in-inode xattrs */
+       EXT3_STATE_FLUSH_ON_CLOSE,      /* flush dirty pages on close */
+};
+
+static inline int ext3_test_inode_state(struct inode *inode, int bit)
+{
+       return test_bit(bit, &EXT3_I(inode)->i_state_flags);
+}
+
+static inline void ext3_set_inode_state(struct inode *inode, int bit)
+{
+       set_bit(bit, &EXT3_I(inode)->i_state_flags);
+}
+
+static inline void ext3_clear_inode_state(struct inode *inode, int bit)
+{
+       clear_bit(bit, &EXT3_I(inode)->i_state_flags);
+}
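
Each EXT3_STATE_* enumerator is a bit index into i_state_flags, and the helpers above are thin wrappers around the kernel's atomic bit operations. A plain (non-atomic) userspace sketch of the same bookkeeping, with invented DEMO_* names:

#include <stdio.h>

enum { DEMO_STATE_JDATA, DEMO_STATE_NEW, DEMO_STATE_XATTR };

int main(void)
{
	unsigned long state_flags = 0;

	state_flags |= 1UL << DEMO_STATE_NEW;		/* cf. ext3_set_inode_state() */
	printf("new? %d\n", !!(state_flags & (1UL << DEMO_STATE_NEW)));
	state_flags &= ~(1UL << DEMO_STATE_NEW);	/* cf. ext3_clear_inode_state() */
	printf("new? %d\n", !!(state_flags & (1UL << DEMO_STATE_NEW)));
	return 0;
}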
+
+#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
+
+/*
+ * Codes for operating systems
+ */
+#define EXT3_OS_LINUX          0
+#define EXT3_OS_HURD           1
+#define EXT3_OS_MASIX          2
+#define EXT3_OS_FREEBSD                3
+#define EXT3_OS_LITES          4
+
+/*
+ * Revision levels
+ */
+#define EXT3_GOOD_OLD_REV      0       /* The good old (original) format */
+#define EXT3_DYNAMIC_REV       1       /* V2 format w/ dynamic inode sizes */
+
+#define EXT3_CURRENT_REV       EXT3_GOOD_OLD_REV
+#define EXT3_MAX_SUPP_REV      EXT3_DYNAMIC_REV
+
+#define EXT3_GOOD_OLD_INODE_SIZE 128
+
+/*
+ * Feature set definitions
+ */
+
+#define EXT3_HAS_COMPAT_FEATURE(sb,mask)                       \
+       ( EXT3_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
+#define EXT3_HAS_RO_COMPAT_FEATURE(sb,mask)                    \
+       ( EXT3_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
+#define EXT3_HAS_INCOMPAT_FEATURE(sb,mask)                     \
+       ( EXT3_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
+#define EXT3_SET_COMPAT_FEATURE(sb,mask)                       \
+       EXT3_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
+#define EXT3_SET_RO_COMPAT_FEATURE(sb,mask)                    \
+       EXT3_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
+#define EXT3_SET_INCOMPAT_FEATURE(sb,mask)                     \
+       EXT3_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
+#define EXT3_CLEAR_COMPAT_FEATURE(sb,mask)                     \
+       EXT3_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
+#define EXT3_CLEAR_RO_COMPAT_FEATURE(sb,mask)                  \
+       EXT3_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
+#define EXT3_CLEAR_INCOMPAT_FEATURE(sb,mask)                   \
+       EXT3_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
+
+#define EXT3_FEATURE_COMPAT_DIR_PREALLOC       0x0001
+#define EXT3_FEATURE_COMPAT_IMAGIC_INODES      0x0002
+#define EXT3_FEATURE_COMPAT_HAS_JOURNAL                0x0004
+#define EXT3_FEATURE_COMPAT_EXT_ATTR           0x0008
+#define EXT3_FEATURE_COMPAT_RESIZE_INODE       0x0010
+#define EXT3_FEATURE_COMPAT_DIR_INDEX          0x0020
+
+#define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER    0x0001
+#define EXT3_FEATURE_RO_COMPAT_LARGE_FILE      0x0002
+#define EXT3_FEATURE_RO_COMPAT_BTREE_DIR       0x0004
+
+#define EXT3_FEATURE_INCOMPAT_COMPRESSION      0x0001
+#define EXT3_FEATURE_INCOMPAT_FILETYPE         0x0002
+#define EXT3_FEATURE_INCOMPAT_RECOVER          0x0004 /* Needs recovery */
+#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV      0x0008 /* Journal device */
+#define EXT3_FEATURE_INCOMPAT_META_BG          0x0010
+
+#define EXT3_FEATURE_COMPAT_SUPP       EXT2_FEATURE_COMPAT_EXT_ATTR
+#define EXT3_FEATURE_INCOMPAT_SUPP     (EXT3_FEATURE_INCOMPAT_FILETYPE| \
+                                        EXT3_FEATURE_INCOMPAT_RECOVER| \
+                                        EXT3_FEATURE_INCOMPAT_META_BG)
+#define EXT3_FEATURE_RO_COMPAT_SUPP    (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
+                                        EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
+
+/*
+ * Default values for user and/or group using reserved blocks
+ */
+#define        EXT3_DEF_RESUID         0
+#define        EXT3_DEF_RESGID         0
+
+/*
+ * Default mount options
+ */
+#define EXT3_DEFM_DEBUG                0x0001
+#define EXT3_DEFM_BSDGROUPS    0x0002
+#define EXT3_DEFM_XATTR_USER   0x0004
+#define EXT3_DEFM_ACL          0x0008
+#define EXT3_DEFM_UID16                0x0010
+#define EXT3_DEFM_JMODE                0x0060
+#define EXT3_DEFM_JMODE_DATA   0x0020
+#define EXT3_DEFM_JMODE_ORDERED        0x0040
+#define EXT3_DEFM_JMODE_WBACK  0x0060
+
+/*
+ * Structure of a directory entry
+ */
+#define EXT3_NAME_LEN 255
+
+struct ext3_dir_entry {
+       __le32  inode;                  /* Inode number */
+       __le16  rec_len;                /* Directory entry length */
+       __le16  name_len;               /* Name length */
+       char    name[EXT3_NAME_LEN];    /* File name */
+};
+
+/*
+ * The new version of the directory entry.  Since EXT3 structures are
+ * stored in intel byte order, and the name_len field could never be
+ * bigger than 255 chars, it's safe to reclaim the extra byte for the
+ * file_type field.
+ */
+struct ext3_dir_entry_2 {
+       __le32  inode;                  /* Inode number */
+       __le16  rec_len;                /* Directory entry length */
+       __u8    name_len;               /* Name length */
+       __u8    file_type;
+       char    name[EXT3_NAME_LEN];    /* File name */
+};
+
+/*
+ * Ext3 directory file types.  Only the low 3 bits are used.  The
+ * other bits are reserved for now.
+ */
+#define EXT3_FT_UNKNOWN                0
+#define EXT3_FT_REG_FILE       1
+#define EXT3_FT_DIR            2
+#define EXT3_FT_CHRDEV         3
+#define EXT3_FT_BLKDEV         4
+#define EXT3_FT_FIFO           5
+#define EXT3_FT_SOCK           6
+#define EXT3_FT_SYMLINK                7
+
+#define EXT3_FT_MAX            8
+
+/*
+ * EXT3_DIR_PAD defines the directory entries boundaries
+ *
+ * NOTE: It must be a multiple of 4
+ */
+#define EXT3_DIR_PAD                   4
+#define EXT3_DIR_ROUND                 (EXT3_DIR_PAD - 1)
+#define EXT3_DIR_REC_LEN(name_len)     (((name_len) + 8 + EXT3_DIR_ROUND) & \
+                                        ~EXT3_DIR_ROUND)
+#define EXT3_MAX_REC_LEN               ((1<<16)-1)
+
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
+static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
+{
+       unsigned len = le16_to_cpu(dlen);
+
+#if (PAGE_CACHE_SIZE >= 65536)
+       if (len == EXT3_MAX_REC_LEN)
+               return 1 << 16;
+#endif
+       return len;
+}
+
+static inline __le16 ext3_rec_len_to_disk(unsigned len)
+{
+#if (PAGE_CACHE_SIZE >= 65536)
+       if (len == (1 << 16))
+               return cpu_to_le16(EXT3_MAX_REC_LEN);
+       else if (len > (1 << 16))
+               BUG();
+#endif
+       return cpu_to_le16(len);
+}
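
With 64KB blocks a single directory entry can cover the whole block (65536 bytes), which does not fit in the 16-bit on-disk rec_len, so EXT3_MAX_REC_LEN (65535) is used as an escape value. A standalone sketch of the round trip (uint16_t stands in for __le16; endianness handling is omitted and the demo_* names are invented):

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_REC_LEN ((1 << 16) - 1)

static uint16_t demo_rec_len_to_disk(unsigned int len)
{
	return len == (1 << 16) ? DEMO_MAX_REC_LEN : (uint16_t)len;
}

static unsigned int demo_rec_len_from_disk(uint16_t dlen)
{
	return dlen == DEMO_MAX_REC_LEN ? 1 << 16 : dlen;
}

int main(void)
{
	/* 65536 is stored as 65535 on disk and decoded back to 65536. */
	printf("%u -> %u -> %u\n", 65536u,
	       (unsigned int)demo_rec_len_to_disk(65536),
	       demo_rec_len_from_disk(demo_rec_len_to_disk(65536)));
	printf("%u -> %u -> %u\n", 12u,
	       (unsigned int)demo_rec_len_to_disk(12),
	       demo_rec_len_from_disk(demo_rec_len_to_disk(12)));
	return 0;
}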
+
+/*
+ * Hash Tree Directory indexing
+ * (c) Daniel Phillips, 2001
+ */
+
+#define is_dx(dir) (EXT3_HAS_COMPAT_FEATURE(dir->i_sb, \
+                                     EXT3_FEATURE_COMPAT_DIR_INDEX) && \
+                     (EXT3_I(dir)->i_flags & EXT3_INDEX_FL))
+#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
+#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
+
+/* Legal values for the dx_root hash_version field: */
+
+#define DX_HASH_LEGACY         0
+#define DX_HASH_HALF_MD4       1
+#define DX_HASH_TEA            2
+#define DX_HASH_LEGACY_UNSIGNED        3
+#define DX_HASH_HALF_MD4_UNSIGNED      4
+#define DX_HASH_TEA_UNSIGNED           5
+
+/* hash info structure used by the directory hash */
+struct dx_hash_info
+{
+       u32             hash;
+       u32             minor_hash;
+       int             hash_version;
+       u32             *seed;
+};
+
+#define EXT3_HTREE_EOF 0x7fffffff
+
+/*
+ * Control parameters used by ext3_htree_next_block
+ */
+#define HASH_NB_ALWAYS         1
+
+
+/*
+ * Describe an inode's exact location on disk and in memory
+ */
+struct ext3_iloc
+{
+       struct buffer_head *bh;
+       unsigned long offset;
+       unsigned long block_group;
+};
+
+static inline struct ext3_inode *ext3_raw_inode(struct ext3_iloc *iloc)
+{
+       return (struct ext3_inode *) (iloc->bh->b_data + iloc->offset);
+}
+
+/*
+ * This structure is stuffed into the struct file's private_data field
+ * for directories.  It is where we put information so that we can do
+ * readdir operations in hash tree order.
+ */
+struct dir_private_info {
+       struct rb_root  root;
+       struct rb_node  *curr_node;
+       struct fname    *extra_fname;
+       loff_t          last_pos;
+       __u32           curr_hash;
+       __u32           curr_minor_hash;
+       __u32           next_hash;
+};
+
+/* calculate the first block number of the group */
+static inline ext3_fsblk_t
+ext3_group_first_block_no(struct super_block *sb, unsigned long group_no)
+{
+       return group_no * (ext3_fsblk_t)EXT3_BLOCKS_PER_GROUP(sb) +
+               le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
+}
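The group-to-block mapping above is plain arithmetic: group N starts at N times blocks-per-group, offset by the filesystem's first data block (1 on 1k-block filesystems, 0 otherwise). A worked example with hypothetical numbers:

    /* Hypothetical 1k-block filesystem: first data block is 1 and each
     * group spans 8192 blocks (one block bitmap = 1024 * 8 bits). */
    unsigned long blocks_per_group = 8192;
    unsigned long first_data_block = 1;
    unsigned long group_no = 3;

    /* Same arithmetic as ext3_group_first_block_no():
     * 3 * 8192 + 1 = 24577. */
    unsigned long long first_block =
            (unsigned long long)group_no * blocks_per_group + first_data_block;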
+
+/*
+ * Special error return code only used by dx_probe() and its callers.
+ */
+#define ERR_BAD_DX_DIR -75000
+
+/*
+ * Function prototypes
+ */
+
+/*
+ * OK, these declarations are also in <linux/kernel.h>, but none of the
+ * ext3 source files needs to include it, so they are duplicated here.
+ */
+# define NORET_TYPE    /**/
+# define ATTRIB_NORET  __attribute__((noreturn))
+# define NORET_AND     noreturn,
+
+/* balloc.c */
+extern int ext3_bg_has_super(struct super_block *sb, int group);
+extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
+extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
+                       ext3_fsblk_t goal, int *errp);
+extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
+                       ext3_fsblk_t goal, unsigned long *count, int *errp);
+extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
+                       ext3_fsblk_t block, unsigned long count);
+extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
+                                ext3_fsblk_t block, unsigned long count,
+                               unsigned long *pdquot_freed_blocks);
+extern ext3_fsblk_t ext3_count_free_blocks (struct super_block *);
+extern void ext3_check_blocks_bitmap (struct super_block *);
+extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
+                                                   unsigned int block_group,
+                                                   struct buffer_head ** bh);
+extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
+extern void ext3_init_block_alloc_info(struct inode *);
+extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
+extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
+
+/* dir.c */
+extern int ext3_check_dir_entry(const char *, struct inode *,
+                               struct ext3_dir_entry_2 *,
+                               struct buffer_head *, unsigned long);
+extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
+                                   __u32 minor_hash,
+                                   struct ext3_dir_entry_2 *dirent);
+extern void ext3_htree_free_dir_info(struct dir_private_info *p);
+
+/* fsync.c */
+extern int ext3_sync_file(struct file *, loff_t, loff_t, int);
+
+/* hash.c */
+extern int ext3fs_dirhash(const char *name, int len, struct
+                         dx_hash_info *hinfo);
+
+/* ialloc.c */
+extern struct inode * ext3_new_inode (handle_t *, struct inode *,
+                                     const struct qstr *, umode_t);
+extern void ext3_free_inode (handle_t *, struct inode *);
+extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
+extern unsigned long ext3_count_free_inodes (struct super_block *);
+extern unsigned long ext3_count_dirs (struct super_block *);
+extern void ext3_check_inodes_bitmap (struct super_block *);
+extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
+
+
+/* inode.c */
+int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
+               struct buffer_head *bh, ext3_fsblk_t blocknr);
+struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+       sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
+       int create);
+
+extern struct inode *ext3_iget(struct super_block *, unsigned long);
+extern int  ext3_write_inode (struct inode *, struct writeback_control *);
+extern int  ext3_setattr (struct dentry *, struct iattr *);
+extern void ext3_evict_inode (struct inode *);
+extern int  ext3_sync_inode (handle_t *, struct inode *);
+extern void ext3_discard_reservation (struct inode *);
+extern void ext3_dirty_inode(struct inode *, int);
+extern int ext3_change_inode_journal_flag(struct inode *, int);
+extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
+extern int ext3_can_truncate(struct inode *inode);
+extern void ext3_truncate(struct inode *inode);
+extern void ext3_set_inode_flags(struct inode *);
+extern void ext3_get_inode_flags(struct ext3_inode_info *);
+extern void ext3_set_aops(struct inode *inode);
+extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+                      u64 start, u64 len);
+
+/* ioctl.c */
+extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
+extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
+
+/* namei.c */
+extern int ext3_orphan_add(handle_t *, struct inode *);
+extern int ext3_orphan_del(handle_t *, struct inode *);
+extern int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+                               __u32 start_minor_hash, __u32 *next_hash);
+
+/* resize.c */
+extern int ext3_group_add(struct super_block *sb,
+                               struct ext3_new_group_data *input);
+extern int ext3_group_extend(struct super_block *sb,
+                               struct ext3_super_block *es,
+                               ext3_fsblk_t n_blocks_count);
+
+/* super.c */
+extern __printf(3, 4)
+void ext3_error(struct super_block *, const char *, const char *, ...);
+extern void __ext3_std_error (struct super_block *, const char *, int);
+extern __printf(3, 4)
+void ext3_abort(struct super_block *, const char *, const char *, ...);
+extern __printf(3, 4)
+void ext3_warning(struct super_block *, const char *, const char *, ...);
+extern __printf(3, 4)
+void ext3_msg(struct super_block *, const char *, const char *, ...);
+extern void ext3_update_dynamic_rev (struct super_block *sb);
+
+#define ext3_std_error(sb, errno)                              \
+do {                                                           \
+       if ((errno))                                            \
+               __ext3_std_error((sb), __func__, (errno));      \
+} while (0)
+
+/*
+ * Inodes and files operations
+ */
+
+/* dir.c */
+extern const struct file_operations ext3_dir_operations;
+
+/* file.c */
+extern const struct inode_operations ext3_file_inode_operations;
+extern const struct file_operations ext3_file_operations;
+
+/* namei.c */
+extern const struct inode_operations ext3_dir_inode_operations;
+extern const struct inode_operations ext3_special_inode_operations;
+
+/* symlink.c */
+extern const struct inode_operations ext3_symlink_inode_operations;
+extern const struct inode_operations ext3_fast_symlink_inode_operations;
+
+#define EXT3_JOURNAL(inode)    (EXT3_SB((inode)->i_sb)->s_journal)
+
+/* Define the number of blocks we need to account to a transaction to
+ * modify one block of data.
+ *
+ * We may have to touch one inode, one bitmap buffer, up to three
+ * indirection blocks, the group and superblock summaries, and the data
+ * block to complete the transaction.  */
+
+#define EXT3_SINGLEDATA_TRANS_BLOCKS   8U
+
+/* Extended attribute operations touch at most two data buffers,
+ * two bitmap buffers, and two group summaries, in addition to the inode
+ * and the superblock, which are already accounted for. */
+
+#define EXT3_XATTR_TRANS_BLOCKS                6U
+
+/* Define the minimum size for a transaction which modifies data.  This
+ * needs to take into account the fact that we may end up modifying two
+ * quota files too (one for the group, one for the user quota).  The
+ * superblock only gets updated once, of course, so don't bother
+ * counting that again for the quota updates. */
+
+#define EXT3_DATA_TRANS_BLOCKS(sb)     (EXT3_SINGLEDATA_TRANS_BLOCKS + \
+                                        EXT3_XATTR_TRANS_BLOCKS - 2 + \
+                                        EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
+
+/* Delete operations potentially hit one directory's namespace plus an
+ * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
+ * generous.  We can grow the delete transaction later if necessary. */
+
+#define EXT3_DELETE_TRANS_BLOCKS(sb)   (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
+
+/* Define an arbitrary limit for the amount of data we will anticipate
+ * writing to any given transaction.  For unbounded transactions such as
+ * write(2) and truncate(2) we can write more than this, but we always
+ * start off at the maximum transaction size and grow the transaction
+ * optimistically as we go. */
+
+#define EXT3_MAX_TRANS_DATA            64U
+
+/* We break up a large truncate or write transaction once the handle's
+ * buffer credits get this low; at that point we need either to extend the
+ * transaction or to start a new one.  Reserve enough space here for
+ * inode, bitmap, superblock, group and indirection updates for at least
+ * one block, plus two quota updates.  Quota allocations are not
+ * needed. */
+
+#define EXT3_RESERVE_TRANS_BLOCKS      12U
+
+#define EXT3_INDEX_EXTRA_TRANS_BLOCKS  8
+
+#ifdef CONFIG_QUOTA
+/* Amount of blocks needed for quota update - we know that the structure was
+ * allocated so we need to update only inode+data */
+#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+/* Amount of blocks needed for quota insert/delete - we do some block writes
+ * but inode, sb and group updates are done only once */
+#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
+               (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
+#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
+               (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
+#else
+#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
+#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
+#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
+#endif
+#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
+#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
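To make the credit accounting above concrete: with quota disabled, every EXT3_MAXQUOTAS_* term collapses to zero, so a single-block data modification reserves 8 + 6 - 2 = 12 credits and a delete reserves 64. A minimal sketch of that arithmetic (quota off is an assumption made for the example, not part of the patch):

    /* Assuming test_opt(sb, QUOTA) is false, the quota terms are 0. */
    unsigned singledata = 8;                      /* EXT3_SINGLEDATA_TRANS_BLOCKS */
    unsigned xattr      = 6;                      /* EXT3_XATTR_TRANS_BLOCKS */
    unsigned data_trans = singledata + xattr - 2; /* EXT3_DATA_TRANS_BLOCKS  -> 12 */
    unsigned del_trans  = 0 + 64;                 /* EXT3_DELETE_TRANS_BLOCKS -> 64 */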
+
+int
+ext3_mark_iloc_dirty(handle_t *handle,
+                    struct inode *inode,
+                    struct ext3_iloc *iloc);
+
+/*
+ * On success, we end up with an outstanding reference count against
+ * iloc->bh.  This _must_ be cleaned up later.
+ */
+
+int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
+                       struct ext3_iloc *iloc);
+
+int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
+
+/*
+ * Wrapper functions with which ext3 calls into JBD.  The intent here is
+ * to allow these to be turned into appropriate stubs so ext3 can control
+ * ext2 filesystems, so ext2+ext3 systems only need one fs.  This work hasn't
+ * been done yet.
+ */
+
+static inline void ext3_journal_release_buffer(handle_t *handle,
+                                               struct buffer_head *bh)
+{
+       journal_release_buffer(handle, bh);
+}
+
+void ext3_journal_abort_handle(const char *caller, const char *err_fn,
+               struct buffer_head *bh, handle_t *handle, int err);
+
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
+
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
+
+int __ext3_journal_forget(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
+
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+                               unsigned long blocknr, struct buffer_head *bh);
+
+int __ext3_journal_get_create_access(const char *where,
+                               handle_t *handle, struct buffer_head *bh);
+
+int __ext3_journal_dirty_metadata(const char *where,
+                               handle_t *handle, struct buffer_head *bh);
+
+#define ext3_journal_get_undo_access(handle, bh) \
+       __ext3_journal_get_undo_access(__func__, (handle), (bh))
+#define ext3_journal_get_write_access(handle, bh) \
+       __ext3_journal_get_write_access(__func__, (handle), (bh))
+#define ext3_journal_revoke(handle, blocknr, bh) \
+       __ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
+#define ext3_journal_get_create_access(handle, bh) \
+       __ext3_journal_get_create_access(__func__, (handle), (bh))
+#define ext3_journal_dirty_metadata(handle, bh) \
+       __ext3_journal_dirty_metadata(__func__, (handle), (bh))
+#define ext3_journal_forget(handle, bh) \
+       __ext3_journal_forget(__func__, (handle), (bh))
+
+int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
+
+handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
+int __ext3_journal_stop(const char *where, handle_t *handle);
+
+static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
+{
+       return ext3_journal_start_sb(inode->i_sb, nblocks);
+}
+
+#define ext3_journal_stop(handle) \
+       __ext3_journal_stop(__func__, (handle))
+
+static inline handle_t *ext3_journal_current_handle(void)
+{
+       return journal_current_handle();
+}
+
+static inline int ext3_journal_extend(handle_t *handle, int nblocks)
+{
+       return journal_extend(handle, nblocks);
+}
+
+static inline int ext3_journal_restart(handle_t *handle, int nblocks)
+{
+       return journal_restart(handle, nblocks);
+}
+
+static inline int ext3_journal_blocks_per_page(struct inode *inode)
+{
+       return journal_blocks_per_page(inode);
+}
+
+static inline int ext3_journal_force_commit(journal_t *journal)
+{
+       return journal_force_commit(journal);
+}
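The wrappers above follow the usual JBD handle lifecycle: start a handle with enough reserved credits, take write access on each buffer before touching it, mark it dirty, then stop the handle. A hedged sketch of that pattern (error handling trimmed; the function name is hypothetical and not part of this patch):

    static int example_update_block(struct inode *inode, struct buffer_head *bh)
    {
            handle_t *handle;
            int err;

            /* Reserve credits for one block of data plus its metadata. */
            handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
            if (IS_ERR(handle))
                    return PTR_ERR(handle);

            err = ext3_journal_get_write_access(handle, bh);
            if (!err) {
                    /* ... modify bh->b_data under the handle ... */
                    err = ext3_journal_dirty_metadata(handle, bh);
            }
            ext3_journal_stop(handle);
            return err;
    }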
+
+/* super.c */
+int ext3_force_commit(struct super_block *sb);
+
+static inline int ext3_should_journal_data(struct inode *inode)
+{
+       if (!S_ISREG(inode->i_mode))
+               return 1;
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
+               return 1;
+       if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+               return 1;
+       return 0;
+}
+
+static inline int ext3_should_order_data(struct inode *inode)
+{
+       if (!S_ISREG(inode->i_mode))
+               return 0;
+       if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+               return 0;
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
+               return 1;
+       return 0;
+}
+
+static inline int ext3_should_writeback_data(struct inode *inode)
+{
+       if (!S_ISREG(inode->i_mode))
+               return 0;
+       if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
+               return 0;
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
+               return 1;
+       return 0;
+}
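The three helpers above encode the data-mode precedence: journalled data wins for non-regular files or when the inode or mount asks for it, then ordered, then writeback. A purely illustrative branch on them (not taken from this patch):

    if (ext3_should_journal_data(inode)) {
            /* full data journalling: data blocks go through the journal */
    } else if (ext3_should_order_data(inode)) {
            /* ordered: data is flushed before the metadata commits */
    } else {
            /* writeback: data vs. metadata ordering is not guaranteed */
    }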
+
+#include <trace/events/ext3.h>
index d401f148d74d2bc87e72a46e53b34c6b4fc3a27a..785a3261a26c6a6f71c73e5609780a09962c3c1b 100644 (file)
@@ -2,7 +2,7 @@
  * Interface between ext3 and JBD
  */
 
-#include <linux/ext3_jbd.h>
+#include "ext3.h"
 
 int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
                                struct buffer_head *bh)
index 724df69847dca1ef2b22ee4827fb6f5c5003ef14..25cb413277e906edb1f037ba625ece7aa92903bb 100644 (file)
  *     (jj@sunsite.ms.mff.cuni.cz)
  */
 
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
 #include <linux/quotaops.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 
index 1860ed3563235b8e7cfefbf57b98ed5d60575b7d..d4dff278cbd824d5d732af355f1428be27a775ae 100644 (file)
  * we can depend on generic_block_fdatasync() to sync the data blocks.
  */
 
-#include <linux/time.h>
 #include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
 #include <linux/writeback.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
 
 /*
  * akpm: A new design for ext3_sync_file().
index 7d215b4d4f2e82c5ae66fc4c2144130ef2e8abec..d10231ddcf8aa9058d6849a66c855c7fa901b46e 100644 (file)
@@ -9,9 +9,7 @@
  * License.
  */
 
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include <linux/cryptohash.h>
 
 #define DELTA 0x9E3779B9
index 1cde28438014bfaccd9b1e2b27785044d9f26aec..e3c39e4cec1943e0fb172e999cdaf51990ab7dc7 100644 (file)
  *        David S. Miller (davem@caip.rutgers.edu), 1995
  */
 
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/stat.h>
-#include <linux/string.h>
 #include <linux/quotaops.h>
-#include <linux/buffer_head.h>
 #include <linux/random.h>
-#include <linux/bitops.h>
-#include <trace/events/ext3.h>
-
-#include <asm/byteorder.h>
 
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 
index 6d3418662b540b886efb50e13450811cb6060ae1..10d7812f60219fa8358c42147b8c90ffead6b239 100644 (file)
  *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  */
 
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/ext3_jbd.h>
-#include <linux/jbd.h>
 #include <linux/highuid.h>
-#include <linux/pagemap.h>
 #include <linux/quotaops.h>
-#include <linux/string.h>
-#include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/mpage.h>
-#include <linux/uio.h>
-#include <linux/bio.h>
-#include <linux/fiemap.h>
 #include <linux/namei.h>
-#include <trace/events/ext3.h>
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 
index 4af574ce4a4638c651006ff8ce73ca61dbe7a14f..677a5c27dc6977b7999e5c87a563f756fbb8ed58 100644 (file)
@@ -7,15 +7,10 @@
  * Universite Pierre et Marie Curie (Paris VI)
  */
 
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/capability.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
 #include <linux/mount.h>
-#include <linux/time.h>
 #include <linux/compat.h>
 #include <asm/uaccess.h>
+#include "ext3.h"
 
 long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
index e8e211795e9f3cf13a34c12e1376efcde034fa54..d7940b24cf683c3c63a2cf7d27e0f3e81eebd32e 100644 (file)
  *     Theodore Ts'o, 2002
  */
 
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include <linux/jbd.h>
-#include <linux/time.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/fcntl.h>
-#include <linux/stat.h>
-#include <linux/string.h>
 #include <linux/quotaops.h>
-#include <linux/buffer_head.h>
-#include <linux/bio.h>
-#include <trace/events/ext3.h>
-
+#include "ext3.h"
 #include "namei.h"
 #include "xattr.h"
 #include "acl.h"
index 7916e4ce166a8ee4a3cb20f4ed112df48b82c9ec..0f814f3450de65174573b7ccd0ee4be34ab89065 100644 (file)
 
 #define EXT3FS_DEBUG
 
-#include <linux/ext3_jbd.h>
-
-#include <linux/errno.h>
-#include <linux/slab.h>
+#include "ext3.h"
 
 
 #define outside(b, first, last)        ((b) < (first) || (b) >= (last))
index e0b45b93327ba1cdb668837b198c833f2a800d78..cf0b5921cf0fc5f9045394f8e847505153dfe3f4 100644 (file)
  */
 
 #include <linux/module.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/time.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/parser.h>
-#include <linux/buffer_head.h>
 #include <linux/exportfs.h>
-#include <linux/vfs.h>
+#include <linux/statfs.h>
 #include <linux/random.h>
 #include <linux/mount.h>
-#include <linux/namei.h>
 #include <linux/quotaops.h>
 #include <linux/seq_file.h>
 #include <linux/log2.h>
 
 #include <asm/uaccess.h>
 
+#define CREATE_TRACE_POINTS
+
+#include "ext3.h"
 #include "xattr.h"
 #include "acl.h"
 #include "namei.h"
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/ext3.h>
-
 #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED
   #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA
 #else
index 7c4898207776e91d0e8af56d2d80fefeea158453..6b01c3eab1f3729fdd3293675aebabd65dcb95a1 100644 (file)
  *  ext3 symlink handling code
  */
 
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
 #include <linux/namei.h>
+#include "ext3.h"
 #include "xattr.h"
 
 static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
index d565759d82eee0c06b10fa44e77abc51f29827d7..d22ebb7a4f55b3fb947e5718c1fbb4d109a7871b 100644 (file)
  * by the buffer lock.
  */
 
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include <linux/mbcache.h>
 #include <linux/quotaops.h>
-#include <linux/rwsem.h>
 #include "xattr.h"
 #include "acl.h"
 
index ea26f2acab942a7cb87eacf969dd5375b8d03e1f..3387664ad70e5bc3bbfbad03a2d934e98d0dda7b 100644 (file)
@@ -3,12 +3,8 @@
  * Handler for storing security labels as extended attributes.
  */
 
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
 #include <linux/security.h>
+#include "ext3.h"
 #include "xattr.h"
 
 static size_t
index 2526a8829de80ebf545871af979759a16aa22dc8..d75727cc67fab83175136e3bf0fbf35e2f878903 100644 (file)
@@ -5,11 +5,7 @@
  * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
  */
 
-#include <linux/string.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include "xattr.h"
 
 static size_t
index b32e473a1e33c0850352d1f55dd3bb17e7d4618d..5612af3567e0c942a389b455ed36eed91a886e89 100644 (file)
@@ -5,10 +5,7 @@
  * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
  */
 
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/ext3_jbd.h>
-#include <linux/ext3_fs.h>
+#include "ext3.h"
 #include "xattr.h"
 
 static size_t
index 76834587a8a419a7478d9c18193c5e38abfa4b8f..a3d2c9ee8d6668676cf6bbd876d135501b2e37a3 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/mount.h>
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
-#include <linux/ext2_fs.h>
 #include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/crc32.h>
index e615ff37e27d5889c4f2662d8f552dc12101836d..1898198abc3d10ed4126c73ab24035f0c796a877 100644 (file)
@@ -1054,53 +1054,65 @@ static void follow_dotdot(struct nameidata *nd)
 }
 
 /*
- * Allocate a dentry with name and parent, and perform a parent
- * directory ->lookup on it. Returns the new dentry, or ERR_PTR
- * on error. parent->d_inode->i_mutex must be held. d_lookup must
- * have verified that no child exists while under i_mutex.
+ * This looks up the name in dcache, possibly revalidates the old dentry and
+ * allocates a new one if not found or not valid.  The need_lookup argument
+ * is set to indicate whether i_op->lookup is still necessary.
+ *
+ * dir->d_inode->i_mutex must be held
  */
-static struct dentry *d_alloc_and_lookup(struct dentry *parent,
-                               struct qstr *name, struct nameidata *nd)
+static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
+                                   struct nameidata *nd, bool *need_lookup)
 {
-       struct inode *inode = parent->d_inode;
        struct dentry *dentry;
-       struct dentry *old;
+       int error;
 
-       /* Don't create child dentry for a dead directory. */
-       if (unlikely(IS_DEADDIR(inode)))
-               return ERR_PTR(-ENOENT);
+       *need_lookup = false;
+       dentry = d_lookup(dir, name);
+       if (dentry) {
+               if (d_need_lookup(dentry)) {
+                       *need_lookup = true;
+               } else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
+                       error = d_revalidate(dentry, nd);
+                       if (unlikely(error <= 0)) {
+                               if (error < 0) {
+                                       dput(dentry);
+                                       return ERR_PTR(error);
+                               } else if (!d_invalidate(dentry)) {
+                                       dput(dentry);
+                                       dentry = NULL;
+                               }
+                       }
+               }
+       }
 
-       dentry = d_alloc(parent, name);
-       if (unlikely(!dentry))
-               return ERR_PTR(-ENOMEM);
+       if (!dentry) {
+               dentry = d_alloc(dir, name);
+               if (unlikely(!dentry))
+                       return ERR_PTR(-ENOMEM);
 
-       old = inode->i_op->lookup(inode, dentry, nd);
-       if (unlikely(old)) {
-               dput(dentry);
-               dentry = old;
+               *need_lookup = true;
        }
        return dentry;
 }
 
 /*
- * We already have a dentry, but require a lookup to be performed on the parent
- * directory to fill in d_inode. Returns the new dentry, or ERR_PTR on error.
- * parent->d_inode->i_mutex must be held. d_lookup must have verified that no
- * child exists while under i_mutex.
+ * Call i_op->lookup on the dentry.  The dentry must be negative but may be
+ * hashed if it was populated with DCACHE_NEED_LOOKUP.
+ *
+ * dir->d_inode->i_mutex must be held
  */
-static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentry,
-                                    struct nameidata *nd)
+static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
+                                 struct nameidata *nd)
 {
-       struct inode *inode = parent->d_inode;
        struct dentry *old;
 
        /* Don't create child dentry for a dead directory. */
-       if (unlikely(IS_DEADDIR(inode))) {
+       if (unlikely(IS_DEADDIR(dir))) {
                dput(dentry);
                return ERR_PTR(-ENOENT);
        }
 
-       old = inode->i_op->lookup(inode, dentry, nd);
+       old = dir->i_op->lookup(dir, dentry, nd);
        if (unlikely(old)) {
                dput(dentry);
                dentry = old;
@@ -1108,6 +1120,19 @@ static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentr
        return dentry;
 }
 
+static struct dentry *__lookup_hash(struct qstr *name,
+               struct dentry *base, struct nameidata *nd)
+{
+       bool need_lookup;
+       struct dentry *dentry;
+
+       dentry = lookup_dcache(name, base, nd, &need_lookup);
+       if (!need_lookup)
+               return dentry;
+
+       return lookup_real(base->d_inode, dentry, nd);
+}
+
 /*
  *  It's more convoluted than I'd like it to be, but... it's still fairly
  *  small and for now I'd prefer to have fast path as straight as possible.
@@ -1139,6 +1164,8 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
                        return -ECHILD;
                nd->seq = seq;
 
+               if (unlikely(d_need_lookup(dentry)))
+                       goto unlazy;
                if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
                        status = d_revalidate(dentry, nd);
                        if (unlikely(status <= 0)) {
@@ -1147,8 +1174,6 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
                                goto unlazy;
                        }
                }
-               if (unlikely(d_need_lookup(dentry)))
-                       goto unlazy;
                path->mnt = mnt;
                path->dentry = dentry;
                if (unlikely(!__follow_mount_rcu(nd, path, inode)))
@@ -1163,38 +1188,14 @@ unlazy:
                dentry = __d_lookup(parent, name);
        }
 
-       if (dentry && unlikely(d_need_lookup(dentry))) {
+       if (unlikely(!dentry))
+               goto need_lookup;
+
+       if (unlikely(d_need_lookup(dentry))) {
                dput(dentry);
-               dentry = NULL;
-       }
-retry:
-       if (unlikely(!dentry)) {
-               struct inode *dir = parent->d_inode;
-               BUG_ON(nd->inode != dir);
-
-               mutex_lock(&dir->i_mutex);
-               dentry = d_lookup(parent, name);
-               if (likely(!dentry)) {
-                       dentry = d_alloc_and_lookup(parent, name, nd);
-                       if (IS_ERR(dentry)) {
-                               mutex_unlock(&dir->i_mutex);
-                               return PTR_ERR(dentry);
-                       }
-                       /* known good */
-                       need_reval = 0;
-                       status = 1;
-               } else if (unlikely(d_need_lookup(dentry))) {
-                       dentry = d_inode_lookup(parent, dentry, nd);
-                       if (IS_ERR(dentry)) {
-                               mutex_unlock(&dir->i_mutex);
-                               return PTR_ERR(dentry);
-                       }
-                       /* known good */
-                       need_reval = 0;
-                       status = 1;
-               }
-               mutex_unlock(&dir->i_mutex);
+               goto need_lookup;
        }
+
        if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
                status = d_revalidate(dentry, nd);
        if (unlikely(status <= 0)) {
@@ -1204,12 +1205,10 @@ retry:
                }
                if (!d_invalidate(dentry)) {
                        dput(dentry);
-                       dentry = NULL;
-                       need_reval = 1;
-                       goto retry;
+                       goto need_lookup;
                }
        }
-
+done:
        path->mnt = mnt;
        path->dentry = dentry;
        err = follow_managed(path, nd->flags);
@@ -1221,6 +1220,16 @@ retry:
                nd->flags |= LOOKUP_JUMPED;
        *inode = path->dentry->d_inode;
        return 0;
+
+need_lookup:
+       BUG_ON(nd->inode != parent->d_inode);
+
+       mutex_lock(&parent->d_inode->i_mutex);
+       dentry = __lookup_hash(name, parent, nd);
+       mutex_unlock(&parent->d_inode->i_mutex);
+       if (IS_ERR(dentry))
+               return PTR_ERR(dentry);
+       goto done;
 }
 
 static inline int may_lookup(struct nameidata *nd)
@@ -1846,59 +1855,6 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
        return err;
 }
 
-static struct dentry *__lookup_hash(struct qstr *name,
-               struct dentry *base, struct nameidata *nd)
-{
-       struct inode *inode = base->d_inode;
-       struct dentry *dentry;
-       int err;
-
-       err = inode_permission(inode, MAY_EXEC);
-       if (err)
-               return ERR_PTR(err);
-
-       /*
-        * Don't bother with __d_lookup: callers are for creat as
-        * well as unlink, so a lot of the time it would cost
-        * a double lookup.
-        */
-       dentry = d_lookup(base, name);
-
-       if (dentry && d_need_lookup(dentry)) {
-               /*
-                * __lookup_hash is called with the parent dir's i_mutex already
-                * held, so we are good to go here.
-                */
-               dentry = d_inode_lookup(base, dentry, nd);
-               if (IS_ERR(dentry))
-                       return dentry;
-       }
-
-       if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) {
-               int status = d_revalidate(dentry, nd);
-               if (unlikely(status <= 0)) {
-                       /*
-                        * The dentry failed validation.
-                        * If d_revalidate returned 0 attempt to invalidate
-                        * the dentry otherwise d_revalidate is asking us
-                        * to return a fail status.
-                        */
-                       if (status < 0) {
-                               dput(dentry);
-                               return ERR_PTR(status);
-                       } else if (!d_invalidate(dentry)) {
-                               dput(dentry);
-                               dentry = NULL;
-                       }
-               }
-       }
-
-       if (!dentry)
-               dentry = d_alloc_and_lookup(base, name, nd);
-
-       return dentry;
-}
-
 /*
  * Restricted form of lookup. Doesn't follow links, single-component only,
  * needs parent already locked. Doesn't follow mounts.
@@ -1924,6 +1880,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 {
        struct qstr this;
        unsigned int c;
+       int err;
 
        WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
 
@@ -1948,6 +1905,10 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
                        return ERR_PTR(err);
        }
 
+       err = inode_permission(base->d_inode, MAY_EXEC);
+       if (err)
+               return ERR_PTR(err);
+
        return __lookup_hash(&this, base, NULL);
 }
 
@@ -2749,7 +2710,7 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
 
 /*
  * The dentry_unhash() helper will try to drop the dentry early: we
- * should have a usage count of 2 if we're the only user of this
+ * should have a usage count of 1 if we're the only user of this
  * dentry, and if that is true (possibly after pruning the dcache),
  * then we drop the dentry now.
  *
index a6fda3c188aa84a85af41aa42b0b64feac31b410..a1a1bfd652c90d49521ad3ea12a908f9a168c1e9 100644 (file)
@@ -28,8 +28,6 @@
 #include "suballoc.h"
 #include "move_extents.h"
 
-#include <linux/ext2_fs.h>
-
 #define o2info_from_user(a, b) \
                copy_from_user(&(a), (b), sizeof(a))
 #define o2info_to_user(a, b)   \
index f37c32b945254dc3c86564c1b0fbcfa816f87030..50952c9bd06c0c460097eeedface267575f11aa5 100644 (file)
@@ -105,26 +105,12 @@ static const struct inode_operations pstore_dir_inode_operations = {
        .unlink         = pstore_unlink,
 };
 
-static struct inode *pstore_get_inode(struct super_block *sb,
-                                       const struct inode *dir, int mode, dev_t dev)
+static struct inode *pstore_get_inode(struct super_block *sb)
 {
        struct inode *inode = new_inode(sb);
-
        if (inode) {
                inode->i_ino = get_next_ino();
-               inode->i_uid = inode->i_gid = 0;
-               inode->i_mode = mode;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-               switch (mode & S_IFMT) {
-               case S_IFREG:
-                       inode->i_fop = &pstore_file_operations;
-                       break;
-               case S_IFDIR:
-                       inode->i_op = &pstore_dir_inode_operations;
-                       inode->i_fop = &simple_dir_operations;
-                       inc_nlink(inode);
-                       break;
-               }
        }
        return inode;
 }
@@ -216,9 +202,11 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id,
                return rc;
 
        rc = -ENOMEM;
-       inode = pstore_get_inode(pstore_sb, root->d_inode, S_IFREG | 0444, 0);
+       inode = pstore_get_inode(pstore_sb);
        if (!inode)
                goto fail;
+       inode->i_mode = S_IFREG | 0444;
+       inode->i_fop = &pstore_file_operations;
        private = kmalloc(sizeof *private + size, GFP_KERNEL);
        if (!private)
                goto fail_alloc;
@@ -293,10 +281,12 @@ int pstore_fill_super(struct super_block *sb, void *data, int silent)
 
        parse_options(data);
 
-       inode = pstore_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+       inode = pstore_get_inode(sb);
        if (inode) {
-               /* override ramfs "dir" options so we catch unlink(2) */
+               inode->i_mode = S_IFDIR | 0755;
                inode->i_op = &pstore_dir_inode_operations;
+               inode->i_fop = &simple_dir_operations;
+               inc_nlink(inode);
        }
        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
index a4b5da2b83f543ff4e503d2c15fd24aa6e04881f..d05df2810354737714bec9f1f897657036ddc150 100644 (file)
@@ -120,7 +120,6 @@ header-y += errno.h
 header-y += errqueue.h
 header-y += ethtool.h
 header-y += eventpoll.h
-header-y += ext2_fs.h
 header-y += fadvise.h
 header-y += falloc.h
 header-y += fanotify.h
index ce1b719e8bd467f7a82ed8c7b4175665b7d8ffc5..2723e715f67a19d2574aa0f151f5b4c525fcb43e 100644 (file)
 
 #include <linux/types.h>
 #include <linux/magic.h>
-#include <linux/fs.h>
 
-/*
- * The second extended filesystem constants/structures
- */
-
-/*
- * Define EXT2FS_DEBUG to produce debug messages
- */
-#undef EXT2FS_DEBUG
-
-/*
- * Define EXT2_RESERVATION to reserve data blocks for expanding files
- */
-#define EXT2_DEFAULT_RESERVE_BLOCKS     8
-/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
-#define EXT2_MAX_RESERVE_BLOCKS         1027
-#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
-/*
- * The second extended file system version
- */
-#define EXT2FS_DATE            "95/08/09"
-#define EXT2FS_VERSION         "0.5b"
-
-/*
- * Debug code
- */
-#ifdef EXT2FS_DEBUG
-#      define ext2_debug(f, a...)      { \
-                                       printk ("EXT2-fs DEBUG (%s, %d): %s:", \
-                                               __FILE__, __LINE__, __func__); \
-                                       printk (f, ## a); \
-                                       }
-#else
-#      define ext2_debug(f, a...)      /**/
-#endif
-
-/*
- * Special inode numbers
- */
-#define        EXT2_BAD_INO             1      /* Bad blocks inode */
-#define EXT2_ROOT_INO           2      /* Root inode */
-#define EXT2_BOOT_LOADER_INO    5      /* Boot loader inode */
-#define EXT2_UNDEL_DIR_INO      6      /* Undelete directory inode */
-
-/* First non-reserved inode for old ext2 filesystems */
-#define EXT2_GOOD_OLD_FIRST_INO        11
-
-#ifdef __KERNEL__
-#include <linux/ext2_fs_sb.h>
-static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
-{
-       return sb->s_fs_info;
-}
-#else
-/* Assume that user mode programs are passing in an ext2fs superblock, not
- * a kernel struct super_block.  This will allow us to call the feature-test
- * macros from user land. */
-#define EXT2_SB(sb)    (sb)
-#endif
+#define EXT2_NAME_LEN 255
 
 /*
  * Maximal count of links to a file
  */
 #define EXT2_LINK_MAX          32000
 
-/*
- * Macro-instructions used to manage several block sizes
- */
-#define EXT2_MIN_BLOCK_SIZE            1024
-#define        EXT2_MAX_BLOCK_SIZE             4096
-#define EXT2_MIN_BLOCK_LOG_SIZE                  10
-#ifdef __KERNEL__
-# define EXT2_BLOCK_SIZE(s)            ((s)->s_blocksize)
-#else
-# define EXT2_BLOCK_SIZE(s)            (EXT2_MIN_BLOCK_SIZE << (s)->s_log_block_size)
-#endif
-#define        EXT2_ADDR_PER_BLOCK(s)          (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
-#ifdef __KERNEL__
-# define EXT2_BLOCK_SIZE_BITS(s)       ((s)->s_blocksize_bits)
-#else
-# define EXT2_BLOCK_SIZE_BITS(s)       ((s)->s_log_block_size + 10)
-#endif
-#ifdef __KERNEL__
-#define        EXT2_ADDR_PER_BLOCK_BITS(s)     (EXT2_SB(s)->s_addr_per_block_bits)
-#define EXT2_INODE_SIZE(s)             (EXT2_SB(s)->s_inode_size)
-#define EXT2_FIRST_INO(s)              (EXT2_SB(s)->s_first_ino)
-#else
-#define EXT2_INODE_SIZE(s)     (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
-                                EXT2_GOOD_OLD_INODE_SIZE : \
-                                (s)->s_inode_size)
-#define EXT2_FIRST_INO(s)      (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
-                                EXT2_GOOD_OLD_FIRST_INO : \
-                                (s)->s_first_ino)
-#endif
+#define EXT2_SB_MAGIC_OFFSET   0x38
+#define EXT2_SB_BLOCKS_OFFSET  0x04
+#define EXT2_SB_BSIZE_OFFSET   0x18
 
-/*
- * Macro-instructions used to manage fragments
- */
-#define EXT2_MIN_FRAG_SIZE             1024
-#define        EXT2_MAX_FRAG_SIZE              4096
-#define EXT2_MIN_FRAG_LOG_SIZE           10
-#ifdef __KERNEL__
-# define EXT2_FRAG_SIZE(s)             (EXT2_SB(s)->s_frag_size)
-# define EXT2_FRAGS_PER_BLOCK(s)       (EXT2_SB(s)->s_frags_per_block)
-#else
-# define EXT2_FRAG_SIZE(s)             (EXT2_MIN_FRAG_SIZE << (s)->s_log_frag_size)
-# define EXT2_FRAGS_PER_BLOCK(s)       (EXT2_BLOCK_SIZE(s) / EXT2_FRAG_SIZE(s))
-#endif
-
-/*
- * Structure of a blocks group descriptor
- */
-struct ext2_group_desc
+static inline u64 ext2_image_size(void *ext2_sb)
 {
-       __le32  bg_block_bitmap;                /* Blocks bitmap block */
-       __le32  bg_inode_bitmap;                /* Inodes bitmap block */
-       __le32  bg_inode_table;         /* Inodes table block */
-       __le16  bg_free_blocks_count;   /* Free blocks count */
-       __le16  bg_free_inodes_count;   /* Free inodes count */
-       __le16  bg_used_dirs_count;     /* Directories count */
-       __le16  bg_pad;
-       __le32  bg_reserved[3];
-};
-
-/*
- * Macro-instructions used to manage group descriptors
- */
-#ifdef __KERNEL__
-# define EXT2_BLOCKS_PER_GROUP(s)      (EXT2_SB(s)->s_blocks_per_group)
-# define EXT2_DESC_PER_BLOCK(s)                (EXT2_SB(s)->s_desc_per_block)
-# define EXT2_INODES_PER_GROUP(s)      (EXT2_SB(s)->s_inodes_per_group)
-# define EXT2_DESC_PER_BLOCK_BITS(s)   (EXT2_SB(s)->s_desc_per_block_bits)
-#else
-# define EXT2_BLOCKS_PER_GROUP(s)      ((s)->s_blocks_per_group)
-# define EXT2_DESC_PER_BLOCK(s)                (EXT2_BLOCK_SIZE(s) / sizeof (struct ext2_group_desc))
-# define EXT2_INODES_PER_GROUP(s)      ((s)->s_inodes_per_group)
-#endif
-
-/*
- * Constants relative to the data blocks
- */
-#define        EXT2_NDIR_BLOCKS                12
-#define        EXT2_IND_BLOCK                  EXT2_NDIR_BLOCKS
-#define        EXT2_DIND_BLOCK                 (EXT2_IND_BLOCK + 1)
-#define        EXT2_TIND_BLOCK                 (EXT2_DIND_BLOCK + 1)
-#define        EXT2_N_BLOCKS                   (EXT2_TIND_BLOCK + 1)
-
-/*
- * Inode flags (GETFLAGS/SETFLAGS)
- */
-#define        EXT2_SECRM_FL                   FS_SECRM_FL     /* Secure deletion */
-#define        EXT2_UNRM_FL                    FS_UNRM_FL      /* Undelete */
-#define        EXT2_COMPR_FL                   FS_COMPR_FL     /* Compress file */
-#define EXT2_SYNC_FL                   FS_SYNC_FL      /* Synchronous updates */
-#define EXT2_IMMUTABLE_FL              FS_IMMUTABLE_FL /* Immutable file */
-#define EXT2_APPEND_FL                 FS_APPEND_FL    /* writes to file may only append */
-#define EXT2_NODUMP_FL                 FS_NODUMP_FL    /* do not dump file */
-#define EXT2_NOATIME_FL                        FS_NOATIME_FL   /* do not update atime */
-/* Reserved for compression usage... */
-#define EXT2_DIRTY_FL                  FS_DIRTY_FL
-#define EXT2_COMPRBLK_FL               FS_COMPRBLK_FL  /* One or more compressed clusters */
-#define EXT2_NOCOMP_FL                 FS_NOCOMP_FL    /* Don't compress */
-#define EXT2_ECOMPR_FL                 FS_ECOMPR_FL    /* Compression error */
-/* End compression flags --- maybe not all used */     
-#define EXT2_BTREE_FL                  FS_BTREE_FL     /* btree format dir */
-#define EXT2_INDEX_FL                  FS_INDEX_FL     /* hash-indexed directory */
-#define EXT2_IMAGIC_FL                 FS_IMAGIC_FL    /* AFS directory */
-#define EXT2_JOURNAL_DATA_FL           FS_JOURNAL_DATA_FL /* Reserved for ext3 */
-#define EXT2_NOTAIL_FL                 FS_NOTAIL_FL    /* file tail should not be merged */
-#define EXT2_DIRSYNC_FL                        FS_DIRSYNC_FL   /* dirsync behaviour (directories only) */
-#define EXT2_TOPDIR_FL                 FS_TOPDIR_FL    /* Top of directory hierarchies*/
-#define EXT2_RESERVED_FL               FS_RESERVED_FL  /* reserved for ext2 lib */
-
-#define EXT2_FL_USER_VISIBLE           FS_FL_USER_VISIBLE      /* User visible flags */
-#define EXT2_FL_USER_MODIFIABLE                FS_FL_USER_MODIFIABLE   /* User modifiable flags */
-
-/* Flags that should be inherited by new inodes from their parent. */
-#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
-                          EXT2_SYNC_FL | EXT2_NODUMP_FL |\
-                          EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
-                          EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
-                          EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
-
-/* Mask out flags that are inappropriate for the given type of inode. */
-static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
-{
-       if (S_ISDIR(mode))
-               return flags;
-       else if (S_ISREG(mode))
-               return flags & EXT2_REG_FLMASK;
-       else
-               return flags & EXT2_OTHER_FLMASK;
+       __u8 *p = ext2_sb;
+       if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC))
+               return 0;
+       return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) <<
+               le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET));
 }
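A hypothetical caller of the new helper hands it the raw superblock bytes (the ext2 superblock sits at byte offset 1024 on the device); the helper returns 0 when the magic does not match, otherwise the block count shifted by the recorded block-size exponent. Sketch only; sb_buf is an illustrative name, not part of this patch:

    /* Hypothetical usage: sb_buf points at the bytes read from offset 1024
     * of the device, where the ext2 superblock lives. */
    u64 size = ext2_image_size(sb_buf);
    if (!size)
            pr_warn("not an ext2 superblock\n");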
 
-/*
- * ioctl commands
- */
-#define        EXT2_IOC_GETFLAGS               FS_IOC_GETFLAGS
-#define        EXT2_IOC_SETFLAGS               FS_IOC_SETFLAGS
-#define        EXT2_IOC_GETVERSION             FS_IOC_GETVERSION
-#define        EXT2_IOC_SETVERSION             FS_IOC_SETVERSION
-#define        EXT2_IOC_GETRSVSZ               _IOR('f', 5, long)
-#define        EXT2_IOC_SETRSVSZ               _IOW('f', 6, long)
-
-/*
- * ioctl commands in 32 bit emulation
- */
-#define EXT2_IOC32_GETFLAGS            FS_IOC32_GETFLAGS
-#define EXT2_IOC32_SETFLAGS            FS_IOC32_SETFLAGS
-#define EXT2_IOC32_GETVERSION          FS_IOC32_GETVERSION
-#define EXT2_IOC32_SETVERSION          FS_IOC32_SETVERSION
-
-/*
- * Structure of an inode on the disk
- */
-struct ext2_inode {
-       __le16  i_mode;         /* File mode */
-       __le16  i_uid;          /* Low 16 bits of Owner Uid */
-       __le32  i_size;         /* Size in bytes */
-       __le32  i_atime;        /* Access time */
-       __le32  i_ctime;        /* Creation time */
-       __le32  i_mtime;        /* Modification time */
-       __le32  i_dtime;        /* Deletion Time */
-       __le16  i_gid;          /* Low 16 bits of Group Id */
-       __le16  i_links_count;  /* Links count */
-       __le32  i_blocks;       /* Blocks count */
-       __le32  i_flags;        /* File flags */
-       union {
-               struct {
-                       __le32  l_i_reserved1;
-               } linux1;
-               struct {
-                       __le32  h_i_translator;
-               } hurd1;
-               struct {
-                       __le32  m_i_reserved1;
-               } masix1;
-       } osd1;                         /* OS dependent 1 */
-       __le32  i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
-       __le32  i_generation;   /* File version (for NFS) */
-       __le32  i_file_acl;     /* File ACL */
-       __le32  i_dir_acl;      /* Directory ACL */
-       __le32  i_faddr;        /* Fragment address */
-       union {
-               struct {
-                       __u8    l_i_frag;       /* Fragment number */
-                       __u8    l_i_fsize;      /* Fragment size */
-                       __u16   i_pad1;
-                       __le16  l_i_uid_high;   /* these 2 fields    */
-                       __le16  l_i_gid_high;   /* were reserved2[0] */
-                       __u32   l_i_reserved2;
-               } linux2;
-               struct {
-                       __u8    h_i_frag;       /* Fragment number */
-                       __u8    h_i_fsize;      /* Fragment size */
-                       __le16  h_i_mode_high;
-                       __le16  h_i_uid_high;
-                       __le16  h_i_gid_high;
-                       __le32  h_i_author;
-               } hurd2;
-               struct {
-                       __u8    m_i_frag;       /* Fragment number */
-                       __u8    m_i_fsize;      /* Fragment size */
-                       __u16   m_pad1;
-                       __u32   m_i_reserved2[2];
-               } masix2;
-       } osd2;                         /* OS dependent 2 */
-};
-
-#define i_size_high    i_dir_acl
-
-#if defined(__KERNEL__) || defined(__linux__)
-#define i_reserved1    osd1.linux1.l_i_reserved1
-#define i_frag         osd2.linux2.l_i_frag
-#define i_fsize                osd2.linux2.l_i_fsize
-#define i_uid_low      i_uid
-#define i_gid_low      i_gid
-#define i_uid_high     osd2.linux2.l_i_uid_high
-#define i_gid_high     osd2.linux2.l_i_gid_high
-#define i_reserved2    osd2.linux2.l_i_reserved2
-#endif
-
-#ifdef __hurd__
-#define i_translator   osd1.hurd1.h_i_translator
-#define i_frag         osd2.hurd2.h_i_frag
-#define i_fsize                osd2.hurd2.h_i_fsize
-#define i_uid_high     osd2.hurd2.h_i_uid_high
-#define i_gid_high     osd2.hurd2.h_i_gid_high
-#define i_author       osd2.hurd2.h_i_author
-#endif
-
-#ifdef __masix__
-#define i_reserved1    osd1.masix1.m_i_reserved1
-#define i_frag         osd2.masix2.m_i_frag
-#define i_fsize                osd2.masix2.m_i_fsize
-#define i_reserved2    osd2.masix2.m_i_reserved2
-#endif
-
-/*
- * File system states
- */
-#define        EXT2_VALID_FS                   0x0001  /* Unmounted cleanly */
-#define        EXT2_ERROR_FS                   0x0002  /* Errors detected */
-
-/*
- * Mount flags
- */
-#define EXT2_MOUNT_CHECK               0x000001  /* Do mount-time checks */
-#define EXT2_MOUNT_OLDALLOC            0x000002  /* Don't use the new Orlov allocator */
-#define EXT2_MOUNT_GRPID               0x000004  /* Create files with directory's group */
-#define EXT2_MOUNT_DEBUG               0x000008  /* Some debugging messages */
-#define EXT2_MOUNT_ERRORS_CONT         0x000010  /* Continue on errors */
-#define EXT2_MOUNT_ERRORS_RO           0x000020  /* Remount fs ro on errors */
-#define EXT2_MOUNT_ERRORS_PANIC                0x000040  /* Panic on errors */
-#define EXT2_MOUNT_MINIX_DF            0x000080  /* Mimics the Minix statfs */
-#define EXT2_MOUNT_NOBH                        0x000100  /* No buffer_heads */
-#define EXT2_MOUNT_NO_UID32            0x000200  /* Disable 32-bit UIDs */
-#define EXT2_MOUNT_XATTR_USER          0x004000  /* Extended user attributes */
-#define EXT2_MOUNT_POSIX_ACL           0x008000  /* POSIX Access Control Lists */
-#define EXT2_MOUNT_XIP                 0x010000  /* Execute in place */
-#define EXT2_MOUNT_USRQUOTA            0x020000  /* user quota */
-#define EXT2_MOUNT_GRPQUOTA            0x040000  /* group quota */
-#define EXT2_MOUNT_RESERVATION         0x080000  /* Preallocation */
-
-
-#define clear_opt(o, opt)              o &= ~EXT2_MOUNT_##opt
-#define set_opt(o, opt)                        o |= EXT2_MOUNT_##opt
-#define test_opt(sb, opt)              (EXT2_SB(sb)->s_mount_opt & \
-                                        EXT2_MOUNT_##opt)
-/*
- * Maximal mount counts between two filesystem checks
- */
-#define EXT2_DFL_MAX_MNT_COUNT         20      /* Allow 20 mounts */
-#define EXT2_DFL_CHECKINTERVAL         0       /* Don't use interval check */
-
-/*
- * Behaviour when detecting errors
- */
-#define EXT2_ERRORS_CONTINUE           1       /* Continue execution */
-#define EXT2_ERRORS_RO                 2       /* Remount fs read-only */
-#define EXT2_ERRORS_PANIC              3       /* Panic */
-#define EXT2_ERRORS_DEFAULT            EXT2_ERRORS_CONTINUE
-
-/*
- * Structure of the super block
- */
-struct ext2_super_block {
-       __le32  s_inodes_count;         /* Inodes count */
-       __le32  s_blocks_count;         /* Blocks count */
-       __le32  s_r_blocks_count;       /* Reserved blocks count */
-       __le32  s_free_blocks_count;    /* Free blocks count */
-       __le32  s_free_inodes_count;    /* Free inodes count */
-       __le32  s_first_data_block;     /* First Data Block */
-       __le32  s_log_block_size;       /* Block size */
-       __le32  s_log_frag_size;        /* Fragment size */
-       __le32  s_blocks_per_group;     /* # Blocks per group */
-       __le32  s_frags_per_group;      /* # Fragments per group */
-       __le32  s_inodes_per_group;     /* # Inodes per group */
-       __le32  s_mtime;                /* Mount time */
-       __le32  s_wtime;                /* Write time */
-       __le16  s_mnt_count;            /* Mount count */
-       __le16  s_max_mnt_count;        /* Maximal mount count */
-       __le16  s_magic;                /* Magic signature */
-       __le16  s_state;                /* File system state */
-       __le16  s_errors;               /* Behaviour when detecting errors */
-       __le16  s_minor_rev_level;      /* minor revision level */
-       __le32  s_lastcheck;            /* time of last check */
-       __le32  s_checkinterval;        /* max. time between checks */
-       __le32  s_creator_os;           /* OS */
-       __le32  s_rev_level;            /* Revision level */
-       __le16  s_def_resuid;           /* Default uid for reserved blocks */
-       __le16  s_def_resgid;           /* Default gid for reserved blocks */
-       /*
-        * These fields are for EXT2_DYNAMIC_REV superblocks only.
-        *
-        * Note: the difference between the compatible feature set and
-        * the incompatible feature set is that if there is a bit set
-        * in the incompatible feature set that the kernel doesn't
-        * know about, it should refuse to mount the filesystem.
-        * 
-        * e2fsck's requirements are more strict; if it doesn't know
-        * about a feature in either the compatible or incompatible
-        * feature set, it must abort and not try to meddle with
-        * things it doesn't understand...
-        */
-       __le32  s_first_ino;            /* First non-reserved inode */
-       __le16   s_inode_size;          /* size of inode structure */
-       __le16  s_block_group_nr;       /* block group # of this superblock */
-       __le32  s_feature_compat;       /* compatible feature set */
-       __le32  s_feature_incompat;     /* incompatible feature set */
-       __le32  s_feature_ro_compat;    /* readonly-compatible feature set */
-       __u8    s_uuid[16];             /* 128-bit uuid for volume */
-       char    s_volume_name[16];      /* volume name */
-       char    s_last_mounted[64];     /* directory where last mounted */
-       __le32  s_algorithm_usage_bitmap; /* For compression */
-       /*
-        * Performance hints.  Directory preallocation should only
-        * happen if the EXT2_COMPAT_PREALLOC flag is on.
-        */
-       __u8    s_prealloc_blocks;      /* Nr of blocks to try to preallocate*/
-       __u8    s_prealloc_dir_blocks;  /* Nr to preallocate for dirs */
-       __u16   s_padding1;
-       /*
-        * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
-        */
-       __u8    s_journal_uuid[16];     /* uuid of journal superblock */
-       __u32   s_journal_inum;         /* inode number of journal file */
-       __u32   s_journal_dev;          /* device number of journal file */
-       __u32   s_last_orphan;          /* start of list of inodes to delete */
-       __u32   s_hash_seed[4];         /* HTREE hash seed */
-       __u8    s_def_hash_version;     /* Default hash version to use */
-       __u8    s_reserved_char_pad;
-       __u16   s_reserved_word_pad;
-       __le32  s_default_mount_opts;
-       __le32  s_first_meta_bg;        /* First metablock block group */
-       __u32   s_reserved[190];        /* Padding to the end of the block */
-};
-
-/*
- * Codes for operating systems
- */
-#define EXT2_OS_LINUX          0
-#define EXT2_OS_HURD           1
-#define EXT2_OS_MASIX          2
-#define EXT2_OS_FREEBSD                3
-#define EXT2_OS_LITES          4
-
-/*
- * Revision levels
- */
-#define EXT2_GOOD_OLD_REV      0       /* The good old (original) format */
-#define EXT2_DYNAMIC_REV       1       /* V2 format w/ dynamic inode sizes */
-
-#define EXT2_CURRENT_REV       EXT2_GOOD_OLD_REV
-#define EXT2_MAX_SUPP_REV      EXT2_DYNAMIC_REV
-
-#define EXT2_GOOD_OLD_INODE_SIZE 128
-
-/*
- * Feature set definitions
- */
-
-#define EXT2_HAS_COMPAT_FEATURE(sb,mask)                       \
-       ( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
-#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask)                    \
-       ( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
-#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask)                     \
-       ( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
-#define EXT2_SET_COMPAT_FEATURE(sb,mask)                       \
-       EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask)                    \
-       EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT2_SET_INCOMPAT_FEATURE(sb,mask)                     \
-       EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask)                     \
-       EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask)                  \
-       EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask)                   \
-       EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
-#define EXT2_FEATURE_COMPAT_DIR_PREALLOC       0x0001
-#define EXT2_FEATURE_COMPAT_IMAGIC_INODES      0x0002
-#define EXT3_FEATURE_COMPAT_HAS_JOURNAL                0x0004
-#define EXT2_FEATURE_COMPAT_EXT_ATTR           0x0008
-#define EXT2_FEATURE_COMPAT_RESIZE_INO         0x0010
-#define EXT2_FEATURE_COMPAT_DIR_INDEX          0x0020
-#define EXT2_FEATURE_COMPAT_ANY                        0xffffffff
-
-#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER    0x0001
-#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE      0x0002
-#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR       0x0004
-#define EXT2_FEATURE_RO_COMPAT_ANY             0xffffffff
-
-#define EXT2_FEATURE_INCOMPAT_COMPRESSION      0x0001
-#define EXT2_FEATURE_INCOMPAT_FILETYPE         0x0002
-#define EXT3_FEATURE_INCOMPAT_RECOVER          0x0004
-#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV      0x0008
-#define EXT2_FEATURE_INCOMPAT_META_BG          0x0010
-#define EXT2_FEATURE_INCOMPAT_ANY              0xffffffff
-
-#define EXT2_FEATURE_COMPAT_SUPP       EXT2_FEATURE_COMPAT_EXT_ATTR
-#define EXT2_FEATURE_INCOMPAT_SUPP     (EXT2_FEATURE_INCOMPAT_FILETYPE| \
-                                        EXT2_FEATURE_INCOMPAT_META_BG)
-#define EXT2_FEATURE_RO_COMPAT_SUPP    (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-                                        EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
-                                        EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
-#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED     ~EXT2_FEATURE_RO_COMPAT_SUPP
-#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED      ~EXT2_FEATURE_INCOMPAT_SUPP
-
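The SUPP masks above encode the mount policy spelled out earlier in this header: an unknown incompatible bit means the filesystem must not be mounted at all, while an unknown read-only-compatible bit conventionally limits the mount to read-only. A minimal userland sketch of that decision (not kernel code; check_mount_policy() is a hypothetical helper and the two constants simply restate the SUPP masks):

/* Hedged userland sketch, not kernel code: how the compat/ro_compat/incompat
 * split is meant to be acted on at mount time.  check_mount_policy() is a
 * hypothetical helper; the two masks restate the SUPP defines above. */
#include <stdio.h>
#include <stdint.h>

#define INCOMPAT_SUPP   0x0012u   /* FILETYPE | META_BG */
#define RO_COMPAT_SUPP  0x0007u   /* SPARSE_SUPER | LARGE_FILE | BTREE_DIR */

/* 0 = mount read-write, 1 = mount read-only, -1 = refuse to mount */
static int check_mount_policy(uint32_t incompat, uint32_t ro_compat)
{
        if (incompat & ~INCOMPAT_SUPP)
                return -1;        /* unknown incompatible feature: refuse */
        if (ro_compat & ~RO_COMPAT_SUPP)
                return 1;         /* unknown ro-compat feature: read-only */
        return 0;
}

int main(void)
{
        printf("%d\n", check_mount_policy(0x0002, 0x0001));  /* 0: all understood    */
        printf("%d\n", check_mount_policy(0x0040, 0x0000));  /* -1: unknown incompat */
        printf("%d\n", check_mount_policy(0x0002, 0x0008));  /* 1: unknown ro-compat */
        return 0;
}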
-/*
- * Default values for user and/or group using reserved blocks
- */
-#define        EXT2_DEF_RESUID         0
-#define        EXT2_DEF_RESGID         0
-
-/*
- * Default mount options
- */
-#define EXT2_DEFM_DEBUG                0x0001
-#define EXT2_DEFM_BSDGROUPS    0x0002
-#define EXT2_DEFM_XATTR_USER   0x0004
-#define EXT2_DEFM_ACL          0x0008
-#define EXT2_DEFM_UID16                0x0010
-    /* Not used by ext2, but reserved for use by ext3 */
-#define EXT3_DEFM_JMODE                0x0060 
-#define EXT3_DEFM_JMODE_DATA   0x0020
-#define EXT3_DEFM_JMODE_ORDERED        0x0040
-#define EXT3_DEFM_JMODE_WBACK  0x0060
-
-/*
- * Structure of a directory entry
- */
-#define EXT2_NAME_LEN 255
-
-struct ext2_dir_entry {
-       __le32  inode;                  /* Inode number */
-       __le16  rec_len;                /* Directory entry length */
-       __le16  name_len;               /* Name length */
-       char    name[EXT2_NAME_LEN];    /* File name */
-};
-
-/*
- * The new version of the directory entry.  Since EXT2 structures are
- * stored in intel byte order, and the name_len field could never be
- * bigger than 255 chars, it's safe to reclaim the extra byte for the
- * file_type field.
- */
-struct ext2_dir_entry_2 {
-       __le32  inode;                  /* Inode number */
-       __le16  rec_len;                /* Directory entry length */
-       __u8    name_len;               /* Name length */
-       __u8    file_type;
-       char    name[EXT2_NAME_LEN];    /* File name */
-};
-
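The rec_len field is what chains one entry to the next inside a directory block, and a zero inode conventionally marks an unused slot. A hedged, userland-only sketch of that traversal — a restatement of the on-disk layout above, not the kernel's readdir path; struct dirent2 and walk_dir_block() are illustrative names and the little-endian conversions are omitted:

/* Hedged sketch: walk one directory block by following rec_len. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dirent2 {
        uint32_t inode;
        uint16_t rec_len;
        uint8_t  name_len;
        uint8_t  file_type;
        char     name[];                /* name_len bytes, not NUL-terminated */
};

static void walk_dir_block(const uint8_t *block, size_t blocksize)
{
        size_t off = 0;

        while (off + 8 <= blocksize) {
                const struct dirent2 *de = (const struct dirent2 *)(block + off);

                if (de->rec_len < 8)            /* malformed entry, stop */
                        break;
                if (de->inode != 0)             /* unused slots carry inode 0 */
                        printf("%.*s (inode %u)\n", de->name_len, de->name,
                               (unsigned)de->inode);
                off += de->rec_len;
        }
}

int main(void)
{
        _Alignas(uint32_t) uint8_t block[32];
        struct dirent2 *de = (struct dirent2 *)block;

        memset(block, 0, sizeof(block));
        de->inode     = 2;                      /* e.g. the root inode */
        de->rec_len   = sizeof(block);          /* one entry spans the block */
        de->name_len  = 1;
        de->file_type = 2;                      /* EXT2_FT_DIR */
        de->name[0]   = '.';

        walk_dir_block(block, sizeof(block));
        return 0;
}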
-/*
- * Ext2 directory file types.  Only the low 3 bits are used.  The
- * other bits are reserved for now.
- */
-enum {
-       EXT2_FT_UNKNOWN         = 0,
-       EXT2_FT_REG_FILE        = 1,
-       EXT2_FT_DIR             = 2,
-       EXT2_FT_CHRDEV          = 3,
-       EXT2_FT_BLKDEV          = 4,
-       EXT2_FT_FIFO            = 5,
-       EXT2_FT_SOCK            = 6,
-       EXT2_FT_SYMLINK         = 7,
-       EXT2_FT_MAX
-};
-
-/*
- * EXT2_DIR_PAD defines the directory entries boundaries
- *
- * NOTE: It must be a multiple of 4
- */
-#define EXT2_DIR_PAD                   4
-#define EXT2_DIR_ROUND                         (EXT2_DIR_PAD - 1)
-#define EXT2_DIR_REC_LEN(name_len)     (((name_len) + 8 + EXT2_DIR_ROUND) & \
-                                        ~EXT2_DIR_ROUND)
-#define EXT2_MAX_REC_LEN               ((1<<16)-1)
-
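A quick worked check of the rounding above: each entry needs an 8-byte header (inode, rec_len, name_len, file_type) plus the name, rounded up to a multiple of EXT2_DIR_PAD. A small standalone illustration (local macro names, same arithmetic):

/* Standalone restatement of the EXT2_DIR_REC_LEN rounding. */
#include <assert.h>

#define DIR_PAD   4
#define DIR_ROUND (DIR_PAD - 1)
#define DIR_REC_LEN(name_len) (((name_len) + 8 + DIR_ROUND) & ~DIR_ROUND)

int main(void)
{
        assert(DIR_REC_LEN(1)  == 12);   /* "." : 8 + 1 = 9, padded to 12 */
        assert(DIR_REC_LEN(5)  == 16);   /* 8 + 5 = 13, padded to 16      */
        assert(DIR_REC_LEN(12) == 20);   /* 8 + 12 = 20, already aligned  */
        return 0;
}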
 #endif /* _LINUX_EXT2_FS_H */
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
deleted file mode 100644 (file)
index db4d9f5..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- *  linux/include/linux/ext2_fs_sb.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs_sb.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#ifndef _LINUX_EXT2_FS_SB
-#define _LINUX_EXT2_FS_SB
-
-#include <linux/blockgroup_lock.h>
-#include <linux/percpu_counter.h>
-#include <linux/rbtree.h>
-
-/* XXX Here for now... not interested in restructuring headers JUST now */
-
-/* data type for block offset of block group */
-typedef int ext2_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext2_fsblk_t;
-
-#define E2FSBLK "%lu"
-
-struct ext2_reserve_window {
-       ext2_fsblk_t            _rsv_start;     /* First byte reserved */
-       ext2_fsblk_t            _rsv_end;       /* Last byte reserved or 0 */
-};
-
-struct ext2_reserve_window_node {
-       struct rb_node          rsv_node;
-       __u32                   rsv_goal_size;
-       __u32                   rsv_alloc_hit;
-       struct ext2_reserve_window      rsv_window;
-};
-
-struct ext2_block_alloc_info {
-       /* information about reservation window */
-       struct ext2_reserve_window_node rsv_window_node;
-       /*
-        * was i_next_alloc_block in ext2_inode_info
-        * is the logical (file-relative) number of the
-        * most-recently-allocated block in this file.
-        * We use this for detecting linearly ascending allocation requests.
-        */
-       __u32                   last_alloc_logical_block;
-       /*
-        * Was i_next_alloc_goal in ext2_inode_info
-        * is the *physical* companion to i_next_alloc_block.
-        * it is the physical block number of the block which was most recently
-        * allocated to this file.  This gives us the goal (target) for the next
-        * allocation when we detect linearly ascending requests.
-        */
-       ext2_fsblk_t            last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * second extended-fs super-block data in memory
- */
-struct ext2_sb_info {
-       unsigned long s_frag_size;      /* Size of a fragment in bytes */
-       unsigned long s_frags_per_block;/* Number of fragments per block */
-       unsigned long s_inodes_per_block;/* Number of inodes per block */
-       unsigned long s_frags_per_group;/* Number of fragments in a group */
-       unsigned long s_blocks_per_group;/* Number of blocks in a group */
-       unsigned long s_inodes_per_group;/* Number of inodes in a group */
-       unsigned long s_itb_per_group;  /* Number of inode table blocks per group */
-       unsigned long s_gdb_count;      /* Number of group descriptor blocks */
-       unsigned long s_desc_per_block; /* Number of group descriptors per block */
-       unsigned long s_groups_count;   /* Number of groups in the fs */
-       unsigned long s_overhead_last;  /* Last calculated overhead */
-       unsigned long s_blocks_last;    /* Last seen block count */
-       struct buffer_head * s_sbh;     /* Buffer containing the super block */
-       struct ext2_super_block * s_es; /* Pointer to the super block in the buffer */
-       struct buffer_head ** s_group_desc;
-       unsigned long  s_mount_opt;
-       unsigned long s_sb_block;
-       uid_t s_resuid;
-       gid_t s_resgid;
-       unsigned short s_mount_state;
-       unsigned short s_pad;
-       int s_addr_per_block_bits;
-       int s_desc_per_block_bits;
-       int s_inode_size;
-       int s_first_ino;
-       spinlock_t s_next_gen_lock;
-       u32 s_next_generation;
-       unsigned long s_dir_count;
-       u8 *s_debts;
-       struct percpu_counter s_freeblocks_counter;
-       struct percpu_counter s_freeinodes_counter;
-       struct percpu_counter s_dirs_counter;
-       struct blockgroup_lock *s_blockgroup_lock;
-       /* root of the per fs reservation window tree */
-       spinlock_t s_rsv_window_lock;
-       struct rb_root s_rsv_window_root;
-       struct ext2_reserve_window_node s_rsv_window_head;
-       /*
-        * s_lock protects against concurrent modifications of s_mount_state,
-        * s_blocks_last, s_overhead_last and the content of superblock's
-        * buffer pointed to by sbi->s_es.
-        *
-        * Note: It is used in ext2_show_options() to provide a consistent view
-        * of the mount options.
-        */
-       spinlock_t s_lock;
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
-{
-       return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
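bgl_lock_ptr() (from blockgroup_lock.h) hashes the block-group number onto a small shared array of spinlocks rather than keeping one lock per group. A hedged userland sketch of that idea using pthreads — the array size, hash and names here are illustrative, not the kernel's implementation:

/* Hedged sketch of a per-block-group lock obtained from a fixed lock array. */
#include <pthread.h>

#define NR_BG_LOCKS 128          /* must be a power of two for the mask below */

struct bg_lock_array {
        pthread_mutex_t locks[NR_BG_LOCKS];
};

static pthread_mutex_t *bg_lock_ptr(struct bg_lock_array *bgl,
                                    unsigned int block_group)
{
        return &bgl->locks[block_group & (NR_BG_LOCKS - 1)];
}

int main(void)
{
        static struct bg_lock_array bgl;

        for (int i = 0; i < NR_BG_LOCKS; i++)
                pthread_mutex_init(&bgl.locks[i], NULL);

        pthread_mutex_lock(bg_lock_ptr(&bgl, 5));
        /* ... modify block group 5's bitmaps/counters here ... */
        pthread_mutex_unlock(bg_lock_ptr(&bgl, 5));
        return 0;
}

The point of hashing rather than allocating one lock per group is to bound memory: a filesystem can have many thousands of block groups, while contention on any single group is rare.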
-#endif /* _LINUX_EXT2_FS_SB */
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
deleted file mode 100644 (file)
index f5a84ee..0000000
+++ /dev/null
@@ -1,980 +0,0 @@
-/*
- *  linux/include/linux/ext3_fs.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#ifndef _LINUX_EXT3_FS_H
-#define _LINUX_EXT3_FS_H
-
-#include <linux/types.h>
-#include <linux/magic.h>
-#include <linux/bug.h>
-
-/*
- * The second extended filesystem constants/structures
- */
-
-/*
- * Define EXT3FS_DEBUG to produce debug messages
- */
-#undef EXT3FS_DEBUG
-
-/*
- * Define EXT3_RESERVATION to reserve data blocks for expanding files
- */
-#define EXT3_DEFAULT_RESERVE_BLOCKS     8
-/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
-#define EXT3_MAX_RESERVE_BLOCKS         1027
-#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
-
-/*
- * Debug code
- */
-#ifdef EXT3FS_DEBUG
-#define ext3_debug(f, a...)                                            \
-       do {                                                            \
-               printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:",       \
-                       __FILE__, __LINE__, __func__);          \
-               printk (KERN_DEBUG f, ## a);                            \
-       } while (0)
-#else
-#define ext3_debug(f, a...)    do {} while (0)
-#endif
-
-/*
- * Special inodes numbers
- */
-#define        EXT3_BAD_INO             1      /* Bad blocks inode */
-#define EXT3_ROOT_INO           2      /* Root inode */
-#define EXT3_BOOT_LOADER_INO    5      /* Boot loader inode */
-#define EXT3_UNDEL_DIR_INO      6      /* Undelete directory inode */
-#define EXT3_RESIZE_INO                 7      /* Reserved group descriptors inode */
-#define EXT3_JOURNAL_INO        8      /* Journal inode */
-
-/* First non-reserved inode for old ext3 filesystems */
-#define EXT3_GOOD_OLD_FIRST_INO        11
-
-/*
- * Maximal count of links to a file
- */
-#define EXT3_LINK_MAX          32000
-
-/*
- * Macro-instructions used to manage several block sizes
- */
-#define EXT3_MIN_BLOCK_SIZE            1024
-#define        EXT3_MAX_BLOCK_SIZE             65536
-#define EXT3_MIN_BLOCK_LOG_SIZE                10
-#ifdef __KERNEL__
-# define EXT3_BLOCK_SIZE(s)            ((s)->s_blocksize)
-#else
-# define EXT3_BLOCK_SIZE(s)            (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
-#endif
-#define        EXT3_ADDR_PER_BLOCK(s)          (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
-#ifdef __KERNEL__
-# define EXT3_BLOCK_SIZE_BITS(s)       ((s)->s_blocksize_bits)
-#else
-# define EXT3_BLOCK_SIZE_BITS(s)       ((s)->s_log_block_size + 10)
-#endif
-#ifdef __KERNEL__
-#define        EXT3_ADDR_PER_BLOCK_BITS(s)     (EXT3_SB(s)->s_addr_per_block_bits)
-#define EXT3_INODE_SIZE(s)             (EXT3_SB(s)->s_inode_size)
-#define EXT3_FIRST_INO(s)              (EXT3_SB(s)->s_first_ino)
-#else
-#define EXT3_INODE_SIZE(s)     (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
-                                EXT3_GOOD_OLD_INODE_SIZE : \
-                                (s)->s_inode_size)
-#define EXT3_FIRST_INO(s)      (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
-                                EXT3_GOOD_OLD_FIRST_INO : \
-                                (s)->s_first_ino)
-#endif
-
-/*
- * Macro-instructions used to manage fragments
- */
-#define EXT3_MIN_FRAG_SIZE             1024
-#define        EXT3_MAX_FRAG_SIZE              4096
-#define EXT3_MIN_FRAG_LOG_SIZE           10
-#ifdef __KERNEL__
-# define EXT3_FRAG_SIZE(s)             (EXT3_SB(s)->s_frag_size)
-# define EXT3_FRAGS_PER_BLOCK(s)       (EXT3_SB(s)->s_frags_per_block)
-#else
-# define EXT3_FRAG_SIZE(s)             (EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size)
-# define EXT3_FRAGS_PER_BLOCK(s)       (EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s))
-#endif
-
-/*
- * Structure of a blocks group descriptor
- */
-struct ext3_group_desc
-{
-       __le32  bg_block_bitmap;                /* Blocks bitmap block */
-       __le32  bg_inode_bitmap;                /* Inodes bitmap block */
-       __le32  bg_inode_table;         /* Inodes table block */
-       __le16  bg_free_blocks_count;   /* Free blocks count */
-       __le16  bg_free_inodes_count;   /* Free inodes count */
-       __le16  bg_used_dirs_count;     /* Directories count */
-       __u16   bg_pad;
-       __le32  bg_reserved[3];
-};
-
-/*
- * Macro-instructions used to manage group descriptors
- */
-#ifdef __KERNEL__
-# define EXT3_BLOCKS_PER_GROUP(s)      (EXT3_SB(s)->s_blocks_per_group)
-# define EXT3_DESC_PER_BLOCK(s)                (EXT3_SB(s)->s_desc_per_block)
-# define EXT3_INODES_PER_GROUP(s)      (EXT3_SB(s)->s_inodes_per_group)
-# define EXT3_DESC_PER_BLOCK_BITS(s)   (EXT3_SB(s)->s_desc_per_block_bits)
-#else
-# define EXT3_BLOCKS_PER_GROUP(s)      ((s)->s_blocks_per_group)
-# define EXT3_DESC_PER_BLOCK(s)                (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc))
-# define EXT3_INODES_PER_GROUP(s)      ((s)->s_inodes_per_group)
-#endif
-
-/*
- * Constants relative to the data blocks
- */
-#define        EXT3_NDIR_BLOCKS                12
-#define        EXT3_IND_BLOCK                  EXT3_NDIR_BLOCKS
-#define        EXT3_DIND_BLOCK                 (EXT3_IND_BLOCK + 1)
-#define        EXT3_TIND_BLOCK                 (EXT3_DIND_BLOCK + 1)
-#define        EXT3_N_BLOCKS                   (EXT3_TIND_BLOCK + 1)
-
-/*
- * Inode flags
- */
-#define        EXT3_SECRM_FL                   0x00000001 /* Secure deletion */
-#define        EXT3_UNRM_FL                    0x00000002 /* Undelete */
-#define        EXT3_COMPR_FL                   0x00000004 /* Compress file */
-#define EXT3_SYNC_FL                   0x00000008 /* Synchronous updates */
-#define EXT3_IMMUTABLE_FL              0x00000010 /* Immutable file */
-#define EXT3_APPEND_FL                 0x00000020 /* writes to file may only append */
-#define EXT3_NODUMP_FL                 0x00000040 /* do not dump file */
-#define EXT3_NOATIME_FL                        0x00000080 /* do not update atime */
-/* Reserved for compression usage... */
-#define EXT3_DIRTY_FL                  0x00000100
-#define EXT3_COMPRBLK_FL               0x00000200 /* One or more compressed clusters */
-#define EXT3_NOCOMPR_FL                        0x00000400 /* Don't compress */
-#define EXT3_ECOMPR_FL                 0x00000800 /* Compression error */
-/* End compression flags --- maybe not all used */
-#define EXT3_INDEX_FL                  0x00001000 /* hash-indexed directory */
-#define EXT3_IMAGIC_FL                 0x00002000 /* AFS directory */
-#define EXT3_JOURNAL_DATA_FL           0x00004000 /* file data should be journaled */
-#define EXT3_NOTAIL_FL                 0x00008000 /* file tail should not be merged */
-#define EXT3_DIRSYNC_FL                        0x00010000 /* dirsync behaviour (directories only) */
-#define EXT3_TOPDIR_FL                 0x00020000 /* Top of directory hierarchies*/
-#define EXT3_RESERVED_FL               0x80000000 /* reserved for ext3 lib */
-
-#define EXT3_FL_USER_VISIBLE           0x0003DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE                0x000380FF /* User modifiable flags */
-
-/* Flags that should be inherited by new inodes from their parent. */
-#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
-                          EXT3_SYNC_FL | EXT3_NODUMP_FL |\
-                          EXT3_NOATIME_FL | EXT3_COMPRBLK_FL |\
-                          EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
-                          EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
-
-/* Mask out flags that are inappropriate for the given type of inode. */
-static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
-{
-       if (S_ISDIR(mode))
-               return flags;
-       else if (S_ISREG(mode))
-               return flags & EXT3_REG_FLMASK;
-       else
-               return flags & EXT3_OTHER_FLMASK;
-}
-
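In other words, directories keep every inherited flag, regular files drop only the directory-specific ones, and everything else keeps just NODUMP and NOATIME. A small userland sketch of the same masking (constants copied from the defines above; mask_flags() is a local stand-in for ext3_mask_flags()):

/* Hedged userland sketch of ext3_mask_flags() with a subset of the flags. */
#include <stdio.h>
#include <sys/stat.h>

#define EXT3_NODUMP_FL   0x00000040
#define EXT3_NOATIME_FL  0x00000080
#define EXT3_DIRSYNC_FL  0x00010000
#define EXT3_TOPDIR_FL   0x00020000

#define REG_FLMASK   (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
#define OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)

static unsigned int mask_flags(mode_t mode, unsigned int flags)
{
        if (S_ISDIR(mode))
                return flags;
        else if (S_ISREG(mode))
                return flags & REG_FLMASK;
        else
                return flags & OTHER_FLMASK;
}

int main(void)
{
        unsigned int parent = EXT3_NOATIME_FL | EXT3_DIRSYNC_FL;

        printf("%#x\n", mask_flags(S_IFREG, parent)); /* 0x80: DIRSYNC dropped   */
        printf("%#x\n", mask_flags(S_IFCHR, parent)); /* 0x80: only NOATIME kept */
        return 0;
}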
-/* Used to pass group descriptor data when online resize is done */
-struct ext3_new_group_input {
-       __u32 group;            /* Group number for this data */
-       __u32 block_bitmap;     /* Absolute block number of block bitmap */
-       __u32 inode_bitmap;     /* Absolute block number of inode bitmap */
-       __u32 inode_table;      /* Absolute block number of inode table start */
-       __u32 blocks_count;     /* Total number of blocks in this group */
-       __u16 reserved_blocks;  /* Number of reserved blocks in this group */
-       __u16 unused;
-};
-
-/* The struct ext3_new_group_input in kernel space, with free_blocks_count */
-struct ext3_new_group_data {
-       __u32 group;
-       __u32 block_bitmap;
-       __u32 inode_bitmap;
-       __u32 inode_table;
-       __u32 blocks_count;
-       __u16 reserved_blocks;
-       __u16 unused;
-       __u32 free_blocks_count;
-};
-
-
-/*
- * ioctl commands
- */
-#define        EXT3_IOC_GETFLAGS               FS_IOC_GETFLAGS
-#define        EXT3_IOC_SETFLAGS               FS_IOC_SETFLAGS
-#define        EXT3_IOC_GETVERSION             _IOR('f', 3, long)
-#define        EXT3_IOC_SETVERSION             _IOW('f', 4, long)
-#define EXT3_IOC_GROUP_EXTEND          _IOW('f', 7, unsigned long)
-#define EXT3_IOC_GROUP_ADD             _IOW('f', 8,struct ext3_new_group_input)
-#define        EXT3_IOC_GETVERSION_OLD         FS_IOC_GETVERSION
-#define        EXT3_IOC_SETVERSION_OLD         FS_IOC_SETVERSION
-#ifdef CONFIG_JBD_DEBUG
-#define EXT3_IOC_WAIT_FOR_READONLY     _IOR('f', 99, long)
-#endif
-#define EXT3_IOC_GETRSVSZ              _IOR('f', 5, long)
-#define EXT3_IOC_SETRSVSZ              _IOW('f', 6, long)
-
-/*
- * ioctl commands in 32 bit emulation
- */
-#define EXT3_IOC32_GETFLAGS            FS_IOC32_GETFLAGS
-#define EXT3_IOC32_SETFLAGS            FS_IOC32_SETFLAGS
-#define EXT3_IOC32_GETVERSION          _IOR('f', 3, int)
-#define EXT3_IOC32_SETVERSION          _IOW('f', 4, int)
-#define EXT3_IOC32_GETRSVSZ            _IOR('f', 5, int)
-#define EXT3_IOC32_SETRSVSZ            _IOW('f', 6, int)
-#define EXT3_IOC32_GROUP_EXTEND                _IOW('f', 7, unsigned int)
-#ifdef CONFIG_JBD_DEBUG
-#define EXT3_IOC32_WAIT_FOR_READONLY   _IOR('f', 99, int)
-#endif
-#define EXT3_IOC32_GETVERSION_OLD      FS_IOC32_GETVERSION
-#define EXT3_IOC32_SETVERSION_OLD      FS_IOC32_SETVERSION
-
-
-/*
- *  Mount options
- */
-struct ext3_mount_options {
-       unsigned long s_mount_opt;
-       uid_t s_resuid;
-       gid_t s_resgid;
-       unsigned long s_commit_interval;
-#ifdef CONFIG_QUOTA
-       int s_jquota_fmt;
-       char *s_qf_names[MAXQUOTAS];
-#endif
-};
-
-/*
- * Structure of an inode on the disk
- */
-struct ext3_inode {
-       __le16  i_mode;         /* File mode */
-       __le16  i_uid;          /* Low 16 bits of Owner Uid */
-       __le32  i_size;         /* Size in bytes */
-       __le32  i_atime;        /* Access time */
-       __le32  i_ctime;        /* Creation time */
-       __le32  i_mtime;        /* Modification time */
-       __le32  i_dtime;        /* Deletion Time */
-       __le16  i_gid;          /* Low 16 bits of Group Id */
-       __le16  i_links_count;  /* Links count */
-       __le32  i_blocks;       /* Blocks count */
-       __le32  i_flags;        /* File flags */
-       union {
-               struct {
-                       __u32  l_i_reserved1;
-               } linux1;
-               struct {
-                       __u32  h_i_translator;
-               } hurd1;
-               struct {
-                       __u32  m_i_reserved1;
-               } masix1;
-       } osd1;                         /* OS dependent 1 */
-       __le32  i_block[EXT3_N_BLOCKS];/* Pointers to blocks */
-       __le32  i_generation;   /* File version (for NFS) */
-       __le32  i_file_acl;     /* File ACL */
-       __le32  i_dir_acl;      /* Directory ACL */
-       __le32  i_faddr;        /* Fragment address */
-       union {
-               struct {
-                       __u8    l_i_frag;       /* Fragment number */
-                       __u8    l_i_fsize;      /* Fragment size */
-                       __u16   i_pad1;
-                       __le16  l_i_uid_high;   /* these 2 fields    */
-                       __le16  l_i_gid_high;   /* were reserved2[0] */
-                       __u32   l_i_reserved2;
-               } linux2;
-               struct {
-                       __u8    h_i_frag;       /* Fragment number */
-                       __u8    h_i_fsize;      /* Fragment size */
-                       __u16   h_i_mode_high;
-                       __u16   h_i_uid_high;
-                       __u16   h_i_gid_high;
-                       __u32   h_i_author;
-               } hurd2;
-               struct {
-                       __u8    m_i_frag;       /* Fragment number */
-                       __u8    m_i_fsize;      /* Fragment size */
-                       __u16   m_pad1;
-                       __u32   m_i_reserved2[2];
-               } masix2;
-       } osd2;                         /* OS dependent 2 */
-       __le16  i_extra_isize;
-       __le16  i_pad1;
-};
-
-#define i_size_high    i_dir_acl
-
-#if defined(__KERNEL__) || defined(__linux__)
-#define i_reserved1    osd1.linux1.l_i_reserved1
-#define i_frag         osd2.linux2.l_i_frag
-#define i_fsize                osd2.linux2.l_i_fsize
-#define i_uid_low      i_uid
-#define i_gid_low      i_gid
-#define i_uid_high     osd2.linux2.l_i_uid_high
-#define i_gid_high     osd2.linux2.l_i_gid_high
-#define i_reserved2    osd2.linux2.l_i_reserved2
-
-#elif defined(__GNU__)
-
-#define i_translator   osd1.hurd1.h_i_translator
-#define i_frag         osd2.hurd2.h_i_frag
-#define i_fsize                osd2.hurd2.h_i_fsize
-#define i_uid_high     osd2.hurd2.h_i_uid_high
-#define i_gid_high     osd2.hurd2.h_i_gid_high
-#define i_author       osd2.hurd2.h_i_author
-
-#elif defined(__masix__)
-
-#define i_reserved1    osd1.masix1.m_i_reserved1
-#define i_frag         osd2.masix2.m_i_frag
-#define i_fsize                osd2.masix2.m_i_fsize
-#define i_reserved2    osd2.masix2.m_i_reserved2
-
-#endif /* defined(__KERNEL__) || defined(__linux__) */
-
-/*
- * File system states
- */
-#define        EXT3_VALID_FS                   0x0001  /* Unmounted cleanly */
-#define        EXT3_ERROR_FS                   0x0002  /* Errors detected */
-#define        EXT3_ORPHAN_FS                  0x0004  /* Orphans being recovered */
-
-/*
- * Misc. filesystem flags
- */
-#define EXT2_FLAGS_SIGNED_HASH         0x0001  /* Signed dirhash in use */
-#define EXT2_FLAGS_UNSIGNED_HASH       0x0002  /* Unsigned dirhash in use */
-#define EXT2_FLAGS_TEST_FILESYS                0x0004  /* to test development code */
-
-/*
- * Mount flags
- */
-#define EXT3_MOUNT_CHECK               0x00001 /* Do mount-time checks */
-/* EXT3_MOUNT_OLDALLOC was there */
-#define EXT3_MOUNT_GRPID               0x00004 /* Create files with directory's group */
-#define EXT3_MOUNT_DEBUG               0x00008 /* Some debugging messages */
-#define EXT3_MOUNT_ERRORS_CONT         0x00010 /* Continue on errors */
-#define EXT3_MOUNT_ERRORS_RO           0x00020 /* Remount fs ro on errors */
-#define EXT3_MOUNT_ERRORS_PANIC                0x00040 /* Panic on errors */
-#define EXT3_MOUNT_MINIX_DF            0x00080 /* Mimics the Minix statfs */
-#define EXT3_MOUNT_NOLOAD              0x00100 /* Don't use existing journal*/
-#define EXT3_MOUNT_ABORT               0x00200 /* Fatal error detected */
-#define EXT3_MOUNT_DATA_FLAGS          0x00C00 /* Mode for data writes: */
-#define EXT3_MOUNT_JOURNAL_DATA                0x00400 /* Write data to journal */
-#define EXT3_MOUNT_ORDERED_DATA                0x00800 /* Flush data before commit */
-#define EXT3_MOUNT_WRITEBACK_DATA      0x00C00 /* No data ordering */
-#define EXT3_MOUNT_UPDATE_JOURNAL      0x01000 /* Update the journal format */
-#define EXT3_MOUNT_NO_UID32            0x02000  /* Disable 32-bit UIDs */
-#define EXT3_MOUNT_XATTR_USER          0x04000 /* Extended user attributes */
-#define EXT3_MOUNT_POSIX_ACL           0x08000 /* POSIX Access Control Lists */
-#define EXT3_MOUNT_RESERVATION         0x10000 /* Preallocation */
-#define EXT3_MOUNT_BARRIER             0x20000 /* Use block barriers */
-#define EXT3_MOUNT_QUOTA               0x80000 /* Some quota option set */
-#define EXT3_MOUNT_USRQUOTA            0x100000 /* "old" user quota */
-#define EXT3_MOUNT_GRPQUOTA            0x200000 /* "old" group quota */
-#define EXT3_MOUNT_DATA_ERR_ABORT      0x400000 /* Abort on file data write
-                                                 * error in ordered mode */
-
-/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
-#ifndef _LINUX_EXT2_FS_H
-#define clear_opt(o, opt)              o &= ~EXT3_MOUNT_##opt
-#define set_opt(o, opt)                        o |= EXT3_MOUNT_##opt
-#define test_opt(sb, opt)              (EXT3_SB(sb)->s_mount_opt & \
-                                        EXT3_MOUNT_##opt)
-#else
-#define EXT2_MOUNT_NOLOAD              EXT3_MOUNT_NOLOAD
-#define EXT2_MOUNT_ABORT               EXT3_MOUNT_ABORT
-#define EXT2_MOUNT_DATA_FLAGS          EXT3_MOUNT_DATA_FLAGS
-#endif
-
-#define ext3_set_bit                   __set_bit_le
-#define ext3_set_bit_atomic            ext2_set_bit_atomic
-#define ext3_clear_bit                 __clear_bit_le
-#define ext3_clear_bit_atomic          ext2_clear_bit_atomic
-#define ext3_test_bit                  test_bit_le
-#define ext3_find_next_zero_bit                find_next_zero_bit_le
-
-/*
- * Maximal mount counts between two filesystem checks
- */
-#define EXT3_DFL_MAX_MNT_COUNT         20      /* Allow 20 mounts */
-#define EXT3_DFL_CHECKINTERVAL         0       /* Don't use interval check */
-
-/*
- * Behaviour when detecting errors
- */
-#define EXT3_ERRORS_CONTINUE           1       /* Continue execution */
-#define EXT3_ERRORS_RO                 2       /* Remount fs read-only */
-#define EXT3_ERRORS_PANIC              3       /* Panic */
-#define EXT3_ERRORS_DEFAULT            EXT3_ERRORS_CONTINUE
-
-/*
- * Structure of the super block
- */
-struct ext3_super_block {
-/*00*/ __le32  s_inodes_count;         /* Inodes count */
-       __le32  s_blocks_count;         /* Blocks count */
-       __le32  s_r_blocks_count;       /* Reserved blocks count */
-       __le32  s_free_blocks_count;    /* Free blocks count */
-/*10*/ __le32  s_free_inodes_count;    /* Free inodes count */
-       __le32  s_first_data_block;     /* First Data Block */
-       __le32  s_log_block_size;       /* Block size */
-       __le32  s_log_frag_size;        /* Fragment size */
-/*20*/ __le32  s_blocks_per_group;     /* # Blocks per group */
-       __le32  s_frags_per_group;      /* # Fragments per group */
-       __le32  s_inodes_per_group;     /* # Inodes per group */
-       __le32  s_mtime;                /* Mount time */
-/*30*/ __le32  s_wtime;                /* Write time */
-       __le16  s_mnt_count;            /* Mount count */
-       __le16  s_max_mnt_count;        /* Maximal mount count */
-       __le16  s_magic;                /* Magic signature */
-       __le16  s_state;                /* File system state */
-       __le16  s_errors;               /* Behaviour when detecting errors */
-       __le16  s_minor_rev_level;      /* minor revision level */
-/*40*/ __le32  s_lastcheck;            /* time of last check */
-       __le32  s_checkinterval;        /* max. time between checks */
-       __le32  s_creator_os;           /* OS */
-       __le32  s_rev_level;            /* Revision level */
-/*50*/ __le16  s_def_resuid;           /* Default uid for reserved blocks */
-       __le16  s_def_resgid;           /* Default gid for reserved blocks */
-       /*
-        * These fields are for EXT3_DYNAMIC_REV superblocks only.
-        *
-        * Note: the difference between the compatible feature set and
-        * the incompatible feature set is that if there is a bit set
-        * in the incompatible feature set that the kernel doesn't
-        * know about, it should refuse to mount the filesystem.
-        *
-        * e2fsck's requirements are more strict; if it doesn't know
-        * about a feature in either the compatible or incompatible
-        * feature set, it must abort and not try to meddle with
-        * things it doesn't understand...
-        */
-       __le32  s_first_ino;            /* First non-reserved inode */
-       __le16   s_inode_size;          /* size of inode structure */
-       __le16  s_block_group_nr;       /* block group # of this superblock */
-       __le32  s_feature_compat;       /* compatible feature set */
-/*60*/ __le32  s_feature_incompat;     /* incompatible feature set */
-       __le32  s_feature_ro_compat;    /* readonly-compatible feature set */
-/*68*/ __u8    s_uuid[16];             /* 128-bit uuid for volume */
-/*78*/ char    s_volume_name[16];      /* volume name */
-/*88*/ char    s_last_mounted[64];     /* directory where last mounted */
-/*C8*/ __le32  s_algorithm_usage_bitmap; /* For compression */
-       /*
-        * Performance hints.  Directory preallocation should only
-        * happen if the EXT3_FEATURE_COMPAT_DIR_PREALLOC flag is on.
-        */
-       __u8    s_prealloc_blocks;      /* Nr of blocks to try to preallocate*/
-       __u8    s_prealloc_dir_blocks;  /* Nr to preallocate for dirs */
-       __le16  s_reserved_gdt_blocks;  /* Per group desc for online growth */
-       /*
-        * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
-        */
-/*D0*/ __u8    s_journal_uuid[16];     /* uuid of journal superblock */
-/*E0*/ __le32  s_journal_inum;         /* inode number of journal file */
-       __le32  s_journal_dev;          /* device number of journal file */
-       __le32  s_last_orphan;          /* start of list of inodes to delete */
-       __le32  s_hash_seed[4];         /* HTREE hash seed */
-       __u8    s_def_hash_version;     /* Default hash version to use */
-       __u8    s_reserved_char_pad;
-       __u16   s_reserved_word_pad;
-       __le32  s_default_mount_opts;
-       __le32  s_first_meta_bg;        /* First metablock block group */
-       __le32  s_mkfs_time;            /* When the filesystem was created */
-       __le32  s_jnl_blocks[17];       /* Backup of the journal inode */
-       /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
-/*150*/        __le32  s_blocks_count_hi;      /* Blocks count */
-       __le32  s_r_blocks_count_hi;    /* Reserved blocks count */
-       __le32  s_free_blocks_count_hi; /* Free blocks count */
-       __le16  s_min_extra_isize;      /* All inodes have at least # bytes */
-       __le16  s_want_extra_isize;     /* New inodes should reserve # bytes */
-       __le32  s_flags;                /* Miscellaneous flags */
-       __le16  s_raid_stride;          /* RAID stride */
-       __le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
-       __le64  s_mmp_block;            /* Block for multi-mount protection */
-       __le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
-       __u8    s_log_groups_per_flex;  /* FLEX_BG group size */
-       __u8    s_reserved_char_pad2;
-       __le16  s_reserved_pad;
-       __u32   s_reserved[162];        /* Padding to the end of the block */
-};
-
-#ifdef __KERNEL__
-#include <linux/ext3_fs_i.h>
-#include <linux/ext3_fs_sb.h>
-static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
-{
-       return sb->s_fs_info;
-}
-static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
-{
-       return container_of(inode, struct ext3_inode_info, vfs_inode);
-}
-
-static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
-{
-       return ino == EXT3_ROOT_INO ||
-               ino == EXT3_JOURNAL_INO ||
-               ino == EXT3_RESIZE_INO ||
-               (ino >= EXT3_FIRST_INO(sb) &&
-                ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
-}
-
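ext3_valid_inum() accepts the special root, journal and resize inodes plus anything from EXT3_FIRST_INO(sb) up to s_inodes_count. A hedged userland restatement with an illustrative inode count (valid_inum() is a local stand-in):

/* Hedged restatement of ext3_valid_inum() with constants from this header. */
#include <assert.h>

#define ROOT_INO     2
#define RESIZE_INO   7
#define JOURNAL_INO  8
#define FIRST_INO    11          /* EXT3_GOOD_OLD_FIRST_INO */

static int valid_inum(unsigned long ino, unsigned long inodes_count)
{
        return ino == ROOT_INO || ino == JOURNAL_INO || ino == RESIZE_INO ||
               (ino >= FIRST_INO && ino <= inodes_count);
}

int main(void)
{
        assert(valid_inum(7, 65536));    /* reserved resize inode is accepted */
        assert(!valid_inum(9, 65536));   /* reserved but not one of the special cases */
        assert(valid_inum(11, 65536));   /* first ordinary inode */
        return 0;
}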
-/*
- * Inode dynamic state flags
- */
-enum {
-       EXT3_STATE_JDATA,               /* journaled data exists */
-       EXT3_STATE_NEW,                 /* inode is newly created */
-       EXT3_STATE_XATTR,               /* has in-inode xattrs */
-       EXT3_STATE_FLUSH_ON_CLOSE,      /* flush dirty pages on close */
-};
-
-static inline int ext3_test_inode_state(struct inode *inode, int bit)
-{
-       return test_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-static inline void ext3_set_inode_state(struct inode *inode, int bit)
-{
-       set_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-static inline void ext3_clear_inode_state(struct inode *inode, int bit)
-{
-       clear_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-#else
-/* Assume that user mode programs are passing in an ext3fs superblock, not
- * a kernel struct super_block.  This will allow us to call the feature-test
- * macros from user land. */
-#define EXT3_SB(sb)    (sb)
-#endif
-
-#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
-
-/*
- * Codes for operating systems
- */
-#define EXT3_OS_LINUX          0
-#define EXT3_OS_HURD           1
-#define EXT3_OS_MASIX          2
-#define EXT3_OS_FREEBSD                3
-#define EXT3_OS_LITES          4
-
-/*
- * Revision levels
- */
-#define EXT3_GOOD_OLD_REV      0       /* The good old (original) format */
-#define EXT3_DYNAMIC_REV       1       /* V2 format w/ dynamic inode sizes */
-
-#define EXT3_CURRENT_REV       EXT3_GOOD_OLD_REV
-#define EXT3_MAX_SUPP_REV      EXT3_DYNAMIC_REV
-
-#define EXT3_GOOD_OLD_INODE_SIZE 128
-
-/*
- * Feature set definitions
- */
-
-#define EXT3_HAS_COMPAT_FEATURE(sb,mask)                       \
-       ( EXT3_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
-#define EXT3_HAS_RO_COMPAT_FEATURE(sb,mask)                    \
-       ( EXT3_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
-#define EXT3_HAS_INCOMPAT_FEATURE(sb,mask)                     \
-       ( EXT3_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
-#define EXT3_SET_COMPAT_FEATURE(sb,mask)                       \
-       EXT3_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT3_SET_RO_COMPAT_FEATURE(sb,mask)                    \
-       EXT3_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT3_SET_INCOMPAT_FEATURE(sb,mask)                     \
-       EXT3_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT3_CLEAR_COMPAT_FEATURE(sb,mask)                     \
-       EXT3_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT3_CLEAR_RO_COMPAT_FEATURE(sb,mask)                  \
-       EXT3_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT3_CLEAR_INCOMPAT_FEATURE(sb,mask)                   \
-       EXT3_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
-#define EXT3_FEATURE_COMPAT_DIR_PREALLOC       0x0001
-#define EXT3_FEATURE_COMPAT_IMAGIC_INODES      0x0002
-#define EXT3_FEATURE_COMPAT_HAS_JOURNAL                0x0004
-#define EXT3_FEATURE_COMPAT_EXT_ATTR           0x0008
-#define EXT3_FEATURE_COMPAT_RESIZE_INODE       0x0010
-#define EXT3_FEATURE_COMPAT_DIR_INDEX          0x0020
-
-#define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER    0x0001
-#define EXT3_FEATURE_RO_COMPAT_LARGE_FILE      0x0002
-#define EXT3_FEATURE_RO_COMPAT_BTREE_DIR       0x0004
-
-#define EXT3_FEATURE_INCOMPAT_COMPRESSION      0x0001
-#define EXT3_FEATURE_INCOMPAT_FILETYPE         0x0002
-#define EXT3_FEATURE_INCOMPAT_RECOVER          0x0004 /* Needs recovery */
-#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV      0x0008 /* Journal device */
-#define EXT3_FEATURE_INCOMPAT_META_BG          0x0010
-
-#define EXT3_FEATURE_COMPAT_SUPP       EXT2_FEATURE_COMPAT_EXT_ATTR
-#define EXT3_FEATURE_INCOMPAT_SUPP     (EXT3_FEATURE_INCOMPAT_FILETYPE| \
-                                        EXT3_FEATURE_INCOMPAT_RECOVER| \
-                                        EXT3_FEATURE_INCOMPAT_META_BG)
-#define EXT3_FEATURE_RO_COMPAT_SUPP    (EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
-                                        EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
-/*
- * Default values for user and/or group using reserved blocks
- */
-#define        EXT3_DEF_RESUID         0
-#define        EXT3_DEF_RESGID         0
-
-/*
- * Default mount options
- */
-#define EXT3_DEFM_DEBUG                0x0001
-#define EXT3_DEFM_BSDGROUPS    0x0002
-#define EXT3_DEFM_XATTR_USER   0x0004
-#define EXT3_DEFM_ACL          0x0008
-#define EXT3_DEFM_UID16                0x0010
-#define EXT3_DEFM_JMODE                0x0060
-#define EXT3_DEFM_JMODE_DATA   0x0020
-#define EXT3_DEFM_JMODE_ORDERED        0x0040
-#define EXT3_DEFM_JMODE_WBACK  0x0060
-
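The two bits under EXT3_DEFM_JMODE select the default data-journaling mode recorded in s_default_mount_opts. A hedged sketch of how that field could be decoded (userland only; default_jmode() is an illustrative helper):

/* Hedged sketch: map the EXT3_DEFM_JMODE bits to a mount-option string. */
#include <stdio.h>

#define DEFM_JMODE          0x0060
#define DEFM_JMODE_DATA     0x0020
#define DEFM_JMODE_ORDERED  0x0040
#define DEFM_JMODE_WBACK    0x0060

static const char *default_jmode(unsigned int defm)
{
        switch (defm & DEFM_JMODE) {
        case DEFM_JMODE_DATA:    return "data=journal";
        case DEFM_JMODE_ORDERED: return "data=ordered";
        case DEFM_JMODE_WBACK:   return "data=writeback";
        default:                 return "(unset)";
        }
}

int main(void)
{
        printf("%s\n", default_jmode(0x0040));  /* data=ordered */
        return 0;
}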
-/*
- * Structure of a directory entry
- */
-#define EXT3_NAME_LEN 255
-
-struct ext3_dir_entry {
-       __le32  inode;                  /* Inode number */
-       __le16  rec_len;                /* Directory entry length */
-       __le16  name_len;               /* Name length */
-       char    name[EXT3_NAME_LEN];    /* File name */
-};
-
-/*
- * The new version of the directory entry.  Since EXT3 structures are
- * stored in intel byte order, and the name_len field could never be
- * bigger than 255 chars, it's safe to reclaim the extra byte for the
- * file_type field.
- */
-struct ext3_dir_entry_2 {
-       __le32  inode;                  /* Inode number */
-       __le16  rec_len;                /* Directory entry length */
-       __u8    name_len;               /* Name length */
-       __u8    file_type;
-       char    name[EXT3_NAME_LEN];    /* File name */
-};
-
-/*
- * Ext3 directory file types.  Only the low 3 bits are used.  The
- * other bits are reserved for now.
- */
-#define EXT3_FT_UNKNOWN                0
-#define EXT3_FT_REG_FILE       1
-#define EXT3_FT_DIR            2
-#define EXT3_FT_CHRDEV         3
-#define EXT3_FT_BLKDEV         4
-#define EXT3_FT_FIFO           5
-#define EXT3_FT_SOCK           6
-#define EXT3_FT_SYMLINK                7
-
-#define EXT3_FT_MAX            8
-
-/*
- * EXT3_DIR_PAD defines the directory entries boundaries
- *
- * NOTE: It must be a multiple of 4
- */
-#define EXT3_DIR_PAD                   4
-#define EXT3_DIR_ROUND                 (EXT3_DIR_PAD - 1)
-#define EXT3_DIR_REC_LEN(name_len)     (((name_len) + 8 + EXT3_DIR_ROUND) & \
-                                        ~EXT3_DIR_ROUND)
-#define EXT3_MAX_REC_LEN               ((1<<16)-1)
-
-/*
- * Tests against MAX_REC_LEN etc were put in place for 64k block
- * sizes; if that is not possible on this arch, we can skip
- * those tests and speed things up.
- */
-static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
-{
-       unsigned len = le16_to_cpu(dlen);
-
-#if (PAGE_CACHE_SIZE >= 65536)
-       if (len == EXT3_MAX_REC_LEN)
-               return 1 << 16;
-#endif
-       return len;
-}
-
-static inline __le16 ext3_rec_len_to_disk(unsigned len)
-{
-#if (PAGE_CACHE_SIZE >= 65536)
-       if (len == (1 << 16))
-               return cpu_to_le16(EXT3_MAX_REC_LEN);
-       else if (len > (1 << 16))
-               BUG();
-#endif
-       return cpu_to_le16(len);
-}
-
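The pair of helpers above exists because a 16-bit rec_len cannot hold the value 65536, so on configurations with 64 KiB blocks the on-disk value 0xffff is used to mean "entry spans the whole block". A standalone round-trip sketch of the same encoding (kernel-specific pieces such as le16_to_cpu() and the PAGE_CACHE_SIZE guard are omitted):

/* Hedged sketch of the 64 KiB rec_len encoding round trip. */
#include <assert.h>
#include <stdint.h>

#define MAX_REC_LEN ((1 << 16) - 1)

static unsigned rec_len_from_disk(uint16_t dlen)
{
        unsigned len = dlen;            /* le16_to_cpu() omitted in this sketch */

        if (len == MAX_REC_LEN)
                return 1 << 16;
        return len;
}

static uint16_t rec_len_to_disk(unsigned len)
{
        if (len == (1 << 16))
                return MAX_REC_LEN;
        return (uint16_t)len;
}

int main(void)
{
        assert(rec_len_from_disk(rec_len_to_disk(1 << 16)) == 1 << 16);
        assert(rec_len_from_disk(rec_len_to_disk(4096)) == 4096);
        return 0;
}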
-/*
- * Hash Tree Directory indexing
- * (c) Daniel Phillips, 2001
- */
-
-#define is_dx(dir) (EXT3_HAS_COMPAT_FEATURE(dir->i_sb, \
-                                     EXT3_FEATURE_COMPAT_DIR_INDEX) && \
-                     (EXT3_I(dir)->i_flags & EXT3_INDEX_FL))
-#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
-#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
-
-/* Legal values for the dx_root hash_version field: */
-
-#define DX_HASH_LEGACY         0
-#define DX_HASH_HALF_MD4       1
-#define DX_HASH_TEA            2
-#define DX_HASH_LEGACY_UNSIGNED        3
-#define DX_HASH_HALF_MD4_UNSIGNED      4
-#define DX_HASH_TEA_UNSIGNED           5
-
-#ifdef __KERNEL__
-
-/* hash info structure used by the directory hash */
-struct dx_hash_info
-{
-       u32             hash;
-       u32             minor_hash;
-       int             hash_version;
-       u32             *seed;
-};
-
-#define EXT3_HTREE_EOF 0x7fffffff
-
-/*
- * Control parameters used by ext3_htree_next_block
- */
-#define HASH_NB_ALWAYS         1
-
-
-/*
- * Describe an inode's exact location on disk and in memory
- */
-struct ext3_iloc
-{
-       struct buffer_head *bh;
-       unsigned long offset;
-       unsigned long block_group;
-};
-
-static inline struct ext3_inode *ext3_raw_inode(struct ext3_iloc *iloc)
-{
-       return (struct ext3_inode *) (iloc->bh->b_data + iloc->offset);
-}
-
-/*
- * This structure is stuffed into the struct file's private_data field
- * for directories.  It is where we put information so that we can do
- * readdir operations in hash tree order.
- */
-struct dir_private_info {
-       struct rb_root  root;
-       struct rb_node  *curr_node;
-       struct fname    *extra_fname;
-       loff_t          last_pos;
-       __u32           curr_hash;
-       __u32           curr_minor_hash;
-       __u32           next_hash;
-};
-
-/* calculate the first block number of the group */
-static inline ext3_fsblk_t
-ext3_group_first_block_no(struct super_block *sb, unsigned long group_no)
-{
-       return group_no * (ext3_fsblk_t)EXT3_BLOCKS_PER_GROUP(sb) +
-               le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
-}
-
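As a worked example of the helper above: with 32768 blocks per group and s_first_data_block == 1 (the usual value for 1 KiB blocks), group 3 starts at 3 * 32768 + 1 = 98305. A trivial standalone restatement (numbers are illustrative):

/* Hedged arithmetic sketch of ext3_group_first_block_no(). */
#include <assert.h>

static unsigned long group_first_block_no(unsigned long group_no,
                                          unsigned long blocks_per_group,
                                          unsigned long first_data_block)
{
        return group_no * blocks_per_group + first_data_block;
}

int main(void)
{
        assert(group_first_block_no(3, 32768, 1) == 98305);
        return 0;
}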
-/*
- * Special error return code only used by dx_probe() and its callers.
- */
-#define ERR_BAD_DX_DIR -75000
-
-/*
- * Function prototypes
- */
-
-/*
- * Ok, these declarations are also in <linux/kernel.h> but none of the
- * ext3 source programs needs to include it so they are duplicated here.
- */
-# define NORET_TYPE    /**/
-# define ATTRIB_NORET  __attribute__((noreturn))
-# define NORET_AND     noreturn,
-
-/* balloc.c */
-extern int ext3_bg_has_super(struct super_block *sb, int group);
-extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
-extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
-                       ext3_fsblk_t goal, int *errp);
-extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
-                       ext3_fsblk_t goal, unsigned long *count, int *errp);
-extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
-                       ext3_fsblk_t block, unsigned long count);
-extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
-                                ext3_fsblk_t block, unsigned long count,
-                               unsigned long *pdquot_freed_blocks);
-extern ext3_fsblk_t ext3_count_free_blocks (struct super_block *);
-extern void ext3_check_blocks_bitmap (struct super_block *);
-extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
-                                                   unsigned int block_group,
-                                                   struct buffer_head ** bh);
-extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
-extern void ext3_init_block_alloc_info(struct inode *);
-extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
-extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
-
-/* dir.c */
-extern int ext3_check_dir_entry(const char *, struct inode *,
-                               struct ext3_dir_entry_2 *,
-                               struct buffer_head *, unsigned long);
-extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
-                                   __u32 minor_hash,
-                                   struct ext3_dir_entry_2 *dirent);
-extern void ext3_htree_free_dir_info(struct dir_private_info *p);
-
-/* fsync.c */
-extern int ext3_sync_file(struct file *, loff_t, loff_t, int);
-
-/* hash.c */
-extern int ext3fs_dirhash(const char *name, int len, struct
-                         dx_hash_info *hinfo);
-
-/* ialloc.c */
-extern struct inode * ext3_new_inode (handle_t *, struct inode *,
-                                     const struct qstr *, umode_t);
-extern void ext3_free_inode (handle_t *, struct inode *);
-extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
-extern unsigned long ext3_count_free_inodes (struct super_block *);
-extern unsigned long ext3_count_dirs (struct super_block *);
-extern void ext3_check_inodes_bitmap (struct super_block *);
-extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
-
-
-/* inode.c */
-int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
-               struct buffer_head *bh, ext3_fsblk_t blocknr);
-struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
-       sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
-       int create);
-
-extern struct inode *ext3_iget(struct super_block *, unsigned long);
-extern int  ext3_write_inode (struct inode *, struct writeback_control *);
-extern int  ext3_setattr (struct dentry *, struct iattr *);
-extern void ext3_evict_inode (struct inode *);
-extern int  ext3_sync_inode (handle_t *, struct inode *);
-extern void ext3_discard_reservation (struct inode *);
-extern void ext3_dirty_inode(struct inode *, int);
-extern int ext3_change_inode_journal_flag(struct inode *, int);
-extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
-extern int ext3_can_truncate(struct inode *inode);
-extern void ext3_truncate(struct inode *inode);
-extern void ext3_set_inode_flags(struct inode *);
-extern void ext3_get_inode_flags(struct ext3_inode_info *);
-extern void ext3_set_aops(struct inode *inode);
-extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-                      u64 start, u64 len);
-
-/* ioctl.c */
-extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
-extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-/* namei.c */
-extern int ext3_orphan_add(handle_t *, struct inode *);
-extern int ext3_orphan_del(handle_t *, struct inode *);
-extern int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-                               __u32 start_minor_hash, __u32 *next_hash);
-
-/* resize.c */
-extern int ext3_group_add(struct super_block *sb,
-                               struct ext3_new_group_data *input);
-extern int ext3_group_extend(struct super_block *sb,
-                               struct ext3_super_block *es,
-                               ext3_fsblk_t n_blocks_count);
-
-/* super.c */
-extern __printf(3, 4)
-void ext3_error(struct super_block *, const char *, const char *, ...);
-extern void __ext3_std_error (struct super_block *, const char *, int);
-extern __printf(3, 4)
-void ext3_abort(struct super_block *, const char *, const char *, ...);
-extern __printf(3, 4)
-void ext3_warning(struct super_block *, const char *, const char *, ...);
-extern __printf(3, 4)
-void ext3_msg(struct super_block *, const char *, const char *, ...);
-extern void ext3_update_dynamic_rev (struct super_block *sb);
-
-#define ext3_std_error(sb, errno)                              \
-do {                                                           \
-       if ((errno))                                            \
-               __ext3_std_error((sb), __func__, (errno));      \
-} while (0)
-
-/*
- * Inodes and files operations
- */
-
-/* dir.c */
-extern const struct file_operations ext3_dir_operations;
-
-/* file.c */
-extern const struct inode_operations ext3_file_inode_operations;
-extern const struct file_operations ext3_file_operations;
-
-/* namei.c */
-extern const struct inode_operations ext3_dir_inode_operations;
-extern const struct inode_operations ext3_special_inode_operations;
-
-/* symlink.c */
-extern const struct inode_operations ext3_symlink_inode_operations;
-extern const struct inode_operations ext3_fast_symlink_inode_operations;
-
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_EXT3_FS_H */
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
deleted file mode 100644 (file)
index f42c098..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- *  linux/include/linux/ext3_fs_i.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs_i.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#ifndef _LINUX_EXT3_FS_I
-#define _LINUX_EXT3_FS_I
-
-#include <linux/rwsem.h>
-#include <linux/rbtree.h>
-#include <linux/seqlock.h>
-#include <linux/mutex.h>
-
-/* data type for block offset of block group */
-typedef int ext3_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext3_fsblk_t;
-
-#define E3FSBLK "%lu"
-
-struct ext3_reserve_window {
-       ext3_fsblk_t    _rsv_start;     /* First byte reserved */
-       ext3_fsblk_t    _rsv_end;       /* Last byte reserved or 0 */
-};
-
-struct ext3_reserve_window_node {
-       struct rb_node          rsv_node;
-       __u32                   rsv_goal_size;
-       __u32                   rsv_alloc_hit;
-       struct ext3_reserve_window      rsv_window;
-};
-
-struct ext3_block_alloc_info {
-       /* information about reservation window */
-       struct ext3_reserve_window_node rsv_window_node;
-       /*
-        * was i_next_alloc_block in ext3_inode_info
-        * is the logical (file-relative) number of the
-        * most-recently-allocated block in this file.
-        * We use this for detecting linearly ascending allocation requests.
-        */
-       __u32                   last_alloc_logical_block;
-       /*
-        * Was i_next_alloc_goal in ext3_inode_info
-        * is the *physical* companion to i_next_alloc_block.
-        * it is the physical block number of the block which was most recently
-        * allocated to this file.  This gives us the goal (target) for the next
-        * allocation when we detect linearly ascending requests.
-        */
-       ext3_fsblk_t            last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * third extended file system inode data in memory
- */
-struct ext3_inode_info {
-       __le32  i_data[15];     /* unconverted */
-       __u32   i_flags;
-#ifdef EXT3_FRAGMENTS
-       __u32   i_faddr;
-       __u8    i_frag_no;
-       __u8    i_frag_size;
-#endif
-       ext3_fsblk_t    i_file_acl;
-       __u32   i_dir_acl;
-       __u32   i_dtime;
-
-       /*
-        * i_block_group is the number of the block group which contains
-        * this file's inode.  Constant across the lifetime of the inode,
-        * it is used for making block allocation decisions - we try to
-        * place a file's data blocks near its inode block, and new inodes
-        * near to their parent directory's inode.
-        */
-       __u32   i_block_group;
-       unsigned long   i_state_flags;  /* Dynamic state flags for ext3 */
-
-       /* block reservation info */
-       struct ext3_block_alloc_info *i_block_alloc_info;
-
-       __u32   i_dir_start_lookup;
-#ifdef CONFIG_EXT3_FS_XATTR
-       /*
-        * Extended attributes can be read independently of the main file
-        * data. Taking i_mutex even when reading would cause contention
-        * between readers of EAs and writers of regular file data, so
-        * instead we synchronize on xattr_sem when reading or changing
-        * EAs.
-        */
-       struct rw_semaphore xattr_sem;
-#endif
-
-       struct list_head i_orphan;      /* unlinked but open inodes */
-
-       /*
-        * i_disksize keeps track of what the inode size is ON DISK, not
-        * in memory.  During truncate, i_size is set to the new size by
-        * the VFS prior to calling ext3_truncate(), but the filesystem won't
-        * set i_disksize to 0 until the truncate is actually under way.
-        *
-        * The intent is that i_disksize always represents the blocks which
-        * are used by this file.  This allows recovery to restart truncate
-        * on orphans if we crash during truncate.  We actually write i_disksize
-        * into the on-disk inode when writing inodes out, instead of i_size.
-        *
-        * The only time when i_disksize and i_size may be different is when
-        * a truncate is in progress.  The only things which change i_disksize
-        * are ext3_get_block (growth) and ext3_truncate (shrinkth).
-        */
-       loff_t  i_disksize;
-
-       /* on-disk additional length */
-       __u16 i_extra_isize;
-
-       /*
-        * truncate_mutex is for serialising ext3_truncate() against
-        * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
-        * data tree are chopped off during truncate. We can't do that in
-        * ext3 because whenever we perform intermediate commits during
-        * truncate, the inode and all the metadata blocks *must* be in a
-        * consistent state which allows truncation of the orphans to restart
-        * during recovery.  Hence we must fix the get_block-vs-truncate race
-        * by other means, so we have truncate_mutex.
-        */
-       struct mutex truncate_mutex;
-
-       /*
-        * Transactions that contain inode's metadata needed to complete
-        * fsync and fdatasync, respectively.
-        */
-       atomic_t i_sync_tid;
-       atomic_t i_datasync_tid;
-
-       struct inode vfs_inode;
-};
-
-#endif /* _LINUX_EXT3_FS_I */
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
deleted file mode 100644 (file)
index 6436525..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- *  linux/include/linux/ext3_fs_sb.h
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs_sb.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#ifndef _LINUX_EXT3_FS_SB
-#define _LINUX_EXT3_FS_SB
-
-#ifdef __KERNEL__
-#include <linux/timer.h>
-#include <linux/wait.h>
-#include <linux/blockgroup_lock.h>
-#include <linux/percpu_counter.h>
-#endif
-#include <linux/rbtree.h>
-
-/*
- * third extended-fs super-block data in memory
- */
-struct ext3_sb_info {
-       unsigned long s_frag_size;      /* Size of a fragment in bytes */
-       unsigned long s_frags_per_block;/* Number of fragments per block */
-       unsigned long s_inodes_per_block;/* Number of inodes per block */
-       unsigned long s_frags_per_group;/* Number of fragments in a group */
-       unsigned long s_blocks_per_group;/* Number of blocks in a group */
-       unsigned long s_inodes_per_group;/* Number of inodes in a group */
-       unsigned long s_itb_per_group;  /* Number of inode table blocks per group */
-       unsigned long s_gdb_count;      /* Number of group descriptor blocks */
-       unsigned long s_desc_per_block; /* Number of group descriptors per block */
-       unsigned long s_groups_count;   /* Number of groups in the fs */
-       unsigned long s_overhead_last;  /* Last calculated overhead */
-       unsigned long s_blocks_last;    /* Last seen block count */
-       struct buffer_head * s_sbh;     /* Buffer containing the super block */
-       struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */
-       struct buffer_head ** s_group_desc;
-       unsigned long  s_mount_opt;
-       ext3_fsblk_t s_sb_block;
-       uid_t s_resuid;
-       gid_t s_resgid;
-       unsigned short s_mount_state;
-       unsigned short s_pad;
-       int s_addr_per_block_bits;
-       int s_desc_per_block_bits;
-       int s_inode_size;
-       int s_first_ino;
-       spinlock_t s_next_gen_lock;
-       u32 s_next_generation;
-       u32 s_hash_seed[4];
-       int s_def_hash_version;
-       int s_hash_unsigned;    /* 3 if hash should be signed, 0 if not */
-       struct percpu_counter s_freeblocks_counter;
-       struct percpu_counter s_freeinodes_counter;
-       struct percpu_counter s_dirs_counter;
-       struct blockgroup_lock *s_blockgroup_lock;
-
-       /* root of the per fs reservation window tree */
-       spinlock_t s_rsv_window_lock;
-       struct rb_root s_rsv_window_root;
-       struct ext3_reserve_window_node s_rsv_window_head;
-
-       /* Journaling */
-       struct inode * s_journal_inode;
-       struct journal_s * s_journal;
-       struct list_head s_orphan;
-       struct mutex s_orphan_lock;
-       struct mutex s_resize_lock;
-       unsigned long s_commit_interval;
-       struct block_device *journal_bdev;
-#ifdef CONFIG_QUOTA
-       char *s_qf_names[MAXQUOTAS];            /* Names of quota files with journalled quota */
-       int s_jquota_fmt;                       /* Format of quota to use */
-#endif
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
-{
-       return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
-#endif /* _LINUX_EXT3_FS_SB */
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
deleted file mode 100644 (file)
index d7b5ddc..0000000
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * linux/include/linux/ext3_jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Ext3-specific journaling extensions.
- */
-
-#ifndef _LINUX_EXT3_JBD_H
-#define _LINUX_EXT3_JBD_H
-
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/ext3_fs.h>
-
-#define EXT3_JOURNAL(inode)    (EXT3_SB((inode)->i_sb)->s_journal)
-
-/* Define the number of blocks we need to account to a transaction to
- * modify one block of data.
- *
- * We may have to touch one inode, one bitmap buffer, up to three
- * indirection blocks, the group and superblock summaries, and the data
- * block to complete the transaction.  */
-
-#define EXT3_SINGLEDATA_TRANS_BLOCKS   8U
-
-/* Extended attribute operations touch at most two data buffers,
- * two bitmap buffers, and two group summaries, in addition to the inode
- * and the superblock, which are already accounted for. */
-
-#define EXT3_XATTR_TRANS_BLOCKS                6U
-
-/* Define the minimum size for a transaction which modifies data.  This
- * needs to take into account the fact that we may end up modifying two
- * quota files too (one for the group, one for the user quota).  The
- * superblock only gets updated once, of course, so don't bother
- * counting that again for the quota updates. */
-
-#define EXT3_DATA_TRANS_BLOCKS(sb)     (EXT3_SINGLEDATA_TRANS_BLOCKS + \
-                                        EXT3_XATTR_TRANS_BLOCKS - 2 + \
-                                        EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
-
-/* Delete operations potentially hit one directory's namespace plus an
- * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
- * generous.  We can grow the delete transaction later if necessary. */
-
-#define EXT3_DELETE_TRANS_BLOCKS(sb)   (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
-
-/* Define an arbitrary limit for the amount of data we will anticipate
- * writing to any given transaction.  For unbounded transactions such as
- * write(2) and truncate(2) we can write more than this, but we always
- * start off at the maximum transaction size and grow the transaction
- * optimistically as we go. */
-
-#define EXT3_MAX_TRANS_DATA            64U
-
-/* We break up a large truncate or write transaction once the handle's
- * buffer credits get this low; we need either to extend the
- * transaction or to start a new one.  Reserve enough space here for
- * inode, bitmap, superblock, group and indirection updates for at least
- * one block, plus two quota updates.  Quota allocations are not
- * needed. */
-
-#define EXT3_RESERVE_TRANS_BLOCKS      12U
-
-#define EXT3_INDEX_EXTRA_TRANS_BLOCKS  8
-
-#ifdef CONFIG_QUOTA
-/* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
-/* Amount of blocks needed for quota insert/delete - we do some block writes
- * but inode, sb and group updates are done only once */
-#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
-               (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
-#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
-               (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
-#else
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
-#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
-#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
-#endif
-#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
-
-int
-ext3_mark_iloc_dirty(handle_t *handle,
-                    struct inode *inode,
-                    struct ext3_iloc *iloc);
-
-/*
- * On success, we end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later.
- */
-
-int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
-                       struct ext3_iloc *iloc);
-
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
-
-/*
- * Wrapper functions with which ext3 calls into JBD.  The intent here is
- * to allow these to be turned into appropriate stubs so ext3 can control
- * ext2 filesystems, so ext2+ext3 systems only need one fs.  This work hasn't
- * been done yet.
- */
-
-static inline void ext3_journal_release_buffer(handle_t *handle,
-                                               struct buffer_head *bh)
-{
-       journal_release_buffer(handle, bh);
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-               struct buffer_head *bh, handle_t *handle, int err);
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
-                               struct buffer_head *bh);
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
-                               struct buffer_head *bh);
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
-                               struct buffer_head *bh);
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
-                               unsigned long blocknr, struct buffer_head *bh);
-
-int __ext3_journal_get_create_access(const char *where,
-                               handle_t *handle, struct buffer_head *bh);
-
-int __ext3_journal_dirty_metadata(const char *where,
-                               handle_t *handle, struct buffer_head *bh);
-
-#define ext3_journal_get_undo_access(handle, bh) \
-       __ext3_journal_get_undo_access(__func__, (handle), (bh))
-#define ext3_journal_get_write_access(handle, bh) \
-       __ext3_journal_get_write_access(__func__, (handle), (bh))
-#define ext3_journal_revoke(handle, blocknr, bh) \
-       __ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
-#define ext3_journal_get_create_access(handle, bh) \
-       __ext3_journal_get_create_access(__func__, (handle), (bh))
-#define ext3_journal_dirty_metadata(handle, bh) \
-       __ext3_journal_dirty_metadata(__func__, (handle), (bh))
-#define ext3_journal_forget(handle, bh) \
-       __ext3_journal_forget(__func__, (handle), (bh))
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
-
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
-int __ext3_journal_stop(const char *where, handle_t *handle);
-
-static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
-{
-       return ext3_journal_start_sb(inode->i_sb, nblocks);
-}
-
-#define ext3_journal_stop(handle) \
-       __ext3_journal_stop(__func__, (handle))
-
-static inline handle_t *ext3_journal_current_handle(void)
-{
-       return journal_current_handle();
-}
-
-static inline int ext3_journal_extend(handle_t *handle, int nblocks)
-{
-       return journal_extend(handle, nblocks);
-}
-
-static inline int ext3_journal_restart(handle_t *handle, int nblocks)
-{
-       return journal_restart(handle, nblocks);
-}
-
-static inline int ext3_journal_blocks_per_page(struct inode *inode)
-{
-       return journal_blocks_per_page(inode);
-}
-
-static inline int ext3_journal_force_commit(journal_t *journal)
-{
-       return journal_force_commit(journal);
-}
-
-/* super.c */
-int ext3_force_commit(struct super_block *sb);
-
-static inline int ext3_should_journal_data(struct inode *inode)
-{
-       if (!S_ISREG(inode->i_mode))
-               return 1;
-       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
-               return 1;
-       if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-               return 1;
-       return 0;
-}
-
-static inline int ext3_should_order_data(struct inode *inode)
-{
-       if (!S_ISREG(inode->i_mode))
-               return 0;
-       if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-               return 0;
-       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
-               return 1;
-       return 0;
-}
-
-static inline int ext3_should_writeback_data(struct inode *inode)
-{
-       if (!S_ISREG(inode->i_mode))
-               return 0;
-       if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-               return 0;
-       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
-               return 1;
-       return 0;
-}
-
-#endif /* _LINUX_EXT3_JBD_H */
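
The credit macros and journal wrappers removed above are easier to follow next
to the call pattern they were written for. A minimal sketch with the error
paths trimmed; the helper name is invented for illustration:

/*
 * Typical shape of a journalled metadata update built on the wrappers
 * documented above: reserve credits, take write access to the buffer,
 * modify it, mark it dirty within the transaction, then stop the handle.
 */
static int ext3_update_one_block_sketch(struct inode *inode,
					struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext3_journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data under the handle ... */
		err = ext3_journal_dirty_metadata(handle, bh);
	}

	ext3_journal_stop(handle);
	return err;
}
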
index dd478fc8f9f55deb1194d19894ff6ad3c040874a..5f3f3be5af09b6446026f32c0845d842c257b629 100644 (file)
@@ -144,12 +144,14 @@ struct event_filter;
 enum trace_reg {
        TRACE_REG_REGISTER,
        TRACE_REG_UNREGISTER,
+#ifdef CONFIG_PERF_EVENTS
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
+#endif
 };
 
 struct ftrace_event_call;
index a5375e7f3feac423fa6325280276e17dcf5351f6..645231c373c8a5b5684e806a9d13647d1eb3300e 100644 (file)
@@ -430,16 +430,10 @@ extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
  * Most likely, you want to use tracing_on/tracing_off.
  */
 #ifdef CONFIG_RING_BUFFER
-void tracing_on(void);
-void tracing_off(void);
 /* trace_off_permanent stops recording with no way to bring it back */
 void tracing_off_permanent(void);
-int tracing_is_on(void);
 #else
-static inline void tracing_on(void) { }
-static inline void tracing_off(void) { }
 static inline void tracing_off_permanent(void) { }
-static inline int tracing_is_on(void) { return 0; }
 #endif
 
 enum ftrace_dump_mode {
@@ -449,6 +443,10 @@ enum ftrace_dump_mode {
 };
 
 #ifdef CONFIG_TRACING
+void tracing_on(void);
+void tracing_off(void);
+int tracing_is_on(void);
+
 extern void tracing_start(void);
 extern void tracing_stop(void);
 extern void ftrace_off_permanent(void);
@@ -533,6 +531,11 @@ static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
 static inline void trace_dump_stack(void) { }
+
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline int tracing_is_on(void) { return 0; }
+
 static inline int
 trace_printk(const char *fmt, ...)
 {
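
The hunks above move the tracing_on()/tracing_off()/tracing_is_on() declarations
from the CONFIG_RING_BUFFER block to CONFIG_TRACING, with no-op stubs otherwise,
so kernel code can call them unconditionally. A hedged sketch of the usual
debugging pattern; the driver structure and predicate are hypothetical:

/*
 * Hypothetical use: freeze the trace buffers the moment a rare fault is
 * seen so the events leading up to it are preserved for later reading.
 * struct my_dev and saw_bad_state() are invented for this sketch.
 */
static void my_poll_work(struct my_dev *dev)
{
	if (saw_bad_state(dev) && tracing_is_on()) {
		trace_printk("bad state on %s, freezing trace\n", dev->name);
		tracing_off();
	}
}
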
index 8f825756c459b6d53afda01394aa7bb4e67406c2..18543e2db06f202e0c9c2b6dc1d3e6fae1c07fb5 100644 (file)
@@ -194,6 +194,7 @@ struct      mtpos {
 #define MT_ST_SYSV              0x1000
 #define MT_ST_NOWAIT            0x2000
 #define MT_ST_SILI             0x4000
+#define MT_ST_NOWAIT_EOF       0x8000
 
 /* The mode parameters to be controlled. Parameter chosen with bits 20-28 */
 #define MT_ST_CLEAR_DEFAULT    0xfffff
index bd9f55a5958d4cd982190292076123fcb3fd6ea6..ddbb6a901f653b7880ed293ee2f67db17a01b850 100644 (file)
@@ -299,18 +299,31 @@ struct perf_event_mmap_page {
        /*
         * Bits needed to read the hw events in user-space.
         *
-        *   u32 seq;
-        *   s64 count;
+        *   u32 seq, time_mult, time_shift, idx, width;
+        *   u64 count, enabled, running;
+        *   u64 cyc, time_offset;
+        *   s64 pmc = 0;
         *
         *   do {
         *     seq = pc->lock;
-        *
         *     barrier()
-        *     if (pc->index) {
-        *       count = pmc_read(pc->index - 1);
-        *       count += pc->offset;
-        *     } else
-        *       goto regular_read;
+        *
+        *     enabled = pc->time_enabled;
+        *     running = pc->time_running;
+        *
+        *     if (pc->cap_usr_time && enabled != running) {
+        *       cyc = rdtsc();
+        *       time_offset = pc->time_offset;
+        *       time_mult   = pc->time_mult;
+        *       time_shift  = pc->time_shift;
+        *     }
+        *
+        *     idx = pc->index;
+        *     count = pc->offset;
+        *     if (pc->cap_usr_rdpmc && idx) {
+        *       width = pc->pmc_width;
+        *       pmc = rdpmc(idx - 1);
+        *     }
         *
         *     barrier();
         *   } while (pc->lock != seq);
@@ -323,14 +336,57 @@ struct perf_event_mmap_page {
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */
-       __u32   time_mult, time_shift;
+       union {
+               __u64   capabilities;
+               __u64   cap_usr_time  : 1,
+                       cap_usr_rdpmc : 1,
+                       cap_____res   : 62;
+       };
+
+       /*
+        * If cap_usr_rdpmc this field provides the bit-width of the value
+        * read using the rdpmc() or equivalent instruction. This can be used
+        * to sign extend the result like:
+        *
+        *   pmc <<= 64 - width;
+        *   pmc >>= 64 - width; // signed shift right
+        *   count += pmc;
+        */
+       __u16   pmc_width;
+
+       /*
+        * If cap_usr_time the below fields can be used to compute the time
+        * delta since time_enabled (in ns) using rdtsc or similar.
+        *
+        *   u64 quot, rem;
+        *   u64 delta;
+        *
+        *   quot = (cyc >> time_shift);
+        *   rem = cyc & ((1 << time_shift) - 1);
+        *   delta = time_offset + quot * time_mult +
+        *              ((rem * time_mult) >> time_shift);
+        *
+        * Where time_offset,time_mult,time_shift and cyc are read in the
+        * seqcount loop described above. This delta can then be added to
+        * enabled and possibly running (if idx), improving the scaling:
+        *
+        *   enabled += delta;
+        *   if (idx)
+        *     running += delta;
+        *
+        *   quot = count / running;
+        *   rem  = count % running;
+        *   count = quot * enabled + (rem * enabled) / running;
+        */
+       __u16   time_shift;
+       __u32   time_mult;
        __u64   time_offset;
 
                /*
                 * Hole for extension of the self monitor capabilities
                 */
 
-       __u64   __reserved[121];        /* align to 1k */
+       __u64   __reserved[120];        /* align to 1k */
 
        /*
         * Control data for the mmap() data buffer.
@@ -550,6 +606,7 @@ struct perf_guest_info_callbacks {
 #include <linux/irq_work.h>
 #include <linux/static_key.h>
 #include <linux/atomic.h>
+#include <linux/sysfs.h>
 #include <asm/local.h>
 
 #define PERF_MAX_STACK_DEPTH           255
@@ -1291,5 +1348,18 @@ do {                                                                     \
        register_cpu_notifier(&fn##_nb);                                \
 } while (0)
 
+
+#define PMU_FORMAT_ATTR(_name, _format)                                        \
+static ssize_t                                                         \
+_name##_show(struct device *dev,                                       \
+                              struct device_attribute *attr,           \
+                              char *page)                              \
+{                                                                      \
+       BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
+       return sprintf(page, _format "\n");                             \
+}                                                                      \
+                                                                       \
+static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */
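
The documentation added above spells out the self-monitoring read sequence; a
user-space sketch may still help. It assumes an x86 host (rdpmc via inline asm)
and a pc pointer obtained by mmap()ing the first page of a perf event fd; the
enabled/running time scaling (cap_usr_time, time_mult, time_shift, time_offset)
is omitted for brevity and would slot into the same seqlock loop.

#include <stdint.h>
#include <linux/perf_event.h>

/* x86-only counter read; an assumption about the target architecture. */
static inline uint64_t read_pmc(uint32_t idx)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
	return (uint64_t)hi << 32 | lo;
}

/* Sketch of the seqlock read loop documented in the mmap page comment. */
static uint64_t mmap_read_self(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx, width;
	uint64_t count;
	int64_t pmc;

	do {
		seq = pc->lock;
		asm volatile("" ::: "memory");		/* barrier() */

		idx = pc->index;
		count = pc->offset;
		if (pc->cap_usr_rdpmc && idx) {
			width = pc->pmc_width;
			pmc = read_pmc(idx - 1);
			pmc <<= 64 - width;		/* sign extend to 64 bit */
			pmc >>= 64 - width;
			count += pmc;
		}

		asm volatile("" ::: "memory");
	} while (pc->lock != seq);

	return count;
}
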
index 67be0376d8e35e0593945dd9df507ed8528d02ce..7be2e88f23fdae28f322f484c36a409e00c39eb3 100644 (file)
@@ -151,6 +151,9 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
 
 void ring_buffer_record_disable(struct ring_buffer *buffer);
 void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_off(struct ring_buffer *buffer);
+void ring_buffer_record_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
index 9c23ee8fd2d33d037cb7cb5760dc4130948b1ccc..917741bb8e11d18b1dc7c88ddbbadc850029223e 100644 (file)
@@ -261,7 +261,8 @@ struct iscsi_uevent {
                } host_event;
                struct msg_ping_comp {
                        uint32_t        host_no;
-                       uint32_t        status;
+                       uint32_t        status; /* enum
+                                                * iscsi_ping_status_code */
                        uint32_t        pid;    /* unique ping id associated
                                                   with each ping request */
                        uint32_t        data_size;
@@ -483,6 +484,20 @@ enum iscsi_port_state {
        ISCSI_PORT_STATE_UP             = 0x2,
 };
 
+/* iSCSI PING status/error code */
+enum iscsi_ping_status_code {
+       ISCSI_PING_SUCCESS                      = 0,
+       ISCSI_PING_FW_DISABLED                  = 0x1,
+       ISCSI_PING_IPADDR_INVALID               = 0x2,
+       ISCSI_PING_LINKLOCAL_IPV6_ADDR_INVALID  = 0x3,
+       ISCSI_PING_TIMEOUT                      = 0x4,
+       ISCSI_PING_INVALID_DEST_ADDR            = 0x5,
+       ISCSI_PING_OVERSIZE_PACKET              = 0x6,
+       ISCSI_PING_ICMP_ERROR                   = 0x7,
+       ISCSI_PING_MAX_REQ_EXCEEDED             = 0x8,
+       ISCSI_PING_NO_ARP_RECEIVED              = 0x9,
+};
+
 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
 
@@ -578,6 +593,6 @@ struct iscsi_chap_rec {
        char username[ISCSI_CHAP_AUTH_NAME_MAX_LEN];
        uint8_t password[ISCSI_CHAP_AUTH_SECRET_MAX_LEN];
        uint8_t password_length;
-} __packed;
+};
 
 #endif
index 5a35a2a2d3c514bef92ef7bed3d32216cc08c206..cfdb55f0937e37002d21be6eaf9f81833683aa4b 100644 (file)
@@ -165,7 +165,8 @@ struct fcoe_ctlr {
  * @switch_name: WWN of switch from advertisement
  * @fabric_name: WWN of fabric from advertisement
  * @fc_map:     FC_MAP value from advertisement
- * @fcf_mac:    Ethernet address of the FCF
+ * @fcf_mac:    Ethernet address of the FCF for FIP traffic
+ * @fcoe_mac:   Ethernet address of the FCF for FCoE traffic
  * @vfid:       virtual fabric ID
  * @pri:        selection priority, smaller values are better
  * @flogi_sent:         current FLOGI sent to this FCF
@@ -188,6 +189,7 @@ struct fcoe_fcf {
        u32 fc_map;
        u16 vfid;
        u8 fcf_mac[ETH_ALEN];
+       u8 fcoe_mac[ETH_ALEN];
 
        u8 pri;
        u8 flogi_sent;
index 3098a38f3ae1b6a980da5e645b7c093e799721bf..9047330c73e9b8fed1098131513d6a8a1bdf0f8b 100644 (file)
@@ -2,7 +2,6 @@
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/minix_fs.h>
-#include <linux/ext2_fs.h>
 #include <linux/romfs_fs.h>
 #include <linux/initrd.h>
 #include <linux/sched.h>
index 01f1306aa26e2bfa0fd6fa273fc0754d117fa389..6212586df29ace81e239b71d01b95826cb636507 100644 (file)
@@ -54,20 +54,19 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
 {
        const int size = 512;
        struct minix_super_block *minixsb;
-       struct ext2_super_block *ext2sb;
        struct romfs_super_block *romfsb;
        struct cramfs_super *cramfsb;
        struct squashfs_super_block *squashfsb;
        int nblocks = -1;
        unsigned char *buf;
        const char *compress_name;
+       unsigned long n;
 
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
        minixsb = (struct minix_super_block *) buf;
-       ext2sb = (struct ext2_super_block *) buf;
        romfsb = (struct romfs_super_block *) buf;
        cramfsb = (struct cramfs_super *) buf;
        squashfsb = (struct squashfs_super_block *) buf;
@@ -150,12 +149,12 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
        }
 
        /* Try ext2 */
-       if (ext2sb->s_magic == cpu_to_le16(EXT2_SUPER_MAGIC)) {
+       n = ext2_image_size(buf);
+       if (n) {
                printk(KERN_NOTICE
                       "RAMDISK: ext2 filesystem found at block %d\n",
                       start_block);
-               nblocks = le32_to_cpu(ext2sb->s_blocks_count) <<
-                       le32_to_cpu(ext2sb->s_log_block_size);
+               nblocks = n;
                goto done;
        }
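
The removed open-coded check peeked at the ext2 superblock through
<linux/ext2_fs.h>; the replacement calls an ext2_image_size() helper defined
elsewhere in this series. Its implementation is not shown in this hunk, but
assuming it simply reproduces the deleted logic, it could look like the
following guess (offsets follow the on-disk ext2 superblock layout):

/*
 * Hedged sketch only -- not the real ext2_image_size().  Returns the same
 * value the deleted code computed, or 0 if no ext2 magic is found.
 */
static unsigned long ext2_image_size_sketch(const unsigned char *buf)
{
	u32 blocks_count   = le32_to_cpup((const __le32 *)(buf + 4));	/* s_blocks_count */
	u32 log_block_size = le32_to_cpup((const __le32 *)(buf + 24));	/* s_log_block_size */
	u16 magic          = le16_to_cpup((const __le16 *)(buf + 56));	/* s_magic */

	if (magic != 0xEF53)		/* EXT2_SUPER_MAGIC */
		return 0;

	return (unsigned long)blocks_count << log_block_size;
}
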
 
index 4b50357914fb437a30cd146e1bd33e1f2b43c449..a6a9ec4cd8f583d640ab0da9e4941b19fe8fd990 100644 (file)
@@ -3348,7 +3348,7 @@ static void calc_timer_values(struct perf_event *event,
        *running = ctx_time - event->tstamp_running;
 }
 
-void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
 {
 }
 
@@ -3398,7 +3398,7 @@ void perf_event_update_userpage(struct perf_event *event)
        userpg->time_running = running +
                        atomic64_read(&event->child_total_time_running);
 
-       perf_update_user_clock(userpg, now);
+       arch_perf_update_userpage(userpg, now);
 
        barrier();
        ++userpg->lock;
@@ -7116,6 +7116,13 @@ void __init perf_event_init(void)
 
        /* do not patch jump label more than once per second */
        jump_label_rate_limit(&perf_sched_events, HZ);
+
+       /*
+        * Build time assertion that we keep the data_head at the intended
+        * location.  IOW, validation we got the __reserved[] size right.
+        */
+       BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
+                    != 1024);
 }
 
 static int __init perf_event_sysfs_init(void)
index e3ed0ecee7c7a47c2b8c85e3717b3606343c30aa..4603b9d8f30a362d15dc60e4bd0cd659fd7c3f89 100644 (file)
@@ -1270,7 +1270,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
        int dest_cpu;
 
        /* Look for allowed, online CPU in same node. */
-       for_each_cpu_mask(dest_cpu, *nodemask) {
+       for_each_cpu(dest_cpu, nodemask) {
                if (!cpu_online(dest_cpu))
                        continue;
                if (!cpu_active(dest_cpu))
@@ -1281,7 +1281,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 
        for (;;) {
                /* Any allowed, online CPU? */
-               for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+               for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
                        if (!cpu_online(dest_cpu))
                                continue;
                        if (!cpu_active(dest_cpu))
@@ -1964,6 +1964,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
        finish_lock_switch(rq, prev);
+       finish_arch_post_lock_switch();
 
        fire_sched_in_preempt_notifiers(current);
        if (mm)
@@ -3101,8 +3102,6 @@ EXPORT_SYMBOL(sub_preempt_count);
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-       struct pt_regs *regs = get_irq_regs();
-
        if (oops_in_progress)
                return;
 
@@ -3113,11 +3112,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
        print_modules();
        if (irqs_disabled())
                print_irqtrace_events(prev);
-
-       if (regs)
-               show_regs(regs);
-       else
-               dump_stack();
+       dump_stack();
 }
 
 /*
index 42b1f304b0447512eb1f3fb808477d004ac5c03a..fb3acba4d52e052c8bf6a5da32979313a88e0478 100644 (file)
@@ -681,6 +681,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #ifndef finish_arch_switch
 # define finish_arch_switch(prev)      do { } while (0)
 #endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()        do { } while (0)
+#endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
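
finish_arch_post_lock_switch() is a new hook with a no-op default; an
architecture overrides it by defining the macro from one of its context-switch
headers. A hedged sketch of what the arch side might look like; the helper it
calls is hypothetical:

/* Hypothetical arch-side override of the default no-op above. */
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	/*
	 * Called from finish_task_switch() after the runqueue lock has
	 * been released, for switch-completion work that must not run
	 * under that lock.
	 */
	arch_complete_deferred_mm_switch();	/* hypothetical helper */
}
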
index cd3134510f3d0b9d807a532d6d9ab271e98a40f1..a1d2849f247314ce7fc5d33e51ff3c06d3641771 100644 (file)
@@ -141,7 +141,7 @@ if FTRACE
 config FUNCTION_TRACER
        bool "Kernel Function Tracer"
        depends on HAVE_FUNCTION_TRACER
-       select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE
+       select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
        select KALLSYMS
        select GENERIC_TRACER
        select CONTEXT_SWITCH_TRACER
index 867bd1dd2dd09250f3a52663844ee1b54435bbd3..0fa92f677c9209e2ed046752752de986e34f071e 100644 (file)
@@ -249,7 +249,8 @@ static void update_ftrace_function(void)
 #else
        __ftrace_trace_function = func;
 #endif
-       ftrace_trace_function = ftrace_test_stop_func;
+       ftrace_trace_function =
+               (func == ftrace_stub) ? func : ftrace_test_stop_func;
 #endif
 }
 
index f5b7b5c1195beaf806f19d59e0ce7114c0e36add..cf8d11e91efdf92d95dad58d6fa771d2ac998786 100644 (file)
@@ -154,33 +154,10 @@ enum {
 
 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
 
-#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
-
-/**
- * tracing_on - enable all tracing buffers
- *
- * This function enables all tracing buffers that may have been
- * disabled with tracing_off.
- */
-void tracing_on(void)
-{
-       set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_on);
+/* Used for individual buffers (after the counter) */
+#define RB_BUFFER_OFF          (1 << 20)
 
-/**
- * tracing_off - turn off all tracing buffers
- *
- * This function stops all tracing buffers from recording data.
- * It does not disable any overhead the tracers themselves may
- * be causing. This function simply causes all recording to
- * the ring buffers to fail.
- */
-void tracing_off(void)
-{
-       clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
-}
-EXPORT_SYMBOL_GPL(tracing_off);
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 
 /**
  * tracing_off_permanent - permanently disable ring buffers
@@ -193,15 +170,6 @@ void tracing_off_permanent(void)
        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 }
 
-/**
- * tracing_is_on - show state of ring buffers enabled
- */
-int tracing_is_on(void)
-{
-       return ring_buffer_flags == RB_BUFFERS_ON;
-}
-EXPORT_SYMBOL_GPL(tracing_is_on);
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT           4U
 #define RB_MAX_SMALL_DATA      (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2618,6 +2586,63 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 
+/**
+ * ring_buffer_record_off - stop all writes into the buffer
+ * @buffer: The ring buffer to stop writes to.
+ *
+ * This prevents all writes to the buffer. Any attempt to write
+ * to the buffer after this will fail and return NULL.
+ *
+ * This is different from ring_buffer_record_disable(), as
+ * it works like an on/off switch, whereas the disable() version
+ * must be paired with an enable().
+ */
+void ring_buffer_record_off(struct ring_buffer *buffer)
+{
+       unsigned int rd;
+       unsigned int new_rd;
+
+       do {
+               rd = atomic_read(&buffer->record_disabled);
+               new_rd = rd | RB_BUFFER_OFF;
+       } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_off);
+
+/**
+ * ring_buffer_record_on - restart writes into the buffer
+ * @buffer: The ring buffer to start writes to.
+ *
+ * This enables all writes to the buffer that was disabled by
+ * ring_buffer_record_off().
+ *
+ * This is different from ring_buffer_record_enable(), as
+ * it works like an on/off switch, whereas the enable() version
+ * must be paired with a disable().
+ */
+void ring_buffer_record_on(struct ring_buffer *buffer)
+{
+       unsigned int rd;
+       unsigned int new_rd;
+
+       do {
+               rd = atomic_read(&buffer->record_disabled);
+               new_rd = rd & ~RB_BUFFER_OFF;
+       } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+}
+EXPORT_SYMBOL_GPL(ring_buffer_record_on);
+
+/**
+ * ring_buffer_record_is_on - return true if the ring buffer can write
+ * @buffer: The ring buffer to see if write is enabled
+ *
+ * Returns true if the ring buffer is in a state that it accepts writes.
+ */
+int ring_buffer_record_is_on(struct ring_buffer *buffer)
+{
+       return !atomic_read(&buffer->record_disabled);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
@@ -4039,68 +4064,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 
-#ifdef CONFIG_TRACING
-static ssize_t
-rb_simple_read(struct file *filp, char __user *ubuf,
-              size_t cnt, loff_t *ppos)
-{
-       unsigned long *p = filp->private_data;
-       char buf[64];
-       int r;
-
-       if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
-               r = sprintf(buf, "permanently disabled\n");
-       else
-               r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
-
-       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-rb_simple_write(struct file *filp, const char __user *ubuf,
-               size_t cnt, loff_t *ppos)
-{
-       unsigned long *p = filp->private_data;
-       unsigned long val;
-       int ret;
-
-       ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-       if (ret)
-               return ret;
-
-       if (val)
-               set_bit(RB_BUFFERS_ON_BIT, p);
-       else
-               clear_bit(RB_BUFFERS_ON_BIT, p);
-
-       (*ppos)++;
-
-       return cnt;
-}
-
-static const struct file_operations rb_simple_fops = {
-       .open           = tracing_open_generic,
-       .read           = rb_simple_read,
-       .write          = rb_simple_write,
-       .llseek         = default_llseek,
-};
-
-
-static __init int rb_init_debugfs(void)
-{
-       struct dentry *d_tracer;
-
-       d_tracer = tracing_init_dentry();
-
-       trace_create_file("tracing_on", 0644, d_tracer,
-                           &ring_buffer_flags, &rb_simple_fops);
-
-       return 0;
-}
-
-fs_initcall(rb_init_debugfs);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu)
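
The new record_off()/record_on() pair latches the RB_BUFFER_OFF bit inside
record_disabled, while record_disable()/record_enable() remain a nesting
counter. A brief sketch contrasting the two; the wrapper function exists only
for illustration:

/* Illustrative only: "buffer" is any struct ring_buffer *. */
static void rb_usage_sketch(struct ring_buffer *buffer)
{
	/* counter style: every disable must be matched by an enable */
	ring_buffer_record_disable(buffer);
	/* ... walk the buffer pages without racing against writers ... */
	ring_buffer_record_enable(buffer);

	/* switch style: idempotent, no pairing required */
	ring_buffer_record_off(buffer);
	if (!ring_buffer_record_is_on(buffer))
		pr_info("ring buffer writes are now rejected\n");
	ring_buffer_record_on(buffer);
}
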
index 10d5503f0d04d1782a98888ba8e31e05b096516c..ed7b5d1e12f468168178b1a3c144d9736e0b4614 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/poll.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -351,6 +352,59 @@ static void wakeup_work_handler(struct work_struct *work)
 
 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
 
+/**
+ * tracing_on - enable tracing buffers
+ *
+ * This function enables tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+       if (global_trace.buffer)
+               ring_buffer_record_on(global_trace.buffer);
+       /*
+        * This flag is only looked at when buffers haven't been
+        * allocated yet. We don't really care about the race
+        * between setting this flag and actually turning
+        * on the buffer.
+        */
+       global_trace.buffer_disabled = 0;
+}
+EXPORT_SYMBOL_GPL(tracing_on);
+
+/**
+ * tracing_off - turn off tracing buffers
+ *
+ * This function stops the tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+       if (global_trace.buffer)
+               ring_buffer_record_off(global_trace.buffer);
+       /*
+        * This flag is only looked at when buffers haven't been
+        * allocated yet. We don't really care about the race
+        * between setting this flag and actually turning
+        * on the buffer.
+        */
+       global_trace.buffer_disabled = 1;
+}
+EXPORT_SYMBOL_GPL(tracing_off);
+
+/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+int tracing_is_on(void)
+{
+       if (global_trace.buffer)
+               return ring_buffer_record_is_on(global_trace.buffer);
+       return !global_trace.buffer_disabled;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -1644,6 +1698,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
        int cpu_file = iter->cpu_file;
        u64 next_ts = 0, ts;
        int next_cpu = -1;
+       int next_size = 0;
        int cpu;
 
        /*
@@ -1675,9 +1730,12 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
                        next_cpu = cpu;
                        next_ts = ts;
                        next_lost = lost_events;
+                       next_size = iter->ent_size;
                }
        }
 
+       iter->ent_size = next_size;
+
        if (ent_cpu)
                *ent_cpu = next_cpu;
 
@@ -4567,6 +4625,55 @@ static __init void create_trace_options_dir(void)
                create_trace_option_core_file(trace_options[i], i);
 }
 
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+              size_t cnt, loff_t *ppos)
+{
+       struct ring_buffer *buffer = filp->private_data;
+       char buf[64];
+       int r;
+
+       if (buffer)
+               r = ring_buffer_record_is_on(buffer);
+       else
+               r = 0;
+
+       r = sprintf(buf, "%d\n", r);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+               size_t cnt, loff_t *ppos)
+{
+       struct ring_buffer *buffer = filp->private_data;
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+       if (ret)
+               return ret;
+
+       if (buffer) {
+               if (val)
+                       ring_buffer_record_on(buffer);
+               else
+                       ring_buffer_record_off(buffer);
+       }
+
+       (*ppos)++;
+
+       return cnt;
+}
+
+static const struct file_operations rb_simple_fops = {
+       .open           = tracing_open_generic,
+       .read           = rb_simple_read,
+       .write          = rb_simple_write,
+       .llseek         = default_llseek,
+};
+
 static __init int tracer_init_debugfs(void)
 {
        struct dentry *d_tracer;
@@ -4626,6 +4733,9 @@ static __init int tracer_init_debugfs(void)
        trace_create_file("trace_clock", 0644, d_tracer, NULL,
                          &trace_clock_fops);
 
+       trace_create_file("tracing_on", 0644, d_tracer,
+                           global_trace.buffer, &rb_simple_fops);
+
 #ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                        &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4798,6 +4908,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
                        if (ret != TRACE_TYPE_NO_CONSUME)
                                trace_consume(&iter);
                }
+               touch_nmi_watchdog();
 
                trace_printk_seq(&iter.seq);
        }
@@ -4863,6 +4974,8 @@ __init static int tracer_alloc_buffers(void)
                goto out_free_cpumask;
        }
        global_trace.entries = ring_buffer_size(global_trace.buffer);
+       if (global_trace.buffer_disabled)
+               tracing_off();
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
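
With the tracing_on control file now backed directly by the global ring buffer
(rb_simple_fops above), user space toggles recording through debugfs. A minimal
user-space sketch, assuming debugfs is mounted at /sys/kernel/debug:

#include <fcntl.h>
#include <unistd.h>

/* Freeze (0) or resume (1) the global trace buffer via debugfs. */
static int set_tracing_on(int on)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, on ? "1" : "0", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
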
index 54faec790bc18c53ddcc40df65640d340d2f01d3..95059f091a242abcfd60bbe9169131e7870e4a2c 100644 (file)
@@ -154,6 +154,7 @@ struct trace_array {
        struct ring_buffer      *buffer;
        unsigned long           entries;
        int                     cpu;
+       int                     buffer_disabled;
        cycle_t                 time_start;
        struct task_struct      *waiter;
        struct trace_array_cpu  *data[NR_CPUS];
@@ -835,13 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
                     filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_PERF_EVENTS
 #ifdef CONFIG_FUNCTION_TRACER
 int perf_ftrace_event_register(struct ftrace_event_call *call,
                               enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
 #endif /* CONFIG_FUNCTION_TRACER */
-#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* _LINUX_KERNEL_TRACE_H */
index d91eb0541b3aa73f206e7f6ec8eee4c563c04233..4108e1250ca2c2b83e58a99c851c6506fe1348bb 100644 (file)
@@ -166,6 +166,12 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
 
 #define FTRACE_STACK_ENTRIES   8
 
+#ifndef CONFIG_64BIT
+# define IP_FMT "%08lx"
+#else
+# define IP_FMT "%016lx"
+#endif
+
 FTRACE_ENTRY(kernel_stack, stack_entry,
 
        TRACE_STACK,
@@ -175,8 +181,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
                __dynamic_array(unsigned long,  caller  )
        ),
 
-       F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
-                "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
+       F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+                "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+                "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
                 __entry->caller[0], __entry->caller[1], __entry->caller[2],
                 __entry->caller[3], __entry->caller[4], __entry->caller[5],
                 __entry->caller[6], __entry->caller[7]),
@@ -193,8 +200,9 @@ FTRACE_ENTRY(user_stack, userstack_entry,
                __array(        unsigned long,  caller, FTRACE_STACK_ENTRIES    )
        ),
 
-       F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
-                "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
+       F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+                "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+                "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
                 __entry->caller[0], __entry->caller[1], __entry->caller[2],
                 __entry->caller[3], __entry->caller[4], __entry->caller[5],
                 __entry->caller[6], __entry->caller[7]),
index 7b46c9bd22aef8ed4f6de572965c439af54b5ff8..3dd15e8bc856d87f7e4525a1577b5af579db6f96 100644 (file)
@@ -162,7 +162,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call)   \
 #define __dynamic_array(type, item)
 
 #undef F_printk
-#define F_printk(fmt, args...) #fmt ", "  __stringify(args)
+#define F_printk(fmt, args...) __stringify(fmt) ", "  __stringify(args)
 
 #undef FTRACE_ENTRY_REG
 #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
index 6989472d0957d89ab80e773c4ce1e2751de56cbe..1a70fa26da72683c4bd3fe343baa83117f5ed285 100644 (file)
@@ -513,7 +513,7 @@ static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
  * be performed under a lock, to allow the lock to be released
  * before calling the auditing code.
  */
-int avc_audit(u32 ssid, u32 tsid,
+inline int avc_audit(u32 ssid, u32 tsid,
               u16 tclass, u32 requested,
               struct av_decision *avd, int result, struct common_audit_data *a,
               unsigned flags)
@@ -741,6 +741,41 @@ int avc_ss_reset(u32 seqno)
        return rc;
 }
 
+/*
+ * Slow-path helper function for avc_has_perm_noaudit,
+ * when the avc_node lookup fails. We get called with
+ * the RCU read lock held, and need to return with it
+ * still held, but drop it for the security compute.
+ *
+ * Don't inline this, since it's the slow-path and just
+ * results in a bigger stack frame.
+ */
+static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
+                        u16 tclass, struct av_decision *avd)
+{
+       rcu_read_unlock();
+       security_compute_av(ssid, tsid, tclass, avd);
+       rcu_read_lock();
+       return avc_insert(ssid, tsid, tclass, avd);
+}
+
+static noinline int avc_denied(u32 ssid, u32 tsid,
+                        u16 tclass, u32 requested,
+                        unsigned flags,
+                        struct av_decision *avd)
+{
+       if (flags & AVC_STRICT)
+               return -EACCES;
+
+       if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
+               return -EACCES;
+
+       avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
+                               tsid, tclass, avd->seqno);
+       return 0;
+}
+
+
 /**
  * avc_has_perm_noaudit - Check permissions but perform no auditing.
  * @ssid: source security identifier
@@ -761,7 +796,7 @@ int avc_ss_reset(u32 seqno)
  * auditing, e.g. in cases where a lock must be held for the check but
  * should be released for the auditing.
  */
-int avc_has_perm_noaudit(u32 ssid, u32 tsid,
+inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
                         u16 tclass, u32 requested,
                         unsigned flags,
                         struct av_decision *avd)
@@ -776,26 +811,15 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 
        node = avc_lookup(ssid, tsid, tclass);
        if (unlikely(!node)) {
-               rcu_read_unlock();
-               security_compute_av(ssid, tsid, tclass, avd);
-               rcu_read_lock();
-               node = avc_insert(ssid, tsid, tclass, avd);
+               node = avc_compute_av(ssid, tsid, tclass, avd);
        } else {
                memcpy(avd, &node->ae.avd, sizeof(*avd));
                avd = &node->ae.avd;
        }
 
        denied = requested & ~(avd->allowed);
-
-       if (denied) {
-               if (flags & AVC_STRICT)
-                       rc = -EACCES;
-               else if (!selinux_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE))
-                       avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
-                                       tsid, tclass, avd->seqno);
-               else
-                       rc = -EACCES;
-       }
+       if (unlikely(denied))
+               rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
 
        rcu_read_unlock();
        return rc;
index 15c6c567468b6f4a12d00b8a4f5756311ddbc532..28482f9e15b89101c0a0cf90dfc94d53a6639ee5 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/kernel.h>
 #include <linux/tracehook.h>
 #include <linux/errno.h>
-#include <linux/ext2_fs.h>
 #include <linux/sched.h>
 #include <linux/security.h>
 #include <linux/xattr.h>
@@ -2971,15 +2970,15 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
        /* fall through */
        case FIGETBSZ:
        /* fall through */
-       case EXT2_IOC_GETFLAGS:
+       case FS_IOC_GETFLAGS:
        /* fall through */
-       case EXT2_IOC_GETVERSION:
+       case FS_IOC_GETVERSION:
                error = file_has_perm(cred, file, FILE__GETATTR);
                break;
 
-       case EXT2_IOC_SETFLAGS:
+       case FS_IOC_SETFLAGS:
        /* fall through */
-       case EXT2_IOC_SETVERSION:
+       case FS_IOC_SETVERSION:
                error = file_has_perm(cred, file, FILE__SETATTR);
                break;
 
index 48a7d0014b4f4f5c4f40d92de9cb029bdf149d45..d7018bfa1f00a607aeade4d0047a11ccf8cf7a7e 100644 (file)
@@ -344,7 +344,7 @@ static int sel_make_classes(void);
 static int sel_make_policycap(void);
 
 /* declaration for sel_make_class_dirs */
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
                        unsigned long *ino);
 
 static ssize_t sel_read_mls(struct file *filp, char __user *buf,
@@ -1678,13 +1678,9 @@ static int sel_make_class_dir_entries(char *classname, int index,
        inode->i_ino = sel_class_to_ino(index);
        d_add(dentry, inode);
 
-       dentry = d_alloc_name(dir, "perms");
-       if (!dentry)
-               return -ENOMEM;
-
-       rc = sel_make_dir(dir->d_inode, dentry, &last_class_ino);
-       if (rc)
-               return rc;
+       dentry = sel_make_dir(dir, "perms", &last_class_ino);
+       if (IS_ERR(dentry))
+               return PTR_ERR(dentry);
 
        rc = sel_make_perm_files(classname, index, dentry);
 
@@ -1733,15 +1729,12 @@ static int sel_make_classes(void)
        for (i = 0; i < nclasses; i++) {
                struct dentry *class_name_dir;
 
-               rc = -ENOMEM;
-               class_name_dir = d_alloc_name(class_dir, classes[i]);
-               if (!class_name_dir)
-                       goto out;
-
-               rc = sel_make_dir(class_dir->d_inode, class_name_dir,
+               class_name_dir = sel_make_dir(class_dir, classes[i],
                                &last_class_ino);
-               if (rc)
+               if (IS_ERR(class_name_dir)) {
+                       rc = PTR_ERR(class_name_dir);
                        goto out;
+               }
 
                /* i+1 since class values are 1-indexed */
                rc = sel_make_class_dir_entries(classes[i], i + 1,
@@ -1787,14 +1780,20 @@ static int sel_make_policycap(void)
        return 0;
 }
 
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
                        unsigned long *ino)
 {
+       struct dentry *dentry = d_alloc_name(dir, name);
        struct inode *inode;
 
-       inode = sel_make_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
-       if (!inode)
-               return -ENOMEM;
+       if (!dentry)
+               return ERR_PTR(-ENOMEM);
+
+       inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO);
+       if (!inode) {
+               dput(dentry);
+               return ERR_PTR(-ENOMEM);
+       }
 
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
@@ -1803,16 +1802,16 @@ static int sel_make_dir(struct inode *dir, struct dentry *dentry,
        inc_nlink(inode);
        d_add(dentry, inode);
        /* bump link count on parent directory, too */
-       inc_nlink(dir);
+       inc_nlink(dir->d_inode);
 
-       return 0;
+       return dentry;
 }
 
 static int sel_fill_super(struct super_block *sb, void *data, int silent)
 {
        int ret;
        struct dentry *dentry;
-       struct inode *inode, *root_inode;
+       struct inode *inode;
        struct inode_security_struct *isec;
 
        static struct tree_descr selinux_files[] = {
@@ -1839,18 +1838,12 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
        if (ret)
                goto err;
 
-       root_inode = sb->s_root->d_inode;
-
-       ret = -ENOMEM;
-       dentry = d_alloc_name(sb->s_root, BOOL_DIR_NAME);
-       if (!dentry)
+       bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino);
+       if (IS_ERR(bool_dir)) {
+               ret = PTR_ERR(bool_dir);
+               bool_dir = NULL;
                goto err;
-
-       ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-       if (ret)
-               goto err;
-
-       bool_dir = dentry;
+       }
 
        ret = -ENOMEM;
        dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
@@ -1872,54 +1865,39 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
        d_add(dentry, inode);
        selinux_null = dentry;
 
-       ret = -ENOMEM;
-       dentry = d_alloc_name(sb->s_root, "avc");
-       if (!dentry)
-               goto err;
-
-       ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-       if (ret)
+       dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
+       if (IS_ERR(dentry)) {
+               ret = PTR_ERR(dentry);
                goto err;
+       }
 
        ret = sel_make_avc_files(dentry);
        if (ret)
                goto err;
 
-       ret = -ENOMEM;
-       dentry = d_alloc_name(sb->s_root, "initial_contexts");
-       if (!dentry)
-               goto err;
-
-       ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-       if (ret)
+       dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino);
+       if (IS_ERR(dentry)) {
+               ret = PTR_ERR(dentry);
                goto err;
+       }
 
        ret = sel_make_initcon_files(dentry);
        if (ret)
                goto err;
 
-       ret = -ENOMEM;
-       dentry = d_alloc_name(sb->s_root, "class");
-       if (!dentry)
-               goto err;
-
-       ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-       if (ret)
-               goto err;
-
-       class_dir = dentry;
-
-       ret = -ENOMEM;
-       dentry = d_alloc_name(sb->s_root, "policy_capabilities");
-       if (!dentry)
+       class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino);
+       if (IS_ERR(class_dir)) {
+               ret = PTR_ERR(class_dir);
+               class_dir = NULL;
                goto err;
+       }
 
-       ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
-       if (ret)
+       policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino);
+       if (IS_ERR(policycap_dir)) {
+               ret = PTR_ERR(policycap_dir);
+               policycap_dir = NULL;
                goto err;
-
-       policycap_dir = dentry;
-
+       }
        return 0;
 err:
        printk(KERN_ERR "SELinux: %s:  failed while creating inodes\n",
index 87feeee8b90c4137a0fe5d5260a31ca98a3c3766..2d89f02719b5f6ce52e502414c031a2d7610ca9a 100644 (file)
@@ -48,6 +48,9 @@ OPTIONS
        Only consider these symbols. CSV that understands
        file://filename entries.
 
+--symbol-filter=::
+       Only show symbols that match (partially) with this filter.
+
 -U::
 --hide-unresolved::
         Only display entries resolved to a symbol.
@@ -110,6 +113,8 @@ OPTIONS
        requires a tty, if one is not present, as when piping to other
        commands, the stdio interface is used.
 
+--gtk:: Use the GTK2 interface.
+
 -k::
 --vmlinux=<file>::
         vmlinux pathname
index 74fd7f89208a092e5312785317aa6ed49a81741e..820371f10d1b1b96e1be9332d454f6ee053aaf34 100644 (file)
@@ -182,7 +182,7 @@ endif
 
 ### --- END CONFIGURATION SECTION ---
 
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 BASIC_LDFLAGS =
 
 # Guard against environment variables
@@ -234,6 +234,25 @@ endif
 
 export PERL_PATH
 
+FLEX = $(CROSS_COMPILE)flex
+BISON= $(CROSS_COMPILE)bison
+
+event-parser:
+       $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
+       $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
+
+$(OUTPUT)util/parse-events-flex.c: event-parser
+$(OUTPUT)util/parse-events-bison.c: event-parser
+
+pmu-parser:
+       $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
+       $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
+
+$(OUTPUT)util/pmu-flex.c: pmu-parser
+$(OUTPUT)util/pmu-bison.c: pmu-parser
+
+$(OUTPUT)util/parse-events.o: event-parser pmu-parser
+
 LIB_FILE=$(OUTPUT)libperf.a
 
 LIB_H += ../../include/linux/perf_event.h
@@ -249,7 +268,7 @@ LIB_H += util/include/linux/const.h
 LIB_H += util/include/linux/ctype.h
 LIB_H += util/include/linux/kernel.h
 LIB_H += util/include/linux/list.h
-LIB_H += util/include/linux/module.h
+LIB_H += util/include/linux/export.h
 LIB_H += util/include/linux/poison.h
 LIB_H += util/include/linux/prefetch.h
 LIB_H += util/include/linux/rbtree.h
@@ -276,6 +295,7 @@ LIB_H += util/build-id.h
 LIB_H += util/debug.h
 LIB_H += util/debugfs.h
 LIB_H += util/sysfs.h
+LIB_H += util/pmu.h
 LIB_H += util/event.h
 LIB_H += util/evsel.h
 LIB_H += util/evlist.h
@@ -323,6 +343,7 @@ LIB_OBJS += $(OUTPUT)util/config.o
 LIB_OBJS += $(OUTPUT)util/ctype.o
 LIB_OBJS += $(OUTPUT)util/debugfs.o
 LIB_OBJS += $(OUTPUT)util/sysfs.o
+LIB_OBJS += $(OUTPUT)util/pmu.o
 LIB_OBJS += $(OUTPUT)util/environment.o
 LIB_OBJS += $(OUTPUT)util/event.o
 LIB_OBJS += $(OUTPUT)util/evlist.o
@@ -359,6 +380,10 @@ LIB_OBJS += $(OUTPUT)util/session.o
 LIB_OBJS += $(OUTPUT)util/thread.o
 LIB_OBJS += $(OUTPUT)util/thread_map.o
 LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
+LIB_OBJS += $(OUTPUT)util/parse-events-flex.o
+LIB_OBJS += $(OUTPUT)util/parse-events-bison.o
+LIB_OBJS += $(OUTPUT)util/pmu-flex.o
+LIB_OBJS += $(OUTPUT)util/pmu-bison.o
 LIB_OBJS += $(OUTPUT)util/trace-event-read.o
 LIB_OBJS += $(OUTPUT)util/trace-event-info.o
 LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
@@ -501,6 +526,20 @@ else
        endif
 endif
 
+ifdef NO_GTK2
+       BASIC_CFLAGS += -DNO_GTK2
+else
+       FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0)
+       ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
+               msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
+               BASIC_CFLAGS += -DNO_GTK2_SUPPORT
+       else
+               BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
+               EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
+               LIB_OBJS += $(OUTPUT)util/gtk/browser.o
+       endif
+endif
+
 ifdef NO_LIBPERL
        BASIC_CFLAGS += -DNO_LIBPERL
 else
@@ -647,6 +686,8 @@ ifndef V
        QUIET_LINK     = @echo '   ' LINK $@;
        QUIET_MKDIR    = @echo '   ' MKDIR $@;
        QUIET_GEN      = @echo '   ' GEN $@;
+       QUIET_FLEX     = @echo '   ' FLEX $@;
+       QUIET_BISON    = @echo '   ' BISON $@;
 endif
 endif
 
@@ -727,12 +768,28 @@ $(OUTPUT)perf.o perf.spec \
        $(SCRIPTS) \
        : $(OUTPUT)PERF-VERSION-FILE
 
+.SUFFIXES:
+.SUFFIXES: .o .c .S .s
+
+# These two need to be here so that when O= is not used they take precedence
+# over the general rule for .o
+
+$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
+       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+
+$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
+       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+
 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS
+       $(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $<
 $(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
-       $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
+       $(QUIET_CC)$(CC) -o $@ -S $(ALL_CFLAGS) $<
 $(OUTPUT)%.o: %.S
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.s: %.S
+       $(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $<
 
 $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
@@ -795,6 +852,8 @@ help:
        @echo '  html           - make html documentation'
        @echo '  info           - make GNU info documentation (access with info <foo>)'
        @echo '  pdf            - make pdf documentation'
+       @echo '  event-parser   - make event parser code'
+       @echo '  pmu-parser     - make pmu format parser code'
        @echo '  TAGS           - use etags to make tag information for source browsing'
        @echo '  tags           - use ctags to make tag information for source browsing'
        @echo '  cscope - use cscope to make interactive browsing database'
@@ -931,6 +990,7 @@ clean:
        $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
        $(MAKE) -C Documentation/ clean
        $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
+       $(RM) $(OUTPUT)util/*-{bison,flex}*
        $(python-clean)
 
 .PHONY: all install clean strip
index 4f19513d7dda068eed48141ee4b3b300381cdc3d..d29d350fb2b731187d92233d6c0ddb5ef2a368a6 100644 (file)
@@ -24,6 +24,11 @@ static char    diff__default_sort_order[] = "dso,symbol";
 static bool  force;
 static bool show_displacement;
 
+struct perf_diff {
+       struct perf_tool tool;
+       struct perf_session *session;
+};
+
 static int hists__add_entry(struct hists *self,
                            struct addr_location *al, u64 period)
 {
@@ -32,12 +37,14 @@ static int hists__add_entry(struct hists *self,
        return -ENOMEM;
 }
 
-static int diff__process_sample_event(struct perf_tool *tool __used,
+static int diff__process_sample_event(struct perf_tool *tool,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_evsel *evsel __used,
                                      struct machine *machine)
 {
+       struct perf_diff *_diff = container_of(tool, struct perf_diff, tool);
+       struct perf_session *session = _diff->session;
        struct addr_location al;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
@@ -49,24 +56,26 @@ static int diff__process_sample_event(struct perf_tool *tool __used,
        if (al.filtered || al.sym == NULL)
                return 0;
 
-       if (hists__add_entry(&evsel->hists, &al, sample->period)) {
+       if (hists__add_entry(&session->hists, &al, sample->period)) {
                pr_warning("problem incrementing symbol period, skipping event\n");
                return -1;
        }
 
-       evsel->hists.stats.total_period += sample->period;
+       session->hists.stats.total_period += sample->period;
        return 0;
 }
 
-static struct perf_tool perf_diff = {
-       .sample = diff__process_sample_event,
-       .mmap   = perf_event__process_mmap,
-       .comm   = perf_event__process_comm,
-       .exit   = perf_event__process_task,
-       .fork   = perf_event__process_task,
-       .lost   = perf_event__process_lost,
-       .ordered_samples = true,
-       .ordering_requires_timestamps = true,
+static struct perf_diff diff = {
+       .tool = {
+               .sample = diff__process_sample_event,
+               .mmap   = perf_event__process_mmap,
+               .comm   = perf_event__process_comm,
+               .exit   = perf_event__process_task,
+               .fork   = perf_event__process_task,
+               .lost   = perf_event__process_lost,
+               .ordered_samples = true,
+               .ordering_requires_timestamps = true,
+       },
 };
 
 static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -107,12 +116,6 @@ static void hists__resort_entries(struct hists *self)
        self->entries = tmp;
 }
 
-static void hists__set_positions(struct hists *self)
-{
-       hists__output_resort(self);
-       hists__resort_entries(self);
-}
-
 static struct hist_entry *hists__find_entry(struct hists *self,
                                            struct hist_entry *he)
 {
@@ -146,30 +149,37 @@ static void hists__match(struct hists *older, struct hists *newer)
 static int __cmd_diff(void)
 {
        int ret, i;
+#define older (session[0])
+#define newer (session[1])
        struct perf_session *session[2];
 
-       session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff);
-       session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff);
+       older = perf_session__new(input_old, O_RDONLY, force, false,
+                                 &diff.tool);
+       newer = perf_session__new(input_new, O_RDONLY, force, false,
+                                 &diff.tool);
        if (session[0] == NULL || session[1] == NULL)
                return -ENOMEM;
 
        for (i = 0; i < 2; ++i) {
-               ret = perf_session__process_events(session[i], &perf_diff);
+               diff.session = session[i];
+               ret = perf_session__process_events(session[i], &diff.tool);
                if (ret)
                        goto out_delete;
+               hists__output_resort(&session[i]->hists);
        }
 
-       hists__output_resort(&session[1]->hists);
        if (show_displacement)
-               hists__set_positions(&session[0]->hists);
+               hists__resort_entries(&older->hists);
 
-       hists__match(&session[0]->hists, &session[1]->hists);
-       hists__fprintf(&session[1]->hists, &session[0]->hists,
+       hists__match(&older->hists, &newer->hists);
+       hists__fprintf(&newer->hists, &older->hists,
                       show_displacement, true, 0, 0, stdout);
 out_delete:
        for (i = 0; i < 2; ++i)
                perf_session__delete(session[i]);
        return ret;
+#undef older
+#undef newer
 }
 
 static const char * const diff_usage[] = {
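
The builtin-diff change above wraps the bare struct perf_tool in a struct perf_diff so the sample callback can recover the current session with container_of(). Below is a minimal, self-contained sketch of that idiom; the struct and field names are illustrative stand-ins rather than perf's real types, and only the standard offsetof() is assumed.

    #include <stddef.h>
    #include <stdio.h>

    /* Same idiom as the kernel's container_of(): recover a pointer to the
     * enclosing object from a pointer to one of its embedded members. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tool {                           /* stand-in for struct perf_tool */
            int (*sample)(struct tool *tool);
    };

    struct diff_ctx {                       /* stand-in for struct perf_diff */
            struct tool tool;               /* must be embedded by value     */
            const char *session_name;
    };

    static int sample_cb(struct tool *tool)
    {
            struct diff_ctx *ctx = container_of(tool, struct diff_ctx, tool);

            printf("callback resolved session: %s\n", ctx->session_name);
            return 0;
    }

    int main(void)
    {
            struct diff_ctx ctx = {
                    .tool         = { .sample = sample_cb },
                    .session_name = "perf.data.old",
            };

            /* Callers only ever see &ctx.tool, yet the callback reaches ctx. */
            return ctx.tool.sample(&ctx.tool);
    }

The only requirement is that the member is embedded by value; the outer object is then recovered purely by pointer arithmetic, which is why __cmd_diff() above can swap diff.session between the two processing passes.
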
index 8e91c6eba18adbcc7b1c8648d017699806b9d289..2e317438980b4767bbca4fdc3512cbd55d4ad1d2 100644 (file)
@@ -40,7 +40,7 @@ struct perf_report {
        struct perf_tool        tool;
        struct perf_session     *session;
        char const              *input_name;
-       bool                    force, use_tui, use_stdio;
+       bool                    force, use_tui, use_gtk, use_stdio;
        bool                    hide_unresolved;
        bool                    dont_use_callchains;
        bool                    show_full_info;
@@ -50,6 +50,7 @@ struct perf_report {
        const char              *pretty_printing_style;
        symbol_filter_t         annotate_init;
        const char              *cpu_list;
+       const char              *symbol_filter_str;
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 };
 
@@ -400,6 +401,9 @@ static int __cmd_report(struct perf_report *rep)
        list_for_each_entry(pos, &session->evlist->entries, node) {
                struct hists *hists = &pos->hists;
 
+               if (pos->idx == 0)
+                       hists->symbol_filter_str = rep->symbol_filter_str;
+
                hists__collapse_resort(hists);
                hists__output_resort(hists);
                nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
@@ -411,8 +415,13 @@ static int __cmd_report(struct perf_report *rep)
        }
 
        if (use_browser > 0) {
-               perf_evlist__tui_browse_hists(session->evlist, help,
-                                             NULL, NULL, 0);
+               if (use_browser == 1) {
+                       perf_evlist__tui_browse_hists(session->evlist, help,
+                                                     NULL, NULL, 0);
+               } else if (use_browser == 2) {
+                       perf_evlist__gtk_browse_hists(session->evlist, help,
+                                                     NULL, NULL, 0);
+               }
        } else
                perf_evlist__tty_browse_hists(session->evlist, rep, help);
 
@@ -569,6 +578,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
        OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
                   "pretty printing style key: normal raw"),
        OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
+       OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
        OPT_BOOLEAN(0, "stdio", &report.use_stdio,
                    "Use the stdio interface"),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
@@ -591,6 +601,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
                   "only consider symbols in these comms"),
        OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
                   "only consider these symbols"),
+       OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
+                  "only show symbols that (partially) match with this filter"),
        OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
                   "width[,width...]",
                   "don't try to adjust column width, use these fixed values"),
@@ -624,6 +636,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
                use_browser = 0;
        else if (report.use_tui)
                use_browser = 1;
+       else if (report.use_gtk)
+               use_browser = 2;
 
        if (report.inverted_callchain)
                callchain_param.order = ORDER_CALLER;
@@ -660,7 +674,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
        }
 
        if (strcmp(report.input_name, "-") != 0) {
-               setup_browser(true);
+               if (report.use_gtk)
+                       perf_gtk_setup_browser(argc, argv, true);
+               else
+                       setup_browser(true);
        } else {
                use_browser = 0;
        }
@@ -709,11 +726,16 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
        } else
                symbol_conf.exclude_other = false;
 
-       /*
-        * Any (unrecognized) arguments left?
-        */
-       if (argc)
-               usage_with_options(report_usage, options);
+       if (argc) {
+               /*
+                * Special case: if there's an argument left then assume that
+                * it's a symbol filter:
+                */
+               if (argc > 1)
+                       usage_with_options(report_usage, options);
+
+               report.symbol_filter_str = argv[0];
+       }
 
        sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
 
index ea40e4e8b2271f0a7a53ee25d8154cdcee4b19bd..c941bb640f4990f8590f8fb410dfaa0247bcc3b8 100644 (file)
@@ -296,7 +296,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
        if (system_wide)
                return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
                                                group, group_fd);
-       if (!target_pid && !target_tid) {
+       if (!target_pid && !target_tid && (!group || evsel == first)) {
                attr->disabled = 1;
                attr->enable_on_exec = 1;
        }
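
The builtin-stat hunk above sets disabled/enable_on_exec only on the first event of a group (the group leader), matching the evsel change later in this series. A rough stand-alone sketch of opening such a leader/member pair with the raw perf_event_open(2) syscall follows; the sys_perf_event_open() wrapper, the choice of events and the minimal error handling are mine, not perf's code.

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                   int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            int leader, member;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            /* Only the group leader starts disabled and is armed on exec. */
            attr.disabled       = 1;
            attr.enable_on_exec = 1;

            leader = sys_perf_event_open(&attr, 0 /* self */, -1, -1, 0);

            /* Group member: created enabled, grouped via the leader's fd. */
            attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
            attr.disabled       = 0;
            attr.enable_on_exec = 0;
            member = sys_perf_event_open(&attr, 0, -1, leader, 0);

            if (leader < 0 || member < 0)
                    perror("perf_event_open");
            return 0;
    }

Passing the leader's fd as group_fd is what makes the second counter a member of the leader's group, so the enable/disable knobs only need to live on the leader, exactly as the condition added above expresses.
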
index 3e087ce8daa63fce8572f50ab60f9c1aab0e4531..1c5b9801ac6115547039599ff1424048efefd0e7 100644 (file)
@@ -13,6 +13,7 @@
 #include "util/parse-events.h"
 #include "util/symbol.h"
 #include "util/thread_map.h"
+#include "util/pmu.h"
 #include "../../include/linux/hw_breakpoint.h"
 
 #include <sys/mman.h>
@@ -650,7 +651,7 @@ static int test__checkevent_raw(struct perf_evlist *evlist)
 
        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
-       TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong config", 0x1a == evsel->attr.config);
        return 0;
 }
 
@@ -677,6 +678,24 @@ static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
        return 0;
 }
 
+static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong period",
+                       100000 == evsel->attr.sample_period);
+       TEST_ASSERT_VAL("wrong config1",
+                       0 == evsel->attr.config1);
+       TEST_ASSERT_VAL("wrong config2",
+                       1 == evsel->attr.config2);
+       return 0;
+}
+
 static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -858,6 +877,115 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
        return test__checkevent_genhw(evlist);
 }
 
+static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+       return test__checkevent_breakpoint(evlist);
+}
+
+static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+       return test__checkevent_breakpoint_x(evlist);
+}
+
+static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+       return test__checkevent_breakpoint_r(evlist);
+}
+
+static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+       return test__checkevent_breakpoint_w(evlist);
+}
+
+static int test__checkevent_pmu(struct perf_evlist *evlist)
+{
+
+       struct perf_evsel *evsel = list_entry(evlist->entries.next,
+                                             struct perf_evsel, node);
+
+       TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",    10 == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong config1",    1 == evsel->attr.config1);
+       TEST_ASSERT_VAL("wrong config2",    3 == evsel->attr.config2);
+       TEST_ASSERT_VAL("wrong period",  1000 == evsel->attr.sample_period);
+
+       return 0;
+}
+
+static int test__checkevent_list(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
+
+       /* r1 */
+       evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1);
+       TEST_ASSERT_VAL("wrong config2", 0 == evsel->attr.config2);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+       /* syscalls:sys_enter_open:k */
+       evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong sample_type",
+               (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
+               evsel->attr.sample_type);
+       TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
+       TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+       /* 1:1:hp */
+       evsel = list_entry(evsel->node.next, struct perf_evsel, node);
+       TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+
+       return 0;
+}
+
 static struct test__event_st {
        const char *name;
        __u32 type;
@@ -872,7 +1000,7 @@ static struct test__event_st {
                .check = test__checkevent_tracepoint_multi,
        },
        {
-               .name  = "r1",
+               .name  = "r1a",
                .check = test__checkevent_raw,
        },
        {
@@ -883,6 +1011,10 @@ static struct test__event_st {
                .name  = "instructions",
                .check = test__checkevent_symbolic_name,
        },
+       {
+               .name  = "cycles/period=100000,config2/",
+               .check = test__checkevent_symbolic_name_config,
+       },
        {
                .name  = "faults",
                .check = test__checkevent_symbolic_alias,
@@ -916,7 +1048,7 @@ static struct test__event_st {
                .check = test__checkevent_tracepoint_multi_modifier,
        },
        {
-               .name  = "r1:kp",
+               .name  = "r1a:kp",
                .check = test__checkevent_raw_modifier,
        },
        {
@@ -935,6 +1067,30 @@ static struct test__event_st {
                .name  = "L1-dcache-load-miss:kp",
                .check = test__checkevent_genhw_modifier,
        },
+       {
+               .name  = "mem:0:u",
+               .check = test__checkevent_breakpoint_modifier,
+       },
+       {
+               .name  = "mem:0:x:k",
+               .check = test__checkevent_breakpoint_x_modifier,
+       },
+       {
+               .name  = "mem:0:r:hp",
+               .check = test__checkevent_breakpoint_r_modifier,
+       },
+       {
+               .name  = "mem:0:w:up",
+               .check = test__checkevent_breakpoint_w_modifier,
+       },
+       {
+               .name  = "cpu/config=10,config1,config2=3,period=1000/u",
+               .check = test__checkevent_pmu,
+       },
+       {
+               .name  = "r1,syscalls:sys_enter_open:k,1:1:hp",
+               .check = test__checkevent_list,
+       },
 };
 
 #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
@@ -960,10 +1116,9 @@ static int test__parse_events(void)
                }
 
                ret = e->check(evlist);
+               perf_evlist__delete(evlist);
                if (ret)
                        break;
-
-               perf_evlist__delete(evlist);
        }
 
        return ret;
@@ -1462,6 +1617,11 @@ static int test__rdpmc(void)
 
 #endif
 
+static int test__perf_pmu(void)
+{
+       return perf_pmu__test();
+}
+
 static struct test {
        const char *desc;
        int (*func)(void);
@@ -1496,6 +1656,10 @@ static struct test {
                .desc = "Validate PERF_RECORD_* events & perf_sample fields",
                .func = test__PERF_RECORD,
        },
+       {
+               .desc = "Test perf pmu format parsing",
+               .func = test__perf_pmu,
+       },
        {
                .func = NULL,
        },
index 6170fd2531b5e681ca1986fa0b231d3b3029382b..d9084e03ce56676628736c5814e27badbe2ef730 100644 (file)
@@ -65,6 +65,21 @@ int main(void)
 endef
 endif
 
+ifndef NO_GTK2
+define SOURCE_GTK2
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error \"-Wstrict-prototypes\"
+
+int main(int argc, char *argv[])
+{
+        gtk_init(&argc, &argv);
+
+        return 0;
+}
+endef
+endif
+
 ifndef NO_LIBPERL
 define SOURCE_PERL_EMBED
 #include <EXTERN.h>
index e5a462f1d07c0c0ab6cbcc08bd1e4a317bac6693..199f69ec656f7a77c5c490e2f1195c9e09226e98 100644 (file)
@@ -28,8 +28,8 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym)
 int symbol__alloc_hist(struct symbol *sym)
 {
        struct annotation *notes = symbol__annotation(sym);
-       size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
-                                 (sym->end - sym->start) * sizeof(u64));
+       const size_t size = sym->end - sym->start + 1;
+       size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
 
        notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
        if (notes->src == NULL)
@@ -64,7 +64,7 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 
        pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
 
-       if (addr >= sym->end)
+       if (addr > sym->end)
                return 0;
 
        offset = addr - sym->start;
@@ -408,7 +408,7 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
        if (!notes->src->lines)
                return -1;
 
-       start = map->unmap_ip(map, sym->start);
+       start = map__rip_2objdump(map, sym->start);
 
        for (i = 0; i < len; i++) {
                char *path = NULL;
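
The annotate fixes above treat sym->end as the last address covered by the symbol rather than one past it: the per-address histogram gets end - start + 1 slots and only addr > sym->end is rejected. A tiny sketch of that inclusive-range accounting; the types and names are illustrative, not perf's.

    #include <assert.h>
    #include <stdlib.h>

    struct sym_range { unsigned long start, end; };   /* end is inclusive */

    int main(void)
    {
            struct sym_range sym = { .start = 0x1000, .end = 0x10ff };
            size_t nr_addrs = sym.end - sym.start + 1;      /* 0x100, not 0xff */
            unsigned long *hits = calloc(nr_addrs, sizeof(*hits));
            unsigned long addr = 0x10ff;                    /* last covered byte */

            assert(!(addr > sym.end));                      /* still in range */
            hits[addr - sym.start]++;                       /* last valid slot */

            free(hits);
            return 0;
    }
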
index fc5e5a09d5b94102b7adaed11518dd8be2c0eb84..8dd224df3e54ce4f88c8f09b852c07e93656442c 100644 (file)
@@ -45,6 +45,18 @@ void setup_browser(bool fallback_to_pager);
 void exit_browser(bool wait_for_ok);
 #endif
 
+#ifdef NO_GTK2_SUPPORT
+static inline void perf_gtk_setup_browser(int argc __used, const char *argv[] __used, bool fallback_to_pager)
+{
+       if (fallback_to_pager)
+               setup_pager();
+}
+static inline void perf_gtk_exit_browser(bool wait_for_ok __used) {}
+#else
+void perf_gtk_setup_browser(int argc, const char *argv[], bool fallback_to_pager);
+void perf_gtk_exit_browser(bool wait_for_ok);
+#endif
+
 char *alias_lookup(const char *alias);
 int split_cmdline(char *cmdline, const char ***argv);
 
index 159263d17c2d31926672454004748b3583fe6f78..1986d8051bd16a9f133159545fb7083df3511d76 100644 (file)
@@ -51,13 +51,15 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
 void perf_evlist__config_attrs(struct perf_evlist *evlist,
                               struct perf_record_opts *opts)
 {
-       struct perf_evsel *evsel;
+       struct perf_evsel *evsel, *first;
 
        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;
 
+       first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
        list_for_each_entry(evsel, &evlist->entries, node) {
-               perf_evsel__config(evsel, opts);
+               perf_evsel__config(evsel, opts, first);
 
                if (evlist->nr_entries > 1)
                        evsel->attr.sample_type |= PERF_SAMPLE_ID;
index f421f7cbc0d34871a3ed7d58f064b1538547fc12..8c13dbcb84b93bee3527ebd3e53b48eaa23d5f11 100644 (file)
@@ -34,7 +34,7 @@ int __perf_evsel__sample_size(u64 sample_type)
        return size;
 }
 
-static void hists__init(struct hists *hists)
+void hists__init(struct hists *hists)
 {
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
@@ -63,7 +63,8 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
        return evsel;
 }
 
-void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
+void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
+                       struct perf_evsel *first)
 {
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */
@@ -134,7 +135,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
        attr->mmap = track;
        attr->comm = track;
 
-       if (!opts->target_pid && !opts->target_tid && !opts->system_wide) {
+       if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
+           (!opts->group || evsel == first)) {
                attr->disabled = 1;
                attr->enable_on_exec = 1;
        }
@@ -578,6 +580,8 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
                        return -EFAULT;
 
                data->raw_data = (void *) pdata;
+
+               array = (void *)array + data->raw_size + sizeof(u32);
        }
 
        if (type & PERF_SAMPLE_BRANCH_STACK) {
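
The last evsel.c hunk above advances the parse cursor past the PERF_SAMPLE_RAW payload, which is a u32 size followed by that many bytes, so whatever comes next (here the branch stack) is read from the right offset. A self-contained sketch of stepping over such a size-prefixed field; the buffer layout is simplified and ignores the alignment padding real perf records carry.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char buf[64];
            unsigned char *array = buf;
            uint32_t raw_size = 5, sz;
            uint64_t next_field = 0xabcdef, value;

            /* Build a record: u32 length, raw payload, then the next field. */
            memcpy(array, &raw_size, sizeof(raw_size));
            memcpy(array + sizeof(raw_size), "hello", raw_size);
            memcpy(array + sizeof(raw_size) + raw_size, &next_field,
                   sizeof(next_field));

            /* Parse it back: read the length, then advance the cursor past the
             * length word *and* the payload before reading what follows. */
            memcpy(&sz, array, sizeof(sz));
            array += sz + sizeof(uint32_t);

            memcpy(&value, array, sizeof(value));
            printf("next field: %#llx\n", (unsigned long long)value);
            return 0;
    }
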
index 326b8e4d503579cb7306e8c05d82d95e957aa2d3..3d6b3e4cb66bb9bfbb32b75958ef1f23a4880163 100644 (file)
@@ -80,7 +80,8 @@ void perf_evsel__exit(struct perf_evsel *evsel);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
 void perf_evsel__config(struct perf_evsel *evsel,
-                       struct perf_record_opts *opts);
+                       struct perf_record_opts *opts,
+                       struct perf_evsel *first);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
@@ -169,4 +170,6 @@ static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
        return __perf_evsel__sample_size(evsel->attr.sample_type);
 }
 
+void hists__init(struct hists *hists);
+
 #endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/gtk/browser.c b/tools/perf/util/gtk/browser.c
new file mode 100644 (file)
index 0000000..258352a
--- /dev/null
@@ -0,0 +1,189 @@
+#include "../evlist.h"
+#include "../cache.h"
+#include "../evsel.h"
+#include "../sort.h"
+#include "../hist.h"
+#include "gtk.h"
+
+#include <signal.h>
+
+#define MAX_COLUMNS                    32
+
+void perf_gtk_setup_browser(int argc, const char *argv[],
+                           bool fallback_to_pager __used)
+{
+       gtk_init(&argc, (char ***)&argv);
+}
+
+void perf_gtk_exit_browser(bool wait_for_ok __used)
+{
+       gtk_main_quit();
+}
+
+static void perf_gtk_signal(int sig)
+{
+       psignal(sig, "perf");
+       gtk_main_quit();
+}
+
+static void perf_gtk_resize_window(GtkWidget *window)
+{
+       GdkRectangle rect;
+       GdkScreen *screen;
+       int monitor;
+       int height;
+       int width;
+
+       screen = gtk_widget_get_screen(window);
+
+       monitor = gdk_screen_get_monitor_at_window(screen, window->window);
+
+       gdk_screen_get_monitor_geometry(screen, monitor, &rect);
+
+       width   = rect.width * 3 / 4;
+       height  = rect.height * 3 / 4;
+
+       gtk_window_resize(GTK_WINDOW(window), width, height);
+}
+
+static void perf_gtk_show_hists(GtkWidget *window, struct hists *hists)
+{
+       GType col_types[MAX_COLUMNS];
+       GtkCellRenderer *renderer;
+       struct sort_entry *se;
+       GtkListStore *store;
+       struct rb_node *nd;
+       u64 total_period;
+       GtkWidget *view;
+       int col_idx;
+       int nr_cols;
+
+       nr_cols = 0;
+
+       /* The percentage column */
+       col_types[nr_cols++] = G_TYPE_STRING;
+
+       list_for_each_entry(se, &hist_entry__sort_list, list) {
+               if (se->elide)
+                       continue;
+
+               col_types[nr_cols++] = G_TYPE_STRING;
+       }
+
+       store = gtk_list_store_newv(nr_cols, col_types);
+
+       view = gtk_tree_view_new();
+
+       renderer = gtk_cell_renderer_text_new();
+
+       col_idx = 0;
+
+       /* The percentage column */
+       gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+                                                   -1, "Overhead (%)",
+                                                   renderer, "text",
+                                                   col_idx++, NULL);
+
+       list_for_each_entry(se, &hist_entry__sort_list, list) {
+               if (se->elide)
+                       continue;
+
+               gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+                                                           -1, se->se_header,
+                                                           renderer, "text",
+                                                           col_idx++, NULL);
+       }
+
+       gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
+
+       g_object_unref(GTK_TREE_MODEL(store));
+
+       total_period = hists->stats.total_period;
+
+       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+               GtkTreeIter iter;
+               double percent;
+               char s[512];
+
+               if (h->filtered)
+                       continue;
+
+               gtk_list_store_append(store, &iter);
+
+               col_idx = 0;
+
+               percent = (h->period * 100.0) / total_period;
+
+               snprintf(s, ARRAY_SIZE(s), "%.2f", percent);
+
+               gtk_list_store_set(store, &iter, col_idx++, s, -1);
+
+               list_for_each_entry(se, &hist_entry__sort_list, list) {
+                       if (se->elide)
+                               continue;
+
+                       se->se_snprintf(h, s, ARRAY_SIZE(s),
+                                       hists__col_len(hists, se->se_width_idx));
+
+                       gtk_list_store_set(store, &iter, col_idx++, s, -1);
+               }
+       }
+
+       gtk_container_add(GTK_CONTAINER(window), view);
+}
+
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
+                                 const char *help __used,
+                                 void (*timer) (void *arg)__used,
+                                 void *arg __used, int delay_secs __used)
+{
+       struct perf_evsel *pos;
+       GtkWidget *notebook;
+       GtkWidget *window;
+
+       signal(SIGSEGV, perf_gtk_signal);
+       signal(SIGFPE,  perf_gtk_signal);
+       signal(SIGINT,  perf_gtk_signal);
+       signal(SIGQUIT, perf_gtk_signal);
+       signal(SIGTERM, perf_gtk_signal);
+
+       window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
+
+       gtk_window_set_title(GTK_WINDOW(window), "perf report");
+
+       g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
+
+       notebook = gtk_notebook_new();
+
+       list_for_each_entry(pos, &evlist->entries, node) {
+               struct hists *hists = &pos->hists;
+               const char *evname = event_name(pos);
+               GtkWidget *scrolled_window;
+               GtkWidget *tab_label;
+
+               scrolled_window = gtk_scrolled_window_new(NULL, NULL);
+
+               gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
+                                                       GTK_POLICY_AUTOMATIC,
+                                                       GTK_POLICY_AUTOMATIC);
+
+               perf_gtk_show_hists(scrolled_window, hists);
+
+               tab_label = gtk_label_new(evname);
+
+               gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
+       }
+
+       gtk_container_add(GTK_CONTAINER(window), notebook);
+
+       gtk_widget_show_all(window);
+
+       perf_gtk_resize_window(window);
+
+       gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
+
+       gtk_main();
+
+       return 0;
+}
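
For readers new to the GTK2 model/view split used by the browser above: rows are appended to a GtkListStore (the model) and a GtkTreeView renders them through a text cell renderer, one inserted column per sort key. A minimal version of that wiring, cut down to a single window, one column and one hard-coded row (the row text is made up):

    #include <gtk/gtk.h>

    int main(int argc, char *argv[])
    {
            GtkWidget *window, *view;
            GtkListStore *store;
            GtkCellRenderer *renderer;
            GtkTreeIter iter;

            gtk_init(&argc, &argv);

            window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
            g_signal_connect(window, "destroy", G_CALLBACK(gtk_main_quit), NULL);

            /* The model: one string column, one row. */
            store = gtk_list_store_new(1, G_TYPE_STRING);
            gtk_list_store_append(store, &iter);
            gtk_list_store_set(store, &iter, 0, "42.00%  schedule", -1);

            /* The view renders the model through a text cell renderer. */
            view = gtk_tree_view_new_with_model(GTK_TREE_MODEL(store));
            g_object_unref(store);

            renderer = gtk_cell_renderer_text_new();
            gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), -1,
                                                        "Overhead / Symbol",
                                                        renderer, "text", 0, NULL);

            gtk_container_add(GTK_CONTAINER(window), view);
            gtk_widget_show_all(window);
            gtk_main();
            return 0;
    }

It builds with the same pkg-config --cflags/--libs gtk+-2.0 flags that the Makefile hunk earlier in this diff feeds into BASIC_CFLAGS and EXTLIBS.
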
diff --git a/tools/perf/util/gtk/gtk.h b/tools/perf/util/gtk/gtk.h
new file mode 100644 (file)
index 0000000..75177ee
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _PERF_GTK_H_
+#define _PERF_GTK_H_ 1
+
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error "-Wstrict-prototypes"
+
+#endif /* _PERF_GTK_H_ */
index fcd9cf3ea63e24b7057a7c007aa12f25ad340e5d..4c7c2d73251f81af31f6394c394baa0d17b64043 100644 (file)
@@ -1177,7 +1177,7 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
                goto error;
 
        msz = sizeof(attr);
-       if (sz < (ssize_t)msz)
+       if (sz < msz)
                msz = sz;
 
        for (i = 0 ; i < nre; i++) {
index 3dc99a9b71f507ea777f8b7d2fc3b1f024a32c39..2ec4b60aff6c1efa5929c53e3c572b1d6504cfbd 100644 (file)
@@ -10,11 +10,14 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
 static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+                                         struct hist_entry *he);
 
 enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
+       HIST_FILTER__SYMBOL,
 };
 
 struct callchain_param callchain_param = {
@@ -420,6 +423,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
 {
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
+       hists__filter_entry_by_symbol(hists, he);
 }
 
 static void __hists__collapse_resort(struct hists *hists, bool threaded)
@@ -603,7 +607,7 @@ static void init_rem_hits(void)
        rem_hits.ms.sym = rem_sq_bracket;
 }
 
-static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
                                         u64 total_samples, int depth,
                                         int depth_mask, int left_margin)
 {
@@ -611,21 +615,16 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
        struct callchain_node *child;
        struct callchain_list *chain;
        int new_depth_mask = depth_mask;
-       u64 new_total;
        u64 remaining;
        size_t ret = 0;
        int i;
        uint entries_printed = 0;
 
-       if (callchain_param.mode == CHAIN_GRAPH_REL)
-               new_total = self->children_hit;
-       else
-               new_total = total_samples;
-
-       remaining = new_total;
+       remaining = total_samples;
 
-       node = rb_first(&self->rb_root);
+       node = rb_first(root);
        while (node) {
+               u64 new_total;
                u64 cumul;
 
                child = rb_entry(node, struct callchain_node, rb_node);
@@ -653,11 +652,17 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
                list_for_each_entry(chain, &child->val, list) {
                        ret += ipchain__fprintf_graph(fp, chain, depth,
                                                      new_depth_mask, i++,
-                                                     new_total,
+                                                     total_samples,
                                                      cumul,
                                                      left_margin);
                }
-               ret += __callchain__fprintf_graph(fp, child, new_total,
+
+               if (callchain_param.mode == CHAIN_GRAPH_REL)
+                       new_total = child->children_hit;
+               else
+                       new_total = total_samples;
+
+               ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
                                                  depth + 1,
                                                  new_depth_mask | (1 << depth),
                                                  left_margin);
@@ -667,61 +672,75 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
        }
 
        if (callchain_param.mode == CHAIN_GRAPH_REL &&
-               remaining && remaining != new_total) {
+               remaining && remaining != total_samples) {
 
                if (!rem_sq_bracket)
                        return ret;
 
                new_depth_mask &= ~(1 << (depth - 1));
-
                ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
-                                             new_depth_mask, 0, new_total,
+                                             new_depth_mask, 0, total_samples,
                                              remaining, left_margin);
        }
 
        return ret;
 }
 
-static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
                                       u64 total_samples, int left_margin)
 {
+       struct callchain_node *cnode;
        struct callchain_list *chain;
+       u32 entries_printed = 0;
        bool printed = false;
+       struct rb_node *node;
        int i = 0;
-       int ret = 0;
-       u32 entries_printed = 0;
-
-       list_for_each_entry(chain, &self->val, list) {
-               if (!i++ && sort__first_dimension == SORT_SYM)
-                       continue;
-
-               if (!printed) {
-                       ret += callchain__fprintf_left_margin(fp, left_margin);
-                       ret += fprintf(fp, "|\n");
-                       ret += callchain__fprintf_left_margin(fp, left_margin);
-                       ret += fprintf(fp, "---");
-
-                       left_margin += 3;
-                       printed = true;
-               } else
-                       ret += callchain__fprintf_left_margin(fp, left_margin);
+       int ret = 0;
 
-               if (chain->ms.sym)
-                       ret += fprintf(fp, " %s\n", chain->ms.sym->name);
-               else
-                       ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+       /*
+        * If we have a single callchain root, don't bother printing
+        * its percentage (100% in fractal mode and the same percentage
+        * as the hist entry in graph mode). This also avoids one level
+        * of column.
+        */
+       node = rb_first(root);
+       if (node && !rb_next(node)) {
+               cnode = rb_entry(node, struct callchain_node, rb_node);
+               list_for_each_entry(chain, &cnode->val, list) {
+                       /*
+                        * If we sort by symbol, the first entry is the same as
+                        * the symbol itself, so there is no need to print it
+                        * again.
+                        */
+                       if (!i++ && sort__first_dimension == SORT_SYM)
+                               continue;
+                       if (!printed) {
+                               ret += callchain__fprintf_left_margin(fp, left_margin);
+                               ret += fprintf(fp, "|\n");
+                               ret += callchain__fprintf_left_margin(fp, left_margin);
+                               ret += fprintf(fp, "---");
+                               left_margin += 3;
+                               printed = true;
+                       } else
+                               ret += callchain__fprintf_left_margin(fp, left_margin);
+
+                       if (chain->ms.sym)
+                               ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+                       else
+                               ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
 
-               if (++entries_printed == callchain_param.print_limit)
-                       break;
+                       if (++entries_printed == callchain_param.print_limit)
+                               break;
+               }
+               root = &cnode->rb_root;
        }
 
-       ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
-
-       return ret;
+       return ret + __callchain__fprintf_graph(fp, root, total_samples,
+                                               1, 1, left_margin);
 }
 
-static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
-                                     u64 total_samples)
+static size_t __callchain__fprintf_flat(FILE *fp,
+                                       struct callchain_node *self,
+                                       u64 total_samples)
 {
        struct callchain_list *chain;
        size_t ret = 0;
@@ -729,7 +748,7 @@ static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
        if (!self)
                return 0;
 
-       ret += callchain__fprintf_flat(fp, self->parent, total_samples);
+       ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
 
 
        list_for_each_entry(chain, &self->val, list) {
@@ -745,44 +764,58 @@ static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
        return ret;
 }
 
-static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
-                                           u64 total_samples, int left_margin,
-                                           FILE *fp)
+static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
+                                     u64 total_samples)
 {
-       struct rb_node *rb_node;
-       struct callchain_node *chain;
        size_t ret = 0;
        u32 entries_printed = 0;
+       struct rb_node *rb_node;
+       struct callchain_node *chain;
 
-       rb_node = rb_first(&he->sorted_chain);
+       rb_node = rb_first(self);
        while (rb_node) {
                double percent;
 
                chain = rb_entry(rb_node, struct callchain_node, rb_node);
                percent = chain->hit * 100.0 / total_samples;
-               switch (callchain_param.mode) {
-               case CHAIN_FLAT:
-                       ret += percent_color_fprintf(fp, "           %6.2f%%\n",
-                                                    percent);
-                       ret += callchain__fprintf_flat(fp, chain, total_samples);
-                       break;
-               case CHAIN_GRAPH_ABS: /* Falldown */
-               case CHAIN_GRAPH_REL:
-                       ret += callchain__fprintf_graph(fp, chain, total_samples,
-                                                       left_margin);
-               case CHAIN_NONE:
-               default:
-                       break;
-               }
+
+               ret += percent_color_fprintf(fp, "           %6.2f%%\n", percent);
+               ret += __callchain__fprintf_flat(fp, chain, total_samples);
                ret += fprintf(fp, "\n");
                if (++entries_printed == callchain_param.print_limit)
                        break;
+
                rb_node = rb_next(rb_node);
        }
 
        return ret;
 }
 
+static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
+                                           u64 total_samples, int left_margin,
+                                           FILE *fp)
+{
+       switch (callchain_param.mode) {
+       case CHAIN_GRAPH_REL:
+               return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
+                                               left_margin);
+               break;
+       case CHAIN_GRAPH_ABS:
+               return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
+                                               left_margin);
+               break;
+       case CHAIN_FLAT:
+               return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
+               break;
+       case CHAIN_NONE:
+               break;
+       default:
+               pr_err("Bad callchain mode\n");
+       }
+
+       return 0;
+}
+
 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
 {
        struct rb_node *next = rb_first(&hists->entries);
@@ -887,9 +920,9 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
                diff = new_percent - old_percent;
 
                if (fabs(diff) >= 0.01)
-                       ret += scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
+                       scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
                else
-                       ret += scnprintf(bf, sizeof(bf), " ");
+                       scnprintf(bf, sizeof(bf), " ");
 
                if (sep)
                        ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
@@ -898,9 +931,9 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
 
                if (show_displacement) {
                        if (displacement)
-                               ret += scnprintf(bf, sizeof(bf), "%+4ld", displacement);
+                               scnprintf(bf, sizeof(bf), "%+4ld", displacement);
                        else
-                               ret += scnprintf(bf, sizeof(bf), " ");
+                               scnprintf(bf, sizeof(bf), " ");
 
                        if (sep)
                                ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
@@ -1247,6 +1280,37 @@ void hists__filter_by_thread(struct hists *hists)
        }
 }
 
+static bool hists__filter_entry_by_symbol(struct hists *hists,
+                                         struct hist_entry *he)
+{
+       if (hists->symbol_filter_str != NULL &&
+           (!he->ms.sym || strstr(he->ms.sym->name,
+                                  hists->symbol_filter_str) == NULL)) {
+               he->filtered |= (1 << HIST_FILTER__SYMBOL);
+               return true;
+       }
+
+       return false;
+}
+
+void hists__filter_by_symbol(struct hists *hists)
+{
+       struct rb_node *nd;
+
+       hists->nr_entries = hists->stats.total_period = 0;
+       hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+       hists__reset_col_len(hists);
+
+       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+               if (hists__filter_entry_by_symbol(hists, h))
+                       continue;
+
+               hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
+       }
+}
+
 int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
 {
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
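
The symbol filter added above does not drop entries; it sets a bit in he->filtered whenever the symbol name lacks the filter substring, so the view can be recomputed when the filter changes. A small stand-alone sketch of that flag-and-skip pattern; the entry type and the flag bit are illustrative, not perf's.

    #include <stdio.h>
    #include <string.h>

    #define FILTER_SYMBOL (1 << 2)          /* illustrative flag bit */

    struct entry {
            const char *sym;
            unsigned int filtered;          /* bitmask; 0 means visible */
    };

    static void filter_by_symbol(struct entry *entries, int n, const char *filter)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (filter && (!entries[i].sym ||
                                   strstr(entries[i].sym, filter) == NULL))
                            entries[i].filtered |= FILTER_SYMBOL;   /* hide  */
                    else
                            entries[i].filtered &= ~FILTER_SYMBOL;  /* show  */
            }
    }

    int main(void)
    {
            struct entry e[] = {
                    { "schedule",         0 },
                    { "do_page_fault",    0 },
                    { "schedule_timeout", 0 },
            };
            int i;

            filter_by_symbol(e, 3, "sched");
            for (i = 0; i < 3; i++)
                    printf("%-17s%s\n", e[i].sym,
                           e[i].filtered ? " (filtered)" : "");
            return 0;
    }
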
index 9413f3e31fea8b5c1c305f1d914b7de2d3e12009..2cae9df40e04c0663ba5b2ccc753595b2731626d 100644 (file)
@@ -62,6 +62,7 @@ struct hists {
        const struct thread     *thread_filter;
        const struct dso        *dso_filter;
        const char              *uid_filter_str;
+       const char              *symbol_filter_str;
        pthread_mutex_t         lock;
        struct events_stats     stats;
        u64                     event_stream;
@@ -107,6 +108,7 @@ int hist_entry__annotate(struct hist_entry *self, size_t privsize);
 
 void hists__filter_by_dso(struct hists *hists);
 void hists__filter_by_thread(struct hists *hists);
+void hists__filter_by_symbol(struct hists *hists);
 
 u16 hists__col_len(struct hists *self, enum hist_column col);
 void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
@@ -145,6 +147,23 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
                                  int refresh);
 #endif
 
+#ifdef NO_GTK2_SUPPORT
+static inline
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used,
+                                 const char *help __used,
+                                 void(*timer)(void *arg) __used,
+                                 void *arg __used,
+                                 int refresh __used)
+{
+       return 0;
+}
+
+#else
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
+                                 void(*timer)(void *arg), void *arg,
+                                 int refresh);
+#endif
+
 unsigned int hists__sort_list_width(struct hists *self);
 
 #endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/include/linux/export.h b/tools/perf/util/include/linux/export.h
new file mode 100644 (file)
index 0000000..b43e2dc
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef PERF_LINUX_MODULE_H
+#define PERF_LINUX_MODULE_H
+
+#define EXPORT_SYMBOL(name)
+
+#endif
diff --git a/tools/perf/util/include/linux/module.h b/tools/perf/util/include/linux/module.h
deleted file mode 100644 (file)
index b43e2dc..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef PERF_LINUX_MODULE_H
-#define PERF_LINUX_MODULE_H
-
-#define EXPORT_SYMBOL(name)
-
-#endif
index c7a6f6faf91e4e15e326c7dd03d8aabe4a197c08..5b3a0ef4e2321523563c9da1e4c0449e3be4c013 100644 (file)
 #include "cache.h"
 #include "header.h"
 #include "debugfs.h"
+#include "parse-events-flex.h"
+#include "pmu.h"
+
+#define MAX_NAME_LEN 100
 
 struct event_symbol {
        u8              type;
@@ -19,11 +23,8 @@ struct event_symbol {
        const char      *alias;
 };
 
-enum event_result {
-       EVT_FAILED,
-       EVT_HANDLED,
-       EVT_HANDLED_ALL
-};
+int parse_events_parse(struct list_head *list, struct list_head *list_tmp,
+                      int *idx);
 
 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
@@ -354,7 +355,24 @@ const char *__event_name(int type, u64 config)
        return "unknown";
 }
 
-static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
+static int add_event(struct list_head *list, int *idx,
+                    struct perf_event_attr *attr, char *name)
+{
+       struct perf_evsel *evsel;
+
+       event_attr_init(attr);
+
+       evsel = perf_evsel__new(attr, (*idx)++);
+       if (!evsel)
+               return -ENOMEM;
+
+       list_add_tail(&evsel->node, list);
+
+       evsel->name = strdup(name);
+       return 0;
+}
+
+static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
 {
        int i, j;
        int n, longest = -1;
@@ -362,58 +380,57 @@ static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int
        for (i = 0; i < size; i++) {
                for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
                        n = strlen(names[i][j]);
-                       if (n > longest && !strncasecmp(*str, names[i][j], n))
+                       if (n > longest && !strncasecmp(str, names[i][j], n))
                                longest = n;
                }
-               if (longest > 0) {
-                       *str += longest;
+               if (longest > 0)
                        return i;
-               }
        }
 
        return -1;
 }
 
-static enum event_result
-parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
+int parse_events_add_cache(struct list_head *list, int *idx,
+                          char *type, char *op_result1, char *op_result2)
 {
-       const char *s = *str;
+       struct perf_event_attr attr;
+       char name[MAX_NAME_LEN];
        int cache_type = -1, cache_op = -1, cache_result = -1;
+       char *op_result[2] = { op_result1, op_result2 };
+       int i, n;
 
-       cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
        /*
         * No fallback - if we cannot get a clear cache type
         * then bail out:
         */
+       cache_type = parse_aliases(type, hw_cache,
+                                  PERF_COUNT_HW_CACHE_MAX);
        if (cache_type == -1)
-               return EVT_FAILED;
+               return -EINVAL;
 
-       while ((cache_op == -1 || cache_result == -1) && *s == '-') {
-               ++s;
+       n = snprintf(name, MAX_NAME_LEN, "%s", type);
+
+       for (i = 0; (i < 2) && (op_result[i]); i++) {
+               char *str = op_result[i];
+
+               n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
 
                if (cache_op == -1) {
-                       cache_op = parse_aliases(&s, hw_cache_op,
-                                               PERF_COUNT_HW_CACHE_OP_MAX);
+                       cache_op = parse_aliases(str, hw_cache_op,
+                                                PERF_COUNT_HW_CACHE_OP_MAX);
                        if (cache_op >= 0) {
                                if (!is_cache_op_valid(cache_type, cache_op))
-                                       return EVT_FAILED;
+                                       return -EINVAL;
                                continue;
                        }
                }
 
                if (cache_result == -1) {
-                       cache_result = parse_aliases(&s, hw_cache_result,
+                       cache_result = parse_aliases(str, hw_cache_result,
                                                PERF_COUNT_HW_CACHE_RESULT_MAX);
                        if (cache_result >= 0)
                                continue;
                }
-
-               /*
-                * Can't parse this as a cache op or result, so back up
-                * to the '-'.
-                */
-               --s;
-               break;
        }
 
        /*
@@ -428,20 +445,17 @@ parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
        if (cache_result == -1)
                cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
 
-       attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
-       attr->type = PERF_TYPE_HW_CACHE;
-
-       *str = s;
-       return EVT_HANDLED;
+       memset(&attr, 0, sizeof(attr));
+       attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
+       attr.type = PERF_TYPE_HW_CACHE;
+       return add_event(list, idx, &attr, name);
 }
 
-static enum event_result
-parse_single_tracepoint_event(char *sys_name,
-                             const char *evt_name,
-                             unsigned int evt_length,
-                             struct perf_event_attr *attr,
-                             const char **strp)
+static int add_tracepoint(struct list_head *list, int *idx,
+                         char *sys_name, char *evt_name)
 {
+       struct perf_event_attr attr;
+       char name[MAX_NAME_LEN];
        char evt_path[MAXPATHLEN];
        char id_buf[4];
        u64 id;
@@ -452,130 +466,80 @@ parse_single_tracepoint_event(char *sys_name,
 
        fd = open(evt_path, O_RDONLY);
        if (fd < 0)
-               return EVT_FAILED;
+               return -1;
 
        if (read(fd, id_buf, sizeof(id_buf)) < 0) {
                close(fd);
-               return EVT_FAILED;
+               return -1;
        }
 
        close(fd);
        id = atoll(id_buf);
-       attr->config = id;
-       attr->type = PERF_TYPE_TRACEPOINT;
-       *strp += strlen(sys_name) + evt_length + 1; /* + 1 for the ':' */
-
-       attr->sample_type |= PERF_SAMPLE_RAW;
-       attr->sample_type |= PERF_SAMPLE_TIME;
-       attr->sample_type |= PERF_SAMPLE_CPU;
-
-       attr->sample_period = 1;
 
+       memset(&attr, 0, sizeof(attr));
+       attr.config = id;
+       attr.type = PERF_TYPE_TRACEPOINT;
+       attr.sample_type |= PERF_SAMPLE_RAW;
+       attr.sample_type |= PERF_SAMPLE_TIME;
+       attr.sample_type |= PERF_SAMPLE_CPU;
+       attr.sample_period = 1;
 
-       return EVT_HANDLED;
+       snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
+       return add_event(list, idx, &attr, name);
 }
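
The id read above is the per-event numeric identifier the kernel exports for every tracepoint; it becomes attr.config of a PERF_TYPE_TRACEPOINT event. A standalone sketch of the same lookup, with the caveat that the debugfs path below is only an illustrative assumption (perf resolves tracing_events_path at runtime rather than hardcoding it):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Illustrative path only; perf uses tracing_events_path, resolved
	 * from the mounted debugfs, rather than a hardcoded location.
	 */
	const char *path =
		"/sys/kernel/debug/tracing/events/sched/sched_switch/id";
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* This value would be placed in attr.config for the tracepoint. */
	printf("tracepoint id = %lld\n", atoll(buf));
	return 0;
}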
 
-/* sys + ':' + event + ':' + flags*/
-#define MAX_EVOPT_LEN  (MAX_EVENT_LENGTH * 2 + 2 + 128)
-static enum event_result
-parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name,
-                               const char *evt_exp, char *flags)
+static int add_tracepoint_multi(struct list_head *list, int *idx,
+                               char *sys_name, char *evt_name)
 {
        char evt_path[MAXPATHLEN];
        struct dirent *evt_ent;
        DIR *evt_dir;
+       int ret = 0;
 
        snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
        evt_dir = opendir(evt_path);
-
        if (!evt_dir) {
                perror("Can't open event dir");
-               return EVT_FAILED;
+               return -1;
        }
 
-       while ((evt_ent = readdir(evt_dir))) {
-               char event_opt[MAX_EVOPT_LEN + 1];
-               int len;
-
+       while (!ret && (evt_ent = readdir(evt_dir))) {
                if (!strcmp(evt_ent->d_name, ".")
                    || !strcmp(evt_ent->d_name, "..")
                    || !strcmp(evt_ent->d_name, "enable")
                    || !strcmp(evt_ent->d_name, "filter"))
                        continue;
 
-               if (!strglobmatch(evt_ent->d_name, evt_exp))
+               if (!strglobmatch(evt_ent->d_name, evt_name))
                        continue;
 
-               len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
-                              evt_ent->d_name, flags ? ":" : "",
-                              flags ?: "");
-               if (len < 0)
-                       return EVT_FAILED;
-
-               if (parse_events(evlist, event_opt, 0))
-                       return EVT_FAILED;
+               ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
        }
 
-       return EVT_HANDLED_ALL;
+       return ret;
 }
 
-static enum event_result
-parse_tracepoint_event(struct perf_evlist *evlist, const char **strp,
-                      struct perf_event_attr *attr)
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+                               char *sys, char *event)
 {
-       const char *evt_name;
-       char *flags = NULL, *comma_loc;
-       char sys_name[MAX_EVENT_LENGTH];
-       unsigned int sys_length, evt_length;
-
-       if (debugfs_valid_mountpoint(tracing_events_path))
-               return 0;
-
-       evt_name = strchr(*strp, ':');
-       if (!evt_name)
-               return EVT_FAILED;
-
-       sys_length = evt_name - *strp;
-       if (sys_length >= MAX_EVENT_LENGTH)
-               return 0;
+       int ret;
 
-       strncpy(sys_name, *strp, sys_length);
-       sys_name[sys_length] = '\0';
-       evt_name = evt_name + 1;
+       ret = debugfs_valid_mountpoint(tracing_events_path);
+       if (ret)
+               return ret;
 
-       comma_loc = strchr(evt_name, ',');
-       if (comma_loc) {
-               /* take the event name up to the comma */
-               evt_name = strndup(evt_name, comma_loc - evt_name);
-       }
-       flags = strchr(evt_name, ':');
-       if (flags) {
-               /* split it out: */
-               evt_name = strndup(evt_name, flags - evt_name);
-               flags++;
-       }
-
-       evt_length = strlen(evt_name);
-       if (evt_length >= MAX_EVENT_LENGTH)
-               return EVT_FAILED;
-       if (strpbrk(evt_name, "*?")) {
-               *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
-               return parse_multiple_tracepoint_event(evlist, sys_name,
-                                                      evt_name, flags);
-       } else {
-               return parse_single_tracepoint_event(sys_name, evt_name,
-                                                    evt_length, attr, strp);
-       }
+       return strpbrk(event, "*?") ?
+              add_tracepoint_multi(list, idx, sys, event) :
+              add_tracepoint(list, idx, sys, event);
 }
 
-static enum event_result
-parse_breakpoint_type(const char *type, const char **strp,
-                     struct perf_event_attr *attr)
+static int
+parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
 {
        int i;
 
        for (i = 0; i < 3; i++) {
-               if (!type[i])
+               if (!type || !type[i])
                        break;
 
                switch (type[i]) {
@@ -589,164 +553,146 @@ parse_breakpoint_type(const char *type, const char **strp,
                        attr->bp_type |= HW_BREAKPOINT_X;
                        break;
                default:
-                       return EVT_FAILED;
+                       return -EINVAL;
                }
        }
+
        if (!attr->bp_type) /* Default */
                attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
 
-       *strp = type + i;
-
-       return EVT_HANDLED;
+       return 0;
 }
 
-static enum event_result
-parse_breakpoint_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+                               void *ptr, char *type)
 {
-       const char *target;
-       const char *type;
-       char *endaddr;
-       u64 addr;
-       enum event_result err;
-
-       target = strchr(*strp, ':');
-       if (!target)
-               return EVT_FAILED;
-
-       if (strncmp(*strp, "mem", target - *strp) != 0)
-               return EVT_FAILED;
-
-       target++;
-
-       addr = strtoull(target, &endaddr, 0);
-       if (target == endaddr)
-               return EVT_FAILED;
-
-       attr->bp_addr = addr;
-       *strp = endaddr;
+       struct perf_event_attr attr;
+       char name[MAX_NAME_LEN];
 
-       type = strchr(target, ':');
+       memset(&attr, 0, sizeof(attr));
+       attr.bp_addr = (unsigned long) ptr;
 
-       /* If no type is defined, just rw as default */
-       if (!type) {
-               attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
-       } else {
-               err = parse_breakpoint_type(++type, strp, attr);
-               if (err == EVT_FAILED)
-                       return EVT_FAILED;
-       }
+       if (parse_breakpoint_type(type, &attr))
+               return -EINVAL;
 
        /*
         * We should find a nice way to override the access length
         * Provide some defaults for now
         */
-       if (attr->bp_type == HW_BREAKPOINT_X)
-               attr->bp_len = sizeof(long);
+       if (attr.bp_type == HW_BREAKPOINT_X)
+               attr.bp_len = sizeof(long);
        else
-               attr->bp_len = HW_BREAKPOINT_LEN_4;
+               attr.bp_len = HW_BREAKPOINT_LEN_4;
 
-       attr->type = PERF_TYPE_BREAKPOINT;
+       attr.type = PERF_TYPE_BREAKPOINT;
 
-       return EVT_HANDLED;
+       snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
+       return add_event(list, idx, &attr, name);
 }
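
To make the breakpoint mapping concrete, here is a minimal sketch, using only the uapi headers, of the attribute an event spec such as mem:0x1000:w is intended to produce once the grammar has handed the address and modifier string to parse_events_add_breakpoint():

#include <stdio.h>
#include <string.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type    = PERF_TYPE_BREAKPOINT;
	attr.bp_addr = 0x1000;              /* the PE_VALUE after "mem:" */
	attr.bp_type = HW_BREAKPOINT_W;     /* the 'w' modifier */
	attr.bp_len  = HW_BREAKPOINT_LEN_4; /* default length for R/W breakpoints */

	printf("bp_addr=0x%llx bp_type=%u bp_len=%llu\n",
	       (unsigned long long)attr.bp_addr, attr.bp_type,
	       (unsigned long long)attr.bp_len);
	return 0;
}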
 
-static int check_events(const char *str, unsigned int i)
+static int config_term(struct perf_event_attr *attr,
+                      struct parse_events__term *term)
 {
-       int n;
+       switch (term->type) {
+       case PARSE_EVENTS__TERM_TYPE_CONFIG:
+               attr->config = term->val.num;
+               break;
+       case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+               attr->config1 = term->val.num;
+               break;
+       case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+               attr->config2 = term->val.num;
+               break;
+       case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+               attr->sample_period = term->val.num;
+               break;
+       case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+               /*
+                * TODO uncomment when the field is available
+                * attr->branch_sample_type = term->val.num;
+                */
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
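
To make the term-to-field mapping above concrete, a standalone sketch (plain perf_event_attr, no perf internals) of what a spec such as cpu/config=0x1234,config1=0x2,period=4000/ is expected to leave behind after config_term() has processed each hardcoded term:

#include <stdio.h>
#include <string.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));

	attr.config        = 0x1234; /* "config=0x1234" -> TERM_TYPE_CONFIG        */
	attr.config1       = 0x2;    /* "config1=0x2"   -> TERM_TYPE_CONFIG1       */
	attr.sample_period = 4000;   /* "period=4000"   -> TERM_TYPE_SAMPLE_PERIOD */

	printf("config=0x%llx config1=0x%llx period=%llu\n",
	       (unsigned long long)attr.config,
	       (unsigned long long)attr.config1,
	       (unsigned long long)attr.sample_period);
	return 0;
}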
 
-       n = strlen(event_symbols[i].symbol);
-       if (!strncasecmp(str, event_symbols[i].symbol, n))
-               return n;
+static int config_attr(struct perf_event_attr *attr,
+                      struct list_head *head, int fail)
+{
+       struct parse_events__term *term;
 
-       n = strlen(event_symbols[i].alias);
-       if (n) {
-               if (!strncasecmp(str, event_symbols[i].alias, n))
-                       return n;
-       }
+       list_for_each_entry(term, head, list)
+               if (config_term(attr, term) && fail)
+                       return -EINVAL;
 
        return 0;
 }
 
-static enum event_result
-parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_numeric(struct list_head *list, int *idx,
+                            unsigned long type, unsigned long config,
+                            struct list_head *head_config)
 {
-       const char *str = *strp;
-       unsigned int i;
-       int n;
-
-       for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
-               n = check_events(str, i);
-               if (n > 0) {
-                       attr->type = event_symbols[i].type;
-                       attr->config = event_symbols[i].config;
-                       *strp = str + n;
-                       return EVT_HANDLED;
-               }
-       }
-       return EVT_FAILED;
+       struct perf_event_attr attr;
+
+       memset(&attr, 0, sizeof(attr));
+       attr.type = type;
+       attr.config = config;
+
+       if (head_config &&
+           config_attr(&attr, head_config, 1))
+               return -EINVAL;
+
+       return add_event(list, idx, &attr,
+                        (char *) __event_name(type, config));
 }
 
-static enum event_result
-parse_raw_event(const char **strp, struct perf_event_attr *attr)
+int parse_events_add_pmu(struct list_head *list, int *idx,
+                        char *name, struct list_head *head_config)
 {
-       const char *str = *strp;
-       u64 config;
-       int n;
-
-       if (*str != 'r')
-               return EVT_FAILED;
-       n = hex2u64(str + 1, &config);
-       if (n > 0) {
-               const char *end = str + n + 1;
-               if (*end != '\0' && *end != ',' && *end != ':')
-                       return EVT_FAILED;
-
-               *strp = end;
-               attr->type = PERF_TYPE_RAW;
-               attr->config = config;
-               return EVT_HANDLED;
-       }
-       return EVT_FAILED;
+       struct perf_event_attr attr;
+       struct perf_pmu *pmu;
+
+       pmu = perf_pmu__find(name);
+       if (!pmu)
+               return -EINVAL;
+
+       memset(&attr, 0, sizeof(attr));
+
+       /*
+        * Configure hardcoded terms first, no need to check
+        * return value when called with fail == 0 ;)
+        */
+       config_attr(&attr, head_config, 0);
+
+       if (perf_pmu__config(pmu, &attr, head_config))
+               return -EINVAL;
+
+       return add_event(list, idx, &attr, (char *) "pmu");
 }
 
-static enum event_result
-parse_numeric_event(const char **strp, struct perf_event_attr *attr)
+void parse_events_update_lists(struct list_head *list_event,
+                              struct list_head *list_all)
 {
-       const char *str = *strp;
-       char *endp;
-       unsigned long type;
-       u64 config;
-
-       type = strtoul(str, &endp, 0);
-       if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
-               str = endp + 1;
-               config = strtoul(str, &endp, 0);
-               if (endp > str) {
-                       attr->type = type;
-                       attr->config = config;
-                       *strp = endp;
-                       return EVT_HANDLED;
-               }
-       }
-       return EVT_FAILED;
+       /*
+        * Called for a single event definition. Update the
+        * 'all events' list and reinit the 'single event'
+        * list for the next event definition.
+        */
+       list_splice_tail(list_event, list_all);
+       INIT_LIST_HEAD(list_event);
 }
 
-static int
-parse_event_modifier(const char **strp, struct perf_event_attr *attr)
+int parse_events_modifier(struct list_head *list, char *str)
 {
-       const char *str = *strp;
+       struct perf_evsel *evsel;
        int exclude = 0, exclude_GH = 0;
        int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0;
 
-       if (!*str)
+       if (str == NULL)
                return 0;
 
-       if (*str == ',')
-               return 0;
-
-       if (*str++ != ':')
-               return -1;
-
        while (*str) {
                if (*str == 'u') {
                        if (!exclude)
@@ -775,111 +721,62 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
 
                ++str;
        }
-       if (str < *strp + 2)
-               return -1;
 
-       *strp = str;
+       /*
+        * precise ip:
+        *
+        *  0 - SAMPLE_IP can have arbitrary skid
+        *  1 - SAMPLE_IP must have constant skid
+        *  2 - SAMPLE_IP requested to have 0 skid
+        *  3 - SAMPLE_IP must have 0 skid
+        *
+        *  See also PERF_RECORD_MISC_EXACT_IP
+        */
+       if (precise > 3)
+               return -EINVAL;
 
-       attr->exclude_user   = eu;
-       attr->exclude_kernel = ek;
-       attr->exclude_hv     = eh;
-       attr->precise_ip     = precise;
-       attr->exclude_host   = eH;
-       attr->exclude_guest  = eG;
+       list_for_each_entry(evsel, list, node) {
+               evsel->attr.exclude_user   = eu;
+               evsel->attr.exclude_kernel = ek;
+               evsel->attr.exclude_hv     = eh;
+               evsel->attr.precise_ip     = precise;
+               evsel->attr.exclude_host   = eH;
+               evsel->attr.exclude_guest  = eG;
+       }
 
        return 0;
 }
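
As an illustration of the modifier semantics, a minimal sketch of the attribute bits a spec like cycles:u is expected to end up with, based on the 'u' branch at the top of the loop (everything is first marked excluded, then exclude_user is cleared, so only user space is counted):

#include <stdio.h>
#include <string.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* "cycles:u": count user space only. */
	attr.exclude_user   = 0;
	attr.exclude_kernel = 1;
	attr.exclude_hv     = 1;

	printf("exclude_user=%u exclude_kernel=%u exclude_hv=%u\n",
	       (unsigned int)attr.exclude_user,
	       (unsigned int)attr.exclude_kernel,
	       (unsigned int)attr.exclude_hv);
	return 0;
}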
 
-/*
- * Each event can have multiple symbolic names.
- * Symbolic names are (almost) exactly matched.
- */
-static enum event_result
-parse_event_symbols(struct perf_evlist *evlist, const char **str,
-                   struct perf_event_attr *attr)
+int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
 {
-       enum event_result ret;
-
-       ret = parse_tracepoint_event(evlist, str, attr);
-       if (ret != EVT_FAILED)
-               goto modifier;
-
-       ret = parse_raw_event(str, attr);
-       if (ret != EVT_FAILED)
-               goto modifier;
+       LIST_HEAD(list);
+       LIST_HEAD(list_tmp);
+       YY_BUFFER_STATE buffer;
+       int ret, idx = evlist->nr_entries;
 
-       ret = parse_numeric_event(str, attr);
-       if (ret != EVT_FAILED)
-               goto modifier;
+       buffer = parse_events__scan_string(str);
 
-       ret = parse_symbolic_event(str, attr);
-       if (ret != EVT_FAILED)
-               goto modifier;
+       ret = parse_events_parse(&list, &list_tmp, &idx);
 
-       ret = parse_generic_hw_event(str, attr);
-       if (ret != EVT_FAILED)
-               goto modifier;
+       parse_events__flush_buffer(buffer);
+       parse_events__delete_buffer(buffer);
 
-       ret = parse_breakpoint_event(str, attr);
-       if (ret != EVT_FAILED)
-               goto modifier;
-
-       fprintf(stderr, "invalid or unsupported event: '%s'\n", *str);
-       fprintf(stderr, "Run 'perf list' for a list of valid events\n");
-       return EVT_FAILED;
-
-modifier:
-       if (parse_event_modifier(str, attr) < 0) {
-               fprintf(stderr, "invalid event modifier: '%s'\n", *str);
-               fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n");
-
-               return EVT_FAILED;
+       if (!ret) {
+               int entries = idx - evlist->nr_entries;
+               perf_evlist__splice_list_tail(evlist, &list, entries);
+               return 0;
        }
 
+       /*
+        * There are 2 users - builtin-record and builtin-test objects.
+        * Both call perf_evlist__delete in case of error, so we don't
+        * need to bother with cleanup here.
+        */
+       fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
+       fprintf(stderr, "Run 'perf list' for a list of valid events\n");
        return ret;
 }
 
-int parse_events(struct perf_evlist *evlist , const char *str, int unset __used)
-{
-       struct perf_event_attr attr;
-       enum event_result ret;
-       const char *ostr;
-
-       for (;;) {
-               ostr = str;
-               memset(&attr, 0, sizeof(attr));
-               event_attr_init(&attr);
-               ret = parse_event_symbols(evlist, &str, &attr);
-               if (ret == EVT_FAILED)
-                       return -1;
-
-               if (!(*str == 0 || *str == ',' || isspace(*str)))
-                       return -1;
-
-               if (ret != EVT_HANDLED_ALL) {
-                       struct perf_evsel *evsel;
-                       evsel = perf_evsel__new(&attr, evlist->nr_entries);
-                       if (evsel == NULL)
-                               return -1;
-                       perf_evlist__add(evlist, evsel);
-
-                       evsel->name = calloc(str - ostr + 1, 1);
-                       if (!evsel->name)
-                               return -1;
-                       strncpy(evsel->name, ostr, str - ostr);
-               }
-
-               if (*str == 0)
-                       break;
-               if (*str == ',')
-                       ++str;
-               while (isspace(*str))
-                       ++str;
-       }
-
-       return 0;
-}
-
 int parse_events_option(const struct option *opt, const char *str,
                        int unset __used)
 {
@@ -1052,8 +949,6 @@ int print_hwcache_events(const char *event_glob)
        return printed;
 }
 
-#define MAX_NAME_LEN 100
-
 /*
  * Print the help text for the event symbols:
  */
@@ -1102,8 +997,12 @@ void print_events(const char *event_glob)
 
        printf("\n");
        printf("  %-50s [%s]\n",
-               "rNNN (see 'perf list --help' on how to encode it)",
+              "rNNN",
               event_type_descriptors[PERF_TYPE_RAW]);
+       printf("  %-50s [%s]\n",
+              "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
+              event_type_descriptors[PERF_TYPE_RAW]);
+       printf("   (see 'perf list --help' on how to encode it)\n");
        printf("\n");
 
        printf("  %-50s [%s]\n",
@@ -1113,3 +1012,51 @@ void print_events(const char *event_glob)
 
        print_tracepoint_events(NULL, NULL);
 }
+
+int parse_events__is_hardcoded_term(struct parse_events__term *term)
+{
+       return term->type <= PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX;
+}
+
+int parse_events__new_term(struct parse_events__term **_term, int type,
+                          char *config, char *str, long num)
+{
+       struct parse_events__term *term;
+
+       term = zalloc(sizeof(*term));
+       if (!term)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&term->list);
+       term->type = type;
+       term->config = config;
+
+       switch (type) {
+       case PARSE_EVENTS__TERM_TYPE_CONFIG:
+       case PARSE_EVENTS__TERM_TYPE_CONFIG1:
+       case PARSE_EVENTS__TERM_TYPE_CONFIG2:
+       case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+       case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+       case PARSE_EVENTS__TERM_TYPE_NUM:
+               term->val.num = num;
+               break;
+       case PARSE_EVENTS__TERM_TYPE_STR:
+               term->val.str = str;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *_term = term;
+       return 0;
+}
+
+void parse_events__free_terms(struct list_head *terms)
+{
+       struct parse_events__term *term, *h;
+
+       list_for_each_entry_safe(term, h, terms, list)
+               free(term);
+
+       free(terms);
+}
index 7e0cbe75d5f1f79560e2b6cbdd2b2205aea1a359..ca069f893381c3b87081a12abcc3031562b83ab8 100644 (file)
@@ -33,6 +33,55 @@ extern int parse_filter(const struct option *opt, const char *str, int unset);
 
 #define EVENTS_HELP_MAX (128*1024)
 
+enum {
+       PARSE_EVENTS__TERM_TYPE_CONFIG,
+       PARSE_EVENTS__TERM_TYPE_CONFIG1,
+       PARSE_EVENTS__TERM_TYPE_CONFIG2,
+       PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
+       PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+       PARSE_EVENTS__TERM_TYPE_NUM,
+       PARSE_EVENTS__TERM_TYPE_STR,
+
+       PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX =
+               PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+};
+
+struct parse_events__term {
+       char *config;
+       union {
+               char *str;
+               long  num;
+       } val;
+       int type;
+
+       struct list_head list;
+};
+
+int parse_events__is_hardcoded_term(struct parse_events__term *term);
+int parse_events__new_term(struct parse_events__term **term, int type,
+                          char *config, char *str, long num);
+void parse_events__free_terms(struct list_head *terms);
+int parse_events_modifier(struct list_head *list __used, char *str __used);
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
+                               char *sys, char *event);
+int parse_events_add_raw(struct perf_evlist *evlist, unsigned long config,
+                        unsigned long config1, unsigned long config2,
+                        char *mod);
+int parse_events_add_numeric(struct list_head *list, int *idx,
+                            unsigned long type, unsigned long config,
+                            struct list_head *head_config);
+int parse_events_add_cache(struct list_head *list, int *idx,
+                          char *type, char *op_result1, char *op_result2);
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
+                               void *ptr, char *type);
+int parse_events_add_pmu(struct list_head *list, int *idx,
+                        char *pmu , struct list_head *head_config);
+void parse_events_update_lists(struct list_head *list_event,
+                              struct list_head *list_all);
+void parse_events_error(struct list_head *list_all,
+                       struct list_head *list_event,
+                       int *idx, char const *msg);
+
 void print_events(const char *event_glob);
 void print_events_type(u8 type);
 void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
new file mode 100644 (file)
index 0000000..05d766e
--- /dev/null
@@ -0,0 +1,127 @@
+
+%option prefix="parse_events_"
+
+%{
+#include <errno.h>
+#include "../perf.h"
+#include "parse-events-bison.h"
+#include "parse-events.h"
+
+static int __value(char *str, int base, int token)
+{
+       long num;
+
+       errno = 0;
+       num = strtoul(str, NULL, base);
+       if (errno)
+               return PE_ERROR;
+
+       parse_events_lval.num = num;
+       return token;
+}
+
+static int value(int base)
+{
+       return __value(parse_events_text, base, PE_VALUE);
+}
+
+static int raw(void)
+{
+       return __value(parse_events_text + 1, 16, PE_RAW);
+}
+
+static int str(int token)
+{
+       parse_events_lval.str = strdup(parse_events_text);
+       return token;
+}
+
+static int sym(int type, int config)
+{
+       parse_events_lval.num = (type << 16) + config;
+       return PE_VALUE_SYM;
+}
+
+static int term(int type)
+{
+       parse_events_lval.num = type;
+       return PE_TERM;
+}
+
+%}
+
+num_dec                [0-9]+
+num_hex                0x[a-fA-F0-9]+
+num_raw_hex    [a-fA-F0-9]+
+name           [a-zA-Z_*?][a-zA-Z0-9_*?]*
+modifier_event [ukhp]{1,5}
+modifier_bp    [rwx]
+
+%%
+cpu-cycles|cycles                              { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions                                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references                               { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses                                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches                   { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses                                  { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles                                     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles                                     { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock                                      { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock                                     { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults                             { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults                                   { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults                                   { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs                            { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations                      { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults                               { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults                               { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+
+L1-dcache|l1-d|l1d|L1-data             |
+L1-icache|l1-i|l1i|L1-instruction      |
+LLC|L2                                 |
+dTLB|d-tlb|Data-TLB                    |
+iTLB|i-tlb|Instruction-TLB             |
+branch|branches|bpu|btb|bpc            |
+node                                   { return str(PE_NAME_CACHE_TYPE); }
+
+load|loads|read                                |
+store|stores|write                     |
+prefetch|prefetches                    |
+speculative-read|speculative-load      |
+refs|Reference|ops|access              |
+misses|miss                            { return str(PE_NAME_CACHE_OP_RESULT); }
+
+       /*
+        * These are the hardcoded event config term names that can appear
+        * within the xxx/.../ syntax. So far we don't clash with other names,
+        * so we can put them here directly. In case we have a conflict
+        * in the future, this needs to go into a '//' condition block.
+        */
+config                 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1                        { return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2                        { return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+period                 { return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type            { return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+
+mem:                   { return PE_PREFIX_MEM; }
+r{num_raw_hex}         { return raw(); }
+{num_dec}              { return value(10); }
+{num_hex}              { return value(16); }
+
+{modifier_event}       { return str(PE_MODIFIER_EVENT); }
+{modifier_bp}          { return str(PE_MODIFIER_BP); }
+{name}                 { return str(PE_NAME); }
+"/"                    { return '/'; }
+-                      { return '-'; }
+,                      { return ','; }
+:                      { return ':'; }
+=                      { return '='; }
+
+%%
+
+int parse_events_wrap(void)
+{
+       return 1;
+}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
new file mode 100644 (file)
index 0000000..d9637da
--- /dev/null
@@ -0,0 +1,229 @@
+
+%name-prefix "parse_events_"
+%parse-param {struct list_head *list_all}
+%parse-param {struct list_head *list_event}
+%parse-param {int *idx}
+
+%{
+
+#define YYDEBUG 1
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include "types.h"
+#include "util.h"
+#include "parse-events.h"
+
+extern int parse_events_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+       if (val) \
+               YYABORT; \
+} while (0)
+
+%}
+
+%token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM
+%token PE_NAME
+%token PE_MODIFIER_EVENT PE_MODIFIER_BP
+%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
+%token PE_PREFIX_MEM PE_PREFIX_RAW
+%token PE_ERROR
+%type <num> PE_VALUE
+%type <num> PE_VALUE_SYM
+%type <num> PE_RAW
+%type <num> PE_TERM
+%type <str> PE_NAME
+%type <str> PE_NAME_CACHE_TYPE
+%type <str> PE_NAME_CACHE_OP_RESULT
+%type <str> PE_MODIFIER_EVENT
+%type <str> PE_MODIFIER_BP
+%type <head> event_config
+%type <term> event_term
+
+%union
+{
+       char *str;
+       unsigned long num;
+       struct list_head *head;
+       struct parse_events__term *term;
+}
+%%
+
+events:
+events ',' event | event
+
+event:
+event_def PE_MODIFIER_EVENT
+{
+       /*
+        * Apply the modifier to all events added by a single event definition
+        * (there could be more events added for multiple tracepoint
+        * definitions via '*?').
+        */
+       ABORT_ON(parse_events_modifier(list_event, $2));
+       parse_events_update_lists(list_event, list_all);
+}
+|
+event_def
+{
+       parse_events_update_lists(list_event, list_all);
+}
+
+event_def: event_pmu |
+          event_legacy_symbol |
+          event_legacy_cache sep_dc |
+          event_legacy_mem |
+          event_legacy_tracepoint sep_dc |
+          event_legacy_numeric sep_dc |
+          event_legacy_raw sep_dc
+
+event_pmu:
+PE_NAME '/' event_config '/'
+{
+       ABORT_ON(parse_events_add_pmu(list_event, idx, $1, $3));
+       parse_events__free_terms($3);
+}
+
+event_legacy_symbol:
+PE_VALUE_SYM '/' event_config '/'
+{
+       int type = $1 >> 16;
+       int config = $1 & 255;
+
+       ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, $3));
+       parse_events__free_terms($3);
+}
+|
+PE_VALUE_SYM sep_slash_dc
+{
+       int type = $1 >> 16;
+       int config = $1 & 255;
+
+       ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, NULL));
+}
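+
The $1 here is the packed value that sym() in the lexer produces, (type << 16) + config, so the action simply reverses that packing. A tiny standalone round-trip sketch, assuming only the standard PERF_TYPE_*/PERF_COUNT_* values:

#include <stdio.h>
#include <linux/perf_event.h>

int main(void)
{
	/* What the lexer returns for "task-clock": (type << 16) + config. */
	unsigned long packed =
		(PERF_TYPE_SOFTWARE << 16) + PERF_COUNT_SW_TASK_CLOCK;

	/* What the event_legacy_symbol action recovers. */
	int type   = packed >> 16;  /* PERF_TYPE_SOFTWARE       */
	int config = packed & 255;  /* PERF_COUNT_SW_TASK_CLOCK */

	printf("type=%d config=%d\n", type, config);
	return 0;
}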
+
+event_legacy_cache:
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
+{
+       ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, $5));
+}
+|
+PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
+{
+       ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, NULL));
+}
+|
+PE_NAME_CACHE_TYPE
+{
+       ABORT_ON(parse_events_add_cache(list_event, idx, $1, NULL, NULL));
+}
+
+event_legacy_mem:
+PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
+{
+       ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, $4));
+}
+|
+PE_PREFIX_MEM PE_VALUE sep_dc
+{
+       ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, NULL));
+}
+
+event_legacy_tracepoint:
+PE_NAME ':' PE_NAME
+{
+       ABORT_ON(parse_events_add_tracepoint(list_event, idx, $1, $3));
+}
+
+event_legacy_numeric:
+PE_VALUE ':' PE_VALUE
+{
+       ABORT_ON(parse_events_add_numeric(list_event, idx, $1, $3, NULL));
+}
+
+event_legacy_raw:
+PE_RAW
+{
+       ABORT_ON(parse_events_add_numeric(list_event, idx, PERF_TYPE_RAW, $1, NULL));
+}
+
+event_config:
+event_config ',' event_term
+{
+       struct list_head *head = $1;
+       struct parse_events__term *term = $3;
+
+       ABORT_ON(!head);
+       list_add_tail(&term->list, head);
+       $$ = $1;
+}
+|
+event_term
+{
+       struct list_head *head = malloc(sizeof(*head));
+       struct parse_events__term *term = $1;
+
+       ABORT_ON(!head);
+       INIT_LIST_HEAD(head);
+       list_add_tail(&term->list, head);
+       $$ = head;
+}
+
+event_term:
+PE_NAME '=' PE_NAME
+{
+       struct parse_events__term *term;
+
+       ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_STR,
+                $1, $3, 0));
+       $$ = term;
+}
+|
+PE_NAME '=' PE_VALUE
+{
+       struct parse_events__term *term;
+
+       ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
+                $1, NULL, $3));
+       $$ = term;
+}
+|
+PE_NAME
+{
+       struct parse_events__term *term;
+
+       ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
+                $1, NULL, 1));
+       $$ = term;
+}
+|
+PE_TERM '=' PE_VALUE
+{
+       struct parse_events__term *term;
+
+       ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, $3));
+       $$ = term;
+}
+|
+PE_TERM
+{
+       struct parse_events__term *term;
+
+       ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, 1));
+       $$ = term;
+}
+
+sep_dc: ':' |
+
+sep_slash_dc: '/' | ':' |
+
+%%
+
+void parse_events_error(struct list_head *list_all __used,
+                       struct list_head *list_event __used,
+                       int *idx __used,
+                       char const *msg __used)
+{
+}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
new file mode 100644 (file)
index 0000000..cb08a11
--- /dev/null
@@ -0,0 +1,469 @@
+
+#include <linux/list.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <dirent.h>
+#include "sysfs.h"
+#include "util.h"
+#include "pmu.h"
+#include "parse-events.h"
+
+int perf_pmu_parse(struct list_head *list, char *name);
+extern FILE *perf_pmu_in;
+
+static LIST_HEAD(pmus);
+
+/*
+ * Parse & process all the sysfs attributes located under
+ * the directory specified by the 'dir' parameter.
+ */
+static int pmu_format_parse(char *dir, struct list_head *head)
+{
+       struct dirent *evt_ent;
+       DIR *format_dir;
+       int ret = 0;
+
+       format_dir = opendir(dir);
+       if (!format_dir)
+               return -EINVAL;
+
+       while (!ret && (evt_ent = readdir(format_dir))) {
+               char path[PATH_MAX];
+               char *name = evt_ent->d_name;
+               FILE *file;
+
+               if (!strcmp(name, ".") || !strcmp(name, ".."))
+                       continue;
+
+               snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+               ret = -EINVAL;
+               file = fopen(path, "r");
+               if (!file)
+                       break;
+
+               perf_pmu_in = file;
+               ret = perf_pmu_parse(head, name);
+               fclose(file);
+       }
+
+       closedir(format_dir);
+       return ret;
+}
+
+/*
+ * Reading/parsing the default pmu format definition, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
+ */
+static int pmu_format(char *name, struct list_head *format)
+{
+       struct stat st;
+       char path[PATH_MAX];
+       const char *sysfs;
+
+       sysfs = sysfs_find_mountpoint();
+       if (!sysfs)
+               return -1;
+
+       snprintf(path, PATH_MAX,
+                "%s/bus/event_source/devices/%s/format", sysfs, name);
+
+       if (stat(path, &st) < 0)
+               return -1;
+
+       if (pmu_format_parse(path, format))
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Reading/parsing the default pmu type value, which should be
+ * located at:
+ * /sys/bus/event_source/devices/<dev>/type as sysfs attribute.
+ */
+static int pmu_type(char *name, __u32 *type)
+{
+       struct stat st;
+       char path[PATH_MAX];
+       const char *sysfs;
+       FILE *file;
+       int ret = 0;
+
+       sysfs = sysfs_find_mountpoint();
+       if (!sysfs)
+               return -1;
+
+       snprintf(path, PATH_MAX,
+                "%s/bus/event_source/devices/%s/type", sysfs, name);
+
+       if (stat(path, &st) < 0)
+               return -1;
+
+       file = fopen(path, "r");
+       if (!file)
+               return -EINVAL;
+
+       if (1 != fscanf(file, "%u", type))
+               ret = -1;
+
+       fclose(file);
+       return ret;
+}
+
+static struct perf_pmu *pmu_lookup(char *name)
+{
+       struct perf_pmu *pmu;
+       LIST_HEAD(format);
+       __u32 type;
+
+       /*
+        * The pmu data we store & need consists of the pmu
+        * type value and format definitions. Load both right
+        * now.
+        */
+       if (pmu_format(name, &format))
+               return NULL;
+
+       if (pmu_type(name, &type))
+               return NULL;
+
+       pmu = zalloc(sizeof(*pmu));
+       if (!pmu)
+               return NULL;
+
+       INIT_LIST_HEAD(&pmu->format);
+       list_splice(&format, &pmu->format);
+       pmu->name = strdup(name);
+       pmu->type = type;
+       return pmu;
+}
+
+static struct perf_pmu *pmu_find(char *name)
+{
+       struct perf_pmu *pmu;
+
+       list_for_each_entry(pmu, &pmus, list)
+               if (!strcmp(pmu->name, name))
+                       return pmu;
+
+       return NULL;
+}
+
+struct perf_pmu *perf_pmu__find(char *name)
+{
+       struct perf_pmu *pmu;
+
+       /*
+        * Once a PMU is loaded it stays in the list,
+        * which saves us from reading and parsing
+        * the pmu format definitions more than once.
+        */
+       pmu = pmu_find(name);
+       if (pmu)
+               return pmu;
+
+       return pmu_lookup(name);
+}
+
+static struct perf_pmu__format*
+pmu_find_format(struct list_head *formats, char *name)
+{
+       struct perf_pmu__format *format;
+
+       list_for_each_entry(format, formats, list)
+               if (!strcmp(format->name, name))
+                       return format;
+
+       return NULL;
+}
+
+/*
+ * Returns value based on the format definition (format parameter)
+ * and the unformatted value (value parameter).
+ *
+ * TODO maybe optimize a little ;)
+ */
+static __u64 pmu_format_value(unsigned long *format, __u64 value)
+{
+       unsigned long fbit, vbit;
+       __u64 v = 0;
+
+       for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) {
+
+               if (!test_bit(fbit, format))
+                       continue;
+
+               if (!(value & (1llu << vbit++)))
+                       continue;
+
+               v |= (1llu << fbit);
+       }
+
+       return v;
+}
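+
A worked example: with a format of config:0-1,62-63 (format bits 0, 1, 62 and 63 set) and a user value of 15 (binary 1111), the four low value bits are scattered into the four format bits, yielding 0xc000000000000003, which is the krava01 contribution checked by perf_pmu__test() further down in this file. The sketch below re-expresses the loop for a single 64-bit mask (a standalone simplification; the real code walks a kernel-style bitmap):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Scatter the low bits of 'value' into the set bits of 'format_mask',
 * lowest format bit first, mirroring pmu_format_value().
 */
static uint64_t format_value(uint64_t format_mask, uint64_t value)
{
	uint64_t v = 0;
	unsigned int fbit, vbit = 0;

	for (fbit = 0; fbit < 64; fbit++) {
		if (!(format_mask & (1ULL << fbit)))
			continue;
		if (value & (1ULL << vbit++))
			v |= 1ULL << fbit;
	}
	return v;
}

int main(void)
{
	/* "config:0-1,62-63" with value 15 -> 0xc000000000000003. */
	uint64_t mask = (1ULL << 0) | (1ULL << 1) | (1ULL << 62) | (1ULL << 63);

	printf("0x%016" PRIx64 "\n", format_value(mask, 15));
	return 0;
}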
+
+/*
+ * Set up one of the config[12] attr members based on the
+ * user input data (the term parameter).
+ */
+static int pmu_config_term(struct list_head *formats,
+                          struct perf_event_attr *attr,
+                          struct parse_events__term *term)
+{
+       struct perf_pmu__format *format;
+       __u64 *vp;
+
+       /*
+        * Only hardcoded and numeric terms are supported.
+        * Hardcoded terms should already be in place, so there is
+        * nothing to be done for them here.
+        */
+       if (parse_events__is_hardcoded_term(term))
+               return 0;
+
+       if (term->type != PARSE_EVENTS__TERM_TYPE_NUM)
+               return -EINVAL;
+
+       format = pmu_find_format(formats, term->config);
+       if (!format)
+               return -EINVAL;
+
+       switch (format->value) {
+       case PERF_PMU_FORMAT_VALUE_CONFIG:
+               vp = &attr->config;
+               break;
+       case PERF_PMU_FORMAT_VALUE_CONFIG1:
+               vp = &attr->config1;
+               break;
+       case PERF_PMU_FORMAT_VALUE_CONFIG2:
+               vp = &attr->config2;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *vp |= pmu_format_value(format->bits, term->val.num);
+       return 0;
+}
+
+static int pmu_config(struct list_head *formats, struct perf_event_attr *attr,
+                     struct list_head *head_terms)
+{
+       struct parse_events__term *term, *h;
+
+       list_for_each_entry_safe(term, h, head_terms, list)
+               if (pmu_config_term(formats, attr, term))
+                       return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Configure the event's 'attr' parameter based on:
+ * 1) user input, specified in the head_terms parameter
+ * 2) pmu format definitions, specified by the pmu parameter
+ */
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+                    struct list_head *head_terms)
+{
+       attr->type = pmu->type;
+       return pmu_config(&pmu->format, attr, head_terms);
+}
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+                        int config, unsigned long *bits)
+{
+       struct perf_pmu__format *format;
+
+       format = zalloc(sizeof(*format));
+       if (!format)
+               return -ENOMEM;
+
+       format->name = strdup(name);
+       format->value = config;
+       memcpy(format->bits, bits, sizeof(format->bits));
+
+       list_add_tail(&format->list, list);
+       return 0;
+}
+
+void perf_pmu__set_format(unsigned long *bits, long from, long to)
+{
+       long b;
+
+       if (!to)
+               to = from;
+
+       memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS));
+       for (b = from; b <= to; b++)
+               set_bit(b, bits);
+}
+
+/* Simulated format definitions. */
+static struct test_format {
+       const char *name;
+       const char *value;
+} test_formats[] = {
+       { "krava01", "config:0-1,62-63\n", },
+       { "krava02", "config:10-17\n", },
+       { "krava03", "config:5\n", },
+       { "krava11", "config1:0,2,4,6,8,20-28\n", },
+       { "krava12", "config1:63\n", },
+       { "krava13", "config1:45-47\n", },
+       { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", },
+       { "krava22", "config2:8,18,48,58\n", },
+       { "krava23", "config2:28-29,38\n", },
+};
+
+#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format))
+
+/* Simulated users input. */
+static struct parse_events__term test_terms[] = {
+       {
+               .config  = (char *) "krava01",
+               .val.num = 15,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+       {
+               .config  = (char *) "krava02",
+               .val.num = 170,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+       {
+               .config  = (char *) "krava03",
+               .val.num = 1,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+       {
+               .config  = (char *) "krava11",
+               .val.num = 27,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+       {
+               .config  = (char *) "krava12",
+               .val.num = 1,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+       {
+               .config  = (char *) "krava13",
+               .val.num = 2,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+       {
+               .config  = (char *) "krava21",
+               .val.num = 119,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+       {
+               .config  = (char *) "krava22",
+               .val.num = 11,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+       {
+               .config  = (char *) "krava23",
+               .val.num = 2,
+               .type    = PARSE_EVENTS__TERM_TYPE_NUM,
+       },
+};
+#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))
+
+/*
+ * Prepare format directory data, exported by kernel
+ * at /sys/bus/event_source/devices/<dev>/format.
+ */
+static char *test_format_dir_get(void)
+{
+       static char dir[PATH_MAX];
+       unsigned int i;
+
+       snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX");
+       if (!mkdtemp(dir))
+               return NULL;
+
+       for (i = 0; i < TEST_FORMATS_CNT; i++) {
+               static char name[PATH_MAX];
+               struct test_format *format = &test_formats[i];
+               FILE *file;
+
+               snprintf(name, PATH_MAX, "%s/%s", dir, format->name);
+
+               file = fopen(name, "w");
+               if (!file)
+                       return NULL;
+
+               if (1 != fwrite(format->value, strlen(format->value), 1, file))
+                       break;
+
+               fclose(file);
+       }
+
+       return dir;
+}
+
+/* Cleanup format directory. */
+static int test_format_dir_put(char *dir)
+{
+       char buf[PATH_MAX];
+       snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir);
+       if (system(buf))
+               return -1;
+
+       snprintf(buf, PATH_MAX, "rmdir %s\n", dir);
+       return system(buf);
+}
+
+static struct list_head *test_terms_list(void)
+{
+       static LIST_HEAD(terms);
+       unsigned int i;
+
+       for (i = 0; i < TERMS_CNT; i++)
+               list_add_tail(&test_terms[i].list, &terms);
+
+       return &terms;
+}
+
+#undef TERMS_CNT
+
+int perf_pmu__test(void)
+{
+       char *format = test_format_dir_get();
+       LIST_HEAD(formats);
+       struct list_head *terms = test_terms_list();
+       int ret;
+
+       if (!format)
+               return -EINVAL;
+
+       do {
+               struct perf_event_attr attr;
+
+               memset(&attr, 0, sizeof(attr));
+
+               ret = pmu_format_parse(format, &formats);
+               if (ret)
+                       break;
+
+               ret = pmu_config(&formats, &attr, terms);
+               if (ret)
+                       break;
+
+               ret = -EINVAL;
+
+               if (attr.config  != 0xc00000000002a823)
+                       break;
+               if (attr.config1 != 0x8000400000000145)
+                       break;
+               if (attr.config2 != 0x0400000020041d07)
+                       break;
+
+               ret = 0;
+       } while (0);
+
+       test_format_dir_put(format);
+       return ret;
+}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
new file mode 100644 (file)
index 0000000..68c0db9
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef __PMU_H
+#define __PMU_H
+
+#include <linux/bitops.h>
+#include "../../../include/linux/perf_event.h"
+
+enum {
+       PERF_PMU_FORMAT_VALUE_CONFIG,
+       PERF_PMU_FORMAT_VALUE_CONFIG1,
+       PERF_PMU_FORMAT_VALUE_CONFIG2,
+};
+
+#define PERF_PMU_FORMAT_BITS 64
+
+struct perf_pmu__format {
+       char *name;
+       int value;
+       DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+       struct list_head list;
+};
+
+struct perf_pmu {
+       char *name;
+       __u32 type;
+       struct list_head format;
+       struct list_head list;
+};
+
+struct perf_pmu *perf_pmu__find(char *name);
+int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
+                    struct list_head *head_terms);
+
+int perf_pmu_wrap(void);
+void perf_pmu_error(struct list_head *list, char *name, char const *msg);
+
+int perf_pmu__new_format(struct list_head *list, char *name,
+                        int config, unsigned long *bits);
+void perf_pmu__set_format(unsigned long *bits, long from, long to);
+
+int perf_pmu__test(void);
+#endif /* __PMU_H */
diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l
new file mode 100644 (file)
index 0000000..a15d9fb
--- /dev/null
@@ -0,0 +1,43 @@
+%option prefix="perf_pmu_"
+
+%{
+#include <stdlib.h>
+#include <linux/bitops.h>
+#include "pmu.h"
+#include "pmu-bison.h"
+
+static int value(int base)
+{
+       long num;
+
+       errno = 0;
+       num = strtoul(perf_pmu_text, NULL, base);
+       if (errno)
+               return PP_ERROR;
+
+       perf_pmu_lval.num = num;
+       return PP_VALUE;
+}
+
+%}
+
+num_dec         [0-9]+
+
+%%
+
+{num_dec}      { return value(10); }
+config         { return PP_CONFIG; }
+config1                { return PP_CONFIG1; }
+config2                { return PP_CONFIG2; }
+-              { return '-'; }
+:              { return ':'; }
+,              { return ','; }
+.              { ; }
+\n             { ; }
+
+%%
+
+int perf_pmu_wrap(void)
+{
+       return 1;
+}
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
new file mode 100644 (file)
index 0000000..20ea77e
--- /dev/null
@@ -0,0 +1,93 @@
+
+%name-prefix "perf_pmu_"
+%parse-param {struct list_head *format}
+%parse-param {char *name}
+
+%{
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <string.h>
+#include "pmu.h"
+
+extern int perf_pmu_lex (void);
+
+#define ABORT_ON(val) \
+do { \
+        if (val) \
+                YYABORT; \
+} while (0)
+
+%}
+
+%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
+%token PP_VALUE PP_ERROR
+%type <num> PP_VALUE
+%type <bits> bit_term
+%type <bits> bits
+
+%union
+{
+       unsigned long num;
+       DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+}
+
+%%
+
+format:
+format format_term
+|
+format_term
+
+format_term:
+PP_CONFIG ':' bits
+{
+       ABORT_ON(perf_pmu__new_format(format, name,
+                                     PERF_PMU_FORMAT_VALUE_CONFIG,
+                                     $3));
+}
+|
+PP_CONFIG1 ':' bits
+{
+       ABORT_ON(perf_pmu__new_format(format, name,
+                                     PERF_PMU_FORMAT_VALUE_CONFIG1,
+                                     $3));
+}
+|
+PP_CONFIG2 ':' bits
+{
+       ABORT_ON(perf_pmu__new_format(format, name,
+                                     PERF_PMU_FORMAT_VALUE_CONFIG2,
+                                     $3));
+}
+
+bits:
+bits ',' bit_term
+{
+       bitmap_or($$, $1, $3, 64);
+}
+|
+bit_term
+{
+       memcpy($$, $1, sizeof($1));
+}
+
+bit_term:
+PP_VALUE '-' PP_VALUE
+{
+       perf_pmu__set_format($$, $1, $3);
+}
+|
+PP_VALUE
+{
+       perf_pmu__set_format($$, $1, 0);
+}
+
+%%
+
+void perf_pmu_error(struct list_head *list __used,
+                   char *name __used,
+                   char const *msg __used)
+{
+}
index 2cc162d3b78c3b8ed720bff80a3b2171a8673397..d448984ed789c25ff3fc6524bc355d67bc2ad6cb 100644 (file)
@@ -972,10 +972,12 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
        struct dwarf_callback_param *param = data;
        struct probe_finder *pf = param->data;
        struct perf_probe_point *pp = &pf->pev->point;
+       Dwarf_Attribute attr;
 
        /* Check tag and diename */
        if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
-           !die_compare_name(sp_die, pp->function))
+           !die_compare_name(sp_die, pp->function) ||
+           dwarf_attr(sp_die, DW_AT_declaration, &attr))
                return DWARF_CB_OK;
 
        /* Check declared file */
index 002ebbf59f48e84aa69898de786b6227e2402ad7..9412e3b05f6888c9504c7ab6b3dd0effe73de627 100644 (file)
@@ -140,6 +140,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
        INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
        INIT_LIST_HEAD(&self->ordered_samples.to_free);
        machine__init(&self->host_machine, "", HOST_KERNEL_ID);
+       hists__init(&self->hists);
 
        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
index 5dd83c3e2c0c4d61dd02a70f42a46af96fca36a8..c0a028c3ebaf35905e99e69f4cef76e5e343f44f 100644 (file)
@@ -1,6 +1,5 @@
 #include <dirent.h>
 #include <errno.h>
-#include <libgen.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
@@ -51,6 +50,8 @@ struct symbol_conf symbol_conf = {
 
 int dso__name_len(const struct dso *dso)
 {
+       if (!dso)
+               return strlen("[unknown]");
        if (verbose)
                return dso->long_name_len;
 
index a4088ced1e64493944b2b7b66514ba05e302fba8..dfd1bd8371a4cba9fc31d164bada0400f0ca01a9 100644 (file)
@@ -722,7 +722,7 @@ static char *event_read_name(void)
 static int event_read_id(void)
 {
        char *token;
-       int id;
+       int id = -1;
 
        if (read_expected_item(EVENT_ITEM, "ID") < 0)
                return -1;
@@ -731,15 +731,13 @@ static int event_read_id(void)
                return -1;
 
        if (read_expect_type(EVENT_ITEM, &token) < 0)
-               goto fail;
+               goto free;
 
        id = strtoul(token, NULL, 0);
-       free_token(token);
-       return id;
 
- fail:
+ free:
        free_token(token);
-       return -1;
+       return id;
 }
 
 static int field_is_string(struct format_field *field)
index 84d761b730c10d9d11b1db8d88bcf0afeacb47a8..6ee82f60feaf2988ba31a0040ad9d85a00183f97 100644 (file)
@@ -49,6 +49,8 @@ int ui_browser__warning(struct ui_browser *browser, int timeout,
                        const char *format, ...);
 int ui_browser__help_window(struct ui_browser *browser, const char *text);
 bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
+int ui_browser__input_window(const char *title, const char *text, char *input,
+                            const char *exit_msg, int delay_sec);
 
 void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
 unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
index fa530fcc764a015646be7de8522d2979987a452e..d7a1c4afe28b9089ab09bbf5b7abce623e46a544 100644 (file)
@@ -879,6 +879,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        char *options[16];
        int nr_options = 0;
        int key = -1;
+       char buf[64];
 
        if (browser == NULL)
                return -1;
@@ -933,6 +934,16 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        goto zoom_dso;
                case 't':
                        goto zoom_thread;
+               case 's':
+                       if (ui_browser__input_window("Symbol to show",
+                                       "Please enter the name of symbol you want to see",
+                                       buf, "ENTER: OK, ESC: Cancel",
+                                       delay_secs * 2) == K_ENTER) {
+                               self->symbol_filter_str = *buf ? buf : NULL;
+                               hists__filter_by_symbol(self);
+                               hist_browser__reset(browser);
+                       }
+                       continue;
                case K_F1:
                case 'h':
                case '?':
@@ -950,7 +961,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                        "C             Collapse all callchains\n"
                                        "E             Expand all callchains\n"
                                        "d             Zoom into current DSO\n"
-                                       "t             Zoom into current Thread");
+                                       "t             Zoom into current Thread\n"
+                                       "s             Filter symbol by name");
                        continue;
                case K_ENTER:
                case K_RIGHT:
index 3458b1985761b863311d53f17552e7c61019501a..809eca5707fae1fccee91dcd8a22ef46603f347c 100644 (file)
@@ -16,6 +16,8 @@
 #define K_TAB  '\t'
 #define K_UNTAB        SL_KEY_UNTAB
 #define K_UP   SL_KEY_UP
+#define K_BKSPC 0x7f
+#define K_DEL  SL_KEY_DELETE
 
 /* Not really keys */
 #define K_TIMER         -1
index 45daa7c41dad9812226ba8737a23664709d50cbb..ad4374a16bb08b88c008ca6e79fcf4a5b15903b5 100644 (file)
@@ -69,6 +69,88 @@ int ui__popup_menu(int argc, char * const argv[])
        return popup_menu__run(&menu);
 }
 
+int ui_browser__input_window(const char *title, const char *text, char *input,
+                            const char *exit_msg, int delay_secs)
+{
+       int x, y, len, key;
+       int max_len = 60, nr_lines = 0;
+       static char buf[50];
+       const char *t;
+
+       t = text;
+       while (1) {
+               const char *sep = strchr(t, '\n');
+
+               if (sep == NULL)
+                       sep = strchr(t, '\0');
+               len = sep - t;
+               if (max_len < len)
+                       max_len = len;
+               ++nr_lines;
+               if (*sep == '\0')
+                       break;
+               t = sep + 1;
+       }
+
+       max_len += 2;
+       nr_lines += 8;
+       y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+       x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+       SLsmg_set_color(0);
+       SLsmg_draw_box(y, x++, nr_lines, max_len);
+       if (title) {
+               SLsmg_gotorc(y, x + 1);
+               SLsmg_write_string((char *)title);
+       }
+       SLsmg_gotorc(++y, x);
+       nr_lines -= 7;
+       max_len -= 2;
+       SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+                                  nr_lines, max_len, 1);
+       y += nr_lines;
+       len = 5;
+       while (len--) {
+               SLsmg_gotorc(y + len - 1, x);
+               SLsmg_write_nstring((char *)" ", max_len);
+       }
+       SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
+
+       SLsmg_gotorc(y + 3, x);
+       SLsmg_write_nstring((char *)exit_msg, max_len);
+       SLsmg_refresh();
+
+       x += 2;
+       len = 0;
+       key = ui__getch(delay_secs);
+       while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
+               if (key == K_BKSPC) {
+                       if (len == 0)
+                               goto next_key;
+                       SLsmg_gotorc(y, x + --len);
+                       SLsmg_write_char(' ');
+               } else {
+                       buf[len] = key;
+                       SLsmg_gotorc(y, x + len++);
+                       SLsmg_write_char(key);
+               }
+               SLsmg_refresh();
+
+               /* XXX more graceful overflow handling needed */
+               if (len == sizeof(buf) - 1) {
+                       ui_helpline__push("maximum size of symbol name reached!");
+                       key = K_ENTER;
+                       break;
+               }
+next_key:
+               key = ui__getch(delay_secs);
+       }
+
+       buf[len] = '\0';
+       strncpy(input, buf, len+1);
+       return key;
+}
+
 int ui__question_window(const char *title, const char *text,
                        const char *exit_msg, int delay_secs)
 {