Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 17 Jun 2009 04:15:42 +0000 (21:15 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 17 Jun 2009 04:15:42 +0000 (21:15 -0700)
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6: (425 commits)
  V4L/DVB (11870): gspca - main: VIDIOC_ENUM_FRAMESIZES ioctl added.
  V4L/DVB (12004): poll method lose race condition
  V4L/DVB (11894): flexcop-pci: dmesg visible names broken
  V4L/DVB (11892): Siano: smsendian - declare function as extern
  V4L/DVB (11891): Siano: smscore - bind the GPIO SMS protocol
  V4L/DVB (11890): Siano: smscore - remove redundant code
  V4L/DVB (11889): Siano: smsdvb - add DVB v3 events
  V4L/DVB (11888): Siano: smsusb - remove redundant ifdef
  V4L/DVB (11887): Siano: smscards - add board (target) events
  V4L/DVB (11886): Siano: smscore - fix some new GPIO definitions names
  V4L/DVB (11885): Siano: Add new GPIO management interface
  V4L/DVB (11884): Siano: smssdio - revert to stand alone module
  V4L/DVB (11883): Siano: cards - add two additional (USB) devices
  V4L/DVB (11824): Siano: smsusb - change exit func debug msg
  V4L/DVB (11823): Siano: smsusb - fix typo in module description
  V4L/DVB (11822): Siano: smscore - bug fix at get_device_mode
  V4L/DVB (11821): Siano: smscore - fix isdb-t firmware name
  V4L/DVB (11820): Siano: smscore - fix byte ordering bug
  V4L/DVB (11819): Siano: smscore - fix get_common_buffer bug
  V4L/DVB (11818): Siano: smscards - assign gpio to HPG targets
  ...

611 files changed:
Documentation/DocBook/debugobjects.tmpl
Documentation/accounting/getdelays.c
Documentation/atomic_ops.txt
Documentation/cdrom/packet-writing.txt
Documentation/driver-model/device.txt
Documentation/fault-injection/fault-injection.txt
Documentation/fb/vesafb.txt
Documentation/filesystems/proc.txt
Documentation/firmware_class/README
Documentation/kernel-parameters.txt
Documentation/kmemcheck.txt [new file with mode: 0644]
Documentation/kprobes.txt
Documentation/sysctl/vm.txt
Documentation/trace/ftrace.txt
Documentation/trace/mmiotrace.txt
Documentation/vm/Makefile
Documentation/vm/balance
Documentation/vm/page-types.c [new file with mode: 0644]
Documentation/vm/pagemap.txt
MAINTAINERS
arch/alpha/include/asm/8253pit.h
arch/alpha/include/asm/kmap_types.h
arch/alpha/kernel/init_task.c
arch/alpha/kernel/irq_alpha.c
arch/alpha/kernel/irq_i8259.c
arch/alpha/kernel/irq_impl.h
arch/alpha/kernel/irq_pyxis.c
arch/alpha/kernel/irq_srm.c
arch/alpha/kernel/setup.c
arch/alpha/kernel/sys_alcor.c
arch/alpha/kernel/sys_cabriolet.c
arch/alpha/kernel/sys_dp264.c
arch/alpha/kernel/sys_eb64p.c
arch/alpha/kernel/sys_eiger.c
arch/alpha/kernel/sys_jensen.c
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/sys_mikasa.c
arch/alpha/kernel/sys_noritake.c
arch/alpha/kernel/sys_rawhide.c
arch/alpha/kernel/sys_ruffian.c
arch/alpha/kernel/sys_rx164.c
arch/alpha/kernel/sys_sable.c
arch/alpha/kernel/sys_takara.c
arch/alpha/kernel/sys_titan.c
arch/alpha/kernel/sys_wildfire.c
arch/alpha/mm/numa.c
arch/arm/kernel/init_task.c
arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h [new file with mode: 0644]
arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h [new file with mode: 0644]
arch/avr32/kernel/init_task.c
arch/blackfin/include/asm/kmap_types.h
arch/blackfin/kernel/init_task.c
arch/cris/include/asm/kmap_types.h
arch/cris/kernel/process.c
arch/frv/kernel/init_task.c
arch/h8300/include/asm/kmap_types.h
arch/h8300/kernel/init_task.c
arch/ia64/hp/common/sba_iommu.c
arch/ia64/include/asm/kmap_types.h
arch/ia64/kernel/init_task.c
arch/ia64/kernel/mca.c
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/uncached.c
arch/ia64/sn/pci/pci_dma.c
arch/m32r/include/asm/kmap_types.h
arch/m32r/kernel/init_task.c
arch/m32r/mm/discontig.c
arch/m32r/platforms/m32104ut/setup.c
arch/m32r/platforms/m32700ut/setup.c
arch/m32r/platforms/mappi/setup.c
arch/m32r/platforms/mappi2/setup.c
arch/m32r/platforms/mappi3/setup.c
arch/m32r/platforms/oaks32r/setup.c
arch/m32r/platforms/opsput/setup.c
arch/m32r/platforms/usrv/setup.c
arch/m68k/include/asm/kmap_types.h
arch/m68k/kernel/process.c
arch/m68knommu/kernel/init_task.c
arch/microblaze/include/asm/kmap_types.h
arch/mips/include/asm/i8253.h
arch/mips/include/asm/kmap_types.h
arch/mips/kernel/init_task.c
arch/mips/sni/eisa.c
arch/mn10300/include/asm/kmap_types.h
arch/mn10300/kernel/init_task.c
arch/parisc/include/asm/kmap_types.h
arch/parisc/kernel/init_task.c
arch/powerpc/include/asm/8253pit.h
arch/powerpc/kernel/init_task.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/platforms/cell/ras.c
arch/powerpc/platforms/cell/spu_base.c
arch/s390/include/asm/kmap_types.h
arch/s390/kernel/init_task.c
arch/sh/include/asm/kmap_types.h
arch/sh/kernel/init_task.c
arch/sparc/include/asm/kmap_types.h
arch/sparc/kernel/init_task.c
arch/um/drivers/net_kern.c
arch/um/drivers/ubd_kern.c
arch/um/include/shared/init.h
arch/um/include/shared/net_user.h
arch/um/kernel/init_task.c
arch/um/kernel/irq.c
arch/um/sys-i386/stub.S
arch/um/sys-x86_64/asm/elf.h
arch/um/sys-x86_64/stub.S
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/include/asm/dma-mapping.h
arch/x86/include/asm/kmap_types.h
arch/x86/include/asm/kmemcheck.h [new file with mode: 0644]
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/string_32.h
arch/x86/include/asm/string_64.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/timex.h
arch/x86/include/asm/xor.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpuid.c
arch/x86/kernel/i8253.c
arch/x86/kernel/init_task.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/msr.c
arch/x86/kernel/process.c
arch/x86/kernel/stacktrace.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kvm/vmx.c
arch/x86/mm/Makefile
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/kmemcheck/Makefile [new file with mode: 0644]
arch/x86/mm/kmemcheck/error.c [new file with mode: 0644]
arch/x86/mm/kmemcheck/error.h [new file with mode: 0644]
arch/x86/mm/kmemcheck/kmemcheck.c [new file with mode: 0644]
arch/x86/mm/kmemcheck/opcode.c [new file with mode: 0644]
arch/x86/mm/kmemcheck/opcode.h [new file with mode: 0644]
arch/x86/mm/kmemcheck/pte.c [new file with mode: 0644]
arch/x86/mm/kmemcheck/pte.h [new file with mode: 0644]
arch/x86/mm/kmemcheck/selftest.c [new file with mode: 0644]
arch/x86/mm/kmemcheck/selftest.h [new file with mode: 0644]
arch/x86/mm/kmemcheck/shadow.c [new file with mode: 0644]
arch/x86/mm/kmemcheck/shadow.h [new file with mode: 0644]
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c
arch/xtensa/include/asm/kmap_types.h
arch/xtensa/kernel/init_task.c
block/bsg.c
block/genhd.c
crypto/xor.c
drivers/Kconfig
drivers/Makefile
drivers/base/core.c
drivers/base/dd.c
drivers/base/firmware_class.c
drivers/base/node.c
drivers/base/platform.c
drivers/base/sys.c
drivers/block/aoe/aoechr.c
drivers/block/pktcdvd.c
drivers/block/xen-blkfront.c
drivers/char/hvcs.c
drivers/char/hw_random/core.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/misc.c
drivers/char/raw.c
drivers/char/vt.c
drivers/clocksource/acpi_pm.c
drivers/eisa/eisa.ids
drivers/eisa/pci_eisa.c
drivers/eisa/virtual_root.c
drivers/firewire/Makefile
drivers/firewire/core-card.c [new file with mode: 0644]
drivers/firewire/core-cdev.c [new file with mode: 0644]
drivers/firewire/core-device.c [new file with mode: 0644]
drivers/firewire/core-iso.c [new file with mode: 0644]
drivers/firewire/core-topology.c [new file with mode: 0644]
drivers/firewire/core-transaction.c [new file with mode: 0644]
drivers/firewire/core.h [new file with mode: 0644]
drivers/firewire/fw-card.c [deleted file]
drivers/firewire/fw-cdev.c [deleted file]
drivers/firewire/fw-device.c [deleted file]
drivers/firewire/fw-device.h [deleted file]
drivers/firewire/fw-iso.c [deleted file]
drivers/firewire/fw-ohci.c [deleted file]
drivers/firewire/fw-ohci.h [deleted file]
drivers/firewire/fw-sbp2.c [deleted file]
drivers/firewire/fw-topology.c [deleted file]
drivers/firewire/fw-topology.h [deleted file]
drivers/firewire/fw-transaction.c [deleted file]
drivers/firewire/fw-transaction.h [deleted file]
drivers/firewire/ohci.c [new file with mode: 0644]
drivers/firewire/ohci.h [new file with mode: 0644]
drivers/firewire/sbp2.c [new file with mode: 0644]
drivers/firmware/memmap.c
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/i915/intel_fb.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/Kconfig
drivers/hwmon/hp_accel.c
drivers/hwmon/lis3lv02d.c
drivers/hwmon/lis3lv02d.h
drivers/hwmon/lis3lv02d_spi.c
drivers/ide/ide-pm.c
drivers/ide/ide-probe.c
drivers/ide/ide_platform.c
drivers/ieee1394/csr1212.c
drivers/ieee1394/eth1394.c
drivers/ieee1394/nodemgr.c
drivers/ieee1394/sbp2.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/hw/ehca/ehca_main.c
drivers/input/input.c
drivers/input/joystick/analog.c
drivers/input/misc/pcspkr.c
drivers/input/touchscreen/wm97xx-core.c
drivers/input/xen-kbdfront.c
drivers/md/dm-ioctl.c
drivers/media/common/tuners/tuner-xc2028.c
drivers/media/dvb/dvb-core/dvbdev.c
drivers/media/dvb/dvb-usb/dvb-usb.h
drivers/media/video/dabusb.c
drivers/media/video/videobuf-dma-contig.c
drivers/mfd/htc-pasic3.c
drivers/mfd/pcf50633-core.c
drivers/mfd/wm8400-core.c
drivers/misc/c2port/core.c
drivers/misc/sgi-gru/grufile.c
drivers/misc/sgi-xp/xpc_uv.c
drivers/net/Kconfig
drivers/net/tun.c
drivers/net/wimax/i2400m/i2400m.h
drivers/net/wireless/ath/ath5k/Kconfig
drivers/net/wireless/libertas/README
drivers/net/wireless/libertas/if_spi.c
drivers/net/wireless/libertas/if_spi.h
drivers/net/wireless/libertas/if_usb.c
drivers/net/xen-netfront.c
drivers/parisc/eisa.c
drivers/parisc/sba_iommu.c
drivers/parport/parport_gsc.c
drivers/pci/pci.c
drivers/pci/pcie/portdrv_core.c
drivers/pcmcia/ds.c
drivers/pcmcia/pcmcia_ioctl.c
drivers/s390/char/con3215.c
drivers/s390/char/raw3270.c
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/char/tape_core.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/vmur.c
drivers/s390/net/claw.c
drivers/s390/net/lcs.c
drivers/s390/net/lcs.h
drivers/s390/net/netiucv.c
drivers/scsi/aha1740.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi/ibmvstgt.c
drivers/scsi/libsrp.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/serial/of_serial.c
drivers/spi/spi_mpc83xx.c
drivers/staging/uc2322/aten2011.c
drivers/thermal/thermal_sys.c
drivers/usb/Kconfig
drivers/usb/Makefile
drivers/usb/atm/ueagle-atm.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/class/usblp.c
drivers/usb/class/usbtmc.c
drivers/usb/core/Kconfig
drivers/usb/core/Makefile
drivers/usb/core/config.c
drivers/usb/core/driver.c
drivers/usb/core/endpoint.c
drivers/usb/core/file.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hcd.c
drivers/usb/core/hcd.h
drivers/usb/core/hub.c
drivers/usb/core/hub.h
drivers/usb/core/message.c
drivers/usb/core/sysfs.c
drivers/usb/core/urb.c
drivers/usb/core/usb.c
drivers/usb/core/usb.h
drivers/usb/gadget/Kconfig
drivers/usb/gadget/Makefile
drivers/usb/gadget/at91_udc.c
drivers/usb/gadget/atmel_usba_udc.c
drivers/usb/gadget/audio.c [new file with mode: 0644]
drivers/usb/gadget/ci13xxx_udc.c
drivers/usb/gadget/f_audio.c [new file with mode: 0644]
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/file_storage.c
drivers/usb/gadget/fsl_mx3_udc.c [new file with mode: 0644]
drivers/usb/gadget/fsl_udc_core.c [new file with mode: 0644]
drivers/usb/gadget/fsl_usb2_udc.c [deleted file]
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/gadget_chips.h
drivers/usb/gadget/goku_udc.c
drivers/usb/gadget/imx_udc.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/langwell_udc.c [new file with mode: 0644]
drivers/usb/gadget/langwell_udc.h [new file with mode: 0644]
drivers/usb/gadget/pxa27x_udc.c
drivers/usb/gadget/pxa27x_udc.h
drivers/usb/gadget/s3c-hsotg.c [new file with mode: 0644]
drivers/usb/gadget/u_audio.c [new file with mode: 0644]
drivers/usb/gadget/u_audio.h [new file with mode: 0644]
drivers/usb/gadget/u_serial.c
drivers/usb/host/Kconfig
drivers/usb/host/Makefile
drivers/usb/host/ehci-au1xxx.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-ixp4xx.c
drivers/usb/host/ehci-orion.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-ppc-of.c
drivers/usb/host/ehci-ps3.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci.h
drivers/usb/host/fhci-dbg.c
drivers/usb/host/hwa-hc.c
drivers/usb/host/ohci-dbg.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-pci.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/r8a66597.h
drivers/usb/host/uhci-hcd.c
drivers/usb/host/uhci-q.c
drivers/usb/host/xhci-dbg.c [new file with mode: 0644]
drivers/usb/host/xhci-ext-caps.h [new file with mode: 0644]
drivers/usb/host/xhci-hcd.c [new file with mode: 0644]
drivers/usb/host/xhci-hub.c [new file with mode: 0644]
drivers/usb/host/xhci-mem.c [new file with mode: 0644]
drivers/usb/host/xhci-pci.c [new file with mode: 0644]
drivers/usb/host/xhci-ring.c [new file with mode: 0644]
drivers/usb/host/xhci.h [new file with mode: 0644]
drivers/usb/misc/iowarrior.c
drivers/usb/misc/legousbtower.c
drivers/usb/misc/sisusbvga/Kconfig
drivers/usb/misc/usbtest.c
drivers/usb/mon/mon_text.c
drivers/usb/musb/Kconfig
drivers/usb/musb/blackfin.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/cppi_dma.h
drivers/usb/musb/davinci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget_ep0.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/musb_host.h
drivers/usb/musb/musb_virthub.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/tusb6010.c
drivers/usb/otg/Kconfig
drivers/usb/otg/Makefile
drivers/usb/otg/langwell_otg.c [new file with mode: 0644]
drivers/usb/otg/nop-usb-xceiv.c
drivers/usb/otg/twl4030-usb.c
drivers/usb/serial/aircable.c
drivers/usb/serial/belkin_sa.c
drivers/usb/serial/bus.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/cyberjack.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/empeg.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio.h
drivers/usb/serial/garmin_gps.c
drivers/usb/serial/generic.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/io_tables.h
drivers/usb/serial/io_ti.c
drivers/usb/serial/ipaq.c
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/keyspan.c
drivers/usb/serial/keyspan.h
drivers/usb/serial/keyspan_pda.c
drivers/usb/serial/kl5kusb105.c
drivers/usb/serial/kobil_sct.c
drivers/usb/serial/mct_u232.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/omninet.c
drivers/usb/serial/opticon.c
drivers/usb/serial/option.c
drivers/usb/serial/oti6858.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/sierra.c
drivers/usb/serial/spcp8x5.c
drivers/usb/serial/symbolserial.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/usb_debug.c
drivers/usb/serial/visor.c
drivers/usb/serial/whiteheat.c
drivers/usb/storage/initializers.c
drivers/usb/storage/option_ms.c
drivers/usb/storage/sierra_ms.c
drivers/usb/storage/unusual_devs.h
drivers/video/Kconfig
drivers/video/acornfb.c
drivers/video/atmel_lcdfb.c
drivers/video/aty/radeon_pm.c
drivers/video/bf54x-lq043fb.c
drivers/video/bfin-t350mcqb-fb.c
drivers/video/carminefb.c
drivers/video/chipsfb.c
drivers/video/efifb.c
drivers/video/fbmem.c
drivers/video/igafb.c
drivers/video/intelfb/intelfbdrv.c
drivers/video/logo/Makefile
drivers/video/logo/logo.c
drivers/video/mb862xx/mb862xxfb.c
drivers/video/modedb.c
drivers/video/offb.c
drivers/video/pm2fb.c
drivers/video/s1d13xxxfb.c
drivers/video/s3c-fb.c
drivers/video/s3c2410fb.c
drivers/video/s3c2410fb.h
drivers/video/sis/sis_main.c
drivers/video/stifb.c
drivers/video/tcx.c
drivers/video/vesafb.c
drivers/video/xen-fbfront.c
drivers/vlynq/Kconfig [new file with mode: 0644]
drivers/vlynq/Makefile [new file with mode: 0644]
drivers/vlynq/vlynq.c [new file with mode: 0644]
fs/Kconfig
fs/befs/linuxvfs.c
fs/debugfs/file.c
fs/debugfs/inode.c
fs/drop_caches.c
fs/fat/dir.c
fs/fat/namei_vfat.c
fs/fcntl.c
fs/fs-writeback.c
fs/isofs/joliet.c
fs/jfs/jfs_extent.c
fs/ncpfs/ncplib_kernel.c
fs/nfs/iostat.h
fs/nls/nls_base.c
fs/nls/nls_utf8.c
fs/ntfs/inode.c
fs/ntfs/logfile.c
fs/proc/base.c
fs/proc/meminfo.c
fs/proc/page.c
fs/select.c
fs/sysfs/symlink.c
include/asm-generic/kmap_types.h
include/linux/bug.h
include/linux/c2port.h
include/linux/cpuset.h
include/linux/device.h
include/linux/eisa.h
include/linux/fb.h
include/linux/firewire.h [new file with mode: 0644]
include/linux/firmware-map.h
include/linux/firmware.h
include/linux/fs.h
include/linux/genhd.h
include/linux/gfp.h
include/linux/highmem.h
include/linux/hugetlb.h
include/linux/init.h
include/linux/init_task.h
include/linux/interrupt.h
include/linux/kernel.h
include/linux/kmemcheck.h [new file with mode: 0644]
include/linux/linux_logo.h
include/linux/lis3lv02d.h [new file with mode: 0644]
include/linux/major.h
include/linux/memcontrol.h
include/linux/miscdevice.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/module.h
include/linux/nls.h
include/linux/nodemask.h
include/linux/page-flags.h
include/linux/pagemap.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/platform_device.h
include/linux/poll.h
include/linux/radix-tree.h
include/linux/ring_buffer.h
include/linux/rmap.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/slab_def.h
include/linux/smp.h
include/linux/stacktrace.h
include/linux/swap.h
include/linux/syscalls.h
include/linux/timex.h
include/linux/tracepoint.h
include/linux/usb.h
include/linux/usb/audio.h
include/linux/usb/ch9.h
include/linux/usb/composite.h
include/linux/usb/langwell_otg.h [new file with mode: 0644]
include/linux/usb/langwell_udc.h [new file with mode: 0644]
include/linux/usb/otg.h
include/linux/usb/r8a66597.h [new file with mode: 0644]
include/linux/usb/serial.h
include/linux/utsname.h
include/linux/vlynq.h [new file with mode: 0644]
include/linux/vmstat.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/sock.h
include/video/s1d13xxxfb.h
init/Kconfig
init/do_mounts.c
init/main.c
kernel/Makefile
kernel/cpuset.c
kernel/fork.c
kernel/groups.c [new file with mode: 0644]
kernel/kfifo.c
kernel/kthread.c
kernel/power/process.c
kernel/profile.c
kernel/signal.c
kernel/slow-work.c
kernel/softirq.c
kernel/sys.c
kernel/sysctl.c
kernel/trace/Kconfig
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/user.c
lib/Kconfig.debug
lib/Kconfig.kmemcheck [new file with mode: 0644]
lib/dec_and_lock.c
lib/genalloc.c
lib/hexdump.c
lib/kobject.c
lib/radix-tree.c
lib/rbtree.c
mm/Kconfig
mm/Kconfig.debug
mm/Makefile
mm/fadvise.c
mm/filemap.c
mm/hugetlb.c
mm/init-mm.c [new file with mode: 0644]
mm/internal.h
mm/kmemcheck.c [new file with mode: 0644]
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mlock.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_io.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/slob.c
mm/slub.c
mm/swap_state.c
mm/swapfile.c
mm/truncate.c
mm/util.c
mm/vmscan.c
mm/vmstat.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/inet_timewait_sock.c
net/sunrpc/svc.c
samples/Kconfig
samples/firmware_class/firmware_sample_driver.c [deleted file]
samples/firmware_class/firmware_sample_firmware_class.c [deleted file]
scripts/get_maintainer.pl
scripts/gfp-translate [new file with mode: 0644]
scripts/pnmtologo.c
scripts/tracing/draw_functrace.py
sound/drivers/pcsp/pcsp.h
sound/oss/pas2_pcm.c
sound/sound_core.c

index 7f5f218015feb8beed681fa7742caced8ac4c76f..08ff908aa7a239732fce68cc1b06e507626f216a 100644 (file)
       number of errors are printk'ed including a full stack trace.
     </para>
     <para>
-      The statistics are available via debugfs/debug_objects/stats.
+      The statistics are available via /sys/kernel/debug/debug_objects/stats.
       They provide information about the number of warnings and the
       number of successful fixups along with information about the
       usage of the internal tracking objects and the state of the
index 7ea231172c850db1a7eed7127d0bb8a036e56364..aa73e72fd793896cbb70fa85505d5f6a7b76d80a 100644 (file)
@@ -246,7 +246,8 @@ void print_ioacct(struct taskstats *t)
 
 int main(int argc, char *argv[])
 {
-       int c, rc, rep_len, aggr_len, len2, cmd_type;
+       int c, rc, rep_len, aggr_len, len2;
+       int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC;
        __u16 id;
        __u32 mypid;
 
index 4ef245010457fc9301f21f6e78f2b1fc16c1d014..396bec3b74ed12ca1ddbfe980f9ffd4ca4d91308 100644 (file)
@@ -229,10 +229,10 @@ kernel.  It is the use of atomic counters to implement reference
 counting, and it works such that once the counter falls to zero it can
 be guaranteed that no other entity can be accessing the object:
 
-static void obj_list_add(struct obj *obj)
+static void obj_list_add(struct obj *obj, struct list_head *head)
 {
        obj->active = 1;
-       list_add(&obj->list);
+       list_add(&obj->list, head);
 }
 
 static void obj_list_del(struct obj *obj)
index cf1f8126991c00fe5f904631d0471821bf958515..1c407778c8b26213a69984e03a94d34a4fe16d82 100644 (file)
@@ -117,7 +117,7 @@ Using the pktcdvd debugfs interface
 
 To read pktcdvd device infos in human readable form, do:
 
-       # cat /debug/pktcdvd/pktcdvd[0-7]/info
+       # cat /sys/kernel/debug/pktcdvd/pktcdvd[0-7]/info
 
 For a description of the debugfs interface look into the file:
 
index a7cbfff40d077047f76463377381b269b5d41fc4..a124f3126b0d1f4f495c982a03df6e12f743b87b 100644 (file)
@@ -162,3 +162,35 @@ device_remove_file(dev,&dev_attr_power);
 
 The file name will be 'power' with a mode of 0644 (-rw-r--r--).
 
+Word of warning:  While the kernel allows device_create_file() and
+device_remove_file() to be called on a device at any time, userspace has
+strict expectations on when attributes get created.  When a new device is
+registered in the kernel, a uevent is generated to notify userspace (like
+udev) that a new device is available.  If attributes are added after the
+device is registered, then userspace won't get notified and userspace will
+not know about the new attributes.
+
+This is important for device driver that need to publish additional
+attributes for a device at driver probe time.  If the device driver simply
+calls device_create_file() on the device structure passed to it, then
+userspace will never be notified of the new attributes.  Instead, it should
+probably use class_create() and class->dev_attrs to set up a list of
+desired attributes in the modules_init function, and then in the .probe()
+hook, and then use device_create() to create a new device as a child
+of the probed device.  The new device will generate a new uevent and
+properly advertise the new attributes to userspace.
+
+For example, if a driver wanted to add the following attributes:
+struct device_attribute mydriver_attribs[] = {
+       __ATTR(port_count, 0444, port_count_show),
+       __ATTR(serial_number, 0444, serial_number_show),
+       NULL
+};
+
+Then in the module init function is would do:
+       mydriver_class = class_create(THIS_MODULE, "my_attrs");
+       mydriver_class.dev_attr = mydriver_attribs;
+
+And assuming 'dev' is the struct device passed into the probe hook, the driver
+probe function would do something like:
+       create_device(&mydriver_class, dev, chrdev, &private_data, "my_name");
index 4bc374a14345da10f666d67745aa151231c10c92..079305640790ee0436ab7ae46e49624498c333ce 100644 (file)
@@ -29,16 +29,16 @@ o debugfs entries
 fault-inject-debugfs kernel module provides some debugfs entries for runtime
 configuration of fault-injection capabilities.
 
-- /debug/fail*/probability:
+- /sys/kernel/debug/fail*/probability:
 
        likelihood of failure injection, in percent.
        Format: <percent>
 
        Note that one-failure-per-hundred is a very high error rate
        for some testcases.  Consider setting probability=100 and configure
-       /debug/fail*/interval for such testcases.
+       /sys/kernel/debug/fail*/interval for such testcases.
 
-- /debug/fail*/interval:
+- /sys/kernel/debug/fail*/interval:
 
        specifies the interval between failures, for calls to
        should_fail() that pass all the other tests.
@@ -46,18 +46,18 @@ configuration of fault-injection capabilities.
        Note that if you enable this, by setting interval>1, you will
        probably want to set probability=100.
 
-- /debug/fail*/times:
+- /sys/kernel/debug/fail*/times:
 
        specifies how many times failures may happen at most.
        A value of -1 means "no limit".
 
-- /debug/fail*/space:
+- /sys/kernel/debug/fail*/space:
 
        specifies an initial resource "budget", decremented by "size"
        on each call to should_fail(,size).  Failure injection is
        suppressed until "space" reaches zero.
 
-- /debug/fail*/verbose
+- /sys/kernel/debug/fail*/verbose
 
        Format: { 0 | 1 | 2 }
        specifies the verbosity of the messages when failure is
@@ -65,17 +65,17 @@ configuration of fault-injection capabilities.
        log line per failure; '2' will print a call trace too -- useful
        to debug the problems revealed by fault injection.
 
-- /debug/fail*/task-filter:
+- /sys/kernel/debug/fail*/task-filter:
 
        Format: { 'Y' | 'N' }
        A value of 'N' disables filtering by process (default).
        Any positive value limits failures to only processes indicated by
        /proc/<pid>/make-it-fail==1.
 
-- /debug/fail*/require-start:
-- /debug/fail*/require-end:
-- /debug/fail*/reject-start:
-- /debug/fail*/reject-end:
+- /sys/kernel/debug/fail*/require-start:
+- /sys/kernel/debug/fail*/require-end:
+- /sys/kernel/debug/fail*/reject-start:
+- /sys/kernel/debug/fail*/reject-end:
 
        specifies the range of virtual addresses tested during
        stacktrace walking.  Failure is injected only if some caller
@@ -84,26 +84,26 @@ configuration of fault-injection capabilities.
        Default required range is [0,ULONG_MAX) (whole of virtual address space).
        Default rejected range is [0,0).
 
-- /debug/fail*/stacktrace-depth:
+- /sys/kernel/debug/fail*/stacktrace-depth:
 
        specifies the maximum stacktrace depth walked during search
        for a caller within [require-start,require-end) OR
        [reject-start,reject-end).
 
-- /debug/fail_page_alloc/ignore-gfp-highmem:
+- /sys/kernel/debug/fail_page_alloc/ignore-gfp-highmem:
 
        Format: { 'Y' | 'N' }
        default is 'N', setting it to 'Y' won't inject failures into
        highmem/user allocations.
 
-- /debug/failslab/ignore-gfp-wait:
-- /debug/fail_page_alloc/ignore-gfp-wait:
+- /sys/kernel/debug/failslab/ignore-gfp-wait:
+- /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait:
 
        Format: { 'Y' | 'N' }
        default is 'N', setting it to 'Y' will inject failures
        only into non-sleep allocations (GFP_ATOMIC allocations).
 
-- /debug/fail_page_alloc/min-order:
+- /sys/kernel/debug/fail_page_alloc/min-order:
 
        specifies the minimum page allocation order to be injected
        failures.
@@ -166,13 +166,13 @@ o Inject slab allocation failures into module init/exit code
 #!/bin/bash
 
 FAILTYPE=failslab
-echo Y > /debug/$FAILTYPE/task-filter
-echo 10 > /debug/$FAILTYPE/probability
-echo 100 > /debug/$FAILTYPE/interval
-echo -1 > /debug/$FAILTYPE/times
-echo 0 > /debug/$FAILTYPE/space
-echo 2 > /debug/$FAILTYPE/verbose
-echo 1 > /debug/$FAILTYPE/ignore-gfp-wait
+echo Y > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
 
 faulty_system()
 {
@@ -217,20 +217,20 @@ then
        exit 1
 fi
 
-cat /sys/module/$module/sections/.text > /debug/$FAILTYPE/require-start
-cat /sys/module/$module/sections/.data > /debug/$FAILTYPE/require-end
+cat /sys/module/$module/sections/.text > /sys/kernel/debug/$FAILTYPE/require-start
+cat /sys/module/$module/sections/.data > /sys/kernel/debug/$FAILTYPE/require-end
 
-echo N > /debug/$FAILTYPE/task-filter
-echo 10 > /debug/$FAILTYPE/probability
-echo 100 > /debug/$FAILTYPE/interval
-echo -1 > /debug/$FAILTYPE/times
-echo 0 > /debug/$FAILTYPE/space
-echo 2 > /debug/$FAILTYPE/verbose
-echo 1 > /debug/$FAILTYPE/ignore-gfp-wait
-echo 1 > /debug/$FAILTYPE/ignore-gfp-highmem
-echo 10 > /debug/$FAILTYPE/stacktrace-depth
+echo N > /sys/kernel/debug/$FAILTYPE/task-filter
+echo 10 > /sys/kernel/debug/$FAILTYPE/probability
+echo 100 > /sys/kernel/debug/$FAILTYPE/interval
+echo -1 > /sys/kernel/debug/$FAILTYPE/times
+echo 0 > /sys/kernel/debug/$FAILTYPE/space
+echo 2 > /sys/kernel/debug/$FAILTYPE/verbose
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait
+echo 1 > /sys/kernel/debug/$FAILTYPE/ignore-gfp-highmem
+echo 10 > /sys/kernel/debug/$FAILTYPE/stacktrace-depth
 
-trap "echo 0 > /debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
+trap "echo 0 > /sys/kernel/debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
 
 echo "Injecting errors into the module $module... (interrupt to stop)"
 sleep 1000000
index ee277dd204b0f575165b02f0dfc1fcb27c8d0bf8..950d5a658cb33860e05abaf2673b19e12482d388 100644 (file)
@@ -95,7 +95,7 @@ There is no way to change the vesafb video mode and/or timings after
 booting linux.  If you are not happy with the 60 Hz refresh rate, you
 have these options:
 
- * configure and load the DOS-Tools for your the graphics board (if
+ * configure and load the DOS-Tools for the graphics board (if
    available) and boot linux with loadlin.
  * use a native driver (matroxfb/atyfb) instead if vesafb.  If none
    is available, write a new one!
index cd8717a362712fbb537bf562eaa95b0be778dd3b..ebff3c10a07fc124b865f815de5b38d65f38d1fa 100644 (file)
@@ -1003,11 +1003,13 @@ CHAPTER 3: PER-PROCESS PARAMETERS
 3.1 /proc/<pid>/oom_adj - Adjust the oom-killer score
 ------------------------------------------------------
 
-This file can be used to adjust the score used to select which processes
-should be killed in an  out-of-memory  situation.  Giving it a high score will
-increase the likelihood of this process being killed by the oom-killer.  Valid
-values are in the range -16 to +15, plus the special value -17, which disables
-oom-killing altogether for this process.
+This file can be used to adjust the score used to select which processes should
+be killed in an out-of-memory situation.  The oom_adj value is a characteristic
+of the task's mm, so all threads that share an mm with pid will have the same
+oom_adj value.  A high value will increase the likelihood of this process being
+killed by the oom-killer.  Valid values are in the range -16 to +15 as
+explained below and a special value of -17, which disables oom-killing
+altogether for threads sharing pid's mm.
 
 The process to be killed in an out-of-memory situation is selected among all others
 based on its badness score. This value equals the original memory size of the process
@@ -1021,6 +1023,9 @@ the parent's score if they do not share the same memory. Thus forking servers
 are the prime candidates to be killed. Having only one 'hungry' child will make
 parent less preferable than the child.
 
+/proc/<pid>/oom_adj cannot be changed for kthreads since they are immune from
+oom-killing already.
+
 /proc/<pid>/oom_score shows process' current badness score.
 
 The following heuristics are then applied:
index c3480aa66ba8048bf46483544ed6fb3ebb7151eb..7eceaff63f5ffb8f45748d0cb1f46b864652b69a 100644 (file)
@@ -77,7 +77,8 @@
    seconds for the whole load operation.
 
  - request_firmware_nowait() is also provided for convenience in
-   non-user contexts.
+   user contexts to request firmware asynchronously, but can't be called
+   in atomic contexts.
 
 
  about in-kernel persistence:
index ad38006307725ff2a563d9e86e78230df0879e22..5578248c18a46837e6c88b0b1779cb2e08b436c3 100644 (file)
@@ -546,6 +546,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        console=brl,ttyS0
                For now, only VisioBraille is supported.
 
+       consoleblank=   [KNL] The console blank (screen saver) timeout in
+                       seconds. Defaults to 10*60 = 10mins. A value of 0
+                       disables the blank timer.
+
        coredump_filter=
                        [KNL] Change the default value for
                        /proc/<pid>/coredump_filter.
diff --git a/Documentation/kmemcheck.txt b/Documentation/kmemcheck.txt
new file mode 100644 (file)
index 0000000..3630446
--- /dev/null
@@ -0,0 +1,773 @@
+GETTING STARTED WITH KMEMCHECK
+==============================
+
+Vegard Nossum <vegardno@ifi.uio.no>
+
+
+Contents
+========
+0. Introduction
+1. Downloading
+2. Configuring and compiling
+3. How to use
+3.1. Booting
+3.2. Run-time enable/disable
+3.3. Debugging
+3.4. Annotating false positives
+4. Reporting errors
+5. Technical description
+
+
+0. Introduction
+===============
+
+kmemcheck is a debugging feature for the Linux Kernel. More specifically, it
+is a dynamic checker that detects and warns about some uses of uninitialized
+memory.
+
+Userspace programmers might be familiar with Valgrind's memcheck. The main
+difference between memcheck and kmemcheck is that memcheck works for userspace
+programs only, and kmemcheck works for the kernel only. The implementations
+are of course vastly different. Because of this, kmemcheck is not as accurate
+as memcheck, but it turns out to be good enough in practice to discover real
+programmer errors that the compiler is not able to find through static
+analysis.
+
+Enabling kmemcheck on a kernel will probably slow it down to the extent that
+the machine will not be usable for normal workloads such as e.g. an
+interactive desktop. kmemcheck will also cause the kernel to use about twice
+as much memory as normal. For this reason, kmemcheck is strictly a debugging
+feature.
+
+
+1. Downloading
+==============
+
+kmemcheck can only be downloaded using git. If you want to write patches
+against the current code, you should use the kmemcheck development branch of
+the tip tree. It is also possible to use the linux-next tree, which also
+includes the latest version of kmemcheck.
+
+Assuming that you've already cloned the linux-2.6.git repository, all you
+have to do is add the -tip tree as a remote, like this:
+
+       $ git remote add tip git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git
+
+To actually download the tree, fetch the remote:
+
+       $ git fetch tip
+
+And to check out a new local branch with the kmemcheck code:
+
+       $ git checkout -b kmemcheck tip/kmemcheck
+
+General instructions for the -tip tree can be found here:
+http://people.redhat.com/mingo/tip.git/readme.txt
+
+
+2. Configuring and compiling
+============================
+
+kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of
+configuration variables must have specific settings in order for the kmemcheck
+menu to even appear in "menuconfig". These are:
+
+  o CONFIG_CC_OPTIMIZE_FOR_SIZE=n
+
+       This option is located under "General setup" / "Optimize for size".
+
+       Without this, gcc will use certain optimizations that usually lead to
+       false positive warnings from kmemcheck. An example of this is a 16-bit
+       field in a struct, where gcc may load 32 bits, then discard the upper
+       16 bits. kmemcheck sees only the 32-bit load, and may trigger a
+       warning for the upper 16 bits (if they're uninitialized).
+
+  o CONFIG_SLAB=y or CONFIG_SLUB=y
+
+       This option is located under "General setup" / "Choose SLAB
+       allocator".
+
+  o CONFIG_FUNCTION_TRACER=n
+
+       This option is located under "Kernel hacking" / "Tracers" / "Kernel
+       Function Tracer"
+
+       When function tracing is compiled in, gcc emits a call to another
+       function at the beginning of every function. This means that when the
+       page fault handler is called, the ftrace framework will be called
+       before kmemcheck has had a chance to handle the fault. If ftrace then
+       modifies memory that was tracked by kmemcheck, the result is an
+       endless recursive page fault.
+
+  o CONFIG_DEBUG_PAGEALLOC=n
+
+       This option is located under "Kernel hacking" / "Debug page memory
+       allocations".
+
+In addition, I highly recommend turning on CONFIG_DEBUG_INFO=y. This is also
+located under "Kernel hacking". With this, you will be able to get line number
+information from the kmemcheck warnings, which is extremely valuable in
+debugging a problem. This option is not mandatory, however, because it slows
+down the compilation process and produces a much bigger kernel image.
+
+Now the kmemcheck menu should be visible (under "Kernel hacking" / "kmemcheck:
+trap use of uninitialized memory"). Here follows a description of the
+kmemcheck configuration variables:
+
+  o CONFIG_KMEMCHECK
+
+       This must be enabled in order to use kmemcheck at all...
+
+  o CONFIG_KMEMCHECK_[DISABLED | ENABLED | ONESHOT]_BY_DEFAULT
+
+       This option controls the status of kmemcheck at boot-time. "Enabled"
+       will enable kmemcheck right from the start, "disabled" will boot the
+       kernel as normal (but with the kmemcheck code compiled in, so it can
+       be enabled at run-time after the kernel has booted), and "one-shot" is
+       a special mode which will turn kmemcheck off automatically after
+       detecting the first use of uninitialized memory.
+
+       If you are using kmemcheck to actively debug a problem, then you
+       probably want to choose "enabled" here.
+
+       The one-shot mode is mostly useful in automated test setups because it
+       can prevent floods of warnings and increase the chances of the machine
+       surviving in case something is really wrong. In other cases, the one-
+       shot mode could actually be counter-productive because it would turn
+       itself off at the very first error -- in the case of a false positive
+       too -- and this would come in the way of debugging the specific
+       problem you were interested in.
+
+       If you would like to use your kernel as normal, but with a chance to
+       enable kmemcheck in case of some problem, it might be a good idea to
+       choose "disabled" here. When kmemcheck is disabled, most of the run-
+       time overhead is not incurred, and the kernel will be almost as fast
+       as normal.
+
+  o CONFIG_KMEMCHECK_QUEUE_SIZE
+
+       Select the maximum number of error reports to store in an internal
+       (fixed-size) buffer. Since errors can occur virtually anywhere and in
+       any context, we need a temporary storage area which is guaranteed not
+       to generate any other page faults when accessed. The queue will be
+       emptied as soon as a tasklet may be scheduled. If the queue is full,
+       new error reports will be lost.
+
+       The default value of 64 is probably fine. If some code produces more
+       than 64 errors within an irqs-off section, then the code is likely to
+       produce many, many more, too, and these additional reports seldom give
+       any more information (the first report is usually the most valuable
+       anyway).
+
+       This number might have to be adjusted if you are not using serial
+       console or similar to capture the kernel log. If you are using the
+       "dmesg" command to save the log, then getting a lot of kmemcheck
+       warnings might overflow the kernel log itself, and the earlier reports
+       will get lost in that way instead. Try setting this to 10 or so on
+       such a setup.
+
+  o CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT
+
+       Select the number of shadow bytes to save along with each entry of the
+       error-report queue. These bytes indicate what parts of an allocation
+       are initialized, uninitialized, etc. and will be displayed when an
+       error is detected to help the debugging of a particular problem.
+
+       The number entered here is actually the logarithm of the number of
+       bytes that will be saved. So if you pick for example 5 here, kmemcheck
+       will save 2^5 = 32 bytes.
+
+       The default value should be fine for debugging most problems. It also
+       fits nicely within 80 columns.
+
+  o CONFIG_KMEMCHECK_PARTIAL_OK
+
+       This option (when enabled) works around certain GCC optimizations that
+       produce 32-bit reads from 16-bit variables where the upper 16 bits are
+       thrown away afterwards.
+
+       The default value (enabled) is recommended. This may of course hide
+       some real errors, but disabling it would probably produce a lot of
+       false positives.
+
+  o CONFIG_KMEMCHECK_BITOPS_OK
+
+       This option silences warnings that would be generated for bit-field
+       accesses where not all the bits are initialized at the same time. This
+       may also hide some real bugs.
+
+       This option is probably obsolete, or it should be replaced with
+       the kmemcheck-/bitfield-annotations for the code in question. The
+       default value is therefore fine.
+
+Now compile the kernel as usual.
+
+
+3. How to use
+=============
+
+3.1. Booting
+============
+
+First some information about the command-line options. There is only one
+option specific to kmemcheck, and this is called "kmemcheck". It can be used
+to override the default mode as chosen by the CONFIG_KMEMCHECK_*_BY_DEFAULT
+option. Its possible settings are:
+
+  o kmemcheck=0 (disabled)
+  o kmemcheck=1 (enabled)
+  o kmemcheck=2 (one-shot mode)
+
+If SLUB debugging has been enabled in the kernel, it may take precedence over
+kmemcheck in such a way that the slab caches which are under SLUB debugging
+will not be tracked by kmemcheck. In order to ensure that this doesn't happen
+(even though it shouldn't by default), use SLUB's boot option "slub_debug",
+like this: slub_debug=-
+
+In fact, this option may also be used for fine-grained control over SLUB vs.
+kmemcheck. For example, if the command line includes "kmemcheck=1
+slub_debug=,dentry", then SLUB debugging will be used only for the "dentry"
+slab cache, and with kmemcheck tracking all the other caches. This is advanced
+usage, however, and is not generally recommended.
+
+
+3.2. Run-time enable/disable
+============================
+
+When the kernel has booted, it is possible to enable or disable kmemcheck at
+run-time. WARNING: This feature is still experimental and may cause false
+positive warnings to appear. Therefore, try not to use this. If you find that
+it doesn't work properly (e.g. you see an unreasonable amount of warnings), I
+will be happy to take bug reports.
+
+Use the file /proc/sys/kernel/kmemcheck for this purpose, e.g.:
+
+       $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck
+
+The numbers are the same as for the kmemcheck= command-line option.
+
+
+3.3. Debugging
+==============
+
+A typical report will look something like this:
+
+WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+         ^
+
+Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A
+RIP: 0010:[<ffffffff8104ede8>]  [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
+RSP: 0018:ffff88003cdf7d98  EFLAGS: 00210002
+RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
+RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84
+RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000
+R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e
+R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8
+FS:  0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000
+CS:  0010 DS: 002b ES: 002b CR0: 0000000080050033
+CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400
+ [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
+ [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
+ [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
+ [<ffffffff8100c7b5>] int_signal+0x12/0x17
+ [<ffffffffffffffff>] 0xffffffffffffffff
+
+The single most valuable information in this report is the RIP (or EIP on 32-
+bit) value. This will help us pinpoint exactly which instruction caused
+the warning.
+
+If your kernel was compiled with CONFIG_DEBUG_INFO=y, then all we have to do
+is give this address to the addr2line program, like this:
+
+       $ addr2line -e vmlinux -i ffffffff8104ede8
+       arch/x86/include/asm/string_64.h:12
+       include/asm-generic/siginfo.h:287
+       kernel/signal.c:380
+       kernel/signal.c:410
+
+The "-e vmlinux" tells addr2line which file to look in. IMPORTANT: This must
+be the vmlinux of the kernel that produced the warning in the first place! If
+not, the line number information will almost certainly be wrong.
+
+The "-i" tells addr2line to also print the line numbers of inlined functions.
+In this case, the flag was very important, because otherwise, it would only
+have printed the first line, which is just a call to memcpy(), which could be
+called from a thousand places in the kernel, and is therefore not very useful.
+These inlined functions would not show up in the stack trace above, simply
+because the kernel doesn't load the extra debugging information. This
+technique can of course be used with ordinary kernel oopses as well.
+
+In this case, it's the caller of memcpy() that is interesting, and it can be
+found in include/asm-generic/siginfo.h, line 287:
+
+281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+282 {
+283         if (from->si_code < 0)
+284                 memcpy(to, from, sizeof(*to));
+285         else
+286                 /* _sigchld is currently the largest know union member */
+287                 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+288 }
+
+Since this was a read (kmemcheck usually warns about reads only, though it can
+warn about writes to unallocated or freed memory as well), it was probably the
+"from" argument which contained some uninitialized bytes. Following the chain
+of calls, we move upwards to see where "from" was allocated or initialized,
+kernel/signal.c, line 380:
+
+359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+360 {
+...
+367         list_for_each_entry(q, &list->list, list) {
+368                 if (q->info.si_signo == sig) {
+369                         if (first)
+370                                 goto still_pending;
+371                         first = q;
+...
+377         if (first) {
+378 still_pending:
+379                 list_del_init(&first->list);
+380                 copy_siginfo(info, &first->info);
+381                 __sigqueue_free(first);
+...
+392         }
+393 }
+
+Here, it is &first->info that is being passed on to copy_siginfo(). The
+variable "first" was found on a list -- passed in as the second argument to
+collect_signal(). We continue our journey through the stack, to figure out
+where the item on "list" was allocated or initialized. We move to line 410:
+
+395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
+396                         siginfo_t *info)
+397 {
+...
+410                 collect_signal(sig, pending, info);
+...
+414 }
+
+Now we need to follow the "pending" pointer, since that is being passed on to
+collect_signal() as "list". At this point, we've run out of lines from the
+"addr2line" output. Not to worry, we just paste the next addresses from the
+kmemcheck stack dump, i.e.:
+
+ [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
+ [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
+ [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
+ [<ffffffff8100c7b5>] int_signal+0x12/0x17
+
+       $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \
+               ffffffff8100b87d ffffffff8100c7b5
+       kernel/signal.c:446
+       kernel/signal.c:1806
+       arch/x86/kernel/signal.c:805
+       arch/x86/kernel/signal.c:871
+       arch/x86/kernel/entry_64.S:694
+
+Remember that since these addresses were found on the stack and not as the
+RIP value, they actually point to the _next_ instruction (they are return
+addresses). This becomes obvious when we look at the code for line 446:
+
+422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+423 {
+...
+431                 signr = __dequeue_signal(&tsk->signal->shared_pending,
+432                                          mask, info);
+433                 /*
+434                  * itimer signal ?
+435                  *
+436                  * itimers are process shared and we restart periodic
+437                  * itimers in the signal delivery path to prevent DoS
+438                  * attacks in the high resolution timer case. This is
+439                  * compliant with the old way of self restarting
+440                  * itimers, as the SIGALRM is a legacy signal and only
+441                  * queued once. Changing the restart behaviour to
+442                  * restart the timer in the signal dequeue path is
+443                  * reducing the timer noise on heavy loaded !highres
+444                  * systems too.
+445                  */
+446                 if (unlikely(signr == SIGALRM)) {
+...
+489 }
+
+So instead of looking at 446, we should be looking at 431, which is the line
+that executes just before 446. Here we see that what we are looking for is
+&tsk->signal->shared_pending.
+
+Our next task is now to figure out which function puts items on this
+"shared_pending" list. A crude, but efficient tool, is git grep:
+
+       $ git grep -n 'shared_pending' kernel/
+       ...
+       kernel/signal.c:828:    pending = group ? &t->signal->shared_pending : &t->pending;
+       kernel/signal.c:1339:   pending = group ? &t->signal->shared_pending : &t->pending;
+       ...
+
+There were more results, but none of them were related to list operations,
+and these were the only assignments. We inspect the line numbers more closely
+and find that this is indeed where items are being added to the list:
+
+816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+817                         int group)
+818 {
+...
+828         pending = group ? &t->signal->shared_pending : &t->pending;
+...
+851         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+852                                              (is_si_special(info) ||
+853                                               info->si_code >= 0)));
+854         if (q) {
+855                 list_add_tail(&q->list, &pending->list);
+...
+890 }
+
+and:
+
+1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
+1310 {
+....
+1339         pending = group ? &t->signal->shared_pending : &t->pending;
+1340         list_add_tail(&q->list, &pending->list);
+....
+1347 }
+
+In the first case, the list element we are looking for, "q", is being returned
+from the function __sigqueue_alloc(), which looks like an allocation function.
+Let's take a look at it:
+
+187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
+188                                          int override_rlimit)
+189 {
+190         struct sigqueue *q = NULL;
+191         struct user_struct *user;
+192 
+193         /*
+194          * We won't get problems with the target's UID changing under us
+195          * because changing it requires RCU be used, and if t != current, the
+196          * caller must be holding the RCU readlock (by way of a spinlock) and
+197          * we use RCU protection here
+198          */
+199         user = get_uid(__task_cred(t)->user);
+200         atomic_inc(&user->sigpending);
+201         if (override_rlimit ||
+202             atomic_read(&user->sigpending) <=
+203                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+204                 q = kmem_cache_alloc(sigqueue_cachep, flags);
+205         if (unlikely(q == NULL)) {
+206                 atomic_dec(&user->sigpending);
+207                 free_uid(user);
+208         } else {
+209                 INIT_LIST_HEAD(&q->list);
+210                 q->flags = 0;
+211                 q->user = user;
+212         }
+213 
+214         return q;
+215 }
+
+We see that this function initializes q->list, q->flags, and q->user. It seems
+that now is the time to look at the definition of "struct sigqueue", e.g.:
+
+14 struct sigqueue {
+15         struct list_head list;
+16         int flags;
+17         siginfo_t info;
+18         struct user_struct *user;
+19 };
+
+And, you might remember, it was a memcpy() on &first->info that caused the
+warning, so this makes perfect sense. It also seems reasonable to assume that
+it is the caller of __sigqueue_alloc() that has the responsibility of filling
+out (initializing) this member.
+
+But just which fields of the struct were uninitialized? Let's look at
+kmemcheck's report again:
+
+WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+         ^
+
+These first two lines are the memory dump of the memory object itself, and the
+shadow bytemap, respectively. The memory object itself is in this case
+&first->info. Just beware that the start of this dump is NOT the start of the
+object itself! The position of the caret (^) corresponds with the address of
+the read (ffff88003e4a2024).
+
+The shadow bytemap dump legend is as follows:
+
+  i - initialized
+  u - uninitialized
+  a - unallocated (memory has been allocated by the slab layer, but has not
+      yet been handed off to anybody)
+  f - freed (memory has been allocated by the slab layer, but has been freed
+      by the previous owner)
+
+In order to figure out where (relative to the start of the object) the
+uninitialized memory was located, we have to look at the disassembly. For
+that, we'll need the RIP address again:
+
+RIP: 0010:[<ffffffff8104ede8>]  [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
+
+       $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8:
+       ffffffff8104edc8:       mov    %r8,0x8(%r8)
+       ffffffff8104edcc:       test   %r10d,%r10d
+       ffffffff8104edcf:       js     ffffffff8104ee88 <__dequeue_signal+0x168>
+       ffffffff8104edd5:       mov    %rax,%rdx
+       ffffffff8104edd8:       mov    $0xc,%ecx
+       ffffffff8104eddd:       mov    %r13,%rdi
+       ffffffff8104ede0:       mov    $0x30,%eax
+       ffffffff8104ede5:       mov    %rdx,%rsi
+       ffffffff8104ede8:       rep movsl %ds:(%rsi),%es:(%rdi)
+       ffffffff8104edea:       test   $0x2,%al
+       ffffffff8104edec:       je     ffffffff8104edf0 <__dequeue_signal+0xd0>
+       ffffffff8104edee:       movsw  %ds:(%rsi),%es:(%rdi)
+       ffffffff8104edf0:       test   $0x1,%al
+       ffffffff8104edf2:       je     ffffffff8104edf5 <__dequeue_signal+0xd5>
+       ffffffff8104edf4:       movsb  %ds:(%rsi),%es:(%rdi)
+       ffffffff8104edf5:       mov    %r8,%rdi
+       ffffffff8104edf8:       callq  ffffffff8104de60 <__sigqueue_free>
+
+As expected, it's the "rep movsl" instruction from the memcpy() that causes
+the warning. We know about REP MOVSL that it uses the register RCX to count
+the number of remaining iterations. By taking a look at the register dump
+again (from the kmemcheck report), we can figure out how many bytes were left
+to copy:
+
+RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
+
+By looking at the disassembly, we also see that %ecx is being loaded with the
+value $0xc just before (ffffffff8104edd8), so we are very lucky. Keep in mind
+that this is the number of iterations, not bytes. And since this is a "long"
+operation, we need to multiply by 4 to get the number of bytes. So this means
+that the uninitialized value was encountered at 4 * (0xc - 0x9) = 12 bytes
+from the start of the object.
+
+We can now try to figure out which field of the "struct siginfo" that was not
+initialized. This is the beginning of the struct:
+
+40 typedef struct siginfo {
+41         int si_signo;
+42         int si_errno;
+43         int si_code;
+44                 
+45         union {
+..
+92         } _sifields;
+93 } siginfo_t;
+
+On 64-bit, the int is 4 bytes long, so it must be the union member that has
+not been initialized. We can verify this using gdb:
+
+       $ gdb vmlinux
+       ...
+       (gdb) p &((struct siginfo *) 0)->_sifields
+       $1 = (union {...} *) 0x10
+
+Actually, it seems that the union member is located at offset 0x10 -- which
+means that gcc has inserted 4 bytes of padding between the members si_code
+and _sifields. We can now get a fuller picture of the memory dump:
+
+         _----------------------------=> si_code
+        /        _--------------------=> (padding)
+       |        /        _------------=> _sifields(._kill._pid)
+       |       |        /        _----=> _sifields(._kill._uid)
+       |       |       |        / 
+-------|-------|-------|-------|
+80000000000000000000000000000000000000000088ffff0000000000000000
+ i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
+
+This allows us to realize another important fact: si_code contains the value
+0x80. Remember that x86 is little endian, so the first 4 bytes "80000000" are
+really the number 0x00000080. With a bit of research, we find that this is
+actually the constant SI_KERNEL defined in include/asm-generic/siginfo.h:
+
+144 #define SI_KERNEL       0x80            /* sent by the kernel from somewhere     */
+
+This macro is used in exactly one place in the x86 kernel: In send_signal()
+in kernel/signal.c:
+
+816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+817                         int group)
+818 {
+...
+828         pending = group ? &t->signal->shared_pending : &t->pending;
+...
+851         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+852                                              (is_si_special(info) ||
+853                                               info->si_code >= 0)));
+854         if (q) {
+855                 list_add_tail(&q->list, &pending->list);
+856                 switch ((unsigned long) info) {
+...
+865                 case (unsigned long) SEND_SIG_PRIV:
+866                         q->info.si_signo = sig;
+867                         q->info.si_errno = 0;
+868                         q->info.si_code = SI_KERNEL;
+869                         q->info.si_pid = 0;
+870                         q->info.si_uid = 0;
+871                         break;
+...
+890 }
+
+Not only does this match with the .si_code member, it also matches the place
+we found earlier when looking for where siginfo_t objects are enqueued on the
+"shared_pending" list.
+
+So to sum up: It seems that it is the padding introduced by the compiler
+between two struct fields that is uninitialized, and this gets reported when
+we do a memcpy() on the struct. This means that we have identified a false
+positive warning.
+
+Normally, kmemcheck will not report uninitialized accesses in memcpy() calls
+when both the source and destination addresses are tracked. (Instead, we copy
+the shadow bytemap as well). In this case, the destination address clearly
+was not tracked. We can dig a little deeper into the stack trace from above:
+
+       arch/x86/kernel/signal.c:805
+       arch/x86/kernel/signal.c:871
+       arch/x86/kernel/entry_64.S:694
+
+And we clearly see that the destination siginfo object is located on the
+stack:
+
+782 static void do_signal(struct pt_regs *regs)
+783 {
+784         struct k_sigaction ka;
+785         siginfo_t info;
+...
+804         signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+...
+854 }
+
+And this &info is what eventually gets passed to copy_siginfo() as the
+destination argument.
+
+Now, even though we didn't find an actual error here, the example is still a
+good one, because it shows how one would go about to find out what the report
+was all about.
+
+
+3.4. Annotating false positives
+===============================
+
+There are a few different ways to make annotations in the source code that
+will keep kmemcheck from checking and reporting certain allocations. Here
+they are:
+
+  o __GFP_NOTRACK_FALSE_POSITIVE
+
+       This flag can be passed to kmalloc() or kmem_cache_alloc() (therefore
+       also to other functions that end up calling one of these) to indicate
+       that the allocation should not be tracked because it would lead to
+       a false positive report. This is a "big hammer" way of silencing
+       kmemcheck; after all, even if the false positive pertains to a
+       particular field in a struct, for example, we will now lose the
+       ability to find (real) errors in other parts of the same struct.
+
+       Example:
+
+           /* No warnings will ever trigger on accessing any part of x */
+           x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
+
+  o kmemcheck_bitfield_begin(name)/kmemcheck_bitfield_end(name) and
+       kmemcheck_annotate_bitfield(ptr, name)
+
+       The first two of these three macros can be used inside struct
+       definitions to signal, respectively, the beginning and end of a
+       bitfield. Additionally, this will assign the bitfield a name, which
+       is given as an argument to the macros.
+
+       Having used these markers, one can later use
+       kmemcheck_annotate_bitfield() at the point of allocation, to indicate
+       which parts of the allocation are part of a bitfield.
+
+       Example:
+
+           struct foo {
+               int x;
+
+               kmemcheck_bitfield_begin(flags);
+               int flag_a:1;
+               int flag_b:1;
+               kmemcheck_bitfield_end(flags);
+
+               int y;
+           };
+
+           struct foo *x = kmalloc(sizeof *x);
+
+           /* No warnings will trigger on accessing the bitfield of x */
+           kmemcheck_annotate_bitfield(x, flags);
+
+       Note that kmemcheck_annotate_bitfield() can be used even before the
+       return value of kmalloc() is checked -- in other words, passing NULL
+       as the first argument is legal (and will do nothing).
+
+
+4. Reporting errors
+===================
+
+As we have seen, kmemcheck will produce false positive reports. Therefore, it
+is not very wise to blindly post kmemcheck warnings to mailing lists and
+maintainers. Instead, I encourage maintainers and developers to find errors
+in their own code. If you get a warning, you can try to work around it, try
+to figure out if it's a real error or not, or simply ignore it. Most
+developers know their own code and will quickly and efficiently determine the
+root cause of a kmemcheck report. This is therefore also the most efficient
+way to work with kmemcheck.
+
+That said, we (the kmemcheck maintainers) will always be on the lookout for
+false positives that we can annotate and silence. So whatever you find,
+please drop us a note privately! Kernel configs and steps to reproduce (if
+available) are of course a great help too.
+
+Happy hacking!
+
+
+5. Technical description
+========================
+
+kmemcheck works by marking memory pages non-present. This means that whenever
+somebody attempts to access the page, a page fault is generated. The page
+fault handler notices that the page was in fact only hidden, and so it calls
+on the kmemcheck code to make further investigations.
+
+When the investigations are completed, kmemcheck "shows" the page by marking
+it present (as it would be under normal circumstances). This way, the
+interrupted code can continue as usual.
+
+But after the instruction has been executed, we should hide the page again, so
+that we can catch the next access too! Now kmemcheck makes use of a debugging
+feature of the processor, namely single-stepping. When the processor has
+finished the one instruction that generated the memory access, a debug
+exception is raised. From here, we simply hide the page again and continue
+execution, this time with the single-stepping feature turned off.
+
+kmemcheck requires some assistance from the memory allocator in order to work.
+The memory allocator needs to
+
+  1. Tell kmemcheck about newly allocated pages and pages that are about to
+     be freed. This allows kmemcheck to set up and tear down the shadow memory
+     for the pages in question. The shadow memory stores the status of each
+     byte in the allocation proper, e.g. whether it is initialized or
+     uninitialized.
+
+  2. Tell kmemcheck which parts of memory should be marked uninitialized.
+     There are actually a few more states, such as "not yet allocated" and
+     "recently freed".
+
+If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
+memory that can take page faults because of kmemcheck.
+
+If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
+request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags.
+This does not prevent the page faults from occurring, however, but marks the
+object in question as being initialized so that no warnings will ever be
+produced for this object.
+
+Currently, the SLAB and SLUB allocators are supported by kmemcheck.
index 1e7a769a10f97354a0b3aa15229e5606723a4cd8..053037a1fe6d3b4943219c55d82dafca593c8d03 100644 (file)
@@ -507,9 +507,9 @@ http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115)
 Appendix A: The kprobes debugfs interface
 
 With recent kernels (> 2.6.20) the list of registered kprobes is visible
-under the /debug/kprobes/ directory (assuming debugfs is mounted at /debug).
+under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at /sys/kernel/debug).
 
-/debug/kprobes/list: Lists all registered probes on the system
+/sys/kernel/debug/kprobes/list: Lists all registered probes on the system
 
 c015d71a  k  vfs_read+0x0
 c011a316  j  do_fork+0x0
@@ -525,7 +525,7 @@ virtual addresses that correspond to modules that've been unloaded),
 such probes are marked with [GONE]. If the probe is temporarily disabled,
 such probes are marked with [DISABLED].
 
-/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
+/sys/kernel/debug/kprobes/enabled: Turn kprobes ON/OFF forcibly.
 
 Provides a knob to globally and forcibly turn registered kprobes ON or OFF.
 By default, all kprobes are enabled. By echoing "0" to this file, all
index 6fab2dcbb4d37f4a02accdcae64ebc8ede16cd9f..c4de6359d440e4dbf92d585e8a7e577d62d475c1 100644 (file)
@@ -233,8 +233,8 @@ These protections are added to score to judge whether this zone should be used
 for page allocation or should be reclaimed.
 
 In this example, if normal pages (index=2) are required to this DMA zone and
-pages_high is used for watermark, the kernel judges this zone should not be
-used because pages_free(1355) is smaller than watermark + protection[2]
+watermark[WMARK_HIGH] is used for watermark, the kernel judges this zone should
+not be used because pages_free(1355) is smaller than watermark + protection[2]
 (4 + 2004 = 2008). If this protection value is 0, this zone would be used for
 normal page requirement. If requirement is DMA zone(index=0), protection[0]
 (=0) is used.
@@ -280,9 +280,10 @@ The default value is 65536.
 min_free_kbytes:
 
 This is used to force the Linux VM to keep a minimum number
-of kilobytes free.  The VM uses this number to compute a pages_min
-value for each lowmem zone in the system.  Each lowmem zone gets
-a number of reserved free pages based proportionally on its size.
+of kilobytes free.  The VM uses this number to compute a
+watermark[WMARK_MIN] value for each lowmem zone in the system.
+Each lowmem zone gets a number of reserved free pages based
+proportionally on its size.
 
 Some minimal amount of memory is needed to satisfy PF_MEMALLOC
 allocations; if you set this to lower than 1024KB, your system will
@@ -314,10 +315,14 @@ min_unmapped_ratio:
 
 This is available only on NUMA kernels.
 
-A percentage of the total pages in each zone.  Zone reclaim will only
-occur if more than this percentage of pages are file backed and unmapped.
-This is to insure that a minimal amount of local pages is still available for
-file I/O even if the node is overallocated.
+This is a percentage of the total pages in each zone. Zone reclaim will
+only occur if more than this percentage of pages are in a state that
+zone_reclaim_mode allows to be reclaimed.
+
+If zone_reclaim_mode has the value 4 OR'd, then the percentage is compared
+against all file-backed unmapped pages including swapcache pages and tmpfs
+files. Otherwise, only unmapped pages backed by normal files but not tmpfs
+files and similar are considered.
 
 The default is 1 percent.
 
index 7bd27f0e288008c641c31474341a2dd84ef8bf73..a39b3c749de58c12129d80d0ffad738f9a7836f0 100644 (file)
@@ -7,7 +7,6 @@ Copyright 2008 Red Hat Inc.
                (dual licensed under the GPL v2)
 Reviewers:   Elias Oltmanns, Randy Dunlap, Andrew Morton,
             John Kacur, and David Teigland.
-
 Written for: 2.6.28-rc2
 
 Introduction
@@ -33,13 +32,26 @@ The File System
 Ftrace uses the debugfs file system to hold the control files as
 well as the files to display output.
 
-To mount the debugfs system:
+When debugfs is configured into the kernel (which selecting any ftrace
+option will do) the directory /sys/kernel/debug will be created. To mount
+this directory, you can add to your /etc/fstab file:
+
+ debugfs       /sys/kernel/debug          debugfs defaults        0       0
+
+Or you can mount it at run time with:
+
+ mount -t debugfs nodev /sys/kernel/debug
 
-  # mkdir /debug
-  # mount -t debugfs nodev /debug
+For quicker access to that directory you may want to make a soft link to
+it:
 
-( Note: it is more common to mount at /sys/kernel/debug, but for
-  simplicity this document will use /debug)
+ ln -s /sys/kernel/debug /debug
+
+Any selected ftrace option will also create a directory called tracing
+within the debugfs. The rest of the document will assume that you are in
+the ftrace directory (cd /sys/kernel/debug/tracing) and will only concentrate
+on the files within that directory and not distract from the content with
+the extended "/sys/kernel/debug/tracing" path name.
 
 That's it! (assuming that you have ftrace configured into your kernel)
 
@@ -389,18 +401,18 @@ trace_options
 The trace_options file is used to control what gets printed in
 the trace output. To see what is available, simply cat the file:
 
-  cat /debug/tracing/trace_options
+  cat trace_options
   print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
   noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj
 
 To disable one of the options, echo in the option prepended with
 "no".
 
-  echo noprint-parent > /debug/tracing/trace_options
+  echo noprint-parent > trace_options
 
 To enable an option, leave off the "no".
 
-  echo sym-offset > /debug/tracing/trace_options
+  echo sym-offset > trace_options
 
 Here are the available options:
 
@@ -476,11 +488,11 @@ sched_switch
 This tracer simply records schedule switches. Here is an example
 of how to use it.
 
- # echo sched_switch > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo sched_switch > current_tracer
+ # echo 1 > tracing_enabled
  # sleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
 
 # tracer: sched_switch
 #
@@ -583,13 +595,13 @@ new trace is saved.
 To reset the maximum, echo 0 into tracing_max_latency. Here is
 an example:
 
- # echo irqsoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo irqsoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
  # ls -ltr
  [...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
 # tracer: irqsoff
 #
 irqsoff latency trace v1.1.5 on 2.6.26
@@ -690,13 +702,13 @@ Like the irqsoff tracer, it records the maximum latency for
 which preemption was disabled. The control of preemptoff tracer
 is much like the irqsoff tracer.
 
- # echo preemptoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo preemptoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
  # ls -ltr
  [...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
 # tracer: preemptoff
 #
 preemptoff latency trace v1.1.5 on 2.6.26-rc8
@@ -837,13 +849,13 @@ tracer.
 Again, using this trace is much like the irqsoff and preemptoff
 tracers.
 
- # echo preemptirqsoff > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo preemptirqsoff > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
  # ls -ltr
  [...]
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
 # tracer: preemptirqsoff
 #
 preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
@@ -999,12 +1011,12 @@ slightly differently than we did with the previous tracers.
 Instead of performing an 'ls', we will run 'sleep 1' under
 'chrt' which changes the priority of the task.
 
- # echo wakeup > /debug/tracing/current_tracer
- # echo 0 > /debug/tracing/tracing_max_latency
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo wakeup > current_tracer
+ # echo 0 > tracing_max_latency
+ # echo 1 > tracing_enabled
  # chrt -f 5 sleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/latency_trace
+ # echo 0 > tracing_enabled
+ # cat latency_trace
 # tracer: wakeup
 #
 wakeup latency trace v1.1.5 on 2.6.26-rc8
@@ -1114,11 +1126,11 @@ can be done from the debug file system. Make sure the
 ftrace_enabled is set; otherwise this tracer is a nop.
 
  # sysctl kernel.ftrace_enabled=1
- # echo function > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo function > current_tracer
+ # echo 1 > tracing_enabled
  # usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
 # tracer: function
 #
 #           TASK-PID   CPU#    TIMESTAMP  FUNCTION
@@ -1155,7 +1167,7 @@ int trace_fd;
 [...]
 int main(int argc, char *argv[]) {
        [...]
-       trace_fd = open("/debug/tracing/tracing_enabled", O_WRONLY);
+       trace_fd = open(tracing_file("tracing_enabled"), O_WRONLY);
        [...]
        if (condition_hit()) {
                write(trace_fd, "0", 1);
@@ -1163,26 +1175,20 @@ int main(int argc, char *argv[]) {
        [...]
 }
 
-Note: Here we hard coded the path name. The debugfs mount is not
-guaranteed to be at /debug (and is more commonly at
-/sys/kernel/debug). For simple one time traces, the above is
-sufficent. For anything else, a search through /proc/mounts may
-be needed to find where the debugfs file-system is mounted.
-
 
 Single thread tracing
 ---------------------
 
-By writing into /debug/tracing/set_ftrace_pid you can trace a
+By writing into set_ftrace_pid you can trace a
 single thread. For example:
 
-# cat /debug/tracing/set_ftrace_pid
+# cat set_ftrace_pid
 no pid
-# echo 3111 > /debug/tracing/set_ftrace_pid
-# cat /debug/tracing/set_ftrace_pid
+# echo 3111 > set_ftrace_pid
+# cat set_ftrace_pid
 3111
-# echo function > /debug/tracing/current_tracer
-# cat /debug/tracing/trace | head
+# echo function > current_tracer
+# cat trace | head
  # tracer: function
  #
  #           TASK-PID    CPU#    TIMESTAMP  FUNCTION
@@ -1193,8 +1199,8 @@ no pid
      yum-updatesd-3111  [003]  1637.254683: lock_hrtimer_base <-hrtimer_try_to_cancel
      yum-updatesd-3111  [003]  1637.254685: fget_light <-do_sys_poll
      yum-updatesd-3111  [003]  1637.254686: pipe_poll <-do_sys_poll
-# echo -1 > /debug/tracing/set_ftrace_pid
-# cat /debug/tracing/trace |head
+# echo -1 > set_ftrace_pid
+# cat trace |head
  # tracer: function
  #
  #           TASK-PID    CPU#    TIMESTAMP  FUNCTION
@@ -1216,6 +1222,51 @@ something like this simple program:
 #include <fcntl.h>
 #include <unistd.h>
 
+#define _STR(x) #x
+#define STR(x) _STR(x)
+#define MAX_PATH 256
+
+const char *find_debugfs(void)
+{
+       static char debugfs[MAX_PATH+1];
+       static int debugfs_found;
+       char type[100];
+       FILE *fp;
+
+       if (debugfs_found)
+               return debugfs;
+
+       if ((fp = fopen("/proc/mounts","r")) == NULL) {
+               perror("/proc/mounts");
+               return NULL;
+       }
+
+       while (fscanf(fp, "%*s %"
+                     STR(MAX_PATH)
+                     "s %99s %*s %*d %*d\n",
+                     debugfs, type) == 2) {
+               if (strcmp(type, "debugfs") == 0)
+                       break;
+       }
+       fclose(fp);
+
+       if (strcmp(type, "debugfs") != 0) {
+               fprintf(stderr, "debugfs not mounted");
+               return NULL;
+       }
+
+       debugfs_found = 1;
+
+       return debugfs;
+}
+
+const char *tracing_file(const char *file_name)
+{
+       static char trace_file[MAX_PATH+1];
+       snprintf(trace_file, MAX_PATH, "%s/%s", find_debugfs(), file_name);
+       return trace_file;
+}
+
 int main (int argc, char **argv)
 {
         if (argc < 1)
@@ -1226,12 +1277,12 @@ int main (int argc, char **argv)
                 char line[64];
                 int s;
 
-                ffd = open("/debug/tracing/current_tracer", O_WRONLY);
+                ffd = open(tracing_file("current_tracer"), O_WRONLY);
                 if (ffd < 0)
                         exit(-1);
                 write(ffd, "nop", 3);
 
-                fd = open("/debug/tracing/set_ftrace_pid", O_WRONLY);
+                fd = open(tracing_file("set_ftrace_pid"), O_WRONLY);
                 s = sprintf(line, "%d\n", getpid());
                 write(fd, line, s);
 
@@ -1383,22 +1434,22 @@ want, depending on your needs.
   tracing_cpu_mask file) or you might sometimes see unordered
   function calls while cpu tracing switch.
 
-       hide: echo nofuncgraph-cpu > /debug/tracing/trace_options
-       show: echo funcgraph-cpu > /debug/tracing/trace_options
+       hide: echo nofuncgraph-cpu > trace_options
+       show: echo funcgraph-cpu > trace_options
 
 - The duration (function's time of execution) is displayed on
   the closing bracket line of a function or on the same line
   than the current function in case of a leaf one. It is default
   enabled.
 
-       hide: echo nofuncgraph-duration > /debug/tracing/trace_options
-       show: echo funcgraph-duration > /debug/tracing/trace_options
+       hide: echo nofuncgraph-duration > trace_options
+       show: echo funcgraph-duration > trace_options
 
 - The overhead field precedes the duration field in case of
   reached duration thresholds.
 
-       hide: echo nofuncgraph-overhead > /debug/tracing/trace_options
-       show: echo funcgraph-overhead > /debug/tracing/trace_options
+       hide: echo nofuncgraph-overhead > trace_options
+       show: echo funcgraph-overhead > trace_options
        depends on: funcgraph-duration
 
   ie:
@@ -1427,8 +1478,8 @@ want, depending on your needs.
 - The task/pid field displays the thread cmdline and pid which
   executed the function. It is default disabled.
 
-       hide: echo nofuncgraph-proc > /debug/tracing/trace_options
-       show: echo funcgraph-proc > /debug/tracing/trace_options
+       hide: echo nofuncgraph-proc > trace_options
+       show: echo funcgraph-proc > trace_options
 
   ie:
 
@@ -1451,8 +1502,8 @@ want, depending on your needs.
   system clock since it started. A snapshot of this time is
   given on each entry/exit of functions
 
-       hide: echo nofuncgraph-abstime > /debug/tracing/trace_options
-       show: echo funcgraph-abstime > /debug/tracing/trace_options
+       hide: echo nofuncgraph-abstime > trace_options
+       show: echo funcgraph-abstime > trace_options
 
   ie:
 
@@ -1549,7 +1600,7 @@ listed in:
 
    available_filter_functions
 
- # cat /debug/tracing/available_filter_functions
+ # cat available_filter_functions
 put_prev_task_idle
 kmem_cache_create
 pick_next_task_rt
@@ -1561,12 +1612,12 @@ mutex_lock
 If I am only interested in sys_nanosleep and hrtimer_interrupt:
 
  # echo sys_nanosleep hrtimer_interrupt \
-               > /debug/tracing/set_ftrace_filter
- # echo ftrace > /debug/tracing/current_tracer
- # echo 1 > /debug/tracing/tracing_enabled
+               > set_ftrace_filter
+ # echo ftrace > current_tracer
+ # echo 1 > tracing_enabled
  # usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
 # tracer: ftrace
 #
 #           TASK-PID   CPU#    TIMESTAMP  FUNCTION
@@ -1577,7 +1628,7 @@ If I am only interested in sys_nanosleep and hrtimer_interrupt:
 
 To see which functions are being traced, you can cat the file:
 
- # cat /debug/tracing/set_ftrace_filter
+ # cat set_ftrace_filter
 hrtimer_interrupt
 sys_nanosleep
 
@@ -1597,7 +1648,7 @@ Note: It is better to use quotes to enclose the wild cards,
       otherwise the shell may expand the parameters into names
       of files in the local directory.
 
- # echo 'hrtimer_*' > /debug/tracing/set_ftrace_filter
+ # echo 'hrtimer_*' > set_ftrace_filter
 
 Produces:
 
@@ -1618,7 +1669,7 @@ Produces:
 
 Notice that we lost the sys_nanosleep.
 
- # cat /debug/tracing/set_ftrace_filter
+ # cat set_ftrace_filter
 hrtimer_run_queues
 hrtimer_run_pending
 hrtimer_init
@@ -1644,17 +1695,17 @@ To append to the filters, use '>>'
 To clear out a filter so that all functions will be recorded
 again:
 
- # echo > /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo > set_ftrace_filter
+ # cat set_ftrace_filter
  #
 
 Again, now we want to append.
 
- # echo sys_nanosleep > /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo sys_nanosleep > set_ftrace_filter
+ # cat set_ftrace_filter
 sys_nanosleep
- # echo 'hrtimer_*' >> /debug/tracing/set_ftrace_filter
- # cat /debug/tracing/set_ftrace_filter
+ # echo 'hrtimer_*' >> set_ftrace_filter
+ # cat set_ftrace_filter
 hrtimer_run_queues
 hrtimer_run_pending
 hrtimer_init
@@ -1677,7 +1728,7 @@ hrtimer_init_sleeper
 The set_ftrace_notrace prevents those functions from being
 traced.
 
- # echo '*preempt*' '*lock*' > /debug/tracing/set_ftrace_notrace
+ # echo '*preempt*' '*lock*' > set_ftrace_notrace
 
 Produces:
 
@@ -1767,13 +1818,13 @@ the effect on the tracing is different. Every read from
 trace_pipe is consumed. This means that subsequent reads will be
 different. The trace is live.
 
- # echo function > /debug/tracing/current_tracer
- # cat /debug/tracing/trace_pipe > /tmp/trace.out &
+ # echo function > current_tracer
+ # cat trace_pipe > /tmp/trace.out &
 [1] 4153
- # echo 1 > /debug/tracing/tracing_enabled
+ # echo 1 > tracing_enabled
  # usleep 1
- # echo 0 > /debug/tracing/tracing_enabled
- # cat /debug/tracing/trace
+ # echo 0 > tracing_enabled
+ # cat trace
 # tracer: function
 #
 #           TASK-PID   CPU#    TIMESTAMP  FUNCTION
@@ -1809,7 +1860,7 @@ number listed is the number of entries that can be recorded per
 CPU. To know the full size, multiply the number of possible CPUS
 with the number of entries.
 
- # cat /debug/tracing/buffer_size_kb
+ # cat buffer_size_kb
 1408 (units kilobytes)
 
 Note, to modify this, you must have tracing completely disabled.
@@ -1817,18 +1868,18 @@ To do that, echo "nop" into the current_tracer. If the
 current_tracer is not set to "nop", an EINVAL error will be
 returned.
 
- # echo nop > /debug/tracing/current_tracer
- # echo 10000 > /debug/tracing/buffer_size_kb
- # cat /debug/tracing/buffer_size_kb
+ # echo nop > current_tracer
+ # echo 10000 > buffer_size_kb
+ # cat buffer_size_kb
 10000 (units kilobytes)
 
 The number of pages which will be allocated is limited to a
 percentage of available memory. Allocating too much will produce
 an error.
 
- # echo 1000000000000 > /debug/tracing/buffer_size_kb
+ # echo 1000000000000 > buffer_size_kb
 -bash: echo: write error: Cannot allocate memory
- # cat /debug/tracing/buffer_size_kb
+ # cat buffer_size_kb
 85
 
 -----------
index 5731c67abc558f4ac91d9f54d78bf88cacb8dad6..162effbfbdec09b947a914b27cc7197cc87f1ba9 100644 (file)
@@ -32,41 +32,41 @@ is no way to automatically detect if you are losing events due to CPUs racing.
 Usage Quick Reference
 ---------------------
 
-$ mount -t debugfs debugfs /debug
-$ echo mmiotrace > /debug/tracing/current_tracer
-$ cat /debug/tracing/trace_pipe > mydump.txt &
+$ mount -t debugfs debugfs /sys/kernel/debug
+$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
+$ cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 Start X or whatever.
-$ echo "X is up" > /debug/tracing/trace_marker
-$ echo nop > /debug/tracing/current_tracer
+$ echo "X is up" > /sys/kernel/debug/tracing/trace_marker
+$ echo nop > /sys/kernel/debug/tracing/current_tracer
 Check for lost events.
 
 
 Usage
 -----
 
-Make sure debugfs is mounted to /debug. If not, (requires root privileges)
-$ mount -t debugfs debugfs /debug
+Make sure debugfs is mounted to /sys/kernel/debug. If not, (requires root privileges)
+$ mount -t debugfs debugfs /sys/kernel/debug
 
 Check that the driver you are about to trace is not loaded.
 
 Activate mmiotrace (requires root privileges):
-$ echo mmiotrace > /debug/tracing/current_tracer
+$ echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 
 Start storing the trace:
-$ cat /debug/tracing/trace_pipe > mydump.txt &
+$ cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 The 'cat' process should stay running (sleeping) in the background.
 
 Load the driver you want to trace and use it. Mmiotrace will only catch MMIO
 accesses to areas that are ioremapped while mmiotrace is active.
 
 During tracing you can place comments (markers) into the trace by
-$ echo "X is up" > /debug/tracing/trace_marker
+$ echo "X is up" > /sys/kernel/debug/tracing/trace_marker
 This makes it easier to see which part of the (huge) trace corresponds to
 which action. It is recommended to place descriptive markers about what you
 do.
 
 Shut down mmiotrace (requires root privileges):
-$ echo nop > /debug/tracing/current_tracer
+$ echo nop > /sys/kernel/debug/tracing/current_tracer
 The 'cat' process exits. If it does not, kill it by issuing 'fg' command and
 pressing ctrl+c.
 
@@ -78,10 +78,10 @@ to view your kernel log and look for "mmiotrace has lost events" warning. If
 events were lost, the trace is incomplete. You should enlarge the buffers and
 try again. Buffers are enlarged by first seeing how large the current buffers
 are:
-$ cat /debug/tracing/buffer_size_kb
+$ cat /sys/kernel/debug/tracing/buffer_size_kb
 gives you a number. Approximately double this number and write it back, for
 instance:
-$ echo 128000 > /debug/tracing/buffer_size_kb
+$ echo 128000 > /sys/kernel/debug/tracing/buffer_size_kb
 Then start again from the top.
 
 If you are doing a trace for a driver project, e.g. Nouveau, you should also
index 6f562f778b289e48f51e4b0e7b1740c89f4ff683..27479d43a9b06c53a7070d5bab1d33bb66c54caf 100644 (file)
@@ -2,7 +2,7 @@
 obj- := dummy.o
 
 # List of programs to build
-hostprogs-y := slabinfo
+hostprogs-y := slabinfo slqbinfo page-types
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
index bd3d31bc49150f6ec2ad7127273dc0f38e420f7f..c46e68cf93449aadb652bdce92953c53bb00ca22 100644 (file)
@@ -75,15 +75,15 @@ Page stealing from process memory and shm is done if stealing the page would
 alleviate memory pressure on any zone in the page's node that has fallen below
 its watermark.
 
-pages_min/pages_low/pages_high/low_on_memory/zone_wake_kswapd: These are 
-per-zone fields, used to determine when a zone needs to be balanced. When
-the number of pages falls below pages_min, the hysteric field low_on_memory
-gets set. This stays set till the number of free pages becomes pages_high.
-When low_on_memory is set, page allocation requests will try to free some
-pages in the zone (providing GFP_WAIT is set in the request). Orthogonal
-to this, is the decision to poke kswapd to free some zone pages. That
-decision is not hysteresis based, and is done when the number of free
-pages is below pages_low; in which case zone_wake_kswapd is also set.
+watermark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd: These
+are per-zone fields, used to determine when a zone needs to be balanced. When
+the number of pages falls below watermark[WMARK_MIN], the hysteric field
+low_on_memory gets set. This stays set till the number of free pages becomes
+watermark[WMARK_HIGH]. When low_on_memory is set, page allocation requests will
+try to free some pages in the zone (providing GFP_WAIT is set in the request).
+Orthogonal to this, is the decision to poke kswapd to free some zone pages.
+That decision is not hysteresis based, and is done when the number of free
+pages is below watermark[WMARK_LOW]; in which case zone_wake_kswapd is also set.
 
 
 (Good) Ideas that I have heard:
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
new file mode 100644 (file)
index 0000000..0833f44
--- /dev/null
@@ -0,0 +1,698 @@
+/*
+ * page-types: Tool for querying page flags
+ *
+ * Copyright (C) 2009 Intel corporation
+ * Copyright (C) 2009 Wu Fengguang <fengguang.wu@intel.com>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <string.h>
+#include <getopt.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/fcntl.h>
+
+
+/*
+ * kernel page flags
+ */
+
+#define KPF_BYTES              8
+#define PROC_KPAGEFLAGS                "/proc/kpageflags"
+
+/* copied from kpageflags_read() */
+#define KPF_LOCKED             0
+#define KPF_ERROR              1
+#define KPF_REFERENCED         2
+#define KPF_UPTODATE           3
+#define KPF_DIRTY              4
+#define KPF_LRU                        5
+#define KPF_ACTIVE             6
+#define KPF_SLAB               7
+#define KPF_WRITEBACK          8
+#define KPF_RECLAIM            9
+#define KPF_BUDDY              10
+
+/* [11-20] new additions in 2.6.31 */
+#define KPF_MMAP               11
+#define KPF_ANON               12
+#define KPF_SWAPCACHE          13
+#define KPF_SWAPBACKED         14
+#define KPF_COMPOUND_HEAD      15
+#define KPF_COMPOUND_TAIL      16
+#define KPF_HUGE               17
+#define KPF_UNEVICTABLE                18
+#define KPF_NOPAGE             20
+
+/* [32-] kernel hacking assistances */
+#define KPF_RESERVED           32
+#define KPF_MLOCKED            33
+#define KPF_MAPPEDTODISK       34
+#define KPF_PRIVATE            35
+#define KPF_PRIVATE_2          36
+#define KPF_OWNER_PRIVATE      37
+#define KPF_ARCH               38
+#define KPF_UNCACHED           39
+
+/* [48-] take some arbitrary free slots for expanding overloaded flags
+ * not part of kernel API
+ */
+#define KPF_READAHEAD          48
+#define KPF_SLOB_FREE          49
+#define KPF_SLUB_FROZEN                50
+#define KPF_SLUB_DEBUG         51
+
+#define KPF_ALL_BITS           ((uint64_t)~0ULL)
+#define KPF_HACKERS_BITS       (0xffffULL << 32)
+#define KPF_OVERLOADED_BITS    (0xffffULL << 48)
+#define BIT(name)              (1ULL << KPF_##name)
+#define BITS_COMPOUND          (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL))
+
+static char *page_flag_names[] = {
+       [KPF_LOCKED]            = "L:locked",
+       [KPF_ERROR]             = "E:error",
+       [KPF_REFERENCED]        = "R:referenced",
+       [KPF_UPTODATE]          = "U:uptodate",
+       [KPF_DIRTY]             = "D:dirty",
+       [KPF_LRU]               = "l:lru",
+       [KPF_ACTIVE]            = "A:active",
+       [KPF_SLAB]              = "S:slab",
+       [KPF_WRITEBACK]         = "W:writeback",
+       [KPF_RECLAIM]           = "I:reclaim",
+       [KPF_BUDDY]             = "B:buddy",
+
+       [KPF_MMAP]              = "M:mmap",
+       [KPF_ANON]              = "a:anonymous",
+       [KPF_SWAPCACHE]         = "s:swapcache",
+       [KPF_SWAPBACKED]        = "b:swapbacked",
+       [KPF_COMPOUND_HEAD]     = "H:compound_head",
+       [KPF_COMPOUND_TAIL]     = "T:compound_tail",
+       [KPF_HUGE]              = "G:huge",
+       [KPF_UNEVICTABLE]       = "u:unevictable",
+       [KPF_NOPAGE]            = "n:nopage",
+
+       [KPF_RESERVED]          = "r:reserved",
+       [KPF_MLOCKED]           = "m:mlocked",
+       [KPF_MAPPEDTODISK]      = "d:mappedtodisk",
+       [KPF_PRIVATE]           = "P:private",
+       [KPF_PRIVATE_2]         = "p:private_2",
+       [KPF_OWNER_PRIVATE]     = "O:owner_private",
+       [KPF_ARCH]              = "h:arch",
+       [KPF_UNCACHED]          = "c:uncached",
+
+       [KPF_READAHEAD]         = "I:readahead",
+       [KPF_SLOB_FREE]         = "P:slob_free",
+       [KPF_SLUB_FROZEN]       = "A:slub_frozen",
+       [KPF_SLUB_DEBUG]        = "E:slub_debug",
+};
+
+
+/*
+ * data structures
+ */
+
+static int             opt_raw;        /* for kernel developers */
+static int             opt_list;       /* list pages (in ranges) */
+static int             opt_no_summary; /* don't show summary */
+static pid_t           opt_pid;        /* process to walk */
+
+#define MAX_ADDR_RANGES        1024
+static int             nr_addr_ranges;
+static unsigned long   opt_offset[MAX_ADDR_RANGES];
+static unsigned long   opt_size[MAX_ADDR_RANGES];
+
+#define MAX_BIT_FILTERS        64
+static int             nr_bit_filters;
+static uint64_t                opt_mask[MAX_BIT_FILTERS];
+static uint64_t                opt_bits[MAX_BIT_FILTERS];
+
+static int             page_size;
+
+#define PAGES_BATCH    (64 << 10)      /* 64k pages */
+static int             kpageflags_fd;
+static uint64_t                kpageflags_buf[KPF_BYTES * PAGES_BATCH];
+
+#define HASH_SHIFT     13
+#define HASH_SIZE      (1 << HASH_SHIFT)
+#define HASH_MASK      (HASH_SIZE - 1)
+#define HASH_KEY(flags)        (flags & HASH_MASK)
+
+static unsigned long   total_pages;
+static unsigned long   nr_pages[HASH_SIZE];
+static uint64_t        page_flags[HASH_SIZE];
+
+
+/*
+ * helper functions
+ */
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define min_t(type, x, y) ({                   \
+       type __min1 = (x);                      \
+       type __min2 = (y);                      \
+       __min1 < __min2 ? __min1 : __min2; })
+
+unsigned long pages2mb(unsigned long pages)
+{
+       return (pages * page_size) >> 20;
+}
+
+void fatal(const char *x, ...)
+{
+       va_list ap;
+
+       va_start(ap, x);
+       vfprintf(stderr, x, ap);
+       va_end(ap);
+       exit(EXIT_FAILURE);
+}
+
+
+/*
+ * page flag names
+ */
+
+char *page_flag_name(uint64_t flags)
+{
+       static char buf[65];
+       int present;
+       int i, j;
+
+       for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
+               present = (flags >> i) & 1;
+               if (!page_flag_names[i]) {
+                       if (present)
+                               fatal("unknown flag bit %d\n", i);
+                       continue;
+               }
+               buf[j++] = present ? page_flag_names[i][0] : '_';
+       }
+
+       return buf;
+}
+
+char *page_flag_longname(uint64_t flags)
+{
+       static char buf[1024];
+       int i, n;
+
+       for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) {
+               if (!page_flag_names[i])
+                       continue;
+               if ((flags >> i) & 1)
+                       n += snprintf(buf + n, sizeof(buf) - n, "%s,",
+                                       page_flag_names[i] + 2);
+       }
+       if (n)
+               n--;
+       buf[n] = '\0';
+
+       return buf;
+}
+
+
+/*
+ * page list and summary
+ */
+
+void show_page_range(unsigned long offset, uint64_t flags)
+{
+       static uint64_t      flags0;
+       static unsigned long index;
+       static unsigned long count;
+
+       if (flags == flags0 && offset == index + count) {
+               count++;
+               return;
+       }
+
+       if (count)
+               printf("%lu\t%lu\t%s\n",
+                               index, count, page_flag_name(flags0));
+
+       flags0 = flags;
+       index  = offset;
+       count  = 1;
+}
+
+void show_page(unsigned long offset, uint64_t flags)
+{
+       printf("%lu\t%s\n", offset, page_flag_name(flags));
+}
+
+/*
+ * Print the per-flags-combination page counts accumulated in the
+ * page_flags[]/nr_pages[] hash table, followed by the grand total.
+ */
+void show_summary(void)
+{
+       int i;
+
+       printf("             flags\tpage-count       MB"
+               "  symbolic-flags\t\t\tlong-symbolic-flags\n");
+
+       /* walk every hash slot; empty slots have nr_pages[i] == 0 */
+       for (i = 0; i < ARRAY_SIZE(nr_pages); i++) {
+               if (nr_pages[i])
+                       printf("0x%016llx\t%10lu %8lu  %s\t%s\n",
+                               (unsigned long long)page_flags[i],
+                               nr_pages[i],
+                               pages2mb(nr_pages[i]),
+                               page_flag_name(page_flags[i]),
+                               page_flag_longname(page_flags[i]));
+       }
+
+       printf("             total\t%10lu %8lu\n",
+                       total_pages, pages2mb(total_pages));
+}
+
+
+/*
+ * page flag filters
+ */
+
+/*
+ * Test @flags against every user supplied -b filter.  A filter whose
+ * bits value is KPF_ALL_BITS requires *any* of the masked bits to be
+ * set; otherwise the masked bits must match the requested pattern
+ * exactly.  Returns 1 iff all filters accept @flags.
+ */
+int bit_mask_ok(uint64_t flags)
+{
+       int i;
+
+       for (i = 0; i < nr_bit_filters; i++) {
+               if (opt_bits[i] == KPF_ALL_BITS) {
+                       if ((flags & opt_mask[i]) == 0)
+                               return 0;
+               } else {
+                       if ((flags & opt_mask[i]) != opt_bits[i])
+                               return 0;
+               }
+       }
+
+       return 1;
+}
+
+/*
+ * Kernel page flags are overloaded: on slab pages PG_private/PG_active/
+ * PG_error carry allocator-internal meanings, and PG_reclaim doubles as
+ * PG_readahead outside of writeback.  Rewrite such bits into dedicated
+ * pseudo flags (SLOB_FREE, SLUB_FROZEN, SLUB_DEBUG, READAHEAD) so they
+ * are reported unambiguously.
+ */
+uint64_t expand_overloaded_flags(uint64_t flags)
+{
+       /* SLOB/SLUB overload several page flags */
+       if (flags & BIT(SLAB)) {
+               /* the XOR clears the generic bit and sets the pseudo bit */
+               if (flags & BIT(PRIVATE))
+                       flags ^= BIT(PRIVATE) | BIT(SLOB_FREE);
+               if (flags & BIT(ACTIVE))
+                       flags ^= BIT(ACTIVE) | BIT(SLUB_FROZEN);
+               if (flags & BIT(ERROR))
+                       flags ^= BIT(ERROR) | BIT(SLUB_DEBUG);
+       }
+
+       /* PG_reclaim is overloaded as PG_readahead in the read path */
+       if ((flags & (BIT(RECLAIM) | BIT(WRITEBACK))) == BIT(RECLAIM))
+               flags ^= BIT(RECLAIM) | BIT(READAHEAD);
+
+       return flags;
+}
+
+/*
+ * Reduce @flags to the set shown in non-raw mode: drop kernel-hacker
+ * bits and hide compound-page bits on anything that is not hugeTLB.
+ */
+uint64_t well_known_flags(uint64_t flags)
+{
+       /* hide flags intended only for kernel hacker */
+       flags &= ~KPF_HACKERS_BITS;
+
+       /* hide non-hugeTLB compound pages */
+       if ((flags & BITS_COMPOUND) && !(flags & BIT(HUGE)))
+               flags &= ~BITS_COMPOUND;
+
+       return flags;
+}
+
+
+/*
+ * page frame walker
+ */
+
+/*
+ * Map a flags combination to a slot in the page_flags[]/nr_pages[]
+ * open-addressing hash table, inserting it on first sight.  Returns the
+ * slot index; exits the program if the table is full.
+ */
+int hash_slot(uint64_t flags)
+{
+       int k = HASH_KEY(flags);
+       int i;
+
+       /* Explicitly reserve slot 0 for flags 0: the following logic
+        * cannot distinguish an unoccupied slot from slot (flags==0).
+        */
+       if (flags == 0)
+               return 0;
+
+       /* search through the remaining (HASH_SIZE-1) slots */
+       for (i = 1; i < ARRAY_SIZE(page_flags); i++, k++) {
+               /* wrap the probe to slot 1 (slot 0 is reserved above) */
+               if (!k || k >= ARRAY_SIZE(page_flags))
+                       k = 1;
+               if (page_flags[k] == 0) {
+                       page_flags[k] = flags;
+                       return k;
+               }
+               if (page_flags[k] == flags)
+                       return k;
+       }
+
+       fatal("hash table full: bump up HASH_SHIFT?\n");
+       exit(EXIT_FAILURE);
+}
+
+/*
+ * Account one page: normalize its flags, apply the -b filters, emit the
+ * optional -l/-L listing line and bump the per-flags and total counters.
+ */
+void add_page(unsigned long offset, uint64_t flags)
+{
+       flags = expand_overloaded_flags(flags);
+
+       /* non-raw mode hides hacker-only and non-huge compound bits */
+       if (!opt_raw)
+               flags = well_known_flags(flags);
+
+       if (!bit_mask_ok(flags))
+               return;
+
+       /* opt_list: 1 == ranges (-l), 2 == one line per page (-L) */
+       if (opt_list == 1)
+               show_page_range(offset, flags);
+       else if (opt_list == 2)
+               show_page(offset, flags);
+
+       nr_pages[hash_slot(flags)]++;
+       total_pages++;
+}
+
+/*
+ * Read the kpageflags entries for PFNs [index, index + count) in batches
+ * of up to PAGES_BATCH and feed each page's flags to add_page().  Stops
+ * early on EOF; exits on read errors or overflowing seek offsets.
+ */
+void walk_pfn(unsigned long index, unsigned long count)
+{
+       unsigned long batch;
+       unsigned long pages;
+       unsigned long i;
+       ssize_t bytes;
+
+       if (index > ULONG_MAX / KPF_BYTES)
+               fatal("index overflow: %lu\n", index);
+
+       lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET);
+
+       while (count) {
+               batch = min_t(unsigned long, count, PAGES_BATCH);
+               /*
+                * read() returns ssize_t; storing it straight into an
+                * unsigned long (as the original did) makes the error
+                * check "n < 0" unreachable.  Keep the raw return value
+                * signed so -1 is actually detected.
+                */
+               bytes = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES);
+               if (bytes == 0)
+                       break;
+               if (bytes < 0) {
+                       perror(PROC_KPAGEFLAGS);
+                       exit(EXIT_FAILURE);
+               }
+
+               if (bytes % KPF_BYTES != 0)
+                       fatal("partial read: %lu bytes\n",
+                                       (unsigned long)bytes);
+               pages = bytes / KPF_BYTES;
+
+               for (i = 0; i < pages; i++)
+                       add_page(index + i, kpageflags_buf[i]);
+
+               /* advance by the requested batch, as the original did,
+                * even if the read returned fewer pages */
+               index += batch;
+               count -= batch;
+       }
+}
+
+/*
+ * Open /proc/kpageflags and walk either every requested -a range or,
+ * when none was given, the whole PFN space.
+ */
+void walk_addr_ranges(void)
+{
+       int i;
+
+       kpageflags_fd = open(PROC_KPAGEFLAGS, O_RDONLY);
+       if (kpageflags_fd < 0) {
+               perror(PROC_KPAGEFLAGS);
+               exit(EXIT_FAILURE);
+       }
+
+       /* no -a option given: scan everything (walk_pfn stops at EOF) */
+       if (!nr_addr_ranges)
+               walk_pfn(0, ULONG_MAX);
+
+       for (i = 0; i < nr_addr_ranges; i++)
+               walk_pfn(opt_offset[i], opt_size[i]);
+
+       close(kpageflags_fd);
+}
+
+
+/*
+ * user interface
+ */
+
+/*
+ * Classify a single flag bit for the usage() listing: "(r)" raw-mode
+ * only, "(o)" overloaded, or blank padding for ordinary flags.
+ */
+const char *page_flag_type(uint64_t flag)
+{
+       if (flag & KPF_HACKERS_BITS)
+               return "(r)";
+       if (flag & KPF_OVERLOADED_BITS)
+               return "(o)";
+       return "   ";
+}
+
+/*
+ * Print the command line help text, including the table of known page
+ * flag names annotated by page_flag_type() ((r) raw / (o) overloaded).
+ */
+void usage(void)
+{
+       int i, j;
+
+       printf(
+"page-types [options]\n"
+"            -r|--raw                  Raw mode, for kernel developers\n"
+"            -a|--addr    addr-spec    Walk a range of pages\n"
+"            -b|--bits    bits-spec    Walk pages with specified bits\n"
+#if 0 /* planned features */
+"            -p|--pid     pid          Walk process address space\n"
+"            -f|--file    filename     Walk file address space\n"
+#endif
+"            -l|--list                 Show page details in ranges\n"
+"            -L|--list-each            Show page details one by one\n"
+"            -N|--no-summary           Don't show summary info\n"
+"            -h|--help                 Show this usage message\n"
+"addr-spec:\n"
+"            N                         one page at offset N (unit: pages)\n"
+"            N+M                       pages range from N to N+M-1\n"
+"            N,M                       pages range from N to M-1\n"
+"            N,                        pages range from N to end\n"
+"            ,M                        pages range from 0 to M\n"
+"bits-spec:\n"
+"            bit1,bit2                 (flags & (bit1|bit2)) != 0\n"
+"            bit1,bit2=bit1            (flags & (bit1|bit2)) == bit1\n"
+"            bit1,~bit2                (flags & (bit1|bit2)) == bit1\n"
+"            =bit1,bit2                flags == (bit1|bit2)\n"
+"bit-names:\n"
+       );
+
+       /* dump the known flag names, four per row */
+       for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
+               if (!page_flag_names[i])
+                       continue;
+               printf("%16s%s", page_flag_names[i] + 2,
+                                page_flag_type(1ULL << i));
+               if (++j > 3) {
+                       j = 0;
+                       putchar('\n');
+               }
+       }
+       printf("\n                                   "
+               "(r) raw mode bits  (o) overloaded bits\n");
+}
+
+/*
+ * Parse a number in any strtoll() base (0 prefix rules).  Exits on what
+ * it considers invalid input.
+ *
+ * NOTE(review): error detection is heuristic -- only "result 0 without a
+ * leading '0'" is rejected, so trailing junk ("12abc") and out-of-range
+ * values slip through silently; consider checking endptr/errno.
+ */
+unsigned long long parse_number(const char *str)
+{
+       unsigned long long n;
+
+       n = strtoll(str, NULL, 0);
+
+       if (n == 0 && str[0] != '0')
+               fatal("invalid name or number: %s\n", str);
+
+       return n;
+}
+
+/* Parse the -p/--pid argument (option is still disabled in usage()). */
+void parse_pid(const char *str)
+{
+       opt_pid = parse_number(str);
+}
+
+/* Stub for the planned -f/--file option; currently a no-op. */
+void parse_file(const char *name)
+{
+}
+
+/*
+ * Record one -a page range (start offset and length, in pages) in the
+ * opt_offset[]/opt_size[] tables; exits when MAX_ADDR_RANGES is hit.
+ */
+void add_addr_range(unsigned long offset, unsigned long size)
+{
+       /* "too many", not "too much": ranges are countable */
+       if (nr_addr_ranges >= MAX_ADDR_RANGES)
+               fatal("too many addr ranges\n");
+
+       opt_offset[nr_addr_ranges] = offset;
+       opt_size[nr_addr_ranges] = size;
+       nr_addr_ranges++;
+}
+
+/*
+ * Parse one -a addr-spec and register it via add_addr_range().
+ * Accepted forms (see usage()):
+ *     N       one page at offset N
+ *     N+M     M pages starting at N
+ *     N,M     pages [N, M)  (M must be >= N)
+ *     N,      pages from N to the end
+ *     ,M      pages [0, M)
+ */
+void parse_addr_range(const char *optarg)
+{
+       unsigned long offset;
+       unsigned long size;
+       char *p;
+
+       /* find the separator: ',' has range semantics, '+' count semantics */
+       p = strchr(optarg, ',');
+       if (!p)
+               p = strchr(optarg, '+');
+
+       if (p == optarg) {
+               /* leading separator: ",M" -- start from page 0 */
+               offset = 0;
+               size   = parse_number(p + 1);
+       } else if (p) {
+               offset = parse_number(optarg);
+               if (p[1] == '\0')
+                       size = ULONG_MAX;
+               else {
+                       size = parse_number(p + 1);
+                       /* for "N,M" convert the end offset into a length */
+                       if (*p == ',') {
+                               if (size < offset)
+                                       fatal("invalid range: %lu,%lu\n",
+                                                       offset, size);
+                               size -= offset;
+                       }
+               }
+       } else {
+               /* bare "N": a single page */
+               offset = parse_number(optarg);
+               size   = 1;
+       }
+
+       add_addr_range(offset, size);
+}
+
+/*
+ * Record one -b flags filter (mask + expected bit pattern) in the
+ * opt_mask[]/opt_bits[] tables; exits when MAX_BIT_FILTERS is hit.
+ */
+void add_bits_filter(uint64_t mask, uint64_t bits)
+{
+       /* "too many", not "too much": filters are countable */
+       if (nr_bit_filters >= MAX_BIT_FILTERS)
+               fatal("too many bit filters\n");
+
+       opt_mask[nr_bit_filters] = mask;
+       opt_bits[nr_bit_filters] = bits;
+       nr_bit_filters++;
+}
+
+/*
+ * Translate one flag token of @len characters into its bit value:
+ * the pseudo-name "compound" expands to BITS_COMPOUND, known names from
+ * page_flag_names[] map to their single bit, and anything else is
+ * treated as a literal number (parse_number() exits if it is neither).
+ */
+uint64_t parse_flag_name(const char *str, int len)
+{
+       int i;
+
+       if (!*str || !len)
+               return 0;
+
+       if (len <= 8 && !strncmp(str, "compound", len))
+               return BITS_COMPOUND;
+
+       for (i = 0; i < ARRAY_SIZE(page_flag_names); i++) {
+               if (!page_flag_names[i])
+                       continue;
+               /* "+ 2" skips the short-name prefix in the table entry */
+               if (!strncmp(str, page_flag_names[i] + 2, len))
+                       return 1ULL << i;
+       }
+
+       return parse_number(str);
+}
+
+/*
+ * OR together the bits of a comma separated flag-name list, stopping at
+ * '=' or end of string.  Tokens prefixed with '~' are skipped unless
+ * @all is set, in which case the '~' is stripped and the flag included
+ * -- this is how parse_bits_mask() builds mask vs. bits for "bit,~bit"
+ * specs.
+ */
+uint64_t parse_flag_names(const char *str, int all)
+{
+       const char *p    = str;
+       uint64_t   flags = 0;
+
+       while (1) {
+               /* p scans ahead to the end of the current token */
+               if (*p == ',' || *p == '=' || *p == '\0') {
+                       if ((*str != '~') || (*str == '~' && all && *++str))
+                               flags |= parse_flag_name(str, p - str);
+                       if (*p != ',')
+                               break;
+                       str = p + 1;
+               }
+               p++;
+       }
+
+       return flags;
+}
+
+/*
+ * Parse one -b bits-spec into a (mask, bits) filter (see usage()):
+ *     =LIST        flags must equal LIST exactly
+ *     MASK=BITS    (flags & MASK) == BITS
+ *     LIST,~NEG    negated entries go into the mask but not the bits
+ *     LIST         any of the listed bits set (bits == KPF_ALL_BITS)
+ */
+void parse_bits_mask(const char *optarg)
+{
+       uint64_t mask;
+       uint64_t bits;
+       const char *p;
+
+       p = strchr(optarg, '=');
+       if (p == optarg) {
+               /* leading '=': exact match against all bits */
+               mask = KPF_ALL_BITS;
+               bits = parse_flag_names(p + 1, 0);
+       } else if (p) {
+               mask = parse_flag_names(optarg, 0);
+               bits = parse_flag_names(p + 1, 0);
+       } else if (strchr(optarg, '~')) {
+               /* '~' entries count for the mask (all=1) but not the bits */
+               mask = parse_flag_names(optarg, 1);
+               bits = parse_flag_names(optarg, 0);
+       } else {
+               mask = parse_flag_names(optarg, 0);
+               bits = KPF_ALL_BITS;
+       }
+
+       add_bits_filter(mask, bits);
+}
+
+
+/* long-option table for getopt_long(); mirrors the usage() text */
+struct option opts[] = {
+       { "raw"       , 0, NULL, 'r' },
+       { "pid"       , 1, NULL, 'p' },
+       { "file"      , 1, NULL, 'f' },
+       { "addr"      , 1, NULL, 'a' },
+       { "bits"      , 1, NULL, 'b' },
+       { "list"      , 0, NULL, 'l' },
+       { "list-each" , 0, NULL, 'L' },
+       { "no-summary", 0, NULL, 'N' },
+       { "help"      , 0, NULL, 'h' },
+       { NULL        , 0, NULL, 0 }
+};
+
+/*
+ * Top level driver: parse options, walk the requested PFN ranges
+ * (listing pages/ranges if -l/-L was given), then print the summary
+ * unless -N suppressed it.
+ */
+int main(int argc, char *argv[])
+{
+       int c;
+
+       page_size = getpagesize();
+
+       while ((c = getopt_long(argc, argv,
+                               "rp:f:a:b:lLNh", opts, NULL)) != -1) {
+               switch (c) {
+               case 'r':
+                       opt_raw = 1;
+                       break;
+               case 'p':
+                       parse_pid(optarg);
+                       break;
+               case 'f':
+                       parse_file(optarg);
+                       break;
+               case 'a':
+                       parse_addr_range(optarg);
+                       break;
+               case 'b':
+                       parse_bits_mask(optarg);
+                       break;
+               case 'l':
+                       opt_list = 1;
+                       break;
+               case 'L':
+                       opt_list = 2;
+                       break;
+               case 'N':
+                       opt_no_summary = 1;
+                       break;
+               case 'h':
+                       usage();
+                       exit(0);
+               default:
+                       usage();
+                       exit(1);
+               }
+       }
+
+       /* column headers for the two listing modes */
+       if (opt_list == 1)
+               printf("offset\tcount\tflags\n");
+       if (opt_list == 2)
+               printf("offset\tflags\n");
+
+       walk_addr_ranges();
+
+       if (opt_list == 1)
+               show_page_range(0, 0);  /* drain the buffer */
+
+       if (opt_no_summary)
+               return 0;
+
+       /* separate the listing from the summary */
+       if (opt_list)
+               printf("\n\n");
+
+       show_summary();
+
+       return 0;
+}
index ce72c0fe6177efaeb4afd73d2b7d01372e718727..600a304a828cb2865e63a85e031fb881f27cbbc7 100644 (file)
@@ -12,9 +12,9 @@ There are three components to pagemap:
    value for each virtual page, containing the following data (from
    fs/proc/task_mmu.c, above pagemap_read):
 
-    * Bits 0-55  page frame number (PFN) if present
+    * Bits 0-54  page frame number (PFN) if present
     * Bits 0-4   swap type if swapped
-    * Bits 5-55  swap offset if swapped
+    * Bits 5-54  swap offset if swapped
     * Bits 55-60 page shift (page size = 1<<page shift)
     * Bit  61    reserved for future use
     * Bit  62    page swapped
@@ -36,7 +36,7 @@ There are three components to pagemap:
  * /proc/kpageflags.  This file contains a 64-bit set of flags for each
    page, indexed by PFN.
 
-   The flags are (from fs/proc/proc_misc, above kpageflags_read):
+   The flags are (from fs/proc/page.c, above kpageflags_read):
 
      0. LOCKED
      1. ERROR
@@ -49,6 +49,68 @@ There are three components to pagemap:
      8. WRITEBACK
      9. RECLAIM
     10. BUDDY
+    11. MMAP
+    12. ANON
+    13. SWAPCACHE
+    14. SWAPBACKED
+    15. COMPOUND_HEAD
+    16. COMPOUND_TAIL
+    17. HUGE
+    18. UNEVICTABLE
+    20. NOPAGE
+
+Short descriptions to the page flags:
+
+ 0. LOCKED
+    page is being locked for exclusive access, eg. by undergoing read/write IO
+
+ 7. SLAB
+    page is managed by the SLAB/SLOB/SLUB/SLQB kernel memory allocator
+    When compound page is used, SLUB/SLQB will only set this flag on the head
+    page; SLOB will not flag it at all.
+
+10. BUDDY
+    a free memory block managed by the buddy system allocator
+    The buddy system organizes free memory in blocks of various orders.
+    An order N block has 2^N physically contiguous pages, with the BUDDY flag
+    set for and _only_ for the first page.
+
+15. COMPOUND_HEAD
+16. COMPOUND_TAIL
+    A compound page with order N consists of 2^N physically contiguous pages.
+    A compound page with order 2 takes the form of "HTTT", where H denotes its
+    head page and T denotes its tail page(s).  The major consumers of compound
+    pages are hugeTLB pages (Documentation/vm/hugetlbpage.txt), the SLUB etc.
+    memory allocators and various device drivers. However in this interface,
+    only huge/giga pages are made visible to end users.
+17. HUGE
+    this is an integral part of a HugeTLB page
+
+20. NOPAGE
+    no page frame exists at the requested address
+
+    [IO related page flags]
+ 1. ERROR     IO error occurred
+ 3. UPTODATE  page has up-to-date data
+              ie. for file backed page: (in-memory data revision >= on-disk one)
+ 4. DIRTY     page has been written to, hence contains new data
+              ie. for file backed page: (in-memory data revision >  on-disk one)
+ 8. WRITEBACK page is being synced to disk
+
+    [LRU related page flags]
+ 5. LRU         page is in one of the LRU lists
+ 6. ACTIVE      page is in the active LRU list
+18. UNEVICTABLE page is in the unevictable (non-)LRU list
+                It is somehow pinned and not a candidate for LRU page reclaims,
+               eg. ramfs pages, shmctl(SHM_LOCK) and mlock() memory segments
+ 2. REFERENCED  page has been referenced since last LRU list enqueue/requeue
+ 9. RECLAIM     page will be reclaimed soon after its pageout IO completed
+11. MMAP        a memory mapped page
+12. ANON        a memory mapped page that is not part of a file
+13. SWAPCACHE   page is mapped to swap space, ie. has an associated swap entry
+14. SWAPBACKED  page is backed by swap/RAM
+
+The page-types tool in this directory can be used to query the above flags.
 
 Using pagemap to do something useful:
 
index 09f6b3e5708a17a07076a5ab3da16c3ac3e2a6ad..fb94addb34de463ec8c4af59d1b5861d04fd1e6b 100644 (file)
@@ -36,6 +36,12 @@ trivial patch so apply some common sense.
        (scripts/checkpatch.pl) to catch trival style violations.
        See Documentation/CodingStyle for guidance here.
 
+       PLEASE CC: the maintainers and mailing lists that are generated
+       by scripts/get_maintainer.pl.  The results returned by the
+       script will be best if you have git installed and are making
+       your changes in a branch derived from Linus' latest git tree.
+       See Documentation/SubmittingPatches for details.
+
        PLEASE try to include any credit lines you want added with the
        patch. It avoids people being missed off by mistake and makes
        it easier to know who wants adding and who doesn't.
@@ -489,7 +495,7 @@ AOA (Apple Onboard Audio) ALSA DRIVER
 P:     Johannes Berg
 M:     johannes@sipsolutions.net
 L:     linuxppc-dev@ozlabs.org
-L:     alsa-devel@alsa-project.org (subscribers-only)
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     sound/aoa/
 
@@ -912,7 +918,6 @@ P:  Dan Williams
 M:     dan.j.williams@intel.com
 P:     Maciej Sosnowski
 M:     maciej.sosnowski@intel.com
-L:     linux-kernel@vger.kernel.org
 W:     http://sourceforge.net/projects/xscaleiop
 S:     Supported
 F:     Documentation/crypto/async-tx-api.txt
@@ -1008,7 +1013,6 @@ F:        drivers/mmc/host/at91_mci.c
 ATMEL AT91 / AT32 SERIAL DRIVER
 P:     Haavard Skinnemoen
 M:     hskinnemoen@atmel.com
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     drivers/serial/atmel_serial.c
 
@@ -1064,7 +1068,6 @@ F:        kernel/audit*
 AUXILIARY DISPLAY DRIVERS
 P:     Miguel Ojeda Sandonis
 M:     miguel.ojeda.sandonis@gmail.com
-L:     linux-kernel@vger.kernel.org
 W:     http://miguelojeda.es/auxdisplay.htm
 W:     http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
 S:     Maintained
@@ -1134,7 +1137,6 @@ F:        drivers/net/hamradio/baycom*
 BEFS FILE SYSTEM
 P:     Sergey S. Kostyliov
 M:     rathamahata@php4.ru
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/befs.txt
 F:     fs/befs/
@@ -1142,7 +1144,6 @@ F:        fs/befs/
 BFS FILE SYSTEM
 P:     Tigran A. Aivazian
 M:     tigran@aivazian.fsnet.co.uk
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/bfs.txt
 F:     fs/bfs/
@@ -1199,7 +1200,6 @@ F:        drivers/i2c/busses/i2c-bfin-twi.c
 BLOCK LAYER
 P:     Jens Axboe
 M:     axboe@kernel.dk
-L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
 S:     Maintained
 F:     block/
@@ -1326,7 +1326,6 @@ P:        Muli Ben-Yehuda
 M:     muli@il.ibm.com
 P:     Jon D. Mason
 M:     jdmason@kudzu.us
-L:     linux-kernel@vger.kernel.org
 L:     discuss@x86-64.org
 S:     Maintained
 F:     arch/x86/kernel/pci-calgary_64.c
@@ -1378,7 +1377,6 @@ F:        include/linux/usb/wusb*
 CFAG12864B LCD DRIVER
 P:     Miguel Ojeda Sandonis
 M:     miguel.ojeda.sandonis@gmail.com
-L:     linux-kernel@vger.kernel.org
 W:     http://miguelojeda.es/auxdisplay.htm
 W:     http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
 S:     Maintained
@@ -1388,7 +1386,6 @@ F:        include/linux/cfag12864b.h
 CFAG12864BFB LCD FRAMEBUFFER DRIVER
 P:     Miguel Ojeda Sandonis
 M:     miguel.ojeda.sandonis@gmail.com
-L:     linux-kernel@vger.kernel.org
 W:     http://miguelojeda.es/auxdisplay.htm
 W:     http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
 S:     Maintained
@@ -1408,7 +1405,6 @@ X:        net/wireless/wext*
 CHECKPATCH
 P:     Andy Whitcroft
 M:     apw@canonical.com
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     scripts/checkpatch.pl
 
@@ -1437,7 +1433,7 @@ F:        drivers/usb/host/ohci-ep93xx.c
 CIRRUS LOGIC CS4270 SOUND DRIVER
 P:     Timur Tabi
 M:     timur@freescale.com
-L:     alsa-devel@alsa-project.org
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
 F:     sound/soc/codecs/cs4270*
 
@@ -1462,6 +1458,7 @@ P:        Joe Eykholt
 M:     jeykholt@cisco.com
 L:     linux-scsi@vger.kernel.org
 S:     Supported
+F:     drivers/scsi/fnic/
 
 CODA FILE SYSTEM
 P:     Jan Harkes
@@ -1534,7 +1531,6 @@ F:        drivers/usb/atm/cxacru.c
 CONFIGFS
 P:     Joel Becker
 M:     joel.becker@oracle.com
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     fs/configfs/
 F:     include/linux/configfs.h
@@ -1592,7 +1588,6 @@ F:        arch/x86/kernel/msr.c
 CPUSETS
 P:     Paul Menage
 M:     menage@google.com
-L:     linux-kernel@vger.kernel.org
 W:     http://www.bullopensource.org/cpuset/
 W:     http://oss.sgi.com/projects/cpusets/
 S:     Supported
@@ -1799,7 +1794,6 @@ DEVICE NUMBER REGISTRY
 P:     Torben Mathiasen
 M:     device@lanana.org
 W:     http://lanana.org/docs/device-list/index.html
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
 DEVICE-MAPPER  (LVM)
@@ -1825,7 +1819,6 @@ F:        drivers/char/digi*
 DIRECTORY NOTIFICATION (DNOTIFY)
 P:     Eric Paris
 M:     eparis@parisplace.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/dnotify.txt
 F:     fs/notify/dnotify/
@@ -1842,7 +1835,6 @@ S:        Maintained
 DISKQUOTA
 P:     Jan Kara
 M:     jack@suse.cz
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/quota.txt
 F:     fs/quota/
@@ -1864,7 +1856,6 @@ P:        Maciej Sosnowski
 M:     maciej.sosnowski@intel.com
 P:     Dan Williams
 M:     dan.j.williams@intel.com
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     drivers/dma/
 F:     include/linux/dma*
@@ -1916,7 +1907,6 @@ F:        drivers/scsi/dpt/
 DRIVER CORE, KOBJECTS, AND SYSFS
 P:     Greg Kroah-Hartman
 M:     gregkh@suse.de
-L:     linux-kernel@vger.kernel.org
 T:     quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
 S:     Supported
 F:     Documentation/kobject.txt
@@ -1982,8 +1972,8 @@ F:        net/bridge/netfilter/ebt*.c
 ECRYPT FILE SYSTEM
 P:     Tyler Hicks
 M:     tyhicks@linux.vnet.ibm.com
-M:     Dustin Kirkland
-P:     kirkland@canonical.com
+P:     Dustin Kirkland
+M:     kirkland@canonical.com
 L:     ecryptfs-devel@lists.launchpad.net
 W:     https://launchpad.net/ecryptfs
 S:     Supported
@@ -2263,7 +2253,6 @@ F:        drivers/firewire/
 F:     include/linux/firewire*.h
 
 FIRMWARE LOADER (request_firmware)
-L:     linux-kernel@vger.kernel.org
 S:     Orphan
 F:     Documentation/firmware_class/
 F:     drivers/base/firmware*.c
@@ -2300,7 +2289,6 @@ M:        leoli@freescale.com
 P:     Zhang Wei
 M:     zw@zh-kernel.org
 L:     linuxppc-dev@ozlabs.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     drivers/dma/fsldma.*
 
@@ -2366,7 +2354,7 @@ F:        drivers/serial/ucc_uart.c
 FREESCALE SOC SOUND DRIVERS
 P:     Timur Tabi
 M:     timur@freescale.com
-L:     alsa-devel@alsa-project.org
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     linuxppc-dev@ozlabs.org
 S:     Supported
 F:     sound/soc/fsl/fsl*
@@ -2500,7 +2488,6 @@ F:        drivers/hwmon/hdaps.c
 
 HYPERVISOR VIRTUAL CONSOLE DRIVER
 L:     linuxppc-dev@ozlabs.org
-L:     linux-kernel@vger.kernel.org
 S:     Odd Fixes
 F:     drivers/char/hvc_*
 
@@ -2567,7 +2554,6 @@ F:        sound/parisc/harmony.*
 HAYES ESP SERIAL DRIVER
 P:     Andrew J. Robinson
 M:     arobinso@nyx.net
-L:     linux-kernel@vger.kernel.org
 W:     http://www.nyx.net/~arobinso
 S:     Maintained
 F:     Documentation/serial/hayes-esp.txt
@@ -2593,7 +2579,6 @@ F:        include/linux/cciss_ioctl.h
 HFS FILESYSTEM
 P:     Roman Zippel
 M:     zippel@linux-m68k.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/hfs.txt
 F:     fs/hfs/
@@ -2633,7 +2618,6 @@ F:        include/linux/hid*
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 P:     Thomas Gleixner
 M:     tglx@linutronix.de
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/timers/
 F:     kernel/hrtimer.c
@@ -2772,7 +2756,6 @@ F:        drivers/i2c/busses/i2c-tiny-usb.c
 i386 BOOT CODE
 P:     H. Peter Anvin
 M:     hpa@zytor.com
-L:     Linux-Kernel@vger.kernel.org
 S:     Maintained
 F:     arch/x86/boot/
 
@@ -2902,7 +2885,6 @@ P:        Robert Love
 M:     rlove@rlove.org
 P:     Eric Paris
 M:     eparis@parisplace.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/inotify.txt
 F:     fs/notify/inotify/
@@ -2950,7 +2932,6 @@ F:        arch/x86/kernel/microcode_intel.c
 INTEL I/OAT DMA DRIVER
 P:     Maciej Sosnowski
 M:     maciej.sosnowski@intel.com
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     drivers/dma/ioat*
 
@@ -2966,7 +2947,6 @@ F:        include/linux/intel-iommu.h
 INTEL IOP-ADMA DMA DRIVER
 P:     Dan Williams
 M:     dan.j.williams@intel.com
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     drivers/dma/iop-adma.c
 
@@ -3279,7 +3259,6 @@ M:        vgoyal@redhat.com
 P:     Haren Myneni
 M:     hbabu@us.ibm.com
 L:     kexec@lists.infradead.org
-L:     linux-kernel@vger.kernel.org
 W:     http://lse.sourceforge.net/kdump/
 S:     Maintained
 F:     Documentation/kdump/
@@ -3389,7 +3368,6 @@ KEXEC
 P:     Eric Biederman
 M:     ebiederm@xmission.com
 W:     http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
-L:     linux-kernel@vger.kernel.org
 L:     kexec@lists.infradead.org
 S:     Maintained
 F:     include/linux/kexec.h
@@ -3406,6 +3384,14 @@ F:       drivers/serial/kgdboc.c
 F:     include/linux/kgdb.h
 F:     kernel/kgdb.c
 
+KMEMCHECK
+P:     Vegard Nossum
+M:     vegardno@ifi.uio.no
+P:     Pekka Enberg
+M:     penberg@cs.helsinki.fi
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+
 KMEMLEAK
 P:     Catalin Marinas
 M:     catalin.marinas@arm.com
@@ -3419,7 +3405,6 @@ F:        mm/kmemleak-test.c
 KMEMTRACE
 P:     Eduard - Gabriel Munteanu
 M:     eduard.munteanu@linux360.ro
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/trace/kmemtrace.txt
 F:     include/trace/kmemtrace.h
@@ -3434,7 +3419,6 @@ P:        David S. Miller
 M:     davem@davemloft.net
 P:     Masami Hiramatsu
 M:     mhiramat@redhat.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/kprobes.txt
 F:     include/linux/kprobes.h
@@ -3443,7 +3427,6 @@ F:        kernel/kprobes.c
 KS0108 LCD CONTROLLER DRIVER
 P:     Miguel Ojeda Sandonis
 M:     miguel.ojeda.sandonis@gmail.com
-L:     linux-kernel@vger.kernel.org
 W:     http://miguelojeda.es/auxdisplay.htm
 W:     http://jair.lab.fi.uva.es/~migojed/auxdisplay.htm
 S:     Maintained
@@ -3607,7 +3590,6 @@ P:        Peter Zijlstra
 M:     peterz@infradead.org
 P:     Ingo Molnar
 M:     mingo@redhat.com
-L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
 S:     Maintained
 F:     Documentation/lockdep*.txt
@@ -3659,7 +3641,6 @@ L:        linux-m32r-ja@ml.linux-m32r.org (in Japanese)
 W:     http://www.linux-m32r.org/
 S:     Maintained
 F:     arch/m32r/
-F:     include/asm-m32r/
 
 M68K ARCHITECTURE
 P:     Geert Uytterhoeven
@@ -3743,7 +3724,6 @@ F:        include/linux/mv643xx.h
 MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
 P:     Nicolas Pitre
 M:     nico@cam.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
 MARVELL YUKON / SYSKONNECT DRIVER
@@ -3797,7 +3777,6 @@ F:        drivers/scsi/megaraid/
 
 MEMORY MANAGEMENT
 L:     linux-mm@kvack.org
-L:     linux-kernel@vger.kernel.org
 W:     http://www.linux-mm.org
 S:     Maintained
 F:     include/linux/mm.h
@@ -3811,7 +3790,6 @@ M:        xemul@openvz.org
 P:     KAMEZAWA Hiroyuki
 M:     kamezawa.hiroyu@jp.fujitsu.com
 L:     linux-mm@kvack.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     mm/memcontrol.c
 
@@ -3854,7 +3832,6 @@ F:        arch/mips/
 MISCELLANEOUS MCA-SUPPORT
 P:     James Bottomley
 M:     James.Bottomley@HansenPartnership.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/ia64/mca.txt
 F:     Documentation/mca.txt
@@ -3864,7 +3841,6 @@ F:        include/linux/mca*
 MODULE SUPPORT
 P:     Rusty Russell
 M:     rusty@rustcorp.com.au
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     include/linux/module.h
 F:     kernel/module.c
@@ -3888,7 +3864,6 @@ F:        drivers/mmc/host/imxmmc.*
 MOUSE AND MISC DEVICES [GENERAL]
 P:     Alessandro Rubini
 M:     rubini@ipvvis.unipv.it
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     drivers/input/mouse/
 F:     include/linux/gpio_mouse.h
@@ -3896,7 +3871,6 @@ F:        include/linux/gpio_mouse.h
 MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 P:     Jiri Slaby
 M:     jirislaby@gmail.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/serial/moxa-smartio
 F:     drivers/char/mxser.*
@@ -3912,7 +3886,6 @@ F:        drivers/platform/x86/msi-laptop.c
 MULTIFUNCTION DEVICES (MFD)
 P:     Samuel Ortiz
 M:     sameo@linux.intel.com
-L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/mfd-2.6.git
 S:     Supported
 F:     drivers/mfd/
@@ -3920,7 +3893,6 @@ F:        drivers/mfd/
 MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
 P:     Pierre Ossman
 M:     pierre@ossman.eu
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     drivers/mmc/
 F:     include/linux/mmc/
@@ -3928,7 +3900,6 @@ F:        include/linux/mmc/
 MULTIMEDIA CARD (MMC) ETC. OVER SPI
 P:     David Brownell
 M:     dbrownell@users.sourceforge.net
-L:     linux-kernel@vger.kernel.org
 S:     Odd Fixes
 F:     drivers/mmc/host/mmc_spi.c
 F:     include/linux/spi/mmc_spi.h
@@ -3943,7 +3914,6 @@ F:        sound/oss/msnd*
 MULTITECH MULTIPORT CARD (ISICOM)
 P:     Jiri Slaby
 M:     jirislaby@gmail.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     drivers/char/isicom.c
 F:     include/linux/isicom.h
@@ -4187,7 +4157,6 @@ NTFS FILESYSTEM
 P:     Anton Altaparmakov
 M:     aia21@cantab.net
 L:     linux-ntfs-dev@lists.sourceforge.net
-L:     linux-kernel@vger.kernel.org
 W:     http://www.linux-ntfs.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
 S:     Maintained
@@ -4421,7 +4390,6 @@ M:        akataria@vmware.com
 P:     Rusty Russell
 M:     rusty@rustcorp.com.au
 L:     virtualization@lists.osdl.org
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     Documentation/ia64/paravirt_ops.txt
 F:     arch/*/kernel/paravirt*
@@ -4472,7 +4440,6 @@ F:        include/linux/leds-pca9532.h
 PCI ERROR RECOVERY
 P:     Linas Vepstas
 M:     linas@austin.ibm.com
-L:     linux-kernel@vger.kernel.org
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     Documentation/PCI/pci-error-recovery.txt
@@ -4481,7 +4448,6 @@ F:        Documentation/powerpc/eeh-pci-error-recovery.txt
 PCI SUBSYSTEM
 P:     Jesse Barnes
 M:     jbarnes@virtuousgeek.org
-L:     linux-kernel@vger.kernel.org
 L:     linux-pci@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git
 S:     Supported
@@ -4516,7 +4482,6 @@ F:        drivers/net/pcnet32.c
 PER-TASK DELAY ACCOUNTING
 P:     Balbir Singh
 M:     balbir@linux.vnet.ibm.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     include/linux/delayacct.h
 F:     kernel/delayacct.c
@@ -4548,7 +4513,6 @@ F:        drivers/mtd/devices/phram.c
 PKTCDVD DRIVER
 P:     Peter Osterlund
 M:     petero2@telia.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     drivers/block/pktcdvd.c
 F:     include/linux/pktcdvd.h
@@ -4556,7 +4520,6 @@ F:        include/linux/pktcdvd.h
 POSIX CLOCKS and TIMERS
 P:     Thomas Gleixner
 M:     tglx@linutronix.de
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     fs/timerfd.c
 F:     include/linux/timer*
@@ -4567,7 +4530,6 @@ P:        Anton Vorontsov
 M:     cbou@mail.ru
 P:     David Woodhouse
 M:     dwmw2@infradead.org
-L:     linux-kernel@vger.kernel.org
 T:     git git://git.infradead.org/battery-2.6.git
 S:     Maintained
 F:     include/linux/power_supply.h
@@ -4619,7 +4581,6 @@ F:        include/linux/if_pppol2tp.h
 PREEMPTIBLE KERNEL
 P:     Robert Love
 M:     rml@tech9.net
-L:     linux-kernel@vger.kernel.org
 L:     kpreempt-tech@lists.sourceforge.net
 W:     ftp://ftp.kernel.org/pub/linux/kernel/people/rml/preempt-kernel
 S:     Supported
@@ -4682,7 +4643,6 @@ P:        Roland McGrath
 M:     roland@redhat.com
 P:     Oleg Nesterov
 M:     oleg@redhat.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     include/asm-generic/syscall.h
 F:     include/linux/ptrace.h
@@ -4768,7 +4728,6 @@ F:        drivers/net/qlge/
 QNX4 FILESYSTEM
 P:     Anders Larsen
 M:     al@alarsen.net
-L:     linux-kernel@vger.kernel.org
 W:     http://www.alarsen.net/linux/qnx4fs/
 S:     Maintained
 F:     fs/qnx4/
@@ -4815,7 +4774,6 @@ F:        drivers/char/random.c
 RAPIDIO SUBSYSTEM
 P:     Matt Porter
 M:     mporter@kernel.crashing.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     drivers/rapidio/
 
@@ -4829,7 +4787,8 @@ F:        drivers/net/wireless/ray*
 RCUTORTURE MODULE
 P:     Josh Triplett
 M:     josh@freedesktop.org
-L:     linux-kernel@vger.kernel.org
+P:     Paul E. McKenney
+M:     paulmck@linux.vnet.ibm.com
 S:     Maintained
 F:     Documentation/RCU/torture.txt
 F:     kernel/rcutorture.c
@@ -4837,7 +4796,6 @@ F:        kernel/rcutorture.c
 RDC R-321X SoC
 P:     Florian Fainelli
 M:     florian@openwrt.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
 RDC R6040 FAST ETHERNET DRIVER
@@ -4857,8 +4815,9 @@ F:        net/rds/
 READ-COPY UPDATE (RCU)
 P:     Dipankar Sarma
 M:     dipankar@in.ibm.com
+P:     Paul E. McKenney
+M:     paulmck@linux.vnet.ibm.com
 W:     http://www.rdrop.com/users/paulmck/rclock/
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     Documentation/RCU/rcu.txt
 F:     Documentation/RCU/rcuref.txt
@@ -4869,7 +4828,6 @@ F:        kernel/rcupdate.c
 REAL TIME CLOCK DRIVER
 P:     Paul Gortmaker
 M:     p_gortmaker@yahoo.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/rtc.txt
 F:     drivers/rtc/
@@ -5007,7 +4965,6 @@ S3C24XX SD/MMC Driver
 P:     Ben Dooks
 M:     ben-linux@fluff.org
 L:     linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     drivers/mmc/host/s3cmci.*
 
@@ -5033,7 +4990,6 @@ P:        Ingo Molnar
 M:     mingo@elte.hu
 P:     Peter Zijlstra
 M:     peterz@infradead.org
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     kernel/sched*
 F:     include/linux/sched.h
@@ -5135,7 +5091,6 @@ F:        drivers/mmc/host/sdhci.*
 SECURITY SUBSYSTEM
 P:     James Morris
 M:     jmorris@namei.org
-L:     linux-kernel@vger.kernel.org
 L:     linux-security-module@vger.kernel.org (suggested Cc:)
 T:     git git://www.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
 W:     http://security.wiki.kernel.org/
@@ -5154,7 +5109,6 @@ P:        James Morris
 M:     jmorris@namei.org
 P:     Eric Paris
 M:     eparis@parisplace.org
-L:     linux-kernel@vger.kernel.org (kernel issues)
 L:     selinux@tycho.nsa.gov (subscribers-only, general discussion)
 W:     http://selinuxproject.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
@@ -5417,7 +5371,6 @@ F:        include/linux/sony-laptop.h
 SONY MEMORYSTICK CARD SUPPORT
 P:     Alex Dubov
 M:     oakad@yahoo.com
-L:     linux-kernel@vger.kernel.org
 W:     http://tifmxx.berlios.de/
 S:     Maintained
 F:     drivers/memstick/host/tifm_ms.c
@@ -5427,7 +5380,7 @@ P:        Jaroslav Kysela
 M:     perex@perex.cz
 P:     Takashi Iwai
 M:     tiwai@suse.de
-L:     alsa-devel@alsa-project.org (subscribers-only)
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://www.alsa-project.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git
 T:     git git://git.alsa-project.org/alsa-kernel.git
@@ -5442,7 +5395,7 @@ M:        lrg@slimlogic.co.uk
 P:     Mark Brown
 M:     broonie@opensource.wolfsonmicro.com
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git
-L:     alsa-devel@alsa-project.org (subscribers-only)
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://alsa-project.org/main/index.php/ASoC
 S:     Supported
 F:     sound/soc/
@@ -5460,7 +5413,6 @@ F:        arch/sparc/
 SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
 P:     Roger Wolff
 M:     R.E.Wolff@BitWizard.nl
-L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     Documentation/serial/specialix.txt
 F:     drivers/char/specialix*
@@ -5506,7 +5458,6 @@ F:        fs/squashfs/
 SRM (Alpha) environment access
 P:     Jan-Benedict Glaw
 M:     jbglaw@lug-owl.de
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     arch/alpha/kernel/srm_env.c
 
@@ -5521,7 +5472,6 @@ S:        Maintained
 STAGING SUBSYSTEM
 P:     Greg Kroah-Hartman
 M:     gregkh@suse.de
-L:     linux-kernel@vger.kernel.org
 T:     quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
 S:     Maintained
 F:     drivers/staging/
@@ -5601,7 +5551,6 @@ F:        include/linux/sysv_fs.h
 TASKSTATS STATISTICS INTERFACE
 P:     Balbir Singh
 M:     balbir@linux.vnet.ibm.com
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/accounting/taskstats*
 F:     include/linux/taskstats*
@@ -5694,7 +5643,6 @@ P:        Kentaro Takeda
 M:     takedakn@nttdata.co.jp
 P:     Tetsuo Handa
 M:     penguin-kernel@I-love.SAKURA.ne.jp
-L:     linux-kernel@vger.kernel.org (kernel issues)
 L:     tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for developers and users in English)
 L:     tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
 L:     tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
@@ -5746,14 +5694,17 @@ F:      drivers/char/tpm/
 TRIVIAL PATCHES
 P:     Jiri Kosina
 M:     trivial@kernel.org
-L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
 S:     Maintained
+F:     drivers/char/tty_*
+F:     drivers/serial/serial_core.c
+F:     include/linux/serial_core.h
+F:     include/linux/serial.h
+F:     include/linux/tty.h
 
 TTY LAYER
 P:     Alan Cox
 M:     alan@lxorguk.ukuu.org.uk
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     stgit http://zeniv.linux.org.uk/~alan/ttydev/
 
@@ -5826,7 +5777,6 @@ F:        fs/udf/
 UFS FILESYSTEM
 P:     Evgeniy Dushistov
 M:     dushistov@mail.ru
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/ufs.txt
 F:     fs/ufs/
@@ -5843,7 +5793,6 @@ F:        include/linux/uwb/
 UNIFORM CDROM DRIVER
 P:     Jens Axboe
 M:     axboe@kernel.dk
-L:     linux-kernel@vger.kernel.org
 W:     http://www.kernel.dk
 S:     Maintained
 F:     Documentation/cdrom/
@@ -5872,7 +5821,6 @@ F:        drivers/usb/class/cdc-acm.*
 USB BLOCK DRIVER (UB ub)
 P:     Pete Zaitcev
 M:     zaitcev@redhat.com
-L:     linux-kernel@vger.kernel.org
 L:     linux-usb@vger.kernel.org
 S:     Supported
 F:     drivers/block/ub.c
@@ -6165,6 +6113,12 @@ L:       linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/rndis_wlan.c
 
+USB XHCI DRIVER
+P:     Sarah Sharp
+M:     sarah.a.sharp@intel.com
+L:     linux-usb@vger.kernel.org
+S:     Supported
+
 USB ZC0301 DRIVER
 P:     Luca Risolia
 M:     luca.risolia@studio.unibo.it
@@ -6212,7 +6166,6 @@ P:        Hans J. Koch
 M:     hjk@linutronix.de
 P:     Greg Kroah-Hartman
 M:     gregkh@suse.de
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/DocBook/uio-howto.tmpl
 F:     drivers/uio/
@@ -6238,7 +6191,6 @@ F:        drivers/video/uvesafb.*
 VFAT/FAT/MSDOS FILESYSTEM
 P:     OGAWA Hirofumi
 M:     hirofumi@mail.parknet.co.jp
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/vfat.txt
 F:     fs/fat/
@@ -6282,6 +6234,14 @@ F:       drivers/net/macvlan.c
 F:     include/linux/if_*vlan.h
 F:     net/8021q/
 
+VLYNQ BUS
+P:     Florian Fainelli
+M:     florian@openwrt.org
+L:     openwrt-devel@lists.openwrt.org
+S:     Maintained
+F:     drivers/vlynq/vlynq.c
+F:     include/linux/vlynq.h
+
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 P:     Liam Girdwood
 M:     lrg@slimlogic.co.uk
@@ -6335,7 +6295,6 @@ F:        drivers/hwmon/w83793.c
 W83L51xD SD/MMC CARD INTERFACE DRIVER
 P:     Pierre Ossman
 M:     pierre@ossman.eu
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     drivers/mmc/host/wbsd.*
 
@@ -6422,7 +6381,6 @@ M:        mingo@redhat.com
 P:     H. Peter Anvin
 M:     hpa@zytor.com
 M:     x86@kernel.org
-L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
 S:     Maintained
 F:     Documentation/x86/
@@ -6458,7 +6416,6 @@ XILINX SYSTEMACE DRIVER
 P:     Grant Likely
 M:     grant.likely@secretlab.ca
 W:     http://www.secretlab.ca/
-L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     drivers/block/xsysace.c
 
@@ -6523,5 +6480,9 @@ F:        drivers/serial/zs.*
 
 THE REST
 P:     Linus Torvalds
+M:     torvalds@linux-foundation.org
+L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
 S:     Buried alive in reporters
+F:     *
+F:     */
index fef5c1450e471643968802394079689f95213fc9..a71c9c1455a722fed428bb89d0c2b677b7a7087e 100644 (file)
@@ -1,10 +1,3 @@
 /*
  * 8253/8254 Programmable Interval Timer
  */
-
-#ifndef _8253PIT_H
-#define _8253PIT_H
-
-#define PIT_TICK_RATE  1193180UL
-
-#endif
index 3e6735a34c571b011172cf54ee3e09db254434da..a8d4ec8ea4b60f679c3986f0548fe055888d5755 100644 (file)
@@ -3,30 +3,12 @@
 
 /* Dummy header just to define km_type. */
 
-
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define  __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)   KM_BOUNCE_READ,
-D(1)   KM_SKB_SUNRPC_DATA,
-D(2)   KM_SKB_DATA_SOFTIRQ,
-D(3)   KM_USER0,
-D(4)   KM_USER1,
-D(5)   KM_BIO_SRC_IRQ,
-D(6)   KM_BIO_DST_IRQ,
-D(7)   KM_PTE0,
-D(8)   KM_PTE1,
-D(9)   KM_IRQ0,
-D(10)  KM_IRQ1,
-D(11)  KM_SOFTIRQ0,
-D(12)  KM_SOFTIRQ1,
-D(13)  KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif
index c2938e574a40c2de349556430bc83fc0038569c0..19b86328ffd7907d437eca4ab61055dfadda1a09 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
 struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_mm);
 EXPORT_SYMBOL(init_task);
 
 union thread_union init_thread_union
index 67c19f8a9944804ad9ef3cd2ca56de73fc9c97fc..38c805dfc5445dd02ba8683d6c538d9daf4c8822 100644 (file)
@@ -227,7 +227,7 @@ struct irqaction timer_irqaction = {
        .name           = "timer",
 };
 
-static struct hw_interrupt_type rtc_irq_type = {
+static struct irq_chip rtc_irq_type = {
        .typename       = "RTC",
        .startup        = rtc_startup,
        .shutdown       = rtc_enable_disable,
index 9405bee9894e9c4febd62efb2e2487dc63e5bf13..50bfec9b588ffbc8ba4d08a47c532bf5f0de7299 100644 (file)
@@ -83,7 +83,7 @@ i8259a_end_irq(unsigned int irq)
                i8259a_enable_irq(irq);
 }
 
-struct hw_interrupt_type i8259a_irq_type = {
+struct irq_chip i8259a_irq_type = {
        .typename       = "XT-PIC",
        .startup        = i8259a_startup_irq,
        .shutdown       = i8259a_disable_irq,
index cc9a8a7aa279e05b5a16876f52f563467611eb2f..b63ccd7386f18230293c036acddab148f22b015a 100644 (file)
@@ -36,7 +36,7 @@ extern void i8259a_disable_irq(unsigned int);
 extern void i8259a_mask_and_ack_irq(unsigned int);
 extern unsigned int i8259a_startup_irq(unsigned int);
 extern void i8259a_end_irq(unsigned int);
-extern struct hw_interrupt_type i8259a_irq_type;
+extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
 extern void handle_irq(int irq);
index d53edbccbfe5d88653c59433be2d29147800c33f..69199a76ec4a41f6d165f66a56738a5b048340bd 100644 (file)
@@ -70,7 +70,7 @@ pyxis_mask_and_ack_irq(unsigned int irq)
        *(vulp)PYXIS_INT_MASK;
 }
 
-static struct hw_interrupt_type pyxis_irq_type = {
+static struct irq_chip pyxis_irq_type = {
        .typename       = "PYXIS",
        .startup        = pyxis_startup_irq,
        .shutdown       = pyxis_disable_irq,
index a03fbca4940eb0d0881b4d197cd3e84a3cf2b68a..85229369a1f8bfa31e7e326f1e0bc42020550a75 100644 (file)
@@ -48,7 +48,7 @@ srm_end_irq(unsigned int irq)
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness.  */
-static struct hw_interrupt_type srm_irq_type = {
+static struct irq_chip srm_irq_type = {
        .typename       = "SRM",
        .startup        = srm_startup_irq,
        .shutdown       = srm_disable_irq,
index 80df86cd746bd2e8ca81ef82f338f7b337c229d5..d2634e4476b4589f03831f575ec00751b0434ca9 100644 (file)
@@ -252,9 +252,9 @@ reserve_std_resources(void)
 }
 
 #define PFN_MAX                PFN_DOWN(0x80000000)
-#define for_each_mem_cluster(memdesc, cluster, i)              \
-       for ((cluster) = (memdesc)->cluster, (i) = 0;           \
-            (i) < (memdesc)->numclusters; (i)++, (cluster)++)
+#define for_each_mem_cluster(memdesc, _cluster, i)             \
+       for ((_cluster) = (memdesc)->cluster, (i) = 0;          \
+            (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
 
 static unsigned long __init
 get_mem_size_limit(char *s)
index e53a1e1c2f217233abfc19ff40249f7bcde269b8..382035ef7394666e20f7a4b9c821f2f71051e800 100644 (file)
@@ -89,7 +89,7 @@ alcor_end_irq(unsigned int irq)
                alcor_enable_irq(irq);
 }
 
-static struct hw_interrupt_type alcor_irq_type = {
+static struct irq_chip alcor_irq_type = {
        .typename       = "ALCOR",
        .startup        = alcor_startup_irq,
        .shutdown       = alcor_disable_irq,
index ace475c124f69d9d2e248268e8ef3436c4262f1e..ed349436732ba2ded473fc8c65776b422cb77b09 100644 (file)
@@ -71,7 +71,7 @@ cabriolet_end_irq(unsigned int irq)
                cabriolet_enable_irq(irq);
 }
 
-static struct hw_interrupt_type cabriolet_irq_type = {
+static struct irq_chip cabriolet_irq_type = {
        .typename       = "CABRIOLET",
        .startup        = cabriolet_startup_irq,
        .shutdown       = cabriolet_disable_irq,
index 5bd5259324b7c8827adb237facd7bf120edd4ca2..46e70ece5176ec1372e26dc09072759b26de5c43 100644 (file)
@@ -198,7 +198,7 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
        return 0;
 }
 
-static struct hw_interrupt_type dp264_irq_type = {
+static struct irq_chip dp264_irq_type = {
        .typename       = "DP264",
        .startup        = dp264_startup_irq,
        .shutdown       = dp264_disable_irq,
@@ -209,7 +209,7 @@ static struct hw_interrupt_type dp264_irq_type = {
        .set_affinity   = dp264_set_affinity,
 };
 
-static struct hw_interrupt_type clipper_irq_type = {
+static struct irq_chip clipper_irq_type = {
        .typename       = "CLIPPER",
        .startup        = clipper_startup_irq,
        .shutdown       = clipper_disable_irq,
@@ -298,7 +298,7 @@ clipper_srm_device_interrupt(unsigned long vector)
 }
 
 static void __init
-init_tsunami_irqs(struct hw_interrupt_type * ops, int imin, int imax)
+init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
        long i;
        for (i = imin; i <= imax; ++i) {
index 9c5a306dc0ee825a47d3d8c0bd27e2d091069086..660c23ef661f686bfbb7c6e2fe382ad3eb8b3478 100644 (file)
@@ -69,7 +69,7 @@ eb64p_end_irq(unsigned int irq)
                eb64p_enable_irq(irq);
 }
 
-static struct hw_interrupt_type eb64p_irq_type = {
+static struct irq_chip eb64p_irq_type = {
        .typename       = "EB64P",
        .startup        = eb64p_startup_irq,
        .shutdown       = eb64p_disable_irq,
index baf60f36cbd773114f5d8e69a151b03d98608986..b99ea488d8446139d4fabdaa53dbeeedf51446eb 100644 (file)
@@ -80,7 +80,7 @@ eiger_end_irq(unsigned int irq)
                eiger_enable_irq(irq);
 }
 
-static struct hw_interrupt_type eiger_irq_type = {
+static struct irq_chip eiger_irq_type = {
        .typename       = "EIGER",
        .startup        = eiger_startup_irq,
        .shutdown       = eiger_disable_irq,
index 2b5caf3d9b1526678506fef65bc0e746b61edc9f..ef0b83a070accb165129291fb58b2bfa19f748e7 100644 (file)
@@ -118,7 +118,7 @@ jensen_local_end(unsigned int irq)
                i8259a_end_irq(1);
 }
 
-static struct hw_interrupt_type jensen_local_irq_type = {
+static struct irq_chip jensen_local_irq_type = {
        .typename       = "LOCAL",
        .startup        = jensen_local_startup,
        .shutdown       = jensen_local_shutdown,
index c5a1a2438c678191eeb6bb1c39b518cdf1c4adc9..bbfc4f20ca72a2ed9f6e0ac2c3172fc81f8c33d1 100644 (file)
@@ -169,7 +169,7 @@ marvel_irq_noop_return(unsigned int irq)
        return 0; 
 }
 
-static struct hw_interrupt_type marvel_legacy_irq_type = {
+static struct irq_chip marvel_legacy_irq_type = {
        .typename       = "LEGACY",
        .startup        = marvel_irq_noop_return,
        .shutdown       = marvel_irq_noop,
@@ -179,7 +179,7 @@ static struct hw_interrupt_type marvel_legacy_irq_type = {
        .end            = marvel_irq_noop,
 };
 
-static struct hw_interrupt_type io7_lsi_irq_type = {
+static struct irq_chip io7_lsi_irq_type = {
        .typename       = "LSI",
        .startup        = io7_startup_irq,
        .shutdown       = io7_disable_irq,
@@ -189,7 +189,7 @@ static struct hw_interrupt_type io7_lsi_irq_type = {
        .end            = io7_end_irq,
 };
 
-static struct hw_interrupt_type io7_msi_irq_type = {
+static struct irq_chip io7_msi_irq_type = {
        .typename       = "MSI",
        .startup        = io7_startup_irq,
        .shutdown       = io7_disable_irq,
@@ -273,8 +273,8 @@ init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
 
 static void __init
 init_io7_irqs(struct io7 *io7, 
-             struct hw_interrupt_type *lsi_ops,
-             struct hw_interrupt_type *msi_ops)
+             struct irq_chip *lsi_ops,
+             struct irq_chip *msi_ops)
 {
        long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
        long i;
index 8d3e9429c5ee60c17d9a015ebff09a40feabe9e0..4e366641a08ed20c4f6cba1755f2e9082216a5df 100644 (file)
@@ -68,7 +68,7 @@ mikasa_end_irq(unsigned int irq)
                mikasa_enable_irq(irq);
 }
 
-static struct hw_interrupt_type mikasa_irq_type = {
+static struct irq_chip mikasa_irq_type = {
        .typename       = "MIKASA",
        .startup        = mikasa_startup_irq,
        .shutdown       = mikasa_disable_irq,
index 538876b62449a215034d053036f3e0e114e51dd4..35753a173bac3073305d8c8a18156d5e7b90840a 100644 (file)
@@ -73,7 +73,7 @@ noritake_end_irq(unsigned int irq)
                 noritake_enable_irq(irq);
 }
 
-static struct hw_interrupt_type noritake_irq_type = {
+static struct irq_chip noritake_irq_type = {
        .typename       = "NORITAKE",
        .startup        = noritake_startup_irq,
        .shutdown       = noritake_disable_irq,
index 672cb2df53dfc7e6b53814701f181d346ebf9599..f3aec7e085c8c6f0a0165a7f47af0fe076312377 100644 (file)
@@ -135,7 +135,7 @@ rawhide_end_irq(unsigned int irq)
                rawhide_enable_irq(irq);
 }
 
-static struct hw_interrupt_type rawhide_irq_type = {
+static struct irq_chip rawhide_irq_type = {
        .typename       = "RAWHIDE",
        .startup        = rawhide_startup_irq,
        .shutdown       = rawhide_disable_irq,
index f15a329b6011167791b6baf2f898de662a5fb1d1..d9f9cfeb9931354cee9d6df304877941ef13ecea 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
+#include <linux/timex.h>
 #include <linux/init.h>
 
 #include <asm/ptrace.h>
index ce1faa6f1df1f4e1aba6ac57f3f8001f75bdf2f4..fc9246373452896439e58c01c8c595a1771c7d63 100644 (file)
@@ -72,7 +72,7 @@ rx164_end_irq(unsigned int irq)
                rx164_enable_irq(irq);
 }
 
-static struct hw_interrupt_type rx164_irq_type = {
+static struct irq_chip rx164_irq_type = {
        .typename       = "RX164",
        .startup        = rx164_startup_irq,
        .shutdown       = rx164_disable_irq,
index 9e263256a42d551c386c345147d77817f8d221a7..426eb6906d0192a96872aa5697d2a0fc3bcb57a8 100644 (file)
@@ -501,7 +501,7 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
        spin_unlock(&sable_lynx_irq_lock);
 }
 
-static struct hw_interrupt_type sable_lynx_irq_type = {
+static struct irq_chip sable_lynx_irq_type = {
        .typename       = "SABLE/LYNX",
        .startup        = sable_lynx_startup_irq,
        .shutdown       = sable_lynx_disable_irq,
index 9bd9a31450c64a414ba08cc137ae349a535b0497..830318c21661dba4f36b2443e90686a7e4689d5b 100644 (file)
@@ -74,7 +74,7 @@ takara_end_irq(unsigned int irq)
                takara_enable_irq(irq);
 }
 
-static struct hw_interrupt_type takara_irq_type = {
+static struct irq_chip takara_irq_type = {
        .typename       = "TAKARA",
        .startup        = takara_startup_irq,
        .shutdown       = takara_disable_irq,
index 8dd239ebdb9e2cc3489c3d128402a9da37033a29..88978fc60f835061b35746d8623b6713606a559b 100644 (file)
@@ -185,7 +185,7 @@ titan_srm_device_interrupt(unsigned long vector)
 
 
 static void __init
-init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
+init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
        long i;
        for (i = imin; i <= imax; ++i) {
@@ -194,7 +194,7 @@ init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
        }
 }
 
-static struct hw_interrupt_type titan_irq_type = {
+static struct irq_chip titan_irq_type = {
        .typename       = "TITAN",
        .startup        = titan_startup_irq,
        .shutdown       = titan_disable_irq,
index 42c3eede4d099a3a970f9d78e25f42f3f8eeb8b1..e91b4c3838a8d3e5f21f4e5aa01d7135014fbe14 100644 (file)
@@ -157,7 +157,7 @@ wildfire_end_irq(unsigned int irq)
                wildfire_enable_irq(irq);
 }
 
-static struct hw_interrupt_type wildfire_irq_type = {
+static struct irq_chip wildfire_irq_type = {
        .typename       = "WILDFIRE",
        .startup        = wildfire_startup_irq,
        .shutdown       = wildfire_disable_irq,
index a13de49d126579c58f2b4d3131dd861741628d59..0eab55749423f2a9e8df8fade6a890efa6c0f480 100644 (file)
@@ -28,9 +28,9 @@ EXPORT_SYMBOL(node_data);
 #define DBGDCONT(args...)
 #endif
 
-#define for_each_mem_cluster(memdesc, cluster, i)              \
-       for ((cluster) = (memdesc)->cluster, (i) = 0;           \
-            (i) < (memdesc)->numclusters; (i)++, (cluster)++)
+#define for_each_mem_cluster(memdesc, _cluster, i)             \
+       for ((_cluster) = (memdesc)->cluster, (i) = 0;          \
+            (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
 
 static void __init show_mem_layout(void)
 {
index e859af349467420e3107074cfa94f7d993125c70..3f470866bb89ebe4f903db8dfb389313505df502 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
new file mode 100644 (file)
index 0000000..36a85f5
--- /dev/null
@@ -0,0 +1,50 @@
+/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *      http://armlinux.simtec.co.uk/
+ *      Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device PHY registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Note, this is a seperate header file as some of the clock framework
+ * needs to touch this if the clk_48m is used as the USB OHCI or other
+ * peripheral source.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H __FILE__
+
+/* S3C64XX_PA_USB_HSPHY */
+
+#define S3C_HSOTG_PHYREG(x)    ((x) + S3C_VA_USB_HSPHY)
+
+#define S3C_PHYPWR                             S3C_HSOTG_PHYREG(0x00)
+#define SRC_PHYPWR_OTG_DISABLE                 (1 << 4)
+#define SRC_PHYPWR_ANALOG_POWERDOWN            (1 << 3)
+#define SRC_PHYPWR_FORCE_SUSPEND               (1 << 1)
+
+#define S3C_PHYCLK                             S3C_HSOTG_PHYREG(0x04)
+#define S3C_PHYCLK_MODE_USB11                  (1 << 6)
+#define S3C_PHYCLK_EXT_OSC                     (1 << 5)
+#define S3C_PHYCLK_CLK_FORCE                   (1 << 4)
+#define S3C_PHYCLK_ID_PULL                     (1 << 2)
+#define S3C_PHYCLK_CLKSEL_MASK                 (0x3 << 0)
+#define S3C_PHYCLK_CLKSEL_SHIFT                        (0)
+#define S3C_PHYCLK_CLKSEL_48M                  (0x0 << 0)
+#define S3C_PHYCLK_CLKSEL_12M                  (0x2 << 0)
+#define S3C_PHYCLK_CLKSEL_24M                  (0x3 << 0)
+
+#define S3C_RSTCON                             S3C_HSOTG_PHYREG(0x08)
+#define S3C_RSTCON_PHYCLK                      (1 << 2)
+#define S3C_RSTCON_HCLK                                (1 << 2)
+#define S3C_RSTCON_PHY                         (1 << 0)
+
+#define S3C_PHYTUNE                            S3C_HSOTG_PHYREG(0x20)
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H */
diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
new file mode 100644 (file)
index 0000000..8d18d9d
--- /dev/null
@@ -0,0 +1,377 @@
+/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *      http://armlinux.simtec.co.uk/
+ *      Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device block registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_H __FILE__
+
+#define S3C_HSOTG_REG(x) (x)
+
+#define S3C_GOTGCTL                            S3C_HSOTG_REG(0x000)
+#define S3C_GOTGCTL_BSESVLD                    (1 << 19)
+#define S3C_GOTGCTL_ASESVLD                    (1 << 18)
+#define S3C_GOTGCTL_DBNC_SHORT                 (1 << 17)
+#define S3C_GOTGCTL_CONID_B                    (1 << 16)
+#define S3C_GOTGCTL_DEVHNPEN                   (1 << 11)
+#define S3C_GOTGCTL_HSSETHNPEN                 (1 << 10)
+#define S3C_GOTGCTL_HNPREQ                     (1 << 9)
+#define S3C_GOTGCTL_HSTNEGSCS                  (1 << 8)
+#define S3C_GOTGCTL_SESREQ                     (1 << 1)
+#define S3C_GOTGCTL_SESREQSCS                  (1 << 0)
+
+#define S3C_GOTGINT                            S3C_HSOTG_REG(0x004)
+#define S3C_GOTGINT_DbnceDone                  (1 << 19)
+#define S3C_GOTGINT_ADevTOUTChg                        (1 << 18)
+#define S3C_GOTGINT_HstNegDet                  (1 << 17)
+#define S3C_GOTGINT_HstnegSucStsChng           (1 << 9)
+#define S3C_GOTGINT_SesReqSucStsChng           (1 << 8)
+#define S3C_GOTGINT_SesEndDet                  (1 << 2)
+
+#define S3C_GAHBCFG                            S3C_HSOTG_REG(0x008)
+#define S3C_GAHBCFG_PTxFEmpLvl                 (1 << 8)
+#define S3C_GAHBCFG_NPTxFEmpLvl                        (1 << 7)
+#define S3C_GAHBCFG_DMAEn                      (1 << 5)
+#define S3C_GAHBCFG_HBstLen_MASK               (0xf << 1)
+#define S3C_GAHBCFG_HBstLen_SHIFT              (1)
+#define S3C_GAHBCFG_HBstLen_Single             (0x0 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr               (0x1 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr4              (0x3 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr8              (0x5 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr16             (0x7 << 1)
+#define S3C_GAHBCFG_GlblIntrEn                 (1 << 0)
+
+#define S3C_GUSBCFG                            S3C_HSOTG_REG(0x00C)
+#define S3C_GUSBCFG_PHYLPClkSel                        (1 << 15)
+#define S3C_GUSBCFG_HNPCap                     (1 << 9)
+#define S3C_GUSBCFG_SRPCap                     (1 << 8)
+#define S3C_GUSBCFG_PHYIf16                    (1 << 3)
+#define S3C_GUSBCFG_TOutCal_MASK               (0x7 << 0)
+#define S3C_GUSBCFG_TOutCal_SHIFT              (0)
+#define S3C_GUSBCFG_TOutCal_LIMIT              (0x7)
+#define S3C_GUSBCFG_TOutCal(_x)                        ((_x) << 0)
+
+#define S3C_GRSTCTL                            S3C_HSOTG_REG(0x010)
+
+#define S3C_GRSTCTL_AHBIdle                    (1 << 31)
+#define S3C_GRSTCTL_DMAReq                     (1 << 30)
+#define S3C_GRSTCTL_TxFNum_MASK                        (0x1f << 6)
+#define S3C_GRSTCTL_TxFNum_SHIFT               (6)
+#define S3C_GRSTCTL_TxFNum_LIMIT               (0x1f)
+#define S3C_GRSTCTL_TxFNum(_x)                 ((_x) << 6)
+#define S3C_GRSTCTL_TxFFlsh                    (1 << 5)
+#define S3C_GRSTCTL_RxFFlsh                    (1 << 4)
+#define S3C_GRSTCTL_INTknQFlsh                 (1 << 3)
+#define S3C_GRSTCTL_FrmCntrRst                 (1 << 2)
+#define S3C_GRSTCTL_HSftRst                    (1 << 1)
+#define S3C_GRSTCTL_CSftRst                    (1 << 0)
+
+#define S3C_GINTSTS                            S3C_HSOTG_REG(0x014)
+#define S3C_GINTMSK                            S3C_HSOTG_REG(0x018)
+
+#define S3C_GINTSTS_WkUpInt                    (1 << 31)
+#define S3C_GINTSTS_SessReqInt                 (1 << 30)
+#define S3C_GINTSTS_DisconnInt                 (1 << 29)
+#define S3C_GINTSTS_ConIDStsChng               (1 << 28)
+#define S3C_GINTSTS_PTxFEmp                    (1 << 26)
+#define S3C_GINTSTS_HChInt                     (1 << 25)
+#define S3C_GINTSTS_PrtInt                     (1 << 24)
+#define S3C_GINTSTS_FetSusp                    (1 << 22)
+#define S3C_GINTSTS_incompIP                   (1 << 21)
+#define S3C_GINTSTS_IncomplSOIN                        (1 << 20)
+#define S3C_GINTSTS_OEPInt                     (1 << 19)
+#define S3C_GINTSTS_IEPInt                     (1 << 18)
+#define S3C_GINTSTS_EPMis                      (1 << 17)
+#define S3C_GINTSTS_EOPF                       (1 << 15)
+#define S3C_GINTSTS_ISOutDrop                  (1 << 14)
+#define S3C_GINTSTS_EnumDone                   (1 << 13)
+#define S3C_GINTSTS_USBRst                     (1 << 12)
+#define S3C_GINTSTS_USBSusp                    (1 << 11)
+#define S3C_GINTSTS_ErlySusp                   (1 << 10)
+#define S3C_GINTSTS_GOUTNakEff                 (1 << 7)
+#define S3C_GINTSTS_GINNakEff                  (1 << 6)
+#define S3C_GINTSTS_NPTxFEmp                   (1 << 5)
+#define S3C_GINTSTS_RxFLvl                     (1 << 4)
+#define S3C_GINTSTS_SOF                                (1 << 3)
+#define S3C_GINTSTS_OTGInt                     (1 << 2)
+#define S3C_GINTSTS_ModeMis                    (1 << 1)
+#define S3C_GINTSTS_CurMod_Host                        (1 << 0)
+
+#define S3C_GRXSTSR                            S3C_HSOTG_REG(0x01C)
+#define S3C_GRXSTSP                            S3C_HSOTG_REG(0x020)
+
+#define S3C_GRXSTS_FN_MASK                     (0x7f << 25)
+#define S3C_GRXSTS_FN_SHIFT                    (25)
+
+#define S3C_GRXSTS_PktSts_MASK                 (0xf << 17)
+#define S3C_GRXSTS_PktSts_SHIFT                        (17)
+#define S3C_GRXSTS_PktSts_GlobalOutNAK         (0x1 << 17)
+#define S3C_GRXSTS_PktSts_OutRX                        (0x2 << 17)
+#define S3C_GRXSTS_PktSts_OutDone              (0x3 << 17)
+#define S3C_GRXSTS_PktSts_SetupDone            (0x4 << 17)
+#define S3C_GRXSTS_PktSts_SetupRX              (0x6 << 17)
+
+#define S3C_GRXSTS_DPID_MASK                   (0x3 << 15)
+#define S3C_GRXSTS_DPID_SHIFT                  (15)
+#define S3C_GRXSTS_ByteCnt_MASK                        (0x7ff << 4)
+#define S3C_GRXSTS_ByteCnt_SHIFT               (4)
+#define S3C_GRXSTS_EPNum_MASK                  (0xf << 0)
+#define S3C_GRXSTS_EPNum_SHIFT                 (0)
+
+#define S3C_GRXFSIZ                            S3C_HSOTG_REG(0x024)
+
+#define S3C_GNPTXFSIZ                          S3C_HSOTG_REG(0x028)
+
+#define S3C_GNPTXFSIZ_NPTxFDep_MASK            (0xffff << 16)
+#define S3C_GNPTXFSIZ_NPTxFDep_SHIFT           (16)
+#define S3C_GNPTXFSIZ_NPTxFDep_LIMIT           (0xffff)
+#define S3C_GNPTXFSIZ_NPTxFDep(_x)             ((_x) << 16)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_MASK         (0xffff << 0)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_SHIFT                (0)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_LIMIT                (0xffff)
+#define S3C_GNPTXFSIZ_NPTxFStAddr(_x)          ((_x) << 0)
+
+#define S3C_GNPTXSTS                           S3C_HSOTG_REG(0x02C)
+
+#define S3C_GNPTXSTS_NPtxQTop_MASK             (0x7f << 24)
+#define S3C_GNPTXSTS_NPtxQTop_SHIFT            (24)
+
+#define S3C_GNPTXSTS_NPTxQSpcAvail_MASK                (0xff << 16)
+#define S3C_GNPTXSTS_NPTxQSpcAvail_SHIFT       (16)
+#define S3C_GNPTXSTS_NPTxQSpcAvail_GET(_v)     (((_v) >> 16) & 0xff)
+
+#define S3C_GNPTXSTS_NPTxFSpcAvail_MASK                (0xffff << 0)
+#define S3C_GNPTXSTS_NPTxFSpcAvail_SHIFT       (0)
+#define S3C_GNPTXSTS_NPTxFSpcAvail_GET(_v)     (((_v) >> 0) & 0xffff)
+
+
+#define S3C_HPTXFSIZ                           S3C_HSOTG_REG(0x100)
+
+#define S3C_DPTXFSIZn(_a)                      S3C_HSOTG_REG(0x104 + (((_a) - 1) * 4))
+
+#define S3C_DPTXFSIZn_DPTxFSize_MASK           (0xffff << 16)
+#define S3C_DPTXFSIZn_DPTxFSize_SHIFT          (16)
+#define S3C_DPTXFSIZn_DPTxFSize_GET(_v)                (((_v) >> 16) & 0xffff)
+#define S3C_DPTXFSIZn_DPTxFSize_LIMIT          (0xffff)
+#define S3C_DPTXFSIZn_DPTxFSize(_x)            ((_x) << 16)
+
+#define S3C_DPTXFSIZn_DPTxFStAddr_MASK         (0xffff << 0)
+#define S3C_DPTXFSIZn_DPTxFStAddr_SHIFT                (0)
+
+/* Device mode registers */
+#define S3C_DCFG                               S3C_HSOTG_REG(0x800)
+
+#define S3C_DCFG_EPMisCnt_MASK                 (0x1f << 18)
+#define S3C_DCFG_EPMisCnt_SHIFT                        (18)
+#define S3C_DCFG_EPMisCnt_LIMIT                        (0x1f)
+#define S3C_DCFG_EPMisCnt(_x)                  ((_x) << 18)
+
+#define S3C_DCFG_PerFrInt_MASK                 (0x3 << 11)
+#define S3C_DCFG_PerFrInt_SHIFT                        (11)
+#define S3C_DCFG_PerFrInt_LIMIT                        (0x3)
+#define S3C_DCFG_PerFrInt(_x)                  ((_x) << 11)
+
+#define S3C_DCFG_DevAddr_MASK                  (0x7f << 4)
+#define S3C_DCFG_DevAddr_SHIFT                 (4)
+#define S3C_DCFG_DevAddr_LIMIT                 (0x7f)
+#define S3C_DCFG_DevAddr(_x)                   ((_x) << 4)
+
+#define S3C_DCFG_NZStsOUTHShk                  (1 << 2)
+
+#define S3C_DCFG_DevSpd_MASK                   (0x3 << 0)
+#define S3C_DCFG_DevSpd_SHIFT                  (0)
+#define S3C_DCFG_DevSpd_HS                     (0x0 << 0)
+#define S3C_DCFG_DevSpd_FS                     (0x1 << 0)
+#define S3C_DCFG_DevSpd_LS                     (0x2 << 0)
+#define S3C_DCFG_DevSpd_FS48                   (0x3 << 0)
+
+#define S3C_DCTL                               S3C_HSOTG_REG(0x804)
+
+#define S3C_DCTL_PWROnPrgDone                  (1 << 11)
+#define S3C_DCTL_CGOUTNak                      (1 << 10)
+#define S3C_DCTL_SGOUTNak                      (1 << 9)
+#define S3C_DCTL_CGNPInNAK                     (1 << 8)
+#define S3C_DCTL_SGNPInNAK                     (1 << 7)
+#define S3C_DCTL_TstCtl_MASK                   (0x7 << 4)
+#define S3C_DCTL_TstCtl_SHIFT                  (4)
+#define S3C_DCTL_GOUTNakSts                    (1 << 3)
+#define S3C_DCTL_GNPINNakSts                   (1 << 2)
+#define S3C_DCTL_SftDiscon                     (1 << 1)
+#define S3C_DCTL_RmtWkUpSig                    (1 << 0)
+
+#define S3C_DSTS                               S3C_HSOTG_REG(0x808)
+
+#define S3C_DSTS_SOFFN_MASK                    (0x3fff << 8)
+#define S3C_DSTS_SOFFN_SHIFT                   (8)
+#define S3C_DSTS_SOFFN_LIMIT                   (0x3fff)
+#define S3C_DSTS_SOFFN(_x)                     ((_x) << 8)
+#define S3C_DSTS_ErraticErr                    (1 << 3)
+#define S3C_DSTS_EnumSpd_MASK                  (0x3 << 1)
+#define S3C_DSTS_EnumSpd_SHIFT                 (1)
+#define S3C_DSTS_EnumSpd_HS                    (0x0 << 1)
+#define S3C_DSTS_EnumSpd_FS                    (0x1 << 1)
+#define S3C_DSTS_EnumSpd_LS                    (0x2 << 1)
+#define S3C_DSTS_EnumSpd_FS48                  (0x3 << 1)
+
+#define S3C_DSTS_SuspSts                       (1 << 0)
+
+#define S3C_DIEPMSK                            S3C_HSOTG_REG(0x810)
+
+#define S3C_DIEPMSK_INEPNakEffMsk              (1 << 6)
+#define S3C_DIEPMSK_INTknEPMisMsk              (1 << 5)
+#define S3C_DIEPMSK_INTknTXFEmpMsk             (1 << 4)
+#define S3C_DIEPMSK_TimeOUTMsk                 (1 << 3)
+#define S3C_DIEPMSK_AHBErrMsk                  (1 << 2)
+#define S3C_DIEPMSK_EPDisbldMsk                        (1 << 1)
+#define S3C_DIEPMSK_XferComplMsk               (1 << 0)
+
+#define S3C_DOEPMSK                            S3C_HSOTG_REG(0x814)
+
+#define S3C_DOEPMSK_Back2BackSetup             (1 << 6)
+#define S3C_DOEPMSK_OUTTknEPdisMsk             (1 << 4)
+#define S3C_DOEPMSK_SetupMsk                   (1 << 3)
+#define S3C_DOEPMSK_AHBErrMsk                  (1 << 2)
+#define S3C_DOEPMSK_EPDisbldMsk                        (1 << 1)
+#define S3C_DOEPMSK_XferComplMsk               (1 << 0)
+
+#define S3C_DAINT                              S3C_HSOTG_REG(0x818)
+#define S3C_DAINTMSK                           S3C_HSOTG_REG(0x81C)
+
+#define S3C_DAINT_OutEP_SHIFT                  (16)
+#define S3C_DAINT_OutEP(x)                     (1 << ((x) + 16))
+#define S3C_DAINT_InEP(x)                      (1 << (x))
+
+#define S3C_DTKNQR1                            S3C_HSOTG_REG(0x820)
+#define S3C_DTKNQR2                            S3C_HSOTG_REG(0x824)
+#define S3C_DTKNQR3                            S3C_HSOTG_REG(0x830)
+#define S3C_DTKNQR4                            S3C_HSOTG_REG(0x834)
+
+#define S3C_DVBUSDIS                           S3C_HSOTG_REG(0x828)
+#define S3C_DVBUSPULSE                         S3C_HSOTG_REG(0x82C)
+
+#define S3C_DIEPCTL0                           S3C_HSOTG_REG(0x900)
+#define S3C_DOEPCTL0                           S3C_HSOTG_REG(0xB00)
+#define S3C_DIEPCTL(_a)                                S3C_HSOTG_REG(0x900 + ((_a) * 0x20))
+#define S3C_DOEPCTL(_a)                                S3C_HSOTG_REG(0xB00 + ((_a) * 0x20))
+
+/* EP0 specialness:
+ * bits[29..28] - reserved (no SetD0PID, SetD1PID)
+ * bits[25..22] - should always be zero, this isn't a periodic endpoint
+ * bits[10..0] - MPS setting different for EP0
+*/
+#define S3C_D0EPCTL_MPS_MASK                   (0x3 << 0)
+#define S3C_D0EPCTL_MPS_SHIFT                  (0)
+#define S3C_D0EPCTL_MPS_64                     (0x0 << 0)
+#define S3C_D0EPCTL_MPS_32                     (0x1 << 0)
+#define S3C_D0EPCTL_MPS_16                     (0x2 << 0)
+#define S3C_D0EPCTL_MPS_8                      (0x3 << 0)
+
+#define S3C_DxEPCTL_EPEna                      (1 << 31)
+#define S3C_DxEPCTL_EPDis                      (1 << 30)
+#define S3C_DxEPCTL_SetD1PID                   (1 << 29)
+#define S3C_DxEPCTL_SetOddFr                   (1 << 29)
+#define S3C_DxEPCTL_SetD0PID                   (1 << 28)
+#define S3C_DxEPCTL_SetEvenFr                  (1 << 28)
+#define S3C_DxEPCTL_SNAK                       (1 << 27)
+#define S3C_DxEPCTL_CNAK                       (1 << 26)
+#define S3C_DxEPCTL_TxFNum_MASK                        (0xf << 22)
+#define S3C_DxEPCTL_TxFNum_SHIFT               (22)
+#define S3C_DxEPCTL_TxFNum_LIMIT               (0xf)
+#define S3C_DxEPCTL_TxFNum(_x)                 ((_x) << 22)
+
+#define S3C_DxEPCTL_Stall                      (1 << 21)
+#define S3C_DxEPCTL_Snp                                (1 << 20)
+#define S3C_DxEPCTL_EPType_MASK                        (0x3 << 18)
+#define S3C_DxEPCTL_EPType_SHIFT               (18)
+#define S3C_DxEPCTL_EPType_Control             (0x0 << 18)
+#define S3C_DxEPCTL_EPType_Iso                 (0x1 << 18)
+#define S3C_DxEPCTL_EPType_Bulk                        (0x2 << 18)
+#define S3C_DxEPCTL_EPType_Intterupt           (0x3 << 18)
+/* NOTE(review): "Intterupt" above is misspelled but kept for existing
+ * users; new code should use the correctly spelled alias below.
+*/
+#define S3C_DxEPCTL_EPType_Interrupt           S3C_DxEPCTL_EPType_Intterupt
+
+#define S3C_DxEPCTL_NAKsts                     (1 << 17)
+#define S3C_DxEPCTL_DPID                       (1 << 16)
+#define S3C_DxEPCTL_EOFrNum                    (1 << 16)
+#define S3C_DxEPCTL_USBActEp                   (1 << 15)
+#define S3C_DxEPCTL_NextEp_MASK                        (0xf << 11)
+#define S3C_DxEPCTL_NextEp_SHIFT               (11)
+#define S3C_DxEPCTL_NextEp_LIMIT               (0xf)
+#define S3C_DxEPCTL_NextEp(_x)                 ((_x) << 11)
+
+#define S3C_DxEPCTL_MPS_MASK                   (0x7ff << 0)
+#define S3C_DxEPCTL_MPS_SHIFT                  (0)
+#define S3C_DxEPCTL_MPS_LIMIT                  (0x7ff)
+#define S3C_DxEPCTL_MPS(_x)                    ((_x) << 0)
+
+#define S3C_DIEPINT(_a)                                S3C_HSOTG_REG(0x908 + ((_a) * 0x20))
+#define S3C_DOEPINT(_a)                                S3C_HSOTG_REG(0xB08 + ((_a) * 0x20))
+
+#define S3C_DxEPINT_INEPNakEff                 (1 << 6)
+#define S3C_DxEPINT_Back2BackSetup             (1 << 6)
+#define S3C_DxEPINT_INTknEPMis                 (1 << 5)
+#define S3C_DxEPINT_INTknTXFEmp                        (1 << 4)
+#define S3C_DxEPINT_OUTTknEPdis                        (1 << 4)
+#define S3C_DxEPINT_Timeout                    (1 << 3)
+#define S3C_DxEPINT_Setup                      (1 << 3)
+#define S3C_DxEPINT_AHBErr                     (1 << 2)
+#define S3C_DxEPINT_EPDisbld                   (1 << 1)
+#define S3C_DxEPINT_XferCompl                  (1 << 0)
+
+#define S3C_DIEPTSIZ0                          S3C_HSOTG_REG(0x910)
+
+#define S3C_DIEPTSIZ0_PktCnt_MASK              (0x3 << 19)
+#define S3C_DIEPTSIZ0_PktCnt_SHIFT             (19)
+#define S3C_DIEPTSIZ0_PktCnt_LIMIT             (0x3)
+#define S3C_DIEPTSIZ0_PktCnt(_x)               ((_x) << 19)
+
+#define S3C_DIEPTSIZ0_XferSize_MASK            (0x7f << 0)
+#define S3C_DIEPTSIZ0_XferSize_SHIFT           (0)
+#define S3C_DIEPTSIZ0_XferSize_LIMIT           (0x7f)
+#define S3C_DIEPTSIZ0_XferSize(_x)             ((_x) << 0)
+
+
+#define S3C_DOEPTSIZ0                          S3C_HSOTG_REG(0xB10)
+/* NOTE(review): the original define lacked the S3C_ prefix used by every
+ * sibling symbol (S3C_DOEPTSIZ0_*); keep the unprefixed name as an alias
+ * for any existing users.
+*/
+#define DOEPTSIZ0                              S3C_DOEPTSIZ0
+#define S3C_DOEPTSIZ0_SUPCnt_MASK              (0x3 << 29)
+#define S3C_DOEPTSIZ0_SUPCnt_SHIFT             (29)
+#define S3C_DOEPTSIZ0_SUPCnt_LIMIT             (0x3)
+#define S3C_DOEPTSIZ0_SUPCnt(_x)               ((_x) << 29)
+
+#define S3C_DOEPTSIZ0_PktCnt                   (1 << 19)
+#define S3C_DOEPTSIZ0_XferSize_MASK            (0x7f << 0)
+#define S3C_DOEPTSIZ0_XferSize_SHIFT           (0)
+
+#define S3C_DIEPTSIZ(_a)                       S3C_HSOTG_REG(0x910 + ((_a) * 0x20))
+#define S3C_DOEPTSIZ(_a)                       S3C_HSOTG_REG(0xB10 + ((_a) * 0x20))
+
+#define S3C_DxEPTSIZ_MC_MASK                   (0x3 << 29)
+#define S3C_DxEPTSIZ_MC_SHIFT                  (29)
+#define S3C_DxEPTSIZ_MC_LIMIT                  (0x3)
+#define S3C_DxEPTSIZ_MC(_x)                    ((_x) << 29)
+
+#define S3C_DxEPTSIZ_PktCnt_MASK               (0x3ff << 19)
+#define S3C_DxEPTSIZ_PktCnt_SHIFT              (19)
+#define S3C_DxEPTSIZ_PktCnt_GET(_v)            (((_v) >> 19) & 0x3ff)
+#define S3C_DxEPTSIZ_PktCnt_LIMIT              (0x3ff)
+#define S3C_DxEPTSIZ_PktCnt(_x)                        ((_x) << 19)
+
+#define S3C_DxEPTSIZ_XferSize_MASK             (0x7ffff << 0)
+#define S3C_DxEPTSIZ_XferSize_SHIFT            (0)
+#define S3C_DxEPTSIZ_XferSize_GET(_v)          (((_v) >> 0) & 0x7ffff)
+#define S3C_DxEPTSIZ_XferSize_LIMIT            (0x7ffff)
+#define S3C_DxEPTSIZ_XferSize(_x)              ((_x) << 0)
+
+
+#define S3C_DIEPDMA(_a)                                S3C_HSOTG_REG(0x914 + ((_a) * 0x20))
+#define S3C_DOEPDMA(_a)                                S3C_HSOTG_REG(0xB14 + ((_a) * 0x20))
+
+#define S3C_EPFIFO(_a)                         S3C_HSOTG_REG(0x1000 + ((_a) * 0x1000))
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_H */
index 993d56ee3cf303375a57328680fdc23aa25728f6..57ec9f2dcd953b2ebe9270f1c9dbfa6cf99d2401 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure. Must be aligned on an 8192-byte boundary.
  */
index e215f7104974fb76957d66695185f9e925edc519..0a88622339ee363d0663d53390a2106e26e51038 100644 (file)
@@ -1,21 +1,6 @@
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif
index 2c228c020978eee16b8edecc047d12689fb1c016..c26c34de9f3cb096580d3d48ef9de3a728d9f975 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
index 492988cb9077978be3a9589173e37381aaaed87a..d2d643c4ea592947cfdf440d3842345c0eff9b44 100644 (file)
@@ -5,21 +5,6 @@
  * is actually used on cris. 
  */
 
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif
index 4df0b320d524e351847e9f2b270237731b2b1607..51dcd04d2777f01b87a86108d500efb800be1d2d 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
index 29429a8b7f6a44b0a6e52629c9b134d0f72317e3..1d3df1d9495c3379180ad6b9cf683cc9b5f24b25 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
index 1ec8a3427120876f6ef7d998b72798a37c0cc53a..be12a7160116ae7cfd40d9cb90e948e158b7a5a5 100644 (file)
@@ -1,21 +1,6 @@
 #ifndef _ASM_H8300_KMAP_TYPES_H
 #define _ASM_H8300_KMAP_TYPES_H
 
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif
index cb5dc552da97899e4f443b40efd5fae50de4f3f1..089c65ed6eb327c5e8ffa39e5a69a5c6e5d86921 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
index 56ceb68eb99d244fffc70feab344e337ec385f7d..fe63b2dc9d075c8ceabc4e28bc7e246bff31f30a 100644 (file)
@@ -1131,7 +1131,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
 #ifdef CONFIG_NUMA
        {
                struct page *page;
-               page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
+               page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
                                        numa_node_id() : ioc->node, flags,
                                        get_order(size));
 
index 5d1658aa2b3bf2a4770ce77c02519439a0c73857..05d5f9996105223e0e61908c6622ab99fdf02e7d 100644 (file)
@@ -1,30 +1,12 @@
 #ifndef _ASM_IA64_KMAP_TYPES_H
 #define _ASM_IA64_KMAP_TYPES_H
 
-
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define  __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)   KM_BOUNCE_READ,
-D(1)   KM_SKB_SUNRPC_DATA,
-D(2)   KM_SKB_DATA_SOFTIRQ,
-D(3)   KM_USER0,
-D(4)   KM_USER1,
-D(5)   KM_BIO_SRC_IRQ,
-D(6)   KM_BIO_DST_IRQ,
-D(7)   KM_PTE0,
-D(8)   KM_PTE1,
-D(9)   KM_IRQ0,
-D(10)  KM_IRQ1,
-D(11)  KM_SOFTIRQ0,
-D(12)  KM_SOFTIRQ1,
-D(13)  KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif /* _ASM_IA64_KMAP_TYPES_H */
index 5b0e830c6f33212d48b28ec74f24113f6c1bc5ef..c475fc281be755accd969935eb8bd278069a1ffe 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
index 8f33a8840422ce96bc1a6df85f4876e0f5180b1d..5b17bd4022754ddcf1fcc8f5d37e4adcfbd6cdd6 100644 (file)
@@ -1829,8 +1829,7 @@ ia64_mca_cpu_init(void *cpu_data)
                        data = mca_bootmem();
                        first_time = 0;
                } else
-                       data = page_address(alloc_pages_node(numa_node_id(),
-                                       GFP_KERNEL, get_order(sz)));
+                       data = __get_free_pages(GFP_KERNEL, get_order(sz));
                if (!data)
                        panic("Could not allocate MCA memory for cpu %d\n",
                                        cpu);
index 8a06dc480594c0d3e93636eccad9ea9316944127..bdc176cb5e85bc471649d47259951274db3c29e9 100644 (file)
@@ -5595,7 +5595,7 @@ pfm_interrupt_handler(int irq, void *arg)
                (*pfm_alt_intr_handler->handler)(irq, arg, regs);
        }
 
-       put_cpu_no_resched();
+       put_cpu();
        return IRQ_HANDLED;
 }
 
index 8eff8c1d40a628404db5f8b0d70b82b3fd693eb8..6ba72ab42fcc513bac46bcb18eab8f7ed8ab198f 100644 (file)
@@ -98,7 +98,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 
        /* attempt to allocate a granule's worth of cached memory pages */
 
-       page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+       page = alloc_pages_exact_node(nid,
+                               GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                IA64_GRANULE_SHIFT-PAGE_SHIFT);
        if (!page) {
                mutex_unlock(&uc_pool->add_chunk_mutex);
index d876423e4e755465d8249f4aeb01610490f62cff..98b684928e12eae87c840344f1e256c1d118702c 100644 (file)
@@ -90,7 +90,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
         */
        node = pcibus_to_node(pdev->bus);
        if (likely(node >=0)) {
-               struct page *p = alloc_pages_node(node, flags, get_order(size));
+               struct page *p = alloc_pages_exact_node(node,
+                                               flags, get_order(size));
 
                if (likely(p))
                        cpuaddr = page_address(p);
index fa94dc6410ea1129083a401f01558e5028e05c23..4cdb5e3a06bfa6714bd724878f8e5c252d4be16e 100644 (file)
@@ -2,28 +2,11 @@
 #define __M32R_KMAP_TYPES_H
 
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define  __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)   KM_BOUNCE_READ,
-D(1)   KM_SKB_SUNRPC_DATA,
-D(2)   KM_SKB_DATA_SOFTIRQ,
-D(3)   KM_USER0,
-D(4)   KM_USER1,
-D(5)   KM_BIO_SRC_IRQ,
-D(6)   KM_BIO_DST_IRQ,
-D(7)   KM_PTE0,
-D(8)   KM_PTE1,
-D(9)   KM_IRQ0,
-D(10)  KM_IRQ1,
-D(11)  KM_SOFTIRQ0,
-D(12)  KM_SOFTIRQ1,
-D(13)  KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif /* __M32R_KMAP_TYPES_H */
index 016885c6f26094b3191fc8f790410d7a99b5c143..fce57e5d3f913950b96c5185209bc3e9b066f986 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
index 7daf897292cf65c2ec12832265f26035cda399af..b7a78ad429b78f270913e620ea875083467141d0 100644 (file)
@@ -154,9 +154,9 @@ unsigned long __init zone_sizes_init(void)
         *  Use all area of internal RAM.
         *  see __alloc_pages()
         */
-       NODE_DATA(1)->node_zones->pages_min = 0;
-       NODE_DATA(1)->node_zones->pages_low = 0;
-       NODE_DATA(1)->node_zones->pages_high = 0;
+       NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
+       NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
+       NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;
 
        return holes;
 }
index 98138b4e92208efa78dd3dcd5549f155c9b79a9b..922fdfdadeaa220830d52a99f97a1cbe22e7ebd1 100644 (file)
@@ -63,7 +63,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)
        outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type m32104ut_irq_type =
+static struct irq_chip m32104ut_irq_type =
 {
        .typename = "M32104UT-IRQ",
        .startup = startup_m32104ut_irq,
index 77b0ae9379e99c532a88c219bbef4c79e36f2cab..9c1bc7487c1e652ff6bd9cb77ea79c5cdb989caf 100644 (file)
@@ -69,7 +69,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)
        outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type m32700ut_irq_type =
+static struct irq_chip m32700ut_irq_type =
 {
        .typename = "M32700UT-IRQ",
        .startup = startup_m32700ut_irq,
@@ -146,7 +146,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
        outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type m32700ut_pld_irq_type =
+static struct irq_chip m32700ut_pld_irq_type =
 {
        .typename = "M32700UT-PLD-IRQ",
        .startup = startup_m32700ut_pld_irq,
@@ -215,7 +215,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
        outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type m32700ut_lanpld_irq_type =
+static struct irq_chip m32700ut_lanpld_irq_type =
 {
        .typename = "M32700UT-PLD-LAN-IRQ",
        .startup = startup_m32700ut_lanpld_irq,
@@ -284,7 +284,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
        outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type m32700ut_lcdpld_irq_type =
+static struct irq_chip m32700ut_lcdpld_irq_type =
 {
        .typename = "M32700UT-PLD-LCD-IRQ",
        .startup = startup_m32700ut_lcdpld_irq,
index 3ec087ff2214d226f36c86b31c6de932a38d2244..fb4b17799b66fd090b04f4d02364044f5558123d 100644 (file)
@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)
        outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type mappi_irq_type =
+static struct irq_chip mappi_irq_type =
 {
        .typename = "MAPPI-IRQ",
        .startup = startup_mappi_irq,
index d87969c6356e3041ea1856916c7f496419a5558e..6a65eda0a056c67ef039a34cc5d0326251be14d5 100644 (file)
@@ -70,7 +70,7 @@ static void shutdown_mappi2_irq(unsigned int irq)
        outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type mappi2_irq_type =
+static struct irq_chip mappi2_irq_type =
 {
        .typename = "MAPPI2-IRQ",
        .startup = startup_mappi2_irq,
index 785b4bd6d9fd4c3ddf414f3a7b5e1698329734b0..9c337aeac94b7db1a1d3d0f9307ef2116e15c687 100644 (file)
@@ -70,7 +70,7 @@ static void shutdown_mappi3_irq(unsigned int irq)
        outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type mappi3_irq_type =
+static struct irq_chip mappi3_irq_type =
 {
        .typename = "MAPPI3-IRQ",
        .startup = startup_mappi3_irq,
index 6faa5db68e950132b9b62269dd131527e9bdb80e..ed865741c38df1c6d9702e94bb17f033fdcca5c2 100644 (file)
@@ -61,7 +61,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)
        outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type oaks32r_irq_type =
+static struct irq_chip oaks32r_irq_type =
 {
        .typename = "OAKS32R-IRQ",
        .startup = startup_oaks32r_irq,
index fab13fd85422a593817d9d2282548b0c05a03044..80d68065701963dff6107f6377bb7fc1c3416707 100644 (file)
@@ -70,7 +70,7 @@ static void shutdown_opsput_irq(unsigned int irq)
        outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type opsput_irq_type =
+static struct irq_chip opsput_irq_type =
 {
        .typename = "OPSPUT-IRQ",
        .startup = startup_opsput_irq,
@@ -147,7 +147,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)
        outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type opsput_pld_irq_type =
+static struct irq_chip opsput_pld_irq_type =
 {
        .typename = "OPSPUT-PLD-IRQ",
        .startup = startup_opsput_pld_irq,
@@ -216,7 +216,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)
        outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type opsput_lanpld_irq_type =
+static struct irq_chip opsput_lanpld_irq_type =
 {
        .typename = "OPSPUT-PLD-LAN-IRQ",
        .startup = startup_opsput_lanpld_irq,
@@ -285,7 +285,7 @@ static void shutdown_opsput_lcdpld_irq(unsigned int irq)
        outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type opsput_lcdpld_irq_type =
+static struct irq_chip opsput_lcdpld_irq_type =
 {
        "OPSPUT-PLD-LCD-IRQ",
        startup_opsput_lcdpld_irq,
index 89588d649eb7cc0f42c41bcafb91d51145438eff..757302660af84f2f09ffbb0e48fcf946ab27cd45 100644 (file)
@@ -61,7 +61,7 @@ static void shutdown_mappi_irq(unsigned int irq)
        outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type mappi_irq_type =
+static struct irq_chip mappi_irq_type =
 {
        .typename = "M32700-IRQ",
        .startup = startup_mappi_irq,
@@ -134,7 +134,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
        outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct hw_interrupt_type m32700ut_pld_irq_type =
+static struct irq_chip m32700ut_pld_irq_type =
 {
        .typename = "USRV-PLD-IRQ",
        .startup = startup_m32700ut_pld_irq,
index c843c63d380161411f70e046e16c66705b9a6fac..3413cc1390ecffbd3e43c4c0bdecbfa74f791326 100644 (file)
@@ -1,21 +1,6 @@
 #ifndef __ASM_M68K_KMAP_TYPES_H
 #define __ASM_M68K_KMAP_TYPES_H
 
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif /* __ASM_M68K_KMAP_TYPES_H */
index ec37fb56c127a095ed8f5060c8af8ff6672f90d0..72bad65dba3a1d3b4091f074c22207aa8f9a277b 100644 (file)
  */
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 union thread_union init_thread_union
 __attribute__((section(".data.init_task"), aligned(THREAD_SIZE)))
        = { INIT_THREAD_INFO(init_task) };
index fe282de1d596dcb4b93d4077ba8c5fe4c6f3aaee..45e97a207fedaf57a2cab44f97d7d0370cf95e2b 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
index 4d7e222f5dd7252a78f99561ce5d505db619d2c8..25975252d83dbd0b5e92211a6957bcdbe961cfd8 100644 (file)
@@ -1,29 +1,6 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
 #ifndef _ASM_MICROBLAZE_KMAP_TYPES_H
 #define _ASM_MICROBLAZE_KMAP_TYPES_H
 
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_TYPE_NR,
-};
+#include <asm-generic/kmap_types.h>
 
 #endif /* _ASM_MICROBLAZE_KMAP_TYPES_H */
index 5dabc870b32295779a6d2a93bbed0d90ac6a1f90..032ca73f181bec27c0f7b741862ada66ec104342 100644 (file)
@@ -12,8 +12,6 @@
 #define PIT_CH0                        0x40
 #define PIT_CH2                        0x42
 
-#define PIT_TICK_RATE          1193182UL
-
 extern spinlock_t i8253_lock;
 
 extern void setup_pit_timer(void);
index 806aae3c533892b1e65917c4742943ff2692374b..58e91ed0388f7393150b6da2569428f9828b8488 100644 (file)
@@ -1,30 +1,12 @@
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define  __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)   KM_BOUNCE_READ,
-D(1)   KM_SKB_SUNRPC_DATA,
-D(2)   KM_SKB_DATA_SOFTIRQ,
-D(3)   KM_USER0,
-D(4)   KM_USER1,
-D(5)   KM_BIO_SRC_IRQ,
-D(6)   KM_BIO_DST_IRQ,
-D(7)   KM_PTE0,
-D(8)   KM_PTE1,
-D(9)   KM_IRQ0,
-D(10)  KM_IRQ1,
-D(11)  KM_SOFTIRQ0,
-D(12)  KM_SOFTIRQ1,
-D(13)  KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif
index 149cd914526e8e2e2d1c407852879cf2111fa822..5b457a40c784f84997c9bc24c17de910ecbe61b3 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
index 7396cd719900255c0379eda6b4d62dafe5fa8c6f..6827feb4de9671b7ef72201e55bff0abdffa6329 100644 (file)
@@ -38,7 +38,7 @@ int __init sni_eisa_root_init(void)
        if (!r)
                return r;
 
-       eisa_root_dev.dev.driver_data = &eisa_bus_root;
+       dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
 
        if (eisa_root_register(&eisa_bus_root)) {
                /* A real bridge may have been registered before
index 3398f9f356030767fde04242fdb1229809176ffd..76d093b58d4fe8248852737b2df0422379680036 100644 (file)
@@ -1,31 +1,6 @@
-/* MN10300 kmap_atomic() slot IDs
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif /* _ASM_KMAP_TYPES_H */
index 5ac3566f8c98b62c5a96673665b09f12d5a48696..80d423b80af30cfebbf7364829153c6f3314a3b1 100644 (file)
@@ -20,9 +20,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
index 806aae3c533892b1e65917c4742943ff2692374b..58e91ed0388f7393150b6da2569428f9828b8488 100644 (file)
@@ -1,30 +1,12 @@
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define  __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)   KM_BOUNCE_READ,
-D(1)   KM_SKB_SUNRPC_DATA,
-D(2)   KM_SKB_DATA_SOFTIRQ,
-D(3)   KM_USER0,
-D(4)   KM_USER1,
-D(5)   KM_BIO_SRC_IRQ,
-D(6)   KM_BIO_DST_IRQ,
-D(7)   KM_PTE0,
-D(8)   KM_PTE1,
-D(9)   KM_IRQ0,
-D(10)  KM_IRQ1,
-D(11)  KM_SOFTIRQ0,
-D(12)  KM_SOFTIRQ1,
-D(13)  KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif
index 1e25a45d64c17fd48d461489c3f2e950ed29ee53..82974b20fc106b85c0462cc83e78eb2c3201a1c4 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
index b70d6e53b303519a7e22ae6eb43e28be6dbd2f4b..a71c9c1455a722fed428bb89d0c2b677b7a7087e 100644 (file)
@@ -1,10 +1,3 @@
-#ifndef _ASM_POWERPC_8253PIT_H
-#define _ASM_POWERPC_8253PIT_H
-
 /*
  * 8253/8254 Programmable Interval Timer
  */
-
-#define PIT_TICK_RATE  1193182UL
-
-#endif /* _ASM_POWERPC_8253PIT_H */
index 688b329800bd25f559c75f2ea6f0dcdd22ade3b1..ffc4253fef55e99a59070b09f1b912e51bfb4de8 100644 (file)
@@ -9,10 +9,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
index 2f0e64b53642eb0c68191cd62929b906a2eba97c..ef6f64950e9b54080ca56a22845bb14ddbdaf1d4 100644 (file)
 #include <asm/sections.h>
 #include <asm/machdep.h>
 
-#ifdef CONFIG_LOGO_LINUX_CLUT224
 #include <linux/linux_logo.h>
-extern const struct linux_logo logo_linux_clut224;
-#endif
 
 /*
  * Properties whose value is longer than this get excluded from our
index 296b5268754efc651f261a8f3688e54b5281ac26..5e0a191764fc0158b5eb7377af019615c3540767 100644 (file)
@@ -122,8 +122,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
 
        area->nid = nid;
        area->order = order;
-       area->pages = alloc_pages_node(area->nid, GFP_KERNEL | GFP_THISNODE,
-                                       area->order);
+       area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+                                               area->order);
 
        if (!area->pages) {
                printk(KERN_WARNING "%s: no page on node %d\n",
index 9abd210d87c1362f831b7a50bae27bb78d6c31f7..8547e86bfb42ff2650c50dea4bdb3533e647a1b6 100644 (file)
@@ -752,17 +752,8 @@ static int __init init_spu_base(void)
                goto out_unregister_sysdev_class;
        }
 
-       if (ret > 0) {
-               /*
-                * We cannot put the forward declaration in
-                * <linux/linux_logo.h> because of conflicting session type
-                * conflicts for const and __initdata with different compiler
-                * versions
-                */
-               extern const struct linux_logo logo_spe_clut224;
-
+       if (ret > 0)
                fb_append_extra_logo(&logo_spe_clut224, ret);
-       }
 
        mutex_lock(&spu_full_list_mutex);
        xmon_register_spus(&spu_full_list);
index fd1574648223b5aaa3a9bb60255098a64b4db919..94ec3ee07983f8e9b97c6d857b5e7702bbfa36ee 100644 (file)
@@ -2,22 +2,7 @@
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,    
-       KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif
 #endif /* __KERNEL__ */
index 7db95c0b86938603176baf5496f23338cd455bc2..fe787f9e5f3f375753291b01f1e135546b5875b6 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
index 84d565c696befbaee91e7770721d1ec2a5ed6f8c..5962b08b6dd8a6258cdeb646fd723a86c0b81cde 100644 (file)
@@ -3,30 +3,12 @@
 
 /* Dummy header just to define km_type. */
 
-
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define  __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)   KM_BOUNCE_READ,
-D(1)   KM_SKB_SUNRPC_DATA,
-D(2)   KM_SKB_DATA_SOFTIRQ,
-D(3)   KM_USER0,
-D(4)   KM_USER1,
-D(5)   KM_BIO_SRC_IRQ,
-D(6)   KM_BIO_DST_IRQ,
-D(7)   KM_PTE0,
-D(8)   KM_PTE1,
-D(9)   KM_IRQ0,
-D(10)  KM_IRQ1,
-D(11)  KM_SOFTIRQ0,
-D(12)  KM_SOFTIRQ1,
-D(13)  KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif
index 80c35ff71d564d524cf01c752efd0618fa868308..1719957c0a691202b192da5c9a6329e810dcc44f 100644 (file)
@@ -10,9 +10,6 @@
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 struct pt_regs fake_swapper_regs;
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial thread structure.
  *
index 602f5e034f7a48afd0d85af3f72e2fbfd382d9d4..aad21745fbb92a6d67d2ea0d1efcfeb7467a1250 100644 (file)
@@ -5,21 +5,6 @@
  * is actually used on sparc.  -DaveM
  */
 
-enum km_type {
-       KM_BOUNCE_READ,
-       KM_SKB_SUNRPC_DATA,
-       KM_SKB_DATA_SOFTIRQ,
-       KM_USER0,
-       KM_USER1,
-       KM_BIO_SRC_IRQ,
-       KM_BIO_DST_IRQ,
-       KM_PTE0,
-       KM_PTE1,
-       KM_IRQ0,
-       KM_IRQ1,
-       KM_SOFTIRQ0,
-       KM_SOFTIRQ1,
-       KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif
index f28cb8278e98abb6f2fb79c634b2e74e52cfac84..28125c5b3d3c306e216012ac7b529fa6b0f33e0a 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
 struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_mm);
 EXPORT_SYMBOL(init_task);
 
 /* .text section in head.S is aligned at 8k boundary and this gets linked
index 434ba121e3c59a30009117fd6f1c146f516270a6..3b44b47c7e1d4615dbf73dd6f2e92997b9fa29c3 100644 (file)
@@ -360,7 +360,7 @@ static struct platform_driver uml_net_driver = {
 
 static void net_device_release(struct device *dev)
 {
-       struct uml_net *device = dev->driver_data;
+       struct uml_net *device = dev_get_drvdata(dev);
        struct net_device *netdev = device->dev;
        struct uml_net_private *lp = netdev_priv(netdev);
 
@@ -440,7 +440,7 @@ static void eth_configure(int n, void *init, char *mac,
        device->pdev.id = n;
        device->pdev.name = DRIVER_NAME;
        device->pdev.dev.release = net_device_release;
-       device->pdev.dev.driver_data = device;
+       dev_set_drvdata(&device->pdev.dev, device);
        if (platform_device_register(&device->pdev))
                goto out_free_netdev;
        SET_NETDEV_DEV(dev,&device->pdev.dev);
index aa9e926e13d73dca17015a5bd5f8e5eb76842c63..8f05d4d9da128788d0b5b7d5f39ac5fe2741bd61 100644 (file)
@@ -778,7 +778,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
 
 static void ubd_device_release(struct device *dev)
 {
-       struct ubd *ubd_dev = dev->driver_data;
+       struct ubd *ubd_dev = dev_get_drvdata(dev);
 
        blk_cleanup_queue(ubd_dev->queue);
        *ubd_dev = ((struct ubd) DEFAULT_UBD);
@@ -807,7 +807,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
                ubd_devs[unit].pdev.id   = unit;
                ubd_devs[unit].pdev.name = DRIVER_NAME;
                ubd_devs[unit].pdev.dev.release = ubd_device_release;
-               ubd_devs[unit].pdev.dev.driver_data = &ubd_devs[unit];
+               dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
                platform_device_register(&ubd_devs[unit].pdev);
                disk->driverfs_dev = &ubd_devs[unit].pdev.dev;
        }
index 37dd097c16c07131282b8ddd484d52e1cb6fd369..b3906f860a87e31189c0824d3df1edc2a6c0378c 100644 (file)
@@ -27,7 +27,7 @@
  * sign followed by value, e.g.:
  *
  * static int init_variable __initdata = 0;
- * static char linux_logo[] __initdata = { 0x32, 0x36, ... };
+ * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
  *
  * Don't forget to initialize data not at file scope, i.e. within a function,
  * as gcc otherwise puts the data into the bss section and not into the init
index 63bee158cd8efbe2639f5a293c741ca5af1623ef..3dabbe128e40b0186538f9b4389a1311f2b738f2 100644 (file)
@@ -8,7 +8,7 @@
 
 #define ETH_ADDR_LEN (6)
 #define ETH_HEADER_ETHERTAP (16)
-#define ETH_HEADER_OTHER (14)
+#define ETH_HEADER_OTHER (26) /* 14 for ethernet + VLAN + MPLS for crazy people */
 #define ETH_MAX_PACKET (1500)
 
 #define UML_NET_VERSION (4)
index 806d381947bf1c704dc584e5b29360abb7b40a88..b25121b537d8dfb6d0588d1aaf4ebc9b915cc663 100644 (file)
 #include "linux/mqueue.h"
 #include "asm/uaccess.h"
 
-struct mm_struct init_mm = INIT_MM(init_mm);
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
index 336b61569072be6958ccb74c22e6c87abbb7c9b2..454cdb43e351a93fbb6ea50db791f726a8fb2733 100644 (file)
@@ -358,7 +358,7 @@ EXPORT_SYMBOL(um_request_irq);
 EXPORT_SYMBOL(reactivate_fd);
 
 /*
- * hw_interrupt_type must define (startup || enable) &&
+ * irq_chip must define (startup || enable) &&
  * (shutdown || disable) && end
  */
 static void dummy(unsigned int irq)
@@ -366,7 +366,7 @@ static void dummy(unsigned int irq)
 }
 
 /* This is used for everything else than the timer. */
-static struct hw_interrupt_type normal_irq_type = {
+static struct irq_chip normal_irq_type = {
        .typename = "SIGIO",
        .release = free_irq_by_irq_and_dev,
        .disable = dummy,
@@ -375,7 +375,7 @@ static struct hw_interrupt_type normal_irq_type = {
        .end = dummy
 };
 
-static struct hw_interrupt_type SIGVTALRM_irq_type = {
+static struct irq_chip SIGVTALRM_irq_type = {
        .typename = "SIGVTALRM",
        .release = free_irq_by_irq_and_dev,
        .shutdown = dummy, /* never called */
index c41b04bf5fa005399183176935a683dbfe92d3ce..54a36ec20cb75c32b65a7e0e28043a8c19c6a6f0 100644 (file)
@@ -1,7 +1,7 @@
 #include "as-layout.h"
 
        .globl syscall_stub
-.section .__syscall_stub, "x"
+.section .__syscall_stub, "ax"
 
        .globl batch_syscall_stub
 batch_syscall_stub:
index 6e8a9195e95266ff55276e7de633782e970a646b..04b9e87c8dadd38f5f0ebc60ae72b190e63c7920 100644 (file)
@@ -66,28 +66,28 @@ typedef struct user_i387_struct elf_fpregset_t;
        PT_REGS_R15(regs) = 0; \
 } while (0)
 
-#define ELF_CORE_COPY_REGS(pr_reg, regs)               \
-       (pr_reg)[0] = (regs)->regs.gp[0];                       \
-       (pr_reg)[1] = (regs)->regs.gp[1];                       \
-       (pr_reg)[2] = (regs)->regs.gp[2];                       \
-       (pr_reg)[3] = (regs)->regs.gp[3];                       \
-       (pr_reg)[4] = (regs)->regs.gp[4];                       \
-       (pr_reg)[5] = (regs)->regs.gp[5];                       \
-       (pr_reg)[6] = (regs)->regs.gp[6];                       \
-       (pr_reg)[7] = (regs)->regs.gp[7];                       \
-       (pr_reg)[8] = (regs)->regs.gp[8];                       \
-       (pr_reg)[9] = (regs)->regs.gp[9];                       \
-       (pr_reg)[10] = (regs)->regs.gp[10];                     \
-       (pr_reg)[11] = (regs)->regs.gp[11];                     \
-       (pr_reg)[12] = (regs)->regs.gp[12];                     \
-       (pr_reg)[13] = (regs)->regs.gp[13];                     \
-       (pr_reg)[14] = (regs)->regs.gp[14];                     \
-       (pr_reg)[15] = (regs)->regs.gp[15];                     \
-       (pr_reg)[16] = (regs)->regs.gp[16];                     \
-       (pr_reg)[17] = (regs)->regs.gp[17];                     \
-       (pr_reg)[18] = (regs)->regs.gp[18];                     \
-       (pr_reg)[19] = (regs)->regs.gp[19];                     \
-       (pr_reg)[20] = (regs)->regs.gp[20];                     \
+#define ELF_CORE_COPY_REGS(pr_reg, _regs)              \
+       (pr_reg)[0] = (_regs)->regs.gp[0];                      \
+       (pr_reg)[1] = (_regs)->regs.gp[1];                      \
+       (pr_reg)[2] = (_regs)->regs.gp[2];                      \
+       (pr_reg)[3] = (_regs)->regs.gp[3];                      \
+       (pr_reg)[4] = (_regs)->regs.gp[4];                      \
+       (pr_reg)[5] = (_regs)->regs.gp[5];                      \
+       (pr_reg)[6] = (_regs)->regs.gp[6];                      \
+       (pr_reg)[7] = (_regs)->regs.gp[7];                      \
+       (pr_reg)[8] = (_regs)->regs.gp[8];                      \
+       (pr_reg)[9] = (_regs)->regs.gp[9];                      \
+       (pr_reg)[10] = (_regs)->regs.gp[10];                    \
+       (pr_reg)[11] = (_regs)->regs.gp[11];                    \
+       (pr_reg)[12] = (_regs)->regs.gp[12];                    \
+       (pr_reg)[13] = (_regs)->regs.gp[13];                    \
+       (pr_reg)[14] = (_regs)->regs.gp[14];                    \
+       (pr_reg)[15] = (_regs)->regs.gp[15];                    \
+       (pr_reg)[16] = (_regs)->regs.gp[16];                    \
+       (pr_reg)[17] = (_regs)->regs.gp[17];                    \
+       (pr_reg)[18] = (_regs)->regs.gp[18];                    \
+       (pr_reg)[19] = (_regs)->regs.gp[19];                    \
+       (pr_reg)[20] = (_regs)->regs.gp[20];                    \
        (pr_reg)[21] = current->thread.arch.fs;                 \
        (pr_reg)[22] = 0;                                       \
        (pr_reg)[23] = 0;                                       \
index 6d9edf9fabce15d4dac536ae8ce18e70b301822e..20e4a96a6dcbe064ddc99f719e7ffdb506fa4cdc 100644 (file)
@@ -1,7 +1,7 @@
 #include "as-layout.h"
 
        .globl syscall_stub
-.section .__syscall_stub, "x"
+.section .__syscall_stub, "ax"
 syscall_stub:
        syscall
        /* We don't have 64-bit constants, so this constructs the address
index 356d2ec8e2fbbf0a4e0ccee99c1f261226ed4cde..cf42fc305419873d2816f2989bb01f23711d74ed 100644 (file)
@@ -46,6 +46,7 @@ config X86
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_LZMA
+       select HAVE_ARCH_KMEMCHECK
 
 config OUTPUT_FORMAT
        string
index edbd0ca620678fd6627c60d4849a07852a823b11..1b68659c41b4c4ce91a3ad7bee7aa8c532fc2168 100644 (file)
@@ -81,6 +81,11 @@ ifdef CONFIG_CC_STACKPROTECTOR
         endif
 endif
 
+# Don't unroll struct assignments with kmemcheck enabled
+ifeq ($(CONFIG_KMEMCHECK),y)
+       KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
+endif
+
 # Stackpointer is addressed different for 32 bit and 64 bit x86
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
index f82fdc412c64b9d3371fb1a9df65f38ba141bb52..b93405b228b47868dba489944324a147852f90cc 100644 (file)
@@ -6,6 +6,7 @@
  * Documentation/DMA-API.txt for documentation.
  */
 
+#include <linux/kmemcheck.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
 #include <linux/dma-attrs.h>
@@ -60,6 +61,7 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
        dma_addr_t addr;
 
        BUG_ON(!valid_dma_direction(dir));
+       kmemcheck_mark_initialized(ptr, size);
        addr = ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
@@ -87,8 +89,12 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 {
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        int ents;
+       struct scatterlist *s;
+       int i;
 
        BUG_ON(!valid_dma_direction(dir));
+       for_each_sg(sg, s, nents, i)
+               kmemcheck_mark_initialized(sg_virt(s), s->length);
        ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
        debug_dma_map_sg(hwdev, sg, nents, ents, dir);
 
@@ -200,6 +206,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
        dma_addr_t addr;
 
        BUG_ON(!valid_dma_direction(dir));
+       kmemcheck_mark_initialized(page_address(page) + offset, size);
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
index 5759c165a5cf5516adc32d6cc6d841451cf55aaa..9e00a731a7fbaad71b252d3779a54b04311407fd 100644 (file)
@@ -2,28 +2,11 @@
 #define _ASM_X86_KMAP_TYPES_H
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define  __WITH_KM_FENCE
 #endif
 
-enum km_type {
-D(0)   KM_BOUNCE_READ,
-D(1)   KM_SKB_SUNRPC_DATA,
-D(2)   KM_SKB_DATA_SOFTIRQ,
-D(3)   KM_USER0,
-D(4)   KM_USER1,
-D(5)   KM_BIO_SRC_IRQ,
-D(6)   KM_BIO_DST_IRQ,
-D(7)   KM_PTE0,
-D(8)   KM_PTE1,
-D(9)   KM_IRQ0,
-D(10)  KM_IRQ1,
-D(11)  KM_SOFTIRQ0,
-D(12)  KM_SOFTIRQ1,
-D(13)  KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
-#undef D
+#undef __WITH_KM_FENCE
 
 #endif /* _ASM_X86_KMAP_TYPES_H */
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
new file mode 100644 (file)
index 0000000..ed01518
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef ASM_X86_KMEMCHECK_H
+#define ASM_X86_KMEMCHECK_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_KMEMCHECK
+bool kmemcheck_active(struct pt_regs *regs);
+
+void kmemcheck_show(struct pt_regs *regs);
+void kmemcheck_hide(struct pt_regs *regs);
+
+bool kmemcheck_fault(struct pt_regs *regs,
+       unsigned long address, unsigned long error_code);
+bool kmemcheck_trap(struct pt_regs *regs);
+#else
+static inline bool kmemcheck_active(struct pt_regs *regs)
+{
+       return false;
+}
+
+static inline void kmemcheck_show(struct pt_regs *regs)
+{
+}
+
+static inline void kmemcheck_hide(struct pt_regs *regs)
+{
+}
+
+static inline bool kmemcheck_fault(struct pt_regs *regs,
+       unsigned long address, unsigned long error_code)
+{
+       return false;
+}
+
+static inline bool kmemcheck_trap(struct pt_regs *regs)
+{
+       return false;
+}
+#endif /* CONFIG_KMEMCHECK */
+
+#endif
index 18ef7ebf2631709dad4f26a681fdbf5dff5a8b2b..3cc06e3fceb8e8ba2b9a7d72b7608c520aa5b81f 100644 (file)
@@ -317,6 +317,11 @@ static inline int pte_present(pte_t a)
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 }
 
+static inline int pte_hidden(pte_t pte)
+{
+       return pte_flags(pte) & _PAGE_HIDDEN;
+}
+
 static inline int pmd_present(pmd_t pmd)
 {
        return pmd_flags(pmd) & _PAGE_PRESENT;
index 4d258ad76a0fc04925ac484c1fa9b0bfc47a1cb9..54cb697f4900e03f1934f784196dc4d167bdd366 100644 (file)
@@ -18,7 +18,7 @@
 #define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
 #define _PAGE_BIT_UNUSED1      9       /* available for programmer */
 #define _PAGE_BIT_IOMAP                10      /* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3      11
+#define _PAGE_BIT_HIDDEN       11      /* hidden by kmemcheck */
 #define _PAGE_BIT_PAT_LARGE    12      /* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL      _PAGE_BIT_UNUSED1
 #define _PAGE_BIT_CPA_TEST     _PAGE_BIT_UNUSED1
 #define _PAGE_GLOBAL   (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
 #define _PAGE_UNUSED1  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
 #define _PAGE_IOMAP    (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
 #define _PAGE_PAT      (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL  (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
 #define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
 #define __HAVE_ARCH_PTE_SPECIAL
 
+#ifdef CONFIG_KMEMCHECK
+#define _PAGE_HIDDEN   (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#else
+#define _PAGE_HIDDEN   (_AT(pteval_t, 0))
+#endif
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
index 0e0e3ba827f74dcad11f51f898bf1f1311afa805..c86f452256de935492dae82c0256f8040791f9c8 100644 (file)
@@ -177,10 +177,18 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
  *     No 3D Now!
  */
 
+#ifndef CONFIG_KMEMCHECK
 #define memcpy(t, f, n)                                \
        (__builtin_constant_p((n))              \
         ? __constant_memcpy((t), (f), (n))     \
         : __memcpy((t), (f), (n)))
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(t, f, n) __memcpy((t), (f), (n))
+#endif
 
 #endif
 
index 2afe164bf1e67a73f622c751adaeed870e12075f..19e2c468fc2c8045469ac2a704947cd07ea81100 100644 (file)
@@ -27,6 +27,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
    function. */
 
 #define __HAVE_ARCH_MEMCPY 1
+#ifndef CONFIG_KMEMCHECK
 #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
 extern void *memcpy(void *to, const void *from, size_t len);
 #else
@@ -42,6 +43,13 @@ extern void *__memcpy(void *to, const void *from, size_t len);
        __ret;                                                  \
 })
 #endif
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
+#endif
 
 #define __HAVE_ARCH_MEMSET
 void *memset(void *s, int c, size_t n);
index 602c769fc98ca7340f5388fe21e57ce5431b376e..b0783520988b8bba544ad79dc38d4ea4580876f6 100644 (file)
@@ -154,9 +154,9 @@ struct thread_info {
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #else
-#define THREAD_FLAGS GFP_KERNEL
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
 #endif
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
index b5c9d45c981fa7a51b9429aff281a9a595fcb370..1375cfc93960e1054dcaa3d8a7f41cacaaee028a 100644 (file)
@@ -4,9 +4,7 @@
 #include <asm/processor.h>
 #include <asm/tsc.h>
 
-/* The PIT ticks at this frequency (in HZ): */
-#define PIT_TICK_RATE          1193182
-
+/* Assume we use the PIT time source for the clock tick */
 #define CLOCK_TICK_RATE                PIT_TICK_RATE
 
 #define ARCH_HAS_READ_CURRENT_TIMER
index 11b3bb86e17bc06e579a0d4e1493c30903a845a6..7fcf6f3dbcc37af7e45b3ffaa38f7b6a02f25f26 100644 (file)
@@ -1,5 +1,10 @@
+#ifdef CONFIG_KMEMCHECK
+/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
+# include <asm-generic/xor.h>
+#else
 #ifdef CONFIG_X86_32
 # include "xor_32.h"
 #else
 # include "xor_64.h"
 #endif
+#endif
index 3ffdcfa9abdf07accfa466518b8ac1adf16a9c05..9fa33886c0d791a2a6aad25b42accb1e5a978a34 100644 (file)
@@ -487,7 +487,6 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
        char *v = c->x86_vendor_id;
-       static int printed;
        int i;
 
        for (i = 0; i < X86_VENDOR_NUM; i++) {
@@ -504,13 +503,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
                }
        }
 
-       if (!printed) {
-               printed++;
-               printk(KERN_ERR
-                   "CPU: vendor_id '%s' unknown, using generic init.\n", v);
-
-               printk(KERN_ERR "CPU: Your system may be unstable.\n");
-       }
+       printk_once(KERN_ERR
+                       "CPU: vendor_id '%s' unknown, using generic init.\n" \
+                       "CPU: Your system may be unstable.\n", v);
 
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
index daed39ba2614dbcad31e4725b343d56d0912f05d..3260ab04499610d603ca5809758844c025dfaa19 100644 (file)
@@ -86,6 +86,29 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
         */
        if (c->x86 == 6 && c->x86_model < 15)
                clear_cpu_cap(c, X86_FEATURE_PAT);
+
+#ifdef CONFIG_KMEMCHECK
+       /*
+        * P4s have a "fast strings" feature which causes single-
+        * stepping REP instructions to only generate a #DB on
+        * cache-line boundaries.
+        *
+        * Ingo Molnar reported a Pentium D (model 6) and a Xeon
+        * (model 2) with the same problem.
+        */
+       if (c->x86 == 15) {
+               u64 misc_enable;
+
+               rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+               if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
+                       printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
+
+                       misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
+                       wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+               }
+       }
+#endif
 }
 
 #ifdef CONFIG_X86_32
index 2ac1f0c2beb3ba54088d4e0c4e89622dbc03094d..b07af8861244cd4f108181c061a3fab787a724b2 100644 (file)
@@ -182,6 +182,11 @@ static struct notifier_block __refdata cpuid_class_cpu_notifier =
        .notifier_call = cpuid_class_cpu_callback,
 };
 
+static char *cpuid_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
+}
+
 static int __init cpuid_init(void)
 {
        int i, err = 0;
@@ -198,6 +203,7 @@ static int __init cpuid_init(void)
                err = PTR_ERR(cpuid_class);
                goto out_chrdev;
        }
+       cpuid_class->nodename = cpuid_nodename;
        for_each_online_cpu(i) {
                err = cpuid_device_create(i);
                if (err != 0)
index c2e0bb0890d4a17d652f1710a4df8ab948dec7a5..5cf36c053ac401a08a9e1ced6679e7f59c09d782 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
+#include <linux/timex.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
index df3bf269beab96466785764b619bc737fd0f5d55..270ff83efc11d8dc27089a3fec085fdec9074b5b 100644 (file)
@@ -12,7 +12,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
 
 /*
  * Initial thread structure.
index 9c4461501fcbb9618ac3ce12fef93f990b8f3955..9371448290ac3c4f3a3489b0d9edbe55a5dd689b 100644 (file)
@@ -236,6 +236,7 @@ static const struct file_operations microcode_fops = {
 static struct miscdevice microcode_dev = {
        .minor                  = MICROCODE_MINOR,
        .name                   = "microcode",
+       .devnode                = "cpu/microcode",
        .fops                   = &microcode_fops,
 };
 
index 3cf3413ec626936f334c309f9a20f2a32a36de96..98fd6cd4e3a479e8e1cea7a4ccfad7063f921850 100644 (file)
@@ -196,6 +196,11 @@ static struct notifier_block __refdata msr_class_cpu_notifier = {
        .notifier_call = msr_class_cpu_callback,
 };
 
+static char *msr_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
+}
+
 static int __init msr_init(void)
 {
        int i, err = 0;
@@ -212,6 +217,7 @@ static int __init msr_init(void)
                err = PTR_ERR(msr_class);
                goto out_chrdev;
        }
+       msr_class->nodename = msr_nodename;
        for_each_online_cpu(i) {
                err = msr_device_create(i);
                if (err != 0)
index 3bb2be1649bddb5b3ba9870553b72ce73e41b460..994dd6a4a2a004bcb5d91955fb921ac8c303eb3d 100644 (file)
@@ -63,7 +63,7 @@ void arch_task_cache_init(void)
         task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
-                                 SLAB_PANIC, NULL);
+                                 SLAB_PANIC | SLAB_NOTRACK, NULL);
 }
 
 /*
index 4aaf7e48394fb562343f27d811e7a9f329be2b76..c3eb207181feeef5be791127b61798b432935cc4 100644 (file)
@@ -77,6 +77,13 @@ void save_stack_trace(struct stack_trace *trace)
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
+void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
+{
+       dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
        dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
index 1e1e27b7d438c4106e23f719335f2aa8f3f6ef5c..5f935f0d5861c625b3ffa6811846e4a480c91282 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/edac.h>
 #endif
 
+#include <asm/kmemcheck.h>
 #include <asm/stacktrace.h>
 #include <asm/processor.h>
 #include <asm/debugreg.h>
@@ -534,6 +535,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 
        get_debugreg(condition, 6);
 
+       /* Catch kmemcheck conditions first of all! */
+       if (condition & DR_STEP && kmemcheck_trap(regs))
+               return;
+
        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
index 3e1c057e98fe039325eed2056d8960fa52f023c4..ae3180c506a6ba47163ff621b46b1814f55cdd8e 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/delay.h>
 #include <linux/clocksource.h>
 #include <linux/percpu.h>
+#include <linux/timex.h>
 
 #include <asm/hpet.h>
 #include <asm/timer.h>
index 32d6ae8fb60e339eba6b85968676776d44293c8b..e770bf349ec4f7f473f8639dabf4068188a4b3b5 100644 (file)
@@ -1277,7 +1277,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
        struct page *pages;
        struct vmcs *vmcs;
 
-       pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+       pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
index fdd30d08ab524a24ca3e2c27a07e1b0b326d88a4..eefdeee8a871175d49a8a7f10bdebd37ed90265c 100644 (file)
@@ -10,6 +10,8 @@ obj-$(CONFIG_X86_PTDUMP)      += dump_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)          += highmem_32.o
 
+obj-$(CONFIG_KMEMCHECK)                += kmemcheck/
+
 obj-$(CONFIG_MMIOTRACE)                += mmiotrace.o
 mmiotrace-y                    := kmmio.o pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST)   += testmmiotrace.o
index c6acc632637417c193394da4881fa19112ace761..baa0e86adfbc65325baa07c60cd31be67ed046cc 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <asm/traps.h>                 /* dotraplinkage, ...           */
 #include <asm/pgalloc.h>               /* pgd_*(), ...                 */
+#include <asm/kmemcheck.h>             /* kmemcheck_*(), ...           */
 
 /*
  * Page fault error code bits:
@@ -956,6 +957,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
        /* Get the faulting address: */
        address = read_cr2();
 
+       /*
+        * Detect and handle instructions that would cause a page fault for
+        * both a tracked kernel page and a userspace page.
+        */
+       if (kmemcheck_active(regs))
+               kmemcheck_hide(regs);
+
        if (unlikely(kmmio_fault(regs, address)))
                return;
 
@@ -973,9 +981,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
         * protection error (error_code & 9) == 0.
         */
        if (unlikely(fault_in_kernel_space(address))) {
-               if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-                   vmalloc_fault(address) >= 0)
-                       return;
+               if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+                       if (vmalloc_fault(address) >= 0)
+                               return;
+
+                       if (kmemcheck_fault(regs, address, error_code))
+                               return;
+               }
 
                /* Can handle a stale RO->RW TLB: */
                if (spurious_fault(error_code, address))
index 34c1bfb64f1ca07d80838f363b514caf07acf4db..f53b57e4086fffde114e8c153c67b76a7a857554 100644 (file)
@@ -213,7 +213,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
        if (!after_bootmem)
                init_gbpages();
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
index 9ff3c0816d158e33c168e4c11a64e511c236705f..3cd7711bb94940d0f19ffb4bac4f4b5f9f6a1a5f 100644 (file)
@@ -111,7 +111,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
                pte_t *page_table = NULL;
 
                if (after_bootmem) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
                        if (!page_table)
index 52bb9519bb86b4ec778d613939ea658adb1052a6..9c543290a813837f4a5f6140511a305424e480ba 100644 (file)
@@ -104,7 +104,7 @@ static __ref void *spp_getpage(void)
        void *ptr;
 
        if (after_bootmem)
-               ptr = (void *) get_zeroed_page(GFP_ATOMIC);
+               ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
 
@@ -281,7 +281,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
        void *adr;
 
        if (after_bootmem) {
-               adr = (void *)get_zeroed_page(GFP_ATOMIC);
+               adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
                *phys = __pa(adr);
 
                return adr;
diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile
new file mode 100644 (file)
index 0000000..520b3bc
--- /dev/null
@@ -0,0 +1 @@
+obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
new file mode 100644 (file)
index 0000000..4901d0d
--- /dev/null
@@ -0,0 +1,228 @@
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+
+#include "error.h"
+#include "shadow.h"
+
+enum kmemcheck_error_type {
+       KMEMCHECK_ERROR_INVALID_ACCESS,
+       KMEMCHECK_ERROR_BUG,
+};
+
+#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)
+
+struct kmemcheck_error {
+       enum kmemcheck_error_type type;
+
+       union {
+               /* KMEMCHECK_ERROR_INVALID_ACCESS */
+               struct {
+                       /* Kind of access that caused the error */
+                       enum kmemcheck_shadow state;
+                       /* Address and size of the erroneous read */
+                       unsigned long   address;
+                       unsigned int    size;
+               };
+       };
+
+       struct pt_regs          regs;
+       struct stack_trace      trace;
+       unsigned long           trace_entries[32];
+
+       /* We compress it to a char. */
+       unsigned char           shadow_copy[SHADOW_COPY_SIZE];
+       unsigned char           memory_copy[SHADOW_COPY_SIZE];
+};
+
+/*
+ * Create a ring queue of errors to output. We can't call printk() directly
+ * from the kmemcheck traps, since this may call the console drivers and
+ * result in a recursive fault.
+ */
+static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
+static unsigned int error_count;
+static unsigned int error_rd;
+static unsigned int error_wr;
+static unsigned int error_missed_count;
+
+static struct kmemcheck_error *error_next_wr(void)
+{
+       struct kmemcheck_error *e;
+
+       if (error_count == ARRAY_SIZE(error_fifo)) {
+               ++error_missed_count;
+               return NULL;
+       }
+
+       e = &error_fifo[error_wr];
+       if (++error_wr == ARRAY_SIZE(error_fifo))
+               error_wr = 0;
+       ++error_count;
+       return e;
+}
+
+static struct kmemcheck_error *error_next_rd(void)
+{
+       struct kmemcheck_error *e;
+
+       if (error_count == 0)
+               return NULL;
+
+       e = &error_fifo[error_rd];
+       if (++error_rd == ARRAY_SIZE(error_fifo))
+               error_rd = 0;
+       --error_count;
+       return e;
+}
+
+void kmemcheck_error_recall(void)
+{
+       static const char *desc[] = {
+               [KMEMCHECK_SHADOW_UNALLOCATED]          = "unallocated",
+               [KMEMCHECK_SHADOW_UNINITIALIZED]        = "uninitialized",
+               [KMEMCHECK_SHADOW_INITIALIZED]          = "initialized",
+               [KMEMCHECK_SHADOW_FREED]                = "freed",
+       };
+
+       static const char short_desc[] = {
+               [KMEMCHECK_SHADOW_UNALLOCATED]          = 'a',
+               [KMEMCHECK_SHADOW_UNINITIALIZED]        = 'u',
+               [KMEMCHECK_SHADOW_INITIALIZED]          = 'i',
+               [KMEMCHECK_SHADOW_FREED]                = 'f',
+       };
+
+       struct kmemcheck_error *e;
+       unsigned int i;
+
+       e = error_next_rd();
+       if (!e)
+               return;
+
+       switch (e->type) {
+       case KMEMCHECK_ERROR_INVALID_ACCESS:
+               printk(KERN_ERR  "WARNING: kmemcheck: Caught %d-bit read "
+                       "from %s memory (%p)\n",
+                       8 * e->size, e->state < ARRAY_SIZE(desc) ?
+                               desc[e->state] : "(invalid shadow state)",
+                       (void *) e->address);
+
+               printk(KERN_INFO);
+               for (i = 0; i < SHADOW_COPY_SIZE; ++i)
+                       printk("%02x", e->memory_copy[i]);
+               printk("\n");
+
+               printk(KERN_INFO);
+               for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
+                       if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
+                               printk(" %c", short_desc[e->shadow_copy[i]]);
+                       else
+                               printk(" ?");
+               }
+               printk("\n");
+               printk(KERN_INFO "%*c\n", 2 + 2
+                       * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
+               break;
+       case KMEMCHECK_ERROR_BUG:
+               printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
+               break;
+       }
+
+       __show_regs(&e->regs, 1);
+       print_stack_trace(&e->trace, 0);
+}
+
+static void do_wakeup(unsigned long data)
+{
+       while (error_count > 0)
+               kmemcheck_error_recall();
+
+       if (error_missed_count > 0) {
+               printk(KERN_WARNING "kmemcheck: Lost %d error reports because "
+                       "the queue was too small\n", error_missed_count);
+               error_missed_count = 0;
+       }
+}
+
+static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0);
+
+/*
+ * Save the context of an error report.
+ */
+void kmemcheck_error_save(enum kmemcheck_shadow state,
+       unsigned long address, unsigned int size, struct pt_regs *regs)
+{
+       static unsigned long prev_ip;
+
+       struct kmemcheck_error *e;
+       void *shadow_copy;
+       void *memory_copy;
+
+       /* Don't report several adjacent errors from the same EIP. */
+       if (regs->ip == prev_ip)
+               return;
+       prev_ip = regs->ip;
+
+       e = error_next_wr();
+       if (!e)
+               return;
+
+       e->type = KMEMCHECK_ERROR_INVALID_ACCESS;
+
+       e->state = state;
+       e->address = address;
+       e->size = size;
+
+       /* Save regs */
+       memcpy(&e->regs, regs, sizeof(*regs));
+
+       /* Save stack trace */
+       e->trace.nr_entries = 0;
+       e->trace.entries = e->trace_entries;
+       e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
+       e->trace.skip = 0;
+       save_stack_trace_bp(&e->trace, regs->bp);
+
+       /* Round address down to nearest 16 bytes */
+       shadow_copy = kmemcheck_shadow_lookup(address
+               & ~(SHADOW_COPY_SIZE - 1));
+       BUG_ON(!shadow_copy);
+
+       memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE);
+
+       kmemcheck_show_addr(address);
+       memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1));
+       memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE);
+       kmemcheck_hide_addr(address);
+
+       tasklet_hi_schedule_first(&kmemcheck_tasklet);
+}
+
+/*
+ * Save the context of a kmemcheck bug.
+ */
+void kmemcheck_error_save_bug(struct pt_regs *regs)
+{
+       struct kmemcheck_error *e;
+
+       e = error_next_wr();
+       if (!e)
+               return;
+
+       e->type = KMEMCHECK_ERROR_BUG;
+
+       memcpy(&e->regs, regs, sizeof(*regs));
+
+       e->trace.nr_entries = 0;
+       e->trace.entries = e->trace_entries;
+       e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
+       e->trace.skip = 1;
+       save_stack_trace(&e->trace);
+
+       tasklet_hi_schedule_first(&kmemcheck_tasklet);
+}
diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
new file mode 100644 (file)
index 0000000..0efc2e8
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H
+#define ARCH__X86__MM__KMEMCHECK__ERROR_H
+
+#include <linux/ptrace.h>
+
+#include "shadow.h"
+
+void kmemcheck_error_save(enum kmemcheck_shadow state,
+       unsigned long address, unsigned int size, struct pt_regs *regs);
+
+void kmemcheck_error_save_bug(struct pt_regs *regs);
+
+void kmemcheck_error_recall(void);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
new file mode 100644 (file)
index 0000000..2c55ed0
--- /dev/null
@@ -0,0 +1,640 @@
+/**
+ * kmemcheck - a heavyweight memory checker for the linux kernel
+ * Copyright (C) 2007, 2008  Vegard Nossum <vegardno@ifi.uio.no>
+ * (With a lot of help from Ingo Molnar and Pekka Enberg.)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/page-flags.h>
+#include <linux/percpu.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kmemcheck.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include "error.h"
+#include "opcode.h"
+#include "pte.h"
+#include "selftest.h"
+#include "shadow.h"
+
+
+#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
+#  define KMEMCHECK_ENABLED 0
+#endif
+
+#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
+#  define KMEMCHECK_ENABLED 1
+#endif
+
+#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
+#  define KMEMCHECK_ENABLED 2
+#endif
+
+int kmemcheck_enabled = KMEMCHECK_ENABLED;
+
+int __init kmemcheck_init(void)
+{
+#ifdef CONFIG_SMP
+       /*
+        * Limit SMP to use a single CPU. We rely on the fact that this code
+        * runs before SMP is set up.
+        */
+       if (setup_max_cpus > 1) {
+               printk(KERN_INFO
+                       "kmemcheck: Limiting number of CPUs to 1.\n");
+               setup_max_cpus = 1;
+       }
+#endif
+
+       if (!kmemcheck_selftest()) {
+               printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
+               kmemcheck_enabled = 0;
+               return -EINVAL;
+       }
+
+       printk(KERN_INFO "kmemcheck: Initialized\n");
+       return 0;
+}
+
+early_initcall(kmemcheck_init);
+
+/*
+ * We need to parse the kmemcheck= option before any memory is allocated.
+ */
+static int __init param_kmemcheck(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       sscanf(str, "%d", &kmemcheck_enabled);
+       return 0;
+}
+
+early_param("kmemcheck", param_kmemcheck);
+
+int kmemcheck_show_addr(unsigned long address)
+{
+       pte_t *pte;
+
+       pte = kmemcheck_pte_lookup(address);
+       if (!pte)
+               return 0;
+
+       set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+       __flush_tlb_one(address);
+       return 1;
+}
+
+int kmemcheck_hide_addr(unsigned long address)
+{
+       pte_t *pte;
+
+       pte = kmemcheck_pte_lookup(address);
+       if (!pte)
+               return 0;
+
+       set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+       __flush_tlb_one(address);
+       return 1;
+}
+
+struct kmemcheck_context {
+       bool busy;
+       int balance;
+
+       /*
+        * There can be at most two memory operands to an instruction, but
+        * each address can cross a page boundary -- so we may need up to
+        * four addresses that must be hidden/revealed for each fault.
+        */
+       unsigned long addr[4];
+       unsigned long n_addrs;
+       unsigned long flags;
+
+       /* Data size of the instruction that caused a fault. */
+       unsigned int size;
+};
+
+static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
+
+bool kmemcheck_active(struct pt_regs *regs)
+{
+       struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+       return data->balance > 0;
+}
+
+/* Save an address that needs to be shown/hidden */
+static void kmemcheck_save_addr(unsigned long addr)
+{
+       struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+       BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
+       data->addr[data->n_addrs++] = addr;
+}
+
+static unsigned int kmemcheck_show_all(void)
+{
+       struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+       unsigned int i;
+       unsigned int n;
+
+       n = 0;
+       for (i = 0; i < data->n_addrs; ++i)
+               n += kmemcheck_show_addr(data->addr[i]);
+
+       return n;
+}
+
+static unsigned int kmemcheck_hide_all(void)
+{
+       struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+       unsigned int i;
+       unsigned int n;
+
+       n = 0;
+       for (i = 0; i < data->n_addrs; ++i)
+               n += kmemcheck_hide_addr(data->addr[i]);
+
+       return n;
+}
+
+/*
+ * Called from the #PF handler.
+ */
+void kmemcheck_show(struct pt_regs *regs)
+{
+       struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+       BUG_ON(!irqs_disabled());
+
+       if (unlikely(data->balance != 0)) {
+               kmemcheck_show_all();
+               kmemcheck_error_save_bug(regs);
+               data->balance = 0;
+               return;
+       }
+
+       /*
+        * None of the addresses actually belonged to kmemcheck. Note that
+        * this is not an error.
+        */
+       if (kmemcheck_show_all() == 0)
+               return;
+
+       ++data->balance;
+
+       /*
+        * The IF needs to be cleared as well, so that the faulting
+        * instruction can run "uninterrupted". Otherwise, we might take
+        * an interrupt and start executing that before we've had a chance
+        * to hide the page again.
+        *
+        * NOTE: In the rare case of multiple faults, we must not override
+        * the original flags:
+        */
+       if (!(regs->flags & X86_EFLAGS_TF))
+               data->flags = regs->flags;
+
+       regs->flags |= X86_EFLAGS_TF;
+       regs->flags &= ~X86_EFLAGS_IF;
+}
+
+/*
+ * Called from the #DB handler.
+ */
+void kmemcheck_hide(struct pt_regs *regs)
+{
+       struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+       int n;
+
+       BUG_ON(!irqs_disabled());
+
+       if (data->balance == 0)
+               return;
+
+       if (unlikely(data->balance != 1)) {
+               kmemcheck_show_all();
+               kmemcheck_error_save_bug(regs);
+               data->n_addrs = 0;
+               data->balance = 0;
+
+               if (!(data->flags & X86_EFLAGS_TF))
+                       regs->flags &= ~X86_EFLAGS_TF;
+               if (data->flags & X86_EFLAGS_IF)
+                       regs->flags |= X86_EFLAGS_IF;
+               return;
+       }
+
+       if (kmemcheck_enabled)
+               n = kmemcheck_hide_all();
+       else
+               n = kmemcheck_show_all();
+
+       if (n == 0)
+               return;
+
+       --data->balance;
+
+       data->n_addrs = 0;
+
+       if (!(data->flags & X86_EFLAGS_TF))
+               regs->flags &= ~X86_EFLAGS_TF;
+       if (data->flags & X86_EFLAGS_IF)
+               regs->flags |= X86_EFLAGS_IF;
+}
+
+void kmemcheck_show_pages(struct page *p, unsigned int n)
+{
+       unsigned int i;
+
+       for (i = 0; i < n; ++i) {
+               unsigned long address;
+               pte_t *pte;
+               unsigned int level;
+
+               address = (unsigned long) page_address(&p[i]);
+               pte = lookup_address(address, &level);
+               BUG_ON(!pte);
+               BUG_ON(level != PG_LEVEL_4K);
+
+               set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+               set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
+               __flush_tlb_one(address);
+       }
+}
+
+bool kmemcheck_page_is_tracked(struct page *p)
+{
+       /* This will also check the "hidden" flag of the PTE. */
+       return kmemcheck_pte_lookup((unsigned long) page_address(p));
+}
+
+void kmemcheck_hide_pages(struct page *p, unsigned int n)
+{
+       unsigned int i;
+
+       for (i = 0; i < n; ++i) {
+               unsigned long address;
+               pte_t *pte;
+               unsigned int level;
+
+               address = (unsigned long) page_address(&p[i]);
+               pte = lookup_address(address, &level);
+               BUG_ON(!pte);
+               BUG_ON(level != PG_LEVEL_4K);
+
+               set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+               set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
+               __flush_tlb_one(address);
+       }
+}
+
+/* Access may NOT cross page boundary */
+static void kmemcheck_read_strict(struct pt_regs *regs,
+       unsigned long addr, unsigned int size)
+{
+       void *shadow;
+       enum kmemcheck_shadow status;
+
+       shadow = kmemcheck_shadow_lookup(addr);
+       if (!shadow)
+               return;
+
+       kmemcheck_save_addr(addr);
+       status = kmemcheck_shadow_test(shadow, size);
+       if (status == KMEMCHECK_SHADOW_INITIALIZED)
+               return;
+
+       if (kmemcheck_enabled)
+               kmemcheck_error_save(status, addr, size, regs);
+
+       if (kmemcheck_enabled == 2)
+               kmemcheck_enabled = 0;
+
+       /* Don't warn about it again. */
+       kmemcheck_shadow_set(shadow, size);
+}
+
+/* Access may cross page boundary */
+static void kmemcheck_read(struct pt_regs *regs,
+       unsigned long addr, unsigned int size)
+{
+       unsigned long page = addr & PAGE_MASK;
+       unsigned long next_addr = addr + size - 1;
+       unsigned long next_page = next_addr & PAGE_MASK;
+
+       if (likely(page == next_page)) {
+               kmemcheck_read_strict(regs, addr, size);
+               return;
+       }
+
+       /*
+        * What we do is basically to split the access across the
+        * two pages and handle each part separately. Yes, this means
+        * that we may now see reads that are 3 + 5 bytes, for
+        * example (and if both are uninitialized, there will be two
+        * reports), but it makes the code a lot simpler.
+        */
+       kmemcheck_read_strict(regs, addr, next_page - addr);
+       kmemcheck_read_strict(regs, next_page, next_addr - next_page);
+}
+
+static void kmemcheck_write_strict(struct pt_regs *regs,
+       unsigned long addr, unsigned int size)
+{
+       void *shadow;
+
+       shadow = kmemcheck_shadow_lookup(addr);
+       if (!shadow)
+               return;
+
+       kmemcheck_save_addr(addr);
+       kmemcheck_shadow_set(shadow, size);
+}
+
+static void kmemcheck_write(struct pt_regs *regs,
+       unsigned long addr, unsigned int size)
+{
+       unsigned long page = addr & PAGE_MASK;
+       unsigned long next_addr = addr + size - 1;
+       unsigned long next_page = next_addr & PAGE_MASK;
+
+       if (likely(page == next_page)) {
+               kmemcheck_write_strict(regs, addr, size);
+               return;
+       }
+
+       /* See comment in kmemcheck_read(). */
+       kmemcheck_write_strict(regs, addr, next_page - addr);
+       kmemcheck_write_strict(regs, next_page, next_addr - next_page);
+}
+
+/*
+ * Copying is hard. We have two addresses, each of which may be split across
+ * a page (and each page will have different shadow addresses).
+ */
+static void kmemcheck_copy(struct pt_regs *regs,
+       unsigned long src_addr, unsigned long dst_addr, unsigned int size)
+{
+       uint8_t shadow[8];
+       enum kmemcheck_shadow status;
+
+       unsigned long page;
+       unsigned long next_addr;
+       unsigned long next_page;
+
+       uint8_t *x;
+       unsigned int i;
+       unsigned int n;
+
+       BUG_ON(size > sizeof(shadow));
+
+       page = src_addr & PAGE_MASK;
+       next_addr = src_addr + size - 1;
+       next_page = next_addr & PAGE_MASK;
+
+       if (likely(page == next_page)) {
+               /* Same page */
+               x = kmemcheck_shadow_lookup(src_addr);
+               if (x) {
+                       kmemcheck_save_addr(src_addr);
+                       for (i = 0; i < size; ++i)
+                               shadow[i] = x[i];
+               } else {
+                       for (i = 0; i < size; ++i)
+                               shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+               }
+       } else {
+               n = next_page - src_addr;
+               BUG_ON(n > sizeof(shadow));
+
+               /* First page */
+               x = kmemcheck_shadow_lookup(src_addr);
+               if (x) {
+                       kmemcheck_save_addr(src_addr);
+                       for (i = 0; i < n; ++i)
+                               shadow[i] = x[i];
+               } else {
+                       /* Not tracked */
+                       for (i = 0; i < n; ++i)
+                               shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+               }
+
+               /* Second page */
+               x = kmemcheck_shadow_lookup(next_page);
+               if (x) {
+                       kmemcheck_save_addr(next_page);
+                       for (i = n; i < size; ++i)
+                               shadow[i] = x[i - n];
+               } else {
+                       /* Not tracked */
+                       for (i = n; i < size; ++i)
+                               shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+               }
+       }
+
+       page = dst_addr & PAGE_MASK;
+       next_addr = dst_addr + size - 1;
+       next_page = next_addr & PAGE_MASK;
+
+       if (likely(page == next_page)) {
+               /* Same page */
+               x = kmemcheck_shadow_lookup(dst_addr);
+               if (x) {
+                       kmemcheck_save_addr(dst_addr);
+                       for (i = 0; i < size; ++i) {
+                               x[i] = shadow[i];
+                               shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+                       }
+               }
+       } else {
+               n = next_page - dst_addr;
+               BUG_ON(n > sizeof(shadow));
+
+               /* First page */
+               x = kmemcheck_shadow_lookup(dst_addr);
+               if (x) {
+                       kmemcheck_save_addr(dst_addr);
+                       for (i = 0; i < n; ++i) {
+                               x[i] = shadow[i];
+                               shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+                       }
+               }
+
+               /* Second page */
+               x = kmemcheck_shadow_lookup(next_page);
+               if (x) {
+                       kmemcheck_save_addr(next_page);
+                       for (i = n; i < size; ++i) {
+                               x[i - n] = shadow[i];
+                               shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+                       }
+               }
+       }
+
+       status = kmemcheck_shadow_test(shadow, size);
+       if (status == KMEMCHECK_SHADOW_INITIALIZED)
+               return;
+
+       if (kmemcheck_enabled)
+               kmemcheck_error_save(status, src_addr, size, regs);
+
+       if (kmemcheck_enabled == 2)
+               kmemcheck_enabled = 0;
+}
+
+enum kmemcheck_method {
+       KMEMCHECK_READ,
+       KMEMCHECK_WRITE,
+};
+
+static void kmemcheck_access(struct pt_regs *regs,
+       unsigned long fallback_address, enum kmemcheck_method fallback_method)
+{
+       const uint8_t *insn;
+       const uint8_t *insn_primary;
+       unsigned int size;
+
+       struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+       /* Recursive fault -- ouch. */
+       if (data->busy) {
+               kmemcheck_show_addr(fallback_address);
+               kmemcheck_error_save_bug(regs);
+               return;
+       }
+
+       data->busy = true;
+
+       insn = (const uint8_t *) regs->ip;
+       insn_primary = kmemcheck_opcode_get_primary(insn);
+
+       kmemcheck_opcode_decode(insn, &size);
+
+       switch (insn_primary[0]) {
+#ifdef CONFIG_KMEMCHECK_BITOPS_OK
+               /* AND, OR, XOR */
+               /*
+                * Unfortunately, these instructions have to be excluded from
+                * our regular checking since they access only some (and not
+                * all) bits. This clears out "bogus" bitfield-access warnings.
+                */
+       case 0x80:
+       case 0x81:
+       case 0x82:
+       case 0x83:
+               switch ((insn_primary[1] >> 3) & 7) {
+                       /* OR */
+               case 1:
+                       /* AND */
+               case 4:
+                       /* XOR */
+               case 6:
+                       kmemcheck_write(regs, fallback_address, size);
+                       goto out;
+
+                       /* ADD */
+               case 0:
+                       /* ADC */
+               case 2:
+                       /* SBB */
+               case 3:
+                       /* SUB */
+               case 5:
+                       /* CMP */
+               case 7:
+                       break;
+               }
+               break;
+#endif
+
+               /* MOVS, MOVSB, MOVSW, MOVSD */
+       case 0xa4:
+       case 0xa5:
+               /*
+                * These instructions are special because they take two
+                * addresses, but we only get one page fault.
+                */
+               kmemcheck_copy(regs, regs->si, regs->di, size);
+               goto out;
+
+               /* CMPS, CMPSB, CMPSW, CMPSD */
+       case 0xa6:
+       case 0xa7:
+               kmemcheck_read(regs, regs->si, size);
+               kmemcheck_read(regs, regs->di, size);
+               goto out;
+       }
+
+       /*
+        * If the opcode isn't special in any way, we use the data from the
+        * page fault handler to determine the address and type of memory
+        * access.
+        */
+       switch (fallback_method) {
+       case KMEMCHECK_READ:
+               kmemcheck_read(regs, fallback_address, size);
+               goto out;
+       case KMEMCHECK_WRITE:
+               kmemcheck_write(regs, fallback_address, size);
+               goto out;
+       }
+
+out:
+       data->busy = false;
+}
+
+bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
+       unsigned long error_code)
+{
+       pte_t *pte;
+
+       /*
+        * XXX: Is it safe to assume that memory accesses from virtual 86
+        * mode or non-kernel code segments will _never_ access kernel
+        * memory (e.g. tracked pages)? For now, we need this to avoid
+        * invoking kmemcheck for PnP BIOS calls.
+        */
+       if (regs->flags & X86_VM_MASK)
+               return false;
+       if (regs->cs != __KERNEL_CS)
+               return false;
+
+       pte = kmemcheck_pte_lookup(address);
+       if (!pte)
+               return false;
+
+       if (error_code & 2)
+               kmemcheck_access(regs, address, KMEMCHECK_WRITE);
+       else
+               kmemcheck_access(regs, address, KMEMCHECK_READ);
+
+       kmemcheck_show(regs);
+       return true;
+}
+
+bool kmemcheck_trap(struct pt_regs *regs)
+{
+       if (!kmemcheck_active(regs))
+               return false;
+
+       /* We're done. */
+       kmemcheck_hide(regs);
+       return true;
+}
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
new file mode 100644 (file)
index 0000000..63c19e2
--- /dev/null
@@ -0,0 +1,106 @@
+#include <linux/types.h>
+
+#include "opcode.h"
+
+static bool opcode_is_prefix(uint8_t b)
+{
+       return
+               /* Group 1 */
+               b == 0xf0 || b == 0xf2 || b == 0xf3
+               /* Group 2 */
+               || b == 0x26 || b == 0x2e || b == 0x36 || b == 0x3e
+               || b == 0x64 || b == 0x65
+               /* Group 3 */
+               || b == 0x66
+               /* Group 4 */
+               || b == 0x67;
+}
+
+#ifdef CONFIG_X86_64
+static bool opcode_is_rex_prefix(uint8_t b)
+{
+       return (b & 0xf0) == 0x40;
+}
+#else
+static bool opcode_is_rex_prefix(uint8_t b)
+{
+       return false;
+}
+#endif
+
+#define REX_W (1 << 3)
+
+/*
+ * This is a VERY crude opcode decoder. We only need to find the size of the
+ * load/store that caused our #PF and this should work for all the opcodes
+ * that we care about. Moreover, the ones who invented this instruction set
+ * should be shot.
+ */
+void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
+{
+       /* Default operand size */
+       int operand_size_override = 4;
+
+       /* prefixes */
+       for (; opcode_is_prefix(*op); ++op) {
+               if (*op == 0x66)
+                       operand_size_override = 2;
+       }
+
+       /* REX prefix */
+       if (opcode_is_rex_prefix(*op)) {
+               uint8_t rex = *op;
+
+               ++op;
+               if (rex & REX_W) {
+                       switch (*op) {
+                       case 0x63:
+                               *size = 4;
+                               return;
+                       case 0x0f:
+                               ++op;
+
+                               switch (*op) {
+                               case 0xb6:
+                               case 0xbe:
+                                       *size = 1;
+                                       return;
+                               case 0xb7:
+                               case 0xbf:
+                                       *size = 2;
+                                       return;
+                               }
+
+                               break;
+                       }
+
+                       *size = 8;
+                       return;
+               }
+       }
+
+       /* escape opcode */
+       if (*op == 0x0f) {
+               ++op;
+
+               /*
+                * This is move with zero-extend and sign-extend, respectively;
+                * we don't have to think about 0xb6/0xbe, because this is
+                * already handled in the conditional below.
+                */
+               if (*op == 0xb7 || *op == 0xbf)
+                       operand_size_override = 2;
+       }
+
+       *size = (*op & 1) ? operand_size_override : 1;
+}
+
+const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
+{
+       /* skip prefixes */
+       while (opcode_is_prefix(*op))
+               ++op;
+       if (opcode_is_rex_prefix(*op))
+               ++op;
+       return op;
+}
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
new file mode 100644 (file)
index 0000000..6956aad
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
+#define ARCH__X86__MM__KMEMCHECK__OPCODE_H
+
+#include <linux/types.h>
+
+void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
+const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
new file mode 100644 (file)
index 0000000..4ead26e
--- /dev/null
@@ -0,0 +1,22 @@
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+#include "pte.h"
+
+pte_t *kmemcheck_pte_lookup(unsigned long address)
+{
+       pte_t *pte;
+       unsigned int level;
+
+       pte = lookup_address(address, &level);
+       if (!pte)
+               return NULL;
+       if (level != PG_LEVEL_4K)
+               return NULL;
+       if (!pte_hidden(*pte))
+               return NULL;
+
+       return pte;
+}
+
diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
new file mode 100644 (file)
index 0000000..9f59664
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H
+#define ARCH__X86__MM__KMEMCHECK__PTE_H
+
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+pte_t *kmemcheck_pte_lookup(unsigned long address);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
new file mode 100644 (file)
index 0000000..036efbe
--- /dev/null
@@ -0,0 +1,69 @@
+#include <linux/kernel.h>
+
+#include "opcode.h"
+#include "selftest.h"
+
+struct selftest_opcode {
+       unsigned int expected_size;
+       const uint8_t *insn;
+       const char *desc;
+};
+
+static const struct selftest_opcode selftest_opcodes[] = {
+       /* REP MOVS */
+       {1, "\xf3\xa4",                 "rep movsb <mem8>, <mem8>"},
+       {4, "\xf3\xa5",                 "rep movsl <mem32>, <mem32>"},
+
+       /* MOVZX / MOVZXD */
+       {1, "\x66\x0f\xb6\x51\xf8",     "movzwq <mem8>, <reg16>"},
+       {1, "\x0f\xb6\x51\xf8",         "movzwq <mem8>, <reg32>"},
+
+       /* MOVSX / MOVSXD */
+       {1, "\x66\x0f\xbe\x51\xf8",     "movswq <mem8>, <reg16>"},
+       {1, "\x0f\xbe\x51\xf8",         "movswq <mem8>, <reg32>"},
+
+#ifdef CONFIG_X86_64
+       /* MOVZX / MOVZXD */
+       {1, "\x49\x0f\xb6\x51\xf8",     "movzbq <mem8>, <reg64>"},
+       {2, "\x49\x0f\xb7\x51\xf8",     "movzbq <mem16>, <reg64>"},
+
+       /* MOVSX / MOVSXD */
+       {1, "\x49\x0f\xbe\x51\xf8",     "movsbq <mem8>, <reg64>"},
+       {2, "\x49\x0f\xbf\x51\xf8",     "movsbq <mem16>, <reg64>"},
+       {4, "\x49\x63\x51\xf8",         "movslq <mem32>, <reg64>"},
+#endif
+};
+
+static bool selftest_opcode_one(const struct selftest_opcode *op)
+{
+       unsigned size;
+
+       kmemcheck_opcode_decode(op->insn, &size);
+
+       if (size == op->expected_size)
+               return true;
+
+       printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n",
+               op->desc, op->expected_size, size);
+       return false;
+}
+
+static bool selftest_opcodes_all(void)
+{
+       bool pass = true;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i)
+               pass = pass && selftest_opcode_one(&selftest_opcodes[i]);
+
+       return pass;
+}
+
+bool kmemcheck_selftest(void)
+{
+       bool pass = true;
+
+       pass = pass && selftest_opcodes_all();
+
+       return pass;
+}
diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
new file mode 100644 (file)
index 0000000..8fed4fe
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H
+#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H
+
+bool kmemcheck_selftest(void);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
new file mode 100644 (file)
index 0000000..e773b6b
--- /dev/null
@@ -0,0 +1,162 @@
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include "pte.h"
+#include "shadow.h"
+
+/*
+ * Return the shadow address for the given address. Returns NULL if the
+ * address is not tracked.
+ *
+ * We need to be extremely careful not to follow any invalid pointers,
+ * because this function can be called for *any* possible address.
+ */
+void *kmemcheck_shadow_lookup(unsigned long address)
+{
+       pte_t *pte;
+       struct page *page;
+
+       if (!virt_addr_valid(address))
+               return NULL;
+
+       pte = kmemcheck_pte_lookup(address);
+       if (!pte)
+               return NULL;
+
+       page = virt_to_page(address);
+       if (!page->shadow)
+               return NULL;
+       return page->shadow + (address & (PAGE_SIZE - 1));
+}
+
+static void mark_shadow(void *address, unsigned int n,
+       enum kmemcheck_shadow status)
+{
+       unsigned long addr = (unsigned long) address;
+       unsigned long last_addr = addr + n - 1;
+       unsigned long page = addr & PAGE_MASK;
+       unsigned long last_page = last_addr & PAGE_MASK;
+       unsigned int first_n;
+       void *shadow;
+
+       /* If the memory range crosses a page boundary, stop there. */
+       if (page == last_page)
+               first_n = n;
+       else
+               first_n = page + PAGE_SIZE - addr;
+
+       shadow = kmemcheck_shadow_lookup(addr);
+       if (shadow)
+               memset(shadow, status, first_n);
+
+       addr += first_n;
+       n -= first_n;
+
+       /* Do full-page memset()s. */
+       while (n >= PAGE_SIZE) {
+               shadow = kmemcheck_shadow_lookup(addr);
+               if (shadow)
+                       memset(shadow, status, PAGE_SIZE);
+
+               addr += PAGE_SIZE;
+               n -= PAGE_SIZE;
+       }
+
+       /* Do the remaining page, if any. */
+       if (n > 0) {
+               shadow = kmemcheck_shadow_lookup(addr);
+               if (shadow)
+                       memset(shadow, status, n);
+       }
+}
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+       mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
+}
+
+void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+       mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
+}
+
+/*
+ * Fill the shadow memory of the given address such that the memory at that
+ * address is marked as being initialized.
+ */
+void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+       mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
+}
+EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);
+
+void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+       mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
+}
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
+{
+       unsigned int i;
+
+       for (i = 0; i < n; ++i)
+               kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
+}
+
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
+{
+       unsigned int i;
+
+       for (i = 0; i < n; ++i)
+               kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
+}
+
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
+{
+       unsigned int i;
+
+       for (i = 0; i < n; ++i)
+               kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
+}
+
+enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
+{
+       uint8_t *x;
+       unsigned int i;
+
+       x = shadow;
+
+#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
+       /*
+        * Make sure _some_ bytes are initialized. Gcc frequently generates
+        * code to access neighboring bytes.
+        */
+       for (i = 0; i < size; ++i) {
+               if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
+                       return x[i];
+       }
+#else
+       /* All bytes must be initialized. */
+       for (i = 0; i < size; ++i) {
+               if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
+                       return x[i];
+       }
+#endif
+
+       return x[0];
+}
+
+void kmemcheck_shadow_set(void *shadow, unsigned int size)
+{
+       uint8_t *x;
+       unsigned int i;
+
+       x = shadow;
+       for (i = 0; i < size; ++i)
+               x[i] = KMEMCHECK_SHADOW_INITIALIZED;
+}
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
new file mode 100644 (file)
index 0000000..af46d9a
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H
+#define ARCH__X86__MM__KMEMCHECK__SHADOW_H
+
+enum kmemcheck_shadow {
+       KMEMCHECK_SHADOW_UNALLOCATED,
+       KMEMCHECK_SHADOW_UNINITIALIZED,
+       KMEMCHECK_SHADOW_INITIALIZED,
+       KMEMCHECK_SHADOW_FREED,
+};
+
+void *kmemcheck_shadow_lookup(unsigned long address);
+
+enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
+void kmemcheck_shadow_set(void *shadow, unsigned int size);
+
+#endif
index 6ce9518fe2acb6457db7d6c19c1739e6f48f2c9b..3cfe9ced8a4c6e500b6df519dceadc7534a5730a 100644 (file)
@@ -470,7 +470,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 
        if (!debug_pagealloc)
                spin_unlock(&cpa_lock);
-       base = alloc_pages(GFP_KERNEL, 0);
+       base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
        if (!debug_pagealloc)
                spin_lock(&cpa_lock);
        if (!base)
index 7aa03a5389f53ea635b9d553fac783e879c14535..8e43bdd45456017cd431c2cf9b4678c5b7e9f65d 100644 (file)
@@ -4,9 +4,11 @@
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       return (pte_t *)__get_free_page(PGALLOC_GFP);
 }
 
 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -14,9 +16,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
+       pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
 #else
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+       pte = alloc_pages(PGALLOC_GFP, 0);
 #endif
        if (pte)
                pgtable_page_ctor(pte);
@@ -161,7 +163,7 @@ static int preallocate_pmds(pmd_t *pmds[])
        bool failed = false;
 
        for(i = 0; i < PREALLOCATED_PMDS; i++) {
-               pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+               pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
                if (pmd == NULL)
                        failed = true;
                pmds[i] = pmd;
@@ -228,7 +230,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
        pmd_t *pmds[PREALLOCATED_PMDS];
        unsigned long flags;
 
-       pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+       pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
        if (pgd == NULL)
                goto out;
index 9e822d2e3bcecf69871eb65acdf81655c02e1124..11c687e527f101137188089b30b762b5c93f55e4 100644 (file)
@@ -1,31 +1,6 @@
-/*
- * include/asm-xtensa/kmap_types.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
 #ifndef _XTENSA_KMAP_TYPES_H
 #define _XTENSA_KMAP_TYPES_H
 
-enum km_type {
-  KM_BOUNCE_READ,
-  KM_SKB_SUNRPC_DATA,
-  KM_SKB_DATA_SOFTIRQ,
-  KM_USER0,
-  KM_USER1,
-  KM_BIO_SRC_IRQ,
-  KM_BIO_DST_IRQ,
-  KM_PTE0,
-  KM_PTE1,
-  KM_IRQ0,
-  KM_IRQ1,
-  KM_SOFTIRQ0,
-  KM_SOFTIRQ1,
-  KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
 
 #endif /* _XTENSA_KMAP_TYPES_H */
index e07f5c9fcd3500727efdd0a734b4cfcf654412fc..c4302f0e4ba08a66b39023d13986dda288a4f487 100644 (file)
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
 union thread_union init_thread_union
        __attribute__((__section__(".data.init_task"))) =
 { INIT_THREAD_INFO(init_task) };
index 5358f9ae13c1797b7eda131c7033c73a0c525162..54106f052f707bde6c4b89530d53a585c06366ce 100644 (file)
@@ -1065,6 +1065,11 @@ EXPORT_SYMBOL_GPL(bsg_register_queue);
 
 static struct cdev bsg_cdev;
 
+static char *bsg_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
+}
+
 static int __init bsg_init(void)
 {
        int ret, i;
@@ -1085,6 +1090,7 @@ static int __init bsg_init(void)
                ret = PTR_ERR(bsg_class);
                goto destroy_kmemcache;
        }
+       bsg_class->nodename = bsg_nodename;
 
        ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
        if (ret)
index fe7ccc0a618f35e21ed7fbbe19ae49665270d498..f4c64c2b303a33e2f46bd10b01d36c2cd931b4d6 100644 (file)
@@ -996,10 +996,20 @@ struct class block_class = {
        .name           = "block",
 };
 
+static char *block_nodename(struct device *dev)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+
+       if (disk->nodename)
+               return disk->nodename(disk);
+       return NULL;
+}
+
 static struct device_type disk_type = {
        .name           = "disk",
        .groups         = disk_attr_groups,
        .release        = disk_release,
+       .nodename       = block_nodename,
 };
 
 #ifdef CONFIG_PROC_FS
index 996b6ee57d9e3fb3ff48251ea9ee01d405b648ad..fc5b836f343084f74b76f516133adbe455e04ab7 100644 (file)
@@ -101,7 +101,12 @@ calibrate_xor_blocks(void)
        void *b1, *b2;
        struct xor_block_template *f, *fastest;
 
-       b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
+       /*
+        * Note: Since the memory is not actually used for _anything_ but to
+        * test the XOR speed, we don't really want kmemcheck to warn about
+        * reading uninitialized bytes here.
+        */
+       b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
        if (!b1) {
                printk(KERN_WARNING "xor: Yikes!  No memory available.\n");
                return -ENOMEM;
index 00cf9553f74065291612b0971337f79995933a06..a442c8f29fc1e697d71394fed73bb1d81db0150d 100644 (file)
@@ -104,6 +104,8 @@ source "drivers/auxdisplay/Kconfig"
 
 source "drivers/uio/Kconfig"
 
+source "drivers/vlynq/Kconfig"
+
 source "drivers/xen/Kconfig"
 
 source "drivers/staging/Kconfig"
index 9e7d4e56c85ba2747a0463daefef1aa7fef1ac40..00b44f4ccf03836eca44bbcad29d9cd9546f6ffb 100644 (file)
@@ -105,6 +105,7 @@ obj-$(CONFIG_PPC_PS3)               += ps3/
 obj-$(CONFIG_OF)               += of/
 obj-$(CONFIG_SSB)              += ssb/
 obj-$(CONFIG_VIRTIO)           += virtio/
+obj-$(CONFIG_VLYNQ)            += vlynq/
 obj-$(CONFIG_STAGING)          += staging/
 obj-y                          += platform/
 obj-y                          += ieee802154/
index 1977d4beb89e0012290a567c850f6a59dd3da1b0..7ecb1938e590f34526beaaeda2a3f6683f4024ce 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/semaphore.h>
 #include <linux/mutex.h>
+#include <linux/async.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -161,10 +162,18 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
        struct device *dev = to_dev(kobj);
        int retval = 0;
 
-       /* add the major/minor if present */
+       /* add device node properties if present */
        if (MAJOR(dev->devt)) {
+               const char *tmp;
+               const char *name;
+
                add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
                add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
+               name = device_get_nodename(dev, &tmp);
+               if (name) {
+                       add_uevent_var(env, "DEVNAME=%s", name);
+                       kfree(tmp);
+               }
        }
 
        if (dev->type && dev->type->name)
@@ -874,7 +883,7 @@ int device_add(struct device *dev)
         * the name, and force the use of dev_name()
         */
        if (dev->init_name) {
-               dev_set_name(dev, dev->init_name);
+               dev_set_name(dev, "%s", dev->init_name);
                dev->init_name = NULL;
        }
 
@@ -1127,6 +1136,47 @@ static struct device *next_device(struct klist_iter *i)
        return dev;
 }
 
+/**
+ * device_get_nodename - path of device node file
+ * @dev: device
+ * @tmp: possibly allocated string
+ *
+ * Return the relative path of a possible device node.
+ * Non-default names may need to allocate a memory to compose
+ * a name. This memory is returned in tmp and needs to be
+ * freed by the caller.
+ */
+const char *device_get_nodename(struct device *dev, const char **tmp)
+{
+       char *s;
+
+       *tmp = NULL;
+
+       /* the device type may provide a specific name */
+       if (dev->type && dev->type->nodename)
+               *tmp = dev->type->nodename(dev);
+       if (*tmp)
+               return *tmp;
+
+       /* the class may provide a specific name */
+       if (dev->class && dev->class->nodename)
+               *tmp = dev->class->nodename(dev);
+       if (*tmp)
+               return *tmp;
+
+       /* return name without allocation, tmp == NULL */
+       if (strchr(dev_name(dev), '!') == NULL)
+               return dev_name(dev);
+
+       /* replace '!' in the name with '/' */
+       *tmp = kstrdup(dev_name(dev), GFP_KERNEL);
+       if (!*tmp)
+               return NULL;
+       while ((s = strchr(*tmp, '!')))
+               s[0] = '/';
+       return *tmp;
+}
+
 /**
  * device_for_each_child - device child iterator.
  * @parent: parent struct device.
@@ -1271,7 +1321,7 @@ struct device *__root_device_register(const char *name, struct module *owner)
        if (!root)
                return ERR_PTR(err);
 
-       err = dev_set_name(&root->dev, name);
+       err = dev_set_name(&root->dev, "%s", name);
        if (err) {
                kfree(root);
                return ERR_PTR(err);
@@ -1665,4 +1715,5 @@ void device_shutdown(void)
        kobject_put(sysfs_dev_char_kobj);
        kobject_put(sysfs_dev_block_kobj);
        kobject_put(dev_kobj);
+       async_synchronize_full();
 }
index 742cbe6b042bbf711054e2cd7f02cdcc66354752..f0106875f01da48f9fb70afb6b2a68ac7259e37a 100644 (file)
@@ -226,7 +226,7 @@ static int __device_attach(struct device_driver *drv, void *data)
  * pair is found, break out and return.
  *
  * Returns 1 if the device was bound to a driver;
- * 0 if no matching device was found;
+ * 0 if no matching driver was found;
  * -ENODEV if the device is not registered.
  *
  * When called for a USB interface, @dev->parent->sem must be held.
@@ -320,6 +320,10 @@ static void __device_release_driver(struct device *dev)
                devres_release_all(dev);
                dev->driver = NULL;
                klist_remove(&dev->p->knode_driver);
+               if (dev->bus)
+                       blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+                                                    BUS_NOTIFY_UNBOUND_DRIVER,
+                                                    dev);
        }
 }
 
index 8a267c4276291bb775e2590a6935825ae35e9305..ddeb819c8f878a3cf07c18ed9d6d2c3ffe082e46 100644 (file)
@@ -40,7 +40,7 @@ static int loading_timeout = 60;      /* In seconds */
 static DEFINE_MUTEX(fw_lock);
 
 struct firmware_priv {
-       char fw_id[FIRMWARE_NAME_MAX];
+       char *fw_id;
        struct completion completion;
        struct bin_attribute attr_data;
        struct firmware *fw;
@@ -355,8 +355,9 @@ static void fw_dev_release(struct device *dev)
        for (i = 0; i < fw_priv->nr_pages; i++)
                __free_page(fw_priv->pages[i]);
        kfree(fw_priv->pages);
+       kfree(fw_priv->fw_id);
        kfree(fw_priv);
-       kfree(dev);
+       put_device(dev);
 
        module_put(THIS_MODULE);
 }
@@ -386,13 +387,19 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
 
        init_completion(&fw_priv->completion);
        fw_priv->attr_data = firmware_attr_data_tmpl;
-       strlcpy(fw_priv->fw_id, fw_name, FIRMWARE_NAME_MAX);
+       fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
+       if (!fw_priv->fw_id) {
+               dev_err(device, "%s: Firmware name allocation failed\n",
+                       __func__);
+               retval = -ENOMEM;
+               goto error_kfree;
+       }
 
        fw_priv->timeout.function = firmware_class_timeout;
        fw_priv->timeout.data = (u_long) fw_priv;
        init_timer(&fw_priv->timeout);
 
-       dev_set_name(f_dev, dev_name(device));
+       dev_set_name(f_dev, "%s", dev_name(device));
        f_dev->parent = device;
        f_dev->class = &firmware_class;
        dev_set_drvdata(f_dev, fw_priv);
@@ -400,14 +407,17 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
        retval = device_register(f_dev);
        if (retval) {
                dev_err(device, "%s: device_register failed\n", __func__);
-               goto error_kfree;
+               put_device(f_dev);
+               goto error_kfree_fw_id;
        }
        *dev_p = f_dev;
        return 0;
 
+error_kfree_fw_id:
+       kfree(fw_priv->fw_id);
 error_kfree:
-       kfree(fw_priv);
        kfree(f_dev);
+       kfree(fw_priv);
        return retval;
 }
 
@@ -615,8 +625,9 @@ request_firmware_work_func(void *arg)
  * @cont: function will be called asynchronously when the firmware
  *     request is over.
  *
- *     Asynchronous variant of request_firmware() for contexts where
- *     it is not possible to sleep.
+ *     Asynchronous variant of request_firmware() for user contexts where
+ *     it is not possible to sleep for long time. It can't be called
+ *     in atomic contexts.
  **/
 int
 request_firmware_nowait(
index 40b809742a1c3ecf3b42a0e55b8130059d7d6e8e..91d4087b4039490a844d23e909ffe34eaa0ff0f9 100644 (file)
@@ -72,10 +72,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
                       "Node %d Inactive(anon): %8lu kB\n"
                       "Node %d Active(file):   %8lu kB\n"
                       "Node %d Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
                       "Node %d Unevictable:    %8lu kB\n"
                       "Node %d Mlocked:        %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
                       "Node %d HighTotal:      %8lu kB\n"
                       "Node %d HighFree:       %8lu kB\n"
@@ -105,10 +103,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
                       nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
                       nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
                       nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                       nid, K(node_page_state(nid, NR_UNEVICTABLE)),
                       nid, K(node_page_state(nid, NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
                       nid, K(i.totalhigh),
                       nid, K(i.freehigh),
index ead3f64c41d0506774788860f0f2232f22c9ce0a..81cb01bfc356294865a4e04721d77d5a9c64ffff 100644 (file)
@@ -69,7 +69,8 @@ EXPORT_SYMBOL_GPL(platform_get_irq);
  * @name: resource name
  */
 struct resource *platform_get_resource_byname(struct platform_device *dev,
-                                             unsigned int type, char *name)
+                                             unsigned int type,
+                                             const char *name)
 {
        int i;
 
@@ -88,7 +89,7 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname);
  * @dev: platform device
  * @name: IRQ name
  */
-int platform_get_irq_byname(struct platform_device *dev, char *name)
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
 {
        struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
                                                          name);
@@ -244,7 +245,7 @@ int platform_device_add(struct platform_device *pdev)
        if (pdev->id != -1)
                dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
        else
-               dev_set_name(&pdev->dev, pdev->name);
+               dev_set_name(&pdev->dev, "%s", pdev->name);
 
        for (i = 0; i < pdev->num_resources; i++) {
                struct resource *p, *r = &pdev->resource[i];
index 9742a78c9fe42a043bfa0c57e89ead65e4f6fc2f..79a9ae5238acc1b9fa06a2fed1ed70d2440a9416 100644 (file)
@@ -131,6 +131,8 @@ static struct kset *system_kset;
 
 int sysdev_class_register(struct sysdev_class *cls)
 {
+       int retval;
+
        pr_debug("Registering sysdev class '%s'\n", cls->name);
 
        INIT_LIST_HEAD(&cls->drivers);
@@ -138,7 +140,11 @@ int sysdev_class_register(struct sysdev_class *cls)
        cls->kset.kobj.parent = &system_kset->kobj;
        cls->kset.kobj.ktype = &ktype_sysdev_class;
        cls->kset.kobj.kset = system_kset;
-       kobject_set_name(&cls->kset.kobj, cls->name);
+
+       retval = kobject_set_name(&cls->kset.kobj, "%s", cls->name);
+       if (retval)
+               return retval;
+
        return kset_register(&cls->kset);
 }
 
index 200efc4d2c1e756cd9a7182709bbfb2328a57140..19888354188f7f4c4afb0f1c37050cc1d06805fc 100644 (file)
@@ -266,6 +266,11 @@ static const struct file_operations aoe_fops = {
        .owner = THIS_MODULE,
 };
 
+static char *aoe_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
+}
+
 int __init
 aoechr_init(void)
 {
@@ -283,6 +288,8 @@ aoechr_init(void)
                unregister_chrdev(AOE_MAJOR, "aoechr");
                return PTR_ERR(aoe_class);
        }
+       aoe_class->nodename = aoe_nodename;
+
        for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
                device_create(aoe_class, NULL,
                              MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
index d57f11759480c1c2874922e773cee62ac6571472..83650e00632d80b6837678355fbac3c50c6d9a94 100644 (file)
@@ -430,7 +430,7 @@ static void pkt_sysfs_cleanup(void)
 /********************************************************************
   entries in debugfs
 
-  /debugfs/pktcdvd[0-7]/
+  /sys/kernel/debug/pktcdvd[0-7]/
                        info
 
  *******************************************************************/
@@ -2855,6 +2855,11 @@ static struct block_device_operations pktcdvd_ops = {
        .media_changed =        pkt_media_changed,
 };
 
+static char *pktcdvd_nodename(struct gendisk *gd)
+{
+       return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
+}
+
 /*
  * Set up mapping from pktcdvd device to CD-ROM device.
  */
@@ -2907,6 +2912,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
        disk->fops = &pktcdvd_ops;
        disk->flags = GENHD_FL_REMOVABLE;
        strcpy(disk->disk_name, pd->name);
+       disk->nodename = pktcdvd_nodename;
        disk->private_data = pd;
        disk->queue = blk_alloc_queue(GFP_KERNEL);
        if (!disk->queue)
@@ -3062,6 +3068,6 @@ static const struct file_operations pkt_ctl_fops = {
 static struct miscdevice pkt_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
-       .name           = DRIVER_NAME,
+       .name           = "pktcdvd/control",
        .fops           = &pkt_ctl_fops
 };
 
index c1996829d5ecb92231042afd0cdc90ee7b4ad186..e53284767f7c9e59b89c6fe1611487526dee081c 100644 (file)
@@ -753,12 +753,12 @@ static int blkfront_probe(struct xenbus_device *dev,
 
        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
-       dev->dev.driver_data = info;
+       dev_set_drvdata(&dev->dev, info);
 
        err = talk_to_backend(dev, info);
        if (err) {
                kfree(info);
-               dev->dev.driver_data = NULL;
+               dev_set_drvdata(&dev->dev, NULL);
                return err;
        }
 
@@ -843,7 +843,7 @@ static int blkif_recover(struct blkfront_info *info)
  */
 static int blkfront_resume(struct xenbus_device *dev)
 {
-       struct blkfront_info *info = dev->dev.driver_data;
+       struct blkfront_info *info = dev_get_drvdata(&dev->dev);
        int err;
 
        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
@@ -922,7 +922,7 @@ static void blkfront_connect(struct blkfront_info *info)
  */
 static void blkfront_closing(struct xenbus_device *dev)
 {
-       struct blkfront_info *info = dev->dev.driver_data;
+       struct blkfront_info *info = dev_get_drvdata(&dev->dev);
        unsigned long flags;
 
        dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
@@ -957,7 +957,7 @@ static void blkfront_closing(struct xenbus_device *dev)
 static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
 {
-       struct blkfront_info *info = dev->dev.driver_data;
+       struct blkfront_info *info = dev_get_drvdata(&dev->dev);
        struct block_device *bd;
 
        dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
@@ -997,7 +997,7 @@ static void backend_changed(struct xenbus_device *dev,
 
 static int blkfront_remove(struct xenbus_device *dev)
 {
-       struct blkfront_info *info = dev->dev.driver_data;
+       struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 
        dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
 
@@ -1010,7 +1010,7 @@ static int blkfront_remove(struct xenbus_device *dev)
 
 static int blkfront_is_ready(struct xenbus_device *dev)
 {
-       struct blkfront_info *info = dev->dev.driver_data;
+       struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 
        return info->is_ready;
 }
index c76bccf5354dc77f1b97bdb3ea22266d8154faa5..7d64e4230e661ebaac5aeff1383c6967cf963f7b 100644 (file)
@@ -347,7 +347,7 @@ static void __exit hvcs_module_exit(void);
 
 static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
 {
-       return viod->dev.driver_data;
+       return dev_get_drvdata(&viod->dev);
 }
 /* The sysfs interface for the driver and devices */
 
@@ -785,7 +785,7 @@ static int __devinit hvcs_probe(
        kref_init(&hvcsd->kref);
 
        hvcsd->vdev = dev;
-       dev->dev.driver_data = hvcsd;
+       dev_set_drvdata(&dev->dev, hvcsd);
 
        hvcsd->index = index;
 
@@ -831,7 +831,7 @@ static int __devinit hvcs_probe(
 
 static int __devexit hvcs_remove(struct vio_dev *dev)
 {
-       struct hvcs_struct *hvcsd = dev->dev.driver_data;
+       struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
        unsigned long flags;
        struct tty_struct *tty;
 
index e5d583c84e4f864ae31cb95534bd5befe6291ed6..fc93e2fc7c71220d02e31acbaf67d40eda7b3133 100644 (file)
@@ -153,6 +153,7 @@ static const struct file_operations rng_chrdev_ops = {
 static struct miscdevice rng_miscdev = {
        .minor          = RNG_MISCDEV_MINOR,
        .name           = RNG_MODULE_NAME,
+       .devnode        = "hwrng",
        .fops           = &rng_chrdev_ops,
 };
 
index 259644646b82e88ea2f120b1f73eb3563c627aa4..d2e698096ace182698152e2366d7c2dc2cf91342 100644 (file)
@@ -2375,14 +2375,14 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
                info->io.addr_data, info->io.regsize, info->io.regspacing,
                info->irq);
 
-       dev->dev.driver_data = (void *) info;
+       dev_set_drvdata(&dev->dev, info);
 
        return try_smi_init(info);
 }
 
 static int __devexit ipmi_of_remove(struct of_device *dev)
 {
-       cleanup_one_si(dev->dev.driver_data);
+       cleanup_one_si(dev_get_drvdata(&dev->dev));
        return 0;
 }
 
index a5e0db9d7662d4a060ff167260753b193a64ee9f..62c99fa59e2b5bf09ee792652648eab4760f37bc 100644 (file)
@@ -168,7 +168,6 @@ static const struct file_operations misc_fops = {
        .open           = misc_open,
 };
 
-
 /**
  *     misc_register   -       register a miscellaneous device
  *     @misc: device structure
@@ -217,8 +216,8 @@ int misc_register(struct miscdevice * misc)
                misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7);
        dev = MKDEV(MISC_MAJOR, misc->minor);
 
-       misc->this_device = device_create(misc_class, misc->parent, dev, NULL,
-                                         "%s", misc->name);
+       misc->this_device = device_create(misc_class, misc->parent, dev,
+                                         misc, "%s", misc->name);
        if (IS_ERR(misc->this_device)) {
                err = PTR_ERR(misc->this_device);
                goto out;
@@ -264,6 +263,15 @@ int misc_deregister(struct miscdevice *misc)
 EXPORT_SYMBOL(misc_register);
 EXPORT_SYMBOL(misc_deregister);
 
+static char *misc_nodename(struct device *dev)
+{
+       struct miscdevice *c = dev_get_drvdata(dev);
+
+       if (c->devnode)
+               return kstrdup(c->devnode, GFP_KERNEL);
+       return NULL;
+}
+
 static int __init misc_init(void)
 {
        int err;
@@ -279,6 +287,7 @@ static int __init misc_init(void)
        err = -EIO;
        if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
                goto fail_printk;
+       misc_class->nodename = misc_nodename;
        return 0;
 
 fail_printk:
index db32f0e4c7dd48aaef0f1022627a4bebaded6f39..05f9d18b9361227e7cff39946e2d3de41faf8609 100644 (file)
@@ -261,6 +261,11 @@ static const struct file_operations raw_ctl_fops = {
 
 static struct cdev raw_cdev;
 
+static char *raw_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
+}
+
 static int __init raw_init(void)
 {
        dev_t dev = MKDEV(RAW_MAJOR, 0);
@@ -284,6 +289,7 @@ static int __init raw_init(void)
                ret = PTR_ERR(raw_class);
                goto error_region;
        }
+       raw_class->nodename = raw_nodename;
        device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
 
        return 0;
index c796a86ab7f36bbff85998425b31d78fb2417d32..d9113b4c76e370bad289b19558958448846a6064 100644 (file)
@@ -171,8 +171,9 @@ int do_poke_blanked_console;
 int console_blanked;
 
 static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
-static int blankinterval = 10*60*HZ;
 static int vesa_off_interval;
+static int blankinterval = 10*60;
+core_param(consoleblank, blankinterval, int, 0444);
 
 static DECLARE_WORK(console_work, console_callback);
 
@@ -1485,7 +1486,7 @@ static void setterm_command(struct vc_data *vc)
                        update_attr(vc);
                        break;
                case 9: /* set blanking interval */
-                       blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60 * HZ;
+                       blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60;
                        poke_blanked_console();
                        break;
                case 10: /* set bell frequency in Hz */
@@ -2871,7 +2872,7 @@ static int __init con_init(void)
 
        if (blankinterval) {
                blank_state = blank_normal_wait;
-               mod_timer(&console_timer, jiffies + blankinterval);
+               mod_timer(&console_timer, jiffies + (blankinterval * HZ));
        }
 
        for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
@@ -3677,7 +3678,7 @@ void do_unblank_screen(int leaving_gfx)
                return; /* but leave console_blanked != 0 */
 
        if (blankinterval) {
-               mod_timer(&console_timer, jiffies + blankinterval);
+               mod_timer(&console_timer, jiffies + (blankinterval * HZ));
                blank_state = blank_normal_wait;
        }
 
@@ -3711,7 +3712,7 @@ void unblank_screen(void)
 static void blank_screen_t(unsigned long dummy)
 {
        if (unlikely(!keventd_up())) {
-               mod_timer(&console_timer, jiffies + blankinterval);
+               mod_timer(&console_timer, jiffies + (blankinterval * HZ));
                return;
        }
        blank_timer_expired = 1;
@@ -3741,7 +3742,7 @@ void poke_blanked_console(void)
        if (console_blanked)
                unblank_screen();
        else if (blankinterval) {
-               mod_timer(&console_timer, jiffies + blankinterval);
+               mod_timer(&console_timer, jiffies + (blankinterval * HZ));
                blank_state = blank_normal_wait;
        }
 }
index 40bd8c61c7d7c5ead10f794ad52b1250bf9e33bd..72a633a6ec982e74153235e8f015ddba65d84af4 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/acpi_pmtmr.h>
 #include <linux/clocksource.h>
+#include <linux/timex.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/pci.h>
index ed69837d8b746edf376c42857992f60433191c03..6cbb7a514436fd0b355e38a2d6138a65254f147d 100644 (file)
@@ -1140,6 +1140,11 @@ NON0301 "c't Universale Graphic Adapter"
 NON0401 "c't Universal Ethernet Adapter"
 NON0501 "c't Universal 16-Bit Sound Adapter"
 NON0601 "c't Universal 8-Bit Adapter"
+NPI0120 "Network Peripherals NP-EISA-1 FDDI Interface"
+NPI0221 "Network Peripherals NP-EISA-2 FDDI Interface"
+NPI0223 "Network Peripherals NP-EISA-2E Enhanced FDDI Interface"
+NPI0301 "Network Peripherals NP-EISA-3 FDDI Interface"
+NPI0303 "Network Peripherals NP-EISA-3E Enhanced FDDI Interface"
 NSS0011 "Newport Systems Solutions WNIC Adapter"
 NVL0701 "Novell NE3200 Bus Master Ethernet"
 NVL0702 "Novell NE3200T Bus Master Ethernet"
index 74edb1d0110f7675f2635d26f5dc4ade609a96ac..0dd0f633b18ddabbcfb55f095b7631eeee177fea 100644 (file)
@@ -31,11 +31,11 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
        }
 
        pci_eisa_root.dev              = &pdev->dev;
-       pci_eisa_root.dev->driver_data = &pci_eisa_root;
        pci_eisa_root.res              = pdev->bus->resource[0];
        pci_eisa_root.bus_base_addr    = pdev->bus->resource[0]->start;
        pci_eisa_root.slots            = EISA_MAX_SLOTS;
        pci_eisa_root.dma_mask         = pdev->dma_mask;
+       dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
 
        if (eisa_root_register (&pci_eisa_root)) {
                printk (KERN_ERR "pci_eisa : Could not register EISA root\n");
index 3074879f231f26d9df3b5219e69a49bfcf8704d5..535e4f9c83f4fed09ca3c00e085b53c60e0cee62 100644 (file)
@@ -57,7 +57,7 @@ static int __init virtual_eisa_root_init (void)
 
        eisa_bus_root.force_probe = force_probe;
        
-       eisa_root_dev.dev.driver_data = &eisa_bus_root;
+       dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
 
        if (eisa_root_register (&eisa_bus_root)) {
                /* A real bridge may have been registered before
index a7c31e9039c14c0912554ed981c1fa390ce7606d..bc3b9bf822bf1e15c661944edae679406e673a0e 100644 (file)
@@ -2,10 +2,10 @@
 # Makefile for the Linux IEEE 1394 implementation
 #
 
-firewire-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
-                   fw-device.o fw-cdev.o
-firewire-ohci-y += fw-ohci.o
-firewire-sbp2-y += fw-sbp2.o
+firewire-core-y += core-card.o core-cdev.o core-device.o \
+                   core-iso.o core-topology.o core-transaction.o
+firewire-ohci-y += ohci.o
+firewire-sbp2-y += sbp2.o
 
 obj-$(CONFIG_FIREWIRE) += firewire-core.o
 obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
new file mode 100644 (file)
index 0000000..4c1be64
--- /dev/null
@@ -0,0 +1,561 @@
+/*
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/crc-itu-t.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+
+int fw_compute_block_crc(u32 *block)
+{
+       __be32 be32_block[256];
+       int i, length;
+
+       length = (*block >> 16) & 0xff;
+       for (i = 0; i < length; i++)
+               be32_block[i] = cpu_to_be32(block[i + 1]);
+       *block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
+
+       return length;
+}
+
+static DEFINE_MUTEX(card_mutex);
+static LIST_HEAD(card_list);
+
+static LIST_HEAD(descriptor_list);
+static int descriptor_count;
+
+#define BIB_CRC(v)             ((v) <<  0)
+#define BIB_CRC_LENGTH(v)      ((v) << 16)
+#define BIB_INFO_LENGTH(v)     ((v) << 24)
+
+#define BIB_LINK_SPEED(v)      ((v) <<  0)
+#define BIB_GENERATION(v)      ((v) <<  4)
+#define BIB_MAX_ROM(v)         ((v) <<  8)
+#define BIB_MAX_RECEIVE(v)     ((v) << 12)
+#define BIB_CYC_CLK_ACC(v)     ((v) << 16)
+#define BIB_PMC                        ((1) << 27)
+#define BIB_BMC                        ((1) << 28)
+#define BIB_ISC                        ((1) << 29)
+#define BIB_CMC                        ((1) << 30)
+#define BIB_IMC                        ((1) << 31)
+
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+{
+       struct fw_descriptor *desc;
+       static u32 config_rom[256];
+       int i, j, length;
+
+       /*
+        * Initialize contents of config rom buffer.  On the OHCI
+        * controller, block reads to the config rom accesses the host
+        * memory, but quadlet read access the hardware bus info block
+        * registers.  That's just crack, but it means we should make
+        * sure the contents of bus info block in host memory matches
+        * the version stored in the OHCI registers.
+        */
+
+       memset(config_rom, 0, sizeof(config_rom));
+       config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
+       config_rom[1] = 0x31333934;
+
+       config_rom[2] =
+               BIB_LINK_SPEED(card->link_speed) |
+               BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
+               BIB_MAX_ROM(2) |
+               BIB_MAX_RECEIVE(card->max_receive) |
+               BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
+       config_rom[3] = card->guid >> 32;
+       config_rom[4] = card->guid;
+
+       /* Generate root directory. */
+       i = 5;
+       config_rom[i++] = 0;
+       config_rom[i++] = 0x0c0083c0; /* node capabilities */
+       j = i + descriptor_count;
+
+       /* Generate root directory entries for descriptors. */
+       list_for_each_entry (desc, &descriptor_list, link) {
+               if (desc->immediate > 0)
+                       config_rom[i++] = desc->immediate;
+               config_rom[i] = desc->key | (j - i);
+               i++;
+               j += desc->length;
+       }
+
+       /* Update root directory length. */
+       config_rom[5] = (i - 5 - 1) << 16;
+
+       /* End of root directory, now copy in descriptors. */
+       list_for_each_entry (desc, &descriptor_list, link) {
+               memcpy(&config_rom[i], desc->data, desc->length * 4);
+               i += desc->length;
+       }
+
+       /* Calculate CRCs for all blocks in the config rom.  This
+        * assumes that CRC length and info length are identical for
+        * the bus info block, which is always the case for this
+        * implementation. */
+       for (i = 0; i < j; i += length + 1)
+               length = fw_compute_block_crc(config_rom + i);
+
+       *config_rom_length = j;
+
+       return config_rom;
+}
+
+static void update_config_roms(void)
+{
+       struct fw_card *card;
+       u32 *config_rom;
+       size_t length;
+
+       list_for_each_entry (card, &card_list, link) {
+               config_rom = generate_config_rom(card, &length);
+               card->driver->set_config_rom(card, config_rom, length);
+       }
+}
+
+int fw_core_add_descriptor(struct fw_descriptor *desc)
+{
+       size_t i;
+
+       /*
+        * Check descriptor is valid; the length of all blocks in the
+        * descriptor has to add up to exactly the length of the
+        * block.
+        */
+       i = 0;
+       while (i < desc->length)
+               i += (desc->data[i] >> 16) + 1;
+
+       if (i != desc->length)
+               return -EINVAL;
+
+       mutex_lock(&card_mutex);
+
+       list_add_tail(&desc->link, &descriptor_list);
+       descriptor_count++;
+       if (desc->immediate > 0)
+               descriptor_count++;
+       update_config_roms();
+
+       mutex_unlock(&card_mutex);
+
+       return 0;
+}
+
+void fw_core_remove_descriptor(struct fw_descriptor *desc)
+{
+       mutex_lock(&card_mutex);
+
+       list_del(&desc->link);
+       descriptor_count--;
+       if (desc->immediate > 0)
+               descriptor_count--;
+       update_config_roms();
+
+       mutex_unlock(&card_mutex);
+}
+
+static void allocate_broadcast_channel(struct fw_card *card, int generation)
+{
+       int channel, bandwidth = 0;
+
+       fw_iso_resource_manage(card, generation, 1ULL << 31,
+                              &channel, &bandwidth, true);
+       if (channel == 31) {
+               card->broadcast_channel_allocated = true;
+               device_for_each_child(card->device, (void *)(long)generation,
+                                     fw_device_set_broadcast_channel);
+       }
+}
+
+static const char gap_count_table[] = {
+       63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+};
+
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+{
+       int scheduled;
+
+       fw_card_get(card);
+       scheduled = schedule_delayed_work(&card->work, delay);
+       if (!scheduled)
+               fw_card_put(card);
+}
+
+static void fw_card_bm_work(struct work_struct *work)
+{
+       struct fw_card *card = container_of(work, struct fw_card, work.work);
+       struct fw_device *root_device;
+       struct fw_node *root_node;
+       unsigned long flags;
+       int root_id, new_root_id, irm_id, local_id;
+       int gap_count, generation, grace, rcode;
+       bool do_reset = false;
+       bool root_device_is_running;
+       bool root_device_is_cmc;
+       __be32 lock_data[2];
+
+       spin_lock_irqsave(&card->lock, flags);
+
+       if (card->local_node == NULL) {
+               spin_unlock_irqrestore(&card->lock, flags);
+               goto out_put_card;
+       }
+
+       generation = card->generation;
+       root_node = card->root_node;
+       fw_node_get(root_node);
+       root_device = root_node->data;
+       root_device_is_running = root_device &&
+                       atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+       root_device_is_cmc = root_device && root_device->cmc;
+       root_id  = root_node->node_id;
+       irm_id   = card->irm_node->node_id;
+       local_id = card->local_node->node_id;
+
+       grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+
+       if (is_next_generation(generation, card->bm_generation) ||
+           (card->bm_generation != generation && grace)) {
+               /*
+                * This first step is to figure out who is IRM and
+                * then try to become bus manager.  If the IRM is not
+                * well defined (e.g. does not have an active link
+                * layer or does not responds to our lock request, we
+                * will have to do a little vigilante bus management.
+                * In that case, we do a goto into the gap count logic
+                * so that when we do the reset, we still optimize the
+                * gap count.  That could well save a reset in the
+                * next generation.
+                */
+
+               if (!card->irm_node->link_on) {
+                       new_root_id = local_id;
+                       fw_notify("IRM has link off, making local node (%02x) root.\n",
+                                 new_root_id);
+                       goto pick_me;
+               }
+
+               lock_data[0] = cpu_to_be32(0x3f);
+               lock_data[1] = cpu_to_be32(local_id);
+
+               spin_unlock_irqrestore(&card->lock, flags);
+
+               rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+                               irm_id, generation, SCODE_100,
+                               CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
+                               lock_data, sizeof(lock_data));
+
+               if (rcode == RCODE_GENERATION)
+                       /* Another bus reset, BM work has been rescheduled. */
+                       goto out;
+
+               if (rcode == RCODE_COMPLETE &&
+                   lock_data[0] != cpu_to_be32(0x3f)) {
+
+                       /* Somebody else is BM.  Only act as IRM. */
+                       if (local_id == irm_id)
+                               allocate_broadcast_channel(card, generation);
+
+                       goto out;
+               }
+
+               spin_lock_irqsave(&card->lock, flags);
+
+               if (rcode != RCODE_COMPLETE) {
+                       /*
+                        * The lock request failed, maybe the IRM
+                        * isn't really IRM capable after all. Let's
+                        * do a bus reset and pick the local node as
+                        * root, and thus, IRM.
+                        */
+                       new_root_id = local_id;
+                       fw_notify("BM lock failed, making local node (%02x) root.\n",
+                                 new_root_id);
+                       goto pick_me;
+               }
+       } else if (card->bm_generation != generation) {
+               /*
+                * We weren't BM in the last generation, and the last
+                * bus reset is less than 125ms ago.  Reschedule this job.
+                */
+               spin_unlock_irqrestore(&card->lock, flags);
+               fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
+               goto out;
+       }
+
+       /*
+        * We're bus manager for this generation, so next step is to
+        * make sure we have an active cycle master and do gap count
+        * optimization.
+        */
+       card->bm_generation = generation;
+
+       if (root_device == NULL) {
+               /*
+                * Either link_on is false, or we failed to read the
+                * config rom.  In either case, pick another root.
+                */
+               new_root_id = local_id;
+       } else if (!root_device_is_running) {
+               /*
+                * If we haven't probed this device yet, bail out now
+                * and let's try again once that's done.
+                */
+               spin_unlock_irqrestore(&card->lock, flags);
+               goto out;
+       } else if (root_device_is_cmc) {
+               /*
+                * FIXME: I suppose we should set the cmstr bit in the
+                * STATE_CLEAR register of this node, as described in
+                * 1394-1995, 8.4.2.6.  Also, send out a force root
+                * packet for this node.
+                */
+               new_root_id = root_id;
+       } else {
+               /*
+                * Current root has an active link layer and we
+                * successfully read the config rom, but it's not
+                * cycle master capable.
+                */
+               new_root_id = local_id;
+       }
+
+ pick_me:
+       /*
+        * Pick a gap count from 1394a table E-1.  The table doesn't cover
+        * the typically much larger 1394b beta repeater delays though.
+        */
+       if (!card->beta_repeaters_present &&
+           root_node->max_hops < ARRAY_SIZE(gap_count_table))
+               gap_count = gap_count_table[root_node->max_hops];
+       else
+               gap_count = 63;
+
+       /*
+        * Finally, figure out if we should do a reset or not.  If we have
+        * done less than 5 resets with the same physical topology and we
+        * have either a new root or a new gap count setting, let's do it.
+        */
+
+       if (card->bm_retries++ < 5 &&
+           (card->gap_count != gap_count || new_root_id != root_id))
+               do_reset = true;
+
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       if (do_reset) {
+               fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
+                         card->index, new_root_id, gap_count);
+               fw_send_phy_config(card, new_root_id, generation, gap_count);
+               fw_core_initiate_bus_reset(card, 1);
+               /* Will allocate broadcast channel after the reset. */
+       } else {
+               if (local_id == irm_id)
+                       allocate_broadcast_channel(card, generation);
+       }
+
+ out:
+       fw_node_put(root_node);
+ out_put_card:
+       fw_card_put(card);
+}
+
+static void flush_timer_callback(unsigned long data)
+{
+       struct fw_card *card = (struct fw_card *)data;
+
+       fw_flush_transactions(card);
+}
+
+void fw_card_initialize(struct fw_card *card,
+                       const struct fw_card_driver *driver,
+                       struct device *device)
+{
+       static atomic_t index = ATOMIC_INIT(-1);
+
+       card->index = atomic_inc_return(&index);
+       card->driver = driver;
+       card->device = device;
+       card->current_tlabel = 0;
+       card->tlabel_mask = 0;
+       card->color = 0;
+       card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
+
+       kref_init(&card->kref);
+       init_completion(&card->done);
+       INIT_LIST_HEAD(&card->transaction_list);
+       spin_lock_init(&card->lock);
+       setup_timer(&card->flush_timer,
+                   flush_timer_callback, (unsigned long)card);
+
+       card->local_node = NULL;
+
+       INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
+}
+EXPORT_SYMBOL(fw_card_initialize);
+
+int fw_card_add(struct fw_card *card,
+               u32 max_receive, u32 link_speed, u64 guid)
+{
+       u32 *config_rom;
+       size_t length;
+       int ret;
+
+       card->max_receive = max_receive;
+       card->link_speed = link_speed;
+       card->guid = guid;
+
+       mutex_lock(&card_mutex);
+       config_rom = generate_config_rom(card, &length);
+       list_add_tail(&card->link, &card_list);
+       mutex_unlock(&card_mutex);
+
+       ret = card->driver->enable(card, config_rom, length);
+       if (ret < 0) {
+               mutex_lock(&card_mutex);
+               list_del(&card->link);
+               mutex_unlock(&card_mutex);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(fw_card_add);
+
+
+/*
+ * The next few functions implements a dummy driver that use once a
+ * card driver shuts down an fw_card.  This allows the driver to
+ * cleanly unload, as all IO to the card will be handled by the dummy
+ * driver instead of calling into the (possibly) unloaded module.  The
+ * dummy driver just fails all IO.
+ */
+
+static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+       BUG();
+       return -1;
+}
+
+static int dummy_update_phy_reg(struct fw_card *card, int address,
+                               int clear_bits, int set_bits)
+{
+       return -ENODEV;
+}
+
+static int dummy_set_config_rom(struct fw_card *card,
+                               u32 *config_rom, size_t length)
+{
+       /*
+        * We take the card out of card_list before setting the dummy
+        * driver, so this should never get called.
+        */
+       BUG();
+       return -1;
+}
+
+static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+       packet->callback(packet, card, -ENODEV);
+}
+
+static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+       packet->callback(packet, card, -ENODEV);
+}
+
+static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+       return -ENOENT;
+}
+
+static int dummy_enable_phys_dma(struct fw_card *card,
+                                int node_id, int generation)
+{
+       return -ENODEV;
+}
+
+static struct fw_card_driver dummy_driver = {
+       .enable          = dummy_enable,
+       .update_phy_reg  = dummy_update_phy_reg,
+       .set_config_rom  = dummy_set_config_rom,
+       .send_request    = dummy_send_request,
+       .cancel_packet   = dummy_cancel_packet,
+       .send_response   = dummy_send_response,
+       .enable_phys_dma = dummy_enable_phys_dma,
+};
+
+void fw_card_release(struct kref *kref)
+{
+       struct fw_card *card = container_of(kref, struct fw_card, kref);
+
+       complete(&card->done);
+}
+
+void fw_core_remove_card(struct fw_card *card)
+{
+       card->driver->update_phy_reg(card, 4,
+                                    PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
+       fw_core_initiate_bus_reset(card, 1);
+
+       mutex_lock(&card_mutex);
+       list_del_init(&card->link);
+       mutex_unlock(&card_mutex);
+
+       /* Set up the dummy driver. */
+       card->driver = &dummy_driver;
+
+       fw_destroy_nodes(card);
+
+       /* Wait for all users, especially device workqueue jobs, to finish. */
+       fw_card_put(card);
+       wait_for_completion(&card->done);
+
+       WARN_ON(!list_empty(&card->transaction_list));
+       del_timer_sync(&card->flush_timer);
+}
+EXPORT_SYMBOL(fw_core_remove_card);
+
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+{
+       int reg = short_reset ? 5 : 1;
+       int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
+
+       return card->driver->update_phy_reg(card, reg, 0, bit);
+}
+EXPORT_SYMBOL(fw_core_initiate_bus_reset);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
new file mode 100644 (file)
index 0000000..d1d30c6
--- /dev/null
@@ -0,0 +1,1458 @@
+/*
+ * Char device for device raw access
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/compat.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-cdev.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <asm/system.h>
+
+#include "core.h"
+
+/*
+ * Per-open-file state of the /dev/fw* character device.  One instance is
+ * created in fw_device_op_open() and reference-counted via @kref: the idr
+ * holds one reference per registered resource, plus one per in-flight
+ * transaction callback, plus the file itself.
+ */
+struct client {
+       u32 version;                    /* FW_CDEV_VERSION negotiated in GET_INFO */
+       struct fw_device *device;       /* the device this file refers to */
+
+       spinlock_t lock;                /* protects in_shutdown, resource_idr, event_list */
+       bool in_shutdown;
+       struct idr resource_idr;        /* handle -> struct client_resource */
+       struct list_head event_list;    /* events waiting to be read(2) */
+       wait_queue_head_t wait;         /* readers sleep here */
+       u64 bus_reset_closure;
+
+       /* Only one iso context per client is supported at this time. */
+       struct fw_iso_context *iso_context;
+       u64 iso_closure;
+       struct fw_iso_buffer buffer;    /* mmap()ed iso payload buffer */
+       unsigned long vm_start;         /* userspace address of the mmap */
+
+       struct list_head link;          /* on device->client_list */
+       struct kref kref;
+};
+
+/* Take a reference on the client. */
+static inline void client_get(struct client *client)
+{
+       kref_get(&client->kref);
+}
+
+/* kref release: drop the device reference and free the client itself. */
+static void client_release(struct kref *kref)
+{
+       struct client *client = container_of(kref, struct client, kref);
+
+       fw_device_put(client->device);
+       kfree(client);
+}
+
+/* Drop a reference on the client; frees it when the last one goes. */
+static void client_put(struct client *client)
+{
+       kref_put(&client->kref, client_release);
+}
+
+struct client_resource;
+typedef void (*client_resource_release_fn_t)(struct client *,
+                                            struct client_resource *);
+/*
+ * Common header of everything stored in client->resource_idr.  @release
+ * doubles as a type tag: code compares it against a known release function
+ * to check what kind of resource a handle refers to.
+ */
+struct client_resource {
+       client_resource_release_fn_t release;
+       int handle;             /* idr key, reported to userspace */
+};
+
+/* An address range allocated with FW_CDEV_IOC_ALLOCATE. */
+struct address_handler_resource {
+       struct client_resource resource;
+       struct fw_address_handler handler;
+       __u64 closure;          /* opaque userspace cookie echoed in events */
+       struct client *client;
+};
+
+/* An outgoing transaction started with FW_CDEV_IOC_SEND_REQUEST. */
+struct outbound_transaction_resource {
+       struct client_resource resource;
+       struct fw_transaction transaction;
+};
+
+/* An incoming request awaiting FW_CDEV_IOC_SEND_RESPONSE from userspace. */
+struct inbound_transaction_resource {
+       struct client_resource resource;
+       struct fw_request *request;
+       void *data;             /* request payload, owned by the core */
+       size_t length;
+};
+
+/* A config ROM descriptor added with FW_CDEV_IOC_ADD_DESCRIPTOR. */
+struct descriptor_resource {
+       struct client_resource resource;
+       struct fw_descriptor descriptor;
+       u32 data[0];            /* trailing descriptor quadlets */
+};
+
+/* An iso channel/bandwidth (re)allocation tracked across bus resets. */
+struct iso_resource {
+       struct client_resource resource;
+       struct client *client;
+       /* Schedule work and access todo only with client->lock held. */
+       struct delayed_work work;
+       enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
+             ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
+       int generation;         /* bus generation of the last (re)allocation */
+       u64 channels;           /* bitmask of acceptable channels */
+       s32 bandwidth;
+       struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
+/*
+ * dequeue_event() just kfree()'s the event, so the event has to be
+ * the first field in a struct XYZ_event.
+ */
+struct event {
+       /* Up to two (pointer, size) segments copied out by read(2). */
+       struct { void *data; size_t size; } v[2];
+       struct list_head link;  /* on client->event_list */
+};
+
+struct bus_reset_event {
+       struct event event;
+       struct fw_cdev_event_bus_reset reset;
+};
+
+struct outbound_transaction_event {
+       struct event event;
+       struct client *client;
+       struct outbound_transaction_resource r;
+       struct fw_cdev_event_response response;
+};
+
+struct inbound_transaction_event {
+       struct event event;
+       struct fw_cdev_event_request request;
+};
+
+struct iso_interrupt_event {
+       struct event event;
+       struct fw_cdev_event_iso_interrupt interrupt;
+};
+
+struct iso_resource_event {
+       struct event event;
+       struct fw_cdev_event_iso_resource resource;
+};
+
+/* The cdev ABI carries userspace pointers as __u64; convert both ways. */
+static inline void __user *u64_to_uptr(__u64 value)
+{
+       return (void __user *)(unsigned long)value;
+}
+
+static inline __u64 uptr_to_u64(void __user *ptr)
+{
+       return (__u64)(unsigned long)ptr;
+}
+
+/*
+ * open(2) on /dev/fw*: look up the fw_device by chardev number, allocate
+ * and initialize the per-file client state, and register it on the
+ * device's client list so bus-reset events reach this file.
+ */
+static int fw_device_op_open(struct inode *inode, struct file *file)
+{
+       struct fw_device *device;
+       struct client *client;
+
+       device = fw_device_get_by_devt(inode->i_rdev);
+       if (device == NULL)
+               return -ENODEV;
+
+       /* Refuse files of devices that are already being torn down. */
+       if (fw_device_is_shutdown(device)) {
+               fw_device_put(device);
+               return -ENODEV;
+       }
+
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
+       if (client == NULL) {
+               fw_device_put(device);
+               return -ENOMEM;
+       }
+
+       client->device = device;        /* takes over the device reference */
+       spin_lock_init(&client->lock);
+       idr_init(&client->resource_idr);
+       INIT_LIST_HEAD(&client->event_list);
+       init_waitqueue_head(&client->wait);
+       kref_init(&client->kref);
+
+       file->private_data = client;
+
+       mutex_lock(&device->client_list_mutex);
+       list_add_tail(&client->link, &device->client_list);
+       mutex_unlock(&device->client_list_mutex);
+
+       return 0;
+}
+
+/*
+ * Append an event to the client's event list and wake any reader.
+ * The event's payload is described by up to two (data, size) segments.
+ * If the client is shutting down, the event is freed instead of queued;
+ * this takes ownership of @event in either case.  Safe in atomic context.
+ */
+static void queue_event(struct client *client, struct event *event,
+                       void *data0, size_t size0, void *data1, size_t size1)
+{
+       unsigned long flags;
+
+       event->v[0].data = data0;
+       event->v[0].size = size0;
+       event->v[1].data = data1;
+       event->v[1].size = size1;
+
+       spin_lock_irqsave(&client->lock, flags);
+       if (client->in_shutdown)
+               kfree(event);
+       else
+               list_add_tail(&event->link, &client->event_list);
+       spin_unlock_irqrestore(&client->lock, flags);
+
+       wake_up_interruptible(&client->wait);
+}
+
+/*
+ * Block until an event is available (or the device goes away), then copy
+ * its segments to userspace and free it.  Returns the number of bytes
+ * copied, -ENODEV on device shutdown, or a -errno from wait/copy.
+ *
+ * NOTE(review): the emptiness test after the wait is done outside the
+ * lock; this presumably relies on there being a single reader per file —
+ * with concurrent readers list_first_entry() could run on an empty list.
+ * Confirm the intended reader model.
+ */
+static int dequeue_event(struct client *client,
+                        char __user *buffer, size_t count)
+{
+       struct event *event;
+       size_t size, total;
+       int i, ret;
+
+       ret = wait_event_interruptible(client->wait,
+                       !list_empty(&client->event_list) ||
+                       fw_device_is_shutdown(client->device));
+       if (ret < 0)
+               return ret;
+
+       if (list_empty(&client->event_list) &&
+                      fw_device_is_shutdown(client->device))
+               return -ENODEV;
+
+       spin_lock_irq(&client->lock);
+       event = list_first_entry(&client->event_list, struct event, link);
+       list_del(&event->link);
+       spin_unlock_irq(&client->lock);
+
+       /* Copy out as much of the (up to two) segments as @count allows. */
+       total = 0;
+       for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
+               size = min(event->v[i].size, count - total);
+               if (copy_to_user(buffer + total, event->v[i].data, size)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+               total += size;
+       }
+       ret = total;
+
+ out:
+       /* Event is consumed (or faulted) either way; it is never requeued. */
+       kfree(event);
+
+       return ret;
+}
+
+/* read(2) entry point: hand off to the event queue. */
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
+                                size_t count, loff_t *offset)
+{
+       struct client *client = file->private_data;
+
+       return dequeue_event(client, buffer, count);
+}
+
+/*
+ * Snapshot the current bus topology (generation, our/IRM/root node IDs)
+ * into a bus-reset event, under card->lock so the fields are consistent.
+ */
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+                                struct client *client)
+{
+       struct fw_card *card = client->device->card;
+
+       spin_lock_irq(&card->lock);
+
+       event->closure       = client->bus_reset_closure;
+       event->type          = FW_CDEV_EVENT_BUS_RESET;
+       event->generation    = client->device->generation;
+       event->node_id       = client->device->node_id;
+       event->local_node_id = card->local_node->node_id;
+       event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
+       event->irm_node_id   = card->irm_node->node_id;
+       event->root_node_id  = card->root_node->node_id;
+
+       spin_unlock_irq(&card->lock);
+}
+
+/* Invoke @callback on every client of @device, under the list mutex. */
+static void for_each_client(struct fw_device *device,
+                           void (*callback)(struct client *client))
+{
+       struct client *c;
+
+       mutex_lock(&device->client_list_mutex);
+       list_for_each_entry(c, &device->client_list, link)
+               callback(c);
+       mutex_unlock(&device->client_list_mutex);
+}
+
+/*
+ * idr_for_each() callback: after a bus reset, requeue the work of every
+ * iso resource so channels/bandwidth get reallocated in the new
+ * generation.  The ->release pointer identifies iso resources.
+ */
+static int schedule_reallocations(int id, void *p, void *data)
+{
+       struct client_resource *r = p;
+
+       if (r->release == release_iso_resource)
+               schedule_iso_resource(container_of(r,
+                                       struct iso_resource, resource));
+       return 0;
+}
+
+/*
+ * Deliver a bus-reset event to one client and kick reallocation of its
+ * iso resources.  Allocation failure is only logged: missing one event
+ * is preferable to stalling the bus-reset path.
+ */
+static void queue_bus_reset_event(struct client *client)
+{
+       struct bus_reset_event *e;
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (e == NULL) {
+               fw_notify("Out of memory when allocating bus reset event\n");
+               return;
+       }
+
+       fill_bus_reset_event(&e->reset, client);
+
+       queue_event(client, &e->event,
+                   &e->reset, sizeof(e->reset), NULL, 0);
+
+       spin_lock_irq(&client->lock);
+       idr_for_each(&client->resource_idr, schedule_reallocations, client);
+       spin_unlock_irq(&client->lock);
+}
+
+/* Called by the core after a bus reset: notify all clients of the device. */
+void fw_device_cdev_update(struct fw_device *device)
+{
+       for_each_client(device, queue_bus_reset_event);
+}
+
+/* Wake a blocked reader so it can notice the device shutdown. */
+static void wake_up_client(struct client *client)
+{
+       wake_up_interruptible(&client->wait);
+}
+
+/* Called by the core on device removal: unblock all readers. */
+void fw_device_cdev_remove(struct fw_device *device)
+{
+       for_each_client(device, wake_up_client);
+}
+
+/*
+ * FW_CDEV_IOC_GET_INFO: negotiate the ABI version, report the card index,
+ * optionally copy out (a prefix of) the device's config ROM, and
+ * optionally deliver an initial bus-reset event snapshot.
+ */
+static int ioctl_get_info(struct client *client, void *buffer)
+{
+       struct fw_cdev_get_info *get_info = buffer;
+       struct fw_cdev_event_bus_reset bus_reset;
+       unsigned long ret = 0;
+
+       client->version = get_info->version;
+       get_info->version = FW_CDEV_VERSION;
+       get_info->card = client->device->card->index;
+
+       /* fw_device_rwsem guards config_rom against concurrent updates. */
+       down_read(&fw_device_rwsem);
+
+       if (get_info->rom != 0) {
+               void __user *uptr = u64_to_uptr(get_info->rom);
+               size_t want = get_info->rom_length;
+               size_t have = client->device->config_rom_length * 4;
+
+               ret = copy_to_user(uptr, client->device->config_rom,
+                                  min(want, have));
+       }
+       /* Always report the full ROM size, even on a short/no copy. */
+       get_info->rom_length = client->device->config_rom_length * 4;
+
+       up_read(&fw_device_rwsem);
+
+       if (ret != 0)
+               return -EFAULT;
+
+       client->bus_reset_closure = get_info->bus_reset_closure;
+       if (get_info->bus_reset != 0) {
+               void __user *uptr = u64_to_uptr(get_info->bus_reset);
+
+               fill_bus_reset_event(&bus_reset, client);
+               if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+                       return -EFAULT;
+       }
+
+       return 0;
+}
+
+/*
+ * Register @resource in the client's idr, filling in resource->handle.
+ * On success the idr holds a client reference (dropped when the handle
+ * is released).  Follows the classic idr_pre_get()/idr_get_new() retry
+ * protocol: -EAGAIN means the preallocation was consumed by a racer.
+ * Returns 0, -ENOMEM, or -ECANCELED if the client is shutting down.
+ */
+static int add_client_resource(struct client *client,
+                              struct client_resource *resource, gfp_t gfp_mask)
+{
+       unsigned long flags;
+       int ret;
+
+ retry:
+       if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&client->lock, flags);
+       if (client->in_shutdown)
+               ret = -ECANCELED;
+       else
+               ret = idr_get_new(&client->resource_idr, resource,
+                                 &resource->handle);
+       if (ret >= 0) {
+               client_get(client);
+               /* New iso resources need their allocation work started. */
+               if (resource->release == release_iso_resource)
+                       schedule_iso_resource(container_of(resource,
+                                               struct iso_resource, resource));
+       }
+       spin_unlock_irqrestore(&client->lock, flags);
+
+       if (ret == -EAGAIN)
+               goto retry;
+
+       return ret < 0 ? ret : 0;
+}
+
+/*
+ * Look up @handle, verify it refers to the expected resource type (by
+ * comparing the ->release function), and remove it from the idr.  If
+ * @resource is non-NULL, ownership passes to the caller; otherwise the
+ * resource's release function is invoked.  Drops the idr's client
+ * reference.  Returns -EINVAL for an unknown or wrongly-typed handle.
+ */
+static int release_client_resource(struct client *client, u32 handle,
+                                  client_resource_release_fn_t release,
+                                  struct client_resource **resource)
+{
+       struct client_resource *r;
+
+       spin_lock_irq(&client->lock);
+       if (client->in_shutdown)
+               r = NULL;
+       else
+               r = idr_find(&client->resource_idr, handle);
+       if (r && r->release == release)
+               idr_remove(&client->resource_idr, handle);
+       spin_unlock_irq(&client->lock);
+
+       if (!(r && r->release == release))
+               return -EINVAL;
+
+       if (resource)
+               *resource = r;
+       else
+               r->release(client, r);
+
+       client_put(client);
+
+       return 0;
+}
+
+/*
+ * Release an outbound transaction resource by cancelling the transaction;
+ * the cancel path ends in complete_transaction(), which frees the event.
+ */
+static void release_transaction(struct client *client,
+                               struct client_resource *resource)
+{
+       struct outbound_transaction_resource *r = container_of(resource,
+                       struct outbound_transaction_resource, resource);
+
+       fw_cancel_transaction(client->device->card, &r->transaction);
+}
+
+/*
+ * Transaction completion callback (may run in atomic context): build the
+ * FW_CDEV_EVENT_RESPONSE event and queue it, carefully balancing the two
+ * client references involved (the idr's and the callback's own).
+ */
+static void complete_transaction(struct fw_card *card, int rcode,
+                                void *payload, size_t length, void *data)
+{
+       struct outbound_transaction_event *e = data;
+       struct fw_cdev_event_response *rsp = &e->response;
+       struct client *client = e->client;
+       unsigned long flags;
+
+       /* Clamp to the buffer size we allocated for the request. */
+       if (length < rsp->length)
+               rsp->length = length;
+       if (rcode == RCODE_COMPLETE)
+               memcpy(rsp->data, payload, rsp->length);
+
+       spin_lock_irqsave(&client->lock, flags);
+       /*
+        * 1. If called while in shutdown, the idr tree must be left untouched.
+        *    The idr handle will be removed and the client reference will be
+        *    dropped later.
+        * 2. If the call chain was release_client_resource ->
+        *    release_transaction -> complete_transaction (instead of a normal
+        *    conclusion of the transaction), i.e. if this resource was already
+        *    unregistered from the idr, the client reference will be dropped
+        *    by release_client_resource and we must not drop it here.
+        */
+       if (!client->in_shutdown &&
+           idr_find(&client->resource_idr, e->r.resource.handle)) {
+               idr_remove(&client->resource_idr, e->r.resource.handle);
+               /* Drop the idr's reference */
+               client_put(client);
+       }
+       spin_unlock_irqrestore(&client->lock, flags);
+
+       rsp->type = FW_CDEV_EVENT_RESPONSE;
+       rsp->rcode = rcode;
+
+       /*
+        * In the case that sizeof(*rsp) doesn't align with the position of the
+        * data, and the read is short, preserve an extra copy of the data
+        * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
+        * for short reads and some apps depended on it, this is both safe
+        * and prudent for compatibility.
+        */
+       if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
+               queue_event(client, &e->event, rsp, sizeof(*rsp),
+                           rsp->data, rsp->length);
+       else
+               queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
+                           NULL, 0);
+
+       /* Drop the transaction callback's reference */
+       client_put(client);
+}
+
+/*
+ * Common path of the send-request ioctls: validate the length, allocate
+ * an event large enough to hold the eventual response payload, copy in
+ * the outbound payload, register the transaction as a client resource,
+ * and fire it off.  The response arrives via complete_transaction().
+ *
+ * NOTE(review): for TCODE_STREAM_DATA the length check is skipped
+ * entirely, so a user-supplied request->length flows unchecked into the
+ * kmalloc() size below — confirm whether stream requests need their own
+ * upper bound.
+ */
+static int init_request(struct client *client,
+                       struct fw_cdev_send_request *request,
+                       int destination_id, int speed)
+{
+       struct outbound_transaction_event *e;
+       int ret;
+
+       /* 512 << speed is the max payload at this speed per IEEE 1394. */
+       if (request->tcode != TCODE_STREAM_DATA &&
+           (request->length > 4096 || request->length > 512 << speed))
+               return -EIO;
+
+       e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
+       if (e == NULL)
+               return -ENOMEM;
+
+       e->client = client;
+       e->response.length = request->length;
+       e->response.closure = request->closure;
+
+       if (request->data &&
+           copy_from_user(e->response.data,
+                          u64_to_uptr(request->data), request->length)) {
+               ret = -EFAULT;
+               goto failed;
+       }
+
+       e->r.resource.release = release_transaction;
+       ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
+       if (ret < 0)
+               goto failed;
+
+       /* Get a reference for the transaction callback */
+       client_get(client);
+
+       fw_send_request(client->device->card, &e->r.transaction,
+                       request->tcode, destination_id, request->generation,
+                       speed, request->offset, e->response.data,
+                       request->length, complete_transaction, e);
+       return 0;
+
+ failed:
+       kfree(e);
+
+       return ret;
+}
+
+/*
+ * FW_CDEV_IOC_SEND_REQUEST: whitelist the transaction codes a client may
+ * issue to its device, then hand off to init_request() targeting the
+ * device's current node ID at its maximum speed.
+ */
+static int ioctl_send_request(struct client *client, void *buffer)
+{
+       struct fw_cdev_send_request *request = buffer;
+
+       switch (request->tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+       case TCODE_WRITE_BLOCK_REQUEST:
+       case TCODE_READ_QUADLET_REQUEST:
+       case TCODE_READ_BLOCK_REQUEST:
+       case TCODE_LOCK_MASK_SWAP:
+       case TCODE_LOCK_COMPARE_SWAP:
+       case TCODE_LOCK_FETCH_ADD:
+       case TCODE_LOCK_LITTLE_ADD:
+       case TCODE_LOCK_BOUNDED_ADD:
+       case TCODE_LOCK_WRAP_ADD:
+       case TCODE_LOCK_VENDOR_DEPENDENT:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return init_request(client, request, client->device->node_id,
+                           client->device->max_speed);
+}
+
+/*
+ * Release an inbound transaction the client never answered: respond to
+ * the requester with RCODE_CONFLICT_ERROR and free the bookkeeping.
+ */
+static void release_request(struct client *client,
+                           struct client_resource *resource)
+{
+       struct inbound_transaction_resource *r = container_of(resource,
+                       struct inbound_transaction_resource, resource);
+
+       fw_send_response(client->device->card, r->request,
+                        RCODE_CONFLICT_ERROR);
+       kfree(r);
+}
+
+/*
+ * Address-handler callback (atomic context): an incoming request hit a
+ * range this client allocated.  Record the request as a resource (so the
+ * client can answer it with SEND_RESPONSE) and queue an event carrying
+ * the request parameters plus payload.  On any failure the requester is
+ * answered with RCODE_CONFLICT_ERROR so it does not hang.
+ */
+static void handle_request(struct fw_card *card, struct fw_request *request,
+                          int tcode, int destination, int source,
+                          int generation, int speed,
+                          unsigned long long offset,
+                          void *payload, size_t length, void *callback_data)
+{
+       struct address_handler_resource *handler = callback_data;
+       struct inbound_transaction_resource *r;
+       struct inbound_transaction_event *e;
+       int ret;
+
+       r = kmalloc(sizeof(*r), GFP_ATOMIC);
+       e = kmalloc(sizeof(*e), GFP_ATOMIC);
+       if (r == NULL || e == NULL)
+               goto failed;
+
+       r->request = request;
+       r->data    = payload;
+       r->length  = length;
+
+       r->resource.release = release_request;
+       ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
+       if (ret < 0)
+               goto failed;
+
+       e->request.type    = FW_CDEV_EVENT_REQUEST;
+       e->request.tcode   = tcode;
+       e->request.offset  = offset;
+       e->request.length  = length;
+       e->request.handle  = r->resource.handle;
+       e->request.closure = handler->closure;
+
+       queue_event(handler->client, &e->event,
+                   &e->request, sizeof(e->request), payload, length);
+       return;
+
+ failed:
+       /* kfree(NULL) is a no-op, so partial allocation is handled too. */
+       kfree(r);
+       kfree(e);
+       fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+}
+
+/* Unregister an allocated address range and free its bookkeeping. */
+static void release_address_handler(struct client *client,
+                                   struct client_resource *resource)
+{
+       struct address_handler_resource *r =
+           container_of(resource, struct address_handler_resource, resource);
+
+       fw_core_remove_address_handler(&r->handler);
+       kfree(r);
+}
+
+/*
+ * FW_CDEV_IOC_ALLOCATE: claim an address range in the local node's
+ * address space.  Incoming requests to the range are forwarded to the
+ * client via handle_request().  Fills in request->handle on success.
+ */
+static int ioctl_allocate(struct client *client, void *buffer)
+{
+       struct fw_cdev_allocate *request = buffer;
+       struct address_handler_resource *r;
+       struct fw_address_region region;
+       int ret;
+
+       r = kmalloc(sizeof(*r), GFP_KERNEL);
+       if (r == NULL)
+               return -ENOMEM;
+
+       region.start = request->offset;
+       region.end = request->offset + request->length;
+       r->handler.length = request->length;
+       r->handler.address_callback = handle_request;
+       r->handler.callback_data = r;
+       r->closure = request->closure;
+       r->client = client;
+
+       ret = fw_core_add_address_handler(&r->handler, &region);
+       if (ret < 0) {
+               kfree(r);
+               return ret;
+       }
+
+       r->resource.release = release_address_handler;
+       ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+       if (ret < 0) {
+               /* Undo the registration; this also frees r. */
+               release_address_handler(client, &r->resource);
+               return ret;
+       }
+       request->handle = r->resource.handle;
+
+       return 0;
+}
+
+/* FW_CDEV_IOC_DEALLOCATE: drop an address range allocated earlier. */
+static int ioctl_deallocate(struct client *client, void *buffer)
+{
+       struct fw_cdev_deallocate *request = buffer;
+
+       return release_client_resource(client, request->handle,
+                                      release_address_handler, NULL);
+}
+
+/*
+ * FW_CDEV_IOC_SEND_RESPONSE: answer an inbound request previously
+ * delivered to the client via an FW_CDEV_EVENT_REQUEST event.  The
+ * resource was removed from the idr by release_client_resource(), so we
+ * own it and must free it on every path — including the copy_from_user
+ * failure path, which previously leaked it.
+ */
+static int ioctl_send_response(struct client *client, void *buffer)
+{
+       struct fw_cdev_send_response *request = buffer;
+       struct client_resource *resource;
+       struct inbound_transaction_resource *r;
+       int ret = 0;
+
+       if (release_client_resource(client, request->handle,
+                                   release_request, &resource) < 0)
+               return -EINVAL;
+
+       r = container_of(resource, struct inbound_transaction_resource,
+                        resource);
+       /* Clamp the response payload to what the request can carry. */
+       if (request->length < r->length)
+               r->length = request->length;
+       if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       fw_send_response(client->device->card, r->request, request->rcode);
+ out:
+       kfree(r);
+
+       return ret;
+}
+
+/*
+ * FW_CDEV_IOC_INITIATE_BUS_RESET: trigger a bus reset on the device's
+ * card; FW_CDEV_SHORT_RESET selects the 1394a short reset.
+ */
+static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
+{
+       struct fw_cdev_initiate_bus_reset *request = buffer;
+
+       return fw_core_initiate_bus_reset(client->device->card,
+                                         request->type == FW_CDEV_SHORT_RESET);
+}
+
+/* Remove a config ROM descriptor and free its bookkeeping. */
+static void release_descriptor(struct client *client,
+                              struct client_resource *resource)
+{
+       struct descriptor_resource *r =
+               container_of(resource, struct descriptor_resource, resource);
+
+       fw_core_remove_descriptor(&r->descriptor);
+       kfree(r);
+}
+
+/*
+ * FW_CDEV_IOC_ADD_DESCRIPTOR: append a descriptor block (at most 256
+ * quadlets) to the local node's config ROM.  Fills in request->handle
+ * on success.
+ */
+static int ioctl_add_descriptor(struct client *client, void *buffer)
+{
+       struct fw_cdev_add_descriptor *request = buffer;
+       struct descriptor_resource *r;
+       int ret;
+
+       /* Access policy: Allow this ioctl only on local nodes' device files. */
+       if (!client->device->is_local)
+               return -ENOSYS;
+
+       if (request->length > 256)
+               return -EINVAL;
+
+       /* request->length is in quadlets; trailing data[] holds them. */
+       r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+       if (r == NULL)
+               return -ENOMEM;
+
+       if (copy_from_user(r->data,
+                          u64_to_uptr(request->data), request->length * 4)) {
+               ret = -EFAULT;
+               goto failed;
+       }
+
+       r->descriptor.length    = request->length;
+       r->descriptor.immediate = request->immediate;
+       r->descriptor.key       = request->key;
+       r->descriptor.data      = r->data;
+
+       ret = fw_core_add_descriptor(&r->descriptor);
+       if (ret < 0)
+               goto failed;
+
+       r->resource.release = release_descriptor;
+       ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+       if (ret < 0) {
+               fw_core_remove_descriptor(&r->descriptor);
+               goto failed;
+       }
+       request->handle = r->resource.handle;
+
+       return 0;
+ failed:
+       kfree(r);
+
+       return ret;
+}
+
+/* FW_CDEV_IOC_REMOVE_DESCRIPTOR: drop a previously added descriptor. */
+static int ioctl_remove_descriptor(struct client *client, void *buffer)
+{
+       struct fw_cdev_remove_descriptor *request = buffer;
+
+       return release_client_resource(client, request->handle,
+                                      release_descriptor, NULL);
+}
+
+/*
+ * Iso context callback (atomic context): forward an iso interrupt, with
+ * the received packet headers appended, to the client as an event.  On
+ * allocation failure the interrupt is silently dropped.
+ */
+static void iso_callback(struct fw_iso_context *context, u32 cycle,
+                        size_t header_length, void *header, void *data)
+{
+       struct client *client = data;
+       struct iso_interrupt_event *e;
+
+       e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
+       if (e == NULL)
+               return;
+
+       e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
+       e->interrupt.closure   = client->iso_closure;
+       e->interrupt.cycle     = cycle;
+       e->interrupt.header_length = header_length;
+       memcpy(e->interrupt.header, header, header_length);
+       queue_event(client, &e->event, &e->interrupt,
+                   sizeof(e->interrupt) + header_length, NULL, 0);
+}
+
+/*
+ * FW_CDEV_IOC_CREATE_ISO_CONTEXT: validate the parameters and create a
+ * receive or transmit iso context on the card.  Only one context per
+ * client is supported, hence the fixed handle of 0.
+ */
+static int ioctl_create_iso_context(struct client *client, void *buffer)
+{
+       struct fw_cdev_create_iso_context *request = buffer;
+       struct fw_iso_context *context;
+
+       /* We only support one context at this time. */
+       if (client->iso_context != NULL)
+               return -EBUSY;
+
+       if (request->channel > 63)
+               return -EINVAL;
+
+       switch (request->type) {
+       case FW_ISO_CONTEXT_RECEIVE:
+               /* Receive contexts need a nonzero, quadlet-aligned header. */
+               if (request->header_size < 4 || (request->header_size & 3))
+                       return -EINVAL;
+
+               break;
+
+       case FW_ISO_CONTEXT_TRANSMIT:
+               if (request->speed > SCODE_3200)
+                       return -EINVAL;
+
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       context =  fw_iso_context_create(client->device->card,
+                                        request->type,
+                                        request->channel,
+                                        request->speed,
+                                        request->header_size,
+                                        iso_callback, client);
+       if (IS_ERR(context))
+               return PTR_ERR(context);
+
+       client->iso_closure = request->closure;
+       client->iso_context = context;
+
+       /* We only support one context at this time. */
+       request->handle = 0;
+
+       return 0;
+}
+
+/*
+ * Macros for decoding the iso packet control header.
+ * Field layout of fw_cdev_iso_packet.control:
+ * bits 0-15 payload length, 16 interrupt, 17 skip, 18-19 tag, 20-23 sy,
+ * bits 24-31 header length in bytes.
+ */
+#define GET_PAYLOAD_LENGTH(v)  ((v) & 0xffff)
+#define GET_INTERRUPT(v)       (((v) >> 16) & 0x01)
+#define GET_SKIP(v)            (((v) >> 17) & 0x01)
+#define GET_TAG(v)             (((v) >> 18) & 0x03)
+#define GET_SY(v)              (((v) >> 20) & 0x0f)
+#define GET_HEADER_LENGTH(v)   (((v) >> 24) & 0xff)
+
+/*
+ * FW_CDEV_IOC_QUEUE_ISO: walk a user array of variable-length
+ * fw_cdev_iso_packet records (a fixed control word followed by
+ * header_length bytes of header) and queue each packet on the client's
+ * iso context.  Payloads are addressed as offsets into the mmap()ed iso
+ * buffer.  Updates the request to point past the consumed packets and
+ * returns the number of packets queued.
+ */
+static int ioctl_queue_iso(struct client *client, void *buffer)
+{
+       struct fw_cdev_queue_iso *request = buffer;
+       struct fw_cdev_iso_packet __user *p, *end, *next;
+       struct fw_iso_context *ctx = client->iso_context;
+       unsigned long payload, buffer_end, header_length;
+       u32 control;
+       int count;
+       struct {
+               struct fw_iso_packet packet;
+               u8 header[256];         /* GET_HEADER_LENGTH() maxes at 255 */
+       } u;
+
+       if (ctx == NULL || request->handle != 0)
+               return -EINVAL;
+
+       /*
+        * If the user passes a non-NULL data pointer, has mmap()'ed
+        * the iso buffer, and the pointer points inside the buffer,
+        * we setup the payload pointers accordingly.  Otherwise we
+        * set them both to 0, which will still let packets with
+        * payload_length == 0 through.  In other words, if no packets
+        * use the indirect payload, the iso buffer need not be mapped
+        * and the request->data pointer is ignored.
+        */
+
+       payload = (unsigned long)request->data - client->vm_start;
+       buffer_end = client->buffer.page_count << PAGE_SHIFT;
+       if (request->data == 0 || client->buffer.pages == NULL ||
+           payload >= buffer_end) {
+               payload = 0;
+               buffer_end = 0;
+       }
+
+       p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
+
+       if (!access_ok(VERIFY_READ, p, request->size))
+               return -EFAULT;
+
+       end = (void __user *)p + request->size;
+       count = 0;
+       while (p < end) {
+               if (get_user(control, &p->control))
+                       return -EFAULT;
+               u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
+               u.packet.interrupt = GET_INTERRUPT(control);
+               u.packet.skip = GET_SKIP(control);
+               u.packet.tag = GET_TAG(control);
+               u.packet.sy = GET_SY(control);
+               u.packet.header_length = GET_HEADER_LENGTH(control);
+
+               if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
+                       header_length = u.packet.header_length;
+               } else {
+                       /*
+                        * We require that header_length is a multiple of
+                        * the fixed header size, ctx->header_size.
+                        */
+                       if (ctx->header_size == 0) {
+                               if (u.packet.header_length > 0)
+                                       return -EINVAL;
+                       } else if (u.packet.header_length % ctx->header_size != 0) {
+                               return -EINVAL;
+                       }
+                       /* Receive headers live in the record but are not queued. */
+                       header_length = 0;
+               }
+
+               /* Records are variable-length; advance past this header. */
+               next = (struct fw_cdev_iso_packet __user *)
+                       &p->header[header_length / 4];
+               if (next > end)
+                       return -EINVAL;
+               if (__copy_from_user
+                   (u.packet.header, p->header, header_length))
+                       return -EFAULT;
+               if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
+                   u.packet.header_length + u.packet.payload_length > 0)
+                       return -EINVAL;
+               if (payload + u.packet.payload_length > buffer_end)
+                       return -EINVAL;
+
+               if (fw_iso_context_queue(ctx, &u.packet,
+                                        &client->buffer, payload))
+                       break;
+
+               p = next;
+               payload += u.packet.payload_length;
+               count++;
+       }
+
+       /* Tell userspace how far we got, so it can resubmit the rest. */
+       request->size    -= uptr_to_u64(p) - request->packets;
+       request->packets  = uptr_to_u64(p);
+       request->data     = client->vm_start + payload;
+
+       return count;
+}
+
+/*
+ * FW_CDEV_IOC_START_ISO: start the client's iso context.  For receive
+ * contexts the tag mask (4 bits) and sync field (4 bits) are validated.
+ */
+static int ioctl_start_iso(struct client *client, void *buffer)
+{
+       struct fw_cdev_start_iso *request = buffer;
+
+       if (client->iso_context == NULL || request->handle != 0)
+               return -EINVAL;
+
+       if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
+               if (request->tags == 0 || request->tags > 15)
+                       return -EINVAL;
+
+               if (request->sync > 15)
+                       return -EINVAL;
+       }
+
+       return fw_iso_context_start(client->iso_context, request->cycle,
+                                   request->sync, request->tags);
+}
+
+/* FW_CDEV_IOC_STOP_ISO: stop the client's iso context. */
+static int ioctl_stop_iso(struct client *client, void *buffer)
+{
+       struct fw_cdev_stop_iso *request = buffer;
+
+       if (client->iso_context == NULL || request->handle != 0)
+               return -EINVAL;
+
+       return fw_iso_context_stop(client->iso_context);
+}
+
+/*
+ * Return a correlated pair of timestamps: the card's bus (cycle) time and
+ * the local wall-clock time in microseconds.
+ */
+static int ioctl_get_cycle_timer(struct client *client, void *buffer)
+{
+       struct fw_cdev_get_cycle_timer *request = buffer;
+       struct fw_card *card = client->device->card;
+       unsigned long long bus_time;
+       struct timeval tv;
+       unsigned long flags;
+
+       /*
+        * Disable preemption and local interrupts so the two time samples
+        * below are taken as close together as possible.
+        */
+       preempt_disable();
+       local_irq_save(flags);
+
+       bus_time = card->driver->get_bus_time(card);
+       do_gettimeofday(&tv);
+
+       local_irq_restore(flags);
+       preempt_enable();
+
+       /* local_time is microseconds since the epoch. */
+       request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
+       /* Only the low 32 bits of the bus time are reported. */
+       request->cycle_timer = bus_time & 0xffffffff;
+       return 0;
+}
+
+/*
+ * Delayed-work handler that performs the actual (de)allocation of an
+ * isochronous channel and/or bandwidth for a struct iso_resource.
+ * r->todo encodes the pending operation (alloc, realloc after bus reset,
+ * dealloc, or one-shot variants); on completion an allocated/deallocated
+ * event is queued to the client.  A client reference is dropped at "out"
+ * for the reference taken when this work was scheduled.
+ */
+static void iso_resource_work(struct work_struct *work)
+{
+       struct iso_resource_event *e;
+       struct iso_resource *r =
+                       container_of(work, struct iso_resource, work.work);
+       struct client *client = r->client;
+       int generation, channel, bandwidth, todo;
+       bool skip, free, success;
+
+       spin_lock_irq(&client->lock);
+       generation = client->device->generation;
+       todo = r->todo;
+       /* Allow 1000ms grace period for other reallocations. */
+       if (todo == ISO_RES_ALLOC &&
+           time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
+               /* Retry later; take another client ref for the rescheduled work. */
+               if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
+                       client_get(client);
+               skip = true;
+       } else {
+               /* We could be called twice within the same generation. */
+               skip = todo == ISO_RES_REALLOC &&
+                      r->generation == generation;
+       }
+       /* One-shot and explicit deallocations free the resource at the end. */
+       free = todo == ISO_RES_DEALLOC ||
+              todo == ISO_RES_ALLOC_ONCE ||
+              todo == ISO_RES_DEALLOC_ONCE;
+       r->generation = generation;
+       spin_unlock_irq(&client->lock);
+
+       if (skip)
+               goto out;
+
+       bandwidth = r->bandwidth;
+
+       /* Last argument: true = allocate, false = deallocate. */
+       fw_iso_resource_manage(client->device->card, generation,
+                       r->channels, &channel, &bandwidth,
+                       todo == ISO_RES_ALLOC ||
+                       todo == ISO_RES_REALLOC ||
+                       todo == ISO_RES_ALLOC_ONCE);
+       /*
+        * Is this generation outdated already?  As long as this resource sticks
+        * in the idr, it will be scheduled again for a newer generation or at
+        * shutdown.
+        */
+       if (channel == -EAGAIN &&
+           (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
+               goto out;
+
+       success = channel >= 0 || bandwidth > 0;
+
+       spin_lock_irq(&client->lock);
+       /*
+        * Transit from allocation to reallocation, except if the client
+        * requested deallocation in the meantime.
+        */
+       if (r->todo == ISO_RES_ALLOC)
+               r->todo = ISO_RES_REALLOC;
+       /*
+        * Allocation or reallocation failure?  Pull this resource out of the
+        * idr and prepare for deletion, unless the client is shutting down.
+        */
+       if (r->todo == ISO_RES_REALLOC && !success &&
+           !client->in_shutdown &&
+           idr_find(&client->resource_idr, r->resource.handle)) {
+               idr_remove(&client->resource_idr, r->resource.handle);
+               client_put(client);
+               free = true;
+       }
+       spin_unlock_irq(&client->lock);
+
+       if (todo == ISO_RES_ALLOC && channel >= 0)
+               r->channels = 1ULL << channel;
+
+       /* A successful reallocation needs no event; we are done. */
+       if (todo == ISO_RES_REALLOC && success)
+               goto out;
+
+       if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
+               e = r->e_alloc;
+               r->e_alloc = NULL;
+       } else {
+               e = r->e_dealloc;
+               r->e_dealloc = NULL;
+       }
+       e->resource.handle      = r->resource.handle;
+       e->resource.channel     = channel;
+       e->resource.bandwidth   = bandwidth;
+
+       queue_event(client, &e->event,
+                   &e->resource, sizeof(e->resource), NULL, 0);
+
+       if (free) {
+               cancel_delayed_work(&r->work);
+               /* Whichever event was not consumed above is freed here. */
+               kfree(r->e_alloc);
+               kfree(r->e_dealloc);
+               kfree(r);
+       }
+ out:
+       client_put(client);
+}
+
+/*
+ * Queue iso_resource_work for immediate execution, holding a client
+ * reference while the work is pending (dropped if already queued).
+ */
+static void schedule_iso_resource(struct iso_resource *r)
+{
+       client_get(r->client);
+       if (!schedule_delayed_work(&r->work, 0))
+               client_put(r->client);
+}
+
+/*
+ * Resource-release callback: mark the resource for deallocation and let
+ * iso_resource_work do the actual work.  Called with the resource already
+ * removed from (or being removed from) the client's idr.
+ */
+static void release_iso_resource(struct client *client,
+                                struct client_resource *resource)
+{
+       struct iso_resource *r =
+               container_of(resource, struct iso_resource, resource);
+
+       spin_lock_irq(&client->lock);
+       r->todo = ISO_RES_DEALLOC;
+       schedule_iso_resource(r);
+       spin_unlock_irq(&client->lock);
+}
+
+/*
+ * Common setup for the allocate/deallocate iso resource ioctls.
+ * Allocates the resource descriptor plus both possible completion events
+ * up front (so the work handler never has to allocate), then either
+ * registers the resource in the client's idr (ISO_RES_ALLOC) or schedules
+ * the one-shot work directly.  Returns 0 or a negative error code.
+ */
+static int init_iso_resource(struct client *client,
+               struct fw_cdev_allocate_iso_resource *request, int todo)
+{
+       struct iso_resource_event *e1, *e2;
+       struct iso_resource *r;
+       int ret;
+
+       /* Must request at least one of channel/bandwidth, within limits. */
+       if ((request->channels == 0 && request->bandwidth == 0) ||
+           request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
+           request->bandwidth < 0)
+               return -EINVAL;
+
+       r  = kmalloc(sizeof(*r), GFP_KERNEL);
+       e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
+       e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
+       if (r == NULL || e1 == NULL || e2 == NULL) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       INIT_DELAYED_WORK(&r->work, iso_resource_work);
+       r->client       = client;
+       r->todo         = todo;
+       r->generation   = -1;
+       r->channels     = request->channels;
+       r->bandwidth    = request->bandwidth;
+       r->e_alloc      = e1;
+       r->e_dealloc    = e2;
+
+       e1->resource.closure    = request->closure;
+       e1->resource.type       = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
+       e2->resource.closure    = request->closure;
+       e2->resource.type       = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
+
+       if (todo == ISO_RES_ALLOC) {
+               r->resource.release = release_iso_resource;
+               /* add_client_resource also schedules the work (presumably;
+                * the work is not scheduled explicitly on this path). */
+               ret = add_client_resource(client, &r->resource, GFP_KERNEL);
+               if (ret < 0)
+                       goto fail;
+       } else {
+               /* One-shot operations have no handle and run immediately. */
+               r->resource.release = NULL;
+               r->resource.handle = -1;
+               schedule_iso_resource(r);
+       }
+       request->handle = r->resource.handle;
+
+       return 0;
+ fail:
+       kfree(r);
+       kfree(e1);
+       kfree(e2);
+
+       return ret;
+}
+
+/* Allocate channel/bandwidth, auto-reallocated across bus resets. */
+static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+{
+       struct fw_cdev_allocate_iso_resource *request = buffer;
+
+       return init_iso_resource(client, request, ISO_RES_ALLOC);
+}
+
+/* Release a previously allocated iso resource by handle. */
+static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+{
+       struct fw_cdev_deallocate *request = buffer;
+
+       return release_client_resource(client, request->handle,
+                                      release_iso_resource, NULL);
+}
+
+/* One-shot allocation: not tracked in the idr, not reallocated on reset. */
+static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
+{
+       struct fw_cdev_allocate_iso_resource *request = buffer;
+
+       return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
+}
+
+/* One-shot deallocation of a channel/bandwidth pair given by value. */
+static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
+{
+       struct fw_cdev_allocate_iso_resource *request = buffer;
+
+       return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
+}
+
+/*
+ * Returns a speed code:  Maximum speed to or from this device,
+ * limited by the device's link speed, the local node's link speed,
+ * and all PHY port speeds between the two links.
+ *
+ * The speed code is delivered as the positive return value of the ioctl
+ * (dispatch_ioctl passes non-negative handler returns back to userspace).
+ */
+static int ioctl_get_speed(struct client *client, void *buffer)
+{
+       return client->device->max_speed;
+}
+
+/*
+ * Send a write request to the broadcast node ID at base speed S100.
+ * Only write tcodes are permitted, and only to addresses above the
+ * config ROM (Units Space), per the security comment below.
+ */
+static int ioctl_send_broadcast_request(struct client *client, void *buffer)
+{
+       struct fw_cdev_send_request *request = buffer;
+
+       switch (request->tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+       case TCODE_WRITE_BLOCK_REQUEST:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Security policy: Only allow accesses to Units Space. */
+       if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
+               return -EACCES;
+
+       return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
+}
+
+/*
+ * Send an asynchronous stream packet.  The tag/channel/sy triple is
+ * folded into the destination ID; length is bounded by the maximum
+ * payload for the requested speed (1024 << speed bytes).
+ */
+static int ioctl_send_stream_packet(struct client *client, void *buffer)
+{
+       struct fw_cdev_send_stream_packet *p = buffer;
+       struct fw_cdev_send_request request;
+       int dest;
+
+       if (p->speed > client->device->card->link_speed ||
+           p->length > 1024 << p->speed)
+               return -EIO;
+
+       /* tag: 2 bits, channel: 6 bits, sy: 4 bits. */
+       if (p->tag > 3 || p->channel > 63 || p->sy > 15)
+               return -EINVAL;
+
+       dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
+       request.tcode           = TCODE_STREAM_DATA;
+       request.length          = p->length;
+       request.closure         = p->closure;
+       request.data            = p->data;
+       request.generation      = p->generation;
+
+       return init_request(client, &request, dest, p->speed);
+}
+
+/*
+ * Dispatch table indexed by _IOC_NR(cmd); the order must match the
+ * FW_CDEV_IOC_* command numbering in the UAPI header.
+ */
+static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+       ioctl_get_info,
+       ioctl_send_request,
+       ioctl_allocate,
+       ioctl_deallocate,
+       ioctl_send_response,
+       ioctl_initiate_bus_reset,
+       ioctl_add_descriptor,
+       ioctl_remove_descriptor,
+       ioctl_create_iso_context,
+       ioctl_queue_iso,
+       ioctl_start_iso,
+       ioctl_stop_iso,
+       ioctl_get_cycle_timer,
+       ioctl_allocate_iso_resource,
+       ioctl_deallocate_iso_resource,
+       ioctl_allocate_iso_resource_once,
+       ioctl_deallocate_iso_resource_once,
+       ioctl_get_speed,
+       ioctl_send_broadcast_request,
+       ioctl_send_stream_packet,
+};
+
+/*
+ * Common ioctl dispatcher: validates the command, copies the argument
+ * into a kernel buffer for _IOC_WRITE commands, invokes the handler,
+ * and copies the (possibly modified) buffer back for _IOC_READ commands.
+ * Non-negative handler return values are passed through to userspace.
+ */
+static int dispatch_ioctl(struct client *client,
+                         unsigned int cmd, void __user *arg)
+{
+       char buffer[256];
+       int ret;
+
+       if (_IOC_TYPE(cmd) != '#' ||
+           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
+               return -EINVAL;
+
+       if (_IOC_DIR(cmd) & _IOC_WRITE) {
+               /* NOTE(review): an oversized _IOC_SIZE yields -EFAULT here;
+                * -EINVAL would arguably fit better — confirm intent. */
+               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+                   copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+       }
+
+       ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+       if (ret < 0)
+               return ret;
+
+       if (_IOC_DIR(cmd) & _IOC_READ) {
+               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
+                   copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+       }
+
+       return ret;
+}
+
+/* unlocked_ioctl entry point; rejects calls once the device is gone. */
+static long fw_device_op_ioctl(struct file *file,
+                              unsigned int cmd, unsigned long arg)
+{
+       struct client *client = file->private_data;
+
+       if (fw_device_is_shutdown(client->device))
+               return -ENODEV;
+
+       return dispatch_ioctl(client, cmd, (void __user *) arg);
+}
+
+#ifdef CONFIG_COMPAT
+/* 32-bit-compat ioctl entry point; only pointer conversion differs. */
+static long fw_device_op_compat_ioctl(struct file *file,
+                                     unsigned int cmd, unsigned long arg)
+{
+       struct client *client = file->private_data;
+
+       if (fw_device_is_shutdown(client->device))
+               return -ENODEV;
+
+       return dispatch_ioctl(client, cmd, compat_ptr(arg));
+}
+#endif
+
+/*
+ * mmap a DMA-able buffer for isochronous payloads.  A client can have at
+ * most one buffer; the mapping must be shared, page-aligned, and a whole
+ * number of pages.  VM_WRITE selects the DMA direction (transmit).
+ */
+static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct client *client = file->private_data;
+       enum dma_data_direction direction;
+       unsigned long size;
+       int page_count, ret;
+
+       if (fw_device_is_shutdown(client->device))
+               return -ENODEV;
+
+       /* FIXME: We could support multiple buffers, but we don't. */
+       if (client->buffer.pages != NULL)
+               return -EBUSY;
+
+       if (!(vma->vm_flags & VM_SHARED))
+               return -EINVAL;
+
+       if (vma->vm_start & ~PAGE_MASK)
+               return -EINVAL;
+
+       /* NOTE(review): vm_start is recorded before the size check below
+        * succeeds; presumably harmless since buffer.pages stays NULL on
+        * failure — confirm nothing reads vm_start without a buffer. */
+       client->vm_start = vma->vm_start;
+       size = vma->vm_end - vma->vm_start;
+       page_count = size >> PAGE_SHIFT;
+       if (size & ~PAGE_MASK)
+               return -EINVAL;
+
+       if (vma->vm_flags & VM_WRITE)
+               direction = DMA_TO_DEVICE;
+       else
+               direction = DMA_FROM_DEVICE;
+
+       ret = fw_iso_buffer_init(&client->buffer, client->device->card,
+                                page_count, direction);
+       if (ret < 0)
+               return ret;
+
+       ret = fw_iso_buffer_map(&client->buffer, vma);
+       if (ret < 0)
+               fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+       return ret;
+}
+
+/*
+ * idr_for_each callback used at release time: invoke each resource's
+ * release hook and drop the client reference held by the idr entry.
+ */
+static int shutdown_resource(int id, void *p, void *data)
+{
+       struct client_resource *r = p;
+       struct client *client = data;
+
+       r->release(client, r);
+       client_put(client);
+
+       return 0;
+}
+
+/*
+ * Final close of the character device: unlink the client, tear down the
+ * iso context and buffer, release every registered resource, free queued
+ * events, and drop the client's base reference.
+ */
+static int fw_device_op_release(struct inode *inode, struct file *file)
+{
+       struct client *client = file->private_data;
+       struct event *e, *next_e;
+
+       mutex_lock(&client->device->client_list_mutex);
+       list_del(&client->link);
+       mutex_unlock(&client->device->client_list_mutex);
+
+       if (client->iso_context)
+               fw_iso_context_destroy(client->iso_context);
+
+       if (client->buffer.pages)
+               fw_iso_buffer_destroy(&client->buffer, client->device->card);
+
+       /* Freeze client->resource_idr and client->event_list */
+       spin_lock_irq(&client->lock);
+       client->in_shutdown = true;
+       spin_unlock_irq(&client->lock);
+
+       idr_for_each(&client->resource_idr, shutdown_resource, client);
+       idr_remove_all(&client->resource_idr);
+       idr_destroy(&client->resource_idr);
+
+       list_for_each_entry_safe(e, next_e, &client->event_list, link)
+               kfree(e);
+
+       client_put(client);
+
+       return 0;
+}
+
+/*
+ * poll: readable when events are queued; HUP/ERR once the device has
+ * been shut down.
+ */
+static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
+{
+       struct client *client = file->private_data;
+       unsigned int mask = 0;
+
+       poll_wait(file, &client->wait, pt);
+
+       if (fw_device_is_shutdown(client->device))
+               mask |= POLLHUP | POLLERR;
+       if (!list_empty(&client->event_list))
+               mask |= POLLIN | POLLRDNORM;
+
+       return mask;
+}
+
+/* file_operations for the firewire character device (/dev/fw*). */
+const struct file_operations fw_device_ops = {
+       .owner          = THIS_MODULE,
+       .open           = fw_device_op_open,
+       .read           = fw_device_op_read,
+       .unlocked_ioctl = fw_device_op_ioctl,
+       .poll           = fw_device_op_poll,
+       .release        = fw_device_op_release,
+       .mmap           = fw_device_op_mmap,
+
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = fw_device_op_compat_ioctl,
+#endif
+};
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
new file mode 100644 (file)
index 0000000..97e656a
--- /dev/null
@@ -0,0 +1,1228 @@
+/*
+ * Device probing and sysfs code.
+ *
+ * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+#include <asm/system.h>
+
+#include "core.h"
+
+/*
+ * Initialize an iterator over a CSR directory block.  p points at the
+ * directory header quadlet, whose upper 16 bits hold the entry count.
+ */
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
+{
+       ci->p = p + 1;
+       ci->end = ci->p + (p[0] >> 16);
+}
+EXPORT_SYMBOL(fw_csr_iterator_init);
+
+/*
+ * Fetch the next directory entry, split into an 8-bit key and a 24-bit
+ * value.  Returns non-zero while entries remain.
+ * NOTE(review): the current quadlet is dereferenced before the bounds
+ * comparison, so a zero-length directory still reads one quadlet past
+ * the header — presumably always backed by valid memory; confirm.
+ */
+int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
+{
+       *key = *ci->p >> 24;
+       *value = *ci->p & 0xffffff;
+
+       return ci->p++ < ci->end;
+}
+EXPORT_SYMBOL(fw_csr_iterator_next);
+
+static bool is_fw_unit(struct device *dev);
+
+/*
+ * Collect which of vendor/model/specifier/version in the given CSR
+ * directory match the device id, then test against match_flags.
+ * Returns non-zero only if every flagged field matched.
+ */
+static int match_unit_directory(u32 *directory, u32 match_flags,
+                               const struct ieee1394_device_id *id)
+{
+       struct fw_csr_iterator ci;
+       int key, value, match;
+
+       match = 0;
+       fw_csr_iterator_init(&ci, directory);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               if (key == CSR_VENDOR && value == id->vendor_id)
+                       match |= IEEE1394_MATCH_VENDOR_ID;
+               if (key == CSR_MODEL && value == id->model_id)
+                       match |= IEEE1394_MATCH_MODEL_ID;
+               if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
+                       match |= IEEE1394_MATCH_SPECIFIER_ID;
+               if (key == CSR_VERSION && value == id->version)
+                       match |= IEEE1394_MATCH_VERSION;
+       }
+
+       return (match & match_flags) == match_flags;
+}
+
+/*
+ * Bus-type match callback: does this driver's id_table match this unit?
+ * A vendor ID found only in the device's root directory (config_rom[5])
+ * also counts, as long as the remaining flagged fields match the unit
+ * directory itself.
+ */
+static int fw_unit_match(struct device *dev, struct device_driver *drv)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       struct fw_device *device;
+       const struct ieee1394_device_id *id;
+
+       /* We only allow binding to fw_units. */
+       if (!is_fw_unit(dev))
+               return 0;
+
+       device = fw_parent_device(unit);
+       id = container_of(drv, struct fw_driver, driver)->id_table;
+
+       for (; id->match_flags != 0; id++) {
+               if (match_unit_directory(unit->directory, id->match_flags, id))
+                       return 1;
+
+               /* Also check vendor ID in the root directory. */
+               if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
+                   match_unit_directory(&device->config_rom[5],
+                               IEEE1394_MATCH_VENDOR_ID, id) &&
+                   match_unit_directory(unit->directory, id->match_flags
+                               & ~IEEE1394_MATCH_VENDOR_ID, id))
+                       return 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Build the "ieee1394:ven...mo...sp...ver..." modalias string for a unit.
+ * Vendor/model come from the device's root directory, specifier/version
+ * from the unit directory; absent keys default to 0.  Returns the
+ * snprintf length (may exceed buffer_size if truncated).
+ */
+static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
+{
+       struct fw_device *device = fw_parent_device(unit);
+       struct fw_csr_iterator ci;
+
+       int key, value;
+       int vendor = 0;
+       int model = 0;
+       int specifier_id = 0;
+       int version = 0;
+
+       fw_csr_iterator_init(&ci, &device->config_rom[5]);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               switch (key) {
+               case CSR_VENDOR:
+                       vendor = value;
+                       break;
+               case CSR_MODEL:
+                       model = value;
+                       break;
+               }
+       }
+
+       fw_csr_iterator_init(&ci, unit->directory);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               switch (key) {
+               case CSR_SPECIFIER_ID:
+                       specifier_id = value;
+                       break;
+               case CSR_VERSION:
+                       version = value;
+                       break;
+               }
+       }
+
+       return snprintf(buffer, buffer_size,
+                       "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
+                       vendor, model, specifier_id, version);
+}
+
+/* uevent callback: expose MODALIAS so udev can autoload unit drivers. */
+static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       char modalias[64];
+
+       get_modalias(unit, modalias, sizeof(modalias));
+
+       if (add_uevent_var(env, "MODALIAS=%s", modalias))
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* The "firewire" bus type; unit drivers register against this bus. */
+struct bus_type fw_bus_type = {
+       .name = "firewire",
+       .match = fw_unit_match,
+};
+EXPORT_SYMBOL(fw_bus_type);
+
+/*
+ * Enable physical DMA from this device via the card driver.  Returns the
+ * driver's result code.
+ */
+int fw_device_enable_phys_dma(struct fw_device *device)
+{
+       int generation = device->generation;
+
+       /* device->node_id, accessed below, must not be older than generation */
+       smp_rmb();
+
+       return device->card->driver->enable_phys_dma(device->card,
+                                                    device->node_id,
+                                                    generation);
+}
+EXPORT_SYMBOL(fw_device_enable_phys_dma);
+
+/* A sysfs attribute bound to one CSR key (immediate value or text leaf). */
+struct config_rom_attribute {
+       struct device_attribute attr;
+       u32 key;
+};
+
+/*
+ * sysfs show for an immediate CSR entry: scan the relevant directory
+ * (unit directory or the device's root directory at config_rom + 5) for
+ * the attribute's key.  -ENOENT if absent.  Called with buf == NULL by
+ * init_fw_attribute_group to probe whether the attribute exists.
+ */
+static ssize_t show_immediate(struct device *dev,
+                             struct device_attribute *dattr, char *buf)
+{
+       struct config_rom_attribute *attr =
+               container_of(dattr, struct config_rom_attribute, attr);
+       struct fw_csr_iterator ci;
+       u32 *dir;
+       int key, value, ret = -ENOENT;
+
+       /* fw_device_rwsem protects config_rom against bus-reset updates. */
+       down_read(&fw_device_rwsem);
+
+       if (is_fw_unit(dev))
+               dir = fw_unit(dev)->directory;
+       else
+               dir = fw_device(dev)->config_rom + 5;
+
+       fw_csr_iterator_init(&ci, dir);
+       while (fw_csr_iterator_next(&ci, &key, &value))
+               if (attr->key == key) {
+                       ret = snprintf(buf, buf ? PAGE_SIZE : 0,
+                                      "0x%06x\n", value);
+                       break;
+               }
+
+       up_read(&fw_device_rwsem);
+
+       return ret;
+}
+
+#define IMMEDIATE_ATTR(name, key)                              \
+       { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
+
+/*
+ * sysfs show for a textual descriptor leaf: find the descriptor leaf
+ * that immediately follows the attribute's key in the directory, check
+ * it is a plain-ASCII leaf (header words 1 and 2 zero), copy it out in
+ * big-endian quadlets, then strip trailing whitespace/NULs and append a
+ * newline.  buf == NULL is a probe that only reports the length.
+ */
+static ssize_t show_text_leaf(struct device *dev,
+                             struct device_attribute *dattr, char *buf)
+{
+       struct config_rom_attribute *attr =
+               container_of(dattr, struct config_rom_attribute, attr);
+       struct fw_csr_iterator ci;
+       u32 *dir, *block = NULL, *p, *end;
+       int length, key, value, last_key = 0, ret = -ENOENT;
+       char *b;
+
+       down_read(&fw_device_rwsem);
+
+       if (is_fw_unit(dev))
+               dir = fw_unit(dev)->directory;
+       else
+               dir = fw_device(dev)->config_rom + 5;
+
+       /* A descriptor leaf describes the entry that precedes it. */
+       fw_csr_iterator_init(&ci, dir);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               if (attr->key == last_key &&
+                   key == (CSR_DESCRIPTOR | CSR_LEAF))
+                       block = ci.p - 1 + value;
+               last_key = key;
+       }
+
+       if (block == NULL)
+               goto out;
+
+       /* Leaf length in quadlets, capped at 256; need header + text. */
+       length = min(block[0] >> 16, 256U);
+       if (length < 3)
+               goto out;
+
+       if (block[1] != 0 || block[2] != 0)
+               /* Unknown encoding. */
+               goto out;
+
+       if (buf == NULL) {
+               ret = length * 4;
+               goto out;
+       }
+
+       b = buf;
+       end = &block[length + 1];
+       for (p = &block[3]; p < end; p++, b += 4)
+               * (u32 *) b = (__force u32) __cpu_to_be32(*p);
+
+       /* Strip trailing whitespace and add newline. */
+       while (b--, (isspace(*b) || *b == '\0') && b > buf);
+       strcpy(b + 1, "\n");
+       ret = b + 2 - buf;
+ out:
+       up_read(&fw_device_rwsem);
+
+       return ret;
+}
+
+#define TEXT_LEAF_ATTR(name, key)                              \
+       { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
+
+/* Optional sysfs attributes, shown only if the key exists in the ROM. */
+static struct config_rom_attribute config_rom_attributes[] = {
+       IMMEDIATE_ATTR(vendor, CSR_VENDOR),
+       IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
+       IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
+       IMMEDIATE_ATTR(version, CSR_VERSION),
+       IMMEDIATE_ATTR(model, CSR_MODEL),
+       TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
+       TEXT_LEAF_ATTR(model_name, CSR_MODEL),
+       TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
+};
+
+/*
+ * Assemble the device's sysfs attribute group from a fixed,
+ * __ATTR_NULL-terminated attribute list plus whichever of the optional
+ * config_rom_attributes actually exist for this device (probed by
+ * calling ->show with a NULL buffer).
+ * NOTE(review): assumes group->attrs has room for both lists plus the
+ * terminator — capacity is declared elsewhere; confirm.
+ */
+static void init_fw_attribute_group(struct device *dev,
+                                   struct device_attribute *attrs,
+                                   struct fw_attribute_group *group)
+{
+       struct device_attribute *attr;
+       int i, j;
+
+       for (j = 0; attrs[j].attr.name != NULL; j++)
+               group->attrs[j] = &attrs[j].attr;
+
+       for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
+               attr = &config_rom_attributes[i].attr;
+               if (attr->show(dev, attr, NULL) < 0)
+                       continue;
+               group->attrs[j++] = &attr->attr;
+       }
+
+       group->attrs[j] = NULL;
+       group->groups[0] = &group->group;
+       group->groups[1] = NULL;
+       group->group.attrs = group->attrs;
+       dev->groups = group->groups;
+}
+
+/*
+ * sysfs "modalias" attribute.
+ * NOTE(review): the snprintf return from get_modalias is trusted to be
+ * < PAGE_SIZE - 1; the fixed format string keeps it short in practice.
+ */
+static ssize_t modalias_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       int length;
+
+       length = get_modalias(unit, buf, PAGE_SIZE);
+       strcpy(buf + length, "\n");
+
+       return length + 1;
+}
+
+/* sysfs "rom_index": quadlet offset of the unit directory in the ROM. */
+static ssize_t rom_index_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct fw_device *device = fw_device(dev->parent);
+       struct fw_unit *unit = fw_unit(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       (int)(unit->directory - device->config_rom));
+}
+
+/* Fixed sysfs attributes present on every fw_unit. */
+static struct device_attribute fw_unit_attributes[] = {
+       __ATTR_RO(modalias),
+       __ATTR_RO(rom_index),
+       __ATTR_NULL,
+};
+
+/* sysfs "config_rom": raw copy of the cached configuration ROM. */
+static ssize_t config_rom_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct fw_device *device = fw_device(dev);
+       size_t length;
+
+       down_read(&fw_device_rwsem);
+       /* config_rom_length is in quadlets. */
+       length = device->config_rom_length * 4;
+       memcpy(buf, device->config_rom, length);
+       up_read(&fw_device_rwsem);
+
+       return length;
+}
+
+/* sysfs "guid": the 64-bit EUI from bus info block words 3 and 4. */
+static ssize_t guid_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
+{
+       struct fw_device *device = fw_device(dev);
+       int ret;
+
+       down_read(&fw_device_rwsem);
+       ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
+                      device->config_rom[3], device->config_rom[4]);
+       up_read(&fw_device_rwsem);
+
+       return ret;
+}
+
+/*
+ * Format one unit directory as "0xSPECIFIER:0xVERSION " (trailing space;
+ * the caller turns the final one into a newline).  Missing keys print 0.
+ * Returns the number of characters written.
+ */
+static int units_sprintf(char *buf, u32 *directory)
+{
+       struct fw_csr_iterator ci;
+       int key, value;
+       int specifier_id = 0;
+       int version = 0;
+
+       fw_csr_iterator_init(&ci, directory);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               switch (key) {
+               case CSR_SPECIFIER_ID:
+                       specifier_id = value;
+                       break;
+               case CSR_VERSION:
+                       version = value;
+                       break;
+               }
+       }
+
+       return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
+}
+
+/*
+ * sysfs "units": space-separated specifier:version pairs for every unit
+ * directory in the root directory, ending in a newline.  Stops early if
+ * another full entry (8+1+8+1 chars) might not fit in the page.
+ */
+static ssize_t units_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       struct fw_device *device = fw_device(dev);
+       struct fw_csr_iterator ci;
+       int key, value, i = 0;
+
+       down_read(&fw_device_rwsem);
+       fw_csr_iterator_init(&ci, &device->config_rom[5]);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               if (key != (CSR_UNIT | CSR_DIRECTORY))
+                       continue;
+               /* value is the offset to the unit directory, relative to
+                * the entry just consumed (ci.p - 1). */
+               i += units_sprintf(&buf[i], ci.p + value - 1);
+               if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
+                       break;
+       }
+       up_read(&fw_device_rwsem);
+
+       /* Replace the final trailing space with a newline. */
+       if (i)
+               buf[i - 1] = '\n';
+
+       return i;
+}
+
+/* Fixed sysfs attributes present on every fw_device. */
+static struct device_attribute fw_device_attributes[] = {
+       __ATTR_RO(config_rom),
+       __ATTR_RO(guid),
+       __ATTR_RO(units),
+       __ATTR_NULL,
+};
+
+/*
+ * Read one config ROM quadlet at the given quadlet index, converting it
+ * to CPU byte order in place.  Returns the transaction rcode.
+ */
+static int read_rom(struct fw_device *device,
+                   int generation, int index, u32 *data)
+{
+       /* device->node_id, accessed below, must not be older than generation */
+       smp_rmb();
+
+       rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
+                       device->node_id, generation, device->max_speed,
+                       (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
+                       data, 4);
+       be32_to_cpus(data);
+
+       return rcode;
+}
+
+#define READ_BIB_ROM_SIZE      256
+#define READ_BIB_STACK_SIZE    16
+
+/*
+ * Read the bus info block, perform a speed probe, and read all of the rest of
+ * the config ROM.  We do all this with a cached bus generation.  If the bus
+ * generation changes under us, read_bus_info_block will fail and get retried.
+ * It's better to start all over in this case because the node from which we
+ * are reading the ROM may have changed the ROM during the reset.
+ */
+static int read_bus_info_block(struct fw_device *device, int generation)
+{
+       u32 *rom, *stack, *old_rom, *new_rom;
+       u32 sp, key;
+       int i, end, length, ret = -1;
+
+       rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
+                     sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
+       if (rom == NULL)
+               return -ENOMEM;
+
+       stack = &rom[READ_BIB_ROM_SIZE];
+
+       device->max_speed = SCODE_100;
+
+       /* First read the bus info block. */
+       for (i = 0; i < 5; i++) {
+               if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+                       goto out;
+               /*
+                * As per IEEE1212 7.2, during power-up, devices can
+                * reply with a 0 for the first quadlet of the config
+                * rom to indicate that they are booting (for example,
+                * if the firmware is on the disk of a external
+                * harddisk).  In that case we just fail, and the
+                * retry mechanism will try again later.
+                */
+               if (i == 0 && rom[i] == 0)
+                       goto out;
+       }
+
+       device->max_speed = device->node->max_speed;
+
+       /*
+        * Determine the speed of
+        *   - devices with link speed less than PHY speed,
+        *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
+        *   - all devices if there are 1394b repeaters.
+        * Note, we cannot use the bus info block's link_spd as starting point
+        * because some buggy firmwares set it lower than necessary and because
+        * 1394-1995 nodes do not have the field.
+        */
+       if ((rom[2] & 0x7) < device->max_speed ||
+           device->max_speed == SCODE_BETA ||
+           device->card->beta_repeaters_present) {
+               u32 dummy;
+
+               /* for S1600 and S3200 */
+               if (device->max_speed == SCODE_BETA)
+                       device->max_speed = device->card->link_speed;
+
+               while (device->max_speed > SCODE_100) {
+                       if (read_rom(device, generation, 0, &dummy) ==
+                           RCODE_COMPLETE)
+                               break;
+                       device->max_speed--;
+               }
+       }
+
+       /*
+        * Now parse the config rom.  The config rom is a recursive
+        * directory structure so we parse it using a stack of
+        * references to the blocks that make up the structure.  We
+        * push a reference to the root directory on the stack to
+        * start things off.
+        */
+       length = i;
+       sp = 0;
+       stack[sp++] = 0xc0000005;
+       while (sp > 0) {
+               /*
+                * Pop the next block reference of the stack.  The
+                * lower 24 bits is the offset into the config rom,
+                * the upper 8 bits are the type of the reference the
+                * block.
+                */
+               key = stack[--sp];
+               i = key & 0xffffff;
+               if (i >= READ_BIB_ROM_SIZE)
+                       /*
+                        * The reference points outside the standard
+                        * config rom area, something's fishy.
+                        */
+                       goto out;
+
+               /* Read header quadlet for the block to get the length. */
+               if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+                       goto out;
+               end = i + (rom[i] >> 16) + 1;
+               i++;
+               if (end > READ_BIB_ROM_SIZE)
+                       /*
+                        * This block extends outside standard config
+                        * area (and the array we're reading it
+                        * into).  That's broken, so ignore this
+                        * device.
+                        */
+                       goto out;
+
+               /*
+                * Now read in the block.  If this is a directory
+                * block, check the entries as we read them to see if
+                * it references another block, and push it in that case.
+                */
+               while (i < end) {
+                       if (read_rom(device, generation, i, &rom[i]) !=
+                           RCODE_COMPLETE)
+                               goto out;
+                       if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
+                           sp < READ_BIB_STACK_SIZE)
+                               stack[sp++] = i + rom[i];
+                       i++;
+               }
+               if (length < i)
+                       length = i;
+       }
+
+       old_rom = device->config_rom;
+       new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
+       if (new_rom == NULL)
+               goto out;
+
+       down_write(&fw_device_rwsem);
+       device->config_rom = new_rom;
+       device->config_rom_length = length;
+       up_write(&fw_device_rwsem);
+
+       kfree(old_rom);
+       ret = 0;
+       device->max_rec = rom[2] >> 12 & 0xf;
+       device->cmc     = rom[2] >> 30 & 1;
+       device->irmc    = rom[2] >> 31 & 1;
+ out:
+       kfree(rom);
+
+       return ret;
+}
+
+static void fw_unit_release(struct device *dev)
+{
+       /* All fw_unit state lives in the single allocation from create_units(). */
+       kfree(fw_unit(dev));
+}
+
+/* Device-model type shared by all firewire unit child devices. */
+static struct device_type fw_unit_type = {
+       .uevent         = fw_unit_uevent,
+       .release        = fw_unit_release,
+};
+
+/* Distinguish unit devices from fw_device nodes on the same bus. */
+static bool is_fw_unit(struct device *dev)
+{
+       return dev->type == &fw_unit_type;
+}
+
+/*
+ * Walk the root directory of @device's config ROM and register one
+ * fw_unit child device per unit directory found, so protocol drivers
+ * can bind to them.  A failure for one unit skips that unit but keeps
+ * scanning the remaining directory entries.
+ */
+static void create_units(struct fw_device *device)
+{
+       struct fw_csr_iterator ci;
+       struct fw_unit *unit;
+       int key, value, i;
+
+       i = 0;
+       fw_csr_iterator_init(&ci, &device->config_rom[5]);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               if (key != (CSR_UNIT | CSR_DIRECTORY))
+                       continue;
+
+               /*
+                * Get the address of the unit directory and try to
+                * match the drivers id_tables against it.
+                */
+               unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+               if (unit == NULL) {
+                       fw_error("failed to allocate memory for unit\n");
+                       continue;
+               }
+
+               unit->directory = ci.p + value - 1;
+               unit->device.bus = &fw_bus_type;
+               unit->device.type = &fw_unit_type;
+               unit->device.parent = &device->device;
+               dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
+
+               BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
+                               ARRAY_SIZE(fw_unit_attributes) +
+                               ARRAY_SIZE(config_rom_attributes));
+               init_fw_attribute_group(&unit->device,
+                                       fw_unit_attributes,
+                                       &unit->attribute_group);
+
+               /*
+                * If device_register() fails, the embedded kobject may
+                * already hold references; the driver core contract is to
+                * drop the device with put_device() (which ends up in
+                * fw_unit_release() and frees @unit) — never kfree() it
+                * directly.
+                */
+               if (device_register(&unit->device) < 0)
+                       put_device(&unit->device);
+       }
+}
+
+/* device_for_each_child() callback: unregister one child unit device. */
+static int shutdown_unit(struct device *device, void *data)
+{
+       device_unregister(device);
+       return 0;
+}
+
+/*
+ * fw_device_rwsem acts as dual purpose mutex:
+ *   - serializes accesses to fw_device_idr,
+ *   - serializes accesses to fw_device.config_rom/.config_rom_length and
+ *     fw_unit.directory, unless those accesses happen at safe occasions
+ */
+DECLARE_RWSEM(fw_device_rwsem);
+
+/* Maps character device minor numbers to struct fw_device. */
+DEFINE_IDR(fw_device_idr);
+int fw_cdev_major;
+
+/*
+ * Look up the fw_device registered under @devt's minor number and take
+ * a reference on it.  Returns NULL if none is registered.  The caller
+ * owns the reference and must balance it with fw_device_put().
+ */
+struct fw_device *fw_device_get_by_devt(dev_t devt)
+{
+       struct fw_device *device;
+
+       down_read(&fw_device_rwsem);
+       device = idr_find(&fw_device_idr, MINOR(devt));
+       if (device)
+               fw_device_get(device);
+       up_read(&fw_device_rwsem);
+
+       return device;
+}
+
+/*
+ * These defines control the retry behavior for reading the config
+ * rom.  It shouldn't be necessary to tweak these; if the device
+ * doesn't respond to a config rom read within 10 seconds, it's not
+ * going to respond at all.  As for the initial delay, a lot of
+ * devices will be able to respond within half a second after bus
+ * reset.  On the other hand, it's not really worth being more
+ * aggressive than that, since it scales pretty well; if 10 devices
+ * are plugged in, they're all getting read within one second.
+ */
+
+#define MAX_RETRIES    10              /* config ROM read attempts */
+#define RETRY_DELAY    (3 * HZ)        /* delay between read retries */
+#define INITIAL_DELAY  (HZ / 2)        /* first scan after bus reset */
+#define SHUTDOWN_DELAY (2 * HZ)        /* grace period before teardown */
+
+/*
+ * Work handler that finally tears down a device whose node went away.
+ * Teardown is postponed while the bus is still settling (within
+ * SHUTDOWN_DELAY of the last reset, card still linked) so that a
+ * device which reappears can be revived instead.
+ */
+static void fw_device_shutdown(struct work_struct *work)
+{
+       struct fw_device *device =
+               container_of(work, struct fw_device, work.work);
+       int minor = MINOR(device->device.devt);
+
+       if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
+           && !list_empty(&device->card->link)) {
+               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+               return;
+       }
+
+       /* Proceed only once; a device revived in the meantime is not GONE. */
+       if (atomic_cmpxchg(&device->state,
+                          FW_DEVICE_GONE,
+                          FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
+               return;
+
+       fw_device_cdev_remove(device);
+       device_for_each_child(&device->device, NULL, shutdown_unit);
+       device_unregister(&device->device);
+
+       down_write(&fw_device_rwsem);
+       idr_remove(&fw_device_idr, minor);
+       up_write(&fw_device_rwsem);
+
+       /* Drops the reference that the idr entry held. */
+       fw_device_put(device);
+}
+
+/*
+ * Final release, called when the last reference to the fw_device is
+ * dropped.  Detaches the node, frees the cached config ROM and the
+ * device itself, and drops the card reference taken at creation.
+ */
+static void fw_device_release(struct device *dev)
+{
+       struct fw_device *device = fw_device(dev);
+       struct fw_card *card = device->card;
+       unsigned long flags;
+
+       /*
+        * Take the card lock so we don't set this to NULL while a
+        * FW_NODE_UPDATED callback is being handled or while the
+        * bus manager work looks at this node.
+        */
+       spin_lock_irqsave(&card->lock, flags);
+       device->node->data = NULL;
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       fw_node_put(device->node);
+       kfree(device->config_rom);
+       kfree(device);
+       fw_card_put(card);
+}
+
+/* Device-model type for fw_device nodes (as opposed to fw_unit children). */
+static struct device_type fw_device_type = {
+       .release = fw_device_release,
+};
+
+/* Distinguish fw_device nodes from unit devices on the fw bus. */
+static bool is_fw_device(struct device *dev)
+{
+       return dev->type == &fw_device_type;
+}
+
+/*
+ * device_for_each_child() callback: if @dev is a unit with a bound fw
+ * driver that implements ->update(), invoke it.  dev->sem serializes
+ * the update call against driver binding/unbinding.
+ */
+static int update_unit(struct device *dev, void *data)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       struct fw_driver *driver = (struct fw_driver *)dev->driver;
+
+       if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
+               down(&dev->sem);
+               driver->update(unit);
+               up(&dev->sem);
+       }
+
+       return 0;
+}
+
+/*
+ * Work handler run after a node update: notify the character device
+ * clients and every child unit driver's ->update() hook.
+ */
+static void fw_device_update(struct work_struct *work)
+{
+       struct fw_device *device =
+               container_of(work, struct fw_device, work.work);
+
+       fw_device_cdev_update(device);
+       device_for_each_child(&device->device, NULL, update_unit);
+}
+
+/*
+ * If a device was pending for deletion because its node went away but its
+ * bus info block and root directory header matches that of a newly discovered
+ * device, revive the existing fw_device.
+ * The newly allocated fw_device becomes obsolete instead.
+ *
+ * device_find_child() callback; returns 1 (match) when the revive
+ * succeeded, 0 otherwise.
+ */
+static int lookup_existing_device(struct device *dev, void *data)
+{
+       struct fw_device *old = fw_device(dev);
+       struct fw_device *new = data;
+       struct fw_card *card = new->card;
+       int match = 0;
+
+       if (!is_fw_device(dev))
+               return 0;
+
+       /* Lock order: fw_device_rwsem before card->lock. */
+       down_read(&fw_device_rwsem); /* serialize config_rom access */
+       spin_lock_irq(&card->lock);  /* serialize node access */
+
+       /* Compare bus info block + root dir header (first 6 quadlets). */
+       if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
+           atomic_cmpxchg(&old->state,
+                          FW_DEVICE_GONE,
+                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+               struct fw_node *current_node = new->node;
+               struct fw_node *obsolete_node = old->node;
+
+               /* Swap the node bindings: old device adopts the new node. */
+               new->node = obsolete_node;
+               new->node->data = new;
+               old->node = current_node;
+               old->node->data = old;
+
+               old->max_speed = new->max_speed;
+               old->node_id = current_node->node_id;
+               smp_wmb();  /* update node_id before generation */
+               old->generation = card->generation;
+               old->config_rom_retries = 0;
+               fw_notify("rediscovered device %s\n", dev_name(dev));
+
+               PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+               schedule_delayed_work(&old->work, 0);
+
+               if (current_node == card->root_node)
+                       fw_schedule_bm_work(card, 0);
+
+               match = 1;
+       }
+
+       spin_unlock_irq(&card->lock);
+       up_read(&fw_device_rwsem);
+
+       return match;
+}
+
+/* Cached result of the Broadcast_Channel register read test. */
+enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
+
+/*
+ * Write the Broadcast_Channel Valid bit on IRM-capable nodes, probing
+ * first (and caching) whether the node implements the register at all.
+ */
+static void set_broadcast_channel(struct fw_device *device, int generation)
+{
+       struct fw_card *card = device->card;
+       __be32 data;
+       int rcode;
+
+       if (!card->broadcast_channel_allocated)
+               return;
+
+       /*
+        * The Broadcast_Channel Valid bit is required by nodes which want to
+        * transmit on this channel.  Such transmissions are practically
+        * exclusive to IP over 1394 (RFC 2734).  IP capable nodes are required
+        * to be IRM capable and have a max_rec of 8 or more.  We use this fact
+        * to narrow down to which nodes we send Broadcast_Channel updates.
+        */
+       if (!device->irmc || device->max_rec < 8)
+               return;
+
+       /*
+        * Some 1394-1995 nodes crash if this 1394a-2000 register is written.
+        * Perform a read test first.
+        */
+       if (device->bc_implemented == BC_UNKNOWN) {
+               rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
+                               device->node_id, generation, device->max_speed,
+                               CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
+                               &data, 4);
+               switch (rcode) {
+               case RCODE_COMPLETE:
+                       if (data & cpu_to_be32(1 << 31)) {
+                               device->bc_implemented = BC_IMPLEMENTED;
+                               break;
+                       }
+                       /* else fall through to case address error */
+               case RCODE_ADDRESS_ERROR:
+                       device->bc_implemented = BC_UNIMPLEMENTED;
+               }
+       }
+
+       if (device->bc_implemented == BC_IMPLEMENTED) {
+               data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
+                                  BROADCAST_CHANNEL_VALID);
+               fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+                               device->node_id, generation, device->max_speed,
+                               CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
+                               &data, 4);
+       }
+}
+
+/* device_for_each_child() wrapper around set_broadcast_channel(). */
+int fw_device_set_broadcast_channel(struct device *dev, void *gen)
+{
+       if (is_fw_device(dev))
+               set_broadcast_channel(fw_device(dev), (long)gen);
+
+       return 0;
+}
+
+/*
+ * Work handler for the initial scan of a newly created fw_device:
+ * read its config ROM (with retries), try to revive a matching
+ * previously-gone device, otherwise register the device, its sysfs
+ * attributes, its character device minor, and its unit children.
+ */
+static void fw_device_init(struct work_struct *work)
+{
+       struct fw_device *device =
+               container_of(work, struct fw_device, work.work);
+       struct device *revived_dev;
+       int minor, ret;
+
+       /*
+        * All failure paths here set node->data to NULL, so that we
+        * don't try to do device_for_each_child() on a kfree()'d
+        * device.
+        */
+
+       if (read_bus_info_block(device, device->generation) < 0) {
+               if (device->config_rom_retries < MAX_RETRIES &&
+                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+                       device->config_rom_retries++;
+                       schedule_delayed_work(&device->work, RETRY_DELAY);
+               } else {
+                       fw_notify("giving up on config rom for node id %x\n",
+                                 device->node_id);
+                       if (device->node == device->card->root_node)
+                               fw_schedule_bm_work(device->card, 0);
+                       fw_device_release(&device->device);
+               }
+               return;
+       }
+
+       /* A matching FW_DEVICE_GONE device takes over; we become obsolete. */
+       revived_dev = device_find_child(device->card->device,
+                                       device, lookup_existing_device);
+       if (revived_dev) {
+               put_device(revived_dev);
+               fw_device_release(&device->device);
+
+               return;
+       }
+
+       device_initialize(&device->device);
+
+       /* Extra reference owned by the idr entry, dropped at shutdown. */
+       fw_device_get(device);
+       down_write(&fw_device_rwsem);
+       ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+             idr_get_new(&fw_device_idr, device, &minor) :
+             -ENOMEM;
+       up_write(&fw_device_rwsem);
+
+       if (ret < 0)
+               goto error;
+
+       device->device.bus = &fw_bus_type;
+       device->device.type = &fw_device_type;
+       device->device.parent = device->card->device;
+       device->device.devt = MKDEV(fw_cdev_major, minor);
+       dev_set_name(&device->device, "fw%d", minor);
+
+       BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
+                       ARRAY_SIZE(fw_device_attributes) +
+                       ARRAY_SIZE(config_rom_attributes));
+       init_fw_attribute_group(&device->device,
+                               fw_device_attributes,
+                               &device->attribute_group);
+
+       if (device_add(&device->device)) {
+               fw_error("Failed to add device.\n");
+               goto error_with_cdev;
+       }
+
+       create_units(device);
+
+       /*
+        * Transition the device to running state.  If it got pulled
+        * out from under us while we did the initialization work, we
+        * have to shut down the device again here.  Normally, though,
+        * fw_node_event will be responsible for shutting it down when
+        * necessary.  We have to use the atomic cmpxchg here to avoid
+        * racing with the FW_NODE_DESTROYED case in
+        * fw_node_event().
+        */
+       if (atomic_cmpxchg(&device->state,
+                          FW_DEVICE_INITIALIZING,
+                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
+               PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+       } else {
+               if (device->config_rom_retries)
+                       fw_notify("created device %s: GUID %08x%08x, S%d00, "
+                                 "%d config ROM retries\n",
+                                 dev_name(&device->device),
+                                 device->config_rom[3], device->config_rom[4],
+                                 1 << device->max_speed,
+                                 device->config_rom_retries);
+               else
+                       fw_notify("created device %s: GUID %08x%08x, S%d00\n",
+                                 dev_name(&device->device),
+                                 device->config_rom[3], device->config_rom[4],
+                                 1 << device->max_speed);
+               device->config_rom_retries = 0;
+
+               set_broadcast_channel(device, device->generation);
+       }
+
+       /*
+        * Reschedule the IRM work if we just finished reading the
+        * root node config rom.  If this races with a bus reset we
+        * just end up running the IRM work a couple of extra times -
+        * pretty harmless.
+        */
+       if (device->node == device->card->root_node)
+               fw_schedule_bm_work(device->card, 0);
+
+       return;
+
+ error_with_cdev:
+       down_write(&fw_device_rwsem);
+       idr_remove(&fw_device_idr, minor);
+       up_write(&fw_device_rwsem);
+ error:
+       fw_device_put(device);          /* fw_device_idr's reference */
+
+       put_device(&device->device);    /* our reference */
+}
+
+enum {
+       REREAD_BIB_ERROR,       /* a read transaction failed */
+       REREAD_BIB_GONE,        /* first quadlet is 0: device booting/gone */
+       REREAD_BIB_UNCHANGED,   /* first 6 quadlets match the cached ROM */
+       REREAD_BIB_CHANGED,     /* at least one quadlet differs */
+};
+
+/* Reread and compare bus info block and header of root directory */
+static int reread_bus_info_block(struct fw_device *device, int generation)
+{
+       u32 q;
+       int i;
+
+       for (i = 0; i < 6; i++) {
+               if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
+                       return REREAD_BIB_ERROR;
+
+               if (i == 0 && q == 0)
+                       return REREAD_BIB_GONE;
+
+               if (q != device->config_rom[i])
+                       return REREAD_BIB_CHANGED;
+       }
+
+       return REREAD_BIB_UNCHANGED;
+}
+
+/*
+ * Work handler run after a node-initiated bus reset: re-check whether
+ * the device behind the node is still the same.  Unchanged ROM means a
+ * simple update; a changed ROM causes all units to be destroyed and
+ * recreated; repeated read errors or a zeroed ROM lead to shutdown.
+ */
+static void fw_device_refresh(struct work_struct *work)
+{
+       struct fw_device *device =
+               container_of(work, struct fw_device, work.work);
+       struct fw_card *card = device->card;
+       int node_id = device->node_id;
+
+       switch (reread_bus_info_block(device, device->generation)) {
+       case REREAD_BIB_ERROR:
+               /* Retry with half the usual budget and delay. */
+               if (device->config_rom_retries < MAX_RETRIES / 2 &&
+                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+                       device->config_rom_retries++;
+                       schedule_delayed_work(&device->work, RETRY_DELAY / 2);
+
+                       return;
+               }
+               goto give_up;
+
+       case REREAD_BIB_GONE:
+               goto gone;
+
+       case REREAD_BIB_UNCHANGED:
+               if (atomic_cmpxchg(&device->state,
+                                  FW_DEVICE_INITIALIZING,
+                                  FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
+                       goto gone;
+
+               fw_device_update(work);
+               device->config_rom_retries = 0;
+               goto out;
+
+       case REREAD_BIB_CHANGED:
+               break;
+       }
+
+       /*
+        * Something changed.  We keep things simple and don't investigate
+        * further.  We just destroy all previous units and create new ones.
+        */
+       device_for_each_child(&device->device, NULL, shutdown_unit);
+
+       if (read_bus_info_block(device, device->generation) < 0) {
+               if (device->config_rom_retries < MAX_RETRIES &&
+                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+                       device->config_rom_retries++;
+                       schedule_delayed_work(&device->work, RETRY_DELAY);
+
+                       return;
+               }
+               goto give_up;
+       }
+
+       create_units(device);
+
+       /* Userspace may want to re-read attributes. */
+       kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
+
+       if (atomic_cmpxchg(&device->state,
+                          FW_DEVICE_INITIALIZING,
+                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
+               goto gone;
+
+       fw_notify("refreshed device %s\n", dev_name(&device->device));
+       device->config_rom_retries = 0;
+       goto out;
+
+ give_up:
+       fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
+ gone:
+       atomic_set(&device->state, FW_DEVICE_GONE);
+       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+       schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
+ out:
+       if (node_id == card->root_node->node_id)
+               fw_schedule_bm_work(card, 0);
+}
+
+/*
+ * Topology-event dispatcher, called from bus management code for each
+ * node event.  Creates, refreshes, updates, or schedules shutdown of
+ * the fw_device associated with @node.
+ */
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
+{
+       struct fw_device *device;
+
+       switch (event) {
+       case FW_NODE_CREATED:
+       case FW_NODE_LINK_ON:
+               if (!node->link_on)
+                       break;
+ create:
+               /* GFP_ATOMIC: this runs in atomic context (see card->lock). */
+               device = kzalloc(sizeof(*device), GFP_ATOMIC);
+               if (device == NULL)
+                       break;
+
+               /*
+                * Do minimal initialization of the device here, the
+                * rest will happen in fw_device_init().
+                *
+                * Attention:  A lot of things, even fw_device_get(),
+                * cannot be done before fw_device_init() finished!
+                * You can basically just check device->state and
+                * schedule work until then, but only while holding
+                * card->lock.
+                */
+               atomic_set(&device->state, FW_DEVICE_INITIALIZING);
+               device->card = fw_card_get(card);
+               device->node = fw_node_get(node);
+               device->node_id = node->node_id;
+               device->generation = card->generation;
+               device->is_local = node == card->local_node;
+               mutex_init(&device->client_list_mutex);
+               INIT_LIST_HEAD(&device->client_list);
+
+               /*
+                * Set the node data to point back to this device so
+                * FW_NODE_UPDATED callbacks can update the node_id
+                * and generation for the device.
+                */
+               node->data = device;
+
+               /*
+                * Many devices are slow to respond after bus resets,
+                * especially if they are bus powered and go through
+                * power-up after getting plugged in.  We schedule the
+                * first config rom scan half a second after bus reset.
+                */
+               INIT_DELAYED_WORK(&device->work, fw_device_init);
+               schedule_delayed_work(&device->work, INITIAL_DELAY);
+               break;
+
+       case FW_NODE_INITIATED_RESET:
+               device = node->data;
+               if (device == NULL)
+                       goto create;
+
+               device->node_id = node->node_id;
+               smp_wmb();  /* update node_id before generation */
+               device->generation = card->generation;
+               if (atomic_cmpxchg(&device->state,
+                           FW_DEVICE_RUNNING,
+                           FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
+                       PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+                       schedule_delayed_work(&device->work,
+                               device->is_local ? 0 : INITIAL_DELAY);
+               }
+               break;
+
+       case FW_NODE_UPDATED:
+               if (!node->link_on || node->data == NULL)
+                       break;
+
+               device = node->data;
+               device->node_id = node->node_id;
+               smp_wmb();  /* update node_id before generation */
+               device->generation = card->generation;
+               if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
+                       PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+                       schedule_delayed_work(&device->work, 0);
+               }
+               break;
+
+       case FW_NODE_DESTROYED:
+       case FW_NODE_LINK_OFF:
+               if (!node->data)
+                       break;
+
+               /*
+                * Destroy the device associated with the node.  There
+                * are two cases here: either the device is fully
+                * initialized (FW_DEVICE_RUNNING) or we're in the
+                * process of reading its config rom
+                * (FW_DEVICE_INITIALIZING).  If it is fully
+                * initialized we can reuse device->work to schedule a
+                * full fw_device_shutdown().  If not, there's work
+                * scheduled to read its config rom, and we just put
+                * the device in shutdown state to have that code fail
+                * to create the device.
+                */
+               device = node->data;
+               if (atomic_xchg(&device->state,
+                               FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
+                       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+                       schedule_delayed_work(&device->work,
+                               list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
+               }
+               break;
+       }
+}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
new file mode 100644 (file)
index 0000000..28076c8
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * Isochronous I/O functionality:
+ *   - Isochronous DMA context management
+ *   - Isochronous bus resource management (channels, bandwidth), client side
+ *
+ * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+
+#include <asm/byteorder.h>
+
+#include "core.h"
+
+/*
+ * Isochronous DMA context management
+ */
+
+/**
+ * fw_iso_buffer_init - allocate and DMA-map a page-based iso buffer
+ * @buffer:     buffer descriptor to fill in
+ * @card:       card whose device the pages are mapped for
+ * @page_count: number of PAGE_SIZE pages to allocate
+ * @direction:  DMA data direction of the mapping
+ *
+ * Returns 0 on success or -ENOMEM on allocation/mapping failure, in
+ * which case all partially set up pages are unmapped and freed and
+ * buffer->pages is left NULL.
+ */
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+                      int page_count, enum dma_data_direction direction)
+{
+       int i, j;
+       dma_addr_t address;
+
+       buffer->page_count = page_count;
+       buffer->direction = direction;
+
+       buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
+                               GFP_KERNEL);
+       if (buffer->pages == NULL)
+               goto out;
+
+       for (i = 0; i < buffer->page_count; i++) {
+               buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+               if (buffer->pages[i] == NULL)
+                       goto out_pages;
+
+               address = dma_map_page(card->device, buffer->pages[i],
+                                      0, PAGE_SIZE, direction);
+               if (dma_mapping_error(card->device, address)) {
+                       __free_page(buffer->pages[i]);
+                       goto out_pages;
+               }
+               /* Stash the bus address in the page for later unmapping. */
+               set_page_private(buffer->pages[i], address);
+       }
+
+       return 0;
+
+ out_pages:
+       for (j = 0; j < i; j++) {
+               address = page_private(buffer->pages[j]);
+               /*
+                * Unmap with the direction the pages were mapped with;
+                * the DMA API requires matching directions (this was
+                * previously hard-coded to DMA_TO_DEVICE, which is wrong
+                * for DMA_FROM_DEVICE buffers).
+                */
+               dma_unmap_page(card->device, address,
+                              PAGE_SIZE, direction);
+               __free_page(buffer->pages[j]);
+       }
+       kfree(buffer->pages);
+ out:
+       buffer->pages = NULL;
+
+       return -ENOMEM;
+}
+
+/* Insert every page of the iso buffer into a userspace VMA, in order. */
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+{
+       unsigned long addr = vma->vm_start;
+       int i;
+
+       for (i = 0; i < buffer->page_count; i++, addr += PAGE_SIZE) {
+               int err = vm_insert_page(vma, addr, buffer->pages[i]);
+
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/**
+ * fw_iso_buffer_destroy - unmap and free all pages of an iso buffer
+ * @buffer: buffer previously set up by fw_iso_buffer_init()
+ * @card:   card whose device the pages were mapped for
+ */
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
+                          struct fw_card *card)
+{
+       int i;
+       dma_addr_t address;
+
+       for (i = 0; i < buffer->page_count; i++) {
+               address = page_private(buffer->pages[i]);
+               /*
+                * Unmap with the direction recorded at init time; the
+                * DMA API requires map/unmap directions to match (this
+                * was previously hard-coded to DMA_TO_DEVICE).
+                */
+               dma_unmap_page(card->device, address,
+                              PAGE_SIZE, buffer->direction);
+               __free_page(buffer->pages[i]);
+       }
+
+       kfree(buffer->pages);
+       buffer->pages = NULL;
+}
+
+/*
+ * Allocate an isochronous DMA context from the card driver and fill in
+ * the generic context fields.  Returns an ERR_PTR on failure.
+ */
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+               int type, int channel, int speed, size_t header_size,
+               fw_iso_callback_t callback, void *callback_data)
+{
+       struct fw_iso_context *ctx;
+
+       ctx = card->driver->allocate_iso_context(card,
+                                                type, channel, header_size);
+       if (IS_ERR(ctx))
+               return ctx;
+
+       ctx->card = card;
+       ctx->type = type;
+       ctx->channel = channel;
+       ctx->speed = speed;
+       ctx->header_size = header_size;
+       ctx->callback = callback;
+       ctx->callback_data = callback_data;
+
+       return ctx;
+}
+
+void fw_iso_context_destroy(struct fw_iso_context *ctx)
+{
+       /* The card driver owns the context memory and tears it down. */
+       ctx->card->driver->free_iso_context(ctx);
+}
+
+/* Start the iso DMA context; parameters pass through to the card driver. */
+int fw_iso_context_start(struct fw_iso_context *ctx,
+                        int cycle, int sync, int tags)
+{
+       return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
+}
+
+/* Hand one iso packet to the card driver's queueing implementation. */
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+                        struct fw_iso_packet *packet,
+                        struct fw_iso_buffer *buffer,
+                        unsigned long payload)
+{
+       return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
+}
+
+/* Stop the iso DMA context via the card driver. */
+int fw_iso_context_stop(struct fw_iso_context *ctx)
+{
+       return ctx->card->driver->stop_iso(ctx);
+}
+
+/*
+ * Isochronous bus resource management (channels, bandwidth), client side
+ */
+
+/*
+ * Allocate or release @bandwidth units at the IRM's BANDWIDTH_AVAILABLE
+ * register via a compare-swap lock transaction.  Returns the amount
+ * moved on success, -EAGAIN after a generation change while allocating
+ * (all bandwidth is freed by the reset), or -EIO when the retries are
+ * exhausted.
+ */
+static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
+                           int bandwidth, bool allocate)
+{
+       __be32 data[2];
+       int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
+
+       /*
+        * On a 1394a IRM with low contention, try < 1 is enough.
+        * On a 1394-1995 IRM, we need at least try < 2.
+        * Let's just do try < 5.
+        */
+       for (try = 0; try < 5; try++) {
+               new = allocate ? old - bandwidth : old + bandwidth;
+               if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
+                       break;
+
+               data[0] = cpu_to_be32(old);
+               data[1] = cpu_to_be32(new);
+               switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+                               irm_id, generation, SCODE_100,
+                               CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
+                               data, sizeof(data))) {
+               case RCODE_GENERATION:
+                       /* A generation change frees all bandwidth. */
+                       return allocate ? -EAGAIN : bandwidth;
+
+               case RCODE_COMPLETE:
+                       if (be32_to_cpup(data) == old)
+                               return bandwidth;
+
+                       /* Lost the compare-swap race: retry with fresh value. */
+                       old = be32_to_cpup(data);
+                       /* Fall through. */
+               }
+       }
+
+       return -EIO;
+}
+
+/*
+ * Allocate (allocate == true) or release one channel out of channels_mask
+ * at the IRM's CHANNELS_AVAILABLE register given by offset, using lock
+ * compare-swap.  channels_mask has its LSB for the lowest channel number;
+ * the CSR register is a big-endian bitfield with the MSB for the lowest
+ * channel, hence the 1 << (31 - i) conversion below.
+ *
+ * Returns the channel index relative to this register half, -EAGAIN if
+ * the generation changed during an allocation, or -EIO on failure.
+ */
+static int manage_channel(struct fw_card *card, int irm_id, int generation,
+                         u32 channels_mask, u64 offset, bool allocate)
+{
+       __be32 data[2], c, all, old;
+       int i, retry = 5;
+
+       /* Expected register contents: all bits set while channels are free. */
+       old = all = allocate ? cpu_to_be32(~0) : 0;
+
+       for (i = 0; i < 32; i++) {
+               /* NOTE(review): 1 << i with i == 31 shifts into the sign bit
+                * (formally UB in C); kernel convention tolerates this, but
+                * 1u << i would be cleaner — confirm before changing. */
+               if (!(channels_mask & 1 << i))
+                       continue;
+
+               c = cpu_to_be32(1 << (31 - i));
+               /* Skip channels already allocated/deallocated per our view. */
+               if ((old & c) != (all & c))
+                       continue;
+
+               data[0] = old;
+               data[1] = old ^ c;
+               switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
+                                          irm_id, generation, SCODE_100,
+                                          offset, data, sizeof(data))) {
+               case RCODE_GENERATION:
+                       /* A generation change frees all channels. */
+                       return allocate ? -EAGAIN : i;
+
+               case RCODE_COMPLETE:
+                       if (data[0] == old)
+                               return i;
+
+                       old = data[0];
+
+                       /* Is the IRM 1394a-2000 compliant? */
+                       if ((data[0] & c) == (data[1] & c))
+                               continue;
+
+                       /* 1394-1995 IRM, fall through to retry. */
+               default:
+                       /*
+                        * Retry the same channel (i--) until the shared
+                        * retry budget of 5 is used up, then move on.
+                        */
+                       if (retry--)
+                               i--;
+               }
+       }
+
+       return -EIO;
+}
+
+/*
+ * Release a single previously allocated channel at the IRM.  Channels
+ * 0..31 live in CHANNELS_AVAILABLE_HI, channels 32..63 in
+ * CHANNELS_AVAILABLE_LO; the mask is relative to the chosen register.
+ * Failure is ignored — a bus reset frees the channel anyway.
+ */
+static void deallocate_channel(struct fw_card *card, int irm_id,
+                              int generation, int channel)
+{
+       u32 mask;
+       u64 offset;
+
+       mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
+       offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
+                               CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
+
+       manage_channel(card, irm_id, generation, mask, offset, false);
+}
+
+/**
+ * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
+ *
+ * In parameters: card, generation, channels_mask, bandwidth, allocate
+ * Out parameters: channel, bandwidth
+ * This function blocks (sleeps) during communication with the IRM.
+ *
+ * Allocates or deallocates at most one channel out of channels_mask.
+ * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
+ * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
+ * channel 0 and LSB for channel 63.)
+ * Allocates or deallocates as many bandwidth allocation units as specified.
+ *
+ * Returns channel < 0 if no channel was allocated or deallocated.
+ * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
+ *
+ * If generation is stale, deallocations succeed but allocations fail with
+ * channel = -EAGAIN.
+ *
+ * If channel allocation fails, no bandwidth will be allocated either.
+ * If bandwidth allocation fails, no channel will be allocated either.
+ * But deallocations of channel and bandwidth are tried independently
+ * of each other's success.
+ */
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+                           u64 channels_mask, int *channel, int *bandwidth,
+                           bool allocate)
+{
+       u32 channels_hi = channels_mask;        /* channels 31...0 */
+       u32 channels_lo = channels_mask >> 32;  /* channels 63...32 */
+       int irm_id, ret, c = -EINVAL;
+
+       /* Snapshot the IRM's node ID under the lock; the transactions below
+        * validate it against the generation anyway. */
+       spin_lock_irq(&card->lock);
+       irm_id = card->irm_node->node_id;
+       spin_unlock_irq(&card->lock);
+
+       /* Try the low-numbered channels first, then the high-numbered ones. */
+       if (channels_hi)
+               c = manage_channel(card, irm_id, generation, channels_hi,
+                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
+       if (channels_lo && c < 0) {
+               c = manage_channel(card, irm_id, generation, channels_lo,
+                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
+               if (c >= 0)
+                       c += 32;
+       }
+       *channel = c;
+
+       /* A channel was wanted but not obtained: skip bandwidth allocation. */
+       if (allocate && channels_mask != 0 && c < 0)
+               *bandwidth = 0;
+
+       if (*bandwidth == 0)
+               return;
+
+       ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
+       if (ret < 0)
+               *bandwidth = 0;
+
+       /* Bandwidth allocation failed: roll back the channel allocation. */
+       if (allocate && ret < 0 && c >= 0) {
+               deallocate_channel(card, irm_id, generation, c);
+               *channel = ret;
+       }
+}
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
new file mode 100644 (file)
index 0000000..fddf2b3
--- /dev/null
@@ -0,0 +1,572 @@
+/*
+ * Incremental bus scan, based on bus topology
+ *
+ * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include "core.h"
+
+#define SELF_ID_PHY_ID(q)              (((q) >> 24) & 0x3f)
+#define SELF_ID_EXTENDED(q)            (((q) >> 23) & 0x01)
+#define SELF_ID_LINK_ON(q)             (((q) >> 22) & 0x01)
+#define SELF_ID_GAP_COUNT(q)           (((q) >> 16) & 0x3f)
+#define SELF_ID_PHY_SPEED(q)           (((q) >> 14) & 0x03)
+#define SELF_ID_CONTENDER(q)           (((q) >> 11) & 0x01)
+#define SELF_ID_PHY_INITIATOR(q)       (((q) >>  1) & 0x01)
+#define SELF_ID_MORE_PACKETS(q)                (((q) >>  0) & 0x01)
+
+#define SELF_ID_EXT_SEQUENCE(q)                (((q) >> 20) & 0x07)
+
+#define SELFID_PORT_CHILD      0x3
+#define SELFID_PORT_PARENT     0x2
+#define SELFID_PORT_NCONN      0x1
+#define SELFID_PORT_NONE       0x0
+
+/*
+ * Parse one self-ID packet group (a base packet plus any extended self-ID
+ * packets) starting at sid.  Counts the node's connected ports and child
+ * ports into the out parameters.  Returns a pointer to the first quadlet
+ * after this group, or NULL if the extended packets are inconsistent.
+ */
+static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
+{
+       u32 q;
+       int port_type, shift, seq;
+
+       *total_port_count = 0;
+       *child_port_count = 0;
+
+       /* The base packet carries port fields p0..p2 at bit shifts 6, 4, 2. */
+       shift = 6;
+       q = *sid;
+       seq = 0;
+
+       while (1) {
+               port_type = (q >> shift) & 0x03;
+               switch (port_type) {
+               case SELFID_PORT_CHILD:
+                       (*child_port_count)++;
+                       /* fall through - a child port is a connected port too */
+               case SELFID_PORT_PARENT:
+               case SELFID_PORT_NCONN:
+                       (*total_port_count)++;
+                       /* fall through */
+               case SELFID_PORT_NONE:
+                       break;
+               }
+
+               shift -= 2;
+               if (shift == 0) {
+                       if (!SELF_ID_MORE_PACKETS(q))
+                               return sid + 1;
+
+                       /* Extended packets carry 8 ports starting at bit 16. */
+                       shift = 16;
+                       sid++;
+                       q = *sid;
+
+                       /*
+                        * Check that the extra packets actually are
+                        * extended self ID packets and that the
+                        * sequence numbers in the extended self ID
+                        * packets increase as expected.
+                        */
+
+                       if (!SELF_ID_EXTENDED(q) ||
+                           seq != SELF_ID_EXT_SEQUENCE(q))
+                               return NULL;
+
+                       seq++;
+               }
+       }
+}
+
+/*
+ * Extract the 2-bit port status for port_index from a self-ID group.
+ * The base quadlet holds ports 0..2 (at shifts 6, 4, 2); each extended
+ * quadlet holds 8 further ports starting at bit 16.  The "+ 5" offset
+ * aligns port 3 with the first field of the first extended quadlet.
+ */
+static int get_port_type(u32 *sid, int port_index)
+{
+       int index, shift;
+
+       index = (port_index + 5) / 8;
+       shift = 16 - ((port_index + 5) & 7) * 2;
+       return (sid[index] >> shift) & 0x03;
+}
+
+/*
+ * Allocate and initialize a topology node from the base self-ID quadlet.
+ * port_count trailing port pointers are allocated along with the node;
+ * GFP_ATOMIC because this runs under card->lock during bus-reset handling.
+ * Returns NULL on allocation failure.  The caller owns one reference.
+ */
+static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
+{
+       struct fw_node *node;
+
+       node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
+                      GFP_ATOMIC);
+       if (node == NULL)
+               return NULL;
+
+       node->color = color;
+       node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
+       node->link_on = SELF_ID_LINK_ON(sid);
+       node->phy_speed = SELF_ID_PHY_SPEED(sid);
+       node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
+       node->port_count = port_count;
+
+       atomic_set(&node->ref_count, 1);
+       INIT_LIST_HEAD(&node->link);
+
+       return node;
+}
+
+/*
+ * Compute the maximum hop count for this node and its children.  The
+ * maximum hop count is the maximum number of connections between any
+ * two nodes in the subtree rooted at this node.  We need this for
+ * setting the gap count.  As we build the tree bottom up in
+ * build_tree() below, this is fairly easy to do: for each node we
+ * maintain the max hop count and the max depth, i.e. the number of hops
+ * to the furthest leaf.  Computing the max hop count breaks down into
+ * two cases: either the path goes through this node, in which case
+ * the hop count is the sum of the two biggest child depths plus 2.
+ * Or it could be the case that the max hop path is entirely
+ * contained in a child tree, in which case the max hop count is just
+ * the max hop count of this child.
+ */
+static void update_hop_count(struct fw_node *node)
+{
+       /* depths[0] >= depths[1]: the two largest child depths seen so far. */
+       int depths[2] = { -1, -1 };
+       int max_child_hops = 0;
+       int i;
+
+       for (i = 0; i < node->port_count; i++) {
+               if (node->ports[i] == NULL)
+                       continue;
+
+               if (node->ports[i]->max_hops > max_child_hops)
+                       max_child_hops = node->ports[i]->max_hops;
+
+               if (node->ports[i]->max_depth > depths[0]) {
+                       depths[1] = depths[0];
+                       depths[0] = node->ports[i]->max_depth;
+               } else if (node->ports[i]->max_depth > depths[1])
+                       depths[1] = node->ports[i]->max_depth;
+       }
+
+       node->max_depth = depths[0] + 1;
+       node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
+}
+
+/* Convert an embedded list_head back to its containing fw_node. */
+static inline struct fw_node *fw_node(struct list_head *l)
+{
+       return list_entry(l, struct fw_node, link);
+}
+
+/**
+ * build_tree - Build the tree representation of the topology
+ * @card: the card the self IDs were received on
+ * @sid: array of self IDs to create the tree from
+ * @self_id_count: the length of the sid array
+ *
+ * This function builds the tree representation of the topology given
+ * by the self IDs from the latest bus reset.  During the construction
+ * of the tree, the function checks that the self IDs are valid and
+ * internally consistent.  On success this function returns the
+ * fw_node corresponding to the local card otherwise NULL.
+ */
+static struct fw_node *build_tree(struct fw_card *card,
+                                 u32 *sid, int self_id_count)
+{
+       struct fw_node *node, *child, *local_node, *irm_node;
+       struct list_head stack, *h;
+       u32 *next_sid, *end, q;
+       int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
+       int gap_count;
+       bool beta_repeaters_present;
+
+       local_node = NULL;
+       node = NULL;
+       INIT_LIST_HEAD(&stack);
+       stack_depth = 0;
+       end = sid + self_id_count;
+       /* Self IDs arrive in ascending PHY ID order, starting from 0. */
+       phy_id = 0;
+       irm_node = NULL;
+       gap_count = SELF_ID_GAP_COUNT(*sid);
+       beta_repeaters_present = false;
+
+       while (sid < end) {
+               next_sid = count_ports(sid, &port_count, &child_port_count);
+
+               if (next_sid == NULL) {
+                       fw_error("Inconsistent extended self IDs.\n");
+                       return NULL;
+               }
+
+               q = *sid;
+               if (phy_id != SELF_ID_PHY_ID(q)) {
+                       fw_error("PHY ID mismatch in self ID: %d != %d.\n",
+                                phy_id, SELF_ID_PHY_ID(q));
+                       return NULL;
+               }
+
+               if (child_port_count > stack_depth) {
+                       fw_error("Topology stack underflow\n");
+                       return NULL;
+               }
+
+               /*
+                * Seek back from the top of our stack to find the
+                * start of the child nodes for this node.
+                */
+               for (i = 0, h = &stack; i < child_port_count; i++)
+                       h = h->prev;
+               /*
+                * When the stack is empty, this yields an invalid value,
+                * but that pointer will never be dereferenced.
+                */
+               child = fw_node(h);
+
+               node = fw_node_create(q, port_count, card->color);
+               if (node == NULL) {
+                       fw_error("Out of memory while building topology.\n");
+                       return NULL;
+               }
+
+               if (phy_id == (card->node_id & 0x3f))
+                       local_node = node;
+
+               /* The contending node with the highest PHY ID wins IRM. */
+               if (SELF_ID_CONTENDER(q))
+                       irm_node = node;
+
+               parent_count = 0;
+
+               for (i = 0; i < port_count; i++) {
+                       switch (get_port_type(sid, i)) {
+                       case SELFID_PORT_PARENT:
+                               /*
+                                * Who's your daddy?  We don't know the
+                                * parent node at this time, so we
+                                * temporarily abuse node->color for
+                                * remembering the entry in the
+                                * node->ports array where the parent
+                                * node should be.  Later, when we
+                                * handle the parent node, we fix up
+                                * the reference.
+                                */
+                               parent_count++;
+                               node->color = i;
+                               break;
+
+                       case SELFID_PORT_CHILD:
+                               node->ports[i] = child;
+                               /*
+                                * Fix up parent reference for this
+                                * child node.
+                                */
+                               child->ports[child->color] = node;
+                               child->color = card->color;
+                               child = fw_node(child->link.next);
+                               break;
+                       }
+               }
+
+               /*
+                * Check that the node reports exactly one parent
+                * port, except for the root, which of course should
+                * have no parents.
+                */
+               if ((next_sid == end && parent_count != 0) ||
+                   (next_sid < end && parent_count != 1)) {
+                       fw_error("Parent port inconsistency for node %d: "
+                                "parent_count=%d\n", phy_id, parent_count);
+                       return NULL;
+               }
+
+               /* Pop the child nodes off the stack and push the new node. */
+               __list_del(h->prev, &stack);
+               list_add_tail(&node->link, &stack);
+               stack_depth += 1 - child_port_count;
+
+               /* A beta-speed PHY with more than one connection repeats. */
+               if (node->phy_speed == SCODE_BETA &&
+                   parent_count + child_port_count > 1)
+                       beta_repeaters_present = true;
+
+               /*
+                * If PHYs report different gap counts, set an invalid count
+                * which will force a gap count reconfiguration and a reset.
+                */
+               if (SELF_ID_GAP_COUNT(q) != gap_count)
+                       gap_count = 0;
+
+               update_hop_count(node);
+
+               sid = next_sid;
+               phy_id++;
+       }
+
+       /* The last node processed is the root of the bus. */
+       card->root_node = node;
+       card->irm_node = irm_node;
+       card->gap_count = gap_count;
+       card->beta_repeaters_present = beta_repeaters_present;
+
+       return local_node;
+}
+
+/* Per-node visitor; parent is the node's parent in the tree (NULL for root). */
+typedef void (*fw_node_callback_t)(struct fw_card * card,
+                                  struct fw_node * node,
+                                  struct fw_node * parent);
+
+/*
+ * Traverse the subtree rooted at root breadth-first, invoking callback
+ * on each node together with its parent.  card->color marks visited
+ * nodes; a neighboring node that already carries the current color is
+ * this node's parent rather than an unvisited child.  Each queued node
+ * holds an extra reference for the duration of the walk, dropped at the
+ * end (so a callback like report_lost_node may release the final ref).
+ */
+static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
+                            fw_node_callback_t callback)
+{
+       struct list_head list;
+       struct fw_node *node, *next, *child, *parent;
+       int i;
+
+       INIT_LIST_HEAD(&list);
+
+       fw_node_get(root);
+       list_add_tail(&root->link, &list);
+       parent = NULL;
+       list_for_each_entry(node, &list, link) {
+               node->color = card->color;
+
+               for (i = 0; i < node->port_count; i++) {
+                       child = node->ports[i];
+                       if (!child)
+                               continue;
+                       if (child->color == card->color)
+                               parent = child;
+                       else {
+                               fw_node_get(child);
+                               list_add_tail(&child->link, &list);
+                       }
+               }
+
+               callback(card, node, parent);
+       }
+
+       list_for_each_entry_safe(node, next, &list, link)
+               fw_node_put(node);
+}
+
+/*
+ * for_each_fw_node() callback: emit FW_NODE_DESTROYED for a node that
+ * disappeared from the topology and drop the tree's reference on it.
+ */
+static void report_lost_node(struct fw_card *card,
+                            struct fw_node *node, struct fw_node *parent)
+{
+       fw_node_event(card, node, FW_NODE_DESTROYED);
+       fw_node_put(node);
+
+       /* Topology has changed - reset bus manager retry counter */
+       card->bm_retries = 0;
+}
+
+/*
+ * for_each_fw_node() callback: emit FW_NODE_CREATED for a newly found
+ * node.  max_speed and b_path are inherited along the path from the
+ * root, so they reflect the slowest hop between us and the node.
+ */
+static void report_found_node(struct fw_card *card,
+                             struct fw_node *node, struct fw_node *parent)
+{
+       int b_path = (node->phy_speed == SCODE_BETA);
+
+       if (parent != NULL) {
+               /* min() macro doesn't work here with gcc 3.4 */
+               node->max_speed = parent->max_speed < node->phy_speed ?
+                                       parent->max_speed : node->phy_speed;
+               node->b_path = parent->b_path && b_path;
+       } else {
+               node->max_speed = node->phy_speed;
+               node->b_path = b_path;
+       }
+
+       fw_node_event(card, node, FW_NODE_CREATED);
+
+       /* Topology has changed - reset bus manager retry counter */
+       card->bm_retries = 0;
+}
+
+/*
+ * Drop the whole topology tree: report every node as destroyed and
+ * forget the local node.  Bumping card->color first ensures the
+ * traversal's visited-marking is distinct from any previous walk.
+ */
+void fw_destroy_nodes(struct fw_card *card)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       card->color++;
+       if (card->local_node != NULL)
+               for_each_fw_node(card, card->local_node, report_lost_node);
+       card->local_node = NULL;
+       spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/*
+ * Splice the subtree hanging off node1's port into node0: node0 takes
+ * over the port, and the subtree's back-pointer to node1 is redirected
+ * to node0.  Used by update_tree() when new nodes appeared on a port.
+ */
+static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
+{
+       struct fw_node *tree;
+       int i;
+
+       tree = node1->ports[port];
+       node0->ports[port] = tree;
+       for (i = 0; i < tree->port_count; i++) {
+               if (tree->ports[i] == node1) {
+                       tree->ports[i] = node0;
+                       break;
+               }
+       }
+}
+
+/**
+ * update_tree - compare the old topology tree for card with the new
+ * one specified by root.  Queue the nodes and mark them as either
+ * found, lost or updated.  Update the nodes in the card topology tree
+ * as we go.
+ */
+static void update_tree(struct fw_card *card, struct fw_node *root)
+{
+       struct list_head list0, list1;
+       struct fw_node *node0, *node1, *next1;
+       int i, event;
+
+       /* list0/list1 are walked in lock step: node0 is the old tree's
+        * counterpart of node1 in the new tree. */
+       INIT_LIST_HEAD(&list0);
+       list_add_tail(&card->local_node->link, &list0);
+       INIT_LIST_HEAD(&list1);
+       list_add_tail(&root->link, &list1);
+
+       node0 = fw_node(list0.next);
+       node1 = fw_node(list1.next);
+
+       while (&node0->link != &list0) {
+               WARN_ON(node0->port_count != node1->port_count);
+
+               /* Derive the node event from old vs. new link state. */
+               if (node0->link_on && !node1->link_on)
+                       event = FW_NODE_LINK_OFF;
+               else if (!node0->link_on && node1->link_on)
+                       event = FW_NODE_LINK_ON;
+               else if (node1->initiated_reset && node1->link_on)
+                       event = FW_NODE_INITIATED_RESET;
+               else
+                       event = FW_NODE_UPDATED;
+
+               /* Copy the fresh per-reset state onto the retained old node. */
+               node0->node_id = node1->node_id;
+               node0->color = card->color;
+               node0->link_on = node1->link_on;
+               node0->initiated_reset = node1->initiated_reset;
+               node0->max_hops = node1->max_hops;
+               node1->color = card->color;
+               fw_node_event(card, node0, event);
+
+               if (card->root_node == node1)
+                       card->root_node = node0;
+               if (card->irm_node == node1)
+                       card->irm_node = node0;
+
+               for (i = 0; i < node0->port_count; i++) {
+                       if (node0->ports[i] && node1->ports[i]) {
+                               /*
+                                * This port didn't change, queue the
+                                * connected node for further
+                                * investigation.
+                                */
+                               if (node0->ports[i]->color == card->color)
+                                       continue;
+                               list_add_tail(&node0->ports[i]->link, &list0);
+                               list_add_tail(&node1->ports[i]->link, &list1);
+                       } else if (node0->ports[i]) {
+                               /*
+                                * The nodes connected here were
+                                * unplugged; unref the lost nodes and
+                                * queue FW_NODE_LOST callbacks for
+                                * them.
+                                */
+
+                               for_each_fw_node(card, node0->ports[i],
+                                                report_lost_node);
+                               node0->ports[i] = NULL;
+                       } else if (node1->ports[i]) {
+                               /*
+                                * One or more node were connected to
+                                * this port. Move the new nodes into
+                                * the tree and queue FW_NODE_CREATED
+                                * callbacks for them.
+                                */
+                               move_tree(node0, node1, i);
+                               for_each_fw_node(card, node0->ports[i],
+                                                report_found_node);
+                       }
+               }
+
+               /* Advance both walks; the new-tree node is no longer needed. */
+               node0 = fw_node(node0->link.next);
+               next1 = fw_node(node1->link.next);
+               fw_node_put(node1);
+               node1 = next1;
+       }
+}
+
+/*
+ * Refresh the card's TOPOLOGY_MAP CSR contents: bump the generation
+ * field, record node/self-ID counts, copy in the raw self IDs and
+ * recompute the CRC/length quadlet.
+ */
+static void update_topology_map(struct fw_card *card,
+                               u32 *self_ids, int self_id_count)
+{
+       int node_count;
+
+       card->topology_map[1]++;
+       node_count = (card->root_node->node_id & 0x3f) + 1;
+       card->topology_map[2] = (node_count << 16) | self_id_count;
+       card->topology_map[0] = (self_id_count + 2) << 16;
+       memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
+       fw_compute_block_crc(card->topology_map);
+}
+
+/*
+ * Entry point called by card drivers after a bus reset: rebuild the
+ * topology tree from the received self IDs and report node events by
+ * diffing against the previous tree (or treating all nodes as new).
+ */
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
+                             int self_id_count, u32 *self_ids)
+{
+       struct fw_node *local_node;
+       unsigned long flags;
+
+       /*
+        * If the selfID buffer is not the immediate successor of the
+        * previously processed one, we cannot reliably compare the
+        * old and new topologies.
+        */
+       if (!is_next_generation(generation, card->generation) &&
+           card->local_node != NULL) {
+               fw_notify("skipped bus generations, destroying all nodes\n");
+               fw_destroy_nodes(card);
+               card->bm_retries = 0;
+       }
+
+       spin_lock_irqsave(&card->lock, flags);
+
+       card->broadcast_channel_allocated = false;
+       card->node_id = node_id;
+       /*
+        * Update node_id before generation to prevent anybody from using
+        * a stale node_id together with a current generation.
+        */
+       smp_wmb();
+       card->generation = generation;
+       card->reset_jiffies = jiffies;
+       fw_schedule_bm_work(card, 0);
+
+       local_node = build_tree(card, self_ids, self_id_count);
+
+       update_topology_map(card, self_ids, self_id_count);
+
+       /* New color for the upcoming tree walk / diff. */
+       card->color++;
+
+       if (local_node == NULL) {
+               fw_error("topology build failed\n");
+               /* FIXME: We need to issue a bus reset in this case. */
+       } else if (card->local_node == NULL) {
+               /* First reset we see: every node in the tree is new. */
+               card->local_node = local_node;
+               for_each_fw_node(card, local_node, report_found_node);
+       } else {
+               update_tree(card, local_node);
+       }
+
+       spin_unlock_irqrestore(&card->lock, flags);
+}
+EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
new file mode 100644 (file)
index 0000000..479b22f
--- /dev/null
@@ -0,0 +1,978 @@
+/*
+ * Core IEEE1394 transaction logic
+ *
+ * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#include "core.h"
+
+#define HEADER_PRI(pri)                        ((pri) << 0)
+#define HEADER_TCODE(tcode)            ((tcode) << 4)
+#define HEADER_RETRY(retry)            ((retry) << 8)
+#define HEADER_TLABEL(tlabel)          ((tlabel) << 10)
+#define HEADER_DESTINATION(destination)        ((destination) << 16)
+#define HEADER_SOURCE(source)          ((source) << 16)
+#define HEADER_RCODE(rcode)            ((rcode) << 12)
+#define HEADER_OFFSET_HIGH(offset_high)        ((offset_high) << 0)
+#define HEADER_DATA_LENGTH(length)     ((length) << 16)
+#define HEADER_EXTENDED_TCODE(tcode)   ((tcode) << 0)
+
+#define HEADER_GET_TCODE(q)            (((q) >> 4) & 0x0f)
+#define HEADER_GET_TLABEL(q)           (((q) >> 10) & 0x3f)
+#define HEADER_GET_RCODE(q)            (((q) >> 12) & 0x0f)
+#define HEADER_GET_DESTINATION(q)      (((q) >> 16) & 0xffff)
+#define HEADER_GET_SOURCE(q)           (((q) >> 16) & 0xffff)
+#define HEADER_GET_OFFSET_HIGH(q)      (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q)      (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q)   (((q) >> 0) & 0xffff)
+
+#define HEADER_DESTINATION_IS_BROADCAST(q) \
+       (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
+
+#define PHY_PACKET_CONFIG      0x0
+#define PHY_PACKET_LINK_ON     0x1
+#define PHY_PACKET_SELF_ID     0x2
+
+#define PHY_CONFIG_GAP_COUNT(gap_count)        (((gap_count) << 16) | (1 << 22))
+#define PHY_CONFIG_ROOT_ID(node_id)    ((((node_id) & 0x3f) << 24) | (1 << 23))
+#define PHY_IDENTIFIER(id)             ((id) << 30)
+
+/*
+ * Remove @transaction from the card's pending-transaction list (if it is
+ * still there), release its tlabel and invoke its completion callback
+ * with @rcode.  Returns 0 on success, -ENOENT if the transaction had
+ * already completed.
+ */
+static int close_transaction(struct fw_transaction *transaction,
+                            struct fw_card *card, int rcode)
+{
+       struct fw_transaction *t;
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       list_for_each_entry(t, &card->transaction_list, link) {
+               if (t == transaction) {
+                       list_del(&t->link);
+                       card->tlabel_mask &= ~(1ULL << t->tlabel);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       /*
+        * If the loop ran to completion without a match, t's link member
+        * points back at the list head; in that case the transaction was
+        * not found.  Call the callback outside of card->lock.
+        */
+       if (&t->link != &card->transaction_list) {
+               t->callback(card, rcode, NULL, 0, t->callback_data);
+               return 0;
+       }
+
+       return -ENOENT;
+}
+
+/*
+ * Only valid for transactions that are potentially pending (ie have
+ * been sent).
+ */
+/*
+ * fw_cancel_transaction - abort a previously sent transaction
+ * @card:        card the transaction was sent on
+ * @transaction: the transaction to cancel
+ *
+ * Returns 0 if the packet was still queued (the driver's cancel path
+ * completes the transaction), or the result of close_transaction()
+ * otherwise (-ENOENT if it already completed).
+ */
+int fw_cancel_transaction(struct fw_card *card,
+                         struct fw_transaction *transaction)
+{
+       /*
+        * Cancel the packet transmission if it's still queued.  That
+        * will call the packet transmission callback which cancels
+        * the transaction.
+        */
+
+       if (card->driver->cancel_packet(card, &transaction->packet) == 0)
+               return 0;
+
+       /*
+        * If the request packet has already been sent, we need to see
+        * if the transaction is still pending and remove it in that case.
+        */
+
+       return close_transaction(transaction, card, RCODE_CANCELLED);
+}
+EXPORT_SYMBOL(fw_cancel_transaction);
+
+/*
+ * Packet-transmission completion handler for outbound requests: maps the
+ * link-layer ack code to an rcode and closes the transaction, except for
+ * ACK_PENDING, which means a split response is still to come.
+ */
+static void transmit_complete_callback(struct fw_packet *packet,
+                                      struct fw_card *card, int status)
+{
+       struct fw_transaction *t =
+           container_of(packet, struct fw_transaction, packet);
+
+       switch (status) {
+       case ACK_COMPLETE:
+               close_transaction(t, card, RCODE_COMPLETE);
+               break;
+       case ACK_PENDING:
+               /* Response will arrive later; remember when we sent it. */
+               t->timestamp = packet->timestamp;
+               break;
+       case ACK_BUSY_X:
+       case ACK_BUSY_A:
+       case ACK_BUSY_B:
+               close_transaction(t, card, RCODE_BUSY);
+               break;
+       case ACK_DATA_ERROR:
+               close_transaction(t, card, RCODE_DATA_ERROR);
+               break;
+       case ACK_TYPE_ERROR:
+               close_transaction(t, card, RCODE_TYPE_ERROR);
+               break;
+       default:
+               /*
+                * In this case the ack is really a juju specific
+                * rcode, so just forward that to the callback.
+                */
+               close_transaction(t, card, status);
+               break;
+       }
+}
+
+/*
+ * Fill in @packet's header quadlets, payload pointer and lengths for an
+ * outbound request of the given @tcode.  tcodes above 0x10 encode lock
+ * operations: the low nibble becomes the extended tcode of a
+ * TCODE_LOCK_REQUEST.  TCODE_STREAM_DATA uses a single-quadlet header
+ * where @destination_id carries the pre-built stream packet header bits.
+ */
+static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
+               int destination_id, int source_id, int generation, int speed,
+               unsigned long long offset, void *payload, size_t length)
+{
+       int ext_tcode;
+
+       if (tcode == TCODE_STREAM_DATA) {
+               packet->header[0] =
+                       HEADER_DATA_LENGTH(length) |
+                       destination_id |
+                       HEADER_TCODE(TCODE_STREAM_DATA);
+               packet->header_length = 4;
+               packet->payload = payload;
+               packet->payload_length = length;
+
+               goto common;
+       }
+
+       if (tcode > 0x10) {
+               ext_tcode = tcode & ~0x10;
+               tcode = TCODE_LOCK_REQUEST;
+       } else
+               ext_tcode = 0;
+
+       packet->header[0] =
+               HEADER_RETRY(RETRY_X) |
+               HEADER_TLABEL(tlabel) |
+               HEADER_TCODE(tcode) |
+               HEADER_DESTINATION(destination_id);
+       packet->header[1] =
+               HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
+       /* Low 32 bits of the 48-bit destination offset. */
+       packet->header[2] =
+               offset;
+
+       switch (tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+               /* Quadlet writes carry their data in the 4th header quadlet. */
+               packet->header[3] = *(u32 *)payload;
+               packet->header_length = 16;
+               packet->payload_length = 0;
+               break;
+
+       case TCODE_LOCK_REQUEST:
+       case TCODE_WRITE_BLOCK_REQUEST:
+               packet->header[3] =
+                       HEADER_DATA_LENGTH(length) |
+                       HEADER_EXTENDED_TCODE(ext_tcode);
+               packet->header_length = 16;
+               packet->payload = payload;
+               packet->payload_length = length;
+               break;
+
+       case TCODE_READ_QUADLET_REQUEST:
+               packet->header_length = 12;
+               packet->payload_length = 0;
+               break;
+
+       case TCODE_READ_BLOCK_REQUEST:
+               packet->header[3] =
+                       HEADER_DATA_LENGTH(length) |
+                       HEADER_EXTENDED_TCODE(ext_tcode);
+               packet->header_length = 16;
+               packet->payload_length = 0;
+               break;
+       }
+ common:
+       packet->speed = speed;
+       packet->generation = generation;
+       packet->ack = 0;
+       packet->payload_bus = 0;
+}
+
+/**
+ * This function provides low-level access to the IEEE1394 transaction
+ * logic.  Most C programs would use either fw_read(), fw_write() or
+ * fw_lock() instead - those function are convenience wrappers for
+ * this function.  The fw_send_request() function is primarily
+ * provided as a flexible, one-stop entry point for languages bindings
+ * and protocol bindings.
+ *
+ * FIXME: Document this function further, in particular the possible
+ * values for rcode in the callback.  In short, we map ACK_COMPLETE to
+ * RCODE_COMPLETE, internal errors set errno and set rcode to
+ * RCODE_SEND_ERROR (which is out of range for standard ieee1394
+ * rcodes).  All other rcodes are forwarded unchanged.  For all
+ * errors, payload is NULL, length is 0.
+ *
+ * Can not expect the callback to be called before the function
+ * returns, though this does happen in some cases (ACK_COMPLETE and
+ * errors).
+ *
+ * The payload is only used for write requests and must not be freed
+ * until the callback has been called.
+ *
+ * @param card the card from which to send the request
+ * @param tcode the tcode for this transaction.  Do not use
+ *   TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
+ *   etc. to specify tcode and ext_tcode.
+ * @param node_id the destination node ID (bus ID and PHY ID concatenated)
+ * @param generation the generation for which node_id is valid
+ * @param speed the speed to use for sending the request
+ * @param offset the 48 bit offset on the destination node
+ * @param payload the data payload for the request subaction
+ * @param length the length in bytes of the data to read
+ * @param callback function to be called when the transaction is completed
+ * @param callback_data pointer to arbitrary data, which will be
+ *   passed to the callback
+ *
+ * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
+ * needs to synthesize @destination_id with fw_stream_packet_destination_id().
+ */
+void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+                    int destination_id, int generation, int speed,
+                    unsigned long long offset, void *payload, size_t length,
+                    fw_transaction_callback_t callback, void *callback_data)
+{
+       unsigned long flags;
+       int tlabel;
+
+       /*
+        * Bump the flush timer up 100ms first of all so we
+        * don't race with a flush timer callback.
+        */
+
+       mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
+
+       /*
+        * Allocate tlabel from the bitmap and put the transaction on
+        * the list while holding the card spinlock.
+        */
+
+       spin_lock_irqsave(&card->lock, flags);
+
+       /*
+        * If the next tlabel in round-robin order is still in flight,
+        * fail the request immediately rather than waiting.
+        */
+       tlabel = card->current_tlabel;
+       if (card->tlabel_mask & (1ULL << tlabel)) {
+               spin_unlock_irqrestore(&card->lock, flags);
+               callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
+               return;
+       }
+
+       /* tlabels are 6 bits wide; advance round-robin modulo 64. */
+       card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
+       card->tlabel_mask |= (1ULL << tlabel);
+
+       t->node_id = destination_id;
+       t->tlabel = tlabel;
+       t->callback = callback;
+       t->callback_data = callback_data;
+
+       fw_fill_request(&t->packet, tcode, t->tlabel,
+                       destination_id, card->node_id, generation,
+                       speed, offset, payload, length);
+       t->packet.callback = transmit_complete_callback;
+
+       list_add_tail(&t->link, &card->transaction_list);
+
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       /* Hand the packet to the card driver outside of card->lock. */
+       card->driver->send_request(card, &t->packet);
+}
+EXPORT_SYMBOL(fw_send_request);
+
+struct transaction_callback_data {
+       struct completion done;
+       void *payload;
+       int rcode;
+};
+
+/*
+ * Completion callback used by fw_run_transaction(): copies read data back
+ * into the caller's buffer (only on RCODE_COMPLETE), records the rcode
+ * and wakes the waiter.
+ */
+static void transaction_callback(struct fw_card *card, int rcode,
+                                void *payload, size_t length, void *data)
+{
+       struct transaction_callback_data *d = data;
+
+       if (rcode == RCODE_COMPLETE)
+               memcpy(d->payload, payload, length);
+       d->rcode = rcode;
+       complete(&d->done);
+}
+
+/**
+ * fw_run_transaction - send request and sleep until transaction is completed
+ *
+ * Synchronous wrapper around fw_send_request(); must not be called from
+ * atomic context since it sleeps in wait_for_completion().
+ *
+ * Returns the RCODE.
+ */
+int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+                      int generation, int speed, unsigned long long offset,
+                      void *payload, size_t length)
+{
+       struct transaction_callback_data d;
+       /* Lives on the stack: valid until wait_for_completion() returns. */
+       struct fw_transaction t;
+
+       init_completion(&d.done);
+       d.payload = payload;
+       fw_send_request(card, &t, tcode, destination_id, generation, speed,
+                       offset, payload, length, transaction_callback, &d);
+       wait_for_completion(&d.done);
+
+       return d.rcode;
+}
+EXPORT_SYMBOL(fw_run_transaction);
+
+static DEFINE_MUTEX(phy_config_mutex);
+static DECLARE_COMPLETION(phy_config_done);
+
+/* Wakes fw_send_phy_config() once the PHY config packet has been sent. */
+static void transmit_phy_packet_callback(struct fw_packet *packet,
+                                        struct fw_card *card, int status)
+{
+       complete(&phy_config_done);
+}
+
+static struct fw_packet phy_config_packet = {
+       .header_length  = 8,
+       .payload_length = 0,
+       .speed          = SCODE_100,
+       .callback       = transmit_phy_packet_callback,
+};
+
+/*
+ * fw_send_phy_config - broadcast a PHY configuration packet
+ * @card:       card to send on
+ * @node_id:    node to force as root (low 6 bits used)
+ * @generation: bus generation the packet is valid for
+ * @gap_count:  gap count to program into all PHYs
+ *
+ * Uses a single statically allocated packet, serialized by
+ * phy_config_mutex; waits up to 100ms for transmission to complete.
+ */
+void fw_send_phy_config(struct fw_card *card,
+                       int node_id, int generation, int gap_count)
+{
+       long timeout = DIV_ROUND_UP(HZ, 10);
+       u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
+                  PHY_CONFIG_ROOT_ID(node_id) |
+                  PHY_CONFIG_GAP_COUNT(gap_count);
+
+       mutex_lock(&phy_config_mutex);
+
+       /* PHY packets are sent as a quadlet followed by its complement. */
+       phy_config_packet.header[0] = data;
+       phy_config_packet.header[1] = ~data;
+       phy_config_packet.generation = generation;
+       INIT_COMPLETION(phy_config_done);
+
+       card->driver->send_request(card, &phy_config_packet);
+       wait_for_completion_timeout(&phy_config_done, timeout);
+
+       mutex_unlock(&phy_config_mutex);
+}
+
+/*
+ * Cancel all pending transactions on @card and complete each with
+ * RCODE_CANCELLED.  The whole list is spliced off under card->lock so
+ * the per-transaction work can run without the lock held.
+ */
+void fw_flush_transactions(struct fw_card *card)
+{
+       struct fw_transaction *t, *next;
+       struct list_head list;
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&list);
+       spin_lock_irqsave(&card->lock, flags);
+       list_splice_init(&card->transaction_list, &list);
+       card->tlabel_mask = 0;
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       list_for_each_entry_safe(t, next, &list, link) {
+               card->driver->cancel_packet(card, &t->packet);
+
+               /*
+                * At this point cancel_packet will never call the
+                * transaction callback, since we just took all the
+                * transactions out of the list.  So do it here.
+                */
+               t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
+       }
+}
+
+/*
+ * Return the first handler on @list whose address range overlaps
+ * [offset, offset + length), or NULL if none does.
+ */
+static struct fw_address_handler *lookup_overlapping_address_handler(
+       struct list_head *list, unsigned long long offset, size_t length)
+{
+       struct fw_address_handler *handler;
+
+       list_for_each_entry(handler, list, link) {
+               if (handler->offset < offset + length &&
+                   offset < handler->offset + handler->length)
+                       return handler;
+       }
+
+       return NULL;
+}
+
+/*
+ * Return the first handler on @list whose address range fully contains
+ * [offset, offset + length), or NULL if none does.
+ */
+static struct fw_address_handler *lookup_enclosing_address_handler(
+       struct list_head *list, unsigned long long offset, size_t length)
+{
+       struct fw_address_handler *handler;
+
+       list_for_each_entry(handler, list, link) {
+               if (handler->offset <= offset &&
+                   offset + length <= handler->offset + handler->length)
+                       return handler;
+       }
+
+       return NULL;
+}
+
+static DEFINE_SPINLOCK(address_handler_lock);
+static LIST_HEAD(address_handler_list);
+
+const struct fw_address_region fw_high_memory_region =
+       { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL,  };
+EXPORT_SYMBOL(fw_high_memory_region);
+
+#if 0
+const struct fw_address_region fw_low_memory_region =
+       { .start = 0x000000000000ULL, .end = 0x000100000000ULL,  };
+const struct fw_address_region fw_private_region =
+       { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL,  };
+const struct fw_address_region fw_csr_region =
+       { .start = CSR_REGISTER_BASE,
+         .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END,  };
+const struct fw_address_region fw_unit_space_region =
+       { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
+#endif  /*  0  */
+
+/**
+ * fw_core_add_address_handler - register for incoming requests
+ * @handler: callback
+ * @region: region in the IEEE 1212 node space address range
+ *
+ * region->start, ->end, and handler->length have to be quadlet-aligned.
+ *
+ * When a request is received that falls within the specified address range,
+ * the specified callback is invoked.  The parameters passed to the callback
+ * give the details of the particular request.
+ *
+ * Return value:  0 on success, non-zero otherwise.
+ * The start offset of the handler's address region is determined by
+ * fw_core_add_address_handler() and is returned in handler->offset.
+ */
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+                               const struct fw_address_region *region)
+{
+       struct fw_address_handler *other;
+       unsigned long flags;
+       int ret = -EBUSY;
+
+       /*
+        * Reject regions outside the 48-bit address space, non-quadlet
+        * alignment, empty or inverted regions, and zero-length handlers.
+        */
+       if (region->start & 0xffff000000000003ULL ||
+           region->end   & 0xffff000000000003ULL ||
+           region->start >= region->end ||
+           handler->length & 3 ||
+           handler->length == 0)
+               return -EINVAL;
+
+       spin_lock_irqsave(&address_handler_lock, flags);
+
+       /*
+        * First-fit scan: start at region->start and skip past each
+        * existing handler that overlaps, until the handler fits or the
+        * region is exhausted (ret stays -EBUSY in that case).
+        */
+       handler->offset = region->start;
+       while (handler->offset + handler->length <= region->end) {
+               other =
+                   lookup_overlapping_address_handler(&address_handler_list,
+                                                      handler->offset,
+                                                      handler->length);
+               if (other != NULL) {
+                       handler->offset += other->length;
+               } else {
+                       list_add_tail(&handler->link, &address_handler_list);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(&address_handler_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(fw_core_add_address_handler);
+EXPORT_SYMBOL(fw_core_add_address_handler);
+
+/**
+ * fw_core_remove_address_handler - unregister an address handler
+ */
+void fw_core_remove_address_handler(struct fw_address_handler *handler)
+{
+       unsigned long flags;
+
+       /* Unlink under the same lock that request dispatch takes. */
+       spin_lock_irqsave(&address_handler_lock, flags);
+       list_del(&handler->link);
+       spin_unlock_irqrestore(&address_handler_lock, flags);
+}
+EXPORT_SYMBOL(fw_core_remove_address_handler);
+EXPORT_SYMBOL(fw_core_remove_address_handler);
+
+/* An inbound request together with the response being prepared for it. */
+struct fw_request {
+       struct fw_packet response;      /* outbound response packet */
+       u32 request_header[4];          /* copy of the request's header */
+       int ack;                        /* ack code the request was ack'ed with */
+       u32 length;                     /* payload length in bytes */
+       u32 data[0];                    /* request payload (flexible array) */
+};
+
+/* Response-transmission callback: frees the enclosing fw_request. */
+static void free_response_callback(struct fw_packet *packet,
+                                  struct fw_card *card, int status)
+{
+       struct fw_request *request;
+
+       request = container_of(packet, struct fw_request, response);
+       kfree(request);
+}
+
+/*
+ * fw_fill_response - build a response packet header from a request header
+ * @response:       packet to fill in
+ * @request_header: the four header quadlets of the original request
+ * @rcode:          response code to report
+ * @payload:        read/lock result data, or NULL
+ * @length:         length of @payload in bytes
+ *
+ * Source and destination are swapped relative to the request; the tlabel
+ * is echoed back so the requester can match the response.
+ */
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+                     int rcode, void *payload, size_t length)
+{
+       int tcode, tlabel, extended_tcode, source, destination;
+
+       tcode          = HEADER_GET_TCODE(request_header[0]);
+       tlabel         = HEADER_GET_TLABEL(request_header[0]);
+       source         = HEADER_GET_DESTINATION(request_header[0]);
+       destination    = HEADER_GET_SOURCE(request_header[1]);
+       extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
+
+       response->header[0] =
+               HEADER_RETRY(RETRY_1) |
+               HEADER_TLABEL(tlabel) |
+               HEADER_DESTINATION(destination);
+       response->header[1] =
+               HEADER_SOURCE(source) |
+               HEADER_RCODE(rcode);
+       response->header[2] = 0;
+
+       switch (tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+       case TCODE_WRITE_BLOCK_REQUEST:
+               response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
+               response->header_length = 12;
+               response->payload_length = 0;
+               break;
+
+       case TCODE_READ_QUADLET_REQUEST:
+               response->header[0] |=
+                       HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
+               /* Quadlet read data rides in the 4th header quadlet. */
+               if (payload != NULL)
+                       response->header[3] = *(u32 *)payload;
+               else
+                       response->header[3] = 0;
+               response->header_length = 16;
+               response->payload_length = 0;
+               break;
+
+       case TCODE_READ_BLOCK_REQUEST:
+       case TCODE_LOCK_REQUEST:
+               /* Response tcode is the request tcode + 2 for these types. */
+               response->header[0] |= HEADER_TCODE(tcode + 2);
+               response->header[3] =
+                       HEADER_DATA_LENGTH(length) |
+                       HEADER_EXTENDED_TCODE(extended_tcode);
+               response->header_length = 16;
+               response->payload = payload;
+               response->payload_length = length;
+               break;
+
+       default:
+               BUG();
+               return;
+       }
+
+       response->payload_bus = 0;
+}
+EXPORT_SYMBOL(fw_fill_response);
+
+/*
+ * Allocate and initialize an fw_request for the inbound packet @p,
+ * copying its header and payload.  Returns NULL on corrupt tcode or
+ * allocation failure (GFP_ATOMIC: may run in interrupt context).
+ */
+static struct fw_request *allocate_request(struct fw_packet *p)
+{
+       struct fw_request *request;
+       u32 *data, length;
+       int request_tcode, t;
+
+       request_tcode = HEADER_GET_TCODE(p->header[0]);
+       switch (request_tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+               /* Quadlet write data sits in the 4th header quadlet. */
+               data = &p->header[3];
+               length = 4;
+               break;
+
+       case TCODE_WRITE_BLOCK_REQUEST:
+       case TCODE_LOCK_REQUEST:
+               data = p->payload;
+               length = HEADER_GET_DATA_LENGTH(p->header[3]);
+               break;
+
+       case TCODE_READ_QUADLET_REQUEST:
+               data = NULL;
+               length = 4;
+               break;
+
+       case TCODE_READ_BLOCK_REQUEST:
+               data = NULL;
+               length = HEADER_GET_DATA_LENGTH(p->header[3]);
+               break;
+
+       default:
+               fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
+                        p->header[0], p->header[1], p->header[2]);
+               return NULL;
+       }
+
+       request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
+       if (request == NULL)
+               return NULL;
+
+       /*
+        * Schedule the response 4000 cycles after the request's
+        * timestamp.  The low 13 bits are the cycle count (0..7999);
+        * carry into the seconds field (bit 13) when the sum wraps.
+        */
+       t = (p->timestamp & 0x1fff) + 4000;
+       if (t >= 8000)
+               t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
+       else
+               t = (p->timestamp & ~0x1fff) + t;
+
+       request->response.speed = p->speed;
+       request->response.timestamp = t;
+       request->response.generation = p->generation;
+       request->response.ack = 0;
+       request->response.callback = free_response_callback;
+       request->ack = p->ack;
+       request->length = length;
+       if (data)
+               memcpy(request->data, data, length);
+
+       memcpy(request->request_header, p->header, sizeof(p->header));
+
+       return request;
+}
+
+/*
+ * fw_send_response - transmit the response for an inbound request
+ * @card:    card the request arrived on
+ * @request: request as passed to the address handler; consumed here
+ * @rcode:   response code; payload is only included for RCODE_COMPLETE
+ *
+ * Takes ownership of @request: it is either freed immediately (when no
+ * response is due) or freed by free_response_callback after transmission.
+ */
+void fw_send_response(struct fw_card *card,
+                     struct fw_request *request, int rcode)
+{
+       /* unified transaction or broadcast transaction: don't respond */
+       if (request->ack != ACK_PENDING ||
+           HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
+               kfree(request);
+               return;
+       }
+
+       if (rcode == RCODE_COMPLETE)
+               fw_fill_response(&request->response, request->request_header,
+                                rcode, request->data, request->length);
+       else
+               fw_fill_response(&request->response, request->request_header,
+                                rcode, NULL, 0);
+
+       card->driver->send_response(card, &request->response);
+}
+EXPORT_SYMBOL(fw_send_response);
+
+/*
+ * fw_core_handle_request - dispatch an inbound request packet
+ * @card: card the packet arrived on
+ * @p:    the received request packet
+ *
+ * Looks up the address handler enclosing the request's offset range and
+ * invokes it, or responds with RCODE_ADDRESS_ERROR if none is registered.
+ * Packets that were not ack'ed pending/complete are dropped.
+ */
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
+{
+       struct fw_address_handler *handler;
+       struct fw_request *request;
+       unsigned long long offset;
+       unsigned long flags;
+       int tcode, destination, source;
+
+       if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
+               return;
+
+       request = allocate_request(p);
+       if (request == NULL) {
+               /* FIXME: send statically allocated busy packet. */
+               return;
+       }
+
+       /* Reassemble the 48-bit offset from header quadlets 1 and 2. */
+       offset      =
+               ((unsigned long long)
+                HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
+       tcode       = HEADER_GET_TCODE(p->header[0]);
+       destination = HEADER_GET_DESTINATION(p->header[0]);
+       source      = HEADER_GET_SOURCE(p->header[1]);
+
+       spin_lock_irqsave(&address_handler_lock, flags);
+       handler = lookup_enclosing_address_handler(&address_handler_list,
+                                                  offset, request->length);
+       spin_unlock_irqrestore(&address_handler_lock, flags);
+
+       /*
+        * FIXME: lookup the fw_node corresponding to the sender of
+        * this request and pass that to the address handler instead
+        * of the node ID.  We may also want to move the address
+        * allocations to fw_node so we only do this callback if the
+        * upper layers registered it for this node.
+        */
+
+       if (handler == NULL)
+               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+       else
+               handler->address_callback(card, request,
+                                         tcode, destination, source,
+                                         p->generation, p->speed, offset,
+                                         request->data, request->length,
+                                         handler->callback_data);
+}
+EXPORT_SYMBOL(fw_core_handle_request);
+
+/*
+ * fw_core_handle_response - dispatch an inbound response packet
+ * @card: card the packet arrived on
+ * @p:    the received response packet
+ *
+ * Matches the response against a pending transaction by source node ID
+ * and tlabel, removes it from the list, releases its tlabel and invokes
+ * the transaction callback.  Unsolicited responses are logged and dropped.
+ */
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
+{
+       struct fw_transaction *t;
+       unsigned long flags;
+       u32 *data;
+       size_t data_length;
+       int tcode, tlabel, destination, source, rcode;
+
+       tcode       = HEADER_GET_TCODE(p->header[0]);
+       tlabel      = HEADER_GET_TLABEL(p->header[0]);
+       destination = HEADER_GET_DESTINATION(p->header[0]);
+       source      = HEADER_GET_SOURCE(p->header[1]);
+       rcode       = HEADER_GET_RCODE(p->header[1]);
+
+       spin_lock_irqsave(&card->lock, flags);
+       list_for_each_entry(t, &card->transaction_list, link) {
+               if (t->node_id == source && t->tlabel == tlabel) {
+                       list_del(&t->link);
+                       /*
+                        * tlabel_mask is 64 bits wide and tlabel may be up
+                        * to 63; use 1ULL so the shift is well-defined and
+                        * actually clears tlabels >= 32 (matches the 1ULL
+                        * usage in close_transaction and fw_send_request).
+                        */
+                       card->tlabel_mask &= ~(1ULL << t->tlabel);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       /* Loop ran to completion without a match: nobody asked for this. */
+       if (&t->link == &card->transaction_list) {
+               fw_notify("Unsolicited response (source %x, tlabel %x)\n",
+                         source, tlabel);
+               return;
+       }
+
+       /*
+        * FIXME: sanity check packet, is length correct, does tcodes
+        * and addresses match.
+        */
+
+       switch (tcode) {
+       case TCODE_READ_QUADLET_RESPONSE:
+               data = (u32 *) &p->header[3];
+               data_length = 4;
+               break;
+
+       case TCODE_WRITE_RESPONSE:
+               data = NULL;
+               data_length = 0;
+               break;
+
+       case TCODE_READ_BLOCK_RESPONSE:
+       case TCODE_LOCK_RESPONSE:
+               data = p->payload;
+               data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
+               break;
+
+       default:
+               /* Should never happen, this is just to shut up gcc. */
+               data = NULL;
+               data_length = 0;
+               break;
+       }
+
+       /*
+        * The response handler may be executed while the request handler
+        * is still pending.  Cancel the request handler.
+        */
+       card->driver->cancel_packet(card, &t->packet);
+
+       t->callback(card, rcode, data, data_length, t->callback_data);
+}
+EXPORT_SYMBOL(fw_core_handle_response);
+
+static const struct fw_address_region topology_map_region =
+       { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
+         .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
+
+/*
+ * Address handler serving read requests to the TOPOLOGY_MAP CSR from
+ * card->topology_map; rejects non-read tcodes and unaligned accesses.
+ */
+static void handle_topology_map(struct fw_card *card, struct fw_request *request,
+               int tcode, int destination, int source, int generation,
+               int speed, unsigned long long offset,
+               void *payload, size_t length, void *callback_data)
+{
+       int i, start, end;
+       __be32 *map;
+
+       if (!TCODE_IS_READ_REQUEST(tcode)) {
+               fw_send_response(card, request, RCODE_TYPE_ERROR);
+               return;
+       }
+
+       /* The map is quadlet-addressed; partial-quadlet access is invalid. */
+       if ((offset & 3) > 0 || (length & 3) > 0) {
+               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+               return;
+       }
+
+       start = (offset - topology_map_region.start) / 4;
+       end = start + length / 4;
+       map = payload;
+
+       for (i = 0; i < length / 4; i++)
+               map[i] = cpu_to_be32(card->topology_map[start + i]);
+
+       fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+static struct fw_address_handler topology_map = {
+       .length                 = 0x200,
+       .address_callback       = handle_topology_map,
+};
+
+static const struct fw_address_region registers_region =
+       { .start = CSR_REGISTER_BASE,
+         .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
+
+/*
+ * Address handler for the core CSR register range: serves CYCLE_TIME /
+ * BUS_TIME reads and BROADCAST_CHANNEL read/write; everything else is
+ * either handled in OHCI hardware (BUG) or answered with an error rcode.
+ */
+static void handle_registers(struct fw_card *card, struct fw_request *request,
+               int tcode, int destination, int source, int generation,
+               int speed, unsigned long long offset,
+               void *payload, size_t length, void *callback_data)
+{
+       int reg = offset & ~CSR_REGISTER_BASE;
+       unsigned long long bus_time;
+       __be32 *data = payload;
+       int rcode = RCODE_COMPLETE;
+
+       switch (reg) {
+       case CSR_CYCLE_TIME:
+       case CSR_BUS_TIME:
+               if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
+                       rcode = RCODE_TYPE_ERROR;
+                       break;
+               }
+
+               /* BUS_TIME is the upper part of the same counter (>> 25). */
+               bus_time = card->driver->get_bus_time(card);
+               if (reg == CSR_CYCLE_TIME)
+                       *data = cpu_to_be32(bus_time);
+               else
+                       *data = cpu_to_be32(bus_time >> 25);
+               break;
+
+       case CSR_BROADCAST_CHANNEL:
+               if (tcode == TCODE_READ_QUADLET_REQUEST)
+                       *data = cpu_to_be32(card->broadcast_channel);
+               else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
+                       /* Only the VALID bit is writable; INITIAL is forced. */
+                       card->broadcast_channel =
+                           (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
+                           BROADCAST_CHANNEL_INITIAL;
+               else
+                       rcode = RCODE_TYPE_ERROR;
+               break;
+
+       case CSR_BUS_MANAGER_ID:
+       case CSR_BANDWIDTH_AVAILABLE:
+       case CSR_CHANNELS_AVAILABLE_HI:
+       case CSR_CHANNELS_AVAILABLE_LO:
+               /*
+                * FIXME: these are handled by the OHCI hardware and
+                * the stack never sees these request. If we add
+                * support for a new type of controller that doesn't
+                * handle this in hardware we need to deal with these
+                * transactions.
+                */
+               BUG();
+               break;
+
+       case CSR_BUSY_TIMEOUT:
+               /* FIXME: Implement this. */
+               /* fallthrough */
+
+       default:
+               rcode = RCODE_ADDRESS_ERROR;
+               break;
+       }
+
+       fw_send_response(card, request, rcode);
+}
+
+static struct fw_address_handler registers = {
+       .length                 = 0x400,
+       .address_callback       = handle_registers,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
+MODULE_LICENSE("GPL");
+
+static const u32 vendor_textual_descriptor[] = {
+       /* textual descriptor leaf () */
+       0x00060000,
+       0x00000000,
+       0x00000000,
+       0x4c696e75,             /* L i n u */
+       0x78204669,             /* x   F i */
+       0x72657769,             /* r e w i */
+       0x72650000,             /* r e     */
+};
+
+static const u32 model_textual_descriptor[] = {
+       /* model descriptor leaf () */
+       0x00030000,
+       0x00000000,
+       0x00000000,
+       0x4a756a75,             /* J u j u */
+};
+
+static struct fw_descriptor vendor_id_descriptor = {
+       .length = ARRAY_SIZE(vendor_textual_descriptor),
+       .immediate = 0x03d00d1e,
+       .key = 0x81000000,
+       .data = vendor_textual_descriptor,
+};
+
+static struct fw_descriptor model_id_descriptor = {
+       .length = ARRAY_SIZE(model_textual_descriptor),
+       .immediate = 0x17000001,
+       .key = 0x81000000,
+       .data = model_textual_descriptor,
+};
+
+/*
+ * Module init: register the firewire bus type and character device, then
+ * install the core CSR address handlers and config-ROM descriptors.
+ */
+static int __init fw_core_init(void)
+{
+       int ret;
+
+       ret = bus_register(&fw_bus_type);
+       if (ret < 0)
+               return ret;
+
+       fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
+       if (fw_cdev_major < 0) {
+               bus_unregister(&fw_bus_type);
+               return fw_cdev_major;
+       }
+
+       /*
+        * NOTE(review): the return values of these registrations are
+        * ignored; fw_core_add_address_handler can fail with -EBUSY.
+        */
+       fw_core_add_address_handler(&topology_map, &topology_map_region);
+       fw_core_add_address_handler(&registers, &registers_region);
+       fw_core_add_descriptor(&vendor_id_descriptor);
+       fw_core_add_descriptor(&model_id_descriptor);
+
+       return 0;
+}
+
+/* Module exit: tear down chardev, bus type and the device IDR. */
+static void __exit fw_core_cleanup(void)
+{
+       unregister_chrdev(fw_cdev_major, "firewire");
+       bus_unregister(&fw_bus_type);
+       idr_destroy(&fw_device_idr);
+}
+
+module_init(fw_core_init);
+module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
new file mode 100644 (file)
index 0000000..0a25a7b
--- /dev/null
@@ -0,0 +1,293 @@
+#ifndef _FIREWIRE_CORE_H
+#define _FIREWIRE_CORE_H
+
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+#include <linux/mm_types.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+
+struct device;
+struct fw_card;
+struct fw_device;
+struct fw_iso_buffer;
+struct fw_iso_context;
+struct fw_iso_packet;
+struct fw_node;
+struct fw_packet;
+
+
+/* -card */
+
+/* bitfields within the PHY registers */
+#define PHY_LINK_ACTIVE                0x80
+#define PHY_CONTENDER          0x40
+#define PHY_BUS_RESET          0x40
+#define PHY_BUS_SHORT_RESET    0x40
+
+#define BANDWIDTH_AVAILABLE_INITIAL    4915
+#define BROADCAST_CHANNEL_INITIAL      (1 << 31 | 31)
+#define BROADCAST_CHANNEL_VALID                (1 << 30)
+
+struct fw_card_driver {
+       /*
+        * Enable the given card with the given initial config rom.
+        * This function is expected to activate the card, and either
+        * enable the PHY or set the link_on bit and initiate a bus
+        * reset.
+        */
+       int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
+
+       int (*update_phy_reg)(struct fw_card *card, int address,
+                             int clear_bits, int set_bits);
+
+       /*
+        * Update the config rom for an enabled card.  This function
+        * should change the config rom that is presented on the bus
+        * an initiate a bus reset.
+        */
+       int (*set_config_rom)(struct fw_card *card,
+                             u32 *config_rom, size_t length);
+
+       void (*send_request)(struct fw_card *card, struct fw_packet *packet);
+       void (*send_response)(struct fw_card *card, struct fw_packet *packet);
+       /* Calling cancel is valid once a packet has been submitted. */
+       int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
+
+       /*
+        * Allow the specified node ID to do direct DMA out and in of
+        * host memory.  The card will disable this for all node when
+        * a bus reset happens, so driver need to reenable this after
+        * bus reset.  Returns 0 on success, -ENODEV if the card
+        * doesn't support this, -ESTALE if the generation doesn't
+        * match.
+        */
+       int (*enable_phys_dma)(struct fw_card *card,
+                              int node_id, int generation);
+
+       u64 (*get_bus_time)(struct fw_card *card);
+
+       struct fw_iso_context *
+       (*allocate_iso_context)(struct fw_card *card,
+                               int type, int channel, size_t header_size);
+       void (*free_iso_context)(struct fw_iso_context *ctx);
+
+       int (*start_iso)(struct fw_iso_context *ctx,
+                        s32 cycle, u32 sync, u32 tags);
+
+       int (*queue_iso)(struct fw_iso_context *ctx,
+                        struct fw_iso_packet *packet,
+                        struct fw_iso_buffer *buffer,
+                        unsigned long payload);
+
+       int (*stop_iso)(struct fw_iso_context *ctx);
+};
+
+void fw_card_initialize(struct fw_card *card,
+               const struct fw_card_driver *driver, struct device *device);
+int fw_card_add(struct fw_card *card,
+               u32 max_receive, u32 link_speed, u64 guid);
+void fw_core_remove_card(struct fw_card *card);
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
+int fw_compute_block_crc(u32 *block);
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
+
+struct fw_descriptor {
+       struct list_head link;
+       size_t length;
+       u32 immediate;
+       u32 key;
+       const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+
+/* -cdev */
+
+extern const struct file_operations fw_device_ops;
+
+void fw_device_cdev_update(struct fw_device *device);
+void fw_device_cdev_remove(struct fw_device *device);
+
+
+/* -device */
+
+extern struct rw_semaphore fw_device_rwsem;
+extern struct idr fw_device_idr;
+extern int fw_cdev_major;
+
+struct fw_device *fw_device_get_by_devt(dev_t devt);
+int fw_device_set_broadcast_channel(struct device *dev, void *gen);
+void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
+
+
+/* -iso */
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointer to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frame automatically).
+ */
+struct fw_iso_packet {
+       u16 payload_length;     /* Length of indirect payload. */
+       u32 interrupt:1;        /* Generate interrupt on this packet */
+       u32 skip:1;             /* Set to not send packet at all. */
+       u32 tag:2;
+       u32 sy:4;
+       u32 header_length:8;    /* Length of immediate header. */
+       u32 header[0];
+};
+
+#define FW_ISO_CONTEXT_TRANSMIT        0
+#define FW_ISO_CONTEXT_RECEIVE 1
+
+#define FW_ISO_CONTEXT_MATCH_TAG0       1
+#define FW_ISO_CONTEXT_MATCH_TAG1       2
+#define FW_ISO_CONTEXT_MATCH_TAG2       4
+#define FW_ISO_CONTEXT_MATCH_TAG3       8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS  15
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction.  Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space.  We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+struct fw_iso_buffer {
+       enum dma_data_direction direction;
+       struct page **pages;
+       int page_count;
+};
+
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+                                 u32 cycle, size_t header_length,
+                                 void *header, void *data);
+
+struct fw_iso_context {
+       struct fw_card *card;
+       int type;
+       int channel;
+       int speed;
+       size_t header_size;
+       fw_iso_callback_t callback;
+       void *callback_data;
+};
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+                      int page_count, enum dma_data_direction direction);
+int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+               int type, int channel, int speed, size_t header_size,
+               fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+                        struct fw_iso_packet *packet,
+                        struct fw_iso_buffer *buffer,
+                        unsigned long payload);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+                        int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
+
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+               u64 channels_mask, int *channel, int *bandwidth, bool allocate);
+
+
+/* -topology */
+
+enum {
+       FW_NODE_CREATED,
+       FW_NODE_UPDATED,
+       FW_NODE_DESTROYED,
+       FW_NODE_LINK_ON,
+       FW_NODE_LINK_OFF,
+       FW_NODE_INITIATED_RESET,
+};
+
+struct fw_node {
+       u16 node_id;
+       u8 color;
+       u8 port_count;
+       u8 link_on:1;
+       u8 initiated_reset:1;
+       u8 b_path:1;
+       u8 phy_speed:2; /* As in the self ID packet. */
+       u8 max_speed:2; /* Minimum of all phy-speeds on the path from the
+                        * local node to this node. */
+       u8 max_depth:4; /* Maximum depth to any leaf node */
+       u8 max_hops:4;  /* Max hops in this sub tree */
+       atomic_t ref_count;
+
+       /* For serializing node topology into a list. */
+       struct list_head link;
+
+       /* Upper layer specific data. */
+       void *data;
+
+       struct fw_node *ports[0];
+};
+
+static inline struct fw_node *fw_node_get(struct fw_node *node)
+{
+       atomic_inc(&node->ref_count);
+
+       return node;
+}
+
+static inline void fw_node_put(struct fw_node *node)
+{
+       if (atomic_dec_and_test(&node->ref_count))
+               kfree(node);
+}
+
+void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
+                             int generation, int self_id_count, u32 *self_ids);
+void fw_destroy_nodes(struct fw_card *card);
+
+/*
+ * Check whether new_generation is the immediate successor of old_generation.
+ * Take counter roll-over at 255 (as per OHCI) into account.
+ */
+static inline bool is_next_generation(int new_generation, int old_generation)
+{
+       return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
+}
+
+
+/* -transaction */
+
+#define TCODE_IS_READ_REQUEST(tcode)   (((tcode) & ~1) == 4)
+#define TCODE_IS_BLOCK_PACKET(tcode)   (((tcode) &  1) != 0)
+#define TCODE_IS_REQUEST(tcode)                (((tcode) &  2) == 0)
+#define TCODE_IS_RESPONSE(tcode)       (((tcode) &  2) != 0)
+#define TCODE_HAS_REQUEST_DATA(tcode)  (((tcode) & 12) != 4)
+#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
+
+#define LOCAL_BUS 0xffc0
+
+void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
+void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
+void fw_fill_response(struct fw_packet *response, u32 *request_header,
+                     int rcode, void *payload, size_t length);
+void fw_flush_transactions(struct fw_card *card);
+void fw_send_phy_config(struct fw_card *card,
+                       int node_id, int generation, int gap_count);
+
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+       return tag << 14 | channel << 8 | sy;
+}
+
+#endif /* _FIREWIRE_CORE_H */
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
deleted file mode 100644 (file)
index 8b8c8c2..0000000
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/completion.h>
-#include <linux/crc-itu-t.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/kref.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-
-#include "fw-transaction.h"
-#include "fw-topology.h"
-#include "fw-device.h"
-
-int fw_compute_block_crc(u32 *block)
-{
-       __be32 be32_block[256];
-       int i, length;
-
-       length = (*block >> 16) & 0xff;
-       for (i = 0; i < length; i++)
-               be32_block[i] = cpu_to_be32(block[i + 1]);
-       *block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
-
-       return length;
-}
-
-static DEFINE_MUTEX(card_mutex);
-static LIST_HEAD(card_list);
-
-static LIST_HEAD(descriptor_list);
-static int descriptor_count;
-
-#define BIB_CRC(v)             ((v) <<  0)
-#define BIB_CRC_LENGTH(v)      ((v) << 16)
-#define BIB_INFO_LENGTH(v)     ((v) << 24)
-
-#define BIB_LINK_SPEED(v)      ((v) <<  0)
-#define BIB_GENERATION(v)      ((v) <<  4)
-#define BIB_MAX_ROM(v)         ((v) <<  8)
-#define BIB_MAX_RECEIVE(v)     ((v) << 12)
-#define BIB_CYC_CLK_ACC(v)     ((v) << 16)
-#define BIB_PMC                        ((1) << 27)
-#define BIB_BMC                        ((1) << 28)
-#define BIB_ISC                        ((1) << 29)
-#define BIB_CMC                        ((1) << 30)
-#define BIB_IMC                        ((1) << 31)
-
-static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
-{
-       struct fw_descriptor *desc;
-       static u32 config_rom[256];
-       int i, j, length;
-
-       /*
-        * Initialize contents of config rom buffer.  On the OHCI
-        * controller, block reads to the config rom accesses the host
-        * memory, but quadlet read access the hardware bus info block
-        * registers.  That's just crack, but it means we should make
-        * sure the contents of bus info block in host memory matches
-        * the version stored in the OHCI registers.
-        */
-
-       memset(config_rom, 0, sizeof(config_rom));
-       config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
-       config_rom[1] = 0x31333934;
-
-       config_rom[2] =
-               BIB_LINK_SPEED(card->link_speed) |
-               BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
-               BIB_MAX_ROM(2) |
-               BIB_MAX_RECEIVE(card->max_receive) |
-               BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
-       config_rom[3] = card->guid >> 32;
-       config_rom[4] = card->guid;
-
-       /* Generate root directory. */
-       i = 5;
-       config_rom[i++] = 0;
-       config_rom[i++] = 0x0c0083c0; /* node capabilities */
-       j = i + descriptor_count;
-
-       /* Generate root directory entries for descriptors. */
-       list_for_each_entry (desc, &descriptor_list, link) {
-               if (desc->immediate > 0)
-                       config_rom[i++] = desc->immediate;
-               config_rom[i] = desc->key | (j - i);
-               i++;
-               j += desc->length;
-       }
-
-       /* Update root directory length. */
-       config_rom[5] = (i - 5 - 1) << 16;
-
-       /* End of root directory, now copy in descriptors. */
-       list_for_each_entry (desc, &descriptor_list, link) {
-               memcpy(&config_rom[i], desc->data, desc->length * 4);
-               i += desc->length;
-       }
-
-       /* Calculate CRCs for all blocks in the config rom.  This
-        * assumes that CRC length and info length are identical for
-        * the bus info block, which is always the case for this
-        * implementation. */
-       for (i = 0; i < j; i += length + 1)
-               length = fw_compute_block_crc(config_rom + i);
-
-       *config_rom_length = j;
-
-       return config_rom;
-}
-
-static void update_config_roms(void)
-{
-       struct fw_card *card;
-       u32 *config_rom;
-       size_t length;
-
-       list_for_each_entry (card, &card_list, link) {
-               config_rom = generate_config_rom(card, &length);
-               card->driver->set_config_rom(card, config_rom, length);
-       }
-}
-
-int fw_core_add_descriptor(struct fw_descriptor *desc)
-{
-       size_t i;
-
-       /*
-        * Check descriptor is valid; the length of all blocks in the
-        * descriptor has to add up to exactly the length of the
-        * block.
-        */
-       i = 0;
-       while (i < desc->length)
-               i += (desc->data[i] >> 16) + 1;
-
-       if (i != desc->length)
-               return -EINVAL;
-
-       mutex_lock(&card_mutex);
-
-       list_add_tail(&desc->link, &descriptor_list);
-       descriptor_count++;
-       if (desc->immediate > 0)
-               descriptor_count++;
-       update_config_roms();
-
-       mutex_unlock(&card_mutex);
-
-       return 0;
-}
-
-void fw_core_remove_descriptor(struct fw_descriptor *desc)
-{
-       mutex_lock(&card_mutex);
-
-       list_del(&desc->link);
-       descriptor_count--;
-       if (desc->immediate > 0)
-               descriptor_count--;
-       update_config_roms();
-
-       mutex_unlock(&card_mutex);
-}
-
-static int set_broadcast_channel(struct device *dev, void *data)
-{
-       fw_device_set_broadcast_channel(fw_device(dev), (long)data);
-       return 0;
-}
-
-static void allocate_broadcast_channel(struct fw_card *card, int generation)
-{
-       int channel, bandwidth = 0;
-
-       fw_iso_resource_manage(card, generation, 1ULL << 31,
-                              &channel, &bandwidth, true);
-       if (channel == 31) {
-               card->broadcast_channel_allocated = true;
-               device_for_each_child(card->device, (void *)(long)generation,
-                                     set_broadcast_channel);
-       }
-}
-
-static const char gap_count_table[] = {
-       63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
-};
-
-void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
-{
-       int scheduled;
-
-       fw_card_get(card);
-       scheduled = schedule_delayed_work(&card->work, delay);
-       if (!scheduled)
-               fw_card_put(card);
-}
-
-static void fw_card_bm_work(struct work_struct *work)
-{
-       struct fw_card *card = container_of(work, struct fw_card, work.work);
-       struct fw_device *root_device;
-       struct fw_node *root_node;
-       unsigned long flags;
-       int root_id, new_root_id, irm_id, local_id;
-       int gap_count, generation, grace, rcode;
-       bool do_reset = false;
-       bool root_device_is_running;
-       bool root_device_is_cmc;
-       __be32 lock_data[2];
-
-       spin_lock_irqsave(&card->lock, flags);
-
-       if (card->local_node == NULL) {
-               spin_unlock_irqrestore(&card->lock, flags);
-               goto out_put_card;
-       }
-
-       generation = card->generation;
-       root_node = card->root_node;
-       fw_node_get(root_node);
-       root_device = root_node->data;
-       root_device_is_running = root_device &&
-                       atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
-       root_device_is_cmc = root_device && root_device->cmc;
-       root_id  = root_node->node_id;
-       irm_id   = card->irm_node->node_id;
-       local_id = card->local_node->node_id;
-
-       grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
-
-       if (is_next_generation(generation, card->bm_generation) ||
-           (card->bm_generation != generation && grace)) {
-               /*
-                * This first step is to figure out who is IRM and
-                * then try to become bus manager.  If the IRM is not
-                * well defined (e.g. does not have an active link
-                * layer or does not responds to our lock request, we
-                * will have to do a little vigilante bus management.
-                * In that case, we do a goto into the gap count logic
-                * so that when we do the reset, we still optimize the
-                * gap count.  That could well save a reset in the
-                * next generation.
-                */
-
-               if (!card->irm_node->link_on) {
-                       new_root_id = local_id;
-                       fw_notify("IRM has link off, making local node (%02x) root.\n",
-                                 new_root_id);
-                       goto pick_me;
-               }
-
-               lock_data[0] = cpu_to_be32(0x3f);
-               lock_data[1] = cpu_to_be32(local_id);
-
-               spin_unlock_irqrestore(&card->lock, flags);
-
-               rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
-                               irm_id, generation, SCODE_100,
-                               CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
-                               lock_data, sizeof(lock_data));
-
-               if (rcode == RCODE_GENERATION)
-                       /* Another bus reset, BM work has been rescheduled. */
-                       goto out;
-
-               if (rcode == RCODE_COMPLETE &&
-                   lock_data[0] != cpu_to_be32(0x3f)) {
-
-                       /* Somebody else is BM.  Only act as IRM. */
-                       if (local_id == irm_id)
-                               allocate_broadcast_channel(card, generation);
-
-                       goto out;
-               }
-
-               spin_lock_irqsave(&card->lock, flags);
-
-               if (rcode != RCODE_COMPLETE) {
-                       /*
-                        * The lock request failed, maybe the IRM
-                        * isn't really IRM capable after all. Let's
-                        * do a bus reset and pick the local node as
-                        * root, and thus, IRM.
-                        */
-                       new_root_id = local_id;
-                       fw_notify("BM lock failed, making local node (%02x) root.\n",
-                                 new_root_id);
-                       goto pick_me;
-               }
-       } else if (card->bm_generation != generation) {
-               /*
-                * We weren't BM in the last generation, and the last
-                * bus reset is less than 125ms ago.  Reschedule this job.
-                */
-               spin_unlock_irqrestore(&card->lock, flags);
-               fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
-               goto out;
-       }
-
-       /*
-        * We're bus manager for this generation, so next step is to
-        * make sure we have an active cycle master and do gap count
-        * optimization.
-        */
-       card->bm_generation = generation;
-
-       if (root_device == NULL) {
-               /*
-                * Either link_on is false, or we failed to read the
-                * config rom.  In either case, pick another root.
-                */
-               new_root_id = local_id;
-       } else if (!root_device_is_running) {
-               /*
-                * If we haven't probed this device yet, bail out now
-                * and let's try again once that's done.
-                */
-               spin_unlock_irqrestore(&card->lock, flags);
-               goto out;
-       } else if (root_device_is_cmc) {
-               /*
-                * FIXME: I suppose we should set the cmstr bit in the
-                * STATE_CLEAR register of this node, as described in
-                * 1394-1995, 8.4.2.6.  Also, send out a force root
-                * packet for this node.
-                */
-               new_root_id = root_id;
-       } else {
-               /*
-                * Current root has an active link layer and we
-                * successfully read the config rom, but it's not
-                * cycle master capable.
-                */
-               new_root_id = local_id;
-       }
-
- pick_me:
-       /*
-        * Pick a gap count from 1394a table E-1.  The table doesn't cover
-        * the typically much larger 1394b beta repeater delays though.
-        */
-       if (!card->beta_repeaters_present &&
-           root_node->max_hops < ARRAY_SIZE(gap_count_table))
-               gap_count = gap_count_table[root_node->max_hops];
-       else
-               gap_count = 63;
-
-       /*
-        * Finally, figure out if we should do a reset or not.  If we have
-        * done less than 5 resets with the same physical topology and we
-        * have either a new root or a new gap count setting, let's do it.
-        */
-
-       if (card->bm_retries++ < 5 &&
-           (card->gap_count != gap_count || new_root_id != root_id))
-               do_reset = true;
-
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (do_reset) {
-               fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
-                         card->index, new_root_id, gap_count);
-               fw_send_phy_config(card, new_root_id, generation, gap_count);
-               fw_core_initiate_bus_reset(card, 1);
-               /* Will allocate broadcast channel after the reset. */
-       } else {
-               if (local_id == irm_id)
-                       allocate_broadcast_channel(card, generation);
-       }
-
- out:
-       fw_node_put(root_node);
- out_put_card:
-       fw_card_put(card);
-}
-
-static void flush_timer_callback(unsigned long data)
-{
-       struct fw_card *card = (struct fw_card *)data;
-
-       fw_flush_transactions(card);
-}
-
-void fw_card_initialize(struct fw_card *card,
-                       const struct fw_card_driver *driver,
-                       struct device *device)
-{
-       static atomic_t index = ATOMIC_INIT(-1);
-
-       card->index = atomic_inc_return(&index);
-       card->driver = driver;
-       card->device = device;
-       card->current_tlabel = 0;
-       card->tlabel_mask = 0;
-       card->color = 0;
-       card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
-
-       kref_init(&card->kref);
-       init_completion(&card->done);
-       INIT_LIST_HEAD(&card->transaction_list);
-       spin_lock_init(&card->lock);
-       setup_timer(&card->flush_timer,
-                   flush_timer_callback, (unsigned long)card);
-
-       card->local_node = NULL;
-
-       INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
-}
-EXPORT_SYMBOL(fw_card_initialize);
-
-int fw_card_add(struct fw_card *card,
-               u32 max_receive, u32 link_speed, u64 guid)
-{
-       u32 *config_rom;
-       size_t length;
-       int ret;
-
-       card->max_receive = max_receive;
-       card->link_speed = link_speed;
-       card->guid = guid;
-
-       mutex_lock(&card_mutex);
-       config_rom = generate_config_rom(card, &length);
-       list_add_tail(&card->link, &card_list);
-       mutex_unlock(&card_mutex);
-
-       ret = card->driver->enable(card, config_rom, length);
-       if (ret < 0) {
-               mutex_lock(&card_mutex);
-               list_del(&card->link);
-               mutex_unlock(&card_mutex);
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(fw_card_add);
-
-
-/*
- * The next few functions implements a dummy driver that use once a
- * card driver shuts down an fw_card.  This allows the driver to
- * cleanly unload, as all IO to the card will be handled by the dummy
- * driver instead of calling into the (possibly) unloaded module.  The
- * dummy driver just fails all IO.
- */
-
-static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
-{
-       BUG();
-       return -1;
-}
-
-static int dummy_update_phy_reg(struct fw_card *card, int address,
-                               int clear_bits, int set_bits)
-{
-       return -ENODEV;
-}
-
-static int dummy_set_config_rom(struct fw_card *card,
-                               u32 *config_rom, size_t length)
-{
-       /*
-        * We take the card out of card_list before setting the dummy
-        * driver, so this should never get called.
-        */
-       BUG();
-       return -1;
-}
-
-static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
-{
-       packet->callback(packet, card, -ENODEV);
-}
-
-static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
-{
-       packet->callback(packet, card, -ENODEV);
-}
-
-static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
-{
-       return -ENOENT;
-}
-
-static int dummy_enable_phys_dma(struct fw_card *card,
-                                int node_id, int generation)
-{
-       return -ENODEV;
-}
-
-static struct fw_card_driver dummy_driver = {
-       .enable          = dummy_enable,
-       .update_phy_reg  = dummy_update_phy_reg,
-       .set_config_rom  = dummy_set_config_rom,
-       .send_request    = dummy_send_request,
-       .cancel_packet   = dummy_cancel_packet,
-       .send_response   = dummy_send_response,
-       .enable_phys_dma = dummy_enable_phys_dma,
-};
-
-void fw_card_release(struct kref *kref)
-{
-       struct fw_card *card = container_of(kref, struct fw_card, kref);
-
-       complete(&card->done);
-}
-
-void fw_core_remove_card(struct fw_card *card)
-{
-       card->driver->update_phy_reg(card, 4,
-                                    PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
-       fw_core_initiate_bus_reset(card, 1);
-
-       mutex_lock(&card_mutex);
-       list_del_init(&card->link);
-       mutex_unlock(&card_mutex);
-
-       /* Set up the dummy driver. */
-       card->driver = &dummy_driver;
-
-       fw_destroy_nodes(card);
-
-       /* Wait for all users, especially device workqueue jobs, to finish. */
-       fw_card_put(card);
-       wait_for_completion(&card->done);
-
-       WARN_ON(!list_empty(&card->transaction_list));
-       del_timer_sync(&card->flush_timer);
-}
-EXPORT_SYMBOL(fw_core_remove_card);
-
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
-{
-       int reg = short_reset ? 5 : 1;
-       int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
-
-       return card->driver->update_phy_reg(card, reg, 0, bit);
-}
-EXPORT_SYMBOL(fw_core_initiate_bus_reset);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
deleted file mode 100644 (file)
index 7eb6594..0000000
+++ /dev/null
@@ -1,1463 +0,0 @@
-/*
- * Char device for device raw access
- *
- * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/compat.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/firewire-cdev.h>
-#include <linux/idr.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/poll.h>
-#include <linux/preempt.h>
-#include <linux/spinlock.h>
-#include <linux/time.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
-
-struct client {
-       u32 version;
-       struct fw_device *device;
-
-       spinlock_t lock;
-       bool in_shutdown;
-       struct idr resource_idr;
-       struct list_head event_list;
-       wait_queue_head_t wait;
-       u64 bus_reset_closure;
-
-       struct fw_iso_context *iso_context;
-       u64 iso_closure;
-       struct fw_iso_buffer buffer;
-       unsigned long vm_start;
-
-       struct list_head link;
-       struct kref kref;
-};
-
-static inline void client_get(struct client *client)
-{
-       kref_get(&client->kref);
-}
-
-static void client_release(struct kref *kref)
-{
-       struct client *client = container_of(kref, struct client, kref);
-
-       fw_device_put(client->device);
-       kfree(client);
-}
-
-static void client_put(struct client *client)
-{
-       kref_put(&client->kref, client_release);
-}
-
-struct client_resource;
-typedef void (*client_resource_release_fn_t)(struct client *,
-                                            struct client_resource *);
-struct client_resource {
-       client_resource_release_fn_t release;
-       int handle;
-};
-
-struct address_handler_resource {
-       struct client_resource resource;
-       struct fw_address_handler handler;
-       __u64 closure;
-       struct client *client;
-};
-
-struct outbound_transaction_resource {
-       struct client_resource resource;
-       struct fw_transaction transaction;
-};
-
-struct inbound_transaction_resource {
-       struct client_resource resource;
-       struct fw_request *request;
-       void *data;
-       size_t length;
-};
-
-struct descriptor_resource {
-       struct client_resource resource;
-       struct fw_descriptor descriptor;
-       u32 data[0];
-};
-
-struct iso_resource {
-       struct client_resource resource;
-       struct client *client;
-       /* Schedule work and access todo only with client->lock held. */
-       struct delayed_work work;
-       enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
-             ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
-       int generation;
-       u64 channels;
-       s32 bandwidth;
-       struct iso_resource_event *e_alloc, *e_dealloc;
-};
-
-static void schedule_iso_resource(struct iso_resource *);
-static void release_iso_resource(struct client *, struct client_resource *);
-
-/*
- * dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in a struct XYZ_event.
- */
-struct event {
-       struct { void *data; size_t size; } v[2];
-       struct list_head link;
-};
-
-struct bus_reset_event {
-       struct event event;
-       struct fw_cdev_event_bus_reset reset;
-};
-
-struct outbound_transaction_event {
-       struct event event;
-       struct client *client;
-       struct outbound_transaction_resource r;
-       struct fw_cdev_event_response response;
-};
-
-struct inbound_transaction_event {
-       struct event event;
-       struct fw_cdev_event_request request;
-};
-
-struct iso_interrupt_event {
-       struct event event;
-       struct fw_cdev_event_iso_interrupt interrupt;
-};
-
-struct iso_resource_event {
-       struct event event;
-       struct fw_cdev_event_iso_resource resource;
-};
-
-static inline void __user *u64_to_uptr(__u64 value)
-{
-       return (void __user *)(unsigned long)value;
-}
-
-static inline __u64 uptr_to_u64(void __user *ptr)
-{
-       return (__u64)(unsigned long)ptr;
-}
-
-static int fw_device_op_open(struct inode *inode, struct file *file)
-{
-       struct fw_device *device;
-       struct client *client;
-
-       device = fw_device_get_by_devt(inode->i_rdev);
-       if (device == NULL)
-               return -ENODEV;
-
-       if (fw_device_is_shutdown(device)) {
-               fw_device_put(device);
-               return -ENODEV;
-       }
-
-       client = kzalloc(sizeof(*client), GFP_KERNEL);
-       if (client == NULL) {
-               fw_device_put(device);
-               return -ENOMEM;
-       }
-
-       client->device = device;
-       spin_lock_init(&client->lock);
-       idr_init(&client->resource_idr);
-       INIT_LIST_HEAD(&client->event_list);
-       init_waitqueue_head(&client->wait);
-       kref_init(&client->kref);
-
-       file->private_data = client;
-
-       mutex_lock(&device->client_list_mutex);
-       list_add_tail(&client->link, &device->client_list);
-       mutex_unlock(&device->client_list_mutex);
-
-       return 0;
-}
-
-static void queue_event(struct client *client, struct event *event,
-                       void *data0, size_t size0, void *data1, size_t size1)
-{
-       unsigned long flags;
-
-       event->v[0].data = data0;
-       event->v[0].size = size0;
-       event->v[1].data = data1;
-       event->v[1].size = size1;
-
-       spin_lock_irqsave(&client->lock, flags);
-       if (client->in_shutdown)
-               kfree(event);
-       else
-               list_add_tail(&event->link, &client->event_list);
-       spin_unlock_irqrestore(&client->lock, flags);
-
-       wake_up_interruptible(&client->wait);
-}
-
-static int dequeue_event(struct client *client,
-                        char __user *buffer, size_t count)
-{
-       struct event *event;
-       size_t size, total;
-       int i, ret;
-
-       ret = wait_event_interruptible(client->wait,
-                       !list_empty(&client->event_list) ||
-                       fw_device_is_shutdown(client->device));
-       if (ret < 0)
-               return ret;
-
-       if (list_empty(&client->event_list) &&
-                      fw_device_is_shutdown(client->device))
-               return -ENODEV;
-
-       spin_lock_irq(&client->lock);
-       event = list_first_entry(&client->event_list, struct event, link);
-       list_del(&event->link);
-       spin_unlock_irq(&client->lock);
-
-       total = 0;
-       for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
-               size = min(event->v[i].size, count - total);
-               if (copy_to_user(buffer + total, event->v[i].data, size)) {
-                       ret = -EFAULT;
-                       goto out;
-               }
-               total += size;
-       }
-       ret = total;
-
- out:
-       kfree(event);
-
-       return ret;
-}
-
-static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
-                                size_t count, loff_t *offset)
-{
-       struct client *client = file->private_data;
-
-       return dequeue_event(client, buffer, count);
-}
-
-static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
-                                struct client *client)
-{
-       struct fw_card *card = client->device->card;
-
-       spin_lock_irq(&card->lock);
-
-       event->closure       = client->bus_reset_closure;
-       event->type          = FW_CDEV_EVENT_BUS_RESET;
-       event->generation    = client->device->generation;
-       event->node_id       = client->device->node_id;
-       event->local_node_id = card->local_node->node_id;
-       event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
-       event->irm_node_id   = card->irm_node->node_id;
-       event->root_node_id  = card->root_node->node_id;
-
-       spin_unlock_irq(&card->lock);
-}
-
-static void for_each_client(struct fw_device *device,
-                           void (*callback)(struct client *client))
-{
-       struct client *c;
-
-       mutex_lock(&device->client_list_mutex);
-       list_for_each_entry(c, &device->client_list, link)
-               callback(c);
-       mutex_unlock(&device->client_list_mutex);
-}
-
-static int schedule_reallocations(int id, void *p, void *data)
-{
-       struct client_resource *r = p;
-
-       if (r->release == release_iso_resource)
-               schedule_iso_resource(container_of(r,
-                                       struct iso_resource, resource));
-       return 0;
-}
-
-static void queue_bus_reset_event(struct client *client)
-{
-       struct bus_reset_event *e;
-
-       e = kzalloc(sizeof(*e), GFP_KERNEL);
-       if (e == NULL) {
-               fw_notify("Out of memory when allocating bus reset event\n");
-               return;
-       }
-
-       fill_bus_reset_event(&e->reset, client);
-
-       queue_event(client, &e->event,
-                   &e->reset, sizeof(e->reset), NULL, 0);
-
-       spin_lock_irq(&client->lock);
-       idr_for_each(&client->resource_idr, schedule_reallocations, client);
-       spin_unlock_irq(&client->lock);
-}
-
-void fw_device_cdev_update(struct fw_device *device)
-{
-       for_each_client(device, queue_bus_reset_event);
-}
-
-static void wake_up_client(struct client *client)
-{
-       wake_up_interruptible(&client->wait);
-}
-
-void fw_device_cdev_remove(struct fw_device *device)
-{
-       for_each_client(device, wake_up_client);
-}
-
-static int ioctl_get_info(struct client *client, void *buffer)
-{
-       struct fw_cdev_get_info *get_info = buffer;
-       struct fw_cdev_event_bus_reset bus_reset;
-       unsigned long ret = 0;
-
-       client->version = get_info->version;
-       get_info->version = FW_CDEV_VERSION;
-       get_info->card = client->device->card->index;
-
-       down_read(&fw_device_rwsem);
-
-       if (get_info->rom != 0) {
-               void __user *uptr = u64_to_uptr(get_info->rom);
-               size_t want = get_info->rom_length;
-               size_t have = client->device->config_rom_length * 4;
-
-               ret = copy_to_user(uptr, client->device->config_rom,
-                                  min(want, have));
-       }
-       get_info->rom_length = client->device->config_rom_length * 4;
-
-       up_read(&fw_device_rwsem);
-
-       if (ret != 0)
-               return -EFAULT;
-
-       client->bus_reset_closure = get_info->bus_reset_closure;
-       if (get_info->bus_reset != 0) {
-               void __user *uptr = u64_to_uptr(get_info->bus_reset);
-
-               fill_bus_reset_event(&bus_reset, client);
-               if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
-                       return -EFAULT;
-       }
-
-       return 0;
-}
-
-static int add_client_resource(struct client *client,
-                              struct client_resource *resource, gfp_t gfp_mask)
-{
-       unsigned long flags;
-       int ret;
-
- retry:
-       if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
-               return -ENOMEM;
-
-       spin_lock_irqsave(&client->lock, flags);
-       if (client->in_shutdown)
-               ret = -ECANCELED;
-       else
-               ret = idr_get_new(&client->resource_idr, resource,
-                                 &resource->handle);
-       if (ret >= 0) {
-               client_get(client);
-               if (resource->release == release_iso_resource)
-                       schedule_iso_resource(container_of(resource,
-                                               struct iso_resource, resource));
-       }
-       spin_unlock_irqrestore(&client->lock, flags);
-
-       if (ret == -EAGAIN)
-               goto retry;
-
-       return ret < 0 ? ret : 0;
-}
-
-static int release_client_resource(struct client *client, u32 handle,
-                                  client_resource_release_fn_t release,
-                                  struct client_resource **resource)
-{
-       struct client_resource *r;
-
-       spin_lock_irq(&client->lock);
-       if (client->in_shutdown)
-               r = NULL;
-       else
-               r = idr_find(&client->resource_idr, handle);
-       if (r && r->release == release)
-               idr_remove(&client->resource_idr, handle);
-       spin_unlock_irq(&client->lock);
-
-       if (!(r && r->release == release))
-               return -EINVAL;
-
-       if (resource)
-               *resource = r;
-       else
-               r->release(client, r);
-
-       client_put(client);
-
-       return 0;
-}
-
-static void release_transaction(struct client *client,
-                               struct client_resource *resource)
-{
-       struct outbound_transaction_resource *r = container_of(resource,
-                       struct outbound_transaction_resource, resource);
-
-       fw_cancel_transaction(client->device->card, &r->transaction);
-}
-
-static void complete_transaction(struct fw_card *card, int rcode,
-                                void *payload, size_t length, void *data)
-{
-       struct outbound_transaction_event *e = data;
-       struct fw_cdev_event_response *rsp = &e->response;
-       struct client *client = e->client;
-       unsigned long flags;
-
-       if (length < rsp->length)
-               rsp->length = length;
-       if (rcode == RCODE_COMPLETE)
-               memcpy(rsp->data, payload, rsp->length);
-
-       spin_lock_irqsave(&client->lock, flags);
-       /*
-        * 1. If called while in shutdown, the idr tree must be left untouched.
-        *    The idr handle will be removed and the client reference will be
-        *    dropped later.
-        * 2. If the call chain was release_client_resource ->
-        *    release_transaction -> complete_transaction (instead of a normal
-        *    conclusion of the transaction), i.e. if this resource was already
-        *    unregistered from the idr, the client reference will be dropped
-        *    by release_client_resource and we must not drop it here.
-        */
-       if (!client->in_shutdown &&
-           idr_find(&client->resource_idr, e->r.resource.handle)) {
-               idr_remove(&client->resource_idr, e->r.resource.handle);
-               /* Drop the idr's reference */
-               client_put(client);
-       }
-       spin_unlock_irqrestore(&client->lock, flags);
-
-       rsp->type = FW_CDEV_EVENT_RESPONSE;
-       rsp->rcode = rcode;
-
-       /*
-        * In the case that sizeof(*rsp) doesn't align with the position of the
-        * data, and the read is short, preserve an extra copy of the data
-        * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
-        * for short reads and some apps depended on it, this is both safe
-        * and prudent for compatibility.
-        */
-       if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
-               queue_event(client, &e->event, rsp, sizeof(*rsp),
-                           rsp->data, rsp->length);
-       else
-               queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
-                           NULL, 0);
-
-       /* Drop the transaction callback's reference */
-       client_put(client);
-}
-
-static int init_request(struct client *client,
-                       struct fw_cdev_send_request *request,
-                       int destination_id, int speed)
-{
-       struct outbound_transaction_event *e;
-       int ret;
-
-       if (request->tcode != TCODE_STREAM_DATA &&
-           (request->length > 4096 || request->length > 512 << speed))
-               return -EIO;
-
-       e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
-       if (e == NULL)
-               return -ENOMEM;
-
-       e->client = client;
-       e->response.length = request->length;
-       e->response.closure = request->closure;
-
-       if (request->data &&
-           copy_from_user(e->response.data,
-                          u64_to_uptr(request->data), request->length)) {
-               ret = -EFAULT;
-               goto failed;
-       }
-
-       e->r.resource.release = release_transaction;
-       ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
-       if (ret < 0)
-               goto failed;
-
-       /* Get a reference for the transaction callback */
-       client_get(client);
-
-       fw_send_request(client->device->card, &e->r.transaction,
-                       request->tcode, destination_id, request->generation,
-                       speed, request->offset, e->response.data,
-                       request->length, complete_transaction, e);
-       return 0;
-
- failed:
-       kfree(e);
-
-       return ret;
-}
-
-static int ioctl_send_request(struct client *client, void *buffer)
-{
-       struct fw_cdev_send_request *request = buffer;
-
-       switch (request->tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-       case TCODE_WRITE_BLOCK_REQUEST:
-       case TCODE_READ_QUADLET_REQUEST:
-       case TCODE_READ_BLOCK_REQUEST:
-       case TCODE_LOCK_MASK_SWAP:
-       case TCODE_LOCK_COMPARE_SWAP:
-       case TCODE_LOCK_FETCH_ADD:
-       case TCODE_LOCK_LITTLE_ADD:
-       case TCODE_LOCK_BOUNDED_ADD:
-       case TCODE_LOCK_WRAP_ADD:
-       case TCODE_LOCK_VENDOR_DEPENDENT:
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return init_request(client, request, client->device->node_id,
-                           client->device->max_speed);
-}
-
-static void release_request(struct client *client,
-                           struct client_resource *resource)
-{
-       struct inbound_transaction_resource *r = container_of(resource,
-                       struct inbound_transaction_resource, resource);
-
-       fw_send_response(client->device->card, r->request,
-                        RCODE_CONFLICT_ERROR);
-       kfree(r);
-}
-
-static void handle_request(struct fw_card *card, struct fw_request *request,
-                          int tcode, int destination, int source,
-                          int generation, int speed,
-                          unsigned long long offset,
-                          void *payload, size_t length, void *callback_data)
-{
-       struct address_handler_resource *handler = callback_data;
-       struct inbound_transaction_resource *r;
-       struct inbound_transaction_event *e;
-       int ret;
-
-       r = kmalloc(sizeof(*r), GFP_ATOMIC);
-       e = kmalloc(sizeof(*e), GFP_ATOMIC);
-       if (r == NULL || e == NULL)
-               goto failed;
-
-       r->request = request;
-       r->data    = payload;
-       r->length  = length;
-
-       r->resource.release = release_request;
-       ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
-       if (ret < 0)
-               goto failed;
-
-       e->request.type    = FW_CDEV_EVENT_REQUEST;
-       e->request.tcode   = tcode;
-       e->request.offset  = offset;
-       e->request.length  = length;
-       e->request.handle  = r->resource.handle;
-       e->request.closure = handler->closure;
-
-       queue_event(handler->client, &e->event,
-                   &e->request, sizeof(e->request), payload, length);
-       return;
-
- failed:
-       kfree(r);
-       kfree(e);
-       fw_send_response(card, request, RCODE_CONFLICT_ERROR);
-}
-
-static void release_address_handler(struct client *client,
-                                   struct client_resource *resource)
-{
-       struct address_handler_resource *r =
-           container_of(resource, struct address_handler_resource, resource);
-
-       fw_core_remove_address_handler(&r->handler);
-       kfree(r);
-}
-
-static int ioctl_allocate(struct client *client, void *buffer)
-{
-       struct fw_cdev_allocate *request = buffer;
-       struct address_handler_resource *r;
-       struct fw_address_region region;
-       int ret;
-
-       r = kmalloc(sizeof(*r), GFP_KERNEL);
-       if (r == NULL)
-               return -ENOMEM;
-
-       region.start = request->offset;
-       region.end = request->offset + request->length;
-       r->handler.length = request->length;
-       r->handler.address_callback = handle_request;
-       r->handler.callback_data = r;
-       r->closure = request->closure;
-       r->client = client;
-
-       ret = fw_core_add_address_handler(&r->handler, &region);
-       if (ret < 0) {
-               kfree(r);
-               return ret;
-       }
-
-       r->resource.release = release_address_handler;
-       ret = add_client_resource(client, &r->resource, GFP_KERNEL);
-       if (ret < 0) {
-               release_address_handler(client, &r->resource);
-               return ret;
-       }
-       request->handle = r->resource.handle;
-
-       return 0;
-}
-
-static int ioctl_deallocate(struct client *client, void *buffer)
-{
-       struct fw_cdev_deallocate *request = buffer;
-
-       return release_client_resource(client, request->handle,
-                                      release_address_handler, NULL);
-}
-
-static int ioctl_send_response(struct client *client, void *buffer)
-{
-       struct fw_cdev_send_response *request = buffer;
-       struct client_resource *resource;
-       struct inbound_transaction_resource *r;
-
-       if (release_client_resource(client, request->handle,
-                                   release_request, &resource) < 0)
-               return -EINVAL;
-
-       r = container_of(resource, struct inbound_transaction_resource,
-                        resource);
-       if (request->length < r->length)
-               r->length = request->length;
-       if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
-               return -EFAULT;
-
-       fw_send_response(client->device->card, r->request, request->rcode);
-       kfree(r);
-
-       return 0;
-}
-
-static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
-{
-       struct fw_cdev_initiate_bus_reset *request = buffer;
-       int short_reset;
-
-       short_reset = (request->type == FW_CDEV_SHORT_RESET);
-
-       return fw_core_initiate_bus_reset(client->device->card, short_reset);
-}
-
-static void release_descriptor(struct client *client,
-                              struct client_resource *resource)
-{
-       struct descriptor_resource *r =
-               container_of(resource, struct descriptor_resource, resource);
-
-       fw_core_remove_descriptor(&r->descriptor);
-       kfree(r);
-}
-
-static int ioctl_add_descriptor(struct client *client, void *buffer)
-{
-       struct fw_cdev_add_descriptor *request = buffer;
-       struct fw_card *card = client->device->card;
-       struct descriptor_resource *r;
-       int ret;
-
-       /* Access policy: Allow this ioctl only on local nodes' device files. */
-       spin_lock_irq(&card->lock);
-       ret = client->device->node_id != card->local_node->node_id;
-       spin_unlock_irq(&card->lock);
-       if (ret)
-               return -ENOSYS;
-
-       if (request->length > 256)
-               return -EINVAL;
-
-       r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
-       if (r == NULL)
-               return -ENOMEM;
-
-       if (copy_from_user(r->data,
-                          u64_to_uptr(request->data), request->length * 4)) {
-               ret = -EFAULT;
-               goto failed;
-       }
-
-       r->descriptor.length    = request->length;
-       r->descriptor.immediate = request->immediate;
-       r->descriptor.key       = request->key;
-       r->descriptor.data      = r->data;
-
-       ret = fw_core_add_descriptor(&r->descriptor);
-       if (ret < 0)
-               goto failed;
-
-       r->resource.release = release_descriptor;
-       ret = add_client_resource(client, &r->resource, GFP_KERNEL);
-       if (ret < 0) {
-               fw_core_remove_descriptor(&r->descriptor);
-               goto failed;
-       }
-       request->handle = r->resource.handle;
-
-       return 0;
- failed:
-       kfree(r);
-
-       return ret;
-}
-
-static int ioctl_remove_descriptor(struct client *client, void *buffer)
-{
-       struct fw_cdev_remove_descriptor *request = buffer;
-
-       return release_client_resource(client, request->handle,
-                                      release_descriptor, NULL);
-}
-
-static void iso_callback(struct fw_iso_context *context, u32 cycle,
-                        size_t header_length, void *header, void *data)
-{
-       struct client *client = data;
-       struct iso_interrupt_event *e;
-
-       e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
-       if (e == NULL)
-               return;
-
-       e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
-       e->interrupt.closure   = client->iso_closure;
-       e->interrupt.cycle     = cycle;
-       e->interrupt.header_length = header_length;
-       memcpy(e->interrupt.header, header, header_length);
-       queue_event(client, &e->event, &e->interrupt,
-                   sizeof(e->interrupt) + header_length, NULL, 0);
-}
-
-static int ioctl_create_iso_context(struct client *client, void *buffer)
-{
-       struct fw_cdev_create_iso_context *request = buffer;
-       struct fw_iso_context *context;
-
-       /* We only support one context at this time. */
-       if (client->iso_context != NULL)
-               return -EBUSY;
-
-       if (request->channel > 63)
-               return -EINVAL;
-
-       switch (request->type) {
-       case FW_ISO_CONTEXT_RECEIVE:
-               if (request->header_size < 4 || (request->header_size & 3))
-                       return -EINVAL;
-
-               break;
-
-       case FW_ISO_CONTEXT_TRANSMIT:
-               if (request->speed > SCODE_3200)
-                       return -EINVAL;
-
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       context =  fw_iso_context_create(client->device->card,
-                                        request->type,
-                                        request->channel,
-                                        request->speed,
-                                        request->header_size,
-                                        iso_callback, client);
-       if (IS_ERR(context))
-               return PTR_ERR(context);
-
-       client->iso_closure = request->closure;
-       client->iso_context = context;
-
-       /* We only support one context at this time. */
-       request->handle = 0;
-
-       return 0;
-}
-
-/* Macros for decoding the iso packet control header. */
-#define GET_PAYLOAD_LENGTH(v)  ((v) & 0xffff)
-#define GET_INTERRUPT(v)       (((v) >> 16) & 0x01)
-#define GET_SKIP(v)            (((v) >> 17) & 0x01)
-#define GET_TAG(v)             (((v) >> 18) & 0x03)
-#define GET_SY(v)              (((v) >> 20) & 0x0f)
-#define GET_HEADER_LENGTH(v)   (((v) >> 24) & 0xff)
-
-static int ioctl_queue_iso(struct client *client, void *buffer)
-{
-       struct fw_cdev_queue_iso *request = buffer;
-       struct fw_cdev_iso_packet __user *p, *end, *next;
-       struct fw_iso_context *ctx = client->iso_context;
-       unsigned long payload, buffer_end, header_length;
-       u32 control;
-       int count;
-       struct {
-               struct fw_iso_packet packet;
-               u8 header[256];
-       } u;
-
-       if (ctx == NULL || request->handle != 0)
-               return -EINVAL;
-
-       /*
-        * If the user passes a non-NULL data pointer, has mmap()'ed
-        * the iso buffer, and the pointer points inside the buffer,
-        * we setup the payload pointers accordingly.  Otherwise we
-        * set them both to 0, which will still let packets with
-        * payload_length == 0 through.  In other words, if no packets
-        * use the indirect payload, the iso buffer need not be mapped
-        * and the request->data pointer is ignored.
-        */
-
-       payload = (unsigned long)request->data - client->vm_start;
-       buffer_end = client->buffer.page_count << PAGE_SHIFT;
-       if (request->data == 0 || client->buffer.pages == NULL ||
-           payload >= buffer_end) {
-               payload = 0;
-               buffer_end = 0;
-       }
-
-       p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
-
-       if (!access_ok(VERIFY_READ, p, request->size))
-               return -EFAULT;
-
-       end = (void __user *)p + request->size;
-       count = 0;
-       while (p < end) {
-               if (get_user(control, &p->control))
-                       return -EFAULT;
-               u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
-               u.packet.interrupt = GET_INTERRUPT(control);
-               u.packet.skip = GET_SKIP(control);
-               u.packet.tag = GET_TAG(control);
-               u.packet.sy = GET_SY(control);
-               u.packet.header_length = GET_HEADER_LENGTH(control);
-
-               if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
-                       header_length = u.packet.header_length;
-               } else {
-                       /*
-                        * We require that header_length is a multiple of
-                        * the fixed header size, ctx->header_size.
-                        */
-                       if (ctx->header_size == 0) {
-                               if (u.packet.header_length > 0)
-                                       return -EINVAL;
-                       } else if (u.packet.header_length % ctx->header_size != 0) {
-                               return -EINVAL;
-                       }
-                       header_length = 0;
-               }
-
-               next = (struct fw_cdev_iso_packet __user *)
-                       &p->header[header_length / 4];
-               if (next > end)
-                       return -EINVAL;
-               if (__copy_from_user
-                   (u.packet.header, p->header, header_length))
-                       return -EFAULT;
-               if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
-                   u.packet.header_length + u.packet.payload_length > 0)
-                       return -EINVAL;
-               if (payload + u.packet.payload_length > buffer_end)
-                       return -EINVAL;
-
-               if (fw_iso_context_queue(ctx, &u.packet,
-                                        &client->buffer, payload))
-                       break;
-
-               p = next;
-               payload += u.packet.payload_length;
-               count++;
-       }
-
-       request->size    -= uptr_to_u64(p) - request->packets;
-       request->packets  = uptr_to_u64(p);
-       request->data     = client->vm_start + payload;
-
-       return count;
-}
-
-static int ioctl_start_iso(struct client *client, void *buffer)
-{
-       struct fw_cdev_start_iso *request = buffer;
-
-       if (client->iso_context == NULL || request->handle != 0)
-               return -EINVAL;
-
-       if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
-               if (request->tags == 0 || request->tags > 15)
-                       return -EINVAL;
-
-               if (request->sync > 15)
-                       return -EINVAL;
-       }
-
-       return fw_iso_context_start(client->iso_context, request->cycle,
-                                   request->sync, request->tags);
-}
-
-static int ioctl_stop_iso(struct client *client, void *buffer)
-{
-       struct fw_cdev_stop_iso *request = buffer;
-
-       if (client->iso_context == NULL || request->handle != 0)
-               return -EINVAL;
-
-       return fw_iso_context_stop(client->iso_context);
-}
-
-static int ioctl_get_cycle_timer(struct client *client, void *buffer)
-{
-       struct fw_cdev_get_cycle_timer *request = buffer;
-       struct fw_card *card = client->device->card;
-       unsigned long long bus_time;
-       struct timeval tv;
-       unsigned long flags;
-
-       preempt_disable();
-       local_irq_save(flags);
-
-       bus_time = card->driver->get_bus_time(card);
-       do_gettimeofday(&tv);
-
-       local_irq_restore(flags);
-       preempt_enable();
-
-       request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
-       request->cycle_timer = bus_time & 0xffffffff;
-       return 0;
-}
-
-static void iso_resource_work(struct work_struct *work)
-{
-       struct iso_resource_event *e;
-       struct iso_resource *r =
-                       container_of(work, struct iso_resource, work.work);
-       struct client *client = r->client;
-       int generation, channel, bandwidth, todo;
-       bool skip, free, success;
-
-       spin_lock_irq(&client->lock);
-       generation = client->device->generation;
-       todo = r->todo;
-       /* Allow 1000ms grace period for other reallocations. */
-       if (todo == ISO_RES_ALLOC &&
-           time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
-               if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
-                       client_get(client);
-               skip = true;
-       } else {
-               /* We could be called twice within the same generation. */
-               skip = todo == ISO_RES_REALLOC &&
-                      r->generation == generation;
-       }
-       free = todo == ISO_RES_DEALLOC ||
-              todo == ISO_RES_ALLOC_ONCE ||
-              todo == ISO_RES_DEALLOC_ONCE;
-       r->generation = generation;
-       spin_unlock_irq(&client->lock);
-
-       if (skip)
-               goto out;
-
-       bandwidth = r->bandwidth;
-
-       fw_iso_resource_manage(client->device->card, generation,
-                       r->channels, &channel, &bandwidth,
-                       todo == ISO_RES_ALLOC ||
-                       todo == ISO_RES_REALLOC ||
-                       todo == ISO_RES_ALLOC_ONCE);
-       /*
-        * Is this generation outdated already?  As long as this resource sticks
-        * in the idr, it will be scheduled again for a newer generation or at
-        * shutdown.
-        */
-       if (channel == -EAGAIN &&
-           (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
-               goto out;
-
-       success = channel >= 0 || bandwidth > 0;
-
-       spin_lock_irq(&client->lock);
-       /*
-        * Transit from allocation to reallocation, except if the client
-        * requested deallocation in the meantime.
-        */
-       if (r->todo == ISO_RES_ALLOC)
-               r->todo = ISO_RES_REALLOC;
-       /*
-        * Allocation or reallocation failure?  Pull this resource out of the
-        * idr and prepare for deletion, unless the client is shutting down.
-        */
-       if (r->todo == ISO_RES_REALLOC && !success &&
-           !client->in_shutdown &&
-           idr_find(&client->resource_idr, r->resource.handle)) {
-               idr_remove(&client->resource_idr, r->resource.handle);
-               client_put(client);
-               free = true;
-       }
-       spin_unlock_irq(&client->lock);
-
-       if (todo == ISO_RES_ALLOC && channel >= 0)
-               r->channels = 1ULL << channel;
-
-       if (todo == ISO_RES_REALLOC && success)
-               goto out;
-
-       if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
-               e = r->e_alloc;
-               r->e_alloc = NULL;
-       } else {
-               e = r->e_dealloc;
-               r->e_dealloc = NULL;
-       }
-       e->resource.handle      = r->resource.handle;
-       e->resource.channel     = channel;
-       e->resource.bandwidth   = bandwidth;
-
-       queue_event(client, &e->event,
-                   &e->resource, sizeof(e->resource), NULL, 0);
-
-       if (free) {
-               cancel_delayed_work(&r->work);
-               kfree(r->e_alloc);
-               kfree(r->e_dealloc);
-               kfree(r);
-       }
- out:
-       client_put(client);
-}
-
-static void schedule_iso_resource(struct iso_resource *r)
-{
-       client_get(r->client);
-       if (!schedule_delayed_work(&r->work, 0))
-               client_put(r->client);
-}
-
-static void release_iso_resource(struct client *client,
-                                struct client_resource *resource)
-{
-       struct iso_resource *r =
-               container_of(resource, struct iso_resource, resource);
-
-       spin_lock_irq(&client->lock);
-       r->todo = ISO_RES_DEALLOC;
-       schedule_iso_resource(r);
-       spin_unlock_irq(&client->lock);
-}
-
-static int init_iso_resource(struct client *client,
-               struct fw_cdev_allocate_iso_resource *request, int todo)
-{
-       struct iso_resource_event *e1, *e2;
-       struct iso_resource *r;
-       int ret;
-
-       if ((request->channels == 0 && request->bandwidth == 0) ||
-           request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
-           request->bandwidth < 0)
-               return -EINVAL;
-
-       r  = kmalloc(sizeof(*r), GFP_KERNEL);
-       e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
-       e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
-       if (r == NULL || e1 == NULL || e2 == NULL) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       INIT_DELAYED_WORK(&r->work, iso_resource_work);
-       r->client       = client;
-       r->todo         = todo;
-       r->generation   = -1;
-       r->channels     = request->channels;
-       r->bandwidth    = request->bandwidth;
-       r->e_alloc      = e1;
-       r->e_dealloc    = e2;
-
-       e1->resource.closure    = request->closure;
-       e1->resource.type       = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
-       e2->resource.closure    = request->closure;
-       e2->resource.type       = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
-
-       if (todo == ISO_RES_ALLOC) {
-               r->resource.release = release_iso_resource;
-               ret = add_client_resource(client, &r->resource, GFP_KERNEL);
-               if (ret < 0)
-                       goto fail;
-       } else {
-               r->resource.release = NULL;
-               r->resource.handle = -1;
-               schedule_iso_resource(r);
-       }
-       request->handle = r->resource.handle;
-
-       return 0;
- fail:
-       kfree(r);
-       kfree(e1);
-       kfree(e2);
-
-       return ret;
-}
-
-static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
-{
-       struct fw_cdev_allocate_iso_resource *request = buffer;
-
-       return init_iso_resource(client, request, ISO_RES_ALLOC);
-}
-
-static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
-{
-       struct fw_cdev_deallocate *request = buffer;
-
-       return release_client_resource(client, request->handle,
-                                      release_iso_resource, NULL);
-}
-
-static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
-{
-       struct fw_cdev_allocate_iso_resource *request = buffer;
-
-       return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
-}
-
-static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
-{
-       struct fw_cdev_allocate_iso_resource *request = buffer;
-
-       return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
-}
-
-/*
- * Returns a speed code:  Maximum speed to or from this device,
- * limited by the device's link speed, the local node's link speed,
- * and all PHY port speeds between the two links.
- */
-static int ioctl_get_speed(struct client *client, void *buffer)
-{
-       return client->device->max_speed;
-}
-
-static int ioctl_send_broadcast_request(struct client *client, void *buffer)
-{
-       struct fw_cdev_send_request *request = buffer;
-
-       switch (request->tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-       case TCODE_WRITE_BLOCK_REQUEST:
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       /* Security policy: Only allow accesses to Units Space. */
-       if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
-               return -EACCES;
-
-       return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
-}
-
-static int ioctl_send_stream_packet(struct client *client, void *buffer)
-{
-       struct fw_cdev_send_stream_packet *p = buffer;
-       struct fw_cdev_send_request request;
-       int dest;
-
-       if (p->speed > client->device->card->link_speed ||
-           p->length > 1024 << p->speed)
-               return -EIO;
-
-       if (p->tag > 3 || p->channel > 63 || p->sy > 15)
-               return -EINVAL;
-
-       dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
-       request.tcode           = TCODE_STREAM_DATA;
-       request.length          = p->length;
-       request.closure         = p->closure;
-       request.data            = p->data;
-       request.generation      = p->generation;
-
-       return init_request(client, &request, dest, p->speed);
-}
-
-static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
-       ioctl_get_info,
-       ioctl_send_request,
-       ioctl_allocate,
-       ioctl_deallocate,
-       ioctl_send_response,
-       ioctl_initiate_bus_reset,
-       ioctl_add_descriptor,
-       ioctl_remove_descriptor,
-       ioctl_create_iso_context,
-       ioctl_queue_iso,
-       ioctl_start_iso,
-       ioctl_stop_iso,
-       ioctl_get_cycle_timer,
-       ioctl_allocate_iso_resource,
-       ioctl_deallocate_iso_resource,
-       ioctl_allocate_iso_resource_once,
-       ioctl_deallocate_iso_resource_once,
-       ioctl_get_speed,
-       ioctl_send_broadcast_request,
-       ioctl_send_stream_packet,
-};
-
-static int dispatch_ioctl(struct client *client,
-                         unsigned int cmd, void __user *arg)
-{
-       char buffer[256];
-       int ret;
-
-       if (_IOC_TYPE(cmd) != '#' ||
-           _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
-               return -EINVAL;
-
-       if (_IOC_DIR(cmd) & _IOC_WRITE) {
-               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-                   copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
-                       return -EFAULT;
-       }
-
-       ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
-       if (ret < 0)
-               return ret;
-
-       if (_IOC_DIR(cmd) & _IOC_READ) {
-               if (_IOC_SIZE(cmd) > sizeof(buffer) ||
-                   copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
-                       return -EFAULT;
-       }
-
-       return ret;
-}
-
-static long fw_device_op_ioctl(struct file *file,
-                              unsigned int cmd, unsigned long arg)
-{
-       struct client *client = file->private_data;
-
-       if (fw_device_is_shutdown(client->device))
-               return -ENODEV;
-
-       return dispatch_ioctl(client, cmd, (void __user *) arg);
-}
-
-#ifdef CONFIG_COMPAT
-static long fw_device_op_compat_ioctl(struct file *file,
-                                     unsigned int cmd, unsigned long arg)
-{
-       struct client *client = file->private_data;
-
-       if (fw_device_is_shutdown(client->device))
-               return -ENODEV;
-
-       return dispatch_ioctl(client, cmd, compat_ptr(arg));
-}
-#endif
-
-static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       struct client *client = file->private_data;
-       enum dma_data_direction direction;
-       unsigned long size;
-       int page_count, ret;
-
-       if (fw_device_is_shutdown(client->device))
-               return -ENODEV;
-
-       /* FIXME: We could support multiple buffers, but we don't. */
-       if (client->buffer.pages != NULL)
-               return -EBUSY;
-
-       if (!(vma->vm_flags & VM_SHARED))
-               return -EINVAL;
-
-       if (vma->vm_start & ~PAGE_MASK)
-               return -EINVAL;
-
-       client->vm_start = vma->vm_start;
-       size = vma->vm_end - vma->vm_start;
-       page_count = size >> PAGE_SHIFT;
-       if (size & ~PAGE_MASK)
-               return -EINVAL;
-
-       if (vma->vm_flags & VM_WRITE)
-               direction = DMA_TO_DEVICE;
-       else
-               direction = DMA_FROM_DEVICE;
-
-       ret = fw_iso_buffer_init(&client->buffer, client->device->card,
-                                page_count, direction);
-       if (ret < 0)
-               return ret;
-
-       ret = fw_iso_buffer_map(&client->buffer, vma);
-       if (ret < 0)
-               fw_iso_buffer_destroy(&client->buffer, client->device->card);
-
-       return ret;
-}
-
-static int shutdown_resource(int id, void *p, void *data)
-{
-       struct client_resource *r = p;
-       struct client *client = data;
-
-       r->release(client, r);
-       client_put(client);
-
-       return 0;
-}
-
-static int fw_device_op_release(struct inode *inode, struct file *file)
-{
-       struct client *client = file->private_data;
-       struct event *e, *next_e;
-
-       mutex_lock(&client->device->client_list_mutex);
-       list_del(&client->link);
-       mutex_unlock(&client->device->client_list_mutex);
-
-       if (client->iso_context)
-               fw_iso_context_destroy(client->iso_context);
-
-       if (client->buffer.pages)
-               fw_iso_buffer_destroy(&client->buffer, client->device->card);
-
-       /* Freeze client->resource_idr and client->event_list */
-       spin_lock_irq(&client->lock);
-       client->in_shutdown = true;
-       spin_unlock_irq(&client->lock);
-
-       idr_for_each(&client->resource_idr, shutdown_resource, client);
-       idr_remove_all(&client->resource_idr);
-       idr_destroy(&client->resource_idr);
-
-       list_for_each_entry_safe(e, next_e, &client->event_list, link)
-               kfree(e);
-
-       client_put(client);
-
-       return 0;
-}
-
-static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
-{
-       struct client *client = file->private_data;
-       unsigned int mask = 0;
-
-       poll_wait(file, &client->wait, pt);
-
-       if (fw_device_is_shutdown(client->device))
-               mask |= POLLHUP | POLLERR;
-       if (!list_empty(&client->event_list))
-               mask |= POLLIN | POLLRDNORM;
-
-       return mask;
-}
-
-const struct file_operations fw_device_ops = {
-       .owner          = THIS_MODULE,
-       .open           = fw_device_op_open,
-       .read           = fw_device_op_read,
-       .unlocked_ioctl = fw_device_op_ioctl,
-       .poll           = fw_device_op_poll,
-       .release        = fw_device_op_release,
-       .mmap           = fw_device_op_mmap,
-
-#ifdef CONFIG_COMPAT
-       .compat_ioctl   = fw_device_op_compat_ioctl,
-#endif
-};
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
deleted file mode 100644 (file)
index a47e212..0000000
+++ /dev/null
@@ -1,1122 +0,0 @@
-/*
- * Device probing and sysfs code.
- *
- * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/ctype.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/idr.h>
-#include <linux/jiffies.h>
-#include <linux/kobject.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/rwsem.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-
-#include <asm/system.h>
-
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
-
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
-{
-       ci->p = p + 1;
-       ci->end = ci->p + (p[0] >> 16);
-}
-EXPORT_SYMBOL(fw_csr_iterator_init);
-
-int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
-{
-       *key = *ci->p >> 24;
-       *value = *ci->p & 0xffffff;
-
-       return ci->p++ < ci->end;
-}
-EXPORT_SYMBOL(fw_csr_iterator_next);
-
-static int is_fw_unit(struct device *dev);
-
-static int match_unit_directory(u32 * directory, const struct fw_device_id *id)
-{
-       struct fw_csr_iterator ci;
-       int key, value, match;
-
-       match = 0;
-       fw_csr_iterator_init(&ci, directory);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               if (key == CSR_VENDOR && value == id->vendor)
-                       match |= FW_MATCH_VENDOR;
-               if (key == CSR_MODEL && value == id->model)
-                       match |= FW_MATCH_MODEL;
-               if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
-                       match |= FW_MATCH_SPECIFIER_ID;
-               if (key == CSR_VERSION && value == id->version)
-                       match |= FW_MATCH_VERSION;
-       }
-
-       return (match & id->match_flags) == id->match_flags;
-}
-
-static int fw_unit_match(struct device *dev, struct device_driver *drv)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       struct fw_driver *driver = fw_driver(drv);
-       int i;
-
-       /* We only allow binding to fw_units. */
-       if (!is_fw_unit(dev))
-               return 0;
-
-       for (i = 0; driver->id_table[i].match_flags != 0; i++) {
-               if (match_unit_directory(unit->directory, &driver->id_table[i]))
-                       return 1;
-       }
-
-       return 0;
-}
-
-static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
-{
-       struct fw_device *device = fw_device(unit->device.parent);
-       struct fw_csr_iterator ci;
-
-       int key, value;
-       int vendor = 0;
-       int model = 0;
-       int specifier_id = 0;
-       int version = 0;
-
-       fw_csr_iterator_init(&ci, &device->config_rom[5]);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               switch (key) {
-               case CSR_VENDOR:
-                       vendor = value;
-                       break;
-               case CSR_MODEL:
-                       model = value;
-                       break;
-               }
-       }
-
-       fw_csr_iterator_init(&ci, unit->directory);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               switch (key) {
-               case CSR_SPECIFIER_ID:
-                       specifier_id = value;
-                       break;
-               case CSR_VERSION:
-                       version = value;
-                       break;
-               }
-       }
-
-       return snprintf(buffer, buffer_size,
-                       "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
-                       vendor, model, specifier_id, version);
-}
-
-static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       char modalias[64];
-
-       get_modalias(unit, modalias, sizeof(modalias));
-
-       if (add_uevent_var(env, "MODALIAS=%s", modalias))
-               return -ENOMEM;
-
-       return 0;
-}
-
-struct bus_type fw_bus_type = {
-       .name = "firewire",
-       .match = fw_unit_match,
-};
-EXPORT_SYMBOL(fw_bus_type);
-
-int fw_device_enable_phys_dma(struct fw_device *device)
-{
-       int generation = device->generation;
-
-       /* device->node_id, accessed below, must not be older than generation */
-       smp_rmb();
-
-       return device->card->driver->enable_phys_dma(device->card,
-                                                    device->node_id,
-                                                    generation);
-}
-EXPORT_SYMBOL(fw_device_enable_phys_dma);
-
-struct config_rom_attribute {
-       struct device_attribute attr;
-       u32 key;
-};
-
-static ssize_t show_immediate(struct device *dev,
-                             struct device_attribute *dattr, char *buf)
-{
-       struct config_rom_attribute *attr =
-               container_of(dattr, struct config_rom_attribute, attr);
-       struct fw_csr_iterator ci;
-       u32 *dir;
-       int key, value, ret = -ENOENT;
-
-       down_read(&fw_device_rwsem);
-
-       if (is_fw_unit(dev))
-               dir = fw_unit(dev)->directory;
-       else
-               dir = fw_device(dev)->config_rom + 5;
-
-       fw_csr_iterator_init(&ci, dir);
-       while (fw_csr_iterator_next(&ci, &key, &value))
-               if (attr->key == key) {
-                       ret = snprintf(buf, buf ? PAGE_SIZE : 0,
-                                      "0x%06x\n", value);
-                       break;
-               }
-
-       up_read(&fw_device_rwsem);
-
-       return ret;
-}
-
-#define IMMEDIATE_ATTR(name, key)                              \
-       { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
-
-static ssize_t show_text_leaf(struct device *dev,
-                             struct device_attribute *dattr, char *buf)
-{
-       struct config_rom_attribute *attr =
-               container_of(dattr, struct config_rom_attribute, attr);
-       struct fw_csr_iterator ci;
-       u32 *dir, *block = NULL, *p, *end;
-       int length, key, value, last_key = 0, ret = -ENOENT;
-       char *b;
-
-       down_read(&fw_device_rwsem);
-
-       if (is_fw_unit(dev))
-               dir = fw_unit(dev)->directory;
-       else
-               dir = fw_device(dev)->config_rom + 5;
-
-       fw_csr_iterator_init(&ci, dir);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               if (attr->key == last_key &&
-                   key == (CSR_DESCRIPTOR | CSR_LEAF))
-                       block = ci.p - 1 + value;
-               last_key = key;
-       }
-
-       if (block == NULL)
-               goto out;
-
-       length = min(block[0] >> 16, 256U);
-       if (length < 3)
-               goto out;
-
-       if (block[1] != 0 || block[2] != 0)
-               /* Unknown encoding. */
-               goto out;
-
-       if (buf == NULL) {
-               ret = length * 4;
-               goto out;
-       }
-
-       b = buf;
-       end = &block[length + 1];
-       for (p = &block[3]; p < end; p++, b += 4)
-               * (u32 *) b = (__force u32) __cpu_to_be32(*p);
-
-       /* Strip trailing whitespace and add newline. */
-       while (b--, (isspace(*b) || *b == '\0') && b > buf);
-       strcpy(b + 1, "\n");
-       ret = b + 2 - buf;
- out:
-       up_read(&fw_device_rwsem);
-
-       return ret;
-}
-
-#define TEXT_LEAF_ATTR(name, key)                              \
-       { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
-
-static struct config_rom_attribute config_rom_attributes[] = {
-       IMMEDIATE_ATTR(vendor, CSR_VENDOR),
-       IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
-       IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
-       IMMEDIATE_ATTR(version, CSR_VERSION),
-       IMMEDIATE_ATTR(model, CSR_MODEL),
-       TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
-       TEXT_LEAF_ATTR(model_name, CSR_MODEL),
-       TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
-};
-
-static void init_fw_attribute_group(struct device *dev,
-                                   struct device_attribute *attrs,
-                                   struct fw_attribute_group *group)
-{
-       struct device_attribute *attr;
-       int i, j;
-
-       for (j = 0; attrs[j].attr.name != NULL; j++)
-               group->attrs[j] = &attrs[j].attr;
-
-       for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
-               attr = &config_rom_attributes[i].attr;
-               if (attr->show(dev, attr, NULL) < 0)
-                       continue;
-               group->attrs[j++] = &attr->attr;
-       }
-
-       BUG_ON(j >= ARRAY_SIZE(group->attrs));
-       group->attrs[j++] = NULL;
-       group->groups[0] = &group->group;
-       group->groups[1] = NULL;
-       group->group.attrs = group->attrs;
-       dev->groups = group->groups;
-}
-
-static ssize_t modalias_show(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       int length;
-
-       length = get_modalias(unit, buf, PAGE_SIZE);
-       strcpy(buf + length, "\n");
-
-       return length + 1;
-}
-
-static ssize_t rom_index_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct fw_device *device = fw_device(dev->parent);
-       struct fw_unit *unit = fw_unit(dev);
-
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       (int)(unit->directory - device->config_rom));
-}
-
-static struct device_attribute fw_unit_attributes[] = {
-       __ATTR_RO(modalias),
-       __ATTR_RO(rom_index),
-       __ATTR_NULL,
-};
-
-static ssize_t config_rom_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       struct fw_device *device = fw_device(dev);
-       size_t length;
-
-       down_read(&fw_device_rwsem);
-       length = device->config_rom_length * 4;
-       memcpy(buf, device->config_rom, length);
-       up_read(&fw_device_rwsem);
-
-       return length;
-}
-
-static ssize_t guid_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
-{
-       struct fw_device *device = fw_device(dev);
-       int ret;
-
-       down_read(&fw_device_rwsem);
-       ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
-                      device->config_rom[3], device->config_rom[4]);
-       up_read(&fw_device_rwsem);
-
-       return ret;
-}
-
-static struct device_attribute fw_device_attributes[] = {
-       __ATTR_RO(config_rom),
-       __ATTR_RO(guid),
-       __ATTR_NULL,
-};
-
-static int read_rom(struct fw_device *device,
-                   int generation, int index, u32 *data)
-{
-       int rcode;
-
-       /* device->node_id, accessed below, must not be older than generation */
-       smp_rmb();
-
-       rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
-                       device->node_id, generation, device->max_speed,
-                       (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
-                       data, 4);
-       be32_to_cpus(data);
-
-       return rcode;
-}
-
-#define READ_BIB_ROM_SIZE      256
-#define READ_BIB_STACK_SIZE    16
-
-/*
- * Read the bus info block, perform a speed probe, and read all of the rest of
- * the config ROM.  We do all this with a cached bus generation.  If the bus
- * generation changes under us, read_bus_info_block will fail and get retried.
- * It's better to start all over in this case because the node from which we
- * are reading the ROM may have changed the ROM during the reset.
- */
-static int read_bus_info_block(struct fw_device *device, int generation)
-{
-       u32 *rom, *stack, *old_rom, *new_rom;
-       u32 sp, key;
-       int i, end, length, ret = -1;
-
-       rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
-                     sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
-       if (rom == NULL)
-               return -ENOMEM;
-
-       stack = &rom[READ_BIB_ROM_SIZE];
-
-       device->max_speed = SCODE_100;
-
-       /* First read the bus info block. */
-       for (i = 0; i < 5; i++) {
-               if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
-                       goto out;
-               /*
-                * As per IEEE1212 7.2, during power-up, devices can
-                * reply with a 0 for the first quadlet of the config
-                * rom to indicate that they are booting (for example,
-                * if the firmware is on the disk of a external
-                * harddisk).  In that case we just fail, and the
-                * retry mechanism will try again later.
-                */
-               if (i == 0 && rom[i] == 0)
-                       goto out;
-       }
-
-       device->max_speed = device->node->max_speed;
-
-       /*
-        * Determine the speed of
-        *   - devices with link speed less than PHY speed,
-        *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
-        *   - all devices if there are 1394b repeaters.
-        * Note, we cannot use the bus info block's link_spd as starting point
-        * because some buggy firmwares set it lower than necessary and because
-        * 1394-1995 nodes do not have the field.
-        */
-       if ((rom[2] & 0x7) < device->max_speed ||
-           device->max_speed == SCODE_BETA ||
-           device->card->beta_repeaters_present) {
-               u32 dummy;
-
-               /* for S1600 and S3200 */
-               if (device->max_speed == SCODE_BETA)
-                       device->max_speed = device->card->link_speed;
-
-               while (device->max_speed > SCODE_100) {
-                       if (read_rom(device, generation, 0, &dummy) ==
-                           RCODE_COMPLETE)
-                               break;
-                       device->max_speed--;
-               }
-       }
-
-       /*
-        * Now parse the config rom.  The config rom is a recursive
-        * directory structure so we parse it using a stack of
-        * references to the blocks that make up the structure.  We
-        * push a reference to the root directory on the stack to
-        * start things off.
-        */
-       length = i;
-       sp = 0;
-       stack[sp++] = 0xc0000005;
-       while (sp > 0) {
-               /*
-                * Pop the next block reference of the stack.  The
-                * lower 24 bits is the offset into the config rom,
-                * the upper 8 bits are the type of the reference the
-                * block.
-                */
-               key = stack[--sp];
-               i = key & 0xffffff;
-               if (i >= READ_BIB_ROM_SIZE)
-                       /*
-                        * The reference points outside the standard
-                        * config rom area, something's fishy.
-                        */
-                       goto out;
-
-               /* Read header quadlet for the block to get the length. */
-               if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
-                       goto out;
-               end = i + (rom[i] >> 16) + 1;
-               i++;
-               if (end > READ_BIB_ROM_SIZE)
-                       /*
-                        * This block extends outside standard config
-                        * area (and the array we're reading it
-                        * into).  That's broken, so ignore this
-                        * device.
-                        */
-                       goto out;
-
-               /*
-                * Now read in the block.  If this is a directory
-                * block, check the entries as we read them to see if
-                * it references another block, and push it in that case.
-                */
-               while (i < end) {
-                       if (read_rom(device, generation, i, &rom[i]) !=
-                           RCODE_COMPLETE)
-                               goto out;
-                       if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
-                           sp < READ_BIB_STACK_SIZE)
-                               stack[sp++] = i + rom[i];
-                       i++;
-               }
-               if (length < i)
-                       length = i;
-       }
-
-       old_rom = device->config_rom;
-       new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
-       if (new_rom == NULL)
-               goto out;
-
-       down_write(&fw_device_rwsem);
-       device->config_rom = new_rom;
-       device->config_rom_length = length;
-       up_write(&fw_device_rwsem);
-
-       kfree(old_rom);
-       ret = 0;
-       device->cmc = rom[2] >> 30 & 1;
- out:
-       kfree(rom);
-
-       return ret;
-}
-
-static void fw_unit_release(struct device *dev)
-{
-       struct fw_unit *unit = fw_unit(dev);
-
-       kfree(unit);
-}
-
-static struct device_type fw_unit_type = {
-       .uevent         = fw_unit_uevent,
-       .release        = fw_unit_release,
-};
-
-static int is_fw_unit(struct device *dev)
-{
-       return dev->type == &fw_unit_type;
-}
-
-static void create_units(struct fw_device *device)
-{
-       struct fw_csr_iterator ci;
-       struct fw_unit *unit;
-       int key, value, i;
-
-       i = 0;
-       fw_csr_iterator_init(&ci, &device->config_rom[5]);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               if (key != (CSR_UNIT | CSR_DIRECTORY))
-                       continue;
-
-               /*
-                * Get the address of the unit directory and try to
-                * match the drivers id_tables against it.
-                */
-               unit = kzalloc(sizeof(*unit), GFP_KERNEL);
-               if (unit == NULL) {
-                       fw_error("failed to allocate memory for unit\n");
-                       continue;
-               }
-
-               unit->directory = ci.p + value - 1;
-               unit->device.bus = &fw_bus_type;
-               unit->device.type = &fw_unit_type;
-               unit->device.parent = &device->device;
-               dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
-
-               init_fw_attribute_group(&unit->device,
-                                       fw_unit_attributes,
-                                       &unit->attribute_group);
-               if (device_register(&unit->device) < 0)
-                       goto skip_unit;
-
-               continue;
-
-       skip_unit:
-               kfree(unit);
-       }
-}
-
-static int shutdown_unit(struct device *device, void *data)
-{
-       device_unregister(device);
-
-       return 0;
-}
-
-/*
- * fw_device_rwsem acts as dual purpose mutex:
- *   - serializes accesses to fw_device_idr,
- *   - serializes accesses to fw_device.config_rom/.config_rom_length and
- *     fw_unit.directory, unless those accesses happen at safe occasions
- */
-DECLARE_RWSEM(fw_device_rwsem);
-
-DEFINE_IDR(fw_device_idr);
-int fw_cdev_major;
-
-struct fw_device *fw_device_get_by_devt(dev_t devt)
-{
-       struct fw_device *device;
-
-       down_read(&fw_device_rwsem);
-       device = idr_find(&fw_device_idr, MINOR(devt));
-       if (device)
-               fw_device_get(device);
-       up_read(&fw_device_rwsem);
-
-       return device;
-}
-
-/*
- * These defines control the retry behavior for reading the config
- * rom.  It shouldn't be necessary to tweak these; if the device
- * doesn't respond to a config rom read within 10 seconds, it's not
- * going to respond at all.  As for the initial delay, a lot of
- * devices will be able to respond within half a second after bus
- * reset.  On the other hand, it's not really worth being more
- * aggressive than that, since it scales pretty well; if 10 devices
- * are plugged in, they're all getting read within one second.
- */
-
-#define MAX_RETRIES    10
-#define RETRY_DELAY    (3 * HZ)
-#define INITIAL_DELAY  (HZ / 2)
-#define SHUTDOWN_DELAY (2 * HZ)
-
-static void fw_device_shutdown(struct work_struct *work)
-{
-       struct fw_device *device =
-               container_of(work, struct fw_device, work.work);
-       int minor = MINOR(device->device.devt);
-
-       if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
-           && !list_empty(&device->card->link)) {
-               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
-               return;
-       }
-
-       if (atomic_cmpxchg(&device->state,
-                          FW_DEVICE_GONE,
-                          FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
-               return;
-
-       fw_device_cdev_remove(device);
-       device_for_each_child(&device->device, NULL, shutdown_unit);
-       device_unregister(&device->device);
-
-       down_write(&fw_device_rwsem);
-       idr_remove(&fw_device_idr, minor);
-       up_write(&fw_device_rwsem);
-
-       fw_device_put(device);
-}
-
-static void fw_device_release(struct device *dev)
-{
-       struct fw_device *device = fw_device(dev);
-       struct fw_card *card = device->card;
-       unsigned long flags;
-
-       /*
-        * Take the card lock so we don't set this to NULL while a
-        * FW_NODE_UPDATED callback is being handled or while the
-        * bus manager work looks at this node.
-        */
-       spin_lock_irqsave(&card->lock, flags);
-       device->node->data = NULL;
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       fw_node_put(device->node);
-       kfree(device->config_rom);
-       kfree(device);
-       fw_card_put(card);
-}
-
-static struct device_type fw_device_type = {
-       .release = fw_device_release,
-};
-
-static int update_unit(struct device *dev, void *data)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       struct fw_driver *driver = (struct fw_driver *)dev->driver;
-
-       if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
-               down(&dev->sem);
-               driver->update(unit);
-               up(&dev->sem);
-       }
-
-       return 0;
-}
-
-static void fw_device_update(struct work_struct *work)
-{
-       struct fw_device *device =
-               container_of(work, struct fw_device, work.work);
-
-       fw_device_cdev_update(device);
-       device_for_each_child(&device->device, NULL, update_unit);
-}
-
-/*
- * If a device was pending for deletion because its node went away but its
- * bus info block and root directory header matches that of a newly discovered
- * device, revive the existing fw_device.
- * The newly allocated fw_device becomes obsolete instead.
- */
-static int lookup_existing_device(struct device *dev, void *data)
-{
-       struct fw_device *old = fw_device(dev);
-       struct fw_device *new = data;
-       struct fw_card *card = new->card;
-       int match = 0;
-
-       down_read(&fw_device_rwsem); /* serialize config_rom access */
-       spin_lock_irq(&card->lock);  /* serialize node access */
-
-       if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
-           atomic_cmpxchg(&old->state,
-                          FW_DEVICE_GONE,
-                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-               struct fw_node *current_node = new->node;
-               struct fw_node *obsolete_node = old->node;
-
-               new->node = obsolete_node;
-               new->node->data = new;
-               old->node = current_node;
-               old->node->data = old;
-
-               old->max_speed = new->max_speed;
-               old->node_id = current_node->node_id;
-               smp_wmb();  /* update node_id before generation */
-               old->generation = card->generation;
-               old->config_rom_retries = 0;
-               fw_notify("rediscovered device %s\n", dev_name(dev));
-
-               PREPARE_DELAYED_WORK(&old->work, fw_device_update);
-               schedule_delayed_work(&old->work, 0);
-
-               if (current_node == card->root_node)
-                       fw_schedule_bm_work(card, 0);
-
-               match = 1;
-       }
-
-       spin_unlock_irq(&card->lock);
-       up_read(&fw_device_rwsem);
-
-       return match;
-}
-
-enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
-
-void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
-{
-       struct fw_card *card = device->card;
-       __be32 data;
-       int rcode;
-
-       if (!card->broadcast_channel_allocated)
-               return;
-
-       if (device->bc_implemented == BC_UNKNOWN) {
-               rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
-                               device->node_id, generation, device->max_speed,
-                               CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
-                               &data, 4);
-               switch (rcode) {
-               case RCODE_COMPLETE:
-                       if (data & cpu_to_be32(1 << 31)) {
-                               device->bc_implemented = BC_IMPLEMENTED;
-                               break;
-                       }
-                       /* else fall through to case address error */
-               case RCODE_ADDRESS_ERROR:
-                       device->bc_implemented = BC_UNIMPLEMENTED;
-               }
-       }
-
-       if (device->bc_implemented == BC_IMPLEMENTED) {
-               data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
-                                  BROADCAST_CHANNEL_VALID);
-               fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
-                               device->node_id, generation, device->max_speed,
-                               CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
-                               &data, 4);
-       }
-}
-
-static void fw_device_init(struct work_struct *work)
-{
-       struct fw_device *device =
-               container_of(work, struct fw_device, work.work);
-       struct device *revived_dev;
-       int minor, ret;
-
-       /*
-        * All failure paths here set node->data to NULL, so that we
-        * don't try to do device_for_each_child() on a kfree()'d
-        * device.
-        */
-
-       if (read_bus_info_block(device, device->generation) < 0) {
-               if (device->config_rom_retries < MAX_RETRIES &&
-                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
-                       device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY);
-               } else {
-                       fw_notify("giving up on config rom for node id %x\n",
-                                 device->node_id);
-                       if (device->node == device->card->root_node)
-                               fw_schedule_bm_work(device->card, 0);
-                       fw_device_release(&device->device);
-               }
-               return;
-       }
-
-       revived_dev = device_find_child(device->card->device,
-                                       device, lookup_existing_device);
-       if (revived_dev) {
-               put_device(revived_dev);
-               fw_device_release(&device->device);
-
-               return;
-       }
-
-       device_initialize(&device->device);
-
-       fw_device_get(device);
-       down_write(&fw_device_rwsem);
-       ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
-             idr_get_new(&fw_device_idr, device, &minor) :
-             -ENOMEM;
-       up_write(&fw_device_rwsem);
-
-       if (ret < 0)
-               goto error;
-
-       device->device.bus = &fw_bus_type;
-       device->device.type = &fw_device_type;
-       device->device.parent = device->card->device;
-       device->device.devt = MKDEV(fw_cdev_major, minor);
-       dev_set_name(&device->device, "fw%d", minor);
-
-       init_fw_attribute_group(&device->device,
-                               fw_device_attributes,
-                               &device->attribute_group);
-       if (device_add(&device->device)) {
-               fw_error("Failed to add device.\n");
-               goto error_with_cdev;
-       }
-
-       create_units(device);
-
-       /*
-        * Transition the device to running state.  If it got pulled
-        * out from under us while we did the intialization work, we
-        * have to shut down the device again here.  Normally, though,
-        * fw_node_event will be responsible for shutting it down when
-        * necessary.  We have to use the atomic cmpxchg here to avoid
-        * racing with the FW_NODE_DESTROYED case in
-        * fw_node_event().
-        */
-       if (atomic_cmpxchg(&device->state,
-                          FW_DEVICE_INITIALIZING,
-                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-               PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-               schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
-       } else {
-               if (device->config_rom_retries)
-                       fw_notify("created device %s: GUID %08x%08x, S%d00, "
-                                 "%d config ROM retries\n",
-                                 dev_name(&device->device),
-                                 device->config_rom[3], device->config_rom[4],
-                                 1 << device->max_speed,
-                                 device->config_rom_retries);
-               else
-                       fw_notify("created device %s: GUID %08x%08x, S%d00\n",
-                                 dev_name(&device->device),
-                                 device->config_rom[3], device->config_rom[4],
-                                 1 << device->max_speed);
-               device->config_rom_retries = 0;
-
-               fw_device_set_broadcast_channel(device, device->generation);
-       }
-
-       /*
-        * Reschedule the IRM work if we just finished reading the
-        * root node config rom.  If this races with a bus reset we
-        * just end up running the IRM work a couple of extra times -
-        * pretty harmless.
-        */
-       if (device->node == device->card->root_node)
-               fw_schedule_bm_work(device->card, 0);
-
-       return;
-
- error_with_cdev:
-       down_write(&fw_device_rwsem);
-       idr_remove(&fw_device_idr, minor);
-       up_write(&fw_device_rwsem);
- error:
-       fw_device_put(device);          /* fw_device_idr's reference */
-
-       put_device(&device->device);    /* our reference */
-}
-
-enum {
-       REREAD_BIB_ERROR,
-       REREAD_BIB_GONE,
-       REREAD_BIB_UNCHANGED,
-       REREAD_BIB_CHANGED,
-};
-
-/* Reread and compare bus info block and header of root directory */
-static int reread_bus_info_block(struct fw_device *device, int generation)
-{
-       u32 q;
-       int i;
-
-       for (i = 0; i < 6; i++) {
-               if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
-                       return REREAD_BIB_ERROR;
-
-               if (i == 0 && q == 0)
-                       return REREAD_BIB_GONE;
-
-               if (q != device->config_rom[i])
-                       return REREAD_BIB_CHANGED;
-       }
-
-       return REREAD_BIB_UNCHANGED;
-}
-
-static void fw_device_refresh(struct work_struct *work)
-{
-       struct fw_device *device =
-               container_of(work, struct fw_device, work.work);
-       struct fw_card *card = device->card;
-       int node_id = device->node_id;
-
-       switch (reread_bus_info_block(device, device->generation)) {
-       case REREAD_BIB_ERROR:
-               if (device->config_rom_retries < MAX_RETRIES / 2 &&
-                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
-                       device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY / 2);
-
-                       return;
-               }
-               goto give_up;
-
-       case REREAD_BIB_GONE:
-               goto gone;
-
-       case REREAD_BIB_UNCHANGED:
-               if (atomic_cmpxchg(&device->state,
-                                  FW_DEVICE_INITIALIZING,
-                                  FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
-                       goto gone;
-
-               fw_device_update(work);
-               device->config_rom_retries = 0;
-               goto out;
-
-       case REREAD_BIB_CHANGED:
-               break;
-       }
-
-       /*
-        * Something changed.  We keep things simple and don't investigate
-        * further.  We just destroy all previous units and create new ones.
-        */
-       device_for_each_child(&device->device, NULL, shutdown_unit);
-
-       if (read_bus_info_block(device, device->generation) < 0) {
-               if (device->config_rom_retries < MAX_RETRIES &&
-                   atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
-                       device->config_rom_retries++;
-                       schedule_delayed_work(&device->work, RETRY_DELAY);
-
-                       return;
-               }
-               goto give_up;
-       }
-
-       create_units(device);
-
-       if (atomic_cmpxchg(&device->state,
-                          FW_DEVICE_INITIALIZING,
-                          FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
-               goto gone;
-
-       fw_notify("refreshed device %s\n", dev_name(&device->device));
-       device->config_rom_retries = 0;
-       goto out;
-
- give_up:
-       fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
- gone:
-       atomic_set(&device->state, FW_DEVICE_GONE);
-       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-       schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
- out:
-       if (node_id == card->root_node->node_id)
-               fw_schedule_bm_work(card, 0);
-}
-
-void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
-{
-       struct fw_device *device;
-
-       switch (event) {
-       case FW_NODE_CREATED:
-       case FW_NODE_LINK_ON:
-               if (!node->link_on)
-                       break;
- create:
-               device = kzalloc(sizeof(*device), GFP_ATOMIC);
-               if (device == NULL)
-                       break;
-
-               /*
-                * Do minimal intialization of the device here, the
-                * rest will happen in fw_device_init().
-                *
-                * Attention:  A lot of things, even fw_device_get(),
-                * cannot be done before fw_device_init() finished!
-                * You can basically just check device->state and
-                * schedule work until then, but only while holding
-                * card->lock.
-                */
-               atomic_set(&device->state, FW_DEVICE_INITIALIZING);
-               device->card = fw_card_get(card);
-               device->node = fw_node_get(node);
-               device->node_id = node->node_id;
-               device->generation = card->generation;
-               mutex_init(&device->client_list_mutex);
-               INIT_LIST_HEAD(&device->client_list);
-
-               /*
-                * Set the node data to point back to this device so
-                * FW_NODE_UPDATED callbacks can update the node_id
-                * and generation for the device.
-                */
-               node->data = device;
-
-               /*
-                * Many devices are slow to respond after bus resets,
-                * especially if they are bus powered and go through
-                * power-up after getting plugged in.  We schedule the
-                * first config rom scan half a second after bus reset.
-                */
-               INIT_DELAYED_WORK(&device->work, fw_device_init);
-               schedule_delayed_work(&device->work, INITIAL_DELAY);
-               break;
-
-       case FW_NODE_INITIATED_RESET:
-               device = node->data;
-               if (device == NULL)
-                       goto create;
-
-               device->node_id = node->node_id;
-               smp_wmb();  /* update node_id before generation */
-               device->generation = card->generation;
-               if (atomic_cmpxchg(&device->state,
-                           FW_DEVICE_RUNNING,
-                           FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
-                       schedule_delayed_work(&device->work,
-                               node == card->local_node ? 0 : INITIAL_DELAY);
-               }
-               break;
-
-       case FW_NODE_UPDATED:
-               if (!node->link_on || node->data == NULL)
-                       break;
-
-               device = node->data;
-               device->node_id = node->node_id;
-               smp_wmb();  /* update node_id before generation */
-               device->generation = card->generation;
-               if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_update);
-                       schedule_delayed_work(&device->work, 0);
-               }
-               break;
-
-       case FW_NODE_DESTROYED:
-       case FW_NODE_LINK_OFF:
-               if (!node->data)
-                       break;
-
-               /*
-                * Destroy the device associated with the node.  There
-                * are two cases here: either the device is fully
-                * initialized (FW_DEVICE_RUNNING) or we're in the
-                * process of reading its config rom
-                * (FW_DEVICE_INITIALIZING).  If it is fully
-                * initialized we can reuse device->work to schedule a
-                * full fw_device_shutdown().  If not, there's work
-                * scheduled to read it's config rom, and we just put
-                * the device in shutdown state to have that code fail
-                * to create the device.
-                */
-               device = node->data;
-               if (atomic_xchg(&device->state,
-                               FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
-                       PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
-                       schedule_delayed_work(&device->work,
-                               list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
-               }
-               break;
-       }
-}
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
deleted file mode 100644 (file)
index 9758893..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_device_h
-#define __fw_device_h
-
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/idr.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/rwsem.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-
-#include <asm/atomic.h>
-
-enum fw_device_state {
-       FW_DEVICE_INITIALIZING,
-       FW_DEVICE_RUNNING,
-       FW_DEVICE_GONE,
-       FW_DEVICE_SHUTDOWN,
-};
-
-struct fw_attribute_group {
-       struct attribute_group *groups[2];
-       struct attribute_group group;
-       struct attribute *attrs[11];
-};
-
-struct fw_node;
-struct fw_card;
-
-/*
- * Note, fw_device.generation always has to be read before fw_device.node_id.
- * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
- * to an outdated node_id if the generation was updated in the meantime due
- * to a bus reset.
- *
- * Likewise, fw-core will take care to update .node_id before .generation so
- * that whenever fw_device.generation is current WRT the actual bus generation,
- * fw_device.node_id is guaranteed to be current too.
- *
- * The same applies to fw_device.card->node_id vs. fw_device.generation.
- *
- * fw_device.config_rom and fw_device.config_rom_length may be accessed during
- * the lifetime of any fw_unit belonging to the fw_device, before device_del()
- * was called on the last fw_unit.  Alternatively, they may be accessed while
- * holding fw_device_rwsem.
- */
-struct fw_device {
-       atomic_t state;
-       struct fw_node *node;
-       int node_id;
-       int generation;
-       unsigned max_speed;
-       struct fw_card *card;
-       struct device device;
-
-       struct mutex client_list_mutex;
-       struct list_head client_list;
-
-       u32 *config_rom;
-       size_t config_rom_length;
-       int config_rom_retries;
-       unsigned cmc:1;
-       unsigned bc_implemented:2;
-
-       struct delayed_work work;
-       struct fw_attribute_group attribute_group;
-};
-
-static inline struct fw_device *fw_device(struct device *dev)
-{
-       return container_of(dev, struct fw_device, device);
-}
-
-static inline int fw_device_is_shutdown(struct fw_device *device)
-{
-       return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
-}
-
-static inline struct fw_device *fw_device_get(struct fw_device *device)
-{
-       get_device(&device->device);
-
-       return device;
-}
-
-static inline void fw_device_put(struct fw_device *device)
-{
-       put_device(&device->device);
-}
-
-struct fw_device *fw_device_get_by_devt(dev_t devt);
-int fw_device_enable_phys_dma(struct fw_device *device);
-void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
-
-void fw_device_cdev_update(struct fw_device *device);
-void fw_device_cdev_remove(struct fw_device *device);
-
-extern struct rw_semaphore fw_device_rwsem;
-extern struct idr fw_device_idr;
-extern int fw_cdev_major;
-
-/*
- * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
- */
-struct fw_unit {
-       struct device device;
-       u32 *directory;
-       struct fw_attribute_group attribute_group;
-};
-
-static inline struct fw_unit *fw_unit(struct device *dev)
-{
-       return container_of(dev, struct fw_unit, device);
-}
-
-static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
-{
-       get_device(&unit->device);
-
-       return unit;
-}
-
-static inline void fw_unit_put(struct fw_unit *unit)
-{
-       put_device(&unit->device);
-}
-
-#define CSR_OFFSET     0x40
-#define CSR_LEAF       0x80
-#define CSR_DIRECTORY  0xc0
-
-#define CSR_DESCRIPTOR         0x01
-#define CSR_VENDOR             0x03
-#define CSR_HARDWARE_VERSION   0x04
-#define CSR_NODE_CAPABILITIES  0x0c
-#define CSR_UNIT               0x11
-#define CSR_SPECIFIER_ID       0x12
-#define CSR_VERSION            0x13
-#define CSR_DEPENDENT_INFO     0x14
-#define CSR_MODEL              0x17
-#define CSR_INSTANCE           0x18
-#define CSR_DIRECTORY_ID       0x20
-
-struct fw_csr_iterator {
-       u32 *p;
-       u32 *end;
-};
-
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
-int fw_csr_iterator_next(struct fw_csr_iterator *ci,
-                        int *key, int *value);
-
-#define FW_MATCH_VENDOR                0x0001
-#define FW_MATCH_MODEL         0x0002
-#define FW_MATCH_SPECIFIER_ID  0x0004
-#define FW_MATCH_VERSION       0x0008
-
-struct fw_device_id {
-       u32 match_flags;
-       u32 vendor;
-       u32 model;
-       u32 specifier_id;
-       u32 version;
-       void *driver_data;
-};
-
-struct fw_driver {
-       struct device_driver driver;
-       /* Called when the parent device sits through a bus reset. */
-       void (*update) (struct fw_unit *unit);
-       const struct fw_device_id *id_table;
-};
-
-static inline struct fw_driver *fw_driver(struct device_driver *drv)
-{
-       return container_of(drv, struct fw_driver, driver);
-}
-
-extern const struct file_operations fw_device_ops;
-
-#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
deleted file mode 100644 (file)
index 2baf100..0000000
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Isochronous I/O functionality:
- *   - Isochronous DMA context management
- *   - Isochronous bus resource management (channels, bandwidth), client side
- *
- * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/errno.h>
-#include <linux/firewire-constants.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-
-#include "fw-topology.h"
-#include "fw-transaction.h"
-
-/*
- * Isochronous DMA context management
- */
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-                      int page_count, enum dma_data_direction direction)
-{
-       int i, j;
-       dma_addr_t address;
-
-       buffer->page_count = page_count;
-       buffer->direction = direction;
-
-       buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
-                               GFP_KERNEL);
-       if (buffer->pages == NULL)
-               goto out;
-
-       for (i = 0; i < buffer->page_count; i++) {
-               buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-               if (buffer->pages[i] == NULL)
-                       goto out_pages;
-
-               address = dma_map_page(card->device, buffer->pages[i],
-                                      0, PAGE_SIZE, direction);
-               if (dma_mapping_error(card->device, address)) {
-                       __free_page(buffer->pages[i]);
-                       goto out_pages;
-               }
-               set_page_private(buffer->pages[i], address);
-       }
-
-       return 0;
-
- out_pages:
-       for (j = 0; j < i; j++) {
-               address = page_private(buffer->pages[j]);
-               dma_unmap_page(card->device, address,
-                              PAGE_SIZE, DMA_TO_DEVICE);
-               __free_page(buffer->pages[j]);
-       }
-       kfree(buffer->pages);
- out:
-       buffer->pages = NULL;
-
-       return -ENOMEM;
-}
-
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
-{
-       unsigned long uaddr;
-       int i, err;
-
-       uaddr = vma->vm_start;
-       for (i = 0; i < buffer->page_count; i++) {
-               err = vm_insert_page(vma, uaddr, buffer->pages[i]);
-               if (err)
-                       return err;
-
-               uaddr += PAGE_SIZE;
-       }
-
-       return 0;
-}
-
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
-                          struct fw_card *card)
-{
-       int i;
-       dma_addr_t address;
-
-       for (i = 0; i < buffer->page_count; i++) {
-               address = page_private(buffer->pages[i]);
-               dma_unmap_page(card->device, address,
-                              PAGE_SIZE, DMA_TO_DEVICE);
-               __free_page(buffer->pages[i]);
-       }
-
-       kfree(buffer->pages);
-       buffer->pages = NULL;
-}
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
-               int type, int channel, int speed, size_t header_size,
-               fw_iso_callback_t callback, void *callback_data)
-{
-       struct fw_iso_context *ctx;
-
-       ctx = card->driver->allocate_iso_context(card,
-                                                type, channel, header_size);
-       if (IS_ERR(ctx))
-               return ctx;
-
-       ctx->card = card;
-       ctx->type = type;
-       ctx->channel = channel;
-       ctx->speed = speed;
-       ctx->header_size = header_size;
-       ctx->callback = callback;
-       ctx->callback_data = callback_data;
-
-       return ctx;
-}
-
-void fw_iso_context_destroy(struct fw_iso_context *ctx)
-{
-       struct fw_card *card = ctx->card;
-
-       card->driver->free_iso_context(ctx);
-}
-
-int fw_iso_context_start(struct fw_iso_context *ctx,
-                        int cycle, int sync, int tags)
-{
-       return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
-}
-
-int fw_iso_context_queue(struct fw_iso_context *ctx,
-                        struct fw_iso_packet *packet,
-                        struct fw_iso_buffer *buffer,
-                        unsigned long payload)
-{
-       struct fw_card *card = ctx->card;
-
-       return card->driver->queue_iso(ctx, packet, buffer, payload);
-}
-
-int fw_iso_context_stop(struct fw_iso_context *ctx)
-{
-       return ctx->card->driver->stop_iso(ctx);
-}
-
-/*
- * Isochronous bus resource management (channels, bandwidth), client side
- */
-
-static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
-                           int bandwidth, bool allocate)
-{
-       __be32 data[2];
-       int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
-
-       /*
-        * On a 1394a IRM with low contention, try < 1 is enough.
-        * On a 1394-1995 IRM, we need at least try < 2.
-        * Let's just do try < 5.
-        */
-       for (try = 0; try < 5; try++) {
-               new = allocate ? old - bandwidth : old + bandwidth;
-               if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
-                       break;
-
-               data[0] = cpu_to_be32(old);
-               data[1] = cpu_to_be32(new);
-               switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
-                               irm_id, generation, SCODE_100,
-                               CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
-                               data, sizeof(data))) {
-               case RCODE_GENERATION:
-                       /* A generation change frees all bandwidth. */
-                       return allocate ? -EAGAIN : bandwidth;
-
-               case RCODE_COMPLETE:
-                       if (be32_to_cpup(data) == old)
-                               return bandwidth;
-
-                       old = be32_to_cpup(data);
-                       /* Fall through. */
-               }
-       }
-
-       return -EIO;
-}
-
-static int manage_channel(struct fw_card *card, int irm_id, int generation,
-                         u32 channels_mask, u64 offset, bool allocate)
-{
-       __be32 data[2], c, all, old;
-       int i, retry = 5;
-
-       old = all = allocate ? cpu_to_be32(~0) : 0;
-
-       for (i = 0; i < 32; i++) {
-               if (!(channels_mask & 1 << i))
-                       continue;
-
-               c = cpu_to_be32(1 << (31 - i));
-               if ((old & c) != (all & c))
-                       continue;
-
-               data[0] = old;
-               data[1] = old ^ c;
-               switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
-                                          irm_id, generation, SCODE_100,
-                                          offset, data, sizeof(data))) {
-               case RCODE_GENERATION:
-                       /* A generation change frees all channels. */
-                       return allocate ? -EAGAIN : i;
-
-               case RCODE_COMPLETE:
-                       if (data[0] == old)
-                               return i;
-
-                       old = data[0];
-
-                       /* Is the IRM 1394a-2000 compliant? */
-                       if ((data[0] & c) == (data[1] & c))
-                               continue;
-
-                       /* 1394-1995 IRM, fall through to retry. */
-               default:
-                       if (retry--)
-                               i--;
-               }
-       }
-
-       return -EIO;
-}
-
-static void deallocate_channel(struct fw_card *card, int irm_id,
-                              int generation, int channel)
-{
-       u32 mask;
-       u64 offset;
-
-       mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
-       offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
-                               CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
-
-       manage_channel(card, irm_id, generation, mask, offset, false);
-}
-
-/**
- * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
- *
- * In parameters: card, generation, channels_mask, bandwidth, allocate
- * Out parameters: channel, bandwidth
- * This function blocks (sleeps) during communication with the IRM.
- *
- * Allocates or deallocates at most one channel out of channels_mask.
- * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
- * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
- * channel 0 and LSB for channel 63.)
- * Allocates or deallocates as many bandwidth allocation units as specified.
- *
- * Returns channel < 0 if no channel was allocated or deallocated.
- * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
- *
- * If generation is stale, deallocations succeed but allocations fail with
- * channel = -EAGAIN.
- *
- * If channel allocation fails, no bandwidth will be allocated either.
- * If bandwidth allocation fails, no channel will be allocated either.
- * But deallocations of channel and bandwidth are tried independently
- * of each other's success.
- */
-void fw_iso_resource_manage(struct fw_card *card, int generation,
-                           u64 channels_mask, int *channel, int *bandwidth,
-                           bool allocate)
-{
-       u32 channels_hi = channels_mask;        /* channels 31...0 */
-       u32 channels_lo = channels_mask >> 32;  /* channels 63...32 */
-       int irm_id, ret, c = -EINVAL;
-
-       spin_lock_irq(&card->lock);
-       irm_id = card->irm_node->node_id;
-       spin_unlock_irq(&card->lock);
-
-       if (channels_hi)
-               c = manage_channel(card, irm_id, generation, channels_hi,
-                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
-       if (channels_lo && c < 0) {
-               c = manage_channel(card, irm_id, generation, channels_lo,
-                   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
-               if (c >= 0)
-                       c += 32;
-       }
-       *channel = c;
-
-       if (allocate && channels_mask != 0 && c < 0)
-               *bandwidth = 0;
-
-       if (*bandwidth == 0)
-               return;
-
-       ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
-       if (ret < 0)
-               *bandwidth = 0;
-
-       if (allocate && ret < 0 && c >= 0) {
-               deallocate_channel(card, irm_id, generation, c);
-               *channel = ret;
-       }
-}
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
deleted file mode 100644 (file)
index 1180d0b..0000000
+++ /dev/null
@@ -1,2629 +0,0 @@
-/*
- * Driver for OHCI 1394 controllers
- *
- * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/compiler.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-
-#include <asm/page.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_PPC_PMAC
-#include <asm/pmac_feature.h>
-#endif
-
-#include "fw-ohci.h"
-#include "fw-transaction.h"
-
-#define DESCRIPTOR_OUTPUT_MORE         0
-#define DESCRIPTOR_OUTPUT_LAST         (1 << 12)
-#define DESCRIPTOR_INPUT_MORE          (2 << 12)
-#define DESCRIPTOR_INPUT_LAST          (3 << 12)
-#define DESCRIPTOR_STATUS              (1 << 11)
-#define DESCRIPTOR_KEY_IMMEDIATE       (2 << 8)
-#define DESCRIPTOR_PING                        (1 << 7)
-#define DESCRIPTOR_YY                  (1 << 6)
-#define DESCRIPTOR_NO_IRQ              (0 << 4)
-#define DESCRIPTOR_IRQ_ERROR           (1 << 4)
-#define DESCRIPTOR_IRQ_ALWAYS          (3 << 4)
-#define DESCRIPTOR_BRANCH_ALWAYS       (3 << 2)
-#define DESCRIPTOR_WAIT                        (3 << 0)
-
-struct descriptor {
-       __le16 req_count;
-       __le16 control;
-       __le32 data_address;
-       __le32 branch_address;
-       __le16 res_count;
-       __le16 transfer_status;
-} __attribute__((aligned(16)));
-
-struct db_descriptor {
-       __le16 first_size;
-       __le16 control;
-       __le16 second_req_count;
-       __le16 first_req_count;
-       __le32 branch_address;
-       __le16 second_res_count;
-       __le16 first_res_count;
-       __le32 reserved0;
-       __le32 first_buffer;
-       __le32 second_buffer;
-       __le32 reserved1;
-} __attribute__((aligned(16)));
-
-#define CONTROL_SET(regs)      (regs)
-#define CONTROL_CLEAR(regs)    ((regs) + 4)
-#define COMMAND_PTR(regs)      ((regs) + 12)
-#define CONTEXT_MATCH(regs)    ((regs) + 16)
-
-struct ar_buffer {
-       struct descriptor descriptor;
-       struct ar_buffer *next;
-       __le32 data[0];
-};
-
-struct ar_context {
-       struct fw_ohci *ohci;
-       struct ar_buffer *current_buffer;
-       struct ar_buffer *last_buffer;
-       void *pointer;
-       u32 regs;
-       struct tasklet_struct tasklet;
-};
-
-struct context;
-
-typedef int (*descriptor_callback_t)(struct context *ctx,
-                                    struct descriptor *d,
-                                    struct descriptor *last);
-
-/*
- * A buffer that contains a block of DMA-able coherent memory used for
- * storing a portion of a DMA descriptor program.
- */
-struct descriptor_buffer {
-       struct list_head list;
-       dma_addr_t buffer_bus;
-       size_t buffer_size;
-       size_t used;
-       struct descriptor buffer[0];
-};
-
-struct context {
-       struct fw_ohci *ohci;
-       u32 regs;
-       int total_allocation;
-
-       /*
-        * List of page-sized buffers for storing DMA descriptors.
-        * Head of list contains buffers in use and tail of list contains
-        * free buffers.
-        */
-       struct list_head buffer_list;
-
-       /*
-        * Pointer to a buffer inside buffer_list that contains the tail
-        * end of the current DMA program.
-        */
-       struct descriptor_buffer *buffer_tail;
-
-       /*
-        * The descriptor containing the branch address of the first
-        * descriptor that has not yet been filled by the device.
-        */
-       struct descriptor *last;
-
-       /*
-        * The last descriptor in the DMA program.  It contains the branch
-        * address that must be updated upon appending a new descriptor.
-        */
-       struct descriptor *prev;
-
-       descriptor_callback_t callback;
-
-       struct tasklet_struct tasklet;
-};
-
-#define IT_HEADER_SY(v)          ((v) <<  0)
-#define IT_HEADER_TCODE(v)       ((v) <<  4)
-#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
-#define IT_HEADER_TAG(v)         ((v) << 14)
-#define IT_HEADER_SPEED(v)       ((v) << 16)
-#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
-
-struct iso_context {
-       struct fw_iso_context base;
-       struct context context;
-       int excess_bytes;
-       void *header;
-       size_t header_length;
-};
-
-#define CONFIG_ROM_SIZE 1024
-
-struct fw_ohci {
-       struct fw_card card;
-
-       __iomem char *registers;
-       dma_addr_t self_id_bus;
-       __le32 *self_id_cpu;
-       struct tasklet_struct bus_reset_tasklet;
-       int node_id;
-       int generation;
-       int request_generation; /* for timestamping incoming requests */
-       u32 bus_seconds;
-
-       bool use_dualbuffer;
-       bool old_uninorth;
-       bool bus_reset_packet_quirk;
-
-       /*
-        * Spinlock for accessing fw_ohci data.  Never call out of
-        * this driver with this lock held.
-        */
-       spinlock_t lock;
-       u32 self_id_buffer[512];
-
-       /* Config rom buffers */
-       __be32 *config_rom;
-       dma_addr_t config_rom_bus;
-       __be32 *next_config_rom;
-       dma_addr_t next_config_rom_bus;
-       u32 next_header;
-
-       struct ar_context ar_request_ctx;
-       struct ar_context ar_response_ctx;
-       struct context at_request_ctx;
-       struct context at_response_ctx;
-
-       u32 it_context_mask;
-       struct iso_context *it_context_list;
-       u64 ir_context_channels;
-       u32 ir_context_mask;
-       struct iso_context *ir_context_list;
-};
-
-static inline struct fw_ohci *fw_ohci(struct fw_card *card)
-{
-       return container_of(card, struct fw_ohci, card);
-}
-
-#define IT_CONTEXT_CYCLE_MATCH_ENABLE  0x80000000
-#define IR_CONTEXT_BUFFER_FILL         0x80000000
-#define IR_CONTEXT_ISOCH_HEADER                0x40000000
-#define IR_CONTEXT_CYCLE_MATCH_ENABLE  0x20000000
-#define IR_CONTEXT_MULTI_CHANNEL_MODE  0x10000000
-#define IR_CONTEXT_DUAL_BUFFER_MODE    0x08000000
-
-#define CONTEXT_RUN    0x8000
-#define CONTEXT_WAKE   0x1000
-#define CONTEXT_DEAD   0x0800
-#define CONTEXT_ACTIVE 0x0400
-
-#define OHCI1394_MAX_AT_REQ_RETRIES    0xf
-#define OHCI1394_MAX_AT_RESP_RETRIES   0x2
-#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
-
-#define FW_OHCI_MAJOR                  240
-#define OHCI1394_REGISTER_SIZE         0x800
-#define OHCI_LOOP_COUNT                        500
-#define OHCI1394_PCI_HCI_Control       0x40
-#define SELF_ID_BUF_SIZE               0x800
-#define OHCI_TCODE_PHY_PACKET          0x0e
-#define OHCI_VERSION_1_1               0x010010
-
-static char ohci_driver_name[] = KBUILD_MODNAME;
-
-#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
-
-#define OHCI_PARAM_DEBUG_AT_AR         1
-#define OHCI_PARAM_DEBUG_SELFIDS       2
-#define OHCI_PARAM_DEBUG_IRQS          4
-#define OHCI_PARAM_DEBUG_BUSRESETS     8 /* only effective before chip init */
-
-static int param_debug;
-module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
-       ", AT/AR events = "     __stringify(OHCI_PARAM_DEBUG_AT_AR)
-       ", self-IDs = "         __stringify(OHCI_PARAM_DEBUG_SELFIDS)
-       ", IRQs = "             __stringify(OHCI_PARAM_DEBUG_IRQS)
-       ", busReset events = "  __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
-       ", or a combination, or all = -1)");
-
-static void log_irqs(u32 evt)
-{
-       if (likely(!(param_debug &
-                       (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
-               return;
-
-       if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
-           !(evt & OHCI1394_busReset))
-               return;
-
-       fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
-           evt & OHCI1394_selfIDComplete       ? " selfID"             : "",
-           evt & OHCI1394_RQPkt                ? " AR_req"             : "",
-           evt & OHCI1394_RSPkt                ? " AR_resp"            : "",
-           evt & OHCI1394_reqTxComplete        ? " AT_req"             : "",
-           evt & OHCI1394_respTxComplete       ? " AT_resp"            : "",
-           evt & OHCI1394_isochRx              ? " IR"                 : "",
-           evt & OHCI1394_isochTx              ? " IT"                 : "",
-           evt & OHCI1394_postedWriteErr       ? " postedWriteErr"     : "",
-           evt & OHCI1394_cycleTooLong         ? " cycleTooLong"       : "",
-           evt & OHCI1394_cycle64Seconds       ? " cycle64Seconds"     : "",
-           evt & OHCI1394_regAccessFail        ? " regAccessFail"      : "",
-           evt & OHCI1394_busReset             ? " busReset"           : "",
-           evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
-                   OHCI1394_RSPkt | OHCI1394_reqTxComplete |
-                   OHCI1394_respTxComplete | OHCI1394_isochRx |
-                   OHCI1394_isochTx | OHCI1394_postedWriteErr |
-                   OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
-                   OHCI1394_regAccessFail | OHCI1394_busReset)
-                                               ? " ?"                  : "");
-}
-
-static const char *speed[] = {
-       [0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
-};
-static const char *power[] = {
-       [0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
-       [4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
-};
-static const char port[] = { '.', '-', 'p', 'c', };
-
-static char _p(u32 *s, int shift)
-{
-       return port[*s >> shift & 3];
-}
-
-static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
-{
-       if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
-               return;
-
-       fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
-                 self_id_count, generation, node_id);
-
-       for (; self_id_count--; ++s)
-               if ((*s & 1 << 23) == 0)
-                       fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
-                           "%s gc=%d %s %s%s%s\n",
-                           *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
-                           speed[*s >> 14 & 3], *s >> 16 & 63,
-                           power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
-                           *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
-               else
-                       fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
-                           *s, *s >> 24 & 63,
-                           _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
-                           _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
-}
-
-static const char *evts[] = {
-       [0x00] = "evt_no_status",       [0x01] = "-reserved-",
-       [0x02] = "evt_long_packet",     [0x03] = "evt_missing_ack",
-       [0x04] = "evt_underrun",        [0x05] = "evt_overrun",
-       [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
-       [0x08] = "evt_data_write",      [0x09] = "evt_bus_reset",
-       [0x0a] = "evt_timeout",         [0x0b] = "evt_tcode_err",
-       [0x0c] = "-reserved-",          [0x0d] = "-reserved-",
-       [0x0e] = "evt_unknown",         [0x0f] = "evt_flushed",
-       [0x10] = "-reserved-",          [0x11] = "ack_complete",
-       [0x12] = "ack_pending ",        [0x13] = "-reserved-",
-       [0x14] = "ack_busy_X",          [0x15] = "ack_busy_A",
-       [0x16] = "ack_busy_B",          [0x17] = "-reserved-",
-       [0x18] = "-reserved-",          [0x19] = "-reserved-",
-       [0x1a] = "-reserved-",          [0x1b] = "ack_tardy",
-       [0x1c] = "-reserved-",          [0x1d] = "ack_data_error",
-       [0x1e] = "ack_type_error",      [0x1f] = "-reserved-",
-       [0x20] = "pending/cancelled",
-};
-static const char *tcodes[] = {
-       [0x0] = "QW req",               [0x1] = "BW req",
-       [0x2] = "W resp",               [0x3] = "-reserved-",
-       [0x4] = "QR req",               [0x5] = "BR req",
-       [0x6] = "QR resp",              [0x7] = "BR resp",
-       [0x8] = "cycle start",          [0x9] = "Lk req",
-       [0xa] = "async stream packet",  [0xb] = "Lk resp",
-       [0xc] = "-reserved-",           [0xd] = "-reserved-",
-       [0xe] = "link internal",        [0xf] = "-reserved-",
-};
-static const char *phys[] = {
-       [0x0] = "phy config packet",    [0x1] = "link-on packet",
-       [0x2] = "self-id packet",       [0x3] = "-reserved-",
-};
-
-static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
-{
-       int tcode = header[0] >> 4 & 0xf;
-       char specific[12];
-
-       if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
-               return;
-
-       if (unlikely(evt >= ARRAY_SIZE(evts)))
-                       evt = 0x1f;
-
-       if (evt == OHCI1394_evt_bus_reset) {
-               fw_notify("A%c evt_bus_reset, generation %d\n",
-                   dir, (header[2] >> 16) & 0xff);
-               return;
-       }
-
-       if (header[0] == ~header[1]) {
-               fw_notify("A%c %s, %s, %08x\n",
-                   dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
-               return;
-       }
-
-       switch (tcode) {
-       case 0x0: case 0x6: case 0x8:
-               snprintf(specific, sizeof(specific), " = %08x",
-                        be32_to_cpu((__force __be32)header[3]));
-               break;
-       case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
-               snprintf(specific, sizeof(specific), " %x,%x",
-                        header[3] >> 16, header[3] & 0xffff);
-               break;
-       default:
-               specific[0] = '\0';
-       }
-
-       switch (tcode) {
-       case 0xe: case 0xa:
-               fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
-               break;
-       case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
-               fw_notify("A%c spd %x tl %02x, "
-                   "%04x -> %04x, %s, "
-                   "%s, %04x%08x%s\n",
-                   dir, speed, header[0] >> 10 & 0x3f,
-                   header[1] >> 16, header[0] >> 16, evts[evt],
-                   tcodes[tcode], header[1] & 0xffff, header[2], specific);
-               break;
-       default:
-               fw_notify("A%c spd %x tl %02x, "
-                   "%04x -> %04x, %s, "
-                   "%s%s\n",
-                   dir, speed, header[0] >> 10 & 0x3f,
-                   header[1] >> 16, header[0] >> 16, evts[evt],
-                   tcodes[tcode], specific);
-       }
-}
-
-#else
-
-#define log_irqs(evt)
-#define log_selfids(node_id, generation, self_id_count, sid)
-#define log_ar_at_event(dir, speed, header, evt)
-
-#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
-
-static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
-{
-       writel(data, ohci->registers + offset);
-}
-
-static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
-{
-       return readl(ohci->registers + offset);
-}
-
-static inline void flush_writes(const struct fw_ohci *ohci)
-{
-       /* Do a dummy read to flush writes. */
-       reg_read(ohci, OHCI1394_Version);
-}
-
-static int ohci_update_phy_reg(struct fw_card *card, int addr,
-                              int clear_bits, int set_bits)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       u32 val, old;
-
-       reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
-       flush_writes(ohci);
-       msleep(2);
-       val = reg_read(ohci, OHCI1394_PhyControl);
-       if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
-               fw_error("failed to set phy reg bits.\n");
-               return -EBUSY;
-       }
-
-       old = OHCI1394_PhyControl_ReadData(val);
-       old = (old & ~clear_bits) | set_bits;
-       reg_write(ohci, OHCI1394_PhyControl,
-                 OHCI1394_PhyControl_Write(addr, old));
-
-       return 0;
-}
-
-static int ar_context_add_page(struct ar_context *ctx)
-{
-       struct device *dev = ctx->ohci->card.device;
-       struct ar_buffer *ab;
-       dma_addr_t uninitialized_var(ab_bus);
-       size_t offset;
-
-       ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-       if (ab == NULL)
-               return -ENOMEM;
-
-       ab->next = NULL;
-       memset(&ab->descriptor, 0, sizeof(ab->descriptor));
-       ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
-                                                   DESCRIPTOR_STATUS |
-                                                   DESCRIPTOR_BRANCH_ALWAYS);
-       offset = offsetof(struct ar_buffer, data);
-       ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
-       ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
-       ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
-       ab->descriptor.branch_address = 0;
-
-       ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
-       ctx->last_buffer->next = ab;
-       ctx->last_buffer = ab;
-
-       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
-       flush_writes(ctx->ohci);
-
-       return 0;
-}
-
-static void ar_context_release(struct ar_context *ctx)
-{
-       struct ar_buffer *ab, *ab_next;
-       size_t offset;
-       dma_addr_t ab_bus;
-
-       for (ab = ctx->current_buffer; ab; ab = ab_next) {
-               ab_next = ab->next;
-               offset = offsetof(struct ar_buffer, data);
-               ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-               dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
-                                 ab, ab_bus);
-       }
-}
-
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-#define cond_le32_to_cpu(v) \
-       (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
-#else
-#define cond_le32_to_cpu(v) le32_to_cpu(v)
-#endif
-
-static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
-{
-       struct fw_ohci *ohci = ctx->ohci;
-       struct fw_packet p;
-       u32 status, length, tcode;
-       int evt;
-
-       p.header[0] = cond_le32_to_cpu(buffer[0]);
-       p.header[1] = cond_le32_to_cpu(buffer[1]);
-       p.header[2] = cond_le32_to_cpu(buffer[2]);
-
-       tcode = (p.header[0] >> 4) & 0x0f;
-       switch (tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-       case TCODE_READ_QUADLET_RESPONSE:
-               p.header[3] = (__force __u32) buffer[3];
-               p.header_length = 16;
-               p.payload_length = 0;
-               break;
-
-       case TCODE_READ_BLOCK_REQUEST :
-               p.header[3] = cond_le32_to_cpu(buffer[3]);
-               p.header_length = 16;
-               p.payload_length = 0;
-               break;
-
-       case TCODE_WRITE_BLOCK_REQUEST:
-       case TCODE_READ_BLOCK_RESPONSE:
-       case TCODE_LOCK_REQUEST:
-       case TCODE_LOCK_RESPONSE:
-               p.header[3] = cond_le32_to_cpu(buffer[3]);
-               p.header_length = 16;
-               p.payload_length = p.header[3] >> 16;
-               break;
-
-       case TCODE_WRITE_RESPONSE:
-       case TCODE_READ_QUADLET_REQUEST:
-       case OHCI_TCODE_PHY_PACKET:
-               p.header_length = 12;
-               p.payload_length = 0;
-               break;
-
-       default:
-               /* FIXME: Stop context, discard everything, and restart? */
-               p.header_length = 0;
-               p.payload_length = 0;
-       }
-
-       p.payload = (void *) buffer + p.header_length;
-
-       /* FIXME: What to do about evt_* errors? */
-       length = (p.header_length + p.payload_length + 3) / 4;
-       status = cond_le32_to_cpu(buffer[length]);
-       evt    = (status >> 16) & 0x1f;
-
-       p.ack        = evt - 16;
-       p.speed      = (status >> 21) & 0x7;
-       p.timestamp  = status & 0xffff;
-       p.generation = ohci->request_generation;
-
-       log_ar_at_event('R', p.speed, p.header, evt);
-
-       /*
-        * The OHCI bus reset handler synthesizes a phy packet with
-        * the new generation number when a bus reset happens (see
-        * section 8.4.2.3).  This helps us determine when a request
-        * was received and make sure we send the response in the same
-        * generation.  We only need this for requests; for responses
-        * we use the unique tlabel for finding the matching
-        * request.
-        *
-        * Alas some chips sometimes emit bus reset packets with a
-        * wrong generation.  We set the correct generation for these
-        * at a slightly incorrect time (in bus_reset_tasklet).
-        */
-       if (evt == OHCI1394_evt_bus_reset) {
-               if (!ohci->bus_reset_packet_quirk)
-                       ohci->request_generation = (p.header[2] >> 16) & 0xff;
-       } else if (ctx == &ohci->ar_request_ctx) {
-               fw_core_handle_request(&ohci->card, &p);
-       } else {
-               fw_core_handle_response(&ohci->card, &p);
-       }
-
-       return buffer + length + 1;
-}
-
-static void ar_context_tasklet(unsigned long data)
-{
-       struct ar_context *ctx = (struct ar_context *)data;
-       struct fw_ohci *ohci = ctx->ohci;
-       struct ar_buffer *ab;
-       struct descriptor *d;
-       void *buffer, *end;
-
-       ab = ctx->current_buffer;
-       d = &ab->descriptor;
-
-       if (d->res_count == 0) {
-               size_t size, rest, offset;
-               dma_addr_t start_bus;
-               void *start;
-
-               /*
-                * This descriptor is finished and we may have a
-                * packet split across this and the next buffer. We
-                * reuse the page for reassembling the split packet.
-                */
-
-               offset = offsetof(struct ar_buffer, data);
-               start = buffer = ab;
-               start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-
-               ab = ab->next;
-               d = &ab->descriptor;
-               size = buffer + PAGE_SIZE - ctx->pointer;
-               rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
-               memmove(buffer, ctx->pointer, size);
-               memcpy(buffer + size, ab->data, rest);
-               ctx->current_buffer = ab;
-               ctx->pointer = (void *) ab->data + rest;
-               end = buffer + size + rest;
-
-               while (buffer < end)
-                       buffer = handle_ar_packet(ctx, buffer);
-
-               dma_free_coherent(ohci->card.device, PAGE_SIZE,
-                                 start, start_bus);
-               ar_context_add_page(ctx);
-       } else {
-               buffer = ctx->pointer;
-               ctx->pointer = end =
-                       (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
-
-               while (buffer < end)
-                       buffer = handle_ar_packet(ctx, buffer);
-       }
-}
-
-static int ar_context_init(struct ar_context *ctx,
-                          struct fw_ohci *ohci, u32 regs)
-{
-       struct ar_buffer ab;
-
-       ctx->regs        = regs;
-       ctx->ohci        = ohci;
-       ctx->last_buffer = &ab;
-       tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
-
-       ar_context_add_page(ctx);
-       ar_context_add_page(ctx);
-       ctx->current_buffer = ab.next;
-       ctx->pointer = ctx->current_buffer->data;
-
-       return 0;
-}
-
-static void ar_context_run(struct ar_context *ctx)
-{
-       struct ar_buffer *ab = ctx->current_buffer;
-       dma_addr_t ab_bus;
-       size_t offset;
-
-       offset = offsetof(struct ar_buffer, data);
-       ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-
-       reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
-       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
-       flush_writes(ctx->ohci);
-}
-
-static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
-{
-       int b, key;
-
-       b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
-       key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
-
-       /* figure out which descriptor the branch address goes in */
-       if (z == 2 && (b == 3 || key == 2))
-               return d;
-       else
-               return d + z - 1;
-}
-
-static void context_tasklet(unsigned long data)
-{
-       struct context *ctx = (struct context *) data;
-       struct descriptor *d, *last;
-       u32 address;
-       int z;
-       struct descriptor_buffer *desc;
-
-       desc = list_entry(ctx->buffer_list.next,
-                       struct descriptor_buffer, list);
-       last = ctx->last;
-       while (last->branch_address != 0) {
-               struct descriptor_buffer *old_desc = desc;
-               address = le32_to_cpu(last->branch_address);
-               z = address & 0xf;
-               address &= ~0xf;
-
-               /* If the branch address points to a buffer outside of the
-                * current buffer, advance to the next buffer. */
-               if (address < desc->buffer_bus ||
-                               address >= desc->buffer_bus + desc->used)
-                       desc = list_entry(desc->list.next,
-                                       struct descriptor_buffer, list);
-               d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
-               last = find_branch_descriptor(d, z);
-
-               if (!ctx->callback(ctx, d, last))
-                       break;
-
-               if (old_desc != desc) {
-                       /* If we've advanced to the next buffer, move the
-                        * previous buffer to the free list. */
-                       unsigned long flags;
-                       old_desc->used = 0;
-                       spin_lock_irqsave(&ctx->ohci->lock, flags);
-                       list_move_tail(&old_desc->list, &ctx->buffer_list);
-                       spin_unlock_irqrestore(&ctx->ohci->lock, flags);
-               }
-               ctx->last = last;
-       }
-}
-
-/*
- * Allocate a new buffer and add it to the list of free buffers for this
- * context.  Must be called with ohci->lock held.
- */
-static int context_add_buffer(struct context *ctx)
-{
-       struct descriptor_buffer *desc;
-       dma_addr_t uninitialized_var(bus_addr);
-       int offset;
-
-       /*
-        * 16MB of descriptors should be far more than enough for any DMA
-        * program.  This will catch run-away userspace or DoS attacks.
-        */
-       if (ctx->total_allocation >= 16*1024*1024)
-               return -ENOMEM;
-
-       desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
-                       &bus_addr, GFP_ATOMIC);
-       if (!desc)
-               return -ENOMEM;
-
-       offset = (void *)&desc->buffer - (void *)desc;
-       desc->buffer_size = PAGE_SIZE - offset;
-       desc->buffer_bus = bus_addr + offset;
-       desc->used = 0;
-
-       list_add_tail(&desc->list, &ctx->buffer_list);
-       ctx->total_allocation += PAGE_SIZE;
-
-       return 0;
-}
-
-static int context_init(struct context *ctx, struct fw_ohci *ohci,
-                       u32 regs, descriptor_callback_t callback)
-{
-       ctx->ohci = ohci;
-       ctx->regs = regs;
-       ctx->total_allocation = 0;
-
-       INIT_LIST_HEAD(&ctx->buffer_list);
-       if (context_add_buffer(ctx) < 0)
-               return -ENOMEM;
-
-       ctx->buffer_tail = list_entry(ctx->buffer_list.next,
-                       struct descriptor_buffer, list);
-
-       tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
-       ctx->callback = callback;
-
-       /*
-        * We put a dummy descriptor in the buffer that has a NULL
-        * branch address and looks like it's been sent.  That way we
-        * have a descriptor to append DMA programs to.
-        */
-       memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
-       ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
-       ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
-       ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
-       ctx->last = ctx->buffer_tail->buffer;
-       ctx->prev = ctx->buffer_tail->buffer;
-
-       return 0;
-}
-
-static void context_release(struct context *ctx)
-{
-       struct fw_card *card = &ctx->ohci->card;
-       struct descriptor_buffer *desc, *tmp;
-
-       list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
-               dma_free_coherent(card->device, PAGE_SIZE, desc,
-                       desc->buffer_bus -
-                       ((void *)&desc->buffer - (void *)desc));
-}
-
-/* Must be called with ohci->lock held */
-static struct descriptor *context_get_descriptors(struct context *ctx,
-                                                 int z, dma_addr_t *d_bus)
-{
-       struct descriptor *d = NULL;
-       struct descriptor_buffer *desc = ctx->buffer_tail;
-
-       if (z * sizeof(*d) > desc->buffer_size)
-               return NULL;
-
-       if (z * sizeof(*d) > desc->buffer_size - desc->used) {
-               /* No room for the descriptor in this buffer, so advance to the
-                * next one. */
-
-               if (desc->list.next == &ctx->buffer_list) {
-                       /* If there is no free buffer next in the list,
-                        * allocate one. */
-                       if (context_add_buffer(ctx) < 0)
-                               return NULL;
-               }
-               desc = list_entry(desc->list.next,
-                               struct descriptor_buffer, list);
-               ctx->buffer_tail = desc;
-       }
-
-       d = desc->buffer + desc->used / sizeof(*d);
-       memset(d, 0, z * sizeof(*d));
-       *d_bus = desc->buffer_bus + desc->used;
-
-       return d;
-}
-
-static void context_run(struct context *ctx, u32 extra)
-{
-       struct fw_ohci *ohci = ctx->ohci;
-
-       reg_write(ohci, COMMAND_PTR(ctx->regs),
-                 le32_to_cpu(ctx->last->branch_address));
-       reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
-       reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
-       flush_writes(ohci);
-}
-
-static void context_append(struct context *ctx,
-                          struct descriptor *d, int z, int extra)
-{
-       dma_addr_t d_bus;
-       struct descriptor_buffer *desc = ctx->buffer_tail;
-
-       d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
-
-       desc->used += (z + extra) * sizeof(*d);
-       ctx->prev->branch_address = cpu_to_le32(d_bus | z);
-       ctx->prev = find_branch_descriptor(d, z);
-
-       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
-       flush_writes(ctx->ohci);
-}
-
-static void context_stop(struct context *ctx)
-{
-       u32 reg;
-       int i;
-
-       reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
-       flush_writes(ctx->ohci);
-
-       for (i = 0; i < 10; i++) {
-               reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
-               if ((reg & CONTEXT_ACTIVE) == 0)
-                       return;
-
-               mdelay(1);
-       }
-       fw_error("Error: DMA context still active (0x%08x)\n", reg);
-}
-
-struct driver_data {
-       struct fw_packet *packet;
-};
-
-/*
- * This function apppends a packet to the DMA queue for transmission.
- * Must always be called with the ochi->lock held to ensure proper
- * generation handling and locking around packet queue manipulation.
- */
-static int at_context_queue_packet(struct context *ctx,
-                                  struct fw_packet *packet)
-{
-       struct fw_ohci *ohci = ctx->ohci;
-       dma_addr_t d_bus, uninitialized_var(payload_bus);
-       struct driver_data *driver_data;
-       struct descriptor *d, *last;
-       __le32 *header;
-       int z, tcode;
-       u32 reg;
-
-       d = context_get_descriptors(ctx, 4, &d_bus);
-       if (d == NULL) {
-               packet->ack = RCODE_SEND_ERROR;
-               return -1;
-       }
-
-       d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
-       d[0].res_count = cpu_to_le16(packet->timestamp);
-
-       /*
-        * The DMA format for asyncronous link packets is different
-        * from the IEEE1394 layout, so shift the fields around
-        * accordingly.  If header_length is 8, it's a PHY packet, to
-        * which we need to prepend an extra quadlet.
-        */
-
-       header = (__le32 *) &d[1];
-       switch (packet->header_length) {
-       case 16:
-       case 12:
-               header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
-                                       (packet->speed << 16));
-               header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
-                                       (packet->header[0] & 0xffff0000));
-               header[2] = cpu_to_le32(packet->header[2]);
-
-               tcode = (packet->header[0] >> 4) & 0x0f;
-               if (TCODE_IS_BLOCK_PACKET(tcode))
-                       header[3] = cpu_to_le32(packet->header[3]);
-               else
-                       header[3] = (__force __le32) packet->header[3];
-
-               d[0].req_count = cpu_to_le16(packet->header_length);
-               break;
-
-       case 8:
-               header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
-                                       (packet->speed << 16));
-               header[1] = cpu_to_le32(packet->header[0]);
-               header[2] = cpu_to_le32(packet->header[1]);
-               d[0].req_count = cpu_to_le16(12);
-               break;
-
-       case 4:
-               header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
-                                       (packet->speed << 16));
-               header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
-               d[0].req_count = cpu_to_le16(8);
-               break;
-
-       default:
-               /* BUG(); */
-               packet->ack = RCODE_SEND_ERROR;
-               return -1;
-       }
-
-       driver_data = (struct driver_data *) &d[3];
-       driver_data->packet = packet;
-       packet->driver_data = driver_data;
-
-       if (packet->payload_length > 0) {
-               payload_bus =
-                       dma_map_single(ohci->card.device, packet->payload,
-                                      packet->payload_length, DMA_TO_DEVICE);
-               if (dma_mapping_error(ohci->card.device, payload_bus)) {
-                       packet->ack = RCODE_SEND_ERROR;
-                       return -1;
-               }
-               packet->payload_bus = payload_bus;
-
-               d[2].req_count    = cpu_to_le16(packet->payload_length);
-               d[2].data_address = cpu_to_le32(payload_bus);
-               last = &d[2];
-               z = 3;
-       } else {
-               last = &d[0];
-               z = 2;
-       }
-
-       last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
-                                    DESCRIPTOR_IRQ_ALWAYS |
-                                    DESCRIPTOR_BRANCH_ALWAYS);
-
-       /*
-        * If the controller and packet generations don't match, we need to
-        * bail out and try again.  If IntEvent.busReset is set, the AT context
-        * is halted, so appending to the context and trying to run it is
-        * futile.  Most controllers do the right thing and just flush the AT
-        * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
-        * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
-        * up stalling out.  So we just bail out in software and try again
-        * later, and everyone is happy.
-        * FIXME: Document how the locking works.
-        */
-       if (ohci->generation != packet->generation ||
-           reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
-               if (packet->payload_length > 0)
-                       dma_unmap_single(ohci->card.device, payload_bus,
-                                        packet->payload_length, DMA_TO_DEVICE);
-               packet->ack = RCODE_GENERATION;
-               return -1;
-       }
-
-       context_append(ctx, d, z, 4 - z);
-
-       /* If the context isn't already running, start it up. */
-       reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
-       if ((reg & CONTEXT_RUN) == 0)
-               context_run(ctx, 0);
-
-       return 0;
-}
-
-static int handle_at_packet(struct context *context,
-                           struct descriptor *d,
-                           struct descriptor *last)
-{
-       struct driver_data *driver_data;
-       struct fw_packet *packet;
-       struct fw_ohci *ohci = context->ohci;
-       int evt;
-
-       if (last->transfer_status == 0)
-               /* This descriptor isn't done yet, stop iteration. */
-               return 0;
-
-       driver_data = (struct driver_data *) &d[3];
-       packet = driver_data->packet;
-       if (packet == NULL)
-               /* This packet was cancelled, just continue. */
-               return 1;
-
-       if (packet->payload_bus)
-               dma_unmap_single(ohci->card.device, packet->payload_bus,
-                                packet->payload_length, DMA_TO_DEVICE);
-
-       evt = le16_to_cpu(last->transfer_status) & 0x1f;
-       packet->timestamp = le16_to_cpu(last->res_count);
-
-       log_ar_at_event('T', packet->speed, packet->header, evt);
-
-       switch (evt) {
-       case OHCI1394_evt_timeout:
-               /* Async response transmit timed out. */
-               packet->ack = RCODE_CANCELLED;
-               break;
-
-       case OHCI1394_evt_flushed:
-               /*
-                * The packet was flushed should give same error as
-                * when we try to use a stale generation count.
-                */
-               packet->ack = RCODE_GENERATION;
-               break;
-
-       case OHCI1394_evt_missing_ack:
-               /*
-                * Using a valid (current) generation count, but the
-                * node is not on the bus or not sending acks.
-                */
-               packet->ack = RCODE_NO_ACK;
-               break;
-
-       case ACK_COMPLETE + 0x10:
-       case ACK_PENDING + 0x10:
-       case ACK_BUSY_X + 0x10:
-       case ACK_BUSY_A + 0x10:
-       case ACK_BUSY_B + 0x10:
-       case ACK_DATA_ERROR + 0x10:
-       case ACK_TYPE_ERROR + 0x10:
-               packet->ack = evt - 0x10;
-               break;
-
-       default:
-               packet->ack = RCODE_SEND_ERROR;
-               break;
-       }
-
-       packet->callback(packet, &ohci->card, packet->ack);
-
-       return 1;
-}
-
-#define HEADER_GET_DESTINATION(q)      (((q) >> 16) & 0xffff)
-#define HEADER_GET_TCODE(q)            (((q) >> 4) & 0x0f)
-#define HEADER_GET_OFFSET_HIGH(q)      (((q) >> 0) & 0xffff)
-#define HEADER_GET_DATA_LENGTH(q)      (((q) >> 16) & 0xffff)
-#define HEADER_GET_EXTENDED_TCODE(q)   (((q) >> 0) & 0xffff)
-
-static void handle_local_rom(struct fw_ohci *ohci,
-                            struct fw_packet *packet, u32 csr)
-{
-       struct fw_packet response;
-       int tcode, length, i;
-
-       tcode = HEADER_GET_TCODE(packet->header[0]);
-       if (TCODE_IS_BLOCK_PACKET(tcode))
-               length = HEADER_GET_DATA_LENGTH(packet->header[3]);
-       else
-               length = 4;
-
-       i = csr - CSR_CONFIG_ROM;
-       if (i + length > CONFIG_ROM_SIZE) {
-               fw_fill_response(&response, packet->header,
-                                RCODE_ADDRESS_ERROR, NULL, 0);
-       } else if (!TCODE_IS_READ_REQUEST(tcode)) {
-               fw_fill_response(&response, packet->header,
-                                RCODE_TYPE_ERROR, NULL, 0);
-       } else {
-               fw_fill_response(&response, packet->header, RCODE_COMPLETE,
-                                (void *) ohci->config_rom + i, length);
-       }
-
-       fw_core_handle_response(&ohci->card, &response);
-}
-
-static void handle_local_lock(struct fw_ohci *ohci,
-                             struct fw_packet *packet, u32 csr)
-{
-       struct fw_packet response;
-       int tcode, length, ext_tcode, sel;
-       __be32 *payload, lock_old;
-       u32 lock_arg, lock_data;
-
-       tcode = HEADER_GET_TCODE(packet->header[0]);
-       length = HEADER_GET_DATA_LENGTH(packet->header[3]);
-       payload = packet->payload;
-       ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
-
-       if (tcode == TCODE_LOCK_REQUEST &&
-           ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
-               lock_arg = be32_to_cpu(payload[0]);
-               lock_data = be32_to_cpu(payload[1]);
-       } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
-               lock_arg = 0;
-               lock_data = 0;
-       } else {
-               fw_fill_response(&response, packet->header,
-                                RCODE_TYPE_ERROR, NULL, 0);
-               goto out;
-       }
-
-       sel = (csr - CSR_BUS_MANAGER_ID) / 4;
-       reg_write(ohci, OHCI1394_CSRData, lock_data);
-       reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
-       reg_write(ohci, OHCI1394_CSRControl, sel);
-
-       if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
-               lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
-       else
-               fw_notify("swap not done yet\n");
-
-       fw_fill_response(&response, packet->header,
-                        RCODE_COMPLETE, &lock_old, sizeof(lock_old));
- out:
-       fw_core_handle_response(&ohci->card, &response);
-}
-
-static void handle_local_request(struct context *ctx, struct fw_packet *packet)
-{
-       u64 offset;
-       u32 csr;
-
-       if (ctx == &ctx->ohci->at_request_ctx) {
-               packet->ack = ACK_PENDING;
-               packet->callback(packet, &ctx->ohci->card, packet->ack);
-       }
-
-       offset =
-               ((unsigned long long)
-                HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
-               packet->header[2];
-       csr = offset - CSR_REGISTER_BASE;
-
-       /* Handle config rom reads. */
-       if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
-               handle_local_rom(ctx->ohci, packet, csr);
-       else switch (csr) {
-       case CSR_BUS_MANAGER_ID:
-       case CSR_BANDWIDTH_AVAILABLE:
-       case CSR_CHANNELS_AVAILABLE_HI:
-       case CSR_CHANNELS_AVAILABLE_LO:
-               handle_local_lock(ctx->ohci, packet, csr);
-               break;
-       default:
-               if (ctx == &ctx->ohci->at_request_ctx)
-                       fw_core_handle_request(&ctx->ohci->card, packet);
-               else
-                       fw_core_handle_response(&ctx->ohci->card, packet);
-               break;
-       }
-
-       if (ctx == &ctx->ohci->at_response_ctx) {
-               packet->ack = ACK_COMPLETE;
-               packet->callback(packet, &ctx->ohci->card, packet->ack);
-       }
-}
-
-static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
-{
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&ctx->ohci->lock, flags);
-
-       if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
-           ctx->ohci->generation == packet->generation) {
-               spin_unlock_irqrestore(&ctx->ohci->lock, flags);
-               handle_local_request(ctx, packet);
-               return;
-       }
-
-       ret = at_context_queue_packet(ctx, packet);
-       spin_unlock_irqrestore(&ctx->ohci->lock, flags);
-
-       if (ret < 0)
-               packet->callback(packet, &ctx->ohci->card, packet->ack);
-
-}
-
-static void bus_reset_tasklet(unsigned long data)
-{
-       struct fw_ohci *ohci = (struct fw_ohci *)data;
-       int self_id_count, i, j, reg;
-       int generation, new_generation;
-       unsigned long flags;
-       void *free_rom = NULL;
-       dma_addr_t free_rom_bus = 0;
-
-       reg = reg_read(ohci, OHCI1394_NodeID);
-       if (!(reg & OHCI1394_NodeID_idValid)) {
-               fw_notify("node ID not valid, new bus reset in progress\n");
-               return;
-       }
-       if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
-               fw_notify("malconfigured bus\n");
-               return;
-       }
-       ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
-                              OHCI1394_NodeID_nodeNumber);
-
-       reg = reg_read(ohci, OHCI1394_SelfIDCount);
-       if (reg & OHCI1394_SelfIDCount_selfIDError) {
-               fw_notify("inconsistent self IDs\n");
-               return;
-       }
-       /*
-        * The count in the SelfIDCount register is the number of
-        * bytes in the self ID receive buffer.  Since we also receive
-        * the inverted quadlets and a header quadlet, we shift one
-        * bit extra to get the actual number of self IDs.
-        */
-       self_id_count = (reg >> 3) & 0x3ff;
-       if (self_id_count == 0) {
-               fw_notify("inconsistent self IDs\n");
-               return;
-       }
-       generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
-       rmb();
-
-       for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
-               if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
-                       fw_notify("inconsistent self IDs\n");
-                       return;
-               }
-               ohci->self_id_buffer[j] =
-                               cond_le32_to_cpu(ohci->self_id_cpu[i]);
-       }
-       rmb();
-
-       /*
-        * Check the consistency of the self IDs we just read.  The
-        * problem we face is that a new bus reset can start while we
-        * read out the self IDs from the DMA buffer. If this happens,
-        * the DMA buffer will be overwritten with new self IDs and we
-        * will read out inconsistent data.  The OHCI specification
-        * (section 11.2) recommends a technique similar to
-        * linux/seqlock.h, where we remember the generation of the
-        * self IDs in the buffer before reading them out and compare
-        * it to the current generation after reading them out.  If
-        * the two generations match we know we have a consistent set
-        * of self IDs.
-        */
-
-       new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
-       if (new_generation != generation) {
-               fw_notify("recursive bus reset detected, "
-                         "discarding self ids\n");
-               return;
-       }
-
-       /* FIXME: Document how the locking works. */
-       spin_lock_irqsave(&ohci->lock, flags);
-
-       ohci->generation = generation;
-       context_stop(&ohci->at_request_ctx);
-       context_stop(&ohci->at_response_ctx);
-       reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
-
-       if (ohci->bus_reset_packet_quirk)
-               ohci->request_generation = generation;
-
-       /*
-        * This next bit is unrelated to the AT context stuff but we
-        * have to do it under the spinlock also.  If a new config rom
-        * was set up before this reset, the old one is now no longer
-        * in use and we can free it. Update the config rom pointers
-        * to point to the current config rom and clear the
-        * next_config_rom pointer so a new udpate can take place.
-        */
-
-       if (ohci->next_config_rom != NULL) {
-               if (ohci->next_config_rom != ohci->config_rom) {
-                       free_rom      = ohci->config_rom;
-                       free_rom_bus  = ohci->config_rom_bus;
-               }
-               ohci->config_rom      = ohci->next_config_rom;
-               ohci->config_rom_bus  = ohci->next_config_rom_bus;
-               ohci->next_config_rom = NULL;
-
-               /*
-                * Restore config_rom image and manually update
-                * config_rom registers.  Writing the header quadlet
-                * will indicate that the config rom is ready, so we
-                * do that last.
-                */
-               reg_write(ohci, OHCI1394_BusOptions,
-                         be32_to_cpu(ohci->config_rom[2]));
-               ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
-               reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
-       }
-
-#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
-       reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
-       reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
-#endif
-
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       if (free_rom)
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 free_rom, free_rom_bus);
-
-       log_selfids(ohci->node_id, generation,
-                   self_id_count, ohci->self_id_buffer);
-
-       fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
-                                self_id_count, ohci->self_id_buffer);
-}
-
-static irqreturn_t irq_handler(int irq, void *data)
-{
-       struct fw_ohci *ohci = data;
-       u32 event, iso_event, cycle_time;
-       int i;
-
-       event = reg_read(ohci, OHCI1394_IntEventClear);
-
-       if (!event || !~event)
-               return IRQ_NONE;
-
-       /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
-       reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
-       log_irqs(event);
-
-       if (event & OHCI1394_selfIDComplete)
-               tasklet_schedule(&ohci->bus_reset_tasklet);
-
-       if (event & OHCI1394_RQPkt)
-               tasklet_schedule(&ohci->ar_request_ctx.tasklet);
-
-       if (event & OHCI1394_RSPkt)
-               tasklet_schedule(&ohci->ar_response_ctx.tasklet);
-
-       if (event & OHCI1394_reqTxComplete)
-               tasklet_schedule(&ohci->at_request_ctx.tasklet);
-
-       if (event & OHCI1394_respTxComplete)
-               tasklet_schedule(&ohci->at_response_ctx.tasklet);
-
-       iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
-       reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
-
-       while (iso_event) {
-               i = ffs(iso_event) - 1;
-               tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
-               iso_event &= ~(1 << i);
-       }
-
-       iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
-       reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
-
-       while (iso_event) {
-               i = ffs(iso_event) - 1;
-               tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
-               iso_event &= ~(1 << i);
-       }
-
-       if (unlikely(event & OHCI1394_regAccessFail))
-               fw_error("Register access failure - "
-                        "please notify linux1394-devel@lists.sf.net\n");
-
-       if (unlikely(event & OHCI1394_postedWriteErr))
-               fw_error("PCI posted write error\n");
-
-       if (unlikely(event & OHCI1394_cycleTooLong)) {
-               if (printk_ratelimit())
-                       fw_notify("isochronous cycle too long\n");
-               reg_write(ohci, OHCI1394_LinkControlSet,
-                         OHCI1394_LinkControl_cycleMaster);
-       }
-
-       if (event & OHCI1394_cycle64Seconds) {
-               cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-               if ((cycle_time & 0x80000000) == 0)
-                       ohci->bus_seconds++;
-       }
-
-       return IRQ_HANDLED;
-}
-
-static int software_reset(struct fw_ohci *ohci)
-{
-       int i;
-
-       reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
-
-       for (i = 0; i < OHCI_LOOP_COUNT; i++) {
-               if ((reg_read(ohci, OHCI1394_HCControlSet) &
-                    OHCI1394_HCControl_softReset) == 0)
-                       return 0;
-               msleep(1);
-       }
-
-       return -EBUSY;
-}
-
-static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       struct pci_dev *dev = to_pci_dev(card->device);
-       u32 lps;
-       int i;
-
-       if (software_reset(ohci)) {
-               fw_error("Failed to reset ohci card.\n");
-               return -EBUSY;
-       }
-
-       /*
-        * Now enable LPS, which we need in order to start accessing
-        * most of the registers.  In fact, on some cards (ALI M5251),
-        * accessing registers in the SClk domain without LPS enabled
-        * will lock up the machine.  Wait 50msec to make sure we have
-        * full link enabled.  However, with some cards (well, at least
-        * a JMicron PCIe card), we have to try again sometimes.
-        */
-       reg_write(ohci, OHCI1394_HCControlSet,
-                 OHCI1394_HCControl_LPS |
-                 OHCI1394_HCControl_postedWriteEnable);
-       flush_writes(ohci);
-
-       for (lps = 0, i = 0; !lps && i < 3; i++) {
-               msleep(50);
-               lps = reg_read(ohci, OHCI1394_HCControlSet) &
-                     OHCI1394_HCControl_LPS;
-       }
-
-       if (!lps) {
-               fw_error("Failed to set Link Power Status\n");
-               return -EIO;
-       }
-
-       reg_write(ohci, OHCI1394_HCControlClear,
-                 OHCI1394_HCControl_noByteSwapData);
-
-       reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
-       reg_write(ohci, OHCI1394_LinkControlClear,
-                 OHCI1394_LinkControl_rcvPhyPkt);
-       reg_write(ohci, OHCI1394_LinkControlSet,
-                 OHCI1394_LinkControl_rcvSelfID |
-                 OHCI1394_LinkControl_cycleTimerEnable |
-                 OHCI1394_LinkControl_cycleMaster);
-
-       reg_write(ohci, OHCI1394_ATRetries,
-                 OHCI1394_MAX_AT_REQ_RETRIES |
-                 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
-                 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
-
-       ar_context_run(&ohci->ar_request_ctx);
-       ar_context_run(&ohci->ar_response_ctx);
-
-       reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
-       reg_write(ohci, OHCI1394_IntEventClear, ~0);
-       reg_write(ohci, OHCI1394_IntMaskClear, ~0);
-       reg_write(ohci, OHCI1394_IntMaskSet,
-                 OHCI1394_selfIDComplete |
-                 OHCI1394_RQPkt | OHCI1394_RSPkt |
-                 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
-                 OHCI1394_isochRx | OHCI1394_isochTx |
-                 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
-                 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
-                 OHCI1394_masterIntEnable);
-       if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
-               reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
-
-       /* Activate link_on bit and contender bit in our self ID packets.*/
-       if (ohci_update_phy_reg(card, 4, 0,
-                               PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
-               return -EIO;
-
-       /*
-        * When the link is not yet enabled, the atomic config rom
-        * update mechanism described below in ohci_set_config_rom()
-        * is not active.  We have to update ConfigRomHeader and
-        * BusOptions manually, and the write to ConfigROMmap takes
-        * effect immediately.  We tie this to the enabling of the
-        * link, so we have a valid config rom before enabling - the
-        * OHCI requires that ConfigROMhdr and BusOptions have valid
-        * values before enabling.
-        *
-        * However, when the ConfigROMmap is written, some controllers
-        * always read back quadlets 0 and 2 from the config rom to
-        * the ConfigRomHeader and BusOptions registers on bus reset.
-        * They shouldn't do that in this initial case where the link
-        * isn't enabled.  This means we have to use the same
-        * workaround here, setting the bus header to 0 and then write
-        * the right values in the bus reset tasklet.
-        */
-
-       if (config_rom) {
-               ohci->next_config_rom =
-                       dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                          &ohci->next_config_rom_bus,
-                                          GFP_KERNEL);
-               if (ohci->next_config_rom == NULL)
-                       return -ENOMEM;
-
-               memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
-               fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
-       } else {
-               /*
-                * In the suspend case, config_rom is NULL, which
-                * means that we just reuse the old config rom.
-                */
-               ohci->next_config_rom = ohci->config_rom;
-               ohci->next_config_rom_bus = ohci->config_rom_bus;
-       }
-
-       ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
-       ohci->next_config_rom[0] = 0;
-       reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
-       reg_write(ohci, OHCI1394_BusOptions,
-                 be32_to_cpu(ohci->next_config_rom[2]));
-       reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
-
-       reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
-
-       if (request_irq(dev->irq, irq_handler,
-                       IRQF_SHARED, ohci_driver_name, ohci)) {
-               fw_error("Failed to allocate shared interrupt %d.\n",
-                        dev->irq);
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 ohci->config_rom, ohci->config_rom_bus);
-               return -EIO;
-       }
-
-       reg_write(ohci, OHCI1394_HCControlSet,
-                 OHCI1394_HCControl_linkEnable |
-                 OHCI1394_HCControl_BIBimageValid);
-       flush_writes(ohci);
-
-       /*
-        * We are ready to go, initiate bus reset to finish the
-        * initialization.
-        */
-
-       fw_core_initiate_bus_reset(&ohci->card, 1);
-
-       return 0;
-}
-
-static int ohci_set_config_rom(struct fw_card *card,
-                              u32 *config_rom, size_t length)
-{
-       struct fw_ohci *ohci;
-       unsigned long flags;
-       int ret = -EBUSY;
-       __be32 *next_config_rom;
-       dma_addr_t uninitialized_var(next_config_rom_bus);
-
-       ohci = fw_ohci(card);
-
-       /*
-        * When the OHCI controller is enabled, the config rom update
-        * mechanism is a bit tricky, but easy enough to use.  See
-        * section 5.5.6 in the OHCI specification.
-        *
-        * The OHCI controller caches the new config rom address in a
-        * shadow register (ConfigROMmapNext) and needs a bus reset
-        * for the changes to take place.  When the bus reset is
-        * detected, the controller loads the new values for the
-        * ConfigRomHeader and BusOptions registers from the specified
-        * config rom and loads ConfigROMmap from the ConfigROMmapNext
-        * shadow register. All automatically and atomically.
-        *
-        * Now, there's a twist to this story.  The automatic load of
-        * ConfigRomHeader and BusOptions doesn't honor the
-        * noByteSwapData bit, so with a be32 config rom, the
-        * controller will load be32 values in to these registers
-        * during the atomic update, even on litte endian
-        * architectures.  The workaround we use is to put a 0 in the
-        * header quadlet; 0 is endian agnostic and means that the
-        * config rom isn't ready yet.  In the bus reset tasklet we
-        * then set up the real values for the two registers.
-        *
-        * We use ohci->lock to avoid racing with the code that sets
-        * ohci->next_config_rom to NULL (see bus_reset_tasklet).
-        */
-
-       next_config_rom =
-               dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                  &next_config_rom_bus, GFP_KERNEL);
-       if (next_config_rom == NULL)
-               return -ENOMEM;
-
-       spin_lock_irqsave(&ohci->lock, flags);
-
-       if (ohci->next_config_rom == NULL) {
-               ohci->next_config_rom = next_config_rom;
-               ohci->next_config_rom_bus = next_config_rom_bus;
-
-               memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
-               fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
-                                 length * 4);
-
-               ohci->next_header = config_rom[0];
-               ohci->next_config_rom[0] = 0;
-
-               reg_write(ohci, OHCI1394_ConfigROMmap,
-                         ohci->next_config_rom_bus);
-               ret = 0;
-       }
-
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       /*
-        * Now initiate a bus reset to have the changes take
-        * effect. We clean up the old config rom memory and DMA
-        * mappings in the bus reset tasklet, since the OHCI
-        * controller could need to access it before the bus reset
-        * takes effect.
-        */
-       if (ret == 0)
-               fw_core_initiate_bus_reset(&ohci->card, 1);
-       else
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 next_config_rom, next_config_rom_bus);
-
-       return ret;
-}
-
-static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-
-       at_context_transmit(&ohci->at_request_ctx, packet);
-}
-
-static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-
-       at_context_transmit(&ohci->at_response_ctx, packet);
-}
-
-static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       struct context *ctx = &ohci->at_request_ctx;
-       struct driver_data *driver_data = packet->driver_data;
-       int ret = -ENOENT;
-
-       tasklet_disable(&ctx->tasklet);
-
-       if (packet->ack != 0)
-               goto out;
-
-       if (packet->payload_bus)
-               dma_unmap_single(ohci->card.device, packet->payload_bus,
-                                packet->payload_length, DMA_TO_DEVICE);
-
-       log_ar_at_event('T', packet->speed, packet->header, 0x20);
-       driver_data->packet = NULL;
-       packet->ack = RCODE_CANCELLED;
-       packet->callback(packet, &ohci->card, packet->ack);
-       ret = 0;
- out:
-       tasklet_enable(&ctx->tasklet);
-
-       return ret;
-}
-
-static int ohci_enable_phys_dma(struct fw_card *card,
-                               int node_id, int generation)
-{
-#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
-       return 0;
-#else
-       struct fw_ohci *ohci = fw_ohci(card);
-       unsigned long flags;
-       int n, ret = 0;
-
-       /*
-        * FIXME:  Make sure this bitmask is cleared when we clear the busReset
-        * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
-        */
-
-       spin_lock_irqsave(&ohci->lock, flags);
-
-       if (ohci->generation != generation) {
-               ret = -ESTALE;
-               goto out;
-       }
-
-       /*
-        * Note, if the node ID contains a non-local bus ID, physical DMA is
-        * enabled for _all_ nodes on remote buses.
-        */
-
-       n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
-       if (n < 32)
-               reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
-       else
-               reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
-
-       flush_writes(ohci);
- out:
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       return ret;
-#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
-}
-
-static u64 ohci_get_bus_time(struct fw_card *card)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       u32 cycle_time;
-       u64 bus_time;
-
-       cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
-       bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
-
-       return bus_time;
-}
-
-static void copy_iso_headers(struct iso_context *ctx, void *p)
-{
-       int i = ctx->header_length;
-
-       if (i + ctx->base.header_size > PAGE_SIZE)
-               return;
-
-       /*
-        * The iso header is byteswapped to little endian by
-        * the controller, but the remaining header quadlets
-        * are big endian.  We want to present all the headers
-        * as big endian, so we have to swap the first quadlet.
-        */
-       if (ctx->base.header_size > 0)
-               *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
-       if (ctx->base.header_size > 4)
-               *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
-       if (ctx->base.header_size > 8)
-               memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
-       ctx->header_length += ctx->base.header_size;
-}
-
-static int handle_ir_dualbuffer_packet(struct context *context,
-                                      struct descriptor *d,
-                                      struct descriptor *last)
-{
-       struct iso_context *ctx =
-               container_of(context, struct iso_context, context);
-       struct db_descriptor *db = (struct db_descriptor *) d;
-       __le32 *ir_header;
-       size_t header_length;
-       void *p, *end;
-
-       if (db->first_res_count != 0 && db->second_res_count != 0) {
-               if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
-                       /* This descriptor isn't done yet, stop iteration. */
-                       return 0;
-               }
-               ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
-       }
-
-       header_length = le16_to_cpu(db->first_req_count) -
-               le16_to_cpu(db->first_res_count);
-
-       p = db + 1;
-       end = p + header_length;
-       while (p < end) {
-               copy_iso_headers(ctx, p);
-               ctx->excess_bytes +=
-                       (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
-               p += max(ctx->base.header_size, (size_t)8);
-       }
-
-       ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
-               le16_to_cpu(db->second_res_count);
-
-       if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
-               ir_header = (__le32 *) (db + 1);
-               ctx->base.callback(&ctx->base,
-                                  le32_to_cpu(ir_header[0]) & 0xffff,
-                                  ctx->header_length, ctx->header,
-                                  ctx->base.callback_data);
-               ctx->header_length = 0;
-       }
-
-       return 1;
-}
-
-static int handle_ir_packet_per_buffer(struct context *context,
-                                      struct descriptor *d,
-                                      struct descriptor *last)
-{
-       struct iso_context *ctx =
-               container_of(context, struct iso_context, context);
-       struct descriptor *pd;
-       __le32 *ir_header;
-       void *p;
-
-       for (pd = d; pd <= last; pd++) {
-               if (pd->transfer_status)
-                       break;
-       }
-       if (pd > last)
-               /* Descriptor(s) not done yet, stop iteration */
-               return 0;
-
-       p = last + 1;
-       copy_iso_headers(ctx, p);
-
-       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
-               ir_header = (__le32 *) p;
-               ctx->base.callback(&ctx->base,
-                                  le32_to_cpu(ir_header[0]) & 0xffff,
-                                  ctx->header_length, ctx->header,
-                                  ctx->base.callback_data);
-               ctx->header_length = 0;
-       }
-
-       return 1;
-}
-
-static int handle_it_packet(struct context *context,
-                           struct descriptor *d,
-                           struct descriptor *last)
-{
-       struct iso_context *ctx =
-               container_of(context, struct iso_context, context);
-
-       if (last->transfer_status == 0)
-               /* This descriptor isn't done yet, stop iteration. */
-               return 0;
-
-       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
-               ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
-                                  0, NULL, ctx->base.callback_data);
-
-       return 1;
-}
-
-static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
-                               int type, int channel, size_t header_size)
-{
-       struct fw_ohci *ohci = fw_ohci(card);
-       struct iso_context *ctx, *list;
-       descriptor_callback_t callback;
-       u64 *channels, dont_care = ~0ULL;
-       u32 *mask, regs;
-       unsigned long flags;
-       int index, ret = -ENOMEM;
-
-       if (type == FW_ISO_CONTEXT_TRANSMIT) {
-               channels = &dont_care;
-               mask = &ohci->it_context_mask;
-               list = ohci->it_context_list;
-               callback = handle_it_packet;
-       } else {
-               channels = &ohci->ir_context_channels;
-               mask = &ohci->ir_context_mask;
-               list = ohci->ir_context_list;
-               if (ohci->use_dualbuffer)
-                       callback = handle_ir_dualbuffer_packet;
-               else
-                       callback = handle_ir_packet_per_buffer;
-       }
-
-       spin_lock_irqsave(&ohci->lock, flags);
-       index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
-       if (index >= 0) {
-               *channels &= ~(1ULL << channel);
-               *mask &= ~(1 << index);
-       }
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       if (index < 0)
-               return ERR_PTR(-EBUSY);
-
-       if (type == FW_ISO_CONTEXT_TRANSMIT)
-               regs = OHCI1394_IsoXmitContextBase(index);
-       else
-               regs = OHCI1394_IsoRcvContextBase(index);
-
-       ctx = &list[index];
-       memset(ctx, 0, sizeof(*ctx));
-       ctx->header_length = 0;
-       ctx->header = (void *) __get_free_page(GFP_KERNEL);
-       if (ctx->header == NULL)
-               goto out;
-
-       ret = context_init(&ctx->context, ohci, regs, callback);
-       if (ret < 0)
-               goto out_with_header;
-
-       return &ctx->base;
-
- out_with_header:
-       free_page((unsigned long)ctx->header);
- out:
-       spin_lock_irqsave(&ohci->lock, flags);
-       *mask |= 1 << index;
-       spin_unlock_irqrestore(&ohci->lock, flags);
-
-       return ERR_PTR(ret);
-}
-
-static int ohci_start_iso(struct fw_iso_context *base,
-                         s32 cycle, u32 sync, u32 tags)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       struct fw_ohci *ohci = ctx->context.ohci;
-       u32 control, match;
-       int index;
-
-       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
-               index = ctx - ohci->it_context_list;
-               match = 0;
-               if (cycle >= 0)
-                       match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
-                               (cycle & 0x7fff) << 16;
-
-               reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
-               reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
-               context_run(&ctx->context, match);
-       } else {
-               index = ctx - ohci->ir_context_list;
-               control = IR_CONTEXT_ISOCH_HEADER;
-               if (ohci->use_dualbuffer)
-                       control |= IR_CONTEXT_DUAL_BUFFER_MODE;
-               match = (tags << 28) | (sync << 8) | ctx->base.channel;
-               if (cycle >= 0) {
-                       match |= (cycle & 0x07fff) << 12;
-                       control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
-               }
-
-               reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
-               reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
-               reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
-               context_run(&ctx->context, control);
-       }
-
-       return 0;
-}
-
-static int ohci_stop_iso(struct fw_iso_context *base)
-{
-       struct fw_ohci *ohci = fw_ohci(base->card);
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       int index;
-
-       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
-               index = ctx - ohci->it_context_list;
-               reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
-       } else {
-               index = ctx - ohci->ir_context_list;
-               reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
-       }
-       flush_writes(ohci);
-       context_stop(&ctx->context);
-
-       return 0;
-}
-
-static void ohci_free_iso_context(struct fw_iso_context *base)
-{
-       struct fw_ohci *ohci = fw_ohci(base->card);
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       unsigned long flags;
-       int index;
-
-       ohci_stop_iso(base);
-       context_release(&ctx->context);
-       free_page((unsigned long)ctx->header);
-
-       spin_lock_irqsave(&ohci->lock, flags);
-
-       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
-               index = ctx - ohci->it_context_list;
-               ohci->it_context_mask |= 1 << index;
-       } else {
-               index = ctx - ohci->ir_context_list;
-               ohci->ir_context_mask |= 1 << index;
-               ohci->ir_context_channels |= 1ULL << base->channel;
-       }
-
-       spin_unlock_irqrestore(&ohci->lock, flags);
-}
-
-static int ohci_queue_iso_transmit(struct fw_iso_context *base,
-                                  struct fw_iso_packet *packet,
-                                  struct fw_iso_buffer *buffer,
-                                  unsigned long payload)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       struct descriptor *d, *last, *pd;
-       struct fw_iso_packet *p;
-       __le32 *header;
-       dma_addr_t d_bus, page_bus;
-       u32 z, header_z, payload_z, irq;
-       u32 payload_index, payload_end_index, next_page_index;
-       int page, end_page, i, length, offset;
-
-       /*
-        * FIXME: Cycle lost behavior should be configurable: lose
-        * packet, retransmit or terminate..
-        */
-
-       p = packet;
-       payload_index = payload;
-
-       if (p->skip)
-               z = 1;
-       else
-               z = 2;
-       if (p->header_length > 0)
-               z++;
-
-       /* Determine the first page the payload isn't contained in. */
-       end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
-       if (p->payload_length > 0)
-               payload_z = end_page - (payload_index >> PAGE_SHIFT);
-       else
-               payload_z = 0;
-
-       z += payload_z;
-
-       /* Get header size in number of descriptors. */
-       header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
-
-       d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
-       if (d == NULL)
-               return -ENOMEM;
-
-       if (!p->skip) {
-               d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
-               d[0].req_count = cpu_to_le16(8);
-
-               header = (__le32 *) &d[1];
-               header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
-                                       IT_HEADER_TAG(p->tag) |
-                                       IT_HEADER_TCODE(TCODE_STREAM_DATA) |
-                                       IT_HEADER_CHANNEL(ctx->base.channel) |
-                                       IT_HEADER_SPEED(ctx->base.speed));
-               header[1] =
-                       cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
-                                                         p->payload_length));
-       }
-
-       if (p->header_length > 0) {
-               d[2].req_count    = cpu_to_le16(p->header_length);
-               d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
-               memcpy(&d[z], p->header, p->header_length);
-       }
-
-       pd = d + z - payload_z;
-       payload_end_index = payload_index + p->payload_length;
-       for (i = 0; i < payload_z; i++) {
-               page               = payload_index >> PAGE_SHIFT;
-               offset             = payload_index & ~PAGE_MASK;
-               next_page_index    = (page + 1) << PAGE_SHIFT;
-               length             =
-                       min(next_page_index, payload_end_index) - payload_index;
-               pd[i].req_count    = cpu_to_le16(length);
-
-               page_bus = page_private(buffer->pages[page]);
-               pd[i].data_address = cpu_to_le32(page_bus + offset);
-
-               payload_index += length;
-       }
-
-       if (p->interrupt)
-               irq = DESCRIPTOR_IRQ_ALWAYS;
-       else
-               irq = DESCRIPTOR_NO_IRQ;
-
-       last = z == 2 ? d : d + z - 1;
-       last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
-                                    DESCRIPTOR_STATUS |
-                                    DESCRIPTOR_BRANCH_ALWAYS |
-                                    irq);
-
-       context_append(&ctx->context, d, z, header_z);
-
-       return 0;
-}
-
-static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
-                                            struct fw_iso_packet *packet,
-                                            struct fw_iso_buffer *buffer,
-                                            unsigned long payload)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       struct db_descriptor *db = NULL;
-       struct descriptor *d;
-       struct fw_iso_packet *p;
-       dma_addr_t d_bus, page_bus;
-       u32 z, header_z, length, rest;
-       int page, offset, packet_count, header_size;
-
-       /*
-        * FIXME: Cycle lost behavior should be configurable: lose
-        * packet, retransmit or terminate..
-        */
-
-       p = packet;
-       z = 2;
-
-       /*
-        * The OHCI controller puts the isochronous header and trailer in the
-        * buffer, so we need at least 8 bytes.
-        */
-       packet_count = p->header_length / ctx->base.header_size;
-       header_size = packet_count * max(ctx->base.header_size, (size_t)8);
-
-       /* Get header size in number of descriptors. */
-       header_z = DIV_ROUND_UP(header_size, sizeof(*d));
-       page     = payload >> PAGE_SHIFT;
-       offset   = payload & ~PAGE_MASK;
-       rest     = p->payload_length;
-
-       /* FIXME: make packet-per-buffer/dual-buffer a context option */
-       while (rest > 0) {
-               d = context_get_descriptors(&ctx->context,
-                                           z + header_z, &d_bus);
-               if (d == NULL)
-                       return -ENOMEM;
-
-               db = (struct db_descriptor *) d;
-               db->control = cpu_to_le16(DESCRIPTOR_STATUS |
-                                         DESCRIPTOR_BRANCH_ALWAYS);
-               db->first_size =
-                   cpu_to_le16(max(ctx->base.header_size, (size_t)8));
-               if (p->skip && rest == p->payload_length) {
-                       db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
-                       db->first_req_count = db->first_size;
-               } else {
-                       db->first_req_count = cpu_to_le16(header_size);
-               }
-               db->first_res_count = db->first_req_count;
-               db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
-
-               if (p->skip && rest == p->payload_length)
-                       length = 4;
-               else if (offset + rest < PAGE_SIZE)
-                       length = rest;
-               else
-                       length = PAGE_SIZE - offset;
-
-               db->second_req_count = cpu_to_le16(length);
-               db->second_res_count = db->second_req_count;
-               page_bus = page_private(buffer->pages[page]);
-               db->second_buffer = cpu_to_le32(page_bus + offset);
-
-               if (p->interrupt && length == rest)
-                       db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
-               context_append(&ctx->context, d, z, header_z);
-               offset = (offset + length) & ~PAGE_MASK;
-               rest -= length;
-               if (offset == 0)
-                       page++;
-       }
-
-       return 0;
-}
-
-static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
-                                       struct fw_iso_packet *packet,
-                                       struct fw_iso_buffer *buffer,
-                                       unsigned long payload)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       struct descriptor *d = NULL, *pd = NULL;
-       struct fw_iso_packet *p = packet;
-       dma_addr_t d_bus, page_bus;
-       u32 z, header_z, rest;
-       int i, j, length;
-       int page, offset, packet_count, header_size, payload_per_buffer;
-
-       /*
-        * The OHCI controller puts the isochronous header and trailer in the
-        * buffer, so we need at least 8 bytes.
-        */
-       packet_count = p->header_length / ctx->base.header_size;
-       header_size  = max(ctx->base.header_size, (size_t)8);
-
-       /* Get header size in number of descriptors. */
-       header_z = DIV_ROUND_UP(header_size, sizeof(*d));
-       page     = payload >> PAGE_SHIFT;
-       offset   = payload & ~PAGE_MASK;
-       payload_per_buffer = p->payload_length / packet_count;
-
-       for (i = 0; i < packet_count; i++) {
-               /* d points to the header descriptor */
-               z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
-               d = context_get_descriptors(&ctx->context,
-                               z + header_z, &d_bus);
-               if (d == NULL)
-                       return -ENOMEM;
-
-               d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
-                                             DESCRIPTOR_INPUT_MORE);
-               if (p->skip && i == 0)
-                       d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
-               d->req_count    = cpu_to_le16(header_size);
-               d->res_count    = d->req_count;
-               d->transfer_status = 0;
-               d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
-
-               rest = payload_per_buffer;
-               for (j = 1; j < z; j++) {
-                       pd = d + j;
-                       pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
-                                                 DESCRIPTOR_INPUT_MORE);
-
-                       if (offset + rest < PAGE_SIZE)
-                               length = rest;
-                       else
-                               length = PAGE_SIZE - offset;
-                       pd->req_count = cpu_to_le16(length);
-                       pd->res_count = pd->req_count;
-                       pd->transfer_status = 0;
-
-                       page_bus = page_private(buffer->pages[page]);
-                       pd->data_address = cpu_to_le32(page_bus + offset);
-
-                       offset = (offset + length) & ~PAGE_MASK;
-                       rest -= length;
-                       if (offset == 0)
-                               page++;
-               }
-               pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
-                                         DESCRIPTOR_INPUT_LAST |
-                                         DESCRIPTOR_BRANCH_ALWAYS);
-               if (p->interrupt && i == packet_count - 1)
-                       pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
-               context_append(&ctx->context, d, z, header_z);
-       }
-
-       return 0;
-}
-
-static int ohci_queue_iso(struct fw_iso_context *base,
-                         struct fw_iso_packet *packet,
-                         struct fw_iso_buffer *buffer,
-                         unsigned long payload)
-{
-       struct iso_context *ctx = container_of(base, struct iso_context, base);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&ctx->context.ohci->lock, flags);
-       if (base->type == FW_ISO_CONTEXT_TRANSMIT)
-               ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
-       else if (ctx->context.ohci->use_dualbuffer)
-               ret = ohci_queue_iso_receive_dualbuffer(base, packet,
-                                                       buffer, payload);
-       else
-               ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
-                                                       buffer, payload);
-       spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
-
-       return ret;
-}
-
-static const struct fw_card_driver ohci_driver = {
-       .enable                 = ohci_enable,
-       .update_phy_reg         = ohci_update_phy_reg,
-       .set_config_rom         = ohci_set_config_rom,
-       .send_request           = ohci_send_request,
-       .send_response          = ohci_send_response,
-       .cancel_packet          = ohci_cancel_packet,
-       .enable_phys_dma        = ohci_enable_phys_dma,
-       .get_bus_time           = ohci_get_bus_time,
-
-       .allocate_iso_context   = ohci_allocate_iso_context,
-       .free_iso_context       = ohci_free_iso_context,
-       .queue_iso              = ohci_queue_iso,
-       .start_iso              = ohci_start_iso,
-       .stop_iso               = ohci_stop_iso,
-};
-
-#ifdef CONFIG_PPC_PMAC
-static void ohci_pmac_on(struct pci_dev *dev)
-{
-       if (machine_is(powermac)) {
-               struct device_node *ofn = pci_device_to_OF_node(dev);
-
-               if (ofn) {
-                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
-                       pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
-               }
-       }
-}
-
-static void ohci_pmac_off(struct pci_dev *dev)
-{
-       if (machine_is(powermac)) {
-               struct device_node *ofn = pci_device_to_OF_node(dev);
-
-               if (ofn) {
-                       pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
-                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
-               }
-       }
-}
-#else
-#define ohci_pmac_on(dev)
-#define ohci_pmac_off(dev)
-#endif /* CONFIG_PPC_PMAC */
-
-static int __devinit pci_probe(struct pci_dev *dev,
-                              const struct pci_device_id *ent)
-{
-       struct fw_ohci *ohci;
-       u32 bus_options, max_receive, link_speed, version;
-       u64 guid;
-       int err;
-       size_t size;
-
-       ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
-       if (ohci == NULL) {
-               err = -ENOMEM;
-               goto fail;
-       }
-
-       fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
-
-       ohci_pmac_on(dev);
-
-       err = pci_enable_device(dev);
-       if (err) {
-               fw_error("Failed to enable OHCI hardware\n");
-               goto fail_free;
-       }
-
-       pci_set_master(dev);
-       pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
-       pci_set_drvdata(dev, ohci);
-
-       spin_lock_init(&ohci->lock);
-
-       tasklet_init(&ohci->bus_reset_tasklet,
-                    bus_reset_tasklet, (unsigned long)ohci);
-
-       err = pci_request_region(dev, 0, ohci_driver_name);
-       if (err) {
-               fw_error("MMIO resource unavailable\n");
-               goto fail_disable;
-       }
-
-       ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
-       if (ohci->registers == NULL) {
-               fw_error("Failed to remap registers\n");
-               err = -ENXIO;
-               goto fail_iomem;
-       }
-
-       version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
-       ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
-
-/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
-#if !defined(CONFIG_X86_32)
-       /* dual-buffer mode is broken with descriptor addresses above 2G */
-       if (dev->vendor == PCI_VENDOR_ID_TI &&
-           dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
-               ohci->use_dualbuffer = false;
-#endif
-
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-       ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
-                            dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
-#endif
-       ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
-
-       ar_context_init(&ohci->ar_request_ctx, ohci,
-                       OHCI1394_AsReqRcvContextControlSet);
-
-       ar_context_init(&ohci->ar_response_ctx, ohci,
-                       OHCI1394_AsRspRcvContextControlSet);
-
-       context_init(&ohci->at_request_ctx, ohci,
-                    OHCI1394_AsReqTrContextControlSet, handle_at_packet);
-
-       context_init(&ohci->at_response_ctx, ohci,
-                    OHCI1394_AsRspTrContextControlSet, handle_at_packet);
-
-       reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
-       ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
-       reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
-       size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
-       ohci->it_context_list = kzalloc(size, GFP_KERNEL);
-
-       reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
-       ohci->ir_context_channels = ~0ULL;
-       ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
-       reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
-       size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
-       ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
-
-       if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
-               err = -ENOMEM;
-               goto fail_contexts;
-       }
-
-       /* self-id dma buffer allocation */
-       ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
-                                              SELF_ID_BUF_SIZE,
-                                              &ohci->self_id_bus,
-                                              GFP_KERNEL);
-       if (ohci->self_id_cpu == NULL) {
-               err = -ENOMEM;
-               goto fail_contexts;
-       }
-
-       bus_options = reg_read(ohci, OHCI1394_BusOptions);
-       max_receive = (bus_options >> 12) & 0xf;
-       link_speed = bus_options & 0x7;
-       guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
-               reg_read(ohci, OHCI1394_GUIDLo);
-
-       err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
-       if (err)
-               goto fail_self_id;
-
-       fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
-                 dev_name(&dev->dev), version >> 16, version & 0xff);
-
-       return 0;
-
- fail_self_id:
-       dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-                         ohci->self_id_cpu, ohci->self_id_bus);
- fail_contexts:
-       kfree(ohci->ir_context_list);
-       kfree(ohci->it_context_list);
-       context_release(&ohci->at_response_ctx);
-       context_release(&ohci->at_request_ctx);
-       ar_context_release(&ohci->ar_response_ctx);
-       ar_context_release(&ohci->ar_request_ctx);
-       pci_iounmap(dev, ohci->registers);
- fail_iomem:
-       pci_release_region(dev, 0);
- fail_disable:
-       pci_disable_device(dev);
- fail_free:
-       kfree(&ohci->card);
-       ohci_pmac_off(dev);
- fail:
-       if (err == -ENOMEM)
-               fw_error("Out of memory\n");
-
-       return err;
-}
-
-static void pci_remove(struct pci_dev *dev)
-{
-       struct fw_ohci *ohci;
-
-       ohci = pci_get_drvdata(dev);
-       reg_write(ohci, OHCI1394_IntMaskClear, ~0);
-       flush_writes(ohci);
-       fw_core_remove_card(&ohci->card);
-
-       /*
-        * FIXME: Fail all pending packets here, now that the upper
-        * layers can't queue any more.
-        */
-
-       software_reset(ohci);
-       free_irq(dev->irq, ohci);
-
-       if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 ohci->next_config_rom, ohci->next_config_rom_bus);
-       if (ohci->config_rom)
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 ohci->config_rom, ohci->config_rom_bus);
-       dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-                         ohci->self_id_cpu, ohci->self_id_bus);
-       ar_context_release(&ohci->ar_request_ctx);
-       ar_context_release(&ohci->ar_response_ctx);
-       context_release(&ohci->at_request_ctx);
-       context_release(&ohci->at_response_ctx);
-       kfree(ohci->it_context_list);
-       kfree(ohci->ir_context_list);
-       pci_iounmap(dev, ohci->registers);
-       pci_release_region(dev, 0);
-       pci_disable_device(dev);
-       kfree(&ohci->card);
-       ohci_pmac_off(dev);
-
-       fw_notify("Removed fw-ohci device.\n");
-}
-
-#ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
-       struct fw_ohci *ohci = pci_get_drvdata(dev);
-       int err;
-
-       software_reset(ohci);
-       free_irq(dev->irq, ohci);
-       err = pci_save_state(dev);
-       if (err) {
-               fw_error("pci_save_state failed\n");
-               return err;
-       }
-       err = pci_set_power_state(dev, pci_choose_state(dev, state));
-       if (err)
-               fw_error("pci_set_power_state failed with %d\n", err);
-       ohci_pmac_off(dev);
-
-       return 0;
-}
-
-static int pci_resume(struct pci_dev *dev)
-{
-       struct fw_ohci *ohci = pci_get_drvdata(dev);
-       int err;
-
-       ohci_pmac_on(dev);
-       pci_set_power_state(dev, PCI_D0);
-       pci_restore_state(dev);
-       err = pci_enable_device(dev);
-       if (err) {
-               fw_error("pci_enable_device failed\n");
-               return err;
-       }
-
-       return ohci_enable(&ohci->card, NULL, 0);
-}
-#endif
-
-static struct pci_device_id pci_table[] = {
-       { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
-       { }
-};
-
-MODULE_DEVICE_TABLE(pci, pci_table);
-
-static struct pci_driver fw_ohci_pci_driver = {
-       .name           = ohci_driver_name,
-       .id_table       = pci_table,
-       .probe          = pci_probe,
-       .remove         = pci_remove,
-#ifdef CONFIG_PM
-       .resume         = pci_resume,
-       .suspend        = pci_suspend,
-#endif
-};
-
-MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
-MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
-MODULE_LICENSE("GPL");
-
-/* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
-MODULE_ALIAS("ohci1394");
-#endif
-
-static int __init fw_ohci_init(void)
-{
-       return pci_register_driver(&fw_ohci_pci_driver);
-}
-
-static void __exit fw_ohci_cleanup(void)
-{
-       pci_unregister_driver(&fw_ohci_pci_driver);
-}
-
-module_init(fw_ohci_init);
-module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
deleted file mode 100644 (file)
index a2fbb62..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-#ifndef __fw_ohci_h
-#define __fw_ohci_h
-
-/* OHCI register map */
-
-#define OHCI1394_Version                      0x000
-#define OHCI1394_GUID_ROM                     0x004
-#define OHCI1394_ATRetries                    0x008
-#define OHCI1394_CSRData                      0x00C
-#define OHCI1394_CSRCompareData               0x010
-#define OHCI1394_CSRControl                   0x014
-#define OHCI1394_ConfigROMhdr                 0x018
-#define OHCI1394_BusID                        0x01C
-#define OHCI1394_BusOptions                   0x020
-#define OHCI1394_GUIDHi                       0x024
-#define OHCI1394_GUIDLo                       0x028
-#define OHCI1394_ConfigROMmap                 0x034
-#define OHCI1394_PostedWriteAddressLo         0x038
-#define OHCI1394_PostedWriteAddressHi         0x03C
-#define OHCI1394_VendorID                     0x040
-#define OHCI1394_HCControlSet                 0x050
-#define OHCI1394_HCControlClear               0x054
-#define  OHCI1394_HCControl_BIBimageValid      0x80000000
-#define  OHCI1394_HCControl_noByteSwapData     0x40000000
-#define  OHCI1394_HCControl_programPhyEnable   0x00800000
-#define  OHCI1394_HCControl_aPhyEnhanceEnable  0x00400000
-#define  OHCI1394_HCControl_LPS                        0x00080000
-#define  OHCI1394_HCControl_postedWriteEnable  0x00040000
-#define  OHCI1394_HCControl_linkEnable         0x00020000
-#define  OHCI1394_HCControl_softReset          0x00010000
-#define OHCI1394_SelfIDBuffer                 0x064
-#define OHCI1394_SelfIDCount                  0x068
-#define  OHCI1394_SelfIDCount_selfIDError      0x80000000
-#define OHCI1394_IRMultiChanMaskHiSet         0x070
-#define OHCI1394_IRMultiChanMaskHiClear       0x074
-#define OHCI1394_IRMultiChanMaskLoSet         0x078
-#define OHCI1394_IRMultiChanMaskLoClear       0x07C
-#define OHCI1394_IntEventSet                  0x080
-#define OHCI1394_IntEventClear                0x084
-#define OHCI1394_IntMaskSet                   0x088
-#define OHCI1394_IntMaskClear                 0x08C
-#define OHCI1394_IsoXmitIntEventSet           0x090
-#define OHCI1394_IsoXmitIntEventClear         0x094
-#define OHCI1394_IsoXmitIntMaskSet            0x098
-#define OHCI1394_IsoXmitIntMaskClear          0x09C
-#define OHCI1394_IsoRecvIntEventSet           0x0A0
-#define OHCI1394_IsoRecvIntEventClear         0x0A4
-#define OHCI1394_IsoRecvIntMaskSet            0x0A8
-#define OHCI1394_IsoRecvIntMaskClear          0x0AC
-#define OHCI1394_InitialBandwidthAvailable    0x0B0
-#define OHCI1394_InitialChannelsAvailableHi   0x0B4
-#define OHCI1394_InitialChannelsAvailableLo   0x0B8
-#define OHCI1394_FairnessControl              0x0DC
-#define OHCI1394_LinkControlSet               0x0E0
-#define OHCI1394_LinkControlClear             0x0E4
-#define   OHCI1394_LinkControl_rcvSelfID       (1 << 9)
-#define   OHCI1394_LinkControl_rcvPhyPkt       (1 << 10)
-#define   OHCI1394_LinkControl_cycleTimerEnable        (1 << 20)
-#define   OHCI1394_LinkControl_cycleMaster     (1 << 21)
-#define   OHCI1394_LinkControl_cycleSource     (1 << 22)
-#define OHCI1394_NodeID                       0x0E8
-#define   OHCI1394_NodeID_idValid             0x80000000
-#define   OHCI1394_NodeID_nodeNumber          0x0000003f
-#define   OHCI1394_NodeID_busNumber           0x0000ffc0
-#define OHCI1394_PhyControl                   0x0EC
-#define   OHCI1394_PhyControl_Read(addr)       (((addr) << 8) | 0x00008000)
-#define   OHCI1394_PhyControl_ReadDone         0x80000000
-#define   OHCI1394_PhyControl_ReadData(r)      (((r) & 0x00ff0000) >> 16)
-#define   OHCI1394_PhyControl_Write(addr, data)        (((addr) << 8) | (data) | 0x00004000)
-#define   OHCI1394_PhyControl_WriteDone                0x00004000
-#define OHCI1394_IsochronousCycleTimer        0x0F0
-#define OHCI1394_AsReqFilterHiSet             0x100
-#define OHCI1394_AsReqFilterHiClear           0x104
-#define OHCI1394_AsReqFilterLoSet             0x108
-#define OHCI1394_AsReqFilterLoClear           0x10C
-#define OHCI1394_PhyReqFilterHiSet            0x110
-#define OHCI1394_PhyReqFilterHiClear          0x114
-#define OHCI1394_PhyReqFilterLoSet            0x118
-#define OHCI1394_PhyReqFilterLoClear          0x11C
-#define OHCI1394_PhyUpperBound                0x120
-
-#define OHCI1394_AsReqTrContextBase           0x180
-#define OHCI1394_AsReqTrContextControlSet     0x180
-#define OHCI1394_AsReqTrContextControlClear   0x184
-#define OHCI1394_AsReqTrCommandPtr            0x18C
-
-#define OHCI1394_AsRspTrContextBase           0x1A0
-#define OHCI1394_AsRspTrContextControlSet     0x1A0
-#define OHCI1394_AsRspTrContextControlClear   0x1A4
-#define OHCI1394_AsRspTrCommandPtr            0x1AC
-
-#define OHCI1394_AsReqRcvContextBase          0x1C0
-#define OHCI1394_AsReqRcvContextControlSet    0x1C0
-#define OHCI1394_AsReqRcvContextControlClear  0x1C4
-#define OHCI1394_AsReqRcvCommandPtr           0x1CC
-
-#define OHCI1394_AsRspRcvContextBase          0x1E0
-#define OHCI1394_AsRspRcvContextControlSet    0x1E0
-#define OHCI1394_AsRspRcvContextControlClear  0x1E4
-#define OHCI1394_AsRspRcvCommandPtr           0x1EC
-
-/* Isochronous transmit registers */
-#define OHCI1394_IsoXmitContextBase(n)           (0x200 + 16 * (n))
-#define OHCI1394_IsoXmitContextControlSet(n)     (0x200 + 16 * (n))
-#define OHCI1394_IsoXmitContextControlClear(n)   (0x204 + 16 * (n))
-#define OHCI1394_IsoXmitCommandPtr(n)            (0x20C + 16 * (n))
-
-/* Isochronous receive registers */
-#define OHCI1394_IsoRcvContextBase(n)         (0x400 + 32 * (n))
-#define OHCI1394_IsoRcvContextControlSet(n)   (0x400 + 32 * (n))
-#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n))
-#define OHCI1394_IsoRcvCommandPtr(n)          (0x40C + 32 * (n))
-#define OHCI1394_IsoRcvContextMatch(n)        (0x410 + 32 * (n))
-
-/* Interrupts Mask/Events */
-#define OHCI1394_reqTxComplete         0x00000001
-#define OHCI1394_respTxComplete                0x00000002
-#define OHCI1394_ARRQ                  0x00000004
-#define OHCI1394_ARRS                  0x00000008
-#define OHCI1394_RQPkt                 0x00000010
-#define OHCI1394_RSPkt                 0x00000020
-#define OHCI1394_isochTx               0x00000040
-#define OHCI1394_isochRx               0x00000080
-#define OHCI1394_postedWriteErr                0x00000100
-#define OHCI1394_lockRespErr           0x00000200
-#define OHCI1394_selfIDComplete                0x00010000
-#define OHCI1394_busReset              0x00020000
-#define OHCI1394_regAccessFail         0x00040000
-#define OHCI1394_phy                   0x00080000
-#define OHCI1394_cycleSynch            0x00100000
-#define OHCI1394_cycle64Seconds                0x00200000
-#define OHCI1394_cycleLost             0x00400000
-#define OHCI1394_cycleInconsistent     0x00800000
-#define OHCI1394_unrecoverableError    0x01000000
-#define OHCI1394_cycleTooLong          0x02000000
-#define OHCI1394_phyRegRcvd            0x04000000
-#define OHCI1394_masterIntEnable       0x80000000
-
-#define OHCI1394_evt_no_status         0x0
-#define OHCI1394_evt_long_packet       0x2
-#define OHCI1394_evt_missing_ack       0x3
-#define OHCI1394_evt_underrun          0x4
-#define OHCI1394_evt_overrun           0x5
-#define OHCI1394_evt_descriptor_read   0x6
-#define OHCI1394_evt_data_read         0x7
-#define OHCI1394_evt_data_write                0x8
-#define OHCI1394_evt_bus_reset         0x9
-#define OHCI1394_evt_timeout           0xa
-#define OHCI1394_evt_tcode_err         0xb
-#define OHCI1394_evt_reserved_b                0xc
-#define OHCI1394_evt_reserved_c                0xd
-#define OHCI1394_evt_unknown           0xe
-#define OHCI1394_evt_flushed           0xf
-
-#define OHCI1394_phy_tcode             0xe
-
-#endif /* __fw_ohci_h */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
deleted file mode 100644 (file)
index 2bcf515..0000000
+++ /dev/null
@@ -1,1644 +0,0 @@
-/*
- * SBP2 driver (SCSI over IEEE1394)
- *
- * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- * The basic structure of this driver is based on the old storage driver,
- * drivers/ieee1394/sbp2.c, originally written by
- *     James Goodwin <jamesg@filanet.com>
- * with later contributions and ongoing maintenance from
- *     Ben Collins <bcollins@debian.org>,
- *     Stefan Richter <stefanr@s5r6.in-berlin.de>
- * and many others.
- */
-
-#include <linux/blkdev.h>
-#include <linux/bug.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/mod_devicetable.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/stringify.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <asm/system.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include "fw-device.h"
-#include "fw-topology.h"
-#include "fw-transaction.h"
-
-/*
- * So far only bridges from Oxford Semiconductor are known to support
- * concurrent logins. Depending on firmware, four or two concurrent logins
- * are possible on OXFW911 and newer Oxsemi bridges.
- *
- * Concurrent logins are useful together with cluster filesystems.
- */
-static int sbp2_param_exclusive_login = 1;
-module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
-MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
-                "(default = Y, use N for concurrent initiators)");
-
-/*
- * Flags for firmware oddities
- *
- * - 128kB max transfer
- *   Limit transfer size. Necessary for some old bridges.
- *
- * - 36 byte inquiry
- *   When scsi_mod probes the device, let the inquiry command look like that
- *   from MS Windows.
- *
- * - skip mode page 8
- *   Suppress sending of mode_sense for mode page 8 if the device pretends to
- *   support the SCSI Primary Block commands instead of Reduced Block Commands.
- *
- * - fix capacity
- *   Tell sd_mod to correct the last sector number reported by read_capacity.
- *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
- *   Don't use this with devices which don't have this bug.
- *
- * - delay inquiry
- *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
- *
- * - power condition
- *   Set the power condition field in the START STOP UNIT commands sent by
- *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
- *   Some disks need this to spin down or to resume properly.
- *
- * - override internal blacklist
- *   Instead of adding to the built-in blacklist, use only the workarounds
- *   specified in the module load parameter.
- *   Useful if a blacklist entry interfered with a non-broken device.
- */
-#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
-#define SBP2_WORKAROUND_INQUIRY_36     0x2
-#define SBP2_WORKAROUND_MODE_SENSE_8   0x4
-#define SBP2_WORKAROUND_FIX_CAPACITY   0x8
-#define SBP2_WORKAROUND_DELAY_INQUIRY  0x10
-#define SBP2_INQUIRY_DELAY             12
-#define SBP2_WORKAROUND_POWER_CONDITION        0x20
-#define SBP2_WORKAROUND_OVERRIDE       0x100
-
-static int sbp2_param_workarounds;
-module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
-MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
-       ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
-       ", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
-       ", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
-       ", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
-       ", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
-       ", set power condition in start stop unit = "
-                                 __stringify(SBP2_WORKAROUND_POWER_CONDITION)
-       ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
-       ", or a combination)");
-
-/* I don't know why the SCSI stack doesn't define something like this... */
-typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
-
-static const char sbp2_driver_name[] = "sbp2";
-
-/*
- * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
- * and one struct scsi_device per sbp2_logical_unit.
- */
-struct sbp2_logical_unit {
-       struct sbp2_target *tgt;
-       struct list_head link;
-       struct fw_address_handler address_handler;
-       struct list_head orb_list;
-
-       u64 command_block_agent_address;
-       u16 lun;
-       int login_id;
-
-       /*
-        * The generation is updated once we've logged in or reconnected
-        * to the logical unit.  Thus, I/O to the device will automatically
-        * fail and get retried if it happens in a window where the device
-        * is not ready, e.g. after a bus reset but before we reconnect.
-        */
-       int generation;
-       int retries;
-       struct delayed_work work;
-       bool has_sdev;
-       bool blocked;
-};
-
-/*
- * We create one struct sbp2_target per IEEE 1212 Unit Directory
- * and one struct Scsi_Host per sbp2_target.
- */
-struct sbp2_target {
-       struct kref kref;
-       struct fw_unit *unit;
-       const char *bus_id;
-       struct list_head lu_list;
-
-       u64 management_agent_address;
-       u64 guid;
-       int directory_id;
-       int node_id;
-       int address_high;
-       unsigned int workarounds;
-       unsigned int mgt_orb_timeout;
-       unsigned int max_payload;
-
-       int dont_block; /* counter for each logical unit */
-       int blocked;    /* ditto */
-};
-
-/* Impossible login_id, to detect logout attempt before successful login */
-#define INVALID_LOGIN_ID 0x10000
-
-/*
- * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
- * provided in the config rom. Most devices do provide a value, which
- * we'll use for login management orbs, but with some sane limits.
- */
-#define SBP2_MIN_LOGIN_ORB_TIMEOUT     5000U   /* Timeout in ms */
-#define SBP2_MAX_LOGIN_ORB_TIMEOUT     40000U  /* Timeout in ms */
-#define SBP2_ORB_TIMEOUT               2000U   /* Timeout in ms */
-#define SBP2_ORB_NULL                  0x80000000
-#define SBP2_RETRY_LIMIT               0xf             /* 15 retries */
-#define SBP2_CYCLE_LIMIT               (0xc8 << 12)    /* 200 125us cycles */
-
-/*
- * The default maximum s/g segment size of a FireWire controller is
- * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
- * be quadlet-aligned, we set the length limit to 0xffff & ~3.
- */
-#define SBP2_MAX_SEG_SIZE              0xfffc
-
-/* Unit directory keys */
-#define SBP2_CSR_UNIT_CHARACTERISTICS  0x3a
-#define SBP2_CSR_FIRMWARE_REVISION     0x3c
-#define SBP2_CSR_LOGICAL_UNIT_NUMBER   0x14
-#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY        0xd4
-
-/* Management orb opcodes */
-#define SBP2_LOGIN_REQUEST             0x0
-#define SBP2_QUERY_LOGINS_REQUEST      0x1
-#define SBP2_RECONNECT_REQUEST         0x3
-#define SBP2_SET_PASSWORD_REQUEST      0x4
-#define SBP2_LOGOUT_REQUEST            0x7
-#define SBP2_ABORT_TASK_REQUEST                0xb
-#define SBP2_ABORT_TASK_SET            0xc
-#define SBP2_LOGICAL_UNIT_RESET                0xe
-#define SBP2_TARGET_RESET_REQUEST      0xf
-
-/* Offsets for command block agent registers */
-#define SBP2_AGENT_STATE               0x00
-#define SBP2_AGENT_RESET               0x04
-#define SBP2_ORB_POINTER               0x08
-#define SBP2_DOORBELL                  0x10
-#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14
-
-/* Status write response codes */
-#define SBP2_STATUS_REQUEST_COMPLETE   0x0
-#define SBP2_STATUS_TRANSPORT_FAILURE  0x1
-#define SBP2_STATUS_ILLEGAL_REQUEST    0x2
-#define SBP2_STATUS_VENDOR_DEPENDENT   0x3
-
-#define STATUS_GET_ORB_HIGH(v)         ((v).status & 0xffff)
-#define STATUS_GET_SBP_STATUS(v)       (((v).status >> 16) & 0xff)
-#define STATUS_GET_LEN(v)              (((v).status >> 24) & 0x07)
-#define STATUS_GET_DEAD(v)             (((v).status >> 27) & 0x01)
-#define STATUS_GET_RESPONSE(v)         (((v).status >> 28) & 0x03)
-#define STATUS_GET_SOURCE(v)           (((v).status >> 30) & 0x03)
-#define STATUS_GET_ORB_LOW(v)          ((v).orb_low)
-#define STATUS_GET_DATA(v)             ((v).data)
-
-struct sbp2_status {
-       u32 status;
-       u32 orb_low;
-       u8 data[24];
-};
-
-struct sbp2_pointer {
-       __be32 high;
-       __be32 low;
-};
-
-struct sbp2_orb {
-       struct fw_transaction t;
-       struct kref kref;
-       dma_addr_t request_bus;
-       int rcode;
-       struct sbp2_pointer pointer;
-       void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
-       struct list_head link;
-};
-
-#define MANAGEMENT_ORB_LUN(v)                  ((v))
-#define MANAGEMENT_ORB_FUNCTION(v)             ((v) << 16)
-#define MANAGEMENT_ORB_RECONNECT(v)            ((v) << 20)
-#define MANAGEMENT_ORB_EXCLUSIVE(v)            ((v) ? 1 << 28 : 0)
-#define MANAGEMENT_ORB_REQUEST_FORMAT(v)       ((v) << 29)
-#define MANAGEMENT_ORB_NOTIFY                  ((1) << 31)
-
-#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)      ((v))
-#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)      ((v) << 16)
-
-struct sbp2_management_orb {
-       struct sbp2_orb base;
-       struct {
-               struct sbp2_pointer password;
-               struct sbp2_pointer response;
-               __be32 misc;
-               __be32 length;
-               struct sbp2_pointer status_fifo;
-       } request;
-       __be32 response[4];
-       dma_addr_t response_bus;
-       struct completion done;
-       struct sbp2_status status;
-};
-
-struct sbp2_login_response {
-       __be32 misc;
-       struct sbp2_pointer command_block_agent;
-       __be32 reconnect_hold;
-};
-#define COMMAND_ORB_DATA_SIZE(v)       ((v))
-#define COMMAND_ORB_PAGE_SIZE(v)       ((v) << 16)
-#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
-#define COMMAND_ORB_MAX_PAYLOAD(v)     ((v) << 20)
-#define COMMAND_ORB_SPEED(v)           ((v) << 24)
-#define COMMAND_ORB_DIRECTION          ((1) << 27)
-#define COMMAND_ORB_REQUEST_FORMAT(v)  ((v) << 29)
-#define COMMAND_ORB_NOTIFY             ((1) << 31)
-
-struct sbp2_command_orb {
-       struct sbp2_orb base;
-       struct {
-               struct sbp2_pointer next;
-               struct sbp2_pointer data_descriptor;
-               __be32 misc;
-               u8 command_block[12];
-       } request;
-       struct scsi_cmnd *cmd;
-       scsi_done_fn_t done;
-       struct sbp2_logical_unit *lu;
-
-       struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
-       dma_addr_t page_table_bus;
-};
-
-#define SBP2_ROM_VALUE_WILDCARD ~0         /* match all */
-#define SBP2_ROM_VALUE_MISSING  0xff000000 /* not present in the unit dir. */
-
-/*
- * List of devices with known bugs.
- *
- * The firmware_revision field, masked with 0xffff00, is the best
- * indicator for the type of bridge chip of a device.  It yields a few
- * false positives but this did not break correctly behaving devices
- * so far.
- */
-static const struct {
-       u32 firmware_revision;
-       u32 model;
-       unsigned int workarounds;
-} sbp2_workarounds_table[] = {
-       /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
-               .firmware_revision      = 0x002800,
-               .model                  = 0x001010,
-               .workarounds            = SBP2_WORKAROUND_INQUIRY_36 |
-                                         SBP2_WORKAROUND_MODE_SENSE_8 |
-                                         SBP2_WORKAROUND_POWER_CONDITION,
-       },
-       /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
-               .firmware_revision      = 0x002800,
-               .model                  = 0x000000,
-               .workarounds            = SBP2_WORKAROUND_DELAY_INQUIRY |
-                                         SBP2_WORKAROUND_POWER_CONDITION,
-       },
-       /* Initio bridges, actually only needed for some older ones */ {
-               .firmware_revision      = 0x000200,
-               .model                  = SBP2_ROM_VALUE_WILDCARD,
-               .workarounds            = SBP2_WORKAROUND_INQUIRY_36,
-       },
-       /* PL-3507 bridge with Prolific firmware */ {
-               .firmware_revision      = 0x012800,
-               .model                  = SBP2_ROM_VALUE_WILDCARD,
-               .workarounds            = SBP2_WORKAROUND_POWER_CONDITION,
-       },
-       /* Symbios bridge */ {
-               .firmware_revision      = 0xa0b800,
-               .model                  = SBP2_ROM_VALUE_WILDCARD,
-               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
-       },
-       /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
-               .firmware_revision      = 0x002600,
-               .model                  = SBP2_ROM_VALUE_WILDCARD,
-               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
-       },
-       /*
-        * iPod 2nd generation: needs 128k max transfer size workaround
-        * iPod 3rd generation: needs fix capacity workaround
-        */
-       {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x000000,
-               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS |
-                                         SBP2_WORKAROUND_FIX_CAPACITY,
-       },
-       /* iPod 4th generation */ {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x000021,
-               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
-       },
-       /* iPod mini */ {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x000022,
-               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
-       },
-       /* iPod mini */ {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x000023,
-               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
-       },
-       /* iPod Photo */ {
-               .firmware_revision      = 0x0a2700,
-               .model                  = 0x00007e,
-               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
-       }
-};
-
-static void free_orb(struct kref *kref)
-{
-       struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
-
-       kfree(orb);
-}
-
-static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
-                             int tcode, int destination, int source,
-                             int generation, int speed,
-                             unsigned long long offset,
-                             void *payload, size_t length, void *callback_data)
-{
-       struct sbp2_logical_unit *lu = callback_data;
-       struct sbp2_orb *orb;
-       struct sbp2_status status;
-       size_t header_size;
-       unsigned long flags;
-
-       if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
-           length == 0 || length > sizeof(status)) {
-               fw_send_response(card, request, RCODE_TYPE_ERROR);
-               return;
-       }
-
-       header_size = min(length, 2 * sizeof(u32));
-       fw_memcpy_from_be32(&status, payload, header_size);
-       if (length > header_size)
-               memcpy(status.data, payload + 8, length - header_size);
-       if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
-               fw_notify("non-orb related status write, not handled\n");
-               fw_send_response(card, request, RCODE_COMPLETE);
-               return;
-       }
-
-       /* Lookup the orb corresponding to this status write. */
-       spin_lock_irqsave(&card->lock, flags);
-       list_for_each_entry(orb, &lu->orb_list, link) {
-               if (STATUS_GET_ORB_HIGH(status) == 0 &&
-                   STATUS_GET_ORB_LOW(status) == orb->request_bus) {
-                       orb->rcode = RCODE_COMPLETE;
-                       list_del(&orb->link);
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (&orb->link != &lu->orb_list)
-               orb->callback(orb, &status);
-       else
-               fw_error("status write for unknown orb\n");
-
-       kref_put(&orb->kref, free_orb);
-
-       fw_send_response(card, request, RCODE_COMPLETE);
-}
-
-static void complete_transaction(struct fw_card *card, int rcode,
-                                void *payload, size_t length, void *data)
-{
-       struct sbp2_orb *orb = data;
-       unsigned long flags;
-
-       /*
-        * This is a little tricky.  We can get the status write for
-        * the orb before we get this callback.  The status write
-        * handler above will assume the orb pointer transaction was
-        * successful and set the rcode to RCODE_COMPLETE for the orb.
-        * So this callback only sets the rcode if it hasn't already
-        * been set and only does the cleanup if the transaction
-        * failed and we didn't already get a status write.
-        */
-       spin_lock_irqsave(&card->lock, flags);
-
-       if (orb->rcode == -1)
-               orb->rcode = rcode;
-       if (orb->rcode != RCODE_COMPLETE) {
-               list_del(&orb->link);
-               spin_unlock_irqrestore(&card->lock, flags);
-               orb->callback(orb, NULL);
-       } else {
-               spin_unlock_irqrestore(&card->lock, flags);
-       }
-
-       kref_put(&orb->kref, free_orb);
-}
-
-static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
-                         int node_id, int generation, u64 offset)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       unsigned long flags;
-
-       orb->pointer.high = 0;
-       orb->pointer.low = cpu_to_be32(orb->request_bus);
-
-       spin_lock_irqsave(&device->card->lock, flags);
-       list_add_tail(&orb->link, &lu->orb_list);
-       spin_unlock_irqrestore(&device->card->lock, flags);
-
-       /* Take a ref for the orb list and for the transaction callback. */
-       kref_get(&orb->kref);
-       kref_get(&orb->kref);
-
-       fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
-                       node_id, generation, device->max_speed, offset,
-                       &orb->pointer, sizeof(orb->pointer),
-                       complete_transaction, orb);
-}
-
-static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       struct sbp2_orb *orb, *next;
-       struct list_head list;
-       unsigned long flags;
-       int retval = -ENOENT;
-
-       INIT_LIST_HEAD(&list);
-       spin_lock_irqsave(&device->card->lock, flags);
-       list_splice_init(&lu->orb_list, &list);
-       spin_unlock_irqrestore(&device->card->lock, flags);
-
-       list_for_each_entry_safe(orb, next, &list, link) {
-               retval = 0;
-               if (fw_cancel_transaction(device->card, &orb->t) == 0)
-                       continue;
-
-               orb->rcode = RCODE_CANCELLED;
-               orb->callback(orb, NULL);
-       }
-
-       return retval;
-}
-
-static void complete_management_orb(struct sbp2_orb *base_orb,
-                                   struct sbp2_status *status)
-{
-       struct sbp2_management_orb *orb =
-               container_of(base_orb, struct sbp2_management_orb, base);
-
-       if (status)
-               memcpy(&orb->status, status, sizeof(*status));
-       complete(&orb->done);
-}
-
-static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
-                                   int generation, int function,
-                                   int lun_or_login_id, void *response)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       struct sbp2_management_orb *orb;
-       unsigned int timeout;
-       int retval = -ENOMEM;
-
-       if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
-               return 0;
-
-       orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
-       if (orb == NULL)
-               return -ENOMEM;
-
-       kref_init(&orb->base.kref);
-       orb->response_bus =
-               dma_map_single(device->card->device, &orb->response,
-                              sizeof(orb->response), DMA_FROM_DEVICE);
-       if (dma_mapping_error(device->card->device, orb->response_bus))
-               goto fail_mapping_response;
-
-       orb->request.response.high = 0;
-       orb->request.response.low  = cpu_to_be32(orb->response_bus);
-
-       orb->request.misc = cpu_to_be32(
-               MANAGEMENT_ORB_NOTIFY |
-               MANAGEMENT_ORB_FUNCTION(function) |
-               MANAGEMENT_ORB_LUN(lun_or_login_id));
-       orb->request.length = cpu_to_be32(
-               MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
-
-       orb->request.status_fifo.high =
-               cpu_to_be32(lu->address_handler.offset >> 32);
-       orb->request.status_fifo.low  =
-               cpu_to_be32(lu->address_handler.offset);
-
-       if (function == SBP2_LOGIN_REQUEST) {
-               /* Ask for 2^2 == 4 seconds reconnect grace period */
-               orb->request.misc |= cpu_to_be32(
-                       MANAGEMENT_ORB_RECONNECT(2) |
-                       MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
-               timeout = lu->tgt->mgt_orb_timeout;
-       } else {
-               timeout = SBP2_ORB_TIMEOUT;
-       }
-
-       init_completion(&orb->done);
-       orb->base.callback = complete_management_orb;
-
-       orb->base.request_bus =
-               dma_map_single(device->card->device, &orb->request,
-                              sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(device->card->device, orb->base.request_bus))
-               goto fail_mapping_request;
-
-       sbp2_send_orb(&orb->base, lu, node_id, generation,
-                     lu->tgt->management_agent_address);
-
-       wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));
-
-       retval = -EIO;
-       if (sbp2_cancel_orbs(lu) == 0) {
-               fw_error("%s: orb reply timed out, rcode=0x%02x\n",
-                        lu->tgt->bus_id, orb->base.rcode);
-               goto out;
-       }
-
-       if (orb->base.rcode != RCODE_COMPLETE) {
-               fw_error("%s: management write failed, rcode 0x%02x\n",
-                        lu->tgt->bus_id, orb->base.rcode);
-               goto out;
-       }
-
-       if (STATUS_GET_RESPONSE(orb->status) != 0 ||
-           STATUS_GET_SBP_STATUS(orb->status) != 0) {
-               fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
-                        STATUS_GET_RESPONSE(orb->status),
-                        STATUS_GET_SBP_STATUS(orb->status));
-               goto out;
-       }
-
-       retval = 0;
- out:
-       dma_unmap_single(device->card->device, orb->base.request_bus,
-                        sizeof(orb->request), DMA_TO_DEVICE);
- fail_mapping_request:
-       dma_unmap_single(device->card->device, orb->response_bus,
-                        sizeof(orb->response), DMA_FROM_DEVICE);
- fail_mapping_response:
-       if (response)
-               memcpy(response, orb->response, sizeof(orb->response));
-       kref_put(&orb->base.kref, free_orb);
-
-       return retval;
-}
-
-static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       __be32 d = 0;
-
-       fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
-                          lu->tgt->node_id, lu->generation, device->max_speed,
-                          lu->command_block_agent_address + SBP2_AGENT_RESET,
-                          &d, sizeof(d));
-}
-
-static void complete_agent_reset_write_no_wait(struct fw_card *card,
-               int rcode, void *payload, size_t length, void *data)
-{
-       kfree(data);
-}
-
-static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       struct fw_transaction *t;
-       static __be32 d;
-
-       t = kmalloc(sizeof(*t), GFP_ATOMIC);
-       if (t == NULL)
-               return;
-
-       fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
-                       lu->tgt->node_id, lu->generation, device->max_speed,
-                       lu->command_block_agent_address + SBP2_AGENT_RESET,
-                       &d, sizeof(d), complete_agent_reset_write_no_wait, t);
-}
-
-static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
-{
-       /*
-        * We may access dont_block without taking card->lock here:
-        * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
-        * are currently serialized against each other.
-        * And a wrong result in sbp2_conditionally_block()'s access of
-        * dont_block is rather harmless, it simply misses its first chance.
-        */
-       --lu->tgt->dont_block;
-}
-
-/*
- * Blocks lu->tgt if all of the following conditions are met:
- *   - Login, INQUIRY, and high-level SCSI setup of all of the target's
- *     logical units have been finished (indicated by dont_block == 0).
- *   - lu->generation is stale.
- *
- * Note, scsi_block_requests() must be called while holding card->lock,
- * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
- * unblock the target.
- */
-static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
-{
-       struct sbp2_target *tgt = lu->tgt;
-       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
-       struct Scsi_Host *shost =
-               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       if (!tgt->dont_block && !lu->blocked &&
-           lu->generation != card->generation) {
-               lu->blocked = true;
-               if (++tgt->blocked == 1)
-                       scsi_block_requests(shost);
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-}
-
-/*
- * Unblocks lu->tgt as soon as all its logical units can be unblocked.
- * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section.  On the other hand, running it inside
- * the section might clash with shost->host_lock.
- */
-static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
-{
-       struct sbp2_target *tgt = lu->tgt;
-       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
-       struct Scsi_Host *shost =
-               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       unsigned long flags;
-       bool unblock = false;
-
-       spin_lock_irqsave(&card->lock, flags);
-       if (lu->blocked && lu->generation == card->generation) {
-               lu->blocked = false;
-               unblock = --tgt->blocked == 0;
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (unblock)
-               scsi_unblock_requests(shost);
-}
-
-/*
- * Prevents future blocking of tgt and unblocks it.
- * Note, it is harmless to run scsi_unblock_requests() outside the
- * card->lock protected section.  On the other hand, running it inside
- * the section might clash with shost->host_lock.
- */
-static void sbp2_unblock(struct sbp2_target *tgt)
-{
-       struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
-       struct Scsi_Host *shost =
-               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       ++tgt->dont_block;
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       scsi_unblock_requests(shost);
-}
-
-static int sbp2_lun2int(u16 lun)
-{
-       struct scsi_lun eight_bytes_lun;
-
-       memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
-       eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
-       eight_bytes_lun.scsi_lun[1] = lun & 0xff;
-
-       return scsilun_to_int(&eight_bytes_lun);
-}
-
-static void sbp2_release_target(struct kref *kref)
-{
-       struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
-       struct sbp2_logical_unit *lu, *next;
-       struct Scsi_Host *shost =
-               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       struct scsi_device *sdev;
-       struct fw_device *device = fw_device(tgt->unit->device.parent);
-
-       /* prevent deadlocks */
-       sbp2_unblock(tgt);
-
-       list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
-               sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
-               if (sdev) {
-                       scsi_remove_device(sdev);
-                       scsi_device_put(sdev);
-               }
-               if (lu->login_id != INVALID_LOGIN_ID) {
-                       int generation, node_id;
-                       /*
-                        * tgt->node_id may be obsolete here if we failed
-                        * during initial login or after a bus reset where
-                        * the topology changed.
-                        */
-                       generation = device->generation;
-                       smp_rmb(); /* node_id vs. generation */
-                       node_id    = device->node_id;
-                       sbp2_send_management_orb(lu, node_id, generation,
-                                                SBP2_LOGOUT_REQUEST,
-                                                lu->login_id, NULL);
-               }
-               fw_core_remove_address_handler(&lu->address_handler);
-               list_del(&lu->link);
-               kfree(lu);
-       }
-       scsi_remove_host(shost);
-       fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
-
-       fw_unit_put(tgt->unit);
-       scsi_host_put(shost);
-       fw_device_put(device);
-}
-
-static struct workqueue_struct *sbp2_wq;
-
-static void sbp2_target_put(struct sbp2_target *tgt)
-{
-       kref_put(&tgt->kref, sbp2_release_target);
-}
-
-/*
- * Always get the target's kref when scheduling work on one its units.
- * Each workqueue job is responsible to call sbp2_target_put() upon return.
- */
-static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
-{
-       kref_get(&lu->tgt->kref);
-       if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
-               sbp2_target_put(lu->tgt);
-}
-
-/*
- * Write retransmit retry values into the BUSY_TIMEOUT register.
- * - The single-phase retry protocol is supported by all SBP-2 devices, but the
- *   default retry_limit value is 0 (i.e. never retry transmission). We write a
- *   saner value after logging into the device.
- * - The dual-phase retry protocol is optional to implement, and if not
- *   supported, writes to the dual-phase portion of the register will be
- *   ignored. We try to write the original 1394-1995 default here.
- * - In the case of devices that are also SBP-3-compliant, all writes are
- *   ignored, as the register is read-only, but contains single-phase retry of
- *   15, which is what we're trying to set for all SBP-2 device anyway, so this
- *   write attempt is safe and yields more consistent behavior for all devices.
- *
- * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
- * and section 6.4 of the SBP-3 spec for further details.
- */
-static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
-{
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
-
-       fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
-                          lu->tgt->node_id, lu->generation, device->max_speed,
-                          CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
-                          &d, sizeof(d));
-}
-
-static void sbp2_reconnect(struct work_struct *work);
-
-static void sbp2_login(struct work_struct *work)
-{
-       struct sbp2_logical_unit *lu =
-               container_of(work, struct sbp2_logical_unit, work.work);
-       struct sbp2_target *tgt = lu->tgt;
-       struct fw_device *device = fw_device(tgt->unit->device.parent);
-       struct Scsi_Host *shost;
-       struct scsi_device *sdev;
-       struct sbp2_login_response response;
-       int generation, node_id, local_node_id;
-
-       if (fw_device_is_shutdown(device))
-               goto out;
-
-       generation    = device->generation;
-       smp_rmb();    /* node IDs must not be older than generation */
-       node_id       = device->node_id;
-       local_node_id = device->card->node_id;
-
-       /* If this is a re-login attempt, log out, or we might be rejected. */
-       if (lu->has_sdev)
-               sbp2_send_management_orb(lu, device->node_id, generation,
-                               SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
-
-       if (sbp2_send_management_orb(lu, node_id, generation,
-                               SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
-               if (lu->retries++ < 5) {
-                       sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-               } else {
-                       fw_error("%s: failed to login to LUN %04x\n",
-                                tgt->bus_id, lu->lun);
-                       /* Let any waiting I/O fail from now on. */
-                       sbp2_unblock(lu->tgt);
-               }
-               goto out;
-       }
-
-       tgt->node_id      = node_id;
-       tgt->address_high = local_node_id << 16;
-       smp_wmb();        /* node IDs must not be older than generation */
-       lu->generation    = generation;
-
-       lu->command_block_agent_address =
-               ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
-                     << 32) | be32_to_cpu(response.command_block_agent.low);
-       lu->login_id = be32_to_cpu(response.misc) & 0xffff;
-
-       fw_notify("%s: logged in to LUN %04x (%d retries)\n",
-                 tgt->bus_id, lu->lun, lu->retries);
-
-       /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
-       sbp2_set_busy_timeout(lu);
-
-       PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
-       sbp2_agent_reset(lu);
-
-       /* This was a re-login. */
-       if (lu->has_sdev) {
-               sbp2_cancel_orbs(lu);
-               sbp2_conditionally_unblock(lu);
-               goto out;
-       }
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
-               ssleep(SBP2_INQUIRY_DELAY);
-
-       shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
-       sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
-       /*
-        * FIXME:  We are unable to perform reconnects while in sbp2_login().
-        * Therefore __scsi_add_device() will get into trouble if a bus reset
-        * happens in parallel.  It will either fail or leave us with an
-        * unusable sdev.  As a workaround we check for this and retry the
-        * whole login and SCSI probing.
-        */
-
-       /* Reported error during __scsi_add_device() */
-       if (IS_ERR(sdev))
-               goto out_logout_login;
-
-       /* Unreported error during __scsi_add_device() */
-       smp_rmb(); /* get current card generation */
-       if (generation != device->card->generation) {
-               scsi_remove_device(sdev);
-               scsi_device_put(sdev);
-               goto out_logout_login;
-       }
-
-       /* No error during __scsi_add_device() */
-       lu->has_sdev = true;
-       scsi_device_put(sdev);
-       sbp2_allow_block(lu);
-       goto out;
-
- out_logout_login:
-       smp_rmb(); /* generation may have changed */
-       generation = device->generation;
-       smp_rmb(); /* node_id must not be older than generation */
-
-       sbp2_send_management_orb(lu, device->node_id, generation,
-                                SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
-       /*
-        * If a bus reset happened, sbp2_update will have requeued
-        * lu->work already.  Reset the work from reconnect to login.
-        */
-       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- out:
-       sbp2_target_put(tgt);
-}
-
-static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
-{
-       struct sbp2_logical_unit *lu;
-
-       lu = kmalloc(sizeof(*lu), GFP_KERNEL);
-       if (!lu)
-               return -ENOMEM;
-
-       lu->address_handler.length           = 0x100;
-       lu->address_handler.address_callback = sbp2_status_write;
-       lu->address_handler.callback_data    = lu;
-
-       if (fw_core_add_address_handler(&lu->address_handler,
-                                       &fw_high_memory_region) < 0) {
-               kfree(lu);
-               return -ENOMEM;
-       }
-
-       lu->tgt      = tgt;
-       lu->lun      = lun_entry & 0xffff;
-       lu->login_id = INVALID_LOGIN_ID;
-       lu->retries  = 0;
-       lu->has_sdev = false;
-       lu->blocked  = false;
-       ++tgt->dont_block;
-       INIT_LIST_HEAD(&lu->orb_list);
-       INIT_DELAYED_WORK(&lu->work, sbp2_login);
-
-       list_add_tail(&lu->link, &tgt->lu_list);
-       return 0;
-}
-
-static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
-{
-       struct fw_csr_iterator ci;
-       int key, value;
-
-       fw_csr_iterator_init(&ci, directory);
-       while (fw_csr_iterator_next(&ci, &key, &value))
-               if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
-                   sbp2_add_logical_unit(tgt, value) < 0)
-                       return -ENOMEM;
-       return 0;
-}
-
-static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
-                             u32 *model, u32 *firmware_revision)
-{
-       struct fw_csr_iterator ci;
-       int key, value;
-       unsigned int timeout;
-
-       fw_csr_iterator_init(&ci, directory);
-       while (fw_csr_iterator_next(&ci, &key, &value)) {
-               switch (key) {
-
-               case CSR_DEPENDENT_INFO | CSR_OFFSET:
-                       tgt->management_agent_address =
-                                       CSR_REGISTER_BASE + 4 * value;
-                       break;
-
-               case CSR_DIRECTORY_ID:
-                       tgt->directory_id = value;
-                       break;
-
-               case CSR_MODEL:
-                       *model = value;
-                       break;
-
-               case SBP2_CSR_FIRMWARE_REVISION:
-                       *firmware_revision = value;
-                       break;
-
-               case SBP2_CSR_UNIT_CHARACTERISTICS:
-                       /* the timeout value is stored in 500ms units */
-                       timeout = ((unsigned int) value >> 8 & 0xff) * 500;
-                       timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
-                       tgt->mgt_orb_timeout =
-                                 min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
-
-                       if (timeout > tgt->mgt_orb_timeout)
-                               fw_notify("%s: config rom contains %ds "
-                                         "management ORB timeout, limiting "
-                                         "to %ds\n", tgt->bus_id,
-                                         timeout / 1000,
-                                         tgt->mgt_orb_timeout / 1000);
-                       break;
-
-               case SBP2_CSR_LOGICAL_UNIT_NUMBER:
-                       if (sbp2_add_logical_unit(tgt, value) < 0)
-                               return -ENOMEM;
-                       break;
-
-               case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
-                       /* Adjust for the increment in the iterator */
-                       if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
-                               return -ENOMEM;
-                       break;
-               }
-       }
-       return 0;
-}
-
-static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
-                                 u32 firmware_revision)
-{
-       int i;
-       unsigned int w = sbp2_param_workarounds;
-
-       if (w)
-               fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
-                         "if you need the workarounds parameter for %s\n",
-                         tgt->bus_id);
-
-       if (w & SBP2_WORKAROUND_OVERRIDE)
-               goto out;
-
-       for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
-
-               if (sbp2_workarounds_table[i].firmware_revision !=
-                   (firmware_revision & 0xffffff00))
-                       continue;
-
-               if (sbp2_workarounds_table[i].model != model &&
-                   sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
-                       continue;
-
-               w |= sbp2_workarounds_table[i].workarounds;
-               break;
-       }
- out:
-       if (w)
-               fw_notify("Workarounds for %s: 0x%x "
-                         "(firmware_revision 0x%06x, model_id 0x%06x)\n",
-                         tgt->bus_id, w, firmware_revision, model);
-       tgt->workarounds = w;
-}
-
-static struct scsi_host_template scsi_driver_template;
-
-static int sbp2_probe(struct device *dev)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       struct fw_device *device = fw_device(unit->device.parent);
-       struct sbp2_target *tgt;
-       struct sbp2_logical_unit *lu;
-       struct Scsi_Host *shost;
-       u32 model, firmware_revision;
-
-       if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
-               BUG_ON(dma_set_max_seg_size(device->card->device,
-                                           SBP2_MAX_SEG_SIZE));
-
-       shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
-       if (shost == NULL)
-               return -ENOMEM;
-
-       tgt = (struct sbp2_target *)shost->hostdata;
-       unit->device.driver_data = tgt;
-       tgt->unit = unit;
-       kref_init(&tgt->kref);
-       INIT_LIST_HEAD(&tgt->lu_list);
-       tgt->bus_id = dev_name(&unit->device);
-       tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
-
-       if (fw_device_enable_phys_dma(device) < 0)
-               goto fail_shost_put;
-
-       if (scsi_add_host(shost, &unit->device) < 0)
-               goto fail_shost_put;
-
-       fw_device_get(device);
-       fw_unit_get(unit);
-
-       /* implicit directory ID */
-       tgt->directory_id = ((unit->directory - device->config_rom) * 4
-                            + CSR_CONFIG_ROM) & 0xffffff;
-
-       firmware_revision = SBP2_ROM_VALUE_MISSING;
-       model             = SBP2_ROM_VALUE_MISSING;
-
-       if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
-                              &firmware_revision) < 0)
-               goto fail_tgt_put;
-
-       sbp2_init_workarounds(tgt, model, firmware_revision);
-
-       /*
-        * At S100 we can do 512 bytes per packet, at S200 1024 bytes,
-        * and so on up to 4096 bytes.  The SBP-2 max_payload field
-        * specifies the max payload size as 2 ^ (max_payload + 2), so
-        * if we set this to max_speed + 7, we get the right value.
-        */
-       tgt->max_payload = min(device->max_speed + 7, 10U);
-       tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
-
-       /* Do the login in a workqueue so we can easily reschedule retries. */
-       list_for_each_entry(lu, &tgt->lu_list, link)
-               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-       return 0;
-
- fail_tgt_put:
-       sbp2_target_put(tgt);
-       return -ENOMEM;
-
- fail_shost_put:
-       scsi_host_put(shost);
-       return -ENOMEM;
-}
-
-static int sbp2_remove(struct device *dev)
-{
-       struct fw_unit *unit = fw_unit(dev);
-       struct sbp2_target *tgt = unit->device.driver_data;
-
-       sbp2_target_put(tgt);
-       return 0;
-}
-
-static void sbp2_reconnect(struct work_struct *work)
-{
-       struct sbp2_logical_unit *lu =
-               container_of(work, struct sbp2_logical_unit, work.work);
-       struct sbp2_target *tgt = lu->tgt;
-       struct fw_device *device = fw_device(tgt->unit->device.parent);
-       int generation, node_id, local_node_id;
-
-       if (fw_device_is_shutdown(device))
-               goto out;
-
-       generation    = device->generation;
-       smp_rmb();    /* node IDs must not be older than generation */
-       node_id       = device->node_id;
-       local_node_id = device->card->node_id;
-
-       if (sbp2_send_management_orb(lu, node_id, generation,
-                                    SBP2_RECONNECT_REQUEST,
-                                    lu->login_id, NULL) < 0) {
-               /*
-                * If reconnect was impossible even though we are in the
-                * current generation, fall back and try to log in again.
-                *
-                * We could check for "Function rejected" status, but
-                * looking at the bus generation as simpler and more general.
-                */
-               smp_rmb(); /* get current card generation */
-               if (generation == device->card->generation ||
-                   lu->retries++ >= 5) {
-                       fw_error("%s: failed to reconnect\n", tgt->bus_id);
-                       lu->retries = 0;
-                       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
-               }
-               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
-               goto out;
-       }
-
-       tgt->node_id      = node_id;
-       tgt->address_high = local_node_id << 16;
-       smp_wmb();        /* node IDs must not be older than generation */
-       lu->generation    = generation;
-
-       fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
-                 tgt->bus_id, lu->lun, lu->retries);
-
-       sbp2_agent_reset(lu);
-       sbp2_cancel_orbs(lu);
-       sbp2_conditionally_unblock(lu);
- out:
-       sbp2_target_put(tgt);
-}
-
-static void sbp2_update(struct fw_unit *unit)
-{
-       struct sbp2_target *tgt = unit->device.driver_data;
-       struct sbp2_logical_unit *lu;
-
-       fw_device_enable_phys_dma(fw_device(unit->device.parent));
-
-       /*
-        * Fw-core serializes sbp2_update() against sbp2_remove().
-        * Iteration over tgt->lu_list is therefore safe here.
-        */
-       list_for_each_entry(lu, &tgt->lu_list, link) {
-               sbp2_conditionally_block(lu);
-               lu->retries = 0;
-               sbp2_queue_work(lu, 0);
-       }
-}
-
-#define SBP2_UNIT_SPEC_ID_ENTRY        0x0000609e
-#define SBP2_SW_VERSION_ENTRY  0x00010483
-
-static const struct fw_device_id sbp2_id_table[] = {
-       {
-               .match_flags  = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
-               .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
-               .version      = SBP2_SW_VERSION_ENTRY,
-       },
-       { }
-};
-
-static struct fw_driver sbp2_driver = {
-       .driver   = {
-               .owner  = THIS_MODULE,
-               .name   = sbp2_driver_name,
-               .bus    = &fw_bus_type,
-               .probe  = sbp2_probe,
-               .remove = sbp2_remove,
-       },
-       .update   = sbp2_update,
-       .id_table = sbp2_id_table,
-};
-
-static void sbp2_unmap_scatterlist(struct device *card_device,
-                                  struct sbp2_command_orb *orb)
-{
-       if (scsi_sg_count(orb->cmd))
-               dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
-                            scsi_sg_count(orb->cmd),
-                            orb->cmd->sc_data_direction);
-
-       if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
-               dma_unmap_single(card_device, orb->page_table_bus,
-                                sizeof(orb->page_table), DMA_TO_DEVICE);
-}
-
-static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
-{
-       int sam_status;
-
-       sense_data[0] = 0x70;
-       sense_data[1] = 0x0;
-       sense_data[2] = sbp2_status[1];
-       sense_data[3] = sbp2_status[4];
-       sense_data[4] = sbp2_status[5];
-       sense_data[5] = sbp2_status[6];
-       sense_data[6] = sbp2_status[7];
-       sense_data[7] = 10;
-       sense_data[8] = sbp2_status[8];
-       sense_data[9] = sbp2_status[9];
-       sense_data[10] = sbp2_status[10];
-       sense_data[11] = sbp2_status[11];
-       sense_data[12] = sbp2_status[2];
-       sense_data[13] = sbp2_status[3];
-       sense_data[14] = sbp2_status[12];
-       sense_data[15] = sbp2_status[13];
-
-       sam_status = sbp2_status[0] & 0x3f;
-
-       switch (sam_status) {
-       case SAM_STAT_GOOD:
-       case SAM_STAT_CHECK_CONDITION:
-       case SAM_STAT_CONDITION_MET:
-       case SAM_STAT_BUSY:
-       case SAM_STAT_RESERVATION_CONFLICT:
-       case SAM_STAT_COMMAND_TERMINATED:
-               return DID_OK << 16 | sam_status;
-
-       default:
-               return DID_ERROR << 16;
-       }
-}
-
-static void complete_command_orb(struct sbp2_orb *base_orb,
-                                struct sbp2_status *status)
-{
-       struct sbp2_command_orb *orb =
-               container_of(base_orb, struct sbp2_command_orb, base);
-       struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent);
-       int result;
-
-       if (status != NULL) {
-               if (STATUS_GET_DEAD(*status))
-                       sbp2_agent_reset_no_wait(orb->lu);
-
-               switch (STATUS_GET_RESPONSE(*status)) {
-               case SBP2_STATUS_REQUEST_COMPLETE:
-                       result = DID_OK << 16;
-                       break;
-               case SBP2_STATUS_TRANSPORT_FAILURE:
-                       result = DID_BUS_BUSY << 16;
-                       break;
-               case SBP2_STATUS_ILLEGAL_REQUEST:
-               case SBP2_STATUS_VENDOR_DEPENDENT:
-               default:
-                       result = DID_ERROR << 16;
-                       break;
-               }
-
-               if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
-                       result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
-                                                          orb->cmd->sense_buffer);
-       } else {
-               /*
-                * If the orb completes with status == NULL, something
-                * went wrong, typically a bus reset happened mid-orb
-                * or when sending the write (less likely).
-                */
-               result = DID_BUS_BUSY << 16;
-               sbp2_conditionally_block(orb->lu);
-       }
-
-       dma_unmap_single(device->card->device, orb->base.request_bus,
-                        sizeof(orb->request), DMA_TO_DEVICE);
-       sbp2_unmap_scatterlist(device->card->device, orb);
-
-       orb->cmd->result = result;
-       orb->done(orb->cmd);
-}
-
-static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
-               struct fw_device *device, struct sbp2_logical_unit *lu)
-{
-       struct scatterlist *sg = scsi_sglist(orb->cmd);
-       int i, n;
-
-       n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
-                      orb->cmd->sc_data_direction);
-       if (n == 0)
-               goto fail;
-
-       /*
-        * Handle the special case where there is only one element in
-        * the scatter list by converting it to an immediate block
-        * request. This is also a workaround for broken devices such
-        * as the second generation iPod which doesn't support page
-        * tables.
-        */
-       if (n == 1) {
-               orb->request.data_descriptor.high =
-                       cpu_to_be32(lu->tgt->address_high);
-               orb->request.data_descriptor.low  =
-                       cpu_to_be32(sg_dma_address(sg));
-               orb->request.misc |=
-                       cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
-               return 0;
-       }
-
-       for_each_sg(sg, sg, n, i) {
-               orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
-               orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
-       }
-
-       orb->page_table_bus =
-               dma_map_single(device->card->device, orb->page_table,
-                              sizeof(orb->page_table), DMA_TO_DEVICE);
-       if (dma_mapping_error(device->card->device, orb->page_table_bus))
-               goto fail_page_table;
-
-       /*
-        * The data_descriptor pointer is the one case where we need
-        * to fill in the node ID part of the address.  All other
-        * pointers assume that the data referenced reside on the
-        * initiator (i.e. us), but data_descriptor can refer to data
-        * on other nodes so we need to put our ID in descriptor.high.
-        */
-       orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
-       orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
-       orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
-                                        COMMAND_ORB_DATA_SIZE(n));
-
-       return 0;
-
- fail_page_table:
-       dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
-                    scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
- fail:
-       return -ENOMEM;
-}
-
-/* SCSI stack integration */
-
-static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
-{
-       struct sbp2_logical_unit *lu = cmd->device->hostdata;
-       struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
-       struct sbp2_command_orb *orb;
-       int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
-
-       /*
-        * Bidirectional commands are not yet implemented, and unknown
-        * transfer direction not handled.
-        */
-       if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
-               fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
-               cmd->result = DID_ERROR << 16;
-               done(cmd);
-               return 0;
-       }
-
-       orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
-       if (orb == NULL) {
-               fw_notify("failed to alloc orb\n");
-               return SCSI_MLQUEUE_HOST_BUSY;
-       }
-
-       /* Initialize rcode to something not RCODE_COMPLETE. */
-       orb->base.rcode = -1;
-       kref_init(&orb->base.kref);
-
-       orb->lu   = lu;
-       orb->done = done;
-       orb->cmd  = cmd;
-
-       orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
-       orb->request.misc = cpu_to_be32(
-               COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
-               COMMAND_ORB_SPEED(device->max_speed) |
-               COMMAND_ORB_NOTIFY);
-
-       if (cmd->sc_data_direction == DMA_FROM_DEVICE)
-               orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
-
-       generation = device->generation;
-       smp_rmb();    /* sbp2_map_scatterlist looks at tgt->address_high */
-
-       if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
-               goto out;
-
-       memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);
-
-       orb->base.callback = complete_command_orb;
-       orb->base.request_bus =
-               dma_map_single(device->card->device, &orb->request,
-                              sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
-               sbp2_unmap_scatterlist(device->card->device, orb);
-               goto out;
-       }
-
-       sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
-                     lu->command_block_agent_address + SBP2_ORB_POINTER);
-       retval = 0;
- out:
-       kref_put(&orb->base.kref, free_orb);
-       return retval;
-}
-
-static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
-{
-       struct sbp2_logical_unit *lu = sdev->hostdata;
-
-       /* (Re-)Adding logical units via the SCSI stack is not supported. */
-       if (!lu)
-               return -ENOSYS;
-
-       sdev->allow_restart = 1;
-
-       /* SBP-2 requires quadlet alignment of the data buffers. */
-       blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
-               sdev->inquiry_len = 36;
-
-       return 0;
-}
-
-static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
-{
-       struct sbp2_logical_unit *lu = sdev->hostdata;
-
-       sdev->use_10_for_rw = 1;
-
-       if (sbp2_param_exclusive_login)
-               sdev->manage_start_stop = 1;
-
-       if (sdev->type == TYPE_ROM)
-               sdev->use_10_for_ms = 1;
-
-       if (sdev->type == TYPE_DISK &&
-           lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
-               sdev->skip_ms_page_8 = 1;
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
-               sdev->fix_capacity = 1;
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
-               sdev->start_stop_pwr_cond = 1;
-
-       if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
-               blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
-
-       blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
-
-       return 0;
-}
-
-/*
- * Called by scsi stack when something has really gone wrong.  Usually
- * called when a command has timed-out for some reason.
- */
-static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
-{
-       struct sbp2_logical_unit *lu = cmd->device->hostdata;
-
-       fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
-       sbp2_agent_reset(lu);
-       sbp2_cancel_orbs(lu);
-
-       return SUCCESS;
-}
-
-/*
- * Format of /sys/bus/scsi/devices/.../ieee1394_id:
- * u64 EUI-64 : u24 directory_ID : u16 LUN  (all printed in hexadecimal)
- *
- * This is the concatenation of target port identifier and logical unit
- * identifier as per SAM-2...SAM-4 annex A.
- */
-static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
-{
-       struct scsi_device *sdev = to_scsi_device(dev);
-       struct sbp2_logical_unit *lu;
-
-       if (!sdev)
-               return 0;
-
-       lu = sdev->hostdata;
-
-       return sprintf(buf, "%016llx:%06x:%04x\n",
-                       (unsigned long long)lu->tgt->guid,
-                       lu->tgt->directory_id, lu->lun);
-}
-
-static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
-
-static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
-       &dev_attr_ieee1394_id,
-       NULL
-};
-
-static struct scsi_host_template scsi_driver_template = {
-       .module                 = THIS_MODULE,
-       .name                   = "SBP-2 IEEE-1394",
-       .proc_name              = sbp2_driver_name,
-       .queuecommand           = sbp2_scsi_queuecommand,
-       .slave_alloc            = sbp2_scsi_slave_alloc,
-       .slave_configure        = sbp2_scsi_slave_configure,
-       .eh_abort_handler       = sbp2_scsi_abort,
-       .this_id                = -1,
-       .sg_tablesize           = SG_ALL,
-       .use_clustering         = ENABLE_CLUSTERING,
-       .cmd_per_lun            = 1,
-       .can_queue              = 1,
-       .sdev_attrs             = sbp2_scsi_sysfs_attrs,
-};
-
-MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
-MODULE_DESCRIPTION("SCSI over IEEE1394");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
-
-/* Provide a module alias so root-on-sbp2 initrds don't break. */
-#ifndef CONFIG_IEEE1394_SBP2_MODULE
-MODULE_ALIAS("sbp2");
-#endif
-
-static int __init sbp2_init(void)
-{
-       sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
-       if (!sbp2_wq)
-               return -ENOMEM;
-
-       return driver_register(&sbp2_driver.driver);
-}
-
-static void __exit sbp2_cleanup(void)
-{
-       driver_unregister(&sbp2_driver.driver);
-       destroy_workqueue(sbp2_wq);
-}
-
-module_init(sbp2_init);
-module_exit(sbp2_cleanup);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
deleted file mode 100644 (file)
index d0deecc..0000000
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * Incremental bus scan, based on bus topology
- *
- * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <asm/bug.h>
-#include <asm/system.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
-
-#define SELF_ID_PHY_ID(q)              (((q) >> 24) & 0x3f)
-#define SELF_ID_EXTENDED(q)            (((q) >> 23) & 0x01)
-#define SELF_ID_LINK_ON(q)             (((q) >> 22) & 0x01)
-#define SELF_ID_GAP_COUNT(q)           (((q) >> 16) & 0x3f)
-#define SELF_ID_PHY_SPEED(q)           (((q) >> 14) & 0x03)
-#define SELF_ID_CONTENDER(q)           (((q) >> 11) & 0x01)
-#define SELF_ID_PHY_INITIATOR(q)       (((q) >>  1) & 0x01)
-#define SELF_ID_MORE_PACKETS(q)                (((q) >>  0) & 0x01)
-
-#define SELF_ID_EXT_SEQUENCE(q)                (((q) >> 20) & 0x07)
-
-static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
-{
-       u32 q;
-       int port_type, shift, seq;
-
-       *total_port_count = 0;
-       *child_port_count = 0;
-
-       shift = 6;
-       q = *sid;
-       seq = 0;
-
-       while (1) {
-               port_type = (q >> shift) & 0x03;
-               switch (port_type) {
-               case SELFID_PORT_CHILD:
-                       (*child_port_count)++;
-               case SELFID_PORT_PARENT:
-               case SELFID_PORT_NCONN:
-                       (*total_port_count)++;
-               case SELFID_PORT_NONE:
-                       break;
-               }
-
-               shift -= 2;
-               if (shift == 0) {
-                       if (!SELF_ID_MORE_PACKETS(q))
-                               return sid + 1;
-
-                       shift = 16;
-                       sid++;
-                       q = *sid;
-
-                       /*
-                        * Check that the extra packets actually are
-                        * extended self ID packets and that the
-                        * sequence numbers in the extended self ID
-                        * packets increase as expected.
-                        */
-
-                       if (!SELF_ID_EXTENDED(q) ||
-                           seq != SELF_ID_EXT_SEQUENCE(q))
-                               return NULL;
-
-                       seq++;
-               }
-       }
-}
-
-static int get_port_type(u32 *sid, int port_index)
-{
-       int index, shift;
-
-       index = (port_index + 5) / 8;
-       shift = 16 - ((port_index + 5) & 7) * 2;
-       return (sid[index] >> shift) & 0x03;
-}
-
-static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
-{
-       struct fw_node *node;
-
-       node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
-                      GFP_ATOMIC);
-       if (node == NULL)
-               return NULL;
-
-       node->color = color;
-       node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
-       node->link_on = SELF_ID_LINK_ON(sid);
-       node->phy_speed = SELF_ID_PHY_SPEED(sid);
-       node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
-       node->port_count = port_count;
-
-       atomic_set(&node->ref_count, 1);
-       INIT_LIST_HEAD(&node->link);
-
-       return node;
-}
-
-/*
- * Compute the maximum hop count for this node and it's children.  The
- * maximum hop count is the maximum number of connections between any
- * two nodes in the subtree rooted at this node.  We need this for
- * setting the gap count.  As we build the tree bottom up in
- * build_tree() below, this is fairly easy to do: for each node we
- * maintain the max hop count and the max depth, ie the number of hops
- * to the furthest leaf.  Computing the max hop count breaks down into
- * two cases: either the path goes through this node, in which case
- * the hop count is the sum of the two biggest child depths plus 2.
- * Or it could be the case that the max hop path is entirely
- * containted in a child tree, in which case the max hop count is just
- * the max hop count of this child.
- */
-static void update_hop_count(struct fw_node *node)
-{
-       int depths[2] = { -1, -1 };
-       int max_child_hops = 0;
-       int i;
-
-       for (i = 0; i < node->port_count; i++) {
-               if (node->ports[i] == NULL)
-                       continue;
-
-               if (node->ports[i]->max_hops > max_child_hops)
-                       max_child_hops = node->ports[i]->max_hops;
-
-               if (node->ports[i]->max_depth > depths[0]) {
-                       depths[1] = depths[0];
-                       depths[0] = node->ports[i]->max_depth;
-               } else if (node->ports[i]->max_depth > depths[1])
-                       depths[1] = node->ports[i]->max_depth;
-       }
-
-       node->max_depth = depths[0] + 1;
-       node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
-}
-
-static inline struct fw_node *fw_node(struct list_head *l)
-{
-       return list_entry(l, struct fw_node, link);
-}
-
-/**
- * build_tree - Build the tree representation of the topology
- * @self_ids: array of self IDs to create the tree from
- * @self_id_count: the length of the self_ids array
- * @local_id: the node ID of the local node
- *
- * This function builds the tree representation of the topology given
- * by the self IDs from the latest bus reset.  During the construction
- * of the tree, the function checks that the self IDs are valid and
- * internally consistent.  On succcess this function returns the
- * fw_node corresponding to the local card otherwise NULL.
- */
-static struct fw_node *build_tree(struct fw_card *card,
-                                 u32 *sid, int self_id_count)
-{
-       struct fw_node *node, *child, *local_node, *irm_node;
-       struct list_head stack, *h;
-       u32 *next_sid, *end, q;
-       int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
-       int gap_count;
-       bool beta_repeaters_present;
-
-       local_node = NULL;
-       node = NULL;
-       INIT_LIST_HEAD(&stack);
-       stack_depth = 0;
-       end = sid + self_id_count;
-       phy_id = 0;
-       irm_node = NULL;
-       gap_count = SELF_ID_GAP_COUNT(*sid);
-       beta_repeaters_present = false;
-
-       while (sid < end) {
-               next_sid = count_ports(sid, &port_count, &child_port_count);
-
-               if (next_sid == NULL) {
-                       fw_error("Inconsistent extended self IDs.\n");
-                       return NULL;
-               }
-
-               q = *sid;
-               if (phy_id != SELF_ID_PHY_ID(q)) {
-                       fw_error("PHY ID mismatch in self ID: %d != %d.\n",
-                                phy_id, SELF_ID_PHY_ID(q));
-                       return NULL;
-               }
-
-               if (child_port_count > stack_depth) {
-                       fw_error("Topology stack underflow\n");
-                       return NULL;
-               }
-
-               /*
-                * Seek back from the top of our stack to find the
-                * start of the child nodes for this node.
-                */
-               for (i = 0, h = &stack; i < child_port_count; i++)
-                       h = h->prev;
-               /*
-                * When the stack is empty, this yields an invalid value,
-                * but that pointer will never be dereferenced.
-                */
-               child = fw_node(h);
-
-               node = fw_node_create(q, port_count, card->color);
-               if (node == NULL) {
-                       fw_error("Out of memory while building topology.\n");
-                       return NULL;
-               }
-
-               if (phy_id == (card->node_id & 0x3f))
-                       local_node = node;
-
-               if (SELF_ID_CONTENDER(q))
-                       irm_node = node;
-
-               parent_count = 0;
-
-               for (i = 0; i < port_count; i++) {
-                       switch (get_port_type(sid, i)) {
-                       case SELFID_PORT_PARENT:
-                               /*
-                                * Who's your daddy?  We dont know the
-                                * parent node at this time, so we
-                                * temporarily abuse node->color for
-                                * remembering the entry in the
-                                * node->ports array where the parent
-                                * node should be.  Later, when we
-                                * handle the parent node, we fix up
-                                * the reference.
-                                */
-                               parent_count++;
-                               node->color = i;
-                               break;
-
-                       case SELFID_PORT_CHILD:
-                               node->ports[i] = child;
-                               /*
-                                * Fix up parent reference for this
-                                * child node.
-                                */
-                               child->ports[child->color] = node;
-                               child->color = card->color;
-                               child = fw_node(child->link.next);
-                               break;
-                       }
-               }
-
-               /*
-                * Check that the node reports exactly one parent
-                * port, except for the root, which of course should
-                * have no parents.
-                */
-               if ((next_sid == end && parent_count != 0) ||
-                   (next_sid < end && parent_count != 1)) {
-                       fw_error("Parent port inconsistency for node %d: "
-                                "parent_count=%d\n", phy_id, parent_count);
-                       return NULL;
-               }
-
-               /* Pop the child nodes off the stack and push the new node. */
-               __list_del(h->prev, &stack);
-               list_add_tail(&node->link, &stack);
-               stack_depth += 1 - child_port_count;
-
-               if (node->phy_speed == SCODE_BETA &&
-                   parent_count + child_port_count > 1)
-                       beta_repeaters_present = true;
-
-               /*
-                * If PHYs report different gap counts, set an invalid count
-                * which will force a gap count reconfiguration and a reset.
-                */
-               if (SELF_ID_GAP_COUNT(q) != gap_count)
-                       gap_count = 0;
-
-               update_hop_count(node);
-
-               sid = next_sid;
-               phy_id++;
-       }
-
-       card->root_node = node;
-       card->irm_node = irm_node;
-       card->gap_count = gap_count;
-       card->beta_repeaters_present = beta_repeaters_present;
-
-       return local_node;
-}
-
-typedef void (*fw_node_callback_t)(struct fw_card * card,
-                                  struct fw_node * node,
-                                  struct fw_node * parent);
-
-static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
-                            fw_node_callback_t callback)
-{
-       struct list_head list;
-       struct fw_node *node, *next, *child, *parent;
-       int i;
-
-       INIT_LIST_HEAD(&list);
-
-       fw_node_get(root);
-       list_add_tail(&root->link, &list);
-       parent = NULL;
-       list_for_each_entry(node, &list, link) {
-               node->color = card->color;
-
-               for (i = 0; i < node->port_count; i++) {
-                       child = node->ports[i];
-                       if (!child)
-                               continue;
-                       if (child->color == card->color)
-                               parent = child;
-                       else {
-                               fw_node_get(child);
-                               list_add_tail(&child->link, &list);
-                       }
-               }
-
-               callback(card, node, parent);
-       }
-
-       list_for_each_entry_safe(node, next, &list, link)
-               fw_node_put(node);
-}
-
-static void report_lost_node(struct fw_card *card,
-                            struct fw_node *node, struct fw_node *parent)
-{
-       fw_node_event(card, node, FW_NODE_DESTROYED);
-       fw_node_put(node);
-
-       /* Topology has changed - reset bus manager retry counter */
-       card->bm_retries = 0;
-}
-
-static void report_found_node(struct fw_card *card,
-                             struct fw_node *node, struct fw_node *parent)
-{
-       int b_path = (node->phy_speed == SCODE_BETA);
-
-       if (parent != NULL) {
-               /* min() macro doesn't work here with gcc 3.4 */
-               node->max_speed = parent->max_speed < node->phy_speed ?
-                                       parent->max_speed : node->phy_speed;
-               node->b_path = parent->b_path && b_path;
-       } else {
-               node->max_speed = node->phy_speed;
-               node->b_path = b_path;
-       }
-
-       fw_node_event(card, node, FW_NODE_CREATED);
-
-       /* Topology has changed - reset bus manager retry counter */
-       card->bm_retries = 0;
-}
-
-void fw_destroy_nodes(struct fw_card *card)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       card->color++;
-       if (card->local_node != NULL)
-               for_each_fw_node(card, card->local_node, report_lost_node);
-       card->local_node = NULL;
-       spin_unlock_irqrestore(&card->lock, flags);
-}
-
-static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
-{
-       struct fw_node *tree;
-       int i;
-
-       tree = node1->ports[port];
-       node0->ports[port] = tree;
-       for (i = 0; i < tree->port_count; i++) {
-               if (tree->ports[i] == node1) {
-                       tree->ports[i] = node0;
-                       break;
-               }
-       }
-}
-
-/**
- * update_tree - compare the old topology tree for card with the new
- * one specified by root.  Queue the nodes and mark them as either
- * found, lost or updated.  Update the nodes in the card topology tree
- * as we go.
- */
-static void update_tree(struct fw_card *card, struct fw_node *root)
-{
-       struct list_head list0, list1;
-       struct fw_node *node0, *node1, *next1;
-       int i, event;
-
-       INIT_LIST_HEAD(&list0);
-       list_add_tail(&card->local_node->link, &list0);
-       INIT_LIST_HEAD(&list1);
-       list_add_tail(&root->link, &list1);
-
-       node0 = fw_node(list0.next);
-       node1 = fw_node(list1.next);
-
-       while (&node0->link != &list0) {
-               WARN_ON(node0->port_count != node1->port_count);
-
-               if (node0->link_on && !node1->link_on)
-                       event = FW_NODE_LINK_OFF;
-               else if (!node0->link_on && node1->link_on)
-                       event = FW_NODE_LINK_ON;
-               else if (node1->initiated_reset && node1->link_on)
-                       event = FW_NODE_INITIATED_RESET;
-               else
-                       event = FW_NODE_UPDATED;
-
-               node0->node_id = node1->node_id;
-               node0->color = card->color;
-               node0->link_on = node1->link_on;
-               node0->initiated_reset = node1->initiated_reset;
-               node0->max_hops = node1->max_hops;
-               node1->color = card->color;
-               fw_node_event(card, node0, event);
-
-               if (card->root_node == node1)
-                       card->root_node = node0;
-               if (card->irm_node == node1)
-                       card->irm_node = node0;
-
-               for (i = 0; i < node0->port_count; i++) {
-                       if (node0->ports[i] && node1->ports[i]) {
-                               /*
-                                * This port didn't change, queue the
-                                * connected node for further
-                                * investigation.
-                                */
-                               if (node0->ports[i]->color == card->color)
-                                       continue;
-                               list_add_tail(&node0->ports[i]->link, &list0);
-                               list_add_tail(&node1->ports[i]->link, &list1);
-                       } else if (node0->ports[i]) {
-                               /*
-                                * The nodes connected here were
-                                * unplugged; unref the lost nodes and
-                                * queue FW_NODE_LOST callbacks for
-                                * them.
-                                */
-
-                               for_each_fw_node(card, node0->ports[i],
-                                                report_lost_node);
-                               node0->ports[i] = NULL;
-                       } else if (node1->ports[i]) {
-                               /*
-                                * One or more node were connected to
-                                * this port. Move the new nodes into
-                                * the tree and queue FW_NODE_CREATED
-                                * callbacks for them.
-                                */
-                               move_tree(node0, node1, i);
-                               for_each_fw_node(card, node0->ports[i],
-                                                report_found_node);
-                       }
-               }
-
-               node0 = fw_node(node0->link.next);
-               next1 = fw_node(node1->link.next);
-               fw_node_put(node1);
-               node1 = next1;
-       }
-}
-
-static void update_topology_map(struct fw_card *card,
-                               u32 *self_ids, int self_id_count)
-{
-       int node_count;
-
-       card->topology_map[1]++;
-       node_count = (card->root_node->node_id & 0x3f) + 1;
-       card->topology_map[2] = (node_count << 16) | self_id_count;
-       card->topology_map[0] = (self_id_count + 2) << 16;
-       memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
-       fw_compute_block_crc(card->topology_map);
-}
-
-void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
-                             int self_id_count, u32 *self_ids)
-{
-       struct fw_node *local_node;
-       unsigned long flags;
-
-       /*
-        * If the selfID buffer is not the immediate successor of the
-        * previously processed one, we cannot reliably compare the
-        * old and new topologies.
-        */
-       if (!is_next_generation(generation, card->generation) &&
-           card->local_node != NULL) {
-               fw_notify("skipped bus generations, destroying all nodes\n");
-               fw_destroy_nodes(card);
-               card->bm_retries = 0;
-       }
-
-       spin_lock_irqsave(&card->lock, flags);
-
-       card->broadcast_channel_allocated = false;
-       card->node_id = node_id;
-       /*
-        * Update node_id before generation to prevent anybody from using
-        * a stale node_id together with a current generation.
-        */
-       smp_wmb();
-       card->generation = generation;
-       card->reset_jiffies = jiffies;
-       fw_schedule_bm_work(card, 0);
-
-       local_node = build_tree(card, self_ids, self_id_count);
-
-       update_topology_map(card, self_ids, self_id_count);
-
-       card->color++;
-
-       if (local_node == NULL) {
-               fw_error("topology build failed\n");
-               /* FIXME: We need to issue a bus reset in this case. */
-       } else if (card->local_node == NULL) {
-               card->local_node = local_node;
-               for_each_fw_node(card, local_node, report_found_node);
-       } else {
-               update_tree(card, local_node);
-       }
-
-       spin_unlock_irqrestore(&card->lock, flags);
-}
-EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
deleted file mode 100644 (file)
index 3c497bb..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_topology_h
-#define __fw_topology_h
-
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <asm/atomic.h>
-
-enum {
-       FW_NODE_CREATED,
-       FW_NODE_UPDATED,
-       FW_NODE_DESTROYED,
-       FW_NODE_LINK_ON,
-       FW_NODE_LINK_OFF,
-       FW_NODE_INITIATED_RESET,
-};
-
-struct fw_node {
-       u16 node_id;
-       u8 color;
-       u8 port_count;
-       u8 link_on : 1;
-       u8 initiated_reset : 1;
-       u8 b_path : 1;
-       u8 phy_speed : 2; /* As in the self ID packet. */
-       u8 max_speed : 2; /* Minimum of all phy-speeds on the path from the
-                          * local node to this node. */
-       u8 max_depth : 4; /* Maximum depth to any leaf node */
-       u8 max_hops : 4;  /* Max hops in this sub tree */
-       atomic_t ref_count;
-
-       /* For serializing node topology into a list. */
-       struct list_head link;
-
-       /* Upper layer specific data. */
-       void *data;
-
-       struct fw_node *ports[0];
-};
-
-static inline struct fw_node *fw_node_get(struct fw_node *node)
-{
-       atomic_inc(&node->ref_count);
-
-       return node;
-}
-
-static inline void fw_node_put(struct fw_node *node)
-{
-       if (atomic_dec_and_test(&node->ref_count))
-               kfree(node);
-}
-
-struct fw_card;
-void fw_destroy_nodes(struct fw_card *card);
-
-int fw_compute_block_crc(u32 *block);
-
-#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
deleted file mode 100644 (file)
index 283dac6..0000000
+++ /dev/null
@@ -1,970 +0,0 @@
-/*
- * Core IEEE1394 transaction logic
- *
- * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/completion.h>
-#include <linux/idr.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/list.h>
-#include <linux/kthread.h>
-#include <asm/uaccess.h>
-
-#include "fw-transaction.h"
-#include "fw-topology.h"
-#include "fw-device.h"
-
-#define HEADER_PRI(pri)                        ((pri) << 0)
-#define HEADER_TCODE(tcode)            ((tcode) << 4)
-#define HEADER_RETRY(retry)            ((retry) << 8)
-#define HEADER_TLABEL(tlabel)          ((tlabel) << 10)
-#define HEADER_DESTINATION(destination)        ((destination) << 16)
-#define HEADER_SOURCE(source)          ((source) << 16)
-#define HEADER_RCODE(rcode)            ((rcode) << 12)
-#define HEADER_OFFSET_HIGH(offset_high)        ((offset_high) << 0)
-#define HEADER_DATA_LENGTH(length)     ((length) << 16)
-#define HEADER_EXTENDED_TCODE(tcode)   ((tcode) << 0)
-
-#define HEADER_GET_TCODE(q)            (((q) >> 4) & 0x0f)
-#define HEADER_GET_TLABEL(q)           (((q) >> 10) & 0x3f)
-#define HEADER_GET_RCODE(q)            (((q) >> 12) & 0x0f)
-#define HEADER_GET_DESTINATION(q)      (((q) >> 16) & 0xffff)
-#define HEADER_GET_SOURCE(q)           (((q) >> 16) & 0xffff)
-#define HEADER_GET_OFFSET_HIGH(q)      (((q) >> 0) & 0xffff)
-#define HEADER_GET_DATA_LENGTH(q)      (((q) >> 16) & 0xffff)
-#define HEADER_GET_EXTENDED_TCODE(q)   (((q) >> 0) & 0xffff)
-
-#define HEADER_DESTINATION_IS_BROADCAST(q) \
-       (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
-
-#define PHY_CONFIG_GAP_COUNT(gap_count)        (((gap_count) << 16) | (1 << 22))
-#define PHY_CONFIG_ROOT_ID(node_id)    ((((node_id) & 0x3f) << 24) | (1 << 23))
-#define PHY_IDENTIFIER(id)             ((id) << 30)
-
-static int close_transaction(struct fw_transaction *transaction,
-                            struct fw_card *card, int rcode)
-{
-       struct fw_transaction *t;
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->lock, flags);
-       list_for_each_entry(t, &card->transaction_list, link) {
-               if (t == transaction) {
-                       list_del(&t->link);
-                       card->tlabel_mask &= ~(1 << t->tlabel);
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (&t->link != &card->transaction_list) {
-               t->callback(card, rcode, NULL, 0, t->callback_data);
-               return 0;
-       }
-
-       return -ENOENT;
-}
-
-/*
- * Only valid for transactions that are potentially pending (ie have
- * been sent).
- */
-int fw_cancel_transaction(struct fw_card *card,
-                         struct fw_transaction *transaction)
-{
-       /*
-        * Cancel the packet transmission if it's still queued.  That
-        * will call the packet transmission callback which cancels
-        * the transaction.
-        */
-
-       if (card->driver->cancel_packet(card, &transaction->packet) == 0)
-               return 0;
-
-       /*
-        * If the request packet has already been sent, we need to see
-        * if the transaction is still pending and remove it in that case.
-        */
-
-       return close_transaction(transaction, card, RCODE_CANCELLED);
-}
-EXPORT_SYMBOL(fw_cancel_transaction);
-
-static void transmit_complete_callback(struct fw_packet *packet,
-                                      struct fw_card *card, int status)
-{
-       struct fw_transaction *t =
-           container_of(packet, struct fw_transaction, packet);
-
-       switch (status) {
-       case ACK_COMPLETE:
-               close_transaction(t, card, RCODE_COMPLETE);
-               break;
-       case ACK_PENDING:
-               t->timestamp = packet->timestamp;
-               break;
-       case ACK_BUSY_X:
-       case ACK_BUSY_A:
-       case ACK_BUSY_B:
-               close_transaction(t, card, RCODE_BUSY);
-               break;
-       case ACK_DATA_ERROR:
-               close_transaction(t, card, RCODE_DATA_ERROR);
-               break;
-       case ACK_TYPE_ERROR:
-               close_transaction(t, card, RCODE_TYPE_ERROR);
-               break;
-       default:
-               /*
-                * In this case the ack is really a juju specific
-                * rcode, so just forward that to the callback.
-                */
-               close_transaction(t, card, status);
-               break;
-       }
-}
-
-static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
-               int destination_id, int source_id, int generation, int speed,
-               unsigned long long offset, void *payload, size_t length)
-{
-       int ext_tcode;
-
-       if (tcode == TCODE_STREAM_DATA) {
-               packet->header[0] =
-                       HEADER_DATA_LENGTH(length) |
-                       destination_id |
-                       HEADER_TCODE(TCODE_STREAM_DATA);
-               packet->header_length = 4;
-               packet->payload = payload;
-               packet->payload_length = length;
-
-               goto common;
-       }
-
-       if (tcode > 0x10) {
-               ext_tcode = tcode & ~0x10;
-               tcode = TCODE_LOCK_REQUEST;
-       } else
-               ext_tcode = 0;
-
-       packet->header[0] =
-               HEADER_RETRY(RETRY_X) |
-               HEADER_TLABEL(tlabel) |
-               HEADER_TCODE(tcode) |
-               HEADER_DESTINATION(destination_id);
-       packet->header[1] =
-               HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
-       packet->header[2] =
-               offset;
-
-       switch (tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-               packet->header[3] = *(u32 *)payload;
-               packet->header_length = 16;
-               packet->payload_length = 0;
-               break;
-
-       case TCODE_LOCK_REQUEST:
-       case TCODE_WRITE_BLOCK_REQUEST:
-               packet->header[3] =
-                       HEADER_DATA_LENGTH(length) |
-                       HEADER_EXTENDED_TCODE(ext_tcode);
-               packet->header_length = 16;
-               packet->payload = payload;
-               packet->payload_length = length;
-               break;
-
-       case TCODE_READ_QUADLET_REQUEST:
-               packet->header_length = 12;
-               packet->payload_length = 0;
-               break;
-
-       case TCODE_READ_BLOCK_REQUEST:
-               packet->header[3] =
-                       HEADER_DATA_LENGTH(length) |
-                       HEADER_EXTENDED_TCODE(ext_tcode);
-               packet->header_length = 16;
-               packet->payload_length = 0;
-               break;
-       }
- common:
-       packet->speed = speed;
-       packet->generation = generation;
-       packet->ack = 0;
-       packet->payload_bus = 0;
-}
-
-/**
- * This function provides low-level access to the IEEE1394 transaction
- * logic.  Most C programs would use either fw_read(), fw_write() or
- * fw_lock() instead - those function are convenience wrappers for
- * this function.  The fw_send_request() function is primarily
- * provided as a flexible, one-stop entry point for languages bindings
- * and protocol bindings.
- *
- * FIXME: Document this function further, in particular the possible
- * values for rcode in the callback.  In short, we map ACK_COMPLETE to
- * RCODE_COMPLETE, internal errors set errno and set rcode to
- * RCODE_SEND_ERROR (which is out of range for standard ieee1394
- * rcodes).  All other rcodes are forwarded unchanged.  For all
- * errors, payload is NULL, length is 0.
- *
- * Can not expect the callback to be called before the function
- * returns, though this does happen in some cases (ACK_COMPLETE and
- * errors).
- *
- * The payload is only used for write requests and must not be freed
- * until the callback has been called.
- *
- * @param card the card from which to send the request
- * @param tcode the tcode for this transaction.  Do not use
- *   TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
- *   etc. to specify tcode and ext_tcode.
- * @param node_id the destination node ID (bus ID and PHY ID concatenated)
- * @param generation the generation for which node_id is valid
- * @param speed the speed to use for sending the request
- * @param offset the 48 bit offset on the destination node
- * @param payload the data payload for the request subaction
- * @param length the length in bytes of the data to read
- * @param callback function to be called when the transaction is completed
- * @param callback_data pointer to arbitrary data, which will be
- *   passed to the callback
- *
- * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
- * needs to synthesize @destination_id with fw_stream_packet_destination_id().
- */
-void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
-                    int destination_id, int generation, int speed,
-                    unsigned long long offset, void *payload, size_t length,
-                    fw_transaction_callback_t callback, void *callback_data)
-{
-       unsigned long flags;
-       int tlabel;
-
-       /*
-        * Bump the flush timer up 100ms first of all so we
-        * don't race with a flush timer callback.
-        */
-
-       mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
-
-       /*
-        * Allocate tlabel from the bitmap and put the transaction on
-        * the list while holding the card spinlock.
-        */
-
-       spin_lock_irqsave(&card->lock, flags);
-
-       tlabel = card->current_tlabel;
-       if (card->tlabel_mask & (1 << tlabel)) {
-               spin_unlock_irqrestore(&card->lock, flags);
-               callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
-               return;
-       }
-
-       card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
-       card->tlabel_mask |= (1 << tlabel);
-
-       t->node_id = destination_id;
-       t->tlabel = tlabel;
-       t->callback = callback;
-       t->callback_data = callback_data;
-
-       fw_fill_request(&t->packet, tcode, t->tlabel,
-                       destination_id, card->node_id, generation,
-                       speed, offset, payload, length);
-       t->packet.callback = transmit_complete_callback;
-
-       list_add_tail(&t->link, &card->transaction_list);
-
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       card->driver->send_request(card, &t->packet);
-}
-EXPORT_SYMBOL(fw_send_request);
-
-struct transaction_callback_data {
-       struct completion done;
-       void *payload;
-       int rcode;
-};
-
-static void transaction_callback(struct fw_card *card, int rcode,
-                                void *payload, size_t length, void *data)
-{
-       struct transaction_callback_data *d = data;
-
-       if (rcode == RCODE_COMPLETE)
-               memcpy(d->payload, payload, length);
-       d->rcode = rcode;
-       complete(&d->done);
-}
-
-/**
- * fw_run_transaction - send request and sleep until transaction is completed
- *
- * Returns the RCODE.
- */
-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
-                      int generation, int speed, unsigned long long offset,
-                      void *payload, size_t length)
-{
-       struct transaction_callback_data d;
-       struct fw_transaction t;
-
-       init_completion(&d.done);
-       d.payload = payload;
-       fw_send_request(card, &t, tcode, destination_id, generation, speed,
-                       offset, payload, length, transaction_callback, &d);
-       wait_for_completion(&d.done);
-
-       return d.rcode;
-}
-EXPORT_SYMBOL(fw_run_transaction);
-
-static DEFINE_MUTEX(phy_config_mutex);
-static DECLARE_COMPLETION(phy_config_done);
-
-static void transmit_phy_packet_callback(struct fw_packet *packet,
-                                        struct fw_card *card, int status)
-{
-       complete(&phy_config_done);
-}
-
-static struct fw_packet phy_config_packet = {
-       .header_length  = 8,
-       .payload_length = 0,
-       .speed          = SCODE_100,
-       .callback       = transmit_phy_packet_callback,
-};
-
-void fw_send_phy_config(struct fw_card *card,
-                       int node_id, int generation, int gap_count)
-{
-       long timeout = DIV_ROUND_UP(HZ, 10);
-       u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
-                  PHY_CONFIG_ROOT_ID(node_id) |
-                  PHY_CONFIG_GAP_COUNT(gap_count);
-
-       mutex_lock(&phy_config_mutex);
-
-       phy_config_packet.header[0] = data;
-       phy_config_packet.header[1] = ~data;
-       phy_config_packet.generation = generation;
-       INIT_COMPLETION(phy_config_done);
-
-       card->driver->send_request(card, &phy_config_packet);
-       wait_for_completion_timeout(&phy_config_done, timeout);
-
-       mutex_unlock(&phy_config_mutex);
-}
-
-void fw_flush_transactions(struct fw_card *card)
-{
-       struct fw_transaction *t, *next;
-       struct list_head list;
-       unsigned long flags;
-
-       INIT_LIST_HEAD(&list);
-       spin_lock_irqsave(&card->lock, flags);
-       list_splice_init(&card->transaction_list, &list);
-       card->tlabel_mask = 0;
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       list_for_each_entry_safe(t, next, &list, link) {
-               card->driver->cancel_packet(card, &t->packet);
-
-               /*
-                * At this point cancel_packet will never call the
-                * transaction callback, since we just took all the
-                * transactions out of the list.  So do it here.
-                */
-               t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
-       }
-}
-
-static struct fw_address_handler *lookup_overlapping_address_handler(
-       struct list_head *list, unsigned long long offset, size_t length)
-{
-       struct fw_address_handler *handler;
-
-       list_for_each_entry(handler, list, link) {
-               if (handler->offset < offset + length &&
-                   offset < handler->offset + handler->length)
-                       return handler;
-       }
-
-       return NULL;
-}
-
-static struct fw_address_handler *lookup_enclosing_address_handler(
-       struct list_head *list, unsigned long long offset, size_t length)
-{
-       struct fw_address_handler *handler;
-
-       list_for_each_entry(handler, list, link) {
-               if (handler->offset <= offset &&
-                   offset + length <= handler->offset + handler->length)
-                       return handler;
-       }
-
-       return NULL;
-}
-
-static DEFINE_SPINLOCK(address_handler_lock);
-static LIST_HEAD(address_handler_list);
-
-const struct fw_address_region fw_high_memory_region =
-       { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL,  };
-EXPORT_SYMBOL(fw_high_memory_region);
-
-#if 0
-const struct fw_address_region fw_low_memory_region =
-       { .start = 0x000000000000ULL, .end = 0x000100000000ULL,  };
-const struct fw_address_region fw_private_region =
-       { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL,  };
-const struct fw_address_region fw_csr_region =
-       { .start = CSR_REGISTER_BASE,
-         .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END,  };
-const struct fw_address_region fw_unit_space_region =
-       { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
-#endif  /*  0  */
-
-/**
- * fw_core_add_address_handler - register for incoming requests
- * @handler: callback
- * @region: region in the IEEE 1212 node space address range
- *
- * region->start, ->end, and handler->length have to be quadlet-aligned.
- *
- * When a request is received that falls within the specified address range,
- * the specified callback is invoked.  The parameters passed to the callback
- * give the details of the particular request.
- *
- * Return value:  0 on success, non-zero otherwise.
- * The start offset of the handler's address region is determined by
- * fw_core_add_address_handler() and is returned in handler->offset.
- */
-int fw_core_add_address_handler(struct fw_address_handler *handler,
-                               const struct fw_address_region *region)
-{
-       struct fw_address_handler *other;
-       unsigned long flags;
-       int ret = -EBUSY;
-
-       if (region->start & 0xffff000000000003ULL ||
-           region->end   & 0xffff000000000003ULL ||
-           region->start >= region->end ||
-           handler->length & 3 ||
-           handler->length == 0)
-               return -EINVAL;
-
-       spin_lock_irqsave(&address_handler_lock, flags);
-
-       handler->offset = region->start;
-       while (handler->offset + handler->length <= region->end) {
-               other =
-                   lookup_overlapping_address_handler(&address_handler_list,
-                                                      handler->offset,
-                                                      handler->length);
-               if (other != NULL) {
-                       handler->offset += other->length;
-               } else {
-                       list_add_tail(&handler->link, &address_handler_list);
-                       ret = 0;
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(&address_handler_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(fw_core_add_address_handler);
-
-/**
- * fw_core_remove_address_handler - unregister an address handler
- */
-void fw_core_remove_address_handler(struct fw_address_handler *handler)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&address_handler_lock, flags);
-       list_del(&handler->link);
-       spin_unlock_irqrestore(&address_handler_lock, flags);
-}
-EXPORT_SYMBOL(fw_core_remove_address_handler);
-
-struct fw_request {
-       struct fw_packet response;
-       u32 request_header[4];
-       int ack;
-       u32 length;
-       u32 data[0];
-};
-
-static void free_response_callback(struct fw_packet *packet,
-                                  struct fw_card *card, int status)
-{
-       struct fw_request *request;
-
-       request = container_of(packet, struct fw_request, response);
-       kfree(request);
-}
-
-void fw_fill_response(struct fw_packet *response, u32 *request_header,
-                     int rcode, void *payload, size_t length)
-{
-       int tcode, tlabel, extended_tcode, source, destination;
-
-       tcode          = HEADER_GET_TCODE(request_header[0]);
-       tlabel         = HEADER_GET_TLABEL(request_header[0]);
-       source         = HEADER_GET_DESTINATION(request_header[0]);
-       destination    = HEADER_GET_SOURCE(request_header[1]);
-       extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
-
-       response->header[0] =
-               HEADER_RETRY(RETRY_1) |
-               HEADER_TLABEL(tlabel) |
-               HEADER_DESTINATION(destination);
-       response->header[1] =
-               HEADER_SOURCE(source) |
-               HEADER_RCODE(rcode);
-       response->header[2] = 0;
-
-       switch (tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-       case TCODE_WRITE_BLOCK_REQUEST:
-               response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
-               response->header_length = 12;
-               response->payload_length = 0;
-               break;
-
-       case TCODE_READ_QUADLET_REQUEST:
-               response->header[0] |=
-                       HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
-               if (payload != NULL)
-                       response->header[3] = *(u32 *)payload;
-               else
-                       response->header[3] = 0;
-               response->header_length = 16;
-               response->payload_length = 0;
-               break;
-
-       case TCODE_READ_BLOCK_REQUEST:
-       case TCODE_LOCK_REQUEST:
-               response->header[0] |= HEADER_TCODE(tcode + 2);
-               response->header[3] =
-                       HEADER_DATA_LENGTH(length) |
-                       HEADER_EXTENDED_TCODE(extended_tcode);
-               response->header_length = 16;
-               response->payload = payload;
-               response->payload_length = length;
-               break;
-
-       default:
-               BUG();
-               return;
-       }
-
-       response->payload_bus = 0;
-}
-EXPORT_SYMBOL(fw_fill_response);
-
-static struct fw_request *allocate_request(struct fw_packet *p)
-{
-       struct fw_request *request;
-       u32 *data, length;
-       int request_tcode, t;
-
-       request_tcode = HEADER_GET_TCODE(p->header[0]);
-       switch (request_tcode) {
-       case TCODE_WRITE_QUADLET_REQUEST:
-               data = &p->header[3];
-               length = 4;
-               break;
-
-       case TCODE_WRITE_BLOCK_REQUEST:
-       case TCODE_LOCK_REQUEST:
-               data = p->payload;
-               length = HEADER_GET_DATA_LENGTH(p->header[3]);
-               break;
-
-       case TCODE_READ_QUADLET_REQUEST:
-               data = NULL;
-               length = 4;
-               break;
-
-       case TCODE_READ_BLOCK_REQUEST:
-               data = NULL;
-               length = HEADER_GET_DATA_LENGTH(p->header[3]);
-               break;
-
-       default:
-               fw_error("ERROR - corrupt request received - %08x %08x %08x\n",
-                        p->header[0], p->header[1], p->header[2]);
-               return NULL;
-       }
-
-       request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
-       if (request == NULL)
-               return NULL;
-
-       t = (p->timestamp & 0x1fff) + 4000;
-       if (t >= 8000)
-               t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
-       else
-               t = (p->timestamp & ~0x1fff) + t;
-
-       request->response.speed = p->speed;
-       request->response.timestamp = t;
-       request->response.generation = p->generation;
-       request->response.ack = 0;
-       request->response.callback = free_response_callback;
-       request->ack = p->ack;
-       request->length = length;
-       if (data)
-               memcpy(request->data, data, length);
-
-       memcpy(request->request_header, p->header, sizeof(p->header));
-
-       return request;
-}
-
-void fw_send_response(struct fw_card *card,
-                     struct fw_request *request, int rcode)
-{
-       /* unified transaction or broadcast transaction: don't respond */
-       if (request->ack != ACK_PENDING ||
-           HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
-               kfree(request);
-               return;
-       }
-
-       if (rcode == RCODE_COMPLETE)
-               fw_fill_response(&request->response, request->request_header,
-                                rcode, request->data, request->length);
-       else
-               fw_fill_response(&request->response, request->request_header,
-                                rcode, NULL, 0);
-
-       card->driver->send_response(card, &request->response);
-}
-EXPORT_SYMBOL(fw_send_response);
-
-void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
-{
-       struct fw_address_handler *handler;
-       struct fw_request *request;
-       unsigned long long offset;
-       unsigned long flags;
-       int tcode, destination, source;
-
-       if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
-               return;
-
-       request = allocate_request(p);
-       if (request == NULL) {
-               /* FIXME: send statically allocated busy packet. */
-               return;
-       }
-
-       offset      =
-               ((unsigned long long)
-                HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
-       tcode       = HEADER_GET_TCODE(p->header[0]);
-       destination = HEADER_GET_DESTINATION(p->header[0]);
-       source      = HEADER_GET_SOURCE(p->header[1]);
-
-       spin_lock_irqsave(&address_handler_lock, flags);
-       handler = lookup_enclosing_address_handler(&address_handler_list,
-                                                  offset, request->length);
-       spin_unlock_irqrestore(&address_handler_lock, flags);
-
-       /*
-        * FIXME: lookup the fw_node corresponding to the sender of
-        * this request and pass that to the address handler instead
-        * of the node ID.  We may also want to move the address
-        * allocations to fw_node so we only do this callback if the
-        * upper layers registered it for this node.
-        */
-
-       if (handler == NULL)
-               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
-       else
-               handler->address_callback(card, request,
-                                         tcode, destination, source,
-                                         p->generation, p->speed, offset,
-                                         request->data, request->length,
-                                         handler->callback_data);
-}
-EXPORT_SYMBOL(fw_core_handle_request);
-
-void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
-{
-       struct fw_transaction *t;
-       unsigned long flags;
-       u32 *data;
-       size_t data_length;
-       int tcode, tlabel, destination, source, rcode;
-
-       tcode       = HEADER_GET_TCODE(p->header[0]);
-       tlabel      = HEADER_GET_TLABEL(p->header[0]);
-       destination = HEADER_GET_DESTINATION(p->header[0]);
-       source      = HEADER_GET_SOURCE(p->header[1]);
-       rcode       = HEADER_GET_RCODE(p->header[1]);
-
-       spin_lock_irqsave(&card->lock, flags);
-       list_for_each_entry(t, &card->transaction_list, link) {
-               if (t->node_id == source && t->tlabel == tlabel) {
-                       list_del(&t->link);
-                       card->tlabel_mask &= ~(1 << t->tlabel);
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&card->lock, flags);
-
-       if (&t->link == &card->transaction_list) {
-               fw_notify("Unsolicited response (source %x, tlabel %x)\n",
-                         source, tlabel);
-               return;
-       }
-
-       /*
-        * FIXME: sanity check packet, is length correct, does tcodes
-        * and addresses match.
-        */
-
-       switch (tcode) {
-       case TCODE_READ_QUADLET_RESPONSE:
-               data = (u32 *) &p->header[3];
-               data_length = 4;
-               break;
-
-       case TCODE_WRITE_RESPONSE:
-               data = NULL;
-               data_length = 0;
-               break;
-
-       case TCODE_READ_BLOCK_RESPONSE:
-       case TCODE_LOCK_RESPONSE:
-               data = p->payload;
-               data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
-               break;
-
-       default:
-               /* Should never happen, this is just to shut up gcc. */
-               data = NULL;
-               data_length = 0;
-               break;
-       }
-
-       /*
-        * The response handler may be executed while the request handler
-        * is still pending.  Cancel the request handler.
-        */
-       card->driver->cancel_packet(card, &t->packet);
-
-       t->callback(card, rcode, data, data_length, t->callback_data);
-}
-EXPORT_SYMBOL(fw_core_handle_response);
-
-static const struct fw_address_region topology_map_region =
-       { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
-         .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
-
-static void handle_topology_map(struct fw_card *card, struct fw_request *request,
-               int tcode, int destination, int source, int generation,
-               int speed, unsigned long long offset,
-               void *payload, size_t length, void *callback_data)
-{
-       int i, start, end;
-       __be32 *map;
-
-       if (!TCODE_IS_READ_REQUEST(tcode)) {
-               fw_send_response(card, request, RCODE_TYPE_ERROR);
-               return;
-       }
-
-       if ((offset & 3) > 0 || (length & 3) > 0) {
-               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
-               return;
-       }
-
-       start = (offset - topology_map_region.start) / 4;
-       end = start + length / 4;
-       map = payload;
-
-       for (i = 0; i < length / 4; i++)
-               map[i] = cpu_to_be32(card->topology_map[start + i]);
-
-       fw_send_response(card, request, RCODE_COMPLETE);
-}
-
-static struct fw_address_handler topology_map = {
-       .length                 = 0x200,
-       .address_callback       = handle_topology_map,
-};
-
-static const struct fw_address_region registers_region =
-       { .start = CSR_REGISTER_BASE,
-         .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
-
-static void handle_registers(struct fw_card *card, struct fw_request *request,
-               int tcode, int destination, int source, int generation,
-               int speed, unsigned long long offset,
-               void *payload, size_t length, void *callback_data)
-{
-       int reg = offset & ~CSR_REGISTER_BASE;
-       unsigned long long bus_time;
-       __be32 *data = payload;
-       int rcode = RCODE_COMPLETE;
-
-       switch (reg) {
-       case CSR_CYCLE_TIME:
-       case CSR_BUS_TIME:
-               if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
-                       rcode = RCODE_TYPE_ERROR;
-                       break;
-               }
-
-               bus_time = card->driver->get_bus_time(card);
-               if (reg == CSR_CYCLE_TIME)
-                       *data = cpu_to_be32(bus_time);
-               else
-                       *data = cpu_to_be32(bus_time >> 25);
-               break;
-
-       case CSR_BROADCAST_CHANNEL:
-               if (tcode == TCODE_READ_QUADLET_REQUEST)
-                       *data = cpu_to_be32(card->broadcast_channel);
-               else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
-                       card->broadcast_channel =
-                           (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
-                           BROADCAST_CHANNEL_INITIAL;
-               else
-                       rcode = RCODE_TYPE_ERROR;
-               break;
-
-       case CSR_BUS_MANAGER_ID:
-       case CSR_BANDWIDTH_AVAILABLE:
-       case CSR_CHANNELS_AVAILABLE_HI:
-       case CSR_CHANNELS_AVAILABLE_LO:
-               /*
-                * FIXME: these are handled by the OHCI hardware and
-                * the stack never sees these request. If we add
-                * support for a new type of controller that doesn't
-                * handle this in hardware we need to deal with these
-                * transactions.
-                */
-               BUG();
-               break;
-
-       case CSR_BUSY_TIMEOUT:
-               /* FIXME: Implement this. */
-
-       default:
-               rcode = RCODE_ADDRESS_ERROR;
-               break;
-       }
-
-       fw_send_response(card, request, rcode);
-}
-
-static struct fw_address_handler registers = {
-       .length                 = 0x400,
-       .address_callback       = handle_registers,
-};
-
-MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
-MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
-MODULE_LICENSE("GPL");
-
-static const u32 vendor_textual_descriptor[] = {
-       /* textual descriptor leaf () */
-       0x00060000,
-       0x00000000,
-       0x00000000,
-       0x4c696e75,             /* L i n u */
-       0x78204669,             /* x   F i */
-       0x72657769,             /* r e w i */
-       0x72650000,             /* r e     */
-};
-
-static const u32 model_textual_descriptor[] = {
-       /* model descriptor leaf () */
-       0x00030000,
-       0x00000000,
-       0x00000000,
-       0x4a756a75,             /* J u j u */
-};
-
-static struct fw_descriptor vendor_id_descriptor = {
-       .length = ARRAY_SIZE(vendor_textual_descriptor),
-       .immediate = 0x03d00d1e,
-       .key = 0x81000000,
-       .data = vendor_textual_descriptor,
-};
-
-static struct fw_descriptor model_id_descriptor = {
-       .length = ARRAY_SIZE(model_textual_descriptor),
-       .immediate = 0x17000001,
-       .key = 0x81000000,
-       .data = model_textual_descriptor,
-};
-
-static int __init fw_core_init(void)
-{
-       int ret;
-
-       ret = bus_register(&fw_bus_type);
-       if (ret < 0)
-               return ret;
-
-       fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
-       if (fw_cdev_major < 0) {
-               bus_unregister(&fw_bus_type);
-               return fw_cdev_major;
-       }
-
-       fw_core_add_address_handler(&topology_map, &topology_map_region);
-       fw_core_add_address_handler(&registers, &registers_region);
-       fw_core_add_descriptor(&vendor_id_descriptor);
-       fw_core_add_descriptor(&model_id_descriptor);
-
-       return 0;
-}
-
-static void __exit fw_core_cleanup(void)
-{
-       unregister_chrdev(fw_cdev_major, "firewire");
-       bus_unregister(&fw_bus_type);
-       idr_destroy(&fw_device_idr);
-}
-
-module_init(fw_core_init);
-module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
deleted file mode 100644 (file)
index dfa7990..0000000
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __fw_transaction_h
-#define __fw_transaction_h
-
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/firewire-constants.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/spinlock_types.h>
-#include <linux/timer.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
-
-#define TCODE_IS_READ_REQUEST(tcode)   (((tcode) & ~1) == 4)
-#define TCODE_IS_BLOCK_PACKET(tcode)   (((tcode) &  1) != 0)
-#define TCODE_IS_REQUEST(tcode)                (((tcode) &  2) == 0)
-#define TCODE_IS_RESPONSE(tcode)       (((tcode) &  2) != 0)
-#define TCODE_HAS_REQUEST_DATA(tcode)  (((tcode) & 12) != 4)
-#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
-
-#define LOCAL_BUS 0xffc0
-
-#define SELFID_PORT_CHILD      0x3
-#define SELFID_PORT_PARENT     0x2
-#define SELFID_PORT_NCONN      0x1
-#define SELFID_PORT_NONE       0x0
-
-#define PHY_PACKET_CONFIG      0x0
-#define PHY_PACKET_LINK_ON     0x1
-#define PHY_PACKET_SELF_ID     0x2
-
-/* Bit fields _within_ the PHY registers. */
-#define PHY_LINK_ACTIVE                0x80
-#define PHY_CONTENDER          0x40
-#define PHY_BUS_RESET          0x40
-#define PHY_BUS_SHORT_RESET    0x40
-
-#define CSR_REGISTER_BASE              0xfffff0000000ULL
-
-/* register offsets relative to CSR_REGISTER_BASE */
-#define CSR_STATE_CLEAR                        0x0
-#define CSR_STATE_SET                  0x4
-#define CSR_NODE_IDS                   0x8
-#define CSR_RESET_START                        0xc
-#define CSR_SPLIT_TIMEOUT_HI           0x18
-#define CSR_SPLIT_TIMEOUT_LO           0x1c
-#define CSR_CYCLE_TIME                 0x200
-#define CSR_BUS_TIME                   0x204
-#define CSR_BUSY_TIMEOUT               0x210
-#define CSR_BUS_MANAGER_ID             0x21c
-#define CSR_BANDWIDTH_AVAILABLE                0x220
-#define CSR_CHANNELS_AVAILABLE         0x224
-#define CSR_CHANNELS_AVAILABLE_HI      0x224
-#define CSR_CHANNELS_AVAILABLE_LO      0x228
-#define CSR_BROADCAST_CHANNEL          0x234
-#define CSR_CONFIG_ROM                 0x400
-#define CSR_CONFIG_ROM_END             0x800
-#define CSR_FCP_COMMAND                        0xB00
-#define CSR_FCP_RESPONSE               0xD00
-#define CSR_FCP_END                    0xF00
-#define CSR_TOPOLOGY_MAP               0x1000
-#define CSR_TOPOLOGY_MAP_END           0x1400
-#define CSR_SPEED_MAP                  0x2000
-#define CSR_SPEED_MAP_END              0x3000
-
-#define BANDWIDTH_AVAILABLE_INITIAL    4915
-#define BROADCAST_CHANNEL_INITIAL      (1 << 31 | 31)
-#define BROADCAST_CHANNEL_VALID                (1 << 30)
-
-#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
-#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-
-static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
-{
-       u32    *dst = _dst;
-       __be32 *src = _src;
-       int i;
-
-       for (i = 0; i < size / 4; i++)
-               dst[i] = be32_to_cpu(src[i]);
-}
-
-static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
-{
-       fw_memcpy_from_be32(_dst, _src, size);
-}
-
-struct fw_card;
-struct fw_packet;
-struct fw_node;
-struct fw_request;
-
-struct fw_descriptor {
-       struct list_head link;
-       size_t length;
-       u32 immediate;
-       u32 key;
-       const u32 *data;
-};
-
-int fw_core_add_descriptor(struct fw_descriptor *desc);
-void fw_core_remove_descriptor(struct fw_descriptor *desc);
-
-typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
-                                    struct fw_card *card, int status);
-
-typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
-                                         void *data, size_t length,
-                                         void *callback_data);
-
-/*
- * Important note:  The callback must guarantee that either fw_send_response()
- * or kfree() is called on the @request.
- */
-typedef void (*fw_address_callback_t)(struct fw_card *card,
-                                     struct fw_request *request,
-                                     int tcode, int destination, int source,
-                                     int generation, int speed,
-                                     unsigned long long offset,
-                                     void *data, size_t length,
-                                     void *callback_data);
-
-struct fw_packet {
-       int speed;
-       int generation;
-       u32 header[4];
-       size_t header_length;
-       void *payload;
-       size_t payload_length;
-       dma_addr_t payload_bus;
-       u32 timestamp;
-
-       /*
-        * This callback is called when the packet transmission has
-        * completed; for successful transmission, the status code is
-        * the ack received from the destination, otherwise it's a
-        * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
-        * The callback can be called from tasklet context and thus
-        * must never block.
-        */
-       fw_packet_callback_t callback;
-       int ack;
-       struct list_head link;
-       void *driver_data;
-};
-
-struct fw_transaction {
-       int node_id; /* The generation is implied; it is always the current. */
-       int tlabel;
-       int timestamp;
-       struct list_head link;
-
-       struct fw_packet packet;
-
-       /*
-        * The data passed to the callback is valid only during the
-        * callback.
-        */
-       fw_transaction_callback_t callback;
-       void *callback_data;
-};
-
-struct fw_address_handler {
-       u64 offset;
-       size_t length;
-       fw_address_callback_t address_callback;
-       void *callback_data;
-       struct list_head link;
-};
-
-struct fw_address_region {
-       u64 start;
-       u64 end;
-};
-
-extern const struct fw_address_region fw_high_memory_region;
-
-int fw_core_add_address_handler(struct fw_address_handler *handler,
-                               const struct fw_address_region *region);
-void fw_core_remove_address_handler(struct fw_address_handler *handler);
-void fw_fill_response(struct fw_packet *response, u32 *request_header,
-                     int rcode, void *payload, size_t length);
-void fw_send_response(struct fw_card *card,
-                     struct fw_request *request, int rcode);
-
-extern struct bus_type fw_bus_type;
-
-struct fw_card {
-       const struct fw_card_driver *driver;
-       struct device *device;
-       struct kref kref;
-       struct completion done;
-
-       int node_id;
-       int generation;
-       int current_tlabel, tlabel_mask;
-       struct list_head transaction_list;
-       struct timer_list flush_timer;
-       unsigned long reset_jiffies;
-
-       unsigned long long guid;
-       unsigned max_receive;
-       int link_speed;
-       int config_rom_generation;
-
-       spinlock_t lock; /* Take this lock when handling the lists in
-                         * this struct. */
-       struct fw_node *local_node;
-       struct fw_node *root_node;
-       struct fw_node *irm_node;
-       u8 color; /* must be u8 to match the definition in struct fw_node */
-       int gap_count;
-       bool beta_repeaters_present;
-
-       int index;
-
-       struct list_head link;
-
-       /* Work struct for BM duties. */
-       struct delayed_work work;
-       int bm_retries;
-       int bm_generation;
-
-       bool broadcast_channel_allocated;
-       u32 broadcast_channel;
-       u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
-};
-
-static inline struct fw_card *fw_card_get(struct fw_card *card)
-{
-       kref_get(&card->kref);
-
-       return card;
-}
-
-void fw_card_release(struct kref *kref);
-
-static inline void fw_card_put(struct fw_card *card)
-{
-       kref_put(&card->kref, fw_card_release);
-}
-
-extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
-
-/*
- * Check whether new_generation is the immediate successor of old_generation.
- * Take counter roll-over at 255 (as per to OHCI) into account.
- */
-static inline bool is_next_generation(int new_generation, int old_generation)
-{
-       return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
-}
-
-/*
- * The iso packet format allows for an immediate header/payload part
- * stored in 'header' immediately after the packet info plus an
- * indirect payload part that is pointer to by the 'payload' field.
- * Applications can use one or the other or both to implement simple
- * low-bandwidth streaming (e.g. audio) or more advanced
- * scatter-gather streaming (e.g. assembling video frame automatically).
- */
-
-struct fw_iso_packet {
-       u16 payload_length;     /* Length of indirect payload. */
-       u32 interrupt : 1;      /* Generate interrupt on this packet */
-       u32 skip : 1;           /* Set to not send packet at all. */
-       u32 tag : 2;
-       u32 sy : 4;
-       u32 header_length : 8;  /* Length of immediate header. */
-       u32 header[0];
-};
-
-#define FW_ISO_CONTEXT_TRANSMIT        0
-#define FW_ISO_CONTEXT_RECEIVE 1
-
-#define FW_ISO_CONTEXT_MATCH_TAG0       1
-#define FW_ISO_CONTEXT_MATCH_TAG1       2
-#define FW_ISO_CONTEXT_MATCH_TAG2       4
-#define FW_ISO_CONTEXT_MATCH_TAG3       8
-#define FW_ISO_CONTEXT_MATCH_ALL_TAGS  15
-
-struct fw_iso_context;
-
-typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
-                                 u32 cycle, size_t header_length,
-                                 void *header, void *data);
-
-/*
- * An iso buffer is just a set of pages mapped for DMA in the
- * specified direction.  Since the pages are to be used for DMA, they
- * are not mapped into the kernel virtual address space.  We store the
- * DMA address in the page private. The helper function
- * fw_iso_buffer_map() will map the pages into a given vma.
- */
-
-struct fw_iso_buffer {
-       enum dma_data_direction direction;
-       struct page **pages;
-       int page_count;
-};
-
-struct fw_iso_context {
-       struct fw_card *card;
-       int type;
-       int channel;
-       int speed;
-       size_t header_size;
-       fw_iso_callback_t callback;
-       void *callback_data;
-};
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
-                      int page_count, enum dma_data_direction direction);
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
-               int type, int channel, int speed, size_t header_size,
-               fw_iso_callback_t callback, void *callback_data);
-int fw_iso_context_queue(struct fw_iso_context *ctx,
-                        struct fw_iso_packet *packet,
-                        struct fw_iso_buffer *buffer,
-                        unsigned long payload);
-int fw_iso_context_start(struct fw_iso_context *ctx,
-                        int cycle, int sync, int tags);
-int fw_iso_context_stop(struct fw_iso_context *ctx);
-void fw_iso_context_destroy(struct fw_iso_context *ctx);
-
-void fw_iso_resource_manage(struct fw_card *card, int generation,
-               u64 channels_mask, int *channel, int *bandwidth, bool allocate);
-
-struct fw_card_driver {
-       /*
-        * Enable the given card with the given initial config rom.
-        * This function is expected to activate the card, and either
-        * enable the PHY or set the link_on bit and initiate a bus
-        * reset.
-        */
-       int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
-
-       int (*update_phy_reg)(struct fw_card *card, int address,
-                             int clear_bits, int set_bits);
-
-       /*
-        * Update the config rom for an enabled card.  This function
-        * should change the config rom that is presented on the bus
-        * an initiate a bus reset.
-        */
-       int (*set_config_rom)(struct fw_card *card,
-                             u32 *config_rom, size_t length);
-
-       void (*send_request)(struct fw_card *card, struct fw_packet *packet);
-       void (*send_response)(struct fw_card *card, struct fw_packet *packet);
-       /* Calling cancel is valid once a packet has been submitted. */
-       int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
-
-       /*
-        * Allow the specified node ID to do direct DMA out and in of
-        * host memory.  The card will disable this for all node when
-        * a bus reset happens, so driver need to reenable this after
-        * bus reset.  Returns 0 on success, -ENODEV if the card
-        * doesn't support this, -ESTALE if the generation doesn't
-        * match.
-        */
-       int (*enable_phys_dma)(struct fw_card *card,
-                              int node_id, int generation);
-
-       u64 (*get_bus_time)(struct fw_card *card);
-
-       struct fw_iso_context *
-       (*allocate_iso_context)(struct fw_card *card,
-                               int type, int channel, size_t header_size);
-       void (*free_iso_context)(struct fw_iso_context *ctx);
-
-       int (*start_iso)(struct fw_iso_context *ctx,
-                        s32 cycle, u32 sync, u32 tags);
-
-       int (*queue_iso)(struct fw_iso_context *ctx,
-                        struct fw_iso_packet *packet,
-                        struct fw_iso_buffer *buffer,
-                        unsigned long payload);
-
-       int (*stop_iso)(struct fw_iso_context *ctx);
-};
-
-int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
-
-void fw_send_request(struct fw_card *card, struct fw_transaction *t,
-               int tcode, int destination_id, int generation, int speed,
-               unsigned long long offset, void *payload, size_t length,
-               fw_transaction_callback_t callback, void *callback_data);
-int fw_cancel_transaction(struct fw_card *card,
-                         struct fw_transaction *transaction);
-void fw_flush_transactions(struct fw_card *card);
-int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
-                      int generation, int speed, unsigned long long offset,
-                      void *payload, size_t length);
-void fw_send_phy_config(struct fw_card *card,
-                       int node_id, int generation, int gap_count);
-
-static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
-{
-       return tag << 14 | channel << 8 | sy;
-}
-
-/*
- * Called by the topology code to inform the device code of node
- * activity; found, lost, or updated nodes.
- */
-void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
-
-/* API used by card level drivers */
-
-void fw_card_initialize(struct fw_card *card,
-               const struct fw_card_driver *driver, struct device *device);
-int fw_card_add(struct fw_card *card,
-               u32 max_receive, u32 link_speed, u64 guid);
-void fw_core_remove_card(struct fw_card *card);
-void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
-               int generation, int self_id_count, u32 *self_ids);
-void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
-void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
-
-extern int fw_irm_set_broadcast_channel_register(struct device *dev,
-                                                void *data);
-
-#endif /* __fw_transaction_h */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
new file mode 100644 (file)
index 0000000..ecddd11
--- /dev/null
@@ -0,0 +1,2636 @@
+/*
+ * Driver for OHCI 1394 controllers
+ *
+ * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/pmac_feature.h>
+#endif
+
+#include "core.h"
+#include "ohci.h"
+
+#define DESCRIPTOR_OUTPUT_MORE         0
+#define DESCRIPTOR_OUTPUT_LAST         (1 << 12)
+#define DESCRIPTOR_INPUT_MORE          (2 << 12)
+#define DESCRIPTOR_INPUT_LAST          (3 << 12)
+#define DESCRIPTOR_STATUS              (1 << 11)
+#define DESCRIPTOR_KEY_IMMEDIATE       (2 << 8)
+#define DESCRIPTOR_PING                        (1 << 7)
+#define DESCRIPTOR_YY                  (1 << 6)
+#define DESCRIPTOR_NO_IRQ              (0 << 4)
+#define DESCRIPTOR_IRQ_ERROR           (1 << 4)
+#define DESCRIPTOR_IRQ_ALWAYS          (3 << 4)
+#define DESCRIPTOR_BRANCH_ALWAYS       (3 << 2)
+#define DESCRIPTOR_WAIT                        (3 << 0)
+
+struct descriptor {
+       __le16 req_count;
+       __le16 control;
+       __le32 data_address;
+       __le32 branch_address;
+       __le16 res_count;
+       __le16 transfer_status;
+} __attribute__((aligned(16)));
+
+struct db_descriptor {
+       __le16 first_size;
+       __le16 control;
+       __le16 second_req_count;
+       __le16 first_req_count;
+       __le32 branch_address;
+       __le16 second_res_count;
+       __le16 first_res_count;
+       __le32 reserved0;
+       __le32 first_buffer;
+       __le32 second_buffer;
+       __le32 reserved1;
+} __attribute__((aligned(16)));
+
+#define CONTROL_SET(regs)      (regs)
+#define CONTROL_CLEAR(regs)    ((regs) + 4)
+#define COMMAND_PTR(regs)      ((regs) + 12)
+#define CONTEXT_MATCH(regs)    ((regs) + 16)
+
+struct ar_buffer {
+       struct descriptor descriptor;
+       struct ar_buffer *next;
+       __le32 data[0];
+};
+
+struct ar_context {
+       struct fw_ohci *ohci;
+       struct ar_buffer *current_buffer;
+       struct ar_buffer *last_buffer;
+       void *pointer;
+       u32 regs;
+       struct tasklet_struct tasklet;
+};
+
+struct context;
+
+typedef int (*descriptor_callback_t)(struct context *ctx,
+                                    struct descriptor *d,
+                                    struct descriptor *last);
+
+/*
+ * A buffer that contains a block of DMA-able coherent memory used for
+ * storing a portion of a DMA descriptor program.
+ */
+struct descriptor_buffer {
+       struct list_head list;
+       dma_addr_t buffer_bus;
+       size_t buffer_size;
+       size_t used;
+       struct descriptor buffer[0];
+};
+
+struct context {
+       struct fw_ohci *ohci;
+       u32 regs;
+       int total_allocation;
+
+       /*
+        * List of page-sized buffers for storing DMA descriptors.
+        * Head of list contains buffers in use and tail of list contains
+        * free buffers.
+        */
+       struct list_head buffer_list;
+
+       /*
+        * Pointer to a buffer inside buffer_list that contains the tail
+        * end of the current DMA program.
+        */
+       struct descriptor_buffer *buffer_tail;
+
+       /*
+        * The descriptor containing the branch address of the first
+        * descriptor that has not yet been filled by the device.
+        */
+       struct descriptor *last;
+
+       /*
+        * The last descriptor in the DMA program.  It contains the branch
+        * address that must be updated upon appending a new descriptor.
+        */
+       struct descriptor *prev;
+
+       descriptor_callback_t callback;
+
+       struct tasklet_struct tasklet;
+};
+
+#define IT_HEADER_SY(v)          ((v) <<  0)
+#define IT_HEADER_TCODE(v)       ((v) <<  4)
+#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
+#define IT_HEADER_TAG(v)         ((v) << 14)
+#define IT_HEADER_SPEED(v)       ((v) << 16)
+#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
+
+struct iso_context {
+       struct fw_iso_context base;
+       struct context context;
+       int excess_bytes;
+       void *header;
+       size_t header_length;
+};
+
+#define CONFIG_ROM_SIZE 1024
+
+struct fw_ohci {
+       struct fw_card card;
+
+       __iomem char *registers;        /* MMIO window, accessed via reg_read/reg_write */
+       dma_addr_t self_id_bus;
+       __le32 *self_id_cpu;
+       struct tasklet_struct bus_reset_tasklet;
+       int node_id;
+       int generation;
+       int request_generation; /* for timestamping incoming requests */
+       atomic_t bus_seconds;
+
+       bool use_dualbuffer;
+       bool old_uninorth;      /* old PowerMac UniNorth: skip AR byteswap (cond_le32_to_cpu) */
+       bool bus_reset_packet_quirk;    /* chip may emit bus-reset packets with wrong generation */
+
+       /*
+        * Spinlock for accessing fw_ohci data.  Never call out of
+        * this driver with this lock held.
+        */
+       spinlock_t lock;
+       u32 self_id_buffer[512];
+
+       /* Config rom buffers */
+       __be32 *config_rom;
+       dma_addr_t config_rom_bus;
+       __be32 *next_config_rom;
+       dma_addr_t next_config_rom_bus;
+       u32 next_header;
+
+       struct ar_context ar_request_ctx;
+       struct ar_context ar_response_ctx;
+       struct context at_request_ctx;
+       struct context at_response_ctx;
+
+       u32 it_context_mask;
+       struct iso_context *it_context_list;
+       u64 ir_context_channels;
+       u32 ir_context_mask;
+       struct iso_context *ir_context_list;
+};
+
+/* Map the generic fw_card embedded in struct fw_ohci back to its container. */
+static inline struct fw_ohci *fw_ohci(struct fw_card *card)
+{
+       return container_of(card, struct fw_ohci, card);
+}
+
+/* ContextControl bits specific to isochronous (IT/IR) contexts */
+#define IT_CONTEXT_CYCLE_MATCH_ENABLE  0x80000000
+#define IR_CONTEXT_BUFFER_FILL         0x80000000
+#define IR_CONTEXT_ISOCH_HEADER                0x40000000
+#define IR_CONTEXT_CYCLE_MATCH_ENABLE  0x20000000
+#define IR_CONTEXT_MULTI_CHANNEL_MODE  0x10000000
+#define IR_CONTEXT_DUAL_BUFFER_MODE    0x08000000
+
+/* ContextControl bits common to all DMA contexts */
+#define CONTEXT_RUN    0x8000
+#define CONTEXT_WAKE   0x1000
+#define CONTEXT_DEAD   0x0800
+#define CONTEXT_ACTIVE 0x0400
+
+#define OHCI1394_MAX_AT_REQ_RETRIES    0xf
+#define OHCI1394_MAX_AT_RESP_RETRIES   0x2
+#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
+
+#define OHCI1394_REGISTER_SIZE         0x800
+#define OHCI_LOOP_COUNT                        500
+#define OHCI1394_PCI_HCI_Control       0x40
+#define SELF_ID_BUF_SIZE               0x800
+#define OHCI_TCODE_PHY_PACKET          0x0e
+#define OHCI_VERSION_1_1               0x010010
+
+static char ohci_driver_name[] = KBUILD_MODNAME;       /* derived from module name at build */
+
+#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
+
+/* Bits for the "debug" module parameter; OR them to enable several classes. */
+#define OHCI_PARAM_DEBUG_AT_AR         1
+#define OHCI_PARAM_DEBUG_SELFIDS       2
+#define OHCI_PARAM_DEBUG_IRQS          4
+#define OHCI_PARAM_DEBUG_BUSRESETS     8 /* only effective before chip init */
+
+static int param_debug;
+module_param_named(debug, param_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
+       ", AT/AR events = "     __stringify(OHCI_PARAM_DEBUG_AT_AR)
+       ", self-IDs = "         __stringify(OHCI_PARAM_DEBUG_SELFIDS)
+       ", IRQs = "             __stringify(OHCI_PARAM_DEBUG_IRQS)
+       ", busReset events = "  __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
+       ", or a combination, or all = -1)");
+
+/*
+ * Log an interrupt event bitmask in symbolic form.  busReset events are
+ * logged whenever OHCI_PARAM_DEBUG_BUSRESETS is set, even if general IRQ
+ * logging (OHCI_PARAM_DEBUG_IRQS) is off; everything else needs the
+ * IRQ debug bit.
+ */
+static void log_irqs(u32 evt)
+{
+       if (likely(!(param_debug &
+                       (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
+               return;
+
+       if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
+           !(evt & OHCI1394_busReset))
+               return;
+
+       fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+           evt & OHCI1394_selfIDComplete       ? " selfID"             : "",
+           evt & OHCI1394_RQPkt                ? " AR_req"             : "",
+           evt & OHCI1394_RSPkt                ? " AR_resp"            : "",
+           evt & OHCI1394_reqTxComplete        ? " AT_req"             : "",
+           evt & OHCI1394_respTxComplete       ? " AT_resp"            : "",
+           evt & OHCI1394_isochRx              ? " IR"                 : "",
+           evt & OHCI1394_isochTx              ? " IT"                 : "",
+           evt & OHCI1394_postedWriteErr       ? " postedWriteErr"     : "",
+           evt & OHCI1394_cycleTooLong         ? " cycleTooLong"       : "",
+           evt & OHCI1394_cycle64Seconds       ? " cycle64Seconds"     : "",
+           evt & OHCI1394_regAccessFail        ? " regAccessFail"      : "",
+           evt & OHCI1394_busReset             ? " busReset"           : "",
+           /* catch-all for any bit not decoded above */
+           evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
+                   OHCI1394_RSPkt | OHCI1394_reqTxComplete |
+                   OHCI1394_respTxComplete | OHCI1394_isochRx |
+                   OHCI1394_isochTx | OHCI1394_postedWriteErr |
+                   OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
+                   OHCI1394_regAccessFail | OHCI1394_busReset)
+                                               ? " ?"                  : "");
+}
+
+/* Decode tables used by log_selfids() below. */
+static const char *speed[] = {
+       [0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
+};
+static const char *power[] = {
+       [0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
+       [4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
+};
+static const char port[] = { '.', '-', 'p', 'c', };
+
+/* Extract and decode a 2-bit port status field from a self-ID quadlet. */
+static char _p(u32 *s, int shift)
+{
+       return port[*s >> shift & 3];
+}
+
+/*
+ * Dump all received self-ID quadlets in decoded form.  Bit 23 clear marks
+ * a node's first self-ID packet (phy id, speed, gap count, power class);
+ * bit 23 set marks a continuation packet carrying more port fields.
+ */
+static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
+{
+       if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
+               return;
+
+       fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
+                 self_id_count, generation, node_id);
+
+       for (; self_id_count--; ++s)
+               if ((*s & 1 << 23) == 0)
+                       fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
+                           "%s gc=%d %s %s%s%s\n",
+                           *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
+                           speed[*s >> 14 & 3], *s >> 16 & 63,
+                           power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
+                           *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
+               else
+                       fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
+                           *s, *s >> 24 & 63,
+                           _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
+                           _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
+}
+
+/* Symbolic names for OHCI event/ack codes, 1394 transaction codes, and
+ * PHY packet types; used by log_ar_at_event(). */
+static const char *evts[] = {
+       [0x00] = "evt_no_status",       [0x01] = "-reserved-",
+       [0x02] = "evt_long_packet",     [0x03] = "evt_missing_ack",
+       [0x04] = "evt_underrun",        [0x05] = "evt_overrun",
+       [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
+       [0x08] = "evt_data_write",      [0x09] = "evt_bus_reset",
+       [0x0a] = "evt_timeout",         [0x0b] = "evt_tcode_err",
+       [0x0c] = "-reserved-",          [0x0d] = "-reserved-",
+       [0x0e] = "evt_unknown",         [0x0f] = "evt_flushed",
+       [0x10] = "-reserved-",          [0x11] = "ack_complete",
+       [0x12] = "ack_pending ",        [0x13] = "-reserved-",
+       [0x14] = "ack_busy_X",          [0x15] = "ack_busy_A",
+       [0x16] = "ack_busy_B",          [0x17] = "-reserved-",
+       [0x18] = "-reserved-",          [0x19] = "-reserved-",
+       [0x1a] = "-reserved-",          [0x1b] = "ack_tardy",
+       [0x1c] = "-reserved-",          [0x1d] = "ack_data_error",
+       [0x1e] = "ack_type_error",      [0x1f] = "-reserved-",
+       [0x20] = "pending/cancelled",
+};
+static const char *tcodes[] = {
+       [0x0] = "QW req",               [0x1] = "BW req",
+       [0x2] = "W resp",               [0x3] = "-reserved-",
+       [0x4] = "QR req",               [0x5] = "BR req",
+       [0x6] = "QR resp",              [0x7] = "BR resp",
+       [0x8] = "cycle start",          [0x9] = "Lk req",
+       [0xa] = "async stream packet",  [0xb] = "Lk resp",
+       [0xc] = "-reserved-",           [0xd] = "-reserved-",
+       [0xe] = "link internal",        [0xf] = "-reserved-",
+};
+static const char *phys[] = {
+       [0x0] = "phy config packet",    [0x1] = "link-on packet",
+       [0x2] = "self-id packet",       [0x3] = "-reserved-",
+};
+
+/*
+ * Log one transmitted ('T') or received ('R') AT/AR packet in decoded form.
+ * @evt is the OHCI event code taken from the packet's status word.
+ */
+static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
+{
+       int tcode = header[0] >> 4 & 0xf;
+       char specific[12];      /* worst case " = 01234567" + NUL == 12 bytes */
+
+       if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
+               return;
+
+       if (unlikely(evt >= ARRAY_SIZE(evts)))
+                       evt = 0x1f;
+
+       if (evt == OHCI1394_evt_bus_reset) {
+               fw_notify("A%c evt_bus_reset, generation %d\n",
+                   dir, (header[2] >> 16) & 0xff);
+               return;
+       }
+
+       /* PHY packets carry the bit-inverse of quadlet 0 in quadlet 1. */
+       if (header[0] == ~header[1]) {
+               fw_notify("A%c %s, %s, %08x\n",
+                   dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
+               return;
+       }
+
+       switch (tcode) {
+       case 0x0: case 0x6: case 0x8:
+               snprintf(specific, sizeof(specific), " = %08x",
+                        be32_to_cpu((__force __be32)header[3]));
+               break;
+       case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
+               snprintf(specific, sizeof(specific), " %x,%x",
+                        header[3] >> 16, header[3] & 0xffff);
+               break;
+       default:
+               specific[0] = '\0';
+       }
+
+       switch (tcode) {
+       case 0xe: case 0xa:
+               fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
+               break;
+       case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
+               fw_notify("A%c spd %x tl %02x, "
+                   "%04x -> %04x, %s, "
+                   "%s, %04x%08x%s\n",
+                   dir, speed, header[0] >> 10 & 0x3f,
+                   header[1] >> 16, header[0] >> 16, evts[evt],
+                   tcodes[tcode], header[1] & 0xffff, header[2], specific);
+               break;
+       default:
+               fw_notify("A%c spd %x tl %02x, "
+                   "%04x -> %04x, %s, "
+                   "%s%s\n",
+                   dir, speed, header[0] >> 10 & 0x3f,
+                   header[1] >> 16, header[0] >> 16, evts[evt],
+                   tcodes[tcode], specific);
+       }
+}
+
+#else
+
+/* No-op stubs when CONFIG_FIREWIRE_OHCI_DEBUG is disabled. */
+#define log_irqs(evt)
+#define log_selfids(node_id, generation, self_id_count, sid)
+#define log_ar_at_event(dir, speed, header, evt)
+
+#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
+
+/* Write a 32-bit value to an OHCI register at byte @offset (posted). */
+static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
+{
+       writel(data, ohci->registers + offset);
+}
+
+/* Read a 32-bit value from an OHCI register at byte @offset. */
+static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
+{
+       return readl(ohci->registers + offset);
+}
+
+/* Force posted MMIO writes out to the chip before continuing. */
+static inline void flush_writes(const struct fw_ohci *ohci)
+{
+       /* Do a dummy read to flush writes. */
+       reg_read(ohci, OHCI1394_Version);
+}
+
+/*
+ * Read-modify-write PHY register @addr: clear @clear_bits, then set
+ * @set_bits.  Returns 0 on success or -EBUSY if the PHY read did not
+ * complete within the fixed 2 ms wait.
+ *
+ * NOTE(review): the write is posted without waiting for PhyControl to
+ * report completion — confirm callers do not rely on the new value being
+ * latched by the PHY when this returns.
+ */
+static int ohci_update_phy_reg(struct fw_card *card, int addr,
+                              int clear_bits, int set_bits)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       u32 val, old;
+
+       reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
+       flush_writes(ohci);
+       msleep(2);
+       val = reg_read(ohci, OHCI1394_PhyControl);
+       if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
+               fw_error("failed to set phy reg bits.\n");
+               return -EBUSY;
+       }
+
+       old = OHCI1394_PhyControl_ReadData(val);
+       old = (old & ~clear_bits) | set_bits;
+       reg_write(ohci, OHCI1394_PhyControl,
+                 OHCI1394_PhyControl_Write(addr, old));
+
+       return 0;
+}
+
+/*
+ * Allocate one DMA-coherent page for AR reception, set up its INPUT_MORE
+ * descriptor, chain it after the current last buffer, and wake the context.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int ar_context_add_page(struct ar_context *ctx)
+{
+       struct device *dev = ctx->ohci->card.device;
+       struct ar_buffer *ab;
+       dma_addr_t uninitialized_var(ab_bus);
+       size_t offset;
+
+       ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
+       if (ab == NULL)
+               return -ENOMEM;
+
+       ab->next = NULL;
+       memset(&ab->descriptor, 0, sizeof(ab->descriptor));
+       ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+                                                   DESCRIPTOR_STATUS |
+                                                   DESCRIPTOR_BRANCH_ALWAYS);
+       /* the data area starts after the embedded descriptor/next pointer */
+       offset = offsetof(struct ar_buffer, data);
+       ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
+       ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
+       ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
+       ab->descriptor.branch_address = 0;
+
+       /* low bit of the branch address is the descriptor count (z = 1) */
+       ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
+       ctx->last_buffer->next = ab;
+       ctx->last_buffer = ab;
+
+       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+       flush_writes(ctx->ohci);
+
+       return 0;
+}
+
+/*
+ * Free every AR buffer page from the current one onward.  The bus address
+ * is recovered by undoing the data-offset applied in ar_context_add_page().
+ */
+static void ar_context_release(struct ar_context *ctx)
+{
+       struct ar_buffer *ab, *ab_next;
+       size_t offset;
+       dma_addr_t ab_bus;
+
+       for (ab = ctx->current_buffer; ab; ab = ab_next) {
+               ab_next = ab->next;
+               offset = offsetof(struct ar_buffer, data);
+               ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+               dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
+                                 ab, ab_bus);
+       }
+}
+
+/*
+ * On old UniNorth-based PowerMacs the received quadlets are used as-is
+ * (presumably the chip already presents them in host order there —
+ * NOTE(review): confirm against the uninorth errata); everywhere else the
+ * usual little-endian swap applies.
+ */
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+#define cond_le32_to_cpu(v) \
+       (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
+#else
+#define cond_le32_to_cpu(v) le32_to_cpu(v)
+#endif
+
+/*
+ * Parse one received packet starting at @buffer, dispatch it to the core
+ * as a request or response, and return a pointer just past the packet's
+ * trailing status quadlet (i.e. the start of the next packet).
+ */
+static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
+{
+       struct fw_ohci *ohci = ctx->ohci;
+       struct fw_packet p;
+       u32 status, length, tcode;
+       int evt;
+
+       p.header[0] = cond_le32_to_cpu(buffer[0]);
+       p.header[1] = cond_le32_to_cpu(buffer[1]);
+       p.header[2] = cond_le32_to_cpu(buffer[2]);
+
+       /* header/payload layout depends on the transaction code */
+       tcode = (p.header[0] >> 4) & 0x0f;
+       switch (tcode) {
+       case TCODE_WRITE_QUADLET_REQUEST:
+       case TCODE_READ_QUADLET_RESPONSE:
+               p.header[3] = (__force __u32) buffer[3];
+               p.header_length = 16;
+               p.payload_length = 0;
+               break;
+
+       case TCODE_READ_BLOCK_REQUEST :
+               p.header[3] = cond_le32_to_cpu(buffer[3]);
+               p.header_length = 16;
+               p.payload_length = 0;
+               break;
+
+       case TCODE_WRITE_BLOCK_REQUEST:
+       case TCODE_READ_BLOCK_RESPONSE:
+       case TCODE_LOCK_REQUEST:
+       case TCODE_LOCK_RESPONSE:
+               p.header[3] = cond_le32_to_cpu(buffer[3]);
+               p.header_length = 16;
+               p.payload_length = p.header[3] >> 16;
+               break;
+
+       case TCODE_WRITE_RESPONSE:
+       case TCODE_READ_QUADLET_REQUEST:
+       case OHCI_TCODE_PHY_PACKET:
+               p.header_length = 12;
+               p.payload_length = 0;
+               break;
+
+       default:
+               /* FIXME: Stop context, discard everything, and restart? */
+               p.header_length = 0;
+               p.payload_length = 0;
+       }
+
+       p.payload = (void *) buffer + p.header_length;
+
+       /* FIXME: What to do about evt_* errors? */
+       length = (p.header_length + p.payload_length + 3) / 4;
+       status = cond_le32_to_cpu(buffer[length]);
+       evt    = (status >> 16) & 0x1f;
+
+       p.ack        = evt - 16;
+       p.speed      = (status >> 21) & 0x7;
+       p.timestamp  = status & 0xffff;
+       p.generation = ohci->request_generation;
+
+       log_ar_at_event('R', p.speed, p.header, evt);
+
+       /*
+        * The OHCI bus reset handler synthesizes a phy packet with
+        * the new generation number when a bus reset happens (see
+        * section 8.4.2.3).  This helps us determine when a request
+        * was received and make sure we send the response in the same
+        * generation.  We only need this for requests; for responses
+        * we use the unique tlabel for finding the matching
+        * request.
+        *
+        * Alas some chips sometimes emit bus reset packets with a
+        * wrong generation.  We set the correct generation for these
+        * at a slightly incorrect time (in bus_reset_tasklet).
+        */
+       if (evt == OHCI1394_evt_bus_reset) {
+               if (!ohci->bus_reset_packet_quirk)
+                       ohci->request_generation = (p.header[2] >> 16) & 0xff;
+       } else if (ctx == &ohci->ar_request_ctx) {
+               fw_core_handle_request(&ohci->card, &p);
+       } else {
+               fw_core_handle_response(&ohci->card, &p);
+       }
+
+       /* skip past the packet and its status quadlet */
+       return buffer + length + 1;
+}
+
+/*
+ * Process newly received AR data.  If the current page filled up, a packet
+ * may straddle into the next page; the finished page is reused as scratch
+ * to reassemble the split packet, then freed and replaced with a fresh one.
+ */
+static void ar_context_tasklet(unsigned long data)
+{
+       struct ar_context *ctx = (struct ar_context *)data;
+       struct fw_ohci *ohci = ctx->ohci;
+       struct ar_buffer *ab;
+       struct descriptor *d;
+       void *buffer, *end;
+
+       ab = ctx->current_buffer;
+       d = &ab->descriptor;
+
+       if (d->res_count == 0) {
+               size_t size, rest, offset;
+               dma_addr_t start_bus;
+               void *start;
+
+               /*
+                * This descriptor is finished and we may have a
+                * packet split across this and the next buffer. We
+                * reuse the page for reassembling the split packet.
+                */
+
+               offset = offsetof(struct ar_buffer, data);
+               start = buffer = ab;
+               start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+
+               ab = ab->next;
+               d = &ab->descriptor;
+               /* size = unconsumed tail of the old page */
+               size = buffer + PAGE_SIZE - ctx->pointer;
+               /* rest = bytes the chip has written into the next page */
+               rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+               memmove(buffer, ctx->pointer, size);
+               memcpy(buffer + size, ab->data, rest);
+               ctx->current_buffer = ab;
+               ctx->pointer = (void *) ab->data + rest;
+               end = buffer + size + rest;
+
+               while (buffer < end)
+                       buffer = handle_ar_packet(ctx, buffer);
+
+               dma_free_coherent(ohci->card.device, PAGE_SIZE,
+                                 start, start_bus);
+               ar_context_add_page(ctx);
+       } else {
+               buffer = ctx->pointer;
+               ctx->pointer = end =
+                       (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
+
+               while (buffer < end)
+                       buffer = handle_ar_packet(ctx, buffer);
+       }
+}
+
+/*
+ * Initialize an AR context and prime it with two receive pages.  A dummy
+ * on-stack ar_buffer seeds the chain so ar_context_add_page() has a
+ * predecessor to link against; only its .next field is read afterwards.
+ *
+ * NOTE(review): the return values of ar_context_add_page() are ignored —
+ * if the first allocation fails, ab.next is read uninitialized below.
+ * Confirm whether the error should be propagated instead.
+ */
+static int ar_context_init(struct ar_context *ctx,
+                          struct fw_ohci *ohci, u32 regs)
+{
+       struct ar_buffer ab;
+
+       ctx->regs        = regs;
+       ctx->ohci        = ohci;
+       ctx->last_buffer = &ab;
+       tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
+
+       ar_context_add_page(ctx);
+       ar_context_add_page(ctx);
+       ctx->current_buffer = ab.next;
+       ctx->pointer = ctx->current_buffer->data;
+
+       return 0;
+}
+
+/*
+ * Point the context's command pointer at the current buffer's descriptor
+ * (z = 1 in the low bit) and set the run bit to start reception.
+ */
+static void ar_context_run(struct ar_context *ctx)
+{
+       struct ar_buffer *ab = ctx->current_buffer;
+       dma_addr_t ab_bus;
+       size_t offset;
+
+       offset = offsetof(struct ar_buffer, data);
+       ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+
+       reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
+       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
+       flush_writes(ctx->ohci);
+}
+
+/*
+ * Decide which descriptor of a @z-descriptor block carries the branch
+ * address.  For a two-descriptor block that either branches always
+ * (branch field == 3) or begins with an immediate-key descriptor
+ * (key field == 2), the branch lives in the first descriptor; in every
+ * other case it is in the last descriptor of the block.
+ */
+static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
+{
+       int control = le16_to_cpu(d->control);
+       int branch  = (control & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
+       int key     = (control & DESCRIPTOR_KEY_IMMEDIATE) >> 8;
+
+       if (z == 2 && (branch == 3 || key == 2))
+               return d;
+
+       return d + z - 1;
+}
+
+/*
+ * Walk the DMA program from ctx->last, calling ctx->callback for each
+ * completed descriptor block and recycling descriptor buffers that have
+ * been fully consumed back onto the free tail of buffer_list.
+ */
+static void context_tasklet(unsigned long data)
+{
+       struct context *ctx = (struct context *) data;
+       struct descriptor *d, *last;
+       u32 address;
+       int z;
+       struct descriptor_buffer *desc;
+
+       desc = list_entry(ctx->buffer_list.next,
+                       struct descriptor_buffer, list);
+       last = ctx->last;
+       while (last->branch_address != 0) {
+               struct descriptor_buffer *old_desc = desc;
+               /* low 4 bits of the branch address encode z, the block size */
+               address = le32_to_cpu(last->branch_address);
+               z = address & 0xf;
+               address &= ~0xf;
+
+               /* If the branch address points to a buffer outside of the
+                * current buffer, advance to the next buffer. */
+               if (address < desc->buffer_bus ||
+                               address >= desc->buffer_bus + desc->used)
+                       desc = list_entry(desc->list.next,
+                                       struct descriptor_buffer, list);
+               d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
+               last = find_branch_descriptor(d, z);
+
+               if (!ctx->callback(ctx, d, last))
+                       break;
+
+               if (old_desc != desc) {
+                       /* If we've advanced to the next buffer, move the
+                        * previous buffer to the free list. */
+                       unsigned long flags;
+                       old_desc->used = 0;
+                       spin_lock_irqsave(&ctx->ohci->lock, flags);
+                       list_move_tail(&old_desc->list, &ctx->buffer_list);
+                       spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+               }
+               ctx->last = last;
+       }
+}
+
+/*
+ * Allocate a new buffer and add it to the list of free buffers for this
+ * context.  Must be called with ohci->lock held.
+ */
+static int context_add_buffer(struct context *ctx)
+{
+       struct descriptor_buffer *desc;
+       dma_addr_t uninitialized_var(bus_addr);
+       int offset;
+
+       /*
+        * 16MB of descriptors should be far more than enough for any DMA
+        * program.  This will catch run-away userspace or DoS attacks.
+        */
+       if (ctx->total_allocation >= 16*1024*1024)
+               return -ENOMEM;
+
+       desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
+                       &bus_addr, GFP_ATOMIC);
+       if (!desc)
+               return -ENOMEM;
+
+       /* the descriptor array starts after the buffer's own header fields */
+       offset = (void *)&desc->buffer - (void *)desc;
+       desc->buffer_size = PAGE_SIZE - offset;
+       desc->buffer_bus = bus_addr + offset;
+       desc->used = 0;
+
+       list_add_tail(&desc->list, &ctx->buffer_list);
+       ctx->total_allocation += PAGE_SIZE;
+
+       return 0;
+}
+
+/*
+ * Initialize a generic DMA context: allocate its first descriptor buffer,
+ * set up the tasklet, and seed the program with a dummy already-completed
+ * descriptor.  Returns 0 on success or -ENOMEM.
+ */
+static int context_init(struct context *ctx, struct fw_ohci *ohci,
+                       u32 regs, descriptor_callback_t callback)
+{
+       ctx->ohci = ohci;
+       ctx->regs = regs;
+       ctx->total_allocation = 0;
+
+       INIT_LIST_HEAD(&ctx->buffer_list);
+       if (context_add_buffer(ctx) < 0)
+               return -ENOMEM;
+
+       ctx->buffer_tail = list_entry(ctx->buffer_list.next,
+                       struct descriptor_buffer, list);
+
+       tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
+       ctx->callback = callback;
+
+       /*
+        * We put a dummy descriptor in the buffer that has a NULL
+        * branch address and looks like it's been sent.  That way we
+        * have a descriptor to append DMA programs to.
+        */
+       memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
+       ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
+       ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
+       ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
+       ctx->last = ctx->buffer_tail->buffer;
+       ctx->prev = ctx->buffer_tail->buffer;
+
+       return 0;
+}
+
+/*
+ * Free every descriptor buffer of the context.  The original page bus
+ * address is recovered by undoing the header offset applied in
+ * context_add_buffer().
+ */
+static void context_release(struct context *ctx)
+{
+       struct fw_card *card = &ctx->ohci->card;
+       struct descriptor_buffer *desc, *tmp;
+
+       list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
+               dma_free_coherent(card->device, PAGE_SIZE, desc,
+                       desc->buffer_bus -
+                       ((void *)&desc->buffer - (void *)desc));
+}
+
+/* Must be called with ohci->lock held */
+/*
+ * Reserve room for @z contiguous descriptors in the context's tail buffer,
+ * zero them, and return their CPU address (*d_bus gets the bus address).
+ * Returns NULL if z descriptors can never fit in a buffer, or if a new
+ * buffer could not be allocated.  The space is only accounted as used
+ * once context_append() is called.
+ */
+static struct descriptor *context_get_descriptors(struct context *ctx,
+                                                 int z, dma_addr_t *d_bus)
+{
+       struct descriptor *d = NULL;
+       struct descriptor_buffer *desc = ctx->buffer_tail;
+
+       if (z * sizeof(*d) > desc->buffer_size)
+               return NULL;
+
+       if (z * sizeof(*d) > desc->buffer_size - desc->used) {
+               /* No room for the descriptor in this buffer, so advance to the
+                * next one. */
+
+               if (desc->list.next == &ctx->buffer_list) {
+                       /* If there is no free buffer next in the list,
+                        * allocate one. */
+                       if (context_add_buffer(ctx) < 0)
+                               return NULL;
+               }
+               desc = list_entry(desc->list.next,
+                               struct descriptor_buffer, list);
+               ctx->buffer_tail = desc;
+       }
+
+       d = desc->buffer + desc->used / sizeof(*d);
+       memset(d, 0, z * sizeof(*d));
+       *d_bus = desc->buffer_bus + desc->used;
+
+       return d;
+}
+
+/*
+ * Start the context running from ctx->last's branch address.  @extra lets
+ * callers OR additional ContextControl bits into the set register.
+ */
+static void context_run(struct context *ctx, u32 extra)
+{
+       struct fw_ohci *ohci = ctx->ohci;
+
+       reg_write(ohci, COMMAND_PTR(ctx->regs),
+                 le32_to_cpu(ctx->last->branch_address));
+       reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
+       reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+       flush_writes(ohci);
+}
+
+/*
+ * Link @z descriptors at @d (plus @extra reserved trailing entries) into
+ * the DMA program by patching the previous tail's branch address, then
+ * wake the context so the controller picks up the new work.
+ */
+static void context_append(struct context *ctx,
+                          struct descriptor *d, int z, int extra)
+{
+       dma_addr_t d_bus;
+       struct descriptor_buffer *desc = ctx->buffer_tail;
+
+       d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
+
+       desc->used += (z + extra) * sizeof(*d);
+       /* low 4 bits of the branch address carry z, the block size */
+       ctx->prev->branch_address = cpu_to_le32(d_bus | z);
+       ctx->prev = find_branch_descriptor(d, z);
+
+       reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
+       flush_writes(ctx->ohci);
+}
+
+/*
+ * Clear the run bit and poll up to 10 ms (10 x 1 ms) for the context to go
+ * inactive; log an error if the DMA engine is still active after that.
+ */
+static void context_stop(struct context *ctx)
+{
+       u32 reg;
+       int i;
+
+       reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+       flush_writes(ctx->ohci);
+
+       for (i = 0; i < 10; i++) {
+               reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
+               if ((reg & CONTEXT_ACTIVE) == 0)
+                       return;
+
+               mdelay(1);
+       }
+       fw_error("Error: DMA context still active (0x%08x)\n", reg);
+}
+
+/* Per-packet data stashed in the fourth descriptor of an AT block. */
+struct driver_data {
+       struct fw_packet *packet;
+};
+
+/*
+ * This function appends a packet to the DMA queue for transmission.
+ * Must always be called with the ohci->lock held to ensure proper
+ * generation handling and locking around packet queue manipulation.
+ */
+/*
+ * Build the 4-descriptor AT block for @packet, append it to @ctx, and
+ * start the context if it is not already running.  Returns 0 on success
+ * or -1 with packet->ack set to an RCODE error.
+ */
+static int at_context_queue_packet(struct context *ctx,
+                                  struct fw_packet *packet)
+{
+       struct fw_ohci *ohci = ctx->ohci;
+       dma_addr_t d_bus, uninitialized_var(payload_bus);
+       struct driver_data *driver_data;
+       struct descriptor *d, *last;
+       __le32 *header;
+       int z, tcode;
+       u32 reg;
+
+       d = context_get_descriptors(ctx, 4, &d_bus);
+       if (d == NULL) {
+               packet->ack = RCODE_SEND_ERROR;
+               return -1;
+       }
+
+       d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
+       d[0].res_count = cpu_to_le16(packet->timestamp);
+
+       /*
+        * The DMA format for asynchronous link packets is different
+        * from the IEEE1394 layout, so shift the fields around
+        * accordingly.  If header_length is 8, it's a PHY packet, to
+        * which we need to prepend an extra quadlet.
+        */
+
+       header = (__le32 *) &d[1];
+       switch (packet->header_length) {
+       case 16:
+       case 12:
+               header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+                                       (packet->speed << 16));
+               header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
+                                       (packet->header[0] & 0xffff0000));
+               header[2] = cpu_to_le32(packet->header[2]);
+
+               tcode = (packet->header[0] >> 4) & 0x0f;
+               if (TCODE_IS_BLOCK_PACKET(tcode))
+                       header[3] = cpu_to_le32(packet->header[3]);
+               else
+                       header[3] = (__force __le32) packet->header[3];
+
+               d[0].req_count = cpu_to_le16(packet->header_length);
+               break;
+
+       case 8:
+               header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
+                                       (packet->speed << 16));
+               header[1] = cpu_to_le32(packet->header[0]);
+               header[2] = cpu_to_le32(packet->header[1]);
+               d[0].req_count = cpu_to_le16(12);
+               break;
+
+       case 4:
+               header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
+                                       (packet->speed << 16));
+               header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
+               d[0].req_count = cpu_to_le16(8);
+               break;
+
+       default:
+               /* BUG(); */
+               packet->ack = RCODE_SEND_ERROR;
+               return -1;
+       }
+
+       /* stash the packet pointer in the block's fourth descriptor */
+       driver_data = (struct driver_data *) &d[3];
+       driver_data->packet = packet;
+       packet->driver_data = driver_data;
+
+       if (packet->payload_length > 0) {
+               payload_bus =
+                       dma_map_single(ohci->card.device, packet->payload,
+                                      packet->payload_length, DMA_TO_DEVICE);
+               if (dma_mapping_error(ohci->card.device, payload_bus)) {
+                       packet->ack = RCODE_SEND_ERROR;
+                       return -1;
+               }
+               packet->payload_bus = payload_bus;
+
+               d[2].req_count    = cpu_to_le16(packet->payload_length);
+               d[2].data_address = cpu_to_le32(payload_bus);
+               last = &d[2];
+               z = 3;
+       } else {
+               last = &d[0];
+               z = 2;
+       }
+
+       last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+                                    DESCRIPTOR_IRQ_ALWAYS |
+                                    DESCRIPTOR_BRANCH_ALWAYS);
+
+       /*
+        * If the controller and packet generations don't match, we need to
+        * bail out and try again.  If IntEvent.busReset is set, the AT context
+        * is halted, so appending to the context and trying to run it is
+        * futile.  Most controllers do the right thing and just flush the AT
+        * queue (per section 7.2.3.2 of the OHCI 1.1 specification), but
+        * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
+        * up stalling out.  So we just bail out in software and try again
+        * later, and everyone is happy.
+        * FIXME: Document how the locking works.
+        */
+       if (ohci->generation != packet->generation ||
+           reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
+               if (packet->payload_length > 0)
+                       dma_unmap_single(ohci->card.device, payload_bus,
+                                        packet->payload_length, DMA_TO_DEVICE);
+               /* NOTE(review): packet->payload_bus stays set after the unmap
+                * above — confirm nothing unmaps it again on this path. */
+               packet->ack = RCODE_GENERATION;
+               return -1;
+       }
+
+       context_append(ctx, d, z, 4 - z);
+
+       /* If the context isn't already running, start it up. */
+       reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
+       if ((reg & CONTEXT_RUN) == 0)
+               context_run(ctx, 0);
+
+       return 0;
+}
+
+/*
+ * Completion handler for AT descriptor blocks: translate the OHCI event
+ * code into an RCODE or ack value and invoke the packet's callback.
+ * Returns 0 to stop iterating when the descriptor is not yet done,
+ * 1 to continue with the next block.
+ */
+static int handle_at_packet(struct context *context,
+                           struct descriptor *d,
+                           struct descriptor *last)
+{
+       struct driver_data *driver_data;
+       struct fw_packet *packet;
+       struct fw_ohci *ohci = context->ohci;
+       int evt;
+
+       if (last->transfer_status == 0)
+               /* This descriptor isn't done yet, stop iteration. */
+               return 0;
+
+       driver_data = (struct driver_data *) &d[3];
+       packet = driver_data->packet;
+       if (packet == NULL)
+               /* This packet was cancelled, just continue. */
+               return 1;
+
+       /* NOTE(review): a zero bus address is treated as "no payload" —
+        * confirm payload_bus can never legitimately be 0. */
+       if (packet->payload_bus)
+               dma_unmap_single(ohci->card.device, packet->payload_bus,
+                                packet->payload_length, DMA_TO_DEVICE);
+
+       evt = le16_to_cpu(last->transfer_status) & 0x1f;
+       packet->timestamp = le16_to_cpu(last->res_count);
+
+       log_ar_at_event('T', packet->speed, packet->header, evt);
+
+       switch (evt) {
+       case OHCI1394_evt_timeout:
+               /* Async response transmit timed out. */
+               packet->ack = RCODE_CANCELLED;
+               break;
+
+       case OHCI1394_evt_flushed:
+               /*
+                * The packet was flushed should give same error as
+                * when we try to use a stale generation count.
+                */
+               packet->ack = RCODE_GENERATION;
+               break;
+
+       case OHCI1394_evt_missing_ack:
+               /*
+                * Using a valid (current) generation count, but the
+                * node is not on the bus or not sending acks.
+                */
+               packet->ack = RCODE_NO_ACK;
+               break;
+
+       /* hardware ack codes are offset by 0x10 in the event field */
+       case ACK_COMPLETE + 0x10:
+       case ACK_PENDING + 0x10:
+       case ACK_BUSY_X + 0x10:
+       case ACK_BUSY_A + 0x10:
+       case ACK_BUSY_B + 0x10:
+       case ACK_DATA_ERROR + 0x10:
+       case ACK_TYPE_ERROR + 0x10:
+               packet->ack = evt - 0x10;
+               break;
+
+       default:
+               packet->ack = RCODE_SEND_ERROR;
+               break;
+       }
+
+       packet->callback(packet, &ohci->card, packet->ack);
+
+       return 1;
+}
+
+/* Extract fields from IEEE 1394 request header quadlets. */
+#define HEADER_GET_DESTINATION(q)      (((q) >> 16) & 0xffff)
+#define HEADER_GET_TCODE(q)            (((q) >> 4) & 0x0f)
+#define HEADER_GET_OFFSET_HIGH(q)      (((q) >> 0) & 0xffff)
+#define HEADER_GET_DATA_LENGTH(q)      (((q) >> 16) & 0xffff)
+#define HEADER_GET_EXTENDED_TCODE(q)   (((q) >> 0) & 0xffff)
+
+/*
+ * Serve a read request that targets this node's own config ROM without
+ * going out on the wire: build a response straight from the in-memory
+ * config_rom image and feed it back into the core.
+ */
+static void handle_local_rom(struct fw_ohci *ohci,
+                            struct fw_packet *packet, u32 csr)
+{
+       struct fw_packet response;
+       int tcode, length, i;
+
+       tcode = HEADER_GET_TCODE(packet->header[0]);
+       if (TCODE_IS_BLOCK_PACKET(tcode))
+               length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+       else
+               length = 4;     /* quadlet access */
+
+       /* Byte offset into the config ROM. */
+       i = csr - CSR_CONFIG_ROM;
+       if (i + length > CONFIG_ROM_SIZE) {
+               fw_fill_response(&response, packet->header,
+                                RCODE_ADDRESS_ERROR, NULL, 0);
+       } else if (!TCODE_IS_READ_REQUEST(tcode)) {
+               /* The config ROM is read-only. */
+               fw_fill_response(&response, packet->header,
+                                RCODE_TYPE_ERROR, NULL, 0);
+       } else {
+               fw_fill_response(&response, packet->header, RCODE_COMPLETE,
+                                (void *) ohci->config_rom + i, length);
+       }
+
+       fw_core_handle_response(&ohci->card, &response);
+}
+
+/*
+ * Serve a lock request (or quadlet read) addressed to one of this
+ * node's own serial-bus-resource CSRs by driving the controller's CSR
+ * compare-swap machinery and sending the response through the core.
+ *
+ * Bug fix: lock_old was previously left uninitialized on the
+ * "swap not done yet" path, so the response could carry random stack
+ * data.  It is now zero-initialized before use.
+ */
+static void handle_local_lock(struct fw_ohci *ohci,
+                             struct fw_packet *packet, u32 csr)
+{
+       struct fw_packet response;
+       int tcode, length, ext_tcode, sel;
+       __be32 *payload, lock_old;
+       u32 lock_arg, lock_data;
+
+       tcode = HEADER_GET_TCODE(packet->header[0]);
+       length = HEADER_GET_DATA_LENGTH(packet->header[3]);
+       payload = packet->payload;
+       ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
+
+       /*
+        * Never hand back uninitialized stack data if the controller
+        * does not complete the compare-swap in time (see below).
+        */
+       lock_old = cpu_to_be32(0);
+
+       if (tcode == TCODE_LOCK_REQUEST &&
+           ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
+               lock_arg = be32_to_cpu(payload[0]);
+               lock_data = be32_to_cpu(payload[1]);
+       } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
+               /* Quadlet reads go through the same CSR machinery with
+                * zeroed arguments. */
+               lock_arg = 0;
+               lock_data = 0;
+       } else {
+               fw_fill_response(&response, packet->header,
+                                RCODE_TYPE_ERROR, NULL, 0);
+               goto out;
+       }
+
+       /* Select which serial-bus CSR the controller should operate on. */
+       sel = (csr - CSR_BUS_MANAGER_ID) / 4;
+       reg_write(ohci, OHCI1394_CSRData, lock_data);
+       reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
+       reg_write(ohci, OHCI1394_CSRControl, sel);
+
+       /* Bit 31 of CSRControl signals completion of the operation. */
+       if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
+               lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
+       else
+               fw_notify("swap not done yet\n");
+
+       fw_fill_response(&response, packet->header,
+                        RCODE_COMPLETE, &lock_old, sizeof(lock_old));
+ out:
+       fw_core_handle_response(&ohci->card, &response);
+}
+
+/*
+ * Dispatch a request addressed to this node itself (loopback):
+ * config-ROM reads and the serial-bus-resource CSRs are handled
+ * locally, everything else goes to the core as a normal request or
+ * response.  Fakes the hardware ack callbacks (ACK_PENDING on the
+ * request side, ACK_COMPLETE on the response side), since the packet
+ * never reaches the AT DMA machinery.
+ */
+static void handle_local_request(struct context *ctx, struct fw_packet *packet)
+{
+       u64 offset;
+       u32 csr;
+
+       if (ctx == &ctx->ohci->at_request_ctx) {
+               packet->ack = ACK_PENDING;
+               packet->callback(packet, &ctx->ohci->card, packet->ack);
+       }
+
+       /* Reassemble the 48-bit destination offset from the header. */
+       offset =
+               ((unsigned long long)
+                HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
+               packet->header[2];
+       csr = offset - CSR_REGISTER_BASE;
+
+       /* Handle config rom reads. */
+       if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
+               handle_local_rom(ctx->ohci, packet, csr);
+       else switch (csr) {
+       case CSR_BUS_MANAGER_ID:
+       case CSR_BANDWIDTH_AVAILABLE:
+       case CSR_CHANNELS_AVAILABLE_HI:
+       case CSR_CHANNELS_AVAILABLE_LO:
+               handle_local_lock(ctx->ohci, packet, csr);
+               break;
+       default:
+               if (ctx == &ctx->ohci->at_request_ctx)
+                       fw_core_handle_request(&ctx->ohci->card, packet);
+               else
+                       fw_core_handle_response(&ctx->ohci->card, packet);
+               break;
+       }
+
+       if (ctx == &ctx->ohci->at_response_ctx) {
+               packet->ack = ACK_COMPLETE;
+               packet->callback(packet, &ctx->ohci->card, packet->ack);
+       }
+}
+
+/*
+ * Queue a packet on an AT context.  Packets addressed to our own node
+ * ID (with a current generation) are handled locally instead of being
+ * queued to the hardware.  If queueing fails, the packet's callback is
+ * invoked with the ack that at_context_queue_packet() set.
+ */
+static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ctx->ohci->lock, flags);
+
+       if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
+           ctx->ohci->generation == packet->generation) {
+               spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+               handle_local_request(ctx, packet);
+               return;
+       }
+
+       ret = at_context_queue_packet(ctx, packet);
+       spin_unlock_irqrestore(&ctx->ohci->lock, flags);
+
+       if (ret < 0)
+               packet->callback(packet, &ctx->ohci->card, packet->ack);
+
+}
+
+/*
+ * Tasklet run after a selfIDComplete interrupt: validates the new node
+ * ID and the self-ID buffer, bumps the bus generation, stops the AT
+ * contexts, completes any pending config-rom update, and finally hands
+ * the topology information to the core.
+ */
+static void bus_reset_tasklet(unsigned long data)
+{
+       struct fw_ohci *ohci = (struct fw_ohci *)data;
+       int self_id_count, i, j, reg;
+       int generation, new_generation;
+       unsigned long flags;
+       void *free_rom = NULL;
+       dma_addr_t free_rom_bus = 0;
+
+       reg = reg_read(ohci, OHCI1394_NodeID);
+       if (!(reg & OHCI1394_NodeID_idValid)) {
+               fw_notify("node ID not valid, new bus reset in progress\n");
+               return;
+       }
+       if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
+               fw_notify("malconfigured bus\n");
+               return;
+       }
+       ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
+                              OHCI1394_NodeID_nodeNumber);
+
+       reg = reg_read(ohci, OHCI1394_SelfIDCount);
+       if (reg & OHCI1394_SelfIDCount_selfIDError) {
+               fw_notify("inconsistent self IDs\n");
+               return;
+       }
+       /*
+        * The count in the SelfIDCount register is the number of
+        * bytes in the self ID receive buffer.  Since we also receive
+        * the inverted quadlets and a header quadlet, we shift one
+        * bit extra to get the actual number of self IDs.
+        */
+       self_id_count = (reg >> 3) & 0x3ff;
+       if (self_id_count == 0) {
+               fw_notify("inconsistent self IDs\n");
+               return;
+       }
+       /* Quadlet 0 of the buffer carries the generation of this set. */
+       generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
+       rmb();
+
+       /* Each self ID is followed by its bit-inverted copy; verify it. */
+       for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
+               if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
+                       fw_notify("inconsistent self IDs\n");
+                       return;
+               }
+               ohci->self_id_buffer[j] =
+                               cond_le32_to_cpu(ohci->self_id_cpu[i]);
+       }
+       rmb();
+
+       /*
+        * Check the consistency of the self IDs we just read.  The
+        * problem we face is that a new bus reset can start while we
+        * read out the self IDs from the DMA buffer. If this happens,
+        * the DMA buffer will be overwritten with new self IDs and we
+        * will read out inconsistent data.  The OHCI specification
+        * (section 11.2) recommends a technique similar to
+        * linux/seqlock.h, where we remember the generation of the
+        * self IDs in the buffer before reading them out and compare
+        * it to the current generation after reading them out.  If
+        * the two generations match we know we have a consistent set
+        * of self IDs.
+        */
+
+       new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
+       if (new_generation != generation) {
+               fw_notify("recursive bus reset detected, "
+                         "discarding self ids\n");
+               return;
+       }
+
+       /* FIXME: Document how the locking works. */
+       spin_lock_irqsave(&ohci->lock, flags);
+
+       ohci->generation = generation;
+       context_stop(&ohci->at_request_ctx);
+       context_stop(&ohci->at_response_ctx);
+       /* Only now is busReset acked; see the comment in irq_handler(). */
+       reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
+
+       if (ohci->bus_reset_packet_quirk)
+               ohci->request_generation = generation;
+
+       /*
+        * This next bit is unrelated to the AT context stuff but we
+        * have to do it under the spinlock also.  If a new config rom
+        * was set up before this reset, the old one is now no longer
+        * in use and we can free it. Update the config rom pointers
+        * to point to the current config rom and clear the
+        * next_config_rom pointer so a new update can take place.
+        */
+
+       if (ohci->next_config_rom != NULL) {
+               if (ohci->next_config_rom != ohci->config_rom) {
+                       free_rom      = ohci->config_rom;
+                       free_rom_bus  = ohci->config_rom_bus;
+               }
+               ohci->config_rom      = ohci->next_config_rom;
+               ohci->config_rom_bus  = ohci->next_config_rom_bus;
+               ohci->next_config_rom = NULL;
+
+               /*
+                * Restore config_rom image and manually update
+                * config_rom registers.  Writing the header quadlet
+                * will indicate that the config rom is ready, so we
+                * do that last.
+                */
+               reg_write(ohci, OHCI1394_BusOptions,
+                         be32_to_cpu(ohci->config_rom[2]));
+               ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
+               reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
+       }
+
+#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+       /* Debug option: re-open the physical request filter to all nodes. */
+       reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
+       reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
+#endif
+
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       /* Free the superseded config rom outside the spinlock. */
+       if (free_rom)
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 free_rom, free_rom_bus);
+
+       log_selfids(ohci->node_id, generation,
+                   self_id_count, ohci->self_id_buffer);
+
+       fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
+                                self_id_count, ohci->self_id_buffer);
+}
+
+/*
+ * Top-half interrupt handler.  Acknowledges every pending event except
+ * busReset (which must stay asserted until the bus reset tasklet has
+ * run, per OHCI 1.1 clause 7.2.3.2) and schedules the tasklet that
+ * owns each event source.
+ */
+static irqreturn_t irq_handler(int irq, void *data)
+{
+       struct fw_ohci *ohci = data;
+       u32 event, iso_event, cycle_time;
+       int i;
+
+       event = reg_read(ohci, OHCI1394_IntEventClear);
+
+       /*
+        * 0 means no event pending (shared IRQ, not ours); all ones
+        * typically means the device is gone — either way, not handled.
+        */
+       if (!event || !~event)
+               return IRQ_NONE;
+
+       /* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
+       reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+       log_irqs(event);
+
+       if (event & OHCI1394_selfIDComplete)
+               tasklet_schedule(&ohci->bus_reset_tasklet);
+
+       if (event & OHCI1394_RQPkt)
+               tasklet_schedule(&ohci->ar_request_ctx.tasklet);
+
+       if (event & OHCI1394_RSPkt)
+               tasklet_schedule(&ohci->ar_response_ctx.tasklet);
+
+       if (event & OHCI1394_reqTxComplete)
+               tasklet_schedule(&ohci->at_request_ctx.tasklet);
+
+       if (event & OHCI1394_respTxComplete)
+               tasklet_schedule(&ohci->at_response_ctx.tasklet);
+
+       /* One bit per isochronous receive context. */
+       iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+       reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+
+       while (iso_event) {
+               i = ffs(iso_event) - 1;
+               tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
+               iso_event &= ~(1 << i);
+       }
+
+       /* One bit per isochronous transmit context. */
+       iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+       reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+
+       while (iso_event) {
+               i = ffs(iso_event) - 1;
+               tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
+               iso_event &= ~(1 << i);
+       }
+
+       if (unlikely(event & OHCI1394_regAccessFail))
+               fw_error("Register access failure - "
+                        "please notify linux1394-devel@lists.sf.net\n");
+
+       if (unlikely(event & OHCI1394_postedWriteErr))
+               fw_error("PCI posted write error\n");
+
+       if (unlikely(event & OHCI1394_cycleTooLong)) {
+               if (printk_ratelimit())
+                       fw_notify("isochronous cycle too long\n");
+               /* Re-assert cycleMaster, which the controller appears to
+                * clear on this error — confirm against OHCI spec. */
+               reg_write(ohci, OHCI1394_LinkControlSet,
+                         OHCI1394_LinkControl_cycleMaster);
+       }
+
+       if (event & OHCI1394_cycle64Seconds) {
+               /* NOTE(review): only the MSB==0 half of the 64-second
+                * event is counted, presumably pairing with the cycle
+                * timer MSB used in ohci_get_bus_time() — confirm. */
+               cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+               if ((cycle_time & 0x80000000) == 0)
+                       atomic_inc(&ohci->bus_seconds);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Issue a soft reset and poll (1 ms per try, OHCI_LOOP_COUNT tries)
+ * until the controller clears the softReset bit.  Returns 0 on
+ * success, -EBUSY if the reset never completes.
+ */
+static int software_reset(struct fw_ohci *ohci)
+{
+       int tries;
+
+       reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
+
+       for (tries = OHCI_LOOP_COUNT; tries > 0; tries--) {
+               if (!(reg_read(ohci, OHCI1394_HCControlSet) &
+                     OHCI1394_HCControl_softReset))
+                       return 0;
+               msleep(1);
+       }
+
+       return -EBUSY;
+}
+
+/*
+ * Bring the controller into operation: soft reset, LPS bring-up,
+ * link/retry/interrupt register setup, config rom installation, IRQ
+ * registration, then link enable plus an initiating bus reset.
+ * config_rom == NULL (the resume path) reuses the previous rom image;
+ * length is in quadlets.  Returns 0 or a negative errno.
+ */
+static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       struct pci_dev *dev = to_pci_dev(card->device);
+       u32 lps;
+       int i;
+
+       if (software_reset(ohci)) {
+               fw_error("Failed to reset ohci card.\n");
+               return -EBUSY;
+       }
+
+       /*
+        * Now enable LPS, which we need in order to start accessing
+        * most of the registers.  In fact, on some cards (ALI M5251),
+        * accessing registers in the SClk domain without LPS enabled
+        * will lock up the machine.  Wait 50msec to make sure we have
+        * full link enabled.  However, with some cards (well, at least
+        * a JMicron PCIe card), we have to try again sometimes.
+        */
+       reg_write(ohci, OHCI1394_HCControlSet,
+                 OHCI1394_HCControl_LPS |
+                 OHCI1394_HCControl_postedWriteEnable);
+       flush_writes(ohci);
+
+       /* Poll for LPS up to three times, 50 ms apart. */
+       for (lps = 0, i = 0; !lps && i < 3; i++) {
+               msleep(50);
+               lps = reg_read(ohci, OHCI1394_HCControlSet) &
+                     OHCI1394_HCControl_LPS;
+       }
+
+       if (!lps) {
+               fw_error("Failed to set Link Power Status\n");
+               return -EIO;
+       }
+
+       reg_write(ohci, OHCI1394_HCControlClear,
+                 OHCI1394_HCControl_noByteSwapData);
+
+       reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
+       reg_write(ohci, OHCI1394_LinkControlClear,
+                 OHCI1394_LinkControl_rcvPhyPkt);
+       reg_write(ohci, OHCI1394_LinkControlSet,
+                 OHCI1394_LinkControl_rcvSelfID |
+                 OHCI1394_LinkControl_cycleTimerEnable |
+                 OHCI1394_LinkControl_cycleMaster);
+
+       reg_write(ohci, OHCI1394_ATRetries,
+                 OHCI1394_MAX_AT_REQ_RETRIES |
+                 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
+                 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
+
+       /* Start the asynchronous receive DMA contexts. */
+       ar_context_run(&ohci->ar_request_ctx);
+       ar_context_run(&ohci->ar_response_ctx);
+
+       reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
+       reg_write(ohci, OHCI1394_IntEventClear, ~0);
+       reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+       /* Unmask exactly the events irq_handler() knows how to handle. */
+       reg_write(ohci, OHCI1394_IntMaskSet,
+                 OHCI1394_selfIDComplete |
+                 OHCI1394_RQPkt | OHCI1394_RSPkt |
+                 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
+                 OHCI1394_isochRx | OHCI1394_isochTx |
+                 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
+                 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
+                 OHCI1394_masterIntEnable);
+       if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
+               reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
+
+       /* Activate link_on bit and contender bit in our self ID packets.*/
+       if (ohci_update_phy_reg(card, 4, 0,
+                               PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
+               return -EIO;
+
+       /*
+        * When the link is not yet enabled, the atomic config rom
+        * update mechanism described below in ohci_set_config_rom()
+        * is not active.  We have to update ConfigRomHeader and
+        * BusOptions manually, and the write to ConfigROMmap takes
+        * effect immediately.  We tie this to the enabling of the
+        * link, so we have a valid config rom before enabling - the
+        * OHCI requires that ConfigROMhdr and BusOptions have valid
+        * values before enabling.
+        *
+        * However, when the ConfigROMmap is written, some controllers
+        * always read back quadlets 0 and 2 from the config rom to
+        * the ConfigRomHeader and BusOptions registers on bus reset.
+        * They shouldn't do that in this initial case where the link
+        * isn't enabled.  This means we have to use the same
+        * workaround here, setting the bus header to 0 and then write
+        * the right values in the bus reset tasklet.
+        */
+
+       if (config_rom) {
+               ohci->next_config_rom =
+                       dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                          &ohci->next_config_rom_bus,
+                                          GFP_KERNEL);
+               if (ohci->next_config_rom == NULL)
+                       return -ENOMEM;
+
+               memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
+               fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
+       } else {
+               /*
+                * In the suspend case, config_rom is NULL, which
+                * means that we just reuse the old config rom.
+                */
+               ohci->next_config_rom = ohci->config_rom;
+               ohci->next_config_rom_bus = ohci->config_rom_bus;
+       }
+
+       /* Stash the real header quadlet; publish 0 until the bus reset
+        * tasklet declares the rom ready (see workaround note above). */
+       ohci->next_header = be32_to_cpu(ohci->next_config_rom[0]);
+       ohci->next_config_rom[0] = 0;
+       reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
+       reg_write(ohci, OHCI1394_BusOptions,
+                 be32_to_cpu(ohci->next_config_rom[2]));
+       reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
+
+       reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
+
+       if (request_irq(dev->irq, irq_handler,
+                       IRQF_SHARED, ohci_driver_name, ohci)) {
+               fw_error("Failed to allocate shared interrupt %d.\n",
+                        dev->irq);
+               /*
+                * NOTE(review): this frees config_rom, but a freshly
+                * allocated next_config_rom (config_rom != NULL path
+                * above) appears to be leaked here — verify.
+                */
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 ohci->config_rom, ohci->config_rom_bus);
+               return -EIO;
+       }
+
+       reg_write(ohci, OHCI1394_HCControlSet,
+                 OHCI1394_HCControl_linkEnable |
+                 OHCI1394_HCControl_BIBimageValid);
+       flush_writes(ohci);
+
+       /*
+        * We are ready to go, initiate bus reset to finish the
+        * initialization.
+        */
+
+       fw_core_initiate_bus_reset(&ohci->card, 1);
+
+       return 0;
+}
+
+/*
+ * Publish a new config rom via the controller's atomic update
+ * mechanism (OHCI 5.5.6): stage it in next_config_rom, point
+ * ConfigROMmapNext at it, and trigger a bus reset; the bus reset
+ * tasklet completes the swap and frees the old image.  Returns 0 on
+ * success, -EBUSY if an update is already pending, -ENOMEM on
+ * allocation failure.  length is in quadlets.
+ */
+static int ohci_set_config_rom(struct fw_card *card,
+                              u32 *config_rom, size_t length)
+{
+       struct fw_ohci *ohci;
+       unsigned long flags;
+       int ret = -EBUSY;
+       __be32 *next_config_rom;
+       dma_addr_t uninitialized_var(next_config_rom_bus);
+
+       ohci = fw_ohci(card);
+
+       /*
+        * When the OHCI controller is enabled, the config rom update
+        * mechanism is a bit tricky, but easy enough to use.  See
+        * section 5.5.6 in the OHCI specification.
+        *
+        * The OHCI controller caches the new config rom address in a
+        * shadow register (ConfigROMmapNext) and needs a bus reset
+        * for the changes to take place.  When the bus reset is
+        * detected, the controller loads the new values for the
+        * ConfigRomHeader and BusOptions registers from the specified
+        * config rom and loads ConfigROMmap from the ConfigROMmapNext
+        * shadow register. All automatically and atomically.
+        *
+        * Now, there's a twist to this story.  The automatic load of
+        * ConfigRomHeader and BusOptions doesn't honor the
+        * noByteSwapData bit, so with a be32 config rom, the
+        * controller will load be32 values in to these registers
+        * during the atomic update, even on little endian
+        * architectures.  The workaround we use is to put a 0 in the
+        * header quadlet; 0 is endian agnostic and means that the
+        * config rom isn't ready yet.  In the bus reset tasklet we
+        * then set up the real values for the two registers.
+        *
+        * We use ohci->lock to avoid racing with the code that sets
+        * ohci->next_config_rom to NULL (see bus_reset_tasklet).
+        */
+
+       /* Allocate before taking the lock: GFP_KERNEL may sleep. */
+       next_config_rom =
+               dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                  &next_config_rom_bus, GFP_KERNEL);
+       if (next_config_rom == NULL)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&ohci->lock, flags);
+
+       /* next_config_rom != NULL means an update is still in flight. */
+       if (ohci->next_config_rom == NULL) {
+               ohci->next_config_rom = next_config_rom;
+               ohci->next_config_rom_bus = next_config_rom_bus;
+
+               memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
+               fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
+                                 length * 4);
+
+               ohci->next_header = config_rom[0];
+               ohci->next_config_rom[0] = 0;
+
+               reg_write(ohci, OHCI1394_ConfigROMmap,
+                         ohci->next_config_rom_bus);
+               ret = 0;
+       }
+
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       /*
+        * Now initiate a bus reset to have the changes take
+        * effect. We clean up the old config rom memory and DMA
+        * mappings in the bus reset tasklet, since the OHCI
+        * controller could need to access it before the bus reset
+        * takes effect.
+        */
+       if (ret == 0)
+               fw_core_initiate_bus_reset(&ohci->card, 1);
+       else
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 next_config_rom, next_config_rom_bus);
+
+       return ret;
+}
+
+/* Hand an outgoing asynchronous request to the AT request context. */
+static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
+{
+       at_context_transmit(&fw_ohci(card)->at_request_ctx, packet);
+}
+
+/* Hand an outgoing asynchronous response to the AT response context. */
+static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
+{
+       at_context_transmit(&fw_ohci(card)->at_response_ctx, packet);
+}
+
+/*
+ * Try to cancel a packet queued on the AT request context.  The
+ * tasklet is disabled to synchronize against handle_at_packet().
+ * Returns 0 if the packet was cancelled (its callback runs with
+ * RCODE_CANCELLED), or -ENOENT if the hardware already acked it.
+ */
+static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       struct context *ctx = &ohci->at_request_ctx;
+       struct driver_data *driver_data = packet->driver_data;
+       int ret = -ENOENT;
+
+       tasklet_disable(&ctx->tasklet);
+
+       /* A nonzero ack means transmission already completed. */
+       if (packet->ack != 0)
+               goto out;
+
+       if (packet->payload_bus)
+               dma_unmap_single(ohci->card.device, packet->payload_bus,
+                                packet->payload_length, DMA_TO_DEVICE);
+
+       log_ar_at_event('T', packet->speed, packet->header, 0x20);
+       /* Clearing driver_data->packet tells handle_at_packet() to skip
+        * this descriptor block when it eventually completes. */
+       driver_data->packet = NULL;
+       packet->ack = RCODE_CANCELLED;
+       packet->callback(packet, &ohci->card, packet->ack);
+       ret = 0;
+ out:
+       tasklet_enable(&ctx->tasklet);
+
+       return ret;
+}
+
+/*
+ * Allow physical-DMA requests from the given node by setting its bit
+ * in the PhyReqFilter registers.  Fails with -ESTALE if *generation*
+ * no longer matches the current bus generation.  With
+ * CONFIG_FIREWIRE_OHCI_REMOTE_DMA, the filter is opened wide in
+ * bus_reset_tasklet(), so this is a no-op returning 0.
+ */
+static int ohci_enable_phys_dma(struct fw_card *card,
+                               int node_id, int generation)
+{
+#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
+       return 0;
+#else
+       struct fw_ohci *ohci = fw_ohci(card);
+       unsigned long flags;
+       int n, ret = 0;
+
+       /*
+        * FIXME:  Make sure this bitmask is cleared when we clear the busReset
+        * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
+        */
+
+       spin_lock_irqsave(&ohci->lock, flags);
+
+       if (ohci->generation != generation) {
+               ret = -ESTALE;
+               goto out;
+       }
+
+       /*
+        * Note, if the node ID contains a non-local bus ID, physical DMA is
+        * enabled for _all_ nodes on remote buses.
+        */
+
+       n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
+       if (n < 32)
+               reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
+       else
+               reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
+
+       flush_writes(ohci);
+ out:
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       return ret;
+#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
+}
+
+/*
+ * Build a 64-bit bus time: the driver-maintained seconds counter (see
+ * the cycle64Seconds handling in irq_handler) in the upper 32 bits,
+ * the controller's raw cycle timer in the lower 32.
+ */
+static u64 ohci_get_bus_time(struct fw_card *card)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       u32 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+       return ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
+}
+
+/*
+ * Append up to base.header_size bytes of received iso packet headers
+ * from *p into the context's one-page header buffer.  Headers are
+ * silently dropped once the page is full.
+ */
+static void copy_iso_headers(struct iso_context *ctx, void *p)
+{
+       int i = ctx->header_length;
+
+       /* Out of room in the header page: drop this header. */
+       if (i + ctx->base.header_size > PAGE_SIZE)
+               return;
+
+       /*
+        * The iso header is byteswapped to little endian by
+        * the controller, but the remaining header quadlets
+        * are big endian.  We want to present all the headers
+        * as big endian, so we have to swap the first quadlet.
+        */
+       if (ctx->base.header_size > 0)
+               *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
+       if (ctx->base.header_size > 4)
+               *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
+       if (ctx->base.header_size > 8)
+               memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
+       ctx->header_length += ctx->base.header_size;
+}
+
+/*
+ * Completion handler for IR contexts in dual-buffer mode.  Copies the
+ * received packet headers out of the first buffer, tracks how many
+ * payload bytes spilled into the second buffer (ctx->excess_bytes),
+ * and flushes the accumulated headers to the user callback when a
+ * descriptor flagged with DESCRIPTOR_IRQ_ALWAYS completes.  Returns 0
+ * to stop iterating (descriptor not done yet), 1 to continue.
+ */
+static int handle_ir_dualbuffer_packet(struct context *context,
+                                      struct descriptor *d,
+                                      struct descriptor *last)
+{
+       struct iso_context *ctx =
+               container_of(context, struct iso_context, context);
+       struct db_descriptor *db = (struct db_descriptor *) d;
+       __le32 *ir_header;
+       size_t header_length;
+       void *p, *end;
+
+       if (db->first_res_count != 0 && db->second_res_count != 0) {
+               if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
+                       /* This descriptor isn't done yet, stop iteration. */
+                       return 0;
+               }
+               ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
+       }
+
+       /* Bytes actually received into the first (header) buffer. */
+       header_length = le16_to_cpu(db->first_req_count) -
+               le16_to_cpu(db->first_res_count);
+
+       p = db + 1;
+       end = p + header_length;
+       while (p < end) {
+               copy_iso_headers(ctx, p);
+               /* Upper 16 bits of the quadlet at p + 4 — presumably the
+                * iso data length; see the quadlet-order note in
+                * copy_iso_headers(). */
+               ctx->excess_bytes +=
+                       (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
+               p += max(ctx->base.header_size, (size_t)8);
+       }
+
+       ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
+               le16_to_cpu(db->second_res_count);
+
+       if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
+               ir_header = (__le32 *) (db + 1);
+               ctx->base.callback(&ctx->base,
+                                  le32_to_cpu(ir_header[0]) & 0xffff,
+                                  ctx->header_length, ctx->header,
+                                  ctx->base.callback_data);
+               ctx->header_length = 0;
+       }
+
+       return 1;
+}
+
+/*
+ * Completion handler for IR contexts in packet-per-buffer mode.
+ * Returns 0 while any descriptor in [d, last] is still pending;
+ * otherwise copies the trailing headers and, when the completed
+ * descriptor requested an interrupt, flushes them to the user
+ * callback.
+ */
+static int handle_ir_packet_per_buffer(struct context *context,
+                                      struct descriptor *d,
+                                      struct descriptor *last)
+{
+       struct iso_context *ctx =
+               container_of(context, struct iso_context, context);
+       struct descriptor *pd;
+       __le32 *ir_header;
+       void *p;
+
+       for (pd = d; pd <= last; pd++) {
+               if (pd->transfer_status)
+                       break;
+       }
+       if (pd > last)
+               /* Descriptor(s) not done yet, stop iteration */
+               return 0;
+
+       /* The received headers follow directly after the last descriptor. */
+       p = last + 1;
+       copy_iso_headers(ctx, p);
+
+       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
+               ir_header = (__le32 *) p;
+               /* Low 16 bits of the first quadlet — presumably the iso
+                * data length — go to the callback as the cycle/len arg. */
+               ctx->base.callback(&ctx->base,
+                                  le32_to_cpu(ir_header[0]) & 0xffff,
+                                  ctx->header_length, ctx->header,
+                                  ctx->base.callback_data);
+               ctx->header_length = 0;
+       }
+
+       return 1;
+}
+
+/*
+ * Completion handler for IT (iso transmit) contexts.  Returns 0 if
+ * *last hasn't completed; otherwise, when the descriptor was flagged
+ * for an interrupt, invokes the user callback with
+ * le16_to_cpu(last->res_count) — presumably the completion cycle —
+ * and returns 1.
+ */
+static int handle_it_packet(struct context *context,
+                           struct descriptor *d,
+                           struct descriptor *last)
+{
+       struct iso_context *ctx =
+               container_of(context, struct iso_context, context);
+
+       if (last->transfer_status == 0)
+               /* This descriptor isn't done yet, stop iteration. */
+               return 0;
+
+       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+               ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
+                                  0, NULL, ctx->base.callback_data);
+
+       return 1;
+}
+
+/*
+ * Allocate a free isochronous context of the given type.  For receive
+ * contexts the requested channel must still be free in
+ * ohci->ir_context_channels; channel and context slot are claimed
+ * together under ohci->lock.  Returns the embedded fw_iso_context on
+ * success or an ERR_PTR() on failure.
+ *
+ * Bug fix: the failure path used to restore only the context-mask
+ * bit, leaking the claimed channel bit forever; it now restores both.
+ */
+static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
+                               int type, int channel, size_t header_size)
+{
+       struct fw_ohci *ohci = fw_ohci(card);
+       struct iso_context *ctx, *list;
+       descriptor_callback_t callback;
+       u64 *channels, dont_care = ~0ULL;
+       u32 *mask, regs;
+       unsigned long flags;
+       int index, ret = -ENOMEM;
+
+       if (type == FW_ISO_CONTEXT_TRANSMIT) {
+               channels = &dont_care;  /* transmit doesn't claim a channel */
+               mask = &ohci->it_context_mask;
+               list = ohci->it_context_list;
+               callback = handle_it_packet;
+       } else {
+               channels = &ohci->ir_context_channels;
+               mask = &ohci->ir_context_mask;
+               list = ohci->ir_context_list;
+               if (ohci->use_dualbuffer)
+                       callback = handle_ir_dualbuffer_packet;
+               else
+                       callback = handle_ir_packet_per_buffer;
+       }
+
+       /* Atomically claim both the channel and a free context slot. */
+       spin_lock_irqsave(&ohci->lock, flags);
+       index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
+       if (index >= 0) {
+               *channels &= ~(1ULL << channel);
+               *mask &= ~(1 << index);
+       }
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       if (index < 0)
+               return ERR_PTR(-EBUSY);
+
+       if (type == FW_ISO_CONTEXT_TRANSMIT)
+               regs = OHCI1394_IsoXmitContextBase(index);
+       else
+               regs = OHCI1394_IsoRcvContextBase(index);
+
+       ctx = &list[index];
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->header_length = 0;
+       ctx->header = (void *) __get_free_page(GFP_KERNEL);
+       if (ctx->header == NULL)
+               goto out;
+
+       ret = context_init(&ctx->context, ohci, regs, callback);
+       if (ret < 0)
+               goto out_with_header;
+
+       return &ctx->base;
+
+ out_with_header:
+       free_page((unsigned long)ctx->header);
+ out:
+       /*
+        * Roll back the reservation: return the channel bit as well as
+        * the context slot, so a failed allocation doesn't leak the
+        * channel.  (For transmit contexts *channels points at the
+        * local dont_care, so that write is harmless there.)
+        */
+       spin_lock_irqsave(&ohci->lock, flags);
+       *channels |= 1ULL << channel;
+       *mask |= 1 << index;
+       spin_unlock_irqrestore(&ohci->lock, flags);
+
+       return ERR_PTR(ret);
+}
+
+/*
+ * Arm and start an isochronous context.  A non-negative @cycle requests a
+ * cycle-match start; @sync and @tags only apply to receive contexts.
+ */
+static int ohci_start_iso(struct fw_iso_context *base,
+                         s32 cycle, u32 sync, u32 tags)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct fw_ohci *ohci = ctx->context.ohci;
+       u32 control, match;
+       int index;
+
+       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+               /* Context number = offset into the IT context array. */
+               index = ctx - ohci->it_context_list;
+               match = 0;
+               if (cycle >= 0)
+                       match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
+                               (cycle & 0x7fff) << 16;
+
+               /* Clear any stale event, then unmask this context's IRQ. */
+               reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
+               reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
+               context_run(&ctx->context, match);
+       } else {
+               index = ctx - ohci->ir_context_list;
+               control = IR_CONTEXT_ISOCH_HEADER;
+               if (ohci->use_dualbuffer)
+                       control |= IR_CONTEXT_DUAL_BUFFER_MODE;
+               /* ContextMatch: tag bits, sync field and channel number. */
+               match = (tags << 28) | (sync << 8) | ctx->base.channel;
+               if (cycle >= 0) {
+                       match |= (cycle & 0x07fff) << 12;
+                       control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
+               }
+
+               reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
+               reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
+               reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
+               context_run(&ctx->context, control);
+       }
+
+       return 0;
+}
+
+/*
+ * Stop an isochronous context: mask its interrupt, flush the posted MMIO
+ * write so the mask takes effect, then halt the DMA context itself.
+ */
+static int ohci_stop_iso(struct fw_iso_context *base)
+{
+       struct fw_ohci *ohci = fw_ohci(base->card);
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       int index;
+
+       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+               index = ctx - ohci->it_context_list;
+               reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
+       } else {
+               index = ctx - ohci->ir_context_list;
+               reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
+       }
+       /* Make sure the mask-clear reached the chip before stopping DMA. */
+       flush_writes(ohci);
+       context_stop(&ctx->context);
+
+       return 0;
+}
+
+/*
+ * Tear down an isochronous context: stop it, release its descriptor
+ * memory and header page, then return the context index (and, for
+ * receive, the channel bit) to the allocation pools under the lock.
+ */
+static void ohci_free_iso_context(struct fw_iso_context *base)
+{
+       struct fw_ohci *ohci = fw_ohci(base->card);
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       unsigned long flags;
+       int index;
+
+       ohci_stop_iso(base);
+       context_release(&ctx->context);
+       free_page((unsigned long)ctx->header);
+
+       spin_lock_irqsave(&ohci->lock, flags);
+
+       if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
+               index = ctx - ohci->it_context_list;
+               ohci->it_context_mask |= 1 << index;
+       } else {
+               index = ctx - ohci->ir_context_list;
+               ohci->ir_context_mask |= 1 << index;
+               ohci->ir_context_channels |= 1ULL << base->channel;
+       }
+
+       spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
+/*
+ * Queue one isochronous transmit packet as a chain of z + header_z
+ * descriptors: an immediate-key descriptor pair carrying the 8-byte iso
+ * header (unless p->skip), optionally one descriptor for the caller's
+ * extra header bytes, and one descriptor per payload page crossed.
+ */
+static int ohci_queue_iso_transmit(struct fw_iso_context *base,
+                                  struct fw_iso_packet *packet,
+                                  struct fw_iso_buffer *buffer,
+                                  unsigned long payload)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct descriptor *d, *last, *pd;
+       struct fw_iso_packet *p;
+       __le32 *header;
+       dma_addr_t d_bus, page_bus;
+       u32 z, header_z, payload_z, irq;
+       u32 payload_index, payload_end_index, next_page_index;
+       int page, end_page, i, length, offset;
+
+       /*
+        * FIXME: Cycle lost behavior should be configurable: lose
+        * packet, retransmit or terminate..
+        */
+
+       p = packet;
+       payload_index = payload;
+
+       /* z = descriptor count: 1 for skip, else 2 for the immediate pair. */
+       if (p->skip)
+               z = 1;
+       else
+               z = 2;
+       if (p->header_length > 0)
+               z++;
+
+       /* Determine the first page the payload isn't contained in. */
+       end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
+       if (p->payload_length > 0)
+               payload_z = end_page - (payload_index >> PAGE_SHIFT);
+       else
+               payload_z = 0;
+
+       z += payload_z;
+
+       /* Get header size in number of descriptors. */
+       header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
+
+       d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
+       if (d == NULL)
+               return -ENOMEM;
+
+       if (!p->skip) {
+               /* d[0] sends the 8 immediate header bytes stored in d[1]. */
+               d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
+               d[0].req_count = cpu_to_le16(8);
+
+               header = (__le32 *) &d[1];
+               header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
+                                       IT_HEADER_TAG(p->tag) |
+                                       IT_HEADER_TCODE(TCODE_STREAM_DATA) |
+                                       IT_HEADER_CHANNEL(ctx->base.channel) |
+                                       IT_HEADER_SPEED(ctx->base.speed));
+               header[1] =
+                       cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
+                                                         p->payload_length));
+       }
+
+       if (p->header_length > 0) {
+               /* Extra header bytes live in the header_z descriptors at d[z]. */
+               d[2].req_count    = cpu_to_le16(p->header_length);
+               d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
+               memcpy(&d[z], p->header, p->header_length);
+       }
+
+       /* One descriptor per page the payload touches. */
+       pd = d + z - payload_z;
+       payload_end_index = payload_index + p->payload_length;
+       for (i = 0; i < payload_z; i++) {
+               page               = payload_index >> PAGE_SHIFT;
+               offset             = payload_index & ~PAGE_MASK;
+               next_page_index    = (page + 1) << PAGE_SHIFT;
+               length             =
+                       min(next_page_index, payload_end_index) - payload_index;
+               pd[i].req_count    = cpu_to_le16(length);
+
+               page_bus = page_private(buffer->pages[page]);
+               pd[i].data_address = cpu_to_le32(page_bus + offset);
+
+               payload_index += length;
+       }
+
+       if (p->interrupt)
+               irq = DESCRIPTOR_IRQ_ALWAYS;
+       else
+               irq = DESCRIPTOR_NO_IRQ;
+
+       /* The final descriptor terminates the packet and carries status. */
+       last = z == 2 ? d : d + z - 1;
+       last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
+                                    DESCRIPTOR_STATUS |
+                                    DESCRIPTOR_BRANCH_ALWAYS |
+                                    irq);
+
+       context_append(&ctx->context, d, z, header_z);
+
+       return 0;
+}
+
+/*
+ * Queue an isochronous receive packet in dual-buffer mode: each
+ * db_descriptor splits incoming data into a first (header) buffer and a
+ * second (payload) buffer, one descriptor per payload page touched.
+ */
+static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
+                                            struct fw_iso_packet *packet,
+                                            struct fw_iso_buffer *buffer,
+                                            unsigned long payload)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct db_descriptor *db = NULL;
+       struct descriptor *d;
+       struct fw_iso_packet *p;
+       dma_addr_t d_bus, page_bus;
+       u32 z, header_z, length, rest;
+       int page, offset, packet_count, header_size;
+
+       /*
+        * FIXME: Cycle lost behavior should be configurable: lose
+        * packet, retransmit or terminate..
+        */
+
+       p = packet;
+       z = 2;
+
+       /*
+        * The OHCI controller puts the isochronous header and trailer in the
+        * buffer, so we need at least 8 bytes.
+        */
+       packet_count = p->header_length / ctx->base.header_size;
+       header_size = packet_count * max(ctx->base.header_size, (size_t)8);
+
+       /* Get header size in number of descriptors. */
+       header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+       page     = payload >> PAGE_SHIFT;
+       offset   = payload & ~PAGE_MASK;
+       rest     = p->payload_length;
+
+       /* FIXME: make packet-per-buffer/dual-buffer a context option */
+       while (rest > 0) {
+               d = context_get_descriptors(&ctx->context,
+                                           z + header_z, &d_bus);
+               if (d == NULL)
+                       return -ENOMEM;
+
+               db = (struct db_descriptor *) d;
+               db->control = cpu_to_le16(DESCRIPTOR_STATUS |
+                                         DESCRIPTOR_BRANCH_ALWAYS);
+               db->first_size =
+                   cpu_to_le16(max(ctx->base.header_size, (size_t)8));
+               /* A skip packet waits for sync and takes only one header. */
+               if (p->skip && rest == p->payload_length) {
+                       db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+                       db->first_req_count = db->first_size;
+               } else {
+                       db->first_req_count = cpu_to_le16(header_size);
+               }
+               db->first_res_count = db->first_req_count;
+               db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
+
+               if (p->skip && rest == p->payload_length)
+                       length = 4;
+               else if (offset + rest < PAGE_SIZE)
+                       length = rest;
+               else
+                       length = PAGE_SIZE - offset;
+
+               db->second_req_count = cpu_to_le16(length);
+               db->second_res_count = db->second_req_count;
+               page_bus = page_private(buffer->pages[page]);
+               db->second_buffer = cpu_to_le32(page_bus + offset);
+
+               /* Interrupt only on the descriptor that completes the packet. */
+               if (p->interrupt && length == rest)
+                       db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+               context_append(&ctx->context, d, z, header_z);
+               offset = (offset + length) & ~PAGE_MASK;
+               rest -= length;
+               if (offset == 0)
+                       page++;
+       }
+
+       return 0;
+}
+
+/*
+ * Queue isochronous receive in packet-per-buffer mode: for each expected
+ * packet, build one header descriptor followed by INPUT_MORE payload
+ * descriptors (one per page), the last rewritten to INPUT_LAST.
+ */
+static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
+                                       struct fw_iso_packet *packet,
+                                       struct fw_iso_buffer *buffer,
+                                       unsigned long payload)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       struct descriptor *d = NULL, *pd = NULL;
+       struct fw_iso_packet *p = packet;
+       dma_addr_t d_bus, page_bus;
+       u32 z, header_z, rest;
+       int i, j, length;
+       int page, offset, packet_count, header_size, payload_per_buffer;
+
+       /*
+        * The OHCI controller puts the isochronous header and trailer in the
+        * buffer, so we need at least 8 bytes.
+        */
+       packet_count = p->header_length / ctx->base.header_size;
+       header_size  = max(ctx->base.header_size, (size_t)8);
+
+       /* Get header size in number of descriptors. */
+       header_z = DIV_ROUND_UP(header_size, sizeof(*d));
+       page     = payload >> PAGE_SHIFT;
+       offset   = payload & ~PAGE_MASK;
+       payload_per_buffer = p->payload_length / packet_count;
+
+       for (i = 0; i < packet_count; i++) {
+               /* d points to the header descriptor */
+               z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
+               d = context_get_descriptors(&ctx->context,
+                               z + header_z, &d_bus);
+               if (d == NULL)
+                       return -ENOMEM;
+
+               d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
+                                             DESCRIPTOR_INPUT_MORE);
+               /* Wait for sync on the very first descriptor of a skip. */
+               if (p->skip && i == 0)
+                       d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
+               d->req_count    = cpu_to_le16(header_size);
+               d->res_count    = d->req_count;
+               d->transfer_status = 0;
+               /* The header lands in the trailing header_z descriptors. */
+               d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
+
+               rest = payload_per_buffer;
+               for (j = 1; j < z; j++) {
+                       pd = d + j;
+                       pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
+                                                 DESCRIPTOR_INPUT_MORE);
+
+                       if (offset + rest < PAGE_SIZE)
+                               length = rest;
+                       else
+                               length = PAGE_SIZE - offset;
+                       pd->req_count = cpu_to_le16(length);
+                       pd->res_count = pd->req_count;
+                       pd->transfer_status = 0;
+
+                       page_bus = page_private(buffer->pages[page]);
+                       pd->data_address = cpu_to_le32(page_bus + offset);
+
+                       offset = (offset + length) & ~PAGE_MASK;
+                       rest -= length;
+                       if (offset == 0)
+                               page++;
+               }
+               /* Last payload descriptor closes the packet and branches. */
+               pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
+                                         DESCRIPTOR_INPUT_LAST |
+                                         DESCRIPTOR_BRANCH_ALWAYS);
+               if (p->interrupt && i == packet_count - 1)
+                       pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
+
+               context_append(&ctx->context, d, z, header_z);
+       }
+
+       return 0;
+}
+
+/*
+ * Dispatch a queued iso packet to the transmit, dual-buffer receive or
+ * packet-per-buffer receive path, serialized by the controller lock.
+ */
+static int ohci_queue_iso(struct fw_iso_context *base,
+                         struct fw_iso_packet *packet,
+                         struct fw_iso_buffer *buffer,
+                         unsigned long payload)
+{
+       struct iso_context *ctx = container_of(base, struct iso_context, base);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ctx->context.ohci->lock, flags);
+       if (base->type == FW_ISO_CONTEXT_TRANSMIT)
+               ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
+       else if (ctx->context.ohci->use_dualbuffer)
+               ret = ohci_queue_iso_receive_dualbuffer(base, packet,
+                                                       buffer, payload);
+       else
+               ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
+                                                       buffer, payload);
+       spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
+
+       return ret;
+}
+
+/* fw_card_driver hooks the firewire core uses to drive this controller. */
+static const struct fw_card_driver ohci_driver = {
+       .enable                 = ohci_enable,
+       .update_phy_reg         = ohci_update_phy_reg,
+       .set_config_rom         = ohci_set_config_rom,
+       .send_request           = ohci_send_request,
+       .send_response          = ohci_send_response,
+       .cancel_packet          = ohci_cancel_packet,
+       .enable_phys_dma        = ohci_enable_phys_dma,
+       .get_bus_time           = ohci_get_bus_time,
+
+       .allocate_iso_context   = ohci_allocate_iso_context,
+       .free_iso_context       = ohci_free_iso_context,
+       .queue_iso              = ohci_queue_iso,
+       .start_iso              = ohci_start_iso,
+       .stop_iso               = ohci_stop_iso,
+};
+
+#ifdef CONFIG_PPC_PMAC
+/* Enable FireWire cable power and the link on PowerMac hardware. */
+static void ohci_pmac_on(struct pci_dev *dev)
+{
+       if (machine_is(powermac)) {
+               struct device_node *ofn = pci_device_to_OF_node(dev);
+
+               if (ofn) {
+                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
+                       pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
+               }
+       }
+}
+
+/* Disable the link first, then cut cable power (reverse of ohci_pmac_on). */
+static void ohci_pmac_off(struct pci_dev *dev)
+{
+       if (machine_is(powermac)) {
+               struct device_node *ofn = pci_device_to_OF_node(dev);
+
+               if (ofn) {
+                       pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
+                       pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
+               }
+       }
+}
+#else
+/* No-ops on non-PowerMac configurations. */
+#define ohci_pmac_on(dev)
+#define ohci_pmac_off(dev)
+#endif /* CONFIG_PPC_PMAC */
+
+static int __devinit pci_probe(struct pci_dev *dev,
+                              const struct pci_device_id *ent)
+{
+       struct fw_ohci *ohci;
+       u32 bus_options, max_receive, link_speed, version;
+       u64 guid;
+       int err;
+       size_t size;
+
+       ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
+       if (ohci == NULL) {
+               err = -ENOMEM;
+               goto fail;
+       }
+
+       fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
+
+       ohci_pmac_on(dev);
+
+       err = pci_enable_device(dev);
+       if (err) {
+               fw_error("Failed to enable OHCI hardware\n");
+               goto fail_free;
+       }
+
+       pci_set_master(dev);
+       pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
+       pci_set_drvdata(dev, ohci);
+
+       spin_lock_init(&ohci->lock);
+
+       tasklet_init(&ohci->bus_reset_tasklet,
+                    bus_reset_tasklet, (unsigned long)ohci);
+
+       err = pci_request_region(dev, 0, ohci_driver_name);
+       if (err) {
+               fw_error("MMIO resource unavailable\n");
+               goto fail_disable;
+       }
+
+       ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
+       if (ohci->registers == NULL) {
+               fw_error("Failed to remap registers\n");
+               err = -ENXIO;
+               goto fail_iomem;
+       }
+
+       version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+       ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
+
+/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
+#if !defined(CONFIG_X86_32)
+       /* dual-buffer mode is broken with descriptor addresses above 2G */
+       if (dev->vendor == PCI_VENDOR_ID_TI &&
+           dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
+               ohci->use_dualbuffer = false;
+#endif
+
+#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+       ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
+                            dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
+#endif
+       ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+
+       ar_context_init(&ohci->ar_request_ctx, ohci,
+                       OHCI1394_AsReqRcvContextControlSet);
+
+       ar_context_init(&ohci->ar_response_ctx, ohci,
+                       OHCI1394_AsRspRcvContextControlSet);
+
+       context_init(&ohci->at_request_ctx, ohci,
+                    OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+
+       context_init(&ohci->at_response_ctx, ohci,
+                    OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+
+       reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
+       ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+       reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
+       size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
+       ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+
+       reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+       ohci->ir_context_channels = ~0ULL;
+       ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+       reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+       size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
+       ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+
+       if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
+               err = -ENOMEM;
+               goto fail_contexts;
+       }
+
+       /* self-id dma buffer allocation */
+       ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
+                                              SELF_ID_BUF_SIZE,
+                                              &ohci->self_id_bus,
+                                              GFP_KERNEL);
+       if (ohci->self_id_cpu == NULL) {
+               err = -ENOMEM;
+               goto fail_contexts;
+       }
+
+       bus_options = reg_read(ohci, OHCI1394_BusOptions);
+       max_receive = (bus_options >> 12) & 0xf;
+       link_speed = bus_options & 0x7;
+       guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
+               reg_read(ohci, OHCI1394_GUIDLo);
+
+       err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
+       if (err)
+               goto fail_self_id;
+
+       fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
+                 dev_name(&dev->dev), version >> 16, version & 0xff);
+
+       return 0;
+
+ fail_self_id:
+       dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
+                         ohci->self_id_cpu, ohci->self_id_bus);
+ fail_contexts:
+       kfree(ohci->ir_context_list);
+       kfree(ohci->it_context_list);
+       context_release(&ohci->at_response_ctx);
+       context_release(&ohci->at_request_ctx);
+       ar_context_release(&ohci->ar_response_ctx);
+       ar_context_release(&ohci->ar_request_ctx);
+       pci_iounmap(dev, ohci->registers);
+ fail_iomem:
+       pci_release_region(dev, 0);
+ fail_disable:
+       pci_disable_device(dev);
+ fail_free:
+       kfree(&ohci->card);
+       ohci_pmac_off(dev);
+ fail:
+       if (err == -ENOMEM)
+               fw_error("Out of memory\n");
+
+       return err;
+}
+
+/*
+ * Tear down one controller on hot-unplug/driver unbind: mask all
+ * interrupts, unregister the card from the core, then release DMA
+ * buffers, contexts and PCI resources in reverse order of pci_probe().
+ */
+static void pci_remove(struct pci_dev *dev)
+{
+       struct fw_ohci *ohci;
+
+       ohci = pci_get_drvdata(dev);
+       reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+       flush_writes(ohci);
+       fw_core_remove_card(&ohci->card);
+
+       /*
+        * FIXME: Fail all pending packets here, now that the upper
+        * layers can't queue any more.
+        */
+
+       software_reset(ohci);
+       free_irq(dev->irq, ohci);
+
+       /* next_config_rom may alias config_rom; free it only once. */
+       if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 ohci->next_config_rom, ohci->next_config_rom_bus);
+       if (ohci->config_rom)
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 ohci->config_rom, ohci->config_rom_bus);
+       dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
+                         ohci->self_id_cpu, ohci->self_id_bus);
+       ar_context_release(&ohci->ar_request_ctx);
+       ar_context_release(&ohci->ar_response_ctx);
+       context_release(&ohci->at_request_ctx);
+       context_release(&ohci->at_response_ctx);
+       kfree(ohci->it_context_list);
+       kfree(ohci->ir_context_list);
+       pci_iounmap(dev, ohci->registers);
+       pci_release_region(dev, 0);
+       pci_disable_device(dev);
+       /*
+        * NOTE(review): freeing &ohci->card rather than ohci relies on
+        * card being the first member of struct fw_ohci -- confirm.
+        */
+       kfree(&ohci->card);
+       ohci_pmac_off(dev);
+
+       fw_notify("Removed fw-ohci device.\n");
+}
+
+#ifdef CONFIG_PM
+/*
+ * Suspend: quiesce the chip, drop the IRQ, save PCI config state and
+ * enter the requested power state.  A pci_set_power_state() failure is
+ * only logged; the function still returns 0 so suspend proceeds.
+ */
+static int pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+       struct fw_ohci *ohci = pci_get_drvdata(dev);
+       int err;
+
+       software_reset(ohci);
+       free_irq(dev->irq, ohci);
+       err = pci_save_state(dev);
+       if (err) {
+               fw_error("pci_save_state failed\n");
+               return err;
+       }
+       err = pci_set_power_state(dev, pci_choose_state(dev, state));
+       if (err)
+               fw_error("pci_set_power_state failed with %d\n", err);
+       ohci_pmac_off(dev);
+
+       return 0;
+}
+
+/*
+ * Resume: restore power and PCI config state, re-enable the device and
+ * re-run the full controller bring-up via ohci_enable().
+ */
+static int pci_resume(struct pci_dev *dev)
+{
+       struct fw_ohci *ohci = pci_get_drvdata(dev);
+       int err;
+
+       ohci_pmac_on(dev);
+       pci_set_power_state(dev, PCI_D0);
+       pci_restore_state(dev);
+       err = pci_enable_device(dev);
+       if (err) {
+               fw_error("pci_enable_device failed\n");
+               return err;
+       }
+
+       return ohci_enable(&ohci->card, NULL, 0);
+}
+#endif
+
+/* Bind to any PCI device advertising the FireWire OHCI class code. */
+static struct pci_device_id pci_table[] = {
+       { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
+       { }
+};
+
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+static struct pci_driver fw_ohci_pci_driver = {
+       .name           = ohci_driver_name,
+       .id_table       = pci_table,
+       .probe          = pci_probe,
+       .remove         = pci_remove,
+#ifdef CONFIG_PM
+       .resume         = pci_resume,
+       .suspend        = pci_suspend,
+#endif
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
+MODULE_LICENSE("GPL");
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
+MODULE_ALIAS("ohci1394");
+#endif
+
+/* Module entry point: register the PCI driver with the core. */
+static int __init fw_ohci_init(void)
+{
+       return pci_register_driver(&fw_ohci_pci_driver);
+}
+
+/* Module exit point: unbind all devices and unregister the driver. */
+static void __exit fw_ohci_cleanup(void)
+{
+       pci_unregister_driver(&fw_ohci_pci_driver);
+}
+
+module_init(fw_ohci_init);
+module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
new file mode 100644 (file)
index 0000000..ba492d8
--- /dev/null
@@ -0,0 +1,157 @@
+#ifndef _FIREWIRE_OHCI_H
+#define _FIREWIRE_OHCI_H
+
+/* OHCI register map */
+
+#define OHCI1394_Version                      0x000
+#define OHCI1394_GUID_ROM                     0x004
+#define OHCI1394_ATRetries                    0x008
+#define OHCI1394_CSRData                      0x00C
+#define OHCI1394_CSRCompareData               0x010
+#define OHCI1394_CSRControl                   0x014
+#define OHCI1394_ConfigROMhdr                 0x018
+#define OHCI1394_BusID                        0x01C
+#define OHCI1394_BusOptions                   0x020
+#define OHCI1394_GUIDHi                       0x024
+#define OHCI1394_GUIDLo                       0x028
+#define OHCI1394_ConfigROMmap                 0x034
+#define OHCI1394_PostedWriteAddressLo         0x038
+#define OHCI1394_PostedWriteAddressHi         0x03C
+#define OHCI1394_VendorID                     0x040
+#define OHCI1394_HCControlSet                 0x050
+#define OHCI1394_HCControlClear               0x054
+#define  OHCI1394_HCControl_BIBimageValid      0x80000000
+#define  OHCI1394_HCControl_noByteSwapData     0x40000000
+#define  OHCI1394_HCControl_programPhyEnable   0x00800000
+#define  OHCI1394_HCControl_aPhyEnhanceEnable  0x00400000
+#define  OHCI1394_HCControl_LPS                        0x00080000
+#define  OHCI1394_HCControl_postedWriteEnable  0x00040000
+#define  OHCI1394_HCControl_linkEnable         0x00020000
+#define  OHCI1394_HCControl_softReset          0x00010000
+#define OHCI1394_SelfIDBuffer                 0x064
+#define OHCI1394_SelfIDCount                  0x068
+#define  OHCI1394_SelfIDCount_selfIDError      0x80000000
+#define OHCI1394_IRMultiChanMaskHiSet         0x070
+#define OHCI1394_IRMultiChanMaskHiClear       0x074
+#define OHCI1394_IRMultiChanMaskLoSet         0x078
+#define OHCI1394_IRMultiChanMaskLoClear       0x07C
+#define OHCI1394_IntEventSet                  0x080
+#define OHCI1394_IntEventClear                0x084
+#define OHCI1394_IntMaskSet                   0x088
+#define OHCI1394_IntMaskClear                 0x08C
+#define OHCI1394_IsoXmitIntEventSet           0x090
+#define OHCI1394_IsoXmitIntEventClear         0x094
+#define OHCI1394_IsoXmitIntMaskSet            0x098
+#define OHCI1394_IsoXmitIntMaskClear          0x09C
+#define OHCI1394_IsoRecvIntEventSet           0x0A0
+#define OHCI1394_IsoRecvIntEventClear         0x0A4
+#define OHCI1394_IsoRecvIntMaskSet            0x0A8
+#define OHCI1394_IsoRecvIntMaskClear          0x0AC
+#define OHCI1394_InitialBandwidthAvailable    0x0B0
+#define OHCI1394_InitialChannelsAvailableHi   0x0B4
+#define OHCI1394_InitialChannelsAvailableLo   0x0B8
+#define OHCI1394_FairnessControl              0x0DC
+#define OHCI1394_LinkControlSet               0x0E0
+#define OHCI1394_LinkControlClear             0x0E4
+#define   OHCI1394_LinkControl_rcvSelfID       (1 << 9)
+#define   OHCI1394_LinkControl_rcvPhyPkt       (1 << 10)
+#define   OHCI1394_LinkControl_cycleTimerEnable        (1 << 20)
+#define   OHCI1394_LinkControl_cycleMaster     (1 << 21)
+#define   OHCI1394_LinkControl_cycleSource     (1 << 22)
+#define OHCI1394_NodeID                       0x0E8
+#define   OHCI1394_NodeID_idValid             0x80000000
+#define   OHCI1394_NodeID_nodeNumber          0x0000003f
+#define   OHCI1394_NodeID_busNumber           0x0000ffc0
+#define OHCI1394_PhyControl                   0x0EC
+#define   OHCI1394_PhyControl_Read(addr)       (((addr) << 8) | 0x00008000)
+#define   OHCI1394_PhyControl_ReadDone         0x80000000
+#define   OHCI1394_PhyControl_ReadData(r)      (((r) & 0x00ff0000) >> 16)
+#define   OHCI1394_PhyControl_Write(addr, data)        (((addr) << 8) | (data) | 0x00004000)
+#define   OHCI1394_PhyControl_WriteDone                0x00004000
+#define OHCI1394_IsochronousCycleTimer        0x0F0
+#define OHCI1394_AsReqFilterHiSet             0x100
+#define OHCI1394_AsReqFilterHiClear           0x104
+#define OHCI1394_AsReqFilterLoSet             0x108
+#define OHCI1394_AsReqFilterLoClear           0x10C
+#define OHCI1394_PhyReqFilterHiSet            0x110
+#define OHCI1394_PhyReqFilterHiClear          0x114
+#define OHCI1394_PhyReqFilterLoSet            0x118
+#define OHCI1394_PhyReqFilterLoClear          0x11C
+#define OHCI1394_PhyUpperBound                0x120
+
+#define OHCI1394_AsReqTrContextBase           0x180
+#define OHCI1394_AsReqTrContextControlSet     0x180
+#define OHCI1394_AsReqTrContextControlClear   0x184
+#define OHCI1394_AsReqTrCommandPtr            0x18C
+
+#define OHCI1394_AsRspTrContextBase           0x1A0
+#define OHCI1394_AsRspTrContextControlSet     0x1A0
+#define OHCI1394_AsRspTrContextControlClear   0x1A4
+#define OHCI1394_AsRspTrCommandPtr            0x1AC
+
+#define OHCI1394_AsReqRcvContextBase          0x1C0
+#define OHCI1394_AsReqRcvContextControlSet    0x1C0
+#define OHCI1394_AsReqRcvContextControlClear  0x1C4
+#define OHCI1394_AsReqRcvCommandPtr           0x1CC
+
+#define OHCI1394_AsRspRcvContextBase          0x1E0
+#define OHCI1394_AsRspRcvContextControlSet    0x1E0
+#define OHCI1394_AsRspRcvContextControlClear  0x1E4
+#define OHCI1394_AsRspRcvCommandPtr           0x1EC
+
+/* Isochronous transmit registers */
+#define OHCI1394_IsoXmitContextBase(n)           (0x200 + 16 * (n))
+#define OHCI1394_IsoXmitContextControlSet(n)     (0x200 + 16 * (n))
+#define OHCI1394_IsoXmitContextControlClear(n)   (0x204 + 16 * (n))
+#define OHCI1394_IsoXmitCommandPtr(n)            (0x20C + 16 * (n))
+
+/* Isochronous receive registers */
+#define OHCI1394_IsoRcvContextBase(n)         (0x400 + 32 * (n))
+#define OHCI1394_IsoRcvContextControlSet(n)   (0x400 + 32 * (n))
+#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n))
+#define OHCI1394_IsoRcvCommandPtr(n)          (0x40C + 32 * (n))
+#define OHCI1394_IsoRcvContextMatch(n)        (0x410 + 32 * (n))
+
+/* Interrupts Mask/Events */
+#define OHCI1394_reqTxComplete         0x00000001
+#define OHCI1394_respTxComplete                0x00000002
+#define OHCI1394_ARRQ                  0x00000004
+#define OHCI1394_ARRS                  0x00000008
+#define OHCI1394_RQPkt                 0x00000010
+#define OHCI1394_RSPkt                 0x00000020
+#define OHCI1394_isochTx               0x00000040
+#define OHCI1394_isochRx               0x00000080
+#define OHCI1394_postedWriteErr                0x00000100
+#define OHCI1394_lockRespErr           0x00000200
+#define OHCI1394_selfIDComplete                0x00010000
+#define OHCI1394_busReset              0x00020000
+#define OHCI1394_regAccessFail         0x00040000
+#define OHCI1394_phy                   0x00080000
+#define OHCI1394_cycleSynch            0x00100000
+#define OHCI1394_cycle64Seconds                0x00200000
+#define OHCI1394_cycleLost             0x00400000
+#define OHCI1394_cycleInconsistent     0x00800000
+#define OHCI1394_unrecoverableError    0x01000000
+#define OHCI1394_cycleTooLong          0x02000000
+#define OHCI1394_phyRegRcvd            0x04000000
+#define OHCI1394_masterIntEnable       0x80000000
+
+#define OHCI1394_evt_no_status         0x0
+#define OHCI1394_evt_long_packet       0x2
+#define OHCI1394_evt_missing_ack       0x3
+#define OHCI1394_evt_underrun          0x4
+#define OHCI1394_evt_overrun           0x5
+#define OHCI1394_evt_descriptor_read   0x6
+#define OHCI1394_evt_data_read         0x7
+#define OHCI1394_evt_data_write                0x8
+#define OHCI1394_evt_bus_reset         0x9
+#define OHCI1394_evt_timeout           0xa
+#define OHCI1394_evt_tcode_err         0xb
+#define OHCI1394_evt_reserved_b                0xc
+#define OHCI1394_evt_reserved_c                0xd
+#define OHCI1394_evt_unknown           0xe
+#define OHCI1394_evt_flushed           0xf
+
+#define OHCI1394_phy_tcode             0xe
+
+#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
new file mode 100644 (file)
index 0000000..24c4563
--- /dev/null
@@ -0,0 +1,1656 @@
+/*
+ * SBP2 driver (SCSI over IEEE1394)
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * The basic structure of this driver is based on the old storage driver,
+ * drivers/ieee1394/sbp2.c, originally written by
+ *     James Goodwin <jamesg@filanet.com>
+ * with later contributions and ongoing maintenance from
+ *     Ben Collins <bcollins@debian.org>,
+ *     Stefan Richter <stefanr@s5r6.in-berlin.de>
+ * and many others.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+#include <linux/workqueue.h>
+
+#include <asm/byteorder.h>
+#include <asm/system.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+/*
+ * So far only bridges from Oxford Semiconductor are known to support
+ * concurrent logins. Depending on firmware, four or two concurrent logins
+ * are possible on OXFW911 and newer Oxsemi bridges.
+ *
+ * Concurrent logins are useful together with cluster filesystems.
+ */
+/* Must be bool, not int: module_param(..., bool, ...) requires a bool variable. */
+static bool sbp2_param_exclusive_login = 1;
+module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
+MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
+                "(default = Y, use N for concurrent initiators)");
+
+/*
+ * Flags for firmware oddities
+ *
+ * - 128kB max transfer
+ *   Limit transfer size. Necessary for some old bridges.
+ *
+ * - 36 byte inquiry
+ *   When scsi_mod probes the device, let the inquiry command look like that
+ *   from MS Windows.
+ *
+ * - skip mode page 8
+ *   Suppress sending of mode_sense for mode page 8 if the device pretends to
+ *   support the SCSI Primary Block commands instead of Reduced Block Commands.
+ *
+ * - fix capacity
+ *   Tell sd_mod to correct the last sector number reported by read_capacity.
+ *   Avoids access beyond actual disk limits on devices with an off-by-one bug.
+ *   Don't use this with devices which don't have this bug.
+ *
+ * - delay inquiry
+ *   Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
+ *
+ * - power condition
+ *   Set the power condition field in the START STOP UNIT commands sent by
+ *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
+ *   Some disks need this to spin down or to resume properly.
+ *
+ * - override internal blacklist
+ *   Instead of adding to the built-in blacklist, use only the workarounds
+ *   specified in the module load parameter.
+ *   Useful if a blacklist entry interfered with a non-broken device.
+ */
+#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
+#define SBP2_WORKAROUND_INQUIRY_36     0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8   0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY   0x8
+#define SBP2_WORKAROUND_DELAY_INQUIRY  0x10
+#define SBP2_INQUIRY_DELAY             12 /* seconds; not a workaround flag */
+#define SBP2_WORKAROUND_POWER_CONDITION        0x20
+#define SBP2_WORKAROUND_OVERRIDE       0x100
+
+/* Bitwise OR of the SBP2_WORKAROUND_* flags above, settable at load time. */
+static int sbp2_param_workarounds;
+module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
+MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
+       ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
+       ", 36 byte inquiry = "    __stringify(SBP2_WORKAROUND_INQUIRY_36)
+       ", skip mode page 8 = "   __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
+       ", fix capacity = "       __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+       ", delay inquiry = "      __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
+       ", set power condition in start stop unit = "
+                                 __stringify(SBP2_WORKAROUND_POWER_CONDITION)
+       ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
+       ", or a combination)");
+
+/* I don't know why the SCSI stack doesn't define something like this... */
+/* (Signature of the SCSI midlayer completion callback; stored per command orb.) */
+typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
+
+static const char sbp2_driver_name[] = "sbp2";
+
+/*
+ * We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
+ * and one struct scsi_device per sbp2_logical_unit.
+ */
+struct sbp2_logical_unit {
+       struct sbp2_target *tgt;        /* the unit directory we belong to */
+       struct list_head link;          /* node in tgt->lu_list */
+       struct fw_address_handler address_handler; /* the status FIFO */
+       struct list_head orb_list;      /* ORBs sent but not yet completed */
+
+       u64 command_block_agent_address;
+       u16 lun;
+       int login_id;                   /* INVALID_LOGIN_ID until logged in */
+
+       /*
+        * The generation is updated once we've logged in or reconnected
+        * to the logical unit.  Thus, I/O to the device will automatically
+        * fail and get retried if it happens in a window where the device
+        * is not ready, e.g. after a bus reset but before we reconnect.
+        */
+       int generation;
+       int retries;
+       struct delayed_work work;       /* runs sbp2_login or sbp2_reconnect */
+       bool has_sdev;                  /* true once a scsi_device was added */
+       bool blocked;                   /* this LU voted to block the target */
+};
+
+/*
+ * We create one struct sbp2_target per IEEE 1212 Unit Directory
+ * and one struct Scsi_Host per sbp2_target.
+ */
+struct sbp2_target {
+       struct kref kref;               /* released via sbp2_release_target() */
+       struct fw_unit *unit;
+       const char *bus_id;             /* for log messages */
+       struct list_head lu_list;       /* of struct sbp2_logical_unit */
+
+       u64 management_agent_address;
+       u64 guid;
+       int directory_id;
+       int node_id;
+       int address_high;
+       unsigned int workarounds;       /* SBP2_WORKAROUND_* flags in effect */
+       unsigned int mgt_orb_timeout;   /* ms; used for login ORBs */
+       unsigned int max_payload;
+
+       int dont_block; /* counter for each logical unit */
+       int blocked;    /* ditto */
+};
+
+/* The fw_device that is the parent of this target's unit on the bus. */
+static struct fw_device *target_device(struct sbp2_target *tgt)
+{
+       struct fw_unit *unit = tgt->unit;
+
+       return fw_parent_device(unit);
+}
+
+/* Impossible login_id, to detect logout attempt before successful login */
+#define INVALID_LOGIN_ID 0x10000
+
+/*
+ * Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
+ * provided in the config rom. Most devices do provide a value, which
+ * we'll use for login management orbs, but with some sane limits.
+ */
+#define SBP2_MIN_LOGIN_ORB_TIMEOUT     5000U   /* Timeout in ms */
+#define SBP2_MAX_LOGIN_ORB_TIMEOUT     40000U  /* Timeout in ms */
+#define SBP2_ORB_TIMEOUT               2000U   /* Timeout in ms */
+#define SBP2_ORB_NULL                  0x80000000 /* "null ORB pointer" flag */
+#define SBP2_RETRY_LIMIT               0xf             /* 15 retries */
+#define SBP2_CYCLE_LIMIT               (0xc8 << 12)    /* 200 125us cycles */
+
+/*
+ * The default maximum s/g segment size of a FireWire controller is
+ * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
+ * be quadlet-aligned, we set the length limit to 0xffff & ~3.
+ */
+#define SBP2_MAX_SEG_SIZE              0xfffc
+
+/* Unit directory keys */
+#define SBP2_CSR_UNIT_CHARACTERISTICS  0x3a
+#define SBP2_CSR_FIRMWARE_REVISION     0x3c
+#define SBP2_CSR_LOGICAL_UNIT_NUMBER   0x14
+#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY        0xd4
+
+/* Management orb opcodes */
+#define SBP2_LOGIN_REQUEST             0x0
+#define SBP2_QUERY_LOGINS_REQUEST      0x1
+#define SBP2_RECONNECT_REQUEST         0x3
+#define SBP2_SET_PASSWORD_REQUEST      0x4
+#define SBP2_LOGOUT_REQUEST            0x7
+#define SBP2_ABORT_TASK_REQUEST                0xb
+#define SBP2_ABORT_TASK_SET            0xc
+#define SBP2_LOGICAL_UNIT_RESET                0xe
+#define SBP2_TARGET_RESET_REQUEST      0xf
+
+/* Offsets for command block agent registers */
+#define SBP2_AGENT_STATE               0x00
+#define SBP2_AGENT_RESET               0x04
+#define SBP2_ORB_POINTER               0x08
+#define SBP2_DOORBELL                  0x10
+#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14
+
+/* Status write response codes */
+#define SBP2_STATUS_REQUEST_COMPLETE   0x0
+#define SBP2_STATUS_TRANSPORT_FAILURE  0x1
+#define SBP2_STATUS_ILLEGAL_REQUEST    0x2
+#define SBP2_STATUS_VENDOR_DEPENDENT   0x3
+
+/* Field accessors for the first two quadlets of a status block
+   (v is a struct sbp2_status, already converted to host byte order) */
+#define STATUS_GET_ORB_HIGH(v)         ((v).status & 0xffff)
+#define STATUS_GET_SBP_STATUS(v)       (((v).status >> 16) & 0xff)
+#define STATUS_GET_LEN(v)              (((v).status >> 24) & 0x07)
+#define STATUS_GET_DEAD(v)             (((v).status >> 27) & 0x01)
+#define STATUS_GET_RESPONSE(v)         (((v).status >> 28) & 0x03)
+#define STATUS_GET_SOURCE(v)           (((v).status >> 30) & 0x03)
+#define STATUS_GET_ORB_LOW(v)          ((v).orb_low)
+#define STATUS_GET_DATA(v)             ((v).data)
+
+/* Host-order copy of a received SBP-2 status block (see STATUS_GET_* above) */
+struct sbp2_status {
+       u32 status;     /* first quadlet: src, resp, dead, len, sbp_status, orb_high */
+       u32 orb_low;    /* low 32 bits of the completed ORB's bus address */
+       u8 data[24];    /* remaining, command-set-dependent status payload */
+};
+
+/* 64-bit bus address as transmitted on the wire: big-endian high/low halves */
+struct sbp2_pointer {
+       __be32 high;
+       __be32 low;
+};
+
+/* Common head of every ORB we send; embedded in management and command ORBs */
+struct sbp2_orb {
+       struct fw_transaction t;        /* the ORB-pointer write transaction */
+       struct kref kref;               /* freed via free_orb() */
+       dma_addr_t request_bus;         /* bus address of the mapped request */
+       int rcode;                      /* result of the ORB-pointer write */
+       struct sbp2_pointer pointer;    /* payload of the ORB-pointer write */
+       void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
+       struct list_head link;          /* node in lu->orb_list while pending */
+};
+
+/* Bit fields of the management ORB 'misc' and 'length' quadlets */
+#define MANAGEMENT_ORB_LUN(v)                  ((v))
+#define MANAGEMENT_ORB_FUNCTION(v)             ((v) << 16)
+#define MANAGEMENT_ORB_RECONNECT(v)            ((v) << 20)
+#define MANAGEMENT_ORB_EXCLUSIVE(v)            ((v) ? 1 << 28 : 0)
+#define MANAGEMENT_ORB_REQUEST_FORMAT(v)       ((v) << 29)
+/* 1U: left-shifting a signed 1 into the sign bit is undefined behavior */
+#define MANAGEMENT_ORB_NOTIFY                  (1U << 31)
+
+#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)      ((v))
+#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)      ((v) << 16)
+
+/* A management ORB (login, logout, reconnect, ...) plus its bookkeeping */
+struct sbp2_management_orb {
+       struct sbp2_orb base;
+       struct {                        /* wire format, all fields big-endian */
+               struct sbp2_pointer password;
+               struct sbp2_pointer response;
+               __be32 misc;            /* MANAGEMENT_ORB_* bits */
+               __be32 length;
+               struct sbp2_pointer status_fifo;
+       } request;
+       __be32 response[4];             /* DMA-mapped response buffer */
+       dma_addr_t response_bus;
+       struct completion done;         /* completed by complete_management_orb() */
+       struct sbp2_status status;      /* copy of the received status block */
+};
+
+/* Response to a login request; layout per SBP-2 — field use is elsewhere,
+   so semantics of reconnect_hold not asserted here */
+struct sbp2_login_response {
+       __be32 misc;
+       struct sbp2_pointer command_block_agent;
+       __be32 reconnect_hold;
+};
+/* Bit fields of the command ORB 'misc' quadlet */
+#define COMMAND_ORB_DATA_SIZE(v)       ((v))
+#define COMMAND_ORB_PAGE_SIZE(v)       ((v) << 16)
+#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
+#define COMMAND_ORB_MAX_PAYLOAD(v)     ((v) << 20)
+#define COMMAND_ORB_SPEED(v)           ((v) << 24)
+#define COMMAND_ORB_DIRECTION          ((1) << 27)
+#define COMMAND_ORB_REQUEST_FORMAT(v)  ((v) << 29)
+/* 1U: left-shifting a signed 1 into the sign bit is undefined behavior */
+#define COMMAND_ORB_NOTIFY             (1U << 31)
+
+/* A SCSI command ORB plus its bookkeeping */
+struct sbp2_command_orb {
+       struct sbp2_orb base;
+       struct {                        /* wire format, big-endian */
+               struct sbp2_pointer next;
+               struct sbp2_pointer data_descriptor;
+               __be32 misc;            /* COMMAND_ORB_* bits */
+               u8 command_block[12];   /* the SCSI CDB */
+       } request;
+       struct scsi_cmnd *cmd;
+       scsi_done_fn_t done;            /* midlayer completion callback */
+       struct sbp2_logical_unit *lu;
+
+       /* assumed 8-byte alignment requirement of SBP-2 page table entries
+          — TODO confirm against the spec */
+       struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
+       dma_addr_t page_table_bus;
+};
+
+#define SBP2_ROM_VALUE_WILDCARD ~0         /* match all */
+#define SBP2_ROM_VALUE_MISSING  0xff000000 /* not present in the unit dir. */
+
+/*
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best
+ * indicator for the type of bridge chip of a device.  It yields a few
+ * false positives but this did not break correctly behaving devices
+ * so far.
+ */
+static const struct {
+       u32 firmware_revision;
+       u32 model;                      /* or SBP2_ROM_VALUE_WILDCARD */
+       unsigned int workarounds;       /* SBP2_WORKAROUND_* flags to apply */
+} sbp2_workarounds_table[] = {
+       /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
+               .firmware_revision      = 0x002800,
+               .model                  = 0x001010,
+               .workarounds            = SBP2_WORKAROUND_INQUIRY_36 |
+                                         SBP2_WORKAROUND_MODE_SENSE_8 |
+                                         SBP2_WORKAROUND_POWER_CONDITION,
+       },
+       /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
+               .firmware_revision      = 0x002800,
+               .model                  = 0x000000,
+               .workarounds            = SBP2_WORKAROUND_DELAY_INQUIRY |
+                                         SBP2_WORKAROUND_POWER_CONDITION,
+       },
+       /* Initio bridges, actually only needed for some older ones */ {
+               .firmware_revision      = 0x000200,
+               .model                  = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_INQUIRY_36,
+       },
+       /* PL-3507 bridge with Prolific firmware */ {
+               .firmware_revision      = 0x012800,
+               .model                  = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_POWER_CONDITION,
+       },
+       /* Symbios bridge */ {
+               .firmware_revision      = 0xa0b800,
+               .model                  = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
+       },
+       /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
+               .firmware_revision      = 0x002600,
+               .model                  = SBP2_ROM_VALUE_WILDCARD,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS,
+       },
+       /*
+        * iPod 2nd generation: needs 128k max transfer size workaround
+        * iPod 3rd generation: needs fix capacity workaround
+        */
+       {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000000,
+               .workarounds            = SBP2_WORKAROUND_128K_MAX_TRANS |
+                                         SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod 4th generation */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000021,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod mini, first model id */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000022,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod mini, second model id */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x000023,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       },
+       /* iPod Photo */ {
+               .firmware_revision      = 0x0a2700,
+               .model                  = 0x00007e,
+               .workarounds            = SBP2_WORKAROUND_FIX_CAPACITY,
+       }
+};
+
+/* kref release function for struct sbp2_orb: last reference is gone. */
+static void free_orb(struct kref *kref)
+{
+       kfree(container_of(kref, struct sbp2_orb, kref));
+}
+
+/*
+ * Address handler for the logical unit's status FIFO.  The target writes
+ * SBP-2 status blocks here to signal ORB completion.  A matching pending
+ * ORB (by bus address) is unlinked, its callback run, and the orb-list
+ * reference taken in sbp2_send_orb() dropped.
+ */
+static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
+                             int tcode, int destination, int source,
+                             int generation, int speed,
+                             unsigned long long offset,
+                             void *payload, size_t length, void *callback_data)
+{
+       struct sbp2_logical_unit *lu = callback_data;
+       struct sbp2_orb *orb;
+       struct sbp2_status status;
+       size_t header_size;
+       unsigned long flags;
+
+       /* A status block is a block write of up to sizeof(status) bytes. */
+       if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
+           length == 0 || length > sizeof(status)) {
+               fw_send_response(card, request, RCODE_TYPE_ERROR);
+               return;
+       }
+
+       /* Byte-swap the two header quadlets; copy the rest verbatim. */
+       header_size = min(length, 2 * sizeof(u32));
+       fw_memcpy_from_be32(&status, payload, header_size);
+       if (length > header_size)
+               memcpy(status.data, payload + 8, length - header_size);
+       if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
+               fw_notify("non-orb related status write, not handled\n");
+               fw_send_response(card, request, RCODE_COMPLETE);
+               return;
+       }
+
+       /* Lookup the orb corresponding to this status write. */
+       spin_lock_irqsave(&card->lock, flags);
+       list_for_each_entry(orb, &lu->orb_list, link) {
+               if (STATUS_GET_ORB_HIGH(status) == 0 &&
+                   STATUS_GET_ORB_LOW(status) == orb->request_bus) {
+                       orb->rcode = RCODE_COMPLETE;
+                       list_del(&orb->link);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       if (&orb->link != &lu->orb_list) {
+               orb->callback(orb, &status);
+               /* Drop the orb-list reference taken in sbp2_send_orb(). */
+               kref_put(&orb->kref, free_orb);
+       } else {
+               /*
+                * No match: do NOT kref_put here.  After a full traversal
+                * 'orb' is a bogus pointer derived from the list head; the
+                * old unconditional kref_put() dereferenced invalid memory.
+                */
+               fw_error("status write for unknown orb\n");
+       }
+
+       fw_send_response(card, request, RCODE_COMPLETE);
+}
+
+/*
+ * Completion callback of the ORB-pointer write started in sbp2_send_orb().
+ * Drops the transaction-callback reference taken there.
+ */
+static void complete_transaction(struct fw_card *card, int rcode,
+                                void *payload, size_t length, void *data)
+{
+       struct sbp2_orb *orb = data;
+       unsigned long flags;
+
+       /*
+        * This is a little tricky.  We can get the status write for
+        * the orb before we get this callback.  The status write
+        * handler above will assume the orb pointer transaction was
+        * successful and set the rcode to RCODE_COMPLETE for the orb.
+        * So this callback only sets the rcode if it hasn't already
+        * been set and only does the cleanup if the transaction
+        * failed and we didn't already get a status write.
+        */
+       spin_lock_irqsave(&card->lock, flags);
+
+       if (orb->rcode == -1)
+               orb->rcode = rcode;
+       if (orb->rcode != RCODE_COMPLETE) {
+               /* Failed: unlink and complete the orb ourselves. */
+               list_del(&orb->link);
+               spin_unlock_irqrestore(&card->lock, flags);
+               orb->callback(orb, NULL);
+       } else {
+               spin_unlock_irqrestore(&card->lock, flags);
+       }
+
+       /* Drop the transaction-callback reference from sbp2_send_orb(). */
+       kref_put(&orb->kref, free_orb);
+}
+
+/*
+ * Queues the orb on lu->orb_list and writes its bus address to the agent
+ * register at @offset.  Completion is signalled either by a status write
+ * (sbp2_status_write) or by complete_transaction on failure.
+ */
+static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
+                         int node_id, int generation, u64 offset)
+{
+       struct fw_device *device = target_device(lu->tgt);
+       unsigned long flags;
+
+       /* ORB bus address, as a big-endian high/low pair (high is 0 here). */
+       orb->pointer.high = 0;
+       orb->pointer.low = cpu_to_be32(orb->request_bus);
+
+       spin_lock_irqsave(&device->card->lock, flags);
+       list_add_tail(&orb->link, &lu->orb_list);
+       spin_unlock_irqrestore(&device->card->lock, flags);
+
+       /* Take a ref for the orb list and for the transaction callback. */
+       kref_get(&orb->kref);
+       kref_get(&orb->kref);
+
+       fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
+                       node_id, generation, device->max_speed, offset,
+                       &orb->pointer, sizeof(orb->pointer),
+                       complete_transaction, orb);
+}
+
+/*
+ * Cancels all still-pending ORBs of @lu.  Returns 0 if at least one ORB
+ * was pending (i.e. had not completed), -ENOENT if the list was empty.
+ */
+static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = target_device(lu->tgt);
+       struct sbp2_orb *orb, *next;
+       struct list_head list;
+       unsigned long flags;
+       int retval = -ENOENT;
+
+       /* Detach the whole pending list under the lock, then work on it. */
+       INIT_LIST_HEAD(&list);
+       spin_lock_irqsave(&device->card->lock, flags);
+       list_splice_init(&lu->orb_list, &list);
+       spin_unlock_irqrestore(&device->card->lock, flags);
+
+       list_for_each_entry_safe(orb, next, &list, link) {
+               retval = 0;
+               /* 0: fw_cancel_transaction already ran complete_transaction
+                * (presumably — confirm against fw_cancel_transaction). */
+               if (fw_cancel_transaction(device->card, &orb->t) == 0)
+                       continue;
+
+               /* Transaction already finished: complete the orb here. */
+               orb->rcode = RCODE_CANCELLED;
+               orb->callback(orb, NULL);
+       }
+
+       return retval;
+}
+
+/* ORB callback for management ORBs: record the status block (if any)
+ * and wake up the waiter in sbp2_send_management_orb(). */
+static void complete_management_orb(struct sbp2_orb *base_orb,
+                                   struct sbp2_status *status)
+{
+       struct sbp2_management_orb *mgmt_orb =
+               container_of(base_orb, struct sbp2_management_orb, base);
+
+       if (status != NULL)
+               mgmt_orb->status = *status;
+       complete(&mgmt_orb->done);
+}
+
+/*
+ * Builds and sends a management ORB (@function is one of the SBP2_*_REQUEST
+ * opcodes) to the target's management agent and waits for its status.
+ *
+ * @lun_or_login_id: the LUN for a login request, the login ID otherwise.
+ * @response: if non-NULL, receives a copy of the response buffer.
+ *
+ * Returns 0 on success, -ENOMEM on allocation/mapping failure, -EIO on
+ * timeout, transport failure, or a non-zero SBP status.
+ */
+static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
+                                   int generation, int function,
+                                   int lun_or_login_id, void *response)
+{
+       struct fw_device *device = target_device(lu->tgt);
+       struct sbp2_management_orb *orb;
+       unsigned int timeout;
+       int retval = -ENOMEM;
+
+       /* Nothing to log out from if the device is already gone. */
+       if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
+               return 0;
+
+       orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+       if (orb == NULL)
+               return -ENOMEM;
+
+       kref_init(&orb->base.kref);
+       orb->response_bus =
+               dma_map_single(device->card->device, &orb->response,
+                              sizeof(orb->response), DMA_FROM_DEVICE);
+       if (dma_mapping_error(device->card->device, orb->response_bus))
+               goto fail_mapping_response;
+
+       orb->request.response.high = 0;
+       orb->request.response.low  = cpu_to_be32(orb->response_bus);
+
+       orb->request.misc = cpu_to_be32(
+               MANAGEMENT_ORB_NOTIFY |
+               MANAGEMENT_ORB_FUNCTION(function) |
+               MANAGEMENT_ORB_LUN(lun_or_login_id));
+       orb->request.length = cpu_to_be32(
+               MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
+
+       /* Tell the target where to write the status block. */
+       orb->request.status_fifo.high =
+               cpu_to_be32(lu->address_handler.offset >> 32);
+       orb->request.status_fifo.low  =
+               cpu_to_be32(lu->address_handler.offset);
+
+       if (function == SBP2_LOGIN_REQUEST) {
+               /* Ask for 2^2 == 4 seconds reconnect grace period */
+               orb->request.misc |= cpu_to_be32(
+                       MANAGEMENT_ORB_RECONNECT(2) |
+                       MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
+               timeout = lu->tgt->mgt_orb_timeout;
+       } else {
+               timeout = SBP2_ORB_TIMEOUT;
+       }
+
+       init_completion(&orb->done);
+       orb->base.callback = complete_management_orb;
+
+       orb->base.request_bus =
+               dma_map_single(device->card->device, &orb->request,
+                              sizeof(orb->request), DMA_TO_DEVICE);
+       if (dma_mapping_error(device->card->device, orb->base.request_bus))
+               goto fail_mapping_request;
+
+       sbp2_send_orb(&orb->base, lu, node_id, generation,
+                     lu->tgt->management_agent_address);
+
+       /* Woken by complete_management_orb(), or the timeout expires. */
+       wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));
+
+       retval = -EIO;
+       /* 0 from sbp2_cancel_orbs: the orb was still pending => timed out. */
+       if (sbp2_cancel_orbs(lu) == 0) {
+               fw_error("%s: orb reply timed out, rcode=0x%02x\n",
+                        lu->tgt->bus_id, orb->base.rcode);
+               goto out;
+       }
+
+       if (orb->base.rcode != RCODE_COMPLETE) {
+               fw_error("%s: management write failed, rcode 0x%02x\n",
+                        lu->tgt->bus_id, orb->base.rcode);
+               goto out;
+       }
+
+       if (STATUS_GET_RESPONSE(orb->status) != 0 ||
+           STATUS_GET_SBP_STATUS(orb->status) != 0) {
+               fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
+                        STATUS_GET_RESPONSE(orb->status),
+                        STATUS_GET_SBP_STATUS(orb->status));
+               goto out;
+       }
+
+       retval = 0;
+ out:
+       /* Unwind in reverse order of setup; response is copied even on error
+        * so callers can inspect partial results. */
+       dma_unmap_single(device->card->device, orb->base.request_bus,
+                        sizeof(orb->request), DMA_TO_DEVICE);
+ fail_mapping_request:
+       dma_unmap_single(device->card->device, orb->response_bus,
+                        sizeof(orb->response), DMA_FROM_DEVICE);
+ fail_mapping_response:
+       if (response)
+               memcpy(response, orb->response, sizeof(orb->response));
+       kref_put(&orb->base.kref, free_orb);
+
+       return retval;
+}
+
+/* Synchronously write a zero quadlet to the command block agent's
+ * AGENT_RESET register. */
+static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = target_device(lu->tgt);
+       __be32 zero = 0;
+
+       fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
+                          lu->tgt->node_id, lu->generation, device->max_speed,
+                          lu->command_block_agent_address + SBP2_AGENT_RESET,
+                          &zero, sizeof(zero));
+}
+
+/* Completion of the fire-and-forget agent reset: free the transaction
+ * allocated in sbp2_agent_reset_no_wait(). */
+static void complete_agent_reset_write_no_wait(struct fw_card *card,
+               int rcode, void *payload, size_t length, void *data)
+{
+       struct fw_transaction *t = data;
+
+       kfree(t);
+}
+
+/*
+ * Asynchronous variant of sbp2_agent_reset(): does not wait for the write
+ * to finish; the transaction is freed in its completion callback.
+ * Allocation failure is silently ignored (best effort).
+ */
+static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = target_device(lu->tgt);
+       struct fw_transaction *t;
+       /* static is fine: d is never written, always the zero quadlet */
+       static __be32 d;
+
+       t = kmalloc(sizeof(*t), GFP_ATOMIC);
+       if (t == NULL)
+               return;
+
+       fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
+                       lu->tgt->node_id, lu->generation, device->max_speed,
+                       lu->command_block_agent_address + SBP2_AGENT_RESET,
+                       &d, sizeof(d), complete_agent_reset_write_no_wait, t);
+}
+
+/*
+ * Re-enable blocking of lu->tgt by decrementing dont_block.
+ *
+ * Deliberately lockless: all callers of sbp2_allow_block() and of
+ * sbp2_unblock() are serialized against each other, and a stale value
+ * observed by sbp2_conditionally_block() is harmless — it merely misses
+ * its first opportunity to block.
+ */
+static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
+{
+       lu->tgt->dont_block--;
+}
+
+/*
+ * Blocks lu->tgt if all of the following conditions are met:
+ *   - Login, INQUIRY, and high-level SCSI setup of all of the target's
+ *     logical units have been finished (indicated by dont_block == 0).
+ *   - lu->generation is stale.
+ *
+ * Note, scsi_block_requests() must be called while holding card->lock,
+ * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
+ * unblock the target.
+ */
+static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
+{
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_card *card = target_device(tgt)->card;
+       /* tgt lives in shost->hostdata, so recover the Scsi_Host from it */
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+
+       spin_lock_irqsave(&card->lock, flags);
+       if (!tgt->dont_block && !lu->blocked &&
+           lu->generation != card->generation) {
+               lu->blocked = true;
+               /* Block the host only on the first LU that votes to block. */
+               if (++tgt->blocked == 1)
+                       scsi_block_requests(shost);
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/*
+ * Unblocks lu->tgt as soon as all its logical units can be unblocked.
+ * Note, it is harmless to run scsi_unblock_requests() outside the
+ * card->lock protected section.  On the other hand, running it inside
+ * the section might clash with shost->host_lock.
+ */
+static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
+{
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_card *card = target_device(tgt)->card;
+       /* tgt lives in shost->hostdata, so recover the Scsi_Host from it */
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+       bool unblock = false;
+
+       spin_lock_irqsave(&card->lock, flags);
+       if (lu->blocked && lu->generation == card->generation) {
+               lu->blocked = false;
+               /* Unblock the host once the last blocked LU withdraws. */
+               unblock = --tgt->blocked == 0;
+       }
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       if (unblock)
+               scsi_unblock_requests(shost);
+}
+
+/*
+ * Prevents any future blocking of tgt and lifts a current block.
+ * scsi_unblock_requests() is intentionally called after dropping
+ * card->lock: running it inside the locked section could clash with
+ * shost->host_lock, and running it outside is harmless.
+ */
+static void sbp2_unblock(struct sbp2_target *tgt)
+{
+       struct fw_card *card = target_device(tgt)->card;
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       unsigned long flags;
+
+       /* Raise dont_block under the lock so sbp2_conditionally_block()
+        * can no longer trigger. */
+       spin_lock_irqsave(&card->lock, flags);
+       tgt->dont_block++;
+       spin_unlock_irqrestore(&card->lock, flags);
+
+       scsi_unblock_requests(shost);
+}
+
+/* Convert a 16-bit SBP-2 LUN into the SCSI midlayer's integer LUN
+ * representation via an 8-byte struct scsi_lun (remaining bytes zero). */
+static int sbp2_lun2int(u16 lun)
+{
+       struct scsi_lun eight_bytes_lun = {
+               .scsi_lun = { [0] = (lun >> 8) & 0xff, [1] = lun & 0xff },
+       };
+
+       return scsilun_to_int(&eight_bytes_lun);
+}
+
+/*
+ * kref release function for struct sbp2_target: removes all SCSI devices,
+ * logs out of every logical unit that managed to log in, tears down the
+ * status FIFO handlers, and drops the host/unit/device references.
+ */
+static void sbp2_release_target(struct kref *kref)
+{
+       struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
+       struct sbp2_logical_unit *lu, *next;
+       struct Scsi_Host *shost =
+               container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       struct scsi_device *sdev;
+       struct fw_device *device = target_device(tgt);
+
+       /* prevent deadlocks */
+       sbp2_unblock(tgt);
+
+       list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+               sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
+               if (sdev) {
+                       scsi_remove_device(sdev);
+                       scsi_device_put(sdev);
+               }
+               /* Only log out if the login ever succeeded. */
+               if (lu->login_id != INVALID_LOGIN_ID) {
+                       int generation, node_id;
+                       /*
+                        * tgt->node_id may be obsolete here if we failed
+                        * during initial login or after a bus reset where
+                        * the topology changed.
+                        */
+                       generation = device->generation;
+                       smp_rmb(); /* node_id vs. generation */
+                       node_id    = device->node_id;
+                       sbp2_send_management_orb(lu, node_id, generation,
+                                                SBP2_LOGOUT_REQUEST,
+                                                lu->login_id, NULL);
+               }
+               fw_core_remove_address_handler(&lu->address_handler);
+               list_del(&lu->link);
+               kfree(lu);
+       }
+       scsi_remove_host(shost);
+       fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
+
+       fw_unit_put(tgt->unit);
+       scsi_host_put(shost);
+       fw_device_put(device);
+}
+
+/* Workqueue on which sbp2_login/sbp2_reconnect jobs run. */
+static struct workqueue_struct *sbp2_wq;
+
+/* Drop a target reference; frees via sbp2_release_target() on last put. */
+static void sbp2_target_put(struct sbp2_target *tgt)
+{
+       kref_put(&tgt->kref, sbp2_release_target);
+}
+
+/*
+ * Always get the target's kref when scheduling work on one its units.
+ * Each workqueue job is responsible to call sbp2_target_put() upon return.
+ */
+static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
+{
+       kref_get(&lu->tgt->kref);
+       /* 0 means the work was already queued: drop the extra ref again. */
+       if (!queue_delayed_work(sbp2_wq, &lu->work, delay))
+               sbp2_target_put(lu->tgt);
+}
+
+/*
+ * Write retransmit retry values into the BUSY_TIMEOUT register.
+ * - The single-phase retry protocol is supported by all SBP-2 devices, but the
+ *   default retry_limit value is 0 (i.e. never retry transmission). We write a
+ *   saner value after logging into the device.
+ * - The dual-phase retry protocol is optional to implement, and if not
+ *   supported, writes to the dual-phase portion of the register will be
+ *   ignored. We try to write the original 1394-1995 default here.
+ * - In the case of devices that are also SBP-3-compliant, all writes are
+ *   ignored, as the register is read-only, but contains single-phase retry of
+ *   15, which is what we're trying to set for all SBP-2 device anyway, so this
+ *   write attempt is safe and yields more consistent behavior for all devices.
+ *
+ * See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
+ * and section 6.4 of the SBP-3 spec for further details.
+ */
+static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
+{
+       struct fw_device *device = target_device(lu->tgt);
+       /* dual-phase cycle limit in the upper bits, retry limit of 15 below */
+       __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
+
+       fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
+                          lu->tgt->node_id, lu->generation, device->max_speed,
+                          CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT,
+                          &d, sizeof(d));
+}
+
+static void sbp2_reconnect(struct work_struct *work);
+
+/*
+ * Workqueue job: log in to the target, either initially or again after a
+ * bus reset.  On first successful login the SCSI device is added; on a
+ * re-login (lu->has_sdev) pending I/O is unblocked instead.  Failed logins
+ * are retried up to 5 times via sbp2_queue_work().  Always drops the
+ * target reference taken by sbp2_queue_work() before returning.
+ */
+static void sbp2_login(struct work_struct *work)
+{
+       struct sbp2_logical_unit *lu =
+               container_of(work, struct sbp2_logical_unit, work.work);
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_device *device = target_device(tgt);
+       struct Scsi_Host *shost;
+       struct scsi_device *sdev;
+       struct sbp2_login_response response;
+       int generation, node_id, local_node_id;
+
+       if (fw_device_is_shutdown(device))
+               goto out;
+
+       generation    = device->generation;
+       smp_rmb();    /* node IDs must not be older than generation */
+       node_id       = device->node_id;
+       local_node_id = device->card->node_id;
+
+       /* If this is a re-login attempt, log out, or we might be rejected. */
+       if (lu->has_sdev)
+               sbp2_send_management_orb(lu, device->node_id, generation,
+                               SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+
+       if (sbp2_send_management_orb(lu, node_id, generation,
+                               SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
+               if (lu->retries++ < 5) {
+                       sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+               } else {
+                       fw_error("%s: failed to login to LUN %04x\n",
+                                tgt->bus_id, lu->lun);
+                       /* Let any waiting I/O fail from now on. */
+                       sbp2_unblock(lu->tgt);
+               }
+               goto out;
+       }
+
+       tgt->node_id      = node_id;
+       tgt->address_high = local_node_id << 16;
+       smp_wmb();        /* node IDs must not be older than generation */
+       lu->generation    = generation;
+
+       lu->command_block_agent_address =
+               ((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
+                     << 32) | be32_to_cpu(response.command_block_agent.low);
+       lu->login_id = be32_to_cpu(response.misc) & 0xffff;
+
+       fw_notify("%s: logged in to LUN %04x (%d retries)\n",
+                 tgt->bus_id, lu->lun, lu->retries);
+
+       /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
+       sbp2_set_busy_timeout(lu);
+
+       /* Future runs of lu->work handle reconnection, not login. */
+       PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+       sbp2_agent_reset(lu);
+
+       /* This was a re-login. */
+       if (lu->has_sdev) {
+               sbp2_cancel_orbs(lu);
+               sbp2_conditionally_unblock(lu);
+               goto out;
+       }
+
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
+               ssleep(SBP2_INQUIRY_DELAY);
+
+       /* tgt is embedded in shost->hostdata, see sbp2_probe(). */
+       shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+       sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
+       /*
+        * FIXME:  We are unable to perform reconnects while in sbp2_login().
+        * Therefore __scsi_add_device() will get into trouble if a bus reset
+        * happens in parallel.  It will either fail or leave us with an
+        * unusable sdev.  As a workaround we check for this and retry the
+        * whole login and SCSI probing.
+        */
+
+       /* Reported error during __scsi_add_device() */
+       if (IS_ERR(sdev))
+               goto out_logout_login;
+
+       /* Unreported error during __scsi_add_device() */
+       smp_rmb(); /* get current card generation */
+       if (generation != device->card->generation) {
+               scsi_remove_device(sdev);
+               scsi_device_put(sdev);
+               goto out_logout_login;
+       }
+
+       /* No error during __scsi_add_device() */
+       lu->has_sdev = true;
+       scsi_device_put(sdev);
+       sbp2_allow_block(lu);
+       goto out;
+
+ out_logout_login:
+       smp_rmb(); /* generation may have changed */
+       generation = device->generation;
+       smp_rmb(); /* node_id must not be older than generation */
+
+       sbp2_send_management_orb(lu, device->node_id, generation,
+                                SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
+       /*
+        * If a bus reset happened, sbp2_update will have requeued
+        * lu->work already.  Reset the work from reconnect to login.
+        */
+       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ out:
+       sbp2_target_put(tgt);
+}
+
+/*
+ * Allocate a logical unit for the LUN given by the config-ROM entry
+ * "lun_entry" (low 16 bits), register its status-write address handler,
+ * initialize its login work, and add it to tgt->lu_list.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
+{
+       struct sbp2_logical_unit *lu;
+
+       lu = kmalloc(sizeof(*lu), GFP_KERNEL);
+       if (!lu)
+               return -ENOMEM;
+
+       lu->address_handler.length           = 0x100;
+       lu->address_handler.address_callback = sbp2_status_write;
+       lu->address_handler.callback_data    = lu;
+
+       if (fw_core_add_address_handler(&lu->address_handler,
+                                       &fw_high_memory_region) < 0) {
+               kfree(lu);
+               return -ENOMEM;
+       }
+
+       lu->tgt      = tgt;
+       lu->lun      = lun_entry & 0xffff;
+       lu->login_id = INVALID_LOGIN_ID;
+       lu->retries  = 0;
+       lu->has_sdev = false;
+       lu->blocked  = false;
+       ++tgt->dont_block;
+       INIT_LIST_HEAD(&lu->orb_list);
+       INIT_DELAYED_WORK(&lu->work, sbp2_login);
+
+       list_add_tail(&lu->link, &tgt->lu_list);
+       return 0;
+}
+
+/*
+ * Walk a logical unit directory and add a logical unit for each LUN entry
+ * found.  Returns 0 on success or -ENOMEM.
+ */
+static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
+{
+       struct fw_csr_iterator ci;
+       int key, value;
+
+       fw_csr_iterator_init(&ci, directory);
+       while (fw_csr_iterator_next(&ci, &key, &value))
+               if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
+                   sbp2_add_logical_unit(tgt, value) < 0)
+                       return -ENOMEM;
+       return 0;
+}
+
+/*
+ * Parse the SBP-2 unit directory of the config ROM: record the management
+ * agent address, directory ID and management ORB timeout in tgt, add all
+ * logical units found (directly or in LU subdirectories), and return the
+ * model and firmware-revision CSR values via out parameters.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
+                             u32 *model, u32 *firmware_revision)
+{
+       struct fw_csr_iterator ci;
+       int key, value;
+       unsigned int timeout;
+
+       fw_csr_iterator_init(&ci, directory);
+       while (fw_csr_iterator_next(&ci, &key, &value)) {
+               switch (key) {
+
+               case CSR_DEPENDENT_INFO | CSR_OFFSET:
+                       tgt->management_agent_address =
+                                       CSR_REGISTER_BASE + 4 * value;
+                       break;
+
+               case CSR_DIRECTORY_ID:
+                       tgt->directory_id = value;
+                       break;
+
+               case CSR_MODEL:
+                       *model = value;
+                       break;
+
+               case SBP2_CSR_FIRMWARE_REVISION:
+                       *firmware_revision = value;
+                       break;
+
+               case SBP2_CSR_UNIT_CHARACTERISTICS:
+                       /* the timeout value is stored in 500ms units */
+                       timeout = ((unsigned int) value >> 8 & 0xff) * 500;
+                       timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
+                       /* clamp to [SBP2_MIN_..., SBP2_MAX_LOGIN_ORB_TIMEOUT] */
+                       tgt->mgt_orb_timeout =
+                                 min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
+
+                       if (timeout > tgt->mgt_orb_timeout)
+                               fw_notify("%s: config rom contains %ds "
+                                         "management ORB timeout, limiting "
+                                         "to %ds\n", tgt->bus_id,
+                                         timeout / 1000,
+                                         tgt->mgt_orb_timeout / 1000);
+                       break;
+
+               case SBP2_CSR_LOGICAL_UNIT_NUMBER:
+                       if (sbp2_add_logical_unit(tgt, value) < 0)
+                               return -ENOMEM;
+                       break;
+
+               case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
+                       /* Adjust for the increment in the iterator */
+                       if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
+                               return -ENOMEM;
+                       break;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Compute tgt->workarounds: start from the sbp2_param_workarounds module
+ * parameter and OR in the first sbp2_workarounds_table entry matching the
+ * device's firmware revision (upper 24 bits) and model — unless the user
+ * set SBP2_WORKAROUND_OVERRIDE, which disables the table lookup entirely.
+ */
+static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
+                                 u32 firmware_revision)
+{
+       int i;
+       unsigned int w = sbp2_param_workarounds;
+
+       if (w)
+               fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
+                         "if you need the workarounds parameter for %s\n",
+                         tgt->bus_id);
+
+       if (w & SBP2_WORKAROUND_OVERRIDE)
+               goto out;
+
+       for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+
+               if (sbp2_workarounds_table[i].firmware_revision !=
+                   (firmware_revision & 0xffffff00))
+                       continue;
+
+               if (sbp2_workarounds_table[i].model != model &&
+                   sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
+                       continue;
+
+               w |= sbp2_workarounds_table[i].workarounds;
+               break;
+       }
+ out:
+       if (w)
+               fw_notify("Workarounds for %s: 0x%x "
+                         "(firmware_revision 0x%06x, model_id 0x%06x)\n",
+                         tgt->bus_id, w, firmware_revision, model);
+       tgt->workarounds = w;
+}
+
+static struct scsi_host_template scsi_driver_template;
+
+/*
+ * Bus driver probe: allocate a Scsi_Host with the sbp2_target embedded in
+ * its hostdata, register it with the SCSI midlayer, parse the unit
+ * directory, and schedule a login job for each discovered logical unit.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int sbp2_probe(struct device *dev)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       struct fw_device *device = fw_parent_device(unit);
+       struct sbp2_target *tgt;
+       struct sbp2_logical_unit *lu;
+       struct Scsi_Host *shost;
+       u32 model, firmware_revision;
+
+       if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
+               BUG_ON(dma_set_max_seg_size(device->card->device,
+                                           SBP2_MAX_SEG_SIZE));
+
+       shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
+       if (shost == NULL)
+               return -ENOMEM;
+
+       tgt = (struct sbp2_target *)shost->hostdata;
+       dev_set_drvdata(&unit->device, tgt);
+       tgt->unit = unit;
+       kref_init(&tgt->kref);
+       INIT_LIST_HEAD(&tgt->lu_list);
+       tgt->bus_id = dev_name(&unit->device);
+       tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
+
+       if (fw_device_enable_phys_dma(device) < 0)
+               goto fail_shost_put;
+
+       if (scsi_add_host(shost, &unit->device) < 0)
+               goto fail_shost_put;
+
+       /* References dropped again in sbp2_release_target(). */
+       fw_device_get(device);
+       fw_unit_get(unit);
+
+       /* implicit directory ID */
+       tgt->directory_id = ((unit->directory - device->config_rom) * 4
+                            + CSR_CONFIG_ROM) & 0xffffff;
+
+       firmware_revision = SBP2_ROM_VALUE_MISSING;
+       model             = SBP2_ROM_VALUE_MISSING;
+
+       if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
+                              &firmware_revision) < 0)
+               goto fail_tgt_put;
+
+       sbp2_init_workarounds(tgt, model, firmware_revision);
+
+       /*
+        * At S100 we can do 512 bytes per packet, at S200 1024 bytes,
+        * and so on up to 4096 bytes.  The SBP-2 max_payload field
+        * specifies the max payload size as 2 ^ (max_payload + 2), so
+        * if we set this to max_speed + 7, we get the right value.
+        */
+       tgt->max_payload = min(device->max_speed + 7, 10U);
+       tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
+
+       /* Do the login in a workqueue so we can easily reschedule retries. */
+       list_for_each_entry(lu, &tgt->lu_list, link)
+               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+       return 0;
+
+ fail_tgt_put:
+       sbp2_target_put(tgt);
+       return -ENOMEM;
+
+ fail_shost_put:
+       scsi_host_put(shost);
+       return -ENOMEM;
+}
+
+/*
+ * Bus driver remove: drop the initial target reference taken by kref_init()
+ * in sbp2_probe().  sbp2_release_target() runs when the last reference
+ * (possibly held by still-queued work) is dropped.
+ */
+static int sbp2_remove(struct device *dev)
+{
+       struct fw_unit *unit = fw_unit(dev);
+       struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
+
+       sbp2_target_put(tgt);
+       return 0;
+}
+
+/*
+ * Workqueue job: re-establish an existing login after a bus reset.  If the
+ * reconnect fails even though we are still in the current bus generation,
+ * or after 5 retries, fall back to a full re-login via sbp2_login().
+ * Always drops the target reference taken by sbp2_queue_work().
+ */
+static void sbp2_reconnect(struct work_struct *work)
+{
+       struct sbp2_logical_unit *lu =
+               container_of(work, struct sbp2_logical_unit, work.work);
+       struct sbp2_target *tgt = lu->tgt;
+       struct fw_device *device = target_device(tgt);
+       int generation, node_id, local_node_id;
+
+       if (fw_device_is_shutdown(device))
+               goto out;
+
+       generation    = device->generation;
+       smp_rmb();    /* node IDs must not be older than generation */
+       node_id       = device->node_id;
+       local_node_id = device->card->node_id;
+
+       if (sbp2_send_management_orb(lu, node_id, generation,
+                                    SBP2_RECONNECT_REQUEST,
+                                    lu->login_id, NULL) < 0) {
+               /*
+                * If reconnect was impossible even though we are in the
+                * current generation, fall back and try to log in again.
+                *
+                * We could check for "Function rejected" status, but
+                * looking at the bus generation as simpler and more general.
+                */
+               smp_rmb(); /* get current card generation */
+               if (generation == device->card->generation ||
+                   lu->retries++ >= 5) {
+                       fw_error("%s: failed to reconnect\n", tgt->bus_id);
+                       lu->retries = 0;
+                       PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+               }
+               sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+               goto out;
+       }
+
+       tgt->node_id      = node_id;
+       tgt->address_high = local_node_id << 16;
+       smp_wmb();        /* node IDs must not be older than generation */
+       lu->generation    = generation;
+
+       fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
+                 tgt->bus_id, lu->lun, lu->retries);
+
+       sbp2_agent_reset(lu);
+       sbp2_cancel_orbs(lu);
+       sbp2_conditionally_unblock(lu);
+ out:
+       sbp2_target_put(tgt);
+}
+
+/*
+ * Called by the FireWire core after a bus reset: conditionally block I/O
+ * on each logical unit and schedule an immediate reconnect (or re-login).
+ */
+static void sbp2_update(struct fw_unit *unit)
+{
+       struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
+       struct sbp2_logical_unit *lu;
+
+       fw_device_enable_phys_dma(fw_parent_device(unit));
+
+       /*
+        * Fw-core serializes sbp2_update() against sbp2_remove().
+        * Iteration over tgt->lu_list is therefore safe here.
+        */
+       list_for_each_entry(lu, &tgt->lu_list, link) {
+               sbp2_conditionally_block(lu);
+               lu->retries = 0;
+               sbp2_queue_work(lu, 0);
+       }
+}
+
+#define SBP2_UNIT_SPEC_ID_ENTRY        0x0000609e
+#define SBP2_SW_VERSION_ENTRY  0x00010483
+
+/* Match any unit directory advertising the SBP-2 specifier ID and version. */
+static const struct ieee1394_device_id sbp2_id_table[] = {
+       {
+               .match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
+                               IEEE1394_MATCH_VERSION,
+               .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
+               .version      = SBP2_SW_VERSION_ENTRY,
+       },
+       { }
+};
+
+/* FireWire driver glue: probe/remove per unit, update called on bus reset. */
+static struct fw_driver sbp2_driver = {
+       .driver   = {
+               .owner  = THIS_MODULE,
+               .name   = sbp2_driver_name,
+               .bus    = &fw_bus_type,
+               .probe  = sbp2_probe,
+               .remove = sbp2_remove,
+       },
+       .update   = sbp2_update,
+       .id_table = sbp2_id_table,
+};
+
+/*
+ * Undo the DMA mappings made by sbp2_map_scatterlist(): the scatter/gather
+ * list itself and, if the ORB used one, the page table.
+ */
+static void sbp2_unmap_scatterlist(struct device *card_device,
+                                  struct sbp2_command_orb *orb)
+{
+       if (scsi_sg_count(orb->cmd))
+               dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
+                            scsi_sg_count(orb->cmd),
+                            orb->cmd->sc_data_direction);
+
+       if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
+               dma_unmap_single(card_device, orb->page_table_bus,
+                                sizeof(orb->page_table), DMA_TO_DEVICE);
+}
+
+/*
+ * Translate an SBP-2 status block into a 16-byte fixed-format SCSI sense
+ * buffer (sense_data) and return the SCSI midlayer result code derived
+ * from the SAM status byte (low 6 bits of sbp2_status[0]).
+ */
+static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
+{
+       int sam_status;
+
+       sense_data[0] = 0x70;   /* fixed-format, current error */
+       sense_data[1] = 0x0;
+       sense_data[2] = sbp2_status[1];
+       sense_data[3] = sbp2_status[4];
+       sense_data[4] = sbp2_status[5];
+       sense_data[5] = sbp2_status[6];
+       sense_data[6] = sbp2_status[7];
+       sense_data[7] = 10;     /* additional sense length */
+       sense_data[8] = sbp2_status[8];
+       sense_data[9] = sbp2_status[9];
+       sense_data[10] = sbp2_status[10];
+       sense_data[11] = sbp2_status[11];
+       sense_data[12] = sbp2_status[2];
+       sense_data[13] = sbp2_status[3];
+       sense_data[14] = sbp2_status[12];
+       sense_data[15] = sbp2_status[13];
+
+       sam_status = sbp2_status[0] & 0x3f;
+
+       switch (sam_status) {
+       case SAM_STAT_GOOD:
+       case SAM_STAT_CHECK_CONDITION:
+       case SAM_STAT_CONDITION_MET:
+       case SAM_STAT_BUSY:
+       case SAM_STAT_RESERVATION_CONFLICT:
+       case SAM_STAT_COMMAND_TERMINATED:
+               return DID_OK << 16 | sam_status;
+
+       default:
+               return DID_ERROR << 16;
+       }
+}
+
+/*
+ * Completion callback for a command ORB.  A NULL status means the
+ * transaction itself failed (see inline comment); otherwise the SBP-2
+ * status block is translated into a SCSI result and sense data.  In all
+ * cases the ORB's DMA mappings are torn down and the SCSI command is
+ * completed via orb->done().
+ */
+static void complete_command_orb(struct sbp2_orb *base_orb,
+                                struct sbp2_status *status)
+{
+       struct sbp2_command_orb *orb =
+               container_of(base_orb, struct sbp2_command_orb, base);
+       struct fw_device *device = target_device(orb->lu->tgt);
+       int result;
+
+       if (status != NULL) {
+               if (STATUS_GET_DEAD(*status))
+                       sbp2_agent_reset_no_wait(orb->lu);
+
+               switch (STATUS_GET_RESPONSE(*status)) {
+               case SBP2_STATUS_REQUEST_COMPLETE:
+                       result = DID_OK << 16;
+                       break;
+               case SBP2_STATUS_TRANSPORT_FAILURE:
+                       result = DID_BUS_BUSY << 16;
+                       break;
+               case SBP2_STATUS_ILLEGAL_REQUEST:
+               case SBP2_STATUS_VENDOR_DEPENDENT:
+               default:
+                       result = DID_ERROR << 16;
+                       break;
+               }
+
+               if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
+                       result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
+                                                          orb->cmd->sense_buffer);
+       } else {
+               /*
+                * If the orb completes with status == NULL, something
+                * went wrong, typically a bus reset happened mid-orb
+                * or when sending the write (less likely).
+                */
+               result = DID_BUS_BUSY << 16;
+               sbp2_conditionally_block(orb->lu);
+       }
+
+       dma_unmap_single(device->card->device, orb->base.request_bus,
+                        sizeof(orb->request), DMA_TO_DEVICE);
+       sbp2_unmap_scatterlist(device->card->device, orb);
+
+       orb->cmd->result = result;
+       orb->done(orb->cmd);
+}
+
+/*
+ * Map the command's scatter/gather list for DMA and fill in the ORB's
+ * data descriptor: a single immediate block when the list maps to one
+ * element, otherwise an SBP-2 page table (also DMA-mapped here).
+ * Returns 0 on success or -ENOMEM on any mapping failure.
+ */
+static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
+               struct fw_device *device, struct sbp2_logical_unit *lu)
+{
+       struct scatterlist *sg = scsi_sglist(orb->cmd);
+       int i, n;
+
+       n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
+                      orb->cmd->sc_data_direction);
+       if (n == 0)
+               goto fail;
+
+       /*
+        * Handle the special case where there is only one element in
+        * the scatter list by converting it to an immediate block
+        * request. This is also a workaround for broken devices such
+        * as the second generation iPod which doesn't support page
+        * tables.
+        */
+       if (n == 1) {
+               orb->request.data_descriptor.high =
+                       cpu_to_be32(lu->tgt->address_high);
+               orb->request.data_descriptor.low  =
+                       cpu_to_be32(sg_dma_address(sg));
+               orb->request.misc |=
+                       cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
+               return 0;
+       }
+
+       for_each_sg(sg, sg, n, i) {
+               orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
+               orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
+       }
+
+       orb->page_table_bus =
+               dma_map_single(device->card->device, orb->page_table,
+                              sizeof(orb->page_table), DMA_TO_DEVICE);
+       if (dma_mapping_error(device->card->device, orb->page_table_bus))
+               goto fail_page_table;
+
+       /*
+        * The data_descriptor pointer is the one case where we need
+        * to fill in the node ID part of the address.  All other
+        * pointers assume that the data referenced reside on the
+        * initiator (i.e. us), but data_descriptor can refer to data
+        * on other nodes so we need to put our ID in descriptor.high.
+        */
+       orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
+       orb->request.data_descriptor.low  = cpu_to_be32(orb->page_table_bus);
+       orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
+                                        COMMAND_ORB_DATA_SIZE(n));
+
+       return 0;
+
+ fail_page_table:
+       dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+                    scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
+ fail:
+       return -ENOMEM;
+}
+
+/* SCSI stack integration */
+
+/*
+ * scsi_host_template.queuecommand: allocate and dispatch a command ORB for
+ * cmd.  Returns 0 when the command was dispatched (or terminally failed
+ * and completed via done()), or SCSI_MLQUEUE_HOST_BUSY so the midlayer
+ * retries later (allocation/mapping failure).
+ */
+static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
+{
+       struct sbp2_logical_unit *lu = cmd->device->hostdata;
+       struct fw_device *device = target_device(lu->tgt);
+       struct sbp2_command_orb *orb;
+       int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
+
+       /*
+        * Bidirectional commands are not yet implemented, and unknown
+        * transfer direction not handled.
+        */
+       if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
+               fw_error("Can't handle DMA_BIDIRECTIONAL, rejecting command\n");
+               cmd->result = DID_ERROR << 16;
+               done(cmd);
+               return 0;
+       }
+
+       orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
+       if (orb == NULL) {
+               fw_notify("failed to alloc orb\n");
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
+       /* Initialize rcode to something not RCODE_COMPLETE. */
+       orb->base.rcode = -1;
+       kref_init(&orb->base.kref);
+
+       orb->lu   = lu;
+       orb->done = done;
+       orb->cmd  = cmd;
+
+       orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
+       orb->request.misc = cpu_to_be32(
+               COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
+               COMMAND_ORB_SPEED(device->max_speed) |
+               COMMAND_ORB_NOTIFY);
+
+       if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+               orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
+
+       generation = device->generation;
+       smp_rmb();    /* sbp2_map_scatterlist looks at tgt->address_high */
+
+       if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
+               goto out;
+
+       memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);
+
+       orb->base.callback = complete_command_orb;
+       orb->base.request_bus =
+               dma_map_single(device->card->device, &orb->request,
+                              sizeof(orb->request), DMA_TO_DEVICE);
+       if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
+               sbp2_unmap_scatterlist(device->card->device, orb);
+               goto out;
+       }
+
+       sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
+                     lu->command_block_agent_address + SBP2_ORB_POINTER);
+       retval = 0;
+ out:
+       /* Drop the allocation reference; sbp2_send_orb holds its own. */
+       kref_put(&orb->base.kref, free_orb);
+       return retval;
+}
+
+/*
+ * scsi_host_template.slave_alloc: basic per-sdev setup.  Rejects devices
+ * without driver-supplied hostdata (i.e. not added by sbp2_login()).
+ */
+static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+{
+       struct sbp2_logical_unit *lu = sdev->hostdata;
+
+       /* (Re-)Adding logical units via the SCSI stack is not supported. */
+       if (!lu)
+               return -ENOSYS;
+
+       sdev->allow_restart = 1;
+
+       /* SBP-2 requires quadlet alignment of the data buffers. */
+       blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
+
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
+               sdev->inquiry_len = 36;
+
+       return 0;
+}
+
+/*
+ * scsi_host_template.slave_configure: apply per-device quirks from
+ * lu->tgt->workarounds and set request-queue limits.
+ */
+static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
+{
+       struct sbp2_logical_unit *lu = sdev->hostdata;
+
+       sdev->use_10_for_rw = 1;
+
+       if (sbp2_param_exclusive_login)
+               sdev->manage_start_stop = 1;
+
+       if (sdev->type == TYPE_ROM)
+               sdev->use_10_for_ms = 1;
+
+       if (sdev->type == TYPE_DISK &&
+           lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+               sdev->skip_ms_page_8 = 1;
+
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+               sdev->fix_capacity = 1;
+
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
+               sdev->start_stop_pwr_cond = 1;
+
+       if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
+               blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512);
+
+       blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
+
+       return 0;
+}
+
+/*
+ * Called by scsi stack when something has really gone wrong.  Usually
+ * called when a command has timed-out for some reason.
+ */
+static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
+{
+       struct sbp2_logical_unit *lu = cmd->device->hostdata;
+
+       fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
+       /* Reset the fetch agent, then cancel every outstanding ORB. */
+       sbp2_agent_reset(lu);
+       sbp2_cancel_orbs(lu);
+
+       return SUCCESS;
+}
+
+/*
+ * Format of /sys/bus/scsi/devices/.../ieee1394_id:
+ * u64 EUI-64 : u24 directory_ID : u16 LUN  (all printed in hexadecimal)
+ *
+ * This is the concatenation of target port identifier and logical unit
+ * identifier as per SAM-2...SAM-4 annex A.
+ *
+ * Returns the number of bytes written to buf.
+ */
+static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct sbp2_logical_unit *lu;
+
+       if (!sdev)
+               return 0;
+
+       lu = sdev->hostdata;
+
+       return sprintf(buf, "%016llx:%06x:%04x\n",
+                       (unsigned long long)lu->tgt->guid,
+                       lu->tgt->directory_id, lu->lun);
+}
+
+static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
+
+/* sysfs attributes attached to every SCSI device of this host. */
+static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
+       &dev_attr_ieee1394_id,
+       NULL
+};
+
+/* SCSI host template: one command in flight at a time (cmd_per_lun/can_queue). */
+static struct scsi_host_template scsi_driver_template = {
+       .module                 = THIS_MODULE,
+       .name                   = "SBP-2 IEEE-1394",
+       .proc_name              = sbp2_driver_name,
+       .queuecommand           = sbp2_scsi_queuecommand,
+       .slave_alloc            = sbp2_scsi_slave_alloc,
+       .slave_configure        = sbp2_scsi_slave_configure,
+       .eh_abort_handler       = sbp2_scsi_abort,
+       .this_id                = -1,
+       .sg_tablesize           = SG_ALL,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .cmd_per_lun            = 1,
+       .can_queue              = 1,
+       .sdev_attrs             = sbp2_scsi_sysfs_attrs,
+};
+
+MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
+MODULE_DESCRIPTION("SCSI over IEEE1394");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
+
+/* Provide a module alias so root-on-sbp2 initrds don't break. */
+/* NOTE(review): only when the old ieee1394 sbp2 module is not also built. */
+#ifndef CONFIG_IEEE1394_SBP2_MODULE
+MODULE_ALIAS("sbp2");
+#endif
+
+/*
+ * Module init: create the single-threaded workqueue used for login and
+ * reconnect jobs, then register the driver.  The workqueue is destroyed
+ * again if driver registration fails (previously it leaked on that path).
+ */
+static int __init sbp2_init(void)
+{
+       int ret;
+
+       sbp2_wq = create_singlethread_workqueue(KBUILD_MODNAME);
+       if (!sbp2_wq)
+               return -ENOMEM;
+
+       ret = driver_register(&sbp2_driver.driver);
+       if (ret < 0)
+               destroy_workqueue(sbp2_wq);
+
+       return ret;
+}
+
+/* Module exit: unregister the driver, then tear down the workqueue. */
+static void __exit sbp2_cleanup(void)
+{
+       driver_unregister(&sbp2_driver.driver);
+       destroy_workqueue(sbp2_wq);
+}
+
+module_init(sbp2_init);
+module_exit(sbp2_cleanup);
index 05aa2d406ac64769ecfbb3f6827d5e7fb53915e3..d5ea8a68d338ef80996d20dd0bbbf4e27d3fafc0 100644 (file)
  * information is necessary as for the resource tree.
  */
 struct firmware_map_entry {
-       resource_size_t         start;  /* start of the memory range */
-       resource_size_t         end;    /* end of the memory range (incl.) */
+       /*
+        * start and end must be u64 rather than resource_size_t, because e820
+        * resources can lie at addresses above 4G.
+        */
+       u64                     start;  /* start of the memory range */
+       u64                     end;    /* end of the memory range (incl.) */
        const char              *type;  /* type of the memory range */
        struct list_head        list;   /* entry for the linked list */
        struct kobject          kobj;   /* kobject for each entry */
@@ -101,7 +105,7 @@ static LIST_HEAD(map_entries);
  * Common implementation of firmware_map_add() and firmware_map_add_early()
  * which expects a pre-allocated struct firmware_map_entry.
  **/
-static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
+static int firmware_map_add_entry(u64 start, u64 end,
                                  const char *type,
                                  struct firmware_map_entry *entry)
 {
@@ -132,8 +136,7 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
  *
  * Returns 0 on success, or -ENOMEM if no memory could be allocated.
  **/
-int firmware_map_add(resource_size_t start, resource_size_t end,
-                    const char *type)
+int firmware_map_add(u64 start, u64 end, const char *type)
 {
        struct firmware_map_entry *entry;
 
@@ -157,8 +160,7 @@ int firmware_map_add(resource_size_t start, resource_size_t end,
  *
  * Returns 0 on success, or -ENOMEM if no memory could be allocated.
  **/
-int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
-                                 const char *type)
+int __init firmware_map_add_early(u64 start, u64 end, const char *type)
 {
        struct firmware_map_entry *entry;
 
index c77c6c6d9d2c907eadf71c81f9a0aa3d5db4a37e..6ce0e2667a85f046987b9a7cd4f3a919b5630f5f 100644 (file)
@@ -105,7 +105,7 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
                ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
                                          root, tmp, &drm_debugfs_fops);
                if (!ent) {
-                       DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
+                       DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
                                  name, files[i].name);
                        drm_free(tmp, sizeof(struct drm_info_node),
                                 _DRM_DRIVER);
@@ -133,9 +133,9 @@ EXPORT_SYMBOL(drm_debugfs_create_files);
  * \param minor device minor number
  * \param root DRI debugfs dir entry.
  *
- * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry
- * "/debugfs/dri/%minor%/", and each entry in debugfs_list as
- * "/debugfs/dri/%minor%/%name%".
+ * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
+ * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
+ * "/sys/kernel/debug/dri/%minor%/%name%".
  */
 int drm_debugfs_init(struct drm_minor *minor, int minor_id,
                     struct dentry *root)
@@ -148,7 +148,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
        sprintf(name, "%d", minor_id);
        minor->debugfs_root = debugfs_create_dir(name, root);
        if (!minor->debugfs_root) {
-               DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
+               DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
                return -1;
        }
 
@@ -165,7 +165,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
                ret = dev->driver->debugfs_init(minor);
                if (ret) {
                        DRM_ERROR("DRM: Driver failed to initialize "
-                                 "/debugfs/dri.\n");
+                                 "/sys/kernel/debug/dri.\n");
                        return ret;
                }
        }
index 019b7c5782367390783dfa8fada2046872a6e0c8..1bf7efd8d334acf7908d5f5dd02bb4fd6db1abe5 100644 (file)
@@ -339,7 +339,7 @@ static int __init drm_core_init(void)
 
        drm_debugfs_root = debugfs_create_dir("dri", NULL);
        if (!drm_debugfs_root) {
-               DRM_ERROR("Cannot create /debugfs/dri\n");
+               DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
                ret = -1;
                goto err_p3;
        }
index 89050684fe0d2c7b33a2fdf0a31287c5955e5778..387a8de1bc7e51f60af11c903d5852a93bc98e15 100644 (file)
@@ -343,7 +343,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
 #if defined(CONFIG_DEBUG_FS)
        ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
        if (ret) {
-               DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
+               DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
                goto err_g2;
        }
 #endif
index 9987ab8808356d13b01c2540c085181e4f7db791..85ec31b3ff00a09f1d18b0a9b66cd31b68d8ba21 100644 (file)
@@ -70,6 +70,11 @@ static ssize_t version_show(struct class *dev, char *buf)
                       CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
 }
 
+static char *drm_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+}
+
 static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
 
 /**
@@ -101,6 +106,8 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
        if (err)
                goto err_out_class;
 
+       class->nodename = drm_nodename;
+
        return class;
 
 err_out_class:
index 0ecf6b76a401c043e00a12b3b027a07ef14cce01..8e28e5993df5b5607896db9c8c30a226faf1790d 100644 (file)
@@ -504,6 +504,14 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
        info->fbops = &intelfb_ops;
 
        info->fix.line_length = fb->pitch;
+
+       /* setup aperture base/size for vesafb takeover */
+       info->aperture_base = dev->mode_config.fb_base;
+       if (IS_I9XX(dev))
+               info->aperture_size = pci_resource_len(dev->pdev, 2);
+       else
+               info->aperture_size = pci_resource_len(dev->pdev, 0);
+
        info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
        info->fix.smem_len = size;
 
index e9b436d2d94434c17fb1030d50528341edd71659..9e9421525fb9125fa962c2266550f67545855b39 100644 (file)
@@ -850,8 +850,14 @@ static const struct file_operations hiddev_fops = {
 #endif
 };
 
+static char *hiddev_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 static struct usb_class_driver hiddev_class = {
        .name =         "hiddev%d",
+       .nodename =     hiddev_nodename,
        .fops =         &hiddev_fops,
        .minor_base =   HIDDEV_MINOR_BASE,
 };
@@ -955,7 +961,6 @@ static int hiddev_usbd_probe(struct usb_interface *intf,
        return -ENODEV;
 }
 
-
 static /* const */ struct usb_driver hiddev_driver = {
        .name =         "hiddev",
        .probe =        hiddev_usbd_probe,
index f8090e137fef6ae3b9570433256eeed43f419539..2d5016691d400b9c3d545401d83918e8589ece8b 100644 (file)
@@ -950,6 +950,7 @@ config SENSORS_HDAPS
 config SENSORS_LIS3LV02D
        tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
        depends on ACPI && INPUT
+       select INPUT_POLLDEV
        select NEW_LEDS
        select LEDS_CLASS
        default n
@@ -977,6 +978,7 @@ config SENSORS_LIS3LV02D
 config SENSORS_LIS3_SPI
        tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
        depends on !ACPI && SPI_MASTER && INPUT
+       select INPUT_POLLDEV
        default n
        help
          This driver provides support for the LIS3LV02Dx accelerometer connected
index abca7e9f953b16aaaf61d0251f5b6f41782afefe..6679854c85b075b82d0a5eea82dbaaf36051853b 100644 (file)
@@ -27,9 +27,6 @@
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/kthread.h>
-#include <linux/semaphore.h>
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/poll.h>
@@ -161,6 +158,7 @@ static struct axis_conversion lis3lv02d_axis_normal = {1, 2, 3};
 static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
 static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
 static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
+static struct axis_conversion lis3lv02d_axis_xy_swap = {2, 1, 3};
 static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
 static struct axis_conversion lis3lv02d_axis_xy_rotated_left_usd = {-2, 1, -3};
 static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
@@ -194,13 +192,16 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
        AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
        AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
+       AXIS_DMI_MATCH("NC2710", "HP Compaq 2710", xy_swap),
        AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
        AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
        AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
        AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
        AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd),
        AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
-       AXIS_DMI_MATCH("NC671xx", "HP Compaq 671", xy_swap_yz_inverted),
+       AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
+       AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
+       AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
        /* Intel-based HP Pavilion dv5 */
        AXIS_DMI_MATCH2("HPDV5_I",
                        PRODUCT_NAME, "HP Pavilion dv5",
@@ -216,7 +217,6 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
- * HP Compaq 8710x Notebook PC / Mobile Workstation
  * "NC2400" "HP Compaq nc2400"
  * "NX74x0" "HP Compaq nx74"
  * "NX6325" "HP Compaq nx6325"
@@ -324,7 +324,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
        flush_work(&hpled_led.work);
        led_classdev_unregister(&hpled_led.led_classdev);
 
-       return lis3lv02d_remove_fs();
+       return lis3lv02d_remove_fs(&lis3_dev);
 }
 
 
@@ -338,13 +338,7 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
 
 static int lis3lv02d_resume(struct acpi_device *device)
 {
-       /* put back the device in the right state (ACPI might turn it on) */
-       mutex_lock(&lis3_dev.lock);
-       if (lis3_dev.usage > 0)
-               lis3lv02d_poweron(&lis3_dev);
-       else
-               lis3lv02d_poweroff(&lis3_dev);
-       mutex_unlock(&lis3_dev.lock);
+       lis3lv02d_poweron(&lis3_dev);
        return 0;
 }
 #else
index 778eb77959837eed9c1172cfe4524d0273434b54..271338bdb6be55e5319349ef90fc127388ded3d9 100644 (file)
@@ -27,9 +27,7 @@
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/kthread.h>
-#include <linux/semaphore.h>
+#include <linux/input-polldev.h>
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/poll.h>
@@ -105,56 +103,39 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
 {
        int position[3];
 
-       position[0] = lis3_dev.read_data(lis3, OUTX);
-       position[1] = lis3_dev.read_data(lis3, OUTY);
-       position[2] = lis3_dev.read_data(lis3, OUTZ);
+       position[0] = lis3->read_data(lis3, OUTX);
+       position[1] = lis3->read_data(lis3, OUTY);
+       position[2] = lis3->read_data(lis3, OUTZ);
 
-       *x = lis3lv02d_get_axis(lis3_dev.ac.x, position);
-       *y = lis3lv02d_get_axis(lis3_dev.ac.y, position);
-       *z = lis3lv02d_get_axis(lis3_dev.ac.z, position);
+       *x = lis3lv02d_get_axis(lis3->ac.x, position);
+       *y = lis3lv02d_get_axis(lis3->ac.y, position);
+       *z = lis3lv02d_get_axis(lis3->ac.z, position);
 }
 
 void lis3lv02d_poweroff(struct lis3lv02d *lis3)
 {
-       lis3_dev.is_on = 0;
+       /* disable X,Y,Z axis and power down */
+       lis3->write(lis3, CTRL_REG1, 0x00);
 }
 EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
 
 void lis3lv02d_poweron(struct lis3lv02d *lis3)
 {
-       lis3_dev.is_on = 1;
-       lis3_dev.init(lis3);
-}
-EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+       u8 reg;
 
-/*
- * To be called before starting to use the device. It makes sure that the
- * device will always be on until a call to lis3lv02d_decrease_use(). Not to be
- * used from interrupt context.
- */
-static void lis3lv02d_increase_use(struct lis3lv02d *dev)
-{
-       mutex_lock(&dev->lock);
-       dev->usage++;
-       if (dev->usage == 1) {
-               if (!dev->is_on)
-                       lis3lv02d_poweron(dev);
-       }
-       mutex_unlock(&dev->lock);
-}
+       lis3->init(lis3);
 
-/*
- * To be called whenever a usage of the device is stopped.
- * It will make sure to turn off the device when there is not usage.
- */
-static void lis3lv02d_decrease_use(struct lis3lv02d *dev)
-{
-       mutex_lock(&dev->lock);
-       dev->usage--;
-       if (dev->usage == 0)
-               lis3lv02d_poweroff(dev);
-       mutex_unlock(&dev->lock);
+       /*
+        * Common configuration
+        * BDU: LSB and MSB values are not updated until both have been read.
+        *      So the value read will always be correct.
+        */
+       lis3->read(lis3, CTRL_REG2, &reg);
+       reg |= CTRL2_BDU;
+       lis3->write(lis3, CTRL_REG2, reg);
 }
+EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
+
 
 static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
 {
@@ -198,15 +179,12 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
                printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
                return -EBUSY;
        }
-       lis3lv02d_increase_use(&lis3_dev);
-       printk("lis3: registered interrupt %d\n", lis3_dev.irq);
        return 0;
 }
 
 static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
 {
        fasync_helper(-1, file, 0, &lis3_dev.async_queue);
-       lis3lv02d_decrease_use(&lis3_dev);
        free_irq(lis3_dev.irq, &lis3_dev);
        clear_bit(0, &lis3_dev.misc_opened); /* release the device */
        return 0;
@@ -290,46 +268,16 @@ static struct miscdevice lis3lv02d_misc_device = {
        .fops    = &lis3lv02d_misc_fops,
 };
 
-/**
- * lis3lv02d_joystick_kthread - Kthread polling function
- * @data: unused - here to conform to threadfn prototype
- */
-static int lis3lv02d_joystick_kthread(void *data)
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
 {
        int x, y, z;
 
-       while (!kthread_should_stop()) {
-               lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
-               input_report_abs(lis3_dev.idev, ABS_X, x - lis3_dev.xcalib);
-               input_report_abs(lis3_dev.idev, ABS_Y, y - lis3_dev.ycalib);
-               input_report_abs(lis3_dev.idev, ABS_Z, z - lis3_dev.zcalib);
-
-               input_sync(lis3_dev.idev);
-
-               try_to_freeze();
-               msleep_interruptible(MDPS_POLL_INTERVAL);
-       }
-
-       return 0;
-}
-
-static int lis3lv02d_joystick_open(struct input_dev *input)
-{
-       lis3lv02d_increase_use(&lis3_dev);
-       lis3_dev.kthread = kthread_run(lis3lv02d_joystick_kthread, NULL, "klis3lv02d");
-       if (IS_ERR(lis3_dev.kthread)) {
-               lis3lv02d_decrease_use(&lis3_dev);
-               return PTR_ERR(lis3_dev.kthread);
-       }
-
-       return 0;
+       lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+       input_report_abs(pidev->input, ABS_X, x - lis3_dev.xcalib);
+       input_report_abs(pidev->input, ABS_Y, y - lis3_dev.ycalib);
+       input_report_abs(pidev->input, ABS_Z, z - lis3_dev.zcalib);
 }
 
-static void lis3lv02d_joystick_close(struct input_dev *input)
-{
-       kthread_stop(lis3_dev.kthread);
-       lis3lv02d_decrease_use(&lis3_dev);
-}
 
 static inline void lis3lv02d_calibrate_joystick(void)
 {
@@ -339,33 +287,36 @@ static inline void lis3lv02d_calibrate_joystick(void)
 
 int lis3lv02d_joystick_enable(void)
 {
+       struct input_dev *input_dev;
        int err;
 
        if (lis3_dev.idev)
                return -EINVAL;
 
-       lis3_dev.idev = input_allocate_device();
+       lis3_dev.idev = input_allocate_polled_device();
        if (!lis3_dev.idev)
                return -ENOMEM;
 
+       lis3_dev.idev->poll = lis3lv02d_joystick_poll;
+       lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
+       input_dev = lis3_dev.idev->input;
+
        lis3lv02d_calibrate_joystick();
 
-       lis3_dev.idev->name       = "ST LIS3LV02DL Accelerometer";
-       lis3_dev.idev->phys       = DRIVER_NAME "/input0";
-       lis3_dev.idev->id.bustype = BUS_HOST;
-       lis3_dev.idev->id.vendor  = 0;
-       lis3_dev.idev->dev.parent = &lis3_dev.pdev->dev;
-       lis3_dev.idev->open       = lis3lv02d_joystick_open;
-       lis3_dev.idev->close      = lis3lv02d_joystick_close;
+       input_dev->name       = "ST LIS3LV02DL Accelerometer";
+       input_dev->phys       = DRIVER_NAME "/input0";
+       input_dev->id.bustype = BUS_HOST;
+       input_dev->id.vendor  = 0;
+       input_dev->dev.parent = &lis3_dev.pdev->dev;
 
-       set_bit(EV_ABS, lis3_dev.idev->evbit);
-       input_set_abs_params(lis3_dev.idev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
-       input_set_abs_params(lis3_dev.idev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
-       input_set_abs_params(lis3_dev.idev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
+       set_bit(EV_ABS, input_dev->evbit);
+       input_set_abs_params(input_dev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
+       input_set_abs_params(input_dev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
+       input_set_abs_params(input_dev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
 
-       err = input_register_device(lis3_dev.idev);
+       err = input_register_polled_device(lis3_dev.idev);
        if (err) {
-               input_free_device(lis3_dev.idev);
+               input_free_polled_device(lis3_dev.idev);
                lis3_dev.idev = NULL;
        }
 
@@ -378,8 +329,9 @@ void lis3lv02d_joystick_disable(void)
        if (!lis3_dev.idev)
                return;
 
-       misc_deregister(&lis3lv02d_misc_device);
-       input_unregister_device(lis3_dev.idev);
+       if (lis3_dev.irq)
+               misc_deregister(&lis3lv02d_misc_device);
+       input_unregister_polled_device(lis3_dev.idev);
        lis3_dev.idev = NULL;
 }
 EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
@@ -390,9 +342,7 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
 {
        int x, y, z;
 
-       lis3lv02d_increase_use(&lis3_dev);
        lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
-       lis3lv02d_decrease_use(&lis3_dev);
        return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
 }
 
@@ -406,9 +356,7 @@ static ssize_t lis3lv02d_calibrate_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
 {
-       lis3lv02d_increase_use(&lis3_dev);
        lis3lv02d_calibrate_joystick();
-       lis3lv02d_decrease_use(&lis3_dev);
        return count;
 }
 
@@ -420,9 +368,7 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
        u8 ctrl;
        int val;
 
-       lis3lv02d_increase_use(&lis3_dev);
        lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
-       lis3lv02d_decrease_use(&lis3_dev);
        val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
        return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
 }
@@ -446,17 +392,17 @@ static struct attribute_group lis3lv02d_attribute_group = {
 
 static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
 {
-       lis3_dev.pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
-       if (IS_ERR(lis3_dev.pdev))
-               return PTR_ERR(lis3_dev.pdev);
+       lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+       if (IS_ERR(lis3->pdev))
+               return PTR_ERR(lis3->pdev);
 
-       return sysfs_create_group(&lis3_dev.pdev->dev.kobj, &lis3lv02d_attribute_group);
+       return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
 }
 
-int lis3lv02d_remove_fs(void)
+int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
 {
-       sysfs_remove_group(&lis3_dev.pdev->dev.kobj, &lis3lv02d_attribute_group);
-       platform_device_unregister(lis3_dev.pdev);
+       sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
+       platform_device_unregister(lis3->pdev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
@@ -482,18 +428,35 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
                break;
        default:
                printk(KERN_ERR DRIVER_NAME
-                       ": unknown sensor type 0x%X\n", lis3_dev.whoami);
+                       ": unknown sensor type 0x%X\n", dev->whoami);
                return -EINVAL;
        }
 
-       mutex_init(&dev->lock);
        lis3lv02d_add_fs(dev);
-       lis3lv02d_increase_use(dev);
+       lis3lv02d_poweron(dev);
 
        if (lis3lv02d_joystick_enable())
                printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n");
 
-       printk("lis3_init_device: irq %d\n", dev->irq);
+       /* passing in platform specific data is purely optional and only
+        * used by the SPI transport layer at the moment */
+       if (dev->pdata) {
+               struct lis3lv02d_platform_data *p = dev->pdata;
+
+               if (p->click_flags && (dev->whoami == LIS_SINGLE_ID)) {
+                       dev->write(dev, CLICK_CFG, p->click_flags);
+                       dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
+                       dev->write(dev, CLICK_LATENCY, p->click_latency);
+                       dev->write(dev, CLICK_WINDOW, p->click_window);
+                       dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
+                       dev->write(dev, CLICK_THSY_X,
+                                       (p->click_thresh_x & 0xf) |
+                                       (p->click_thresh_y << 4));
+               }
+
+               if (p->irq_cfg)
+                       dev->write(dev, CTRL_REG3, p->irq_cfg);
+       }
 
        /* bail if we did not get an IRQ from the bus layer */
        if (!dev->irq) {
@@ -502,11 +465,9 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
                goto out;
        }
 
-       printk("lis3: registering device\n");
        if (misc_register(&lis3lv02d_misc_device))
                printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
 out:
-       lis3lv02d_decrease_use(dev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
index 745ec96806d485a0f93f33932e2a588e6a8ab546..e320e2f511f160ee7a81dac45fec755575f9ee22 100644 (file)
@@ -18,6 +18,8 @@
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
+#include <linux/platform_device.h>
+#include <linux/input-polldev.h>
 
 /*
  * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to
  * They can also be connected via I²C.
  */
 
+#include <linux/lis3lv02d.h>
+
 /* 2-byte registers */
 #define LIS_DOUBLE_ID  0x3A /* LIS3LV02D[LQ] */
 /* 1-byte registers */
 #define LIS_SINGLE_ID  0x3B /* LIS[32]02DL and others */
 
-enum lis3lv02d_reg {
+enum lis3_reg {
        WHO_AM_I        = 0x0F,
        OFFSET_X        = 0x16,
        OFFSET_Y        = 0x17,
@@ -60,6 +64,19 @@ enum lis3lv02d_reg {
        FF_WU_THS_L     = 0x34,
        FF_WU_THS_H     = 0x35,
        FF_WU_DURATION  = 0x36,
+};
+
+enum lis302d_reg {
+       CLICK_CFG       = 0x38,
+       CLICK_SRC       = 0x39,
+       CLICK_THSY_X    = 0x3B,
+       CLICK_THSZ      = 0x3C,
+       CLICK_TIMELIMIT = 0x3D,
+       CLICK_LATENCY   = 0x3E,
+       CLICK_WINDOW    = 0x3F,
+};
+
+enum lis3lv02d_reg {
        DD_CFG          = 0x38,
        DD_SRC          = 0x39,
        DD_ACK          = 0x3A,
@@ -169,22 +186,20 @@ struct lis3lv02d {
        s16 (*read_data) (struct lis3lv02d *lis3, int reg);
        int                     mdps_max_val;
 
-       struct input_dev        *idev;     /* input device */
-       struct task_struct      *kthread;  /* kthread for input */
-       struct mutex            lock;
+       struct input_polled_dev *idev;     /* input device */
        struct platform_device  *pdev;     /* platform device */
        atomic_t                count;     /* interrupt count after last read */
        int                     xcalib;    /* calibrated null value for x */
        int                     ycalib;    /* calibrated null value for y */
        int                     zcalib;    /* calibrated null value for z */
-       unsigned char           is_on;     /* whether the device is on or off */
-       unsigned char           usage;     /* usage counter */
        struct axis_conversion  ac;        /* hw -> logical axis */
 
        u32                     irq;       /* IRQ number */
        struct fasync_struct    *async_queue; /* queue for the misc device */
        wait_queue_head_t       misc_wait; /* Wait queue for the misc device */
        unsigned long           misc_opened; /* bit0: whether the device is open */
+
+       struct lis3lv02d_platform_data *pdata;  /* for passing board config */
 };
 
 int lis3lv02d_init_device(struct lis3lv02d *lis3);
@@ -192,6 +207,6 @@ int lis3lv02d_joystick_enable(void);
 void lis3lv02d_joystick_disable(void);
 void lis3lv02d_poweroff(struct lis3lv02d *lis3);
 void lis3lv02d_poweron(struct lis3lv02d *lis3);
-int lis3lv02d_remove_fs(void);
+int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
 
 extern struct lis3lv02d lis3_dev;
index 07ae74b0e191103564e881801ec4bc27225d8470..3827ff04485f9a726c3d5c8f8e6ab1742c309b4e 100644 (file)
@@ -72,6 +72,7 @@ static int __devinit lis302dl_spi_probe(struct spi_device *spi)
        lis3_dev.write = lis3_spi_write;
        lis3_dev.irq = spi->irq;
        lis3_dev.ac = lis3lv02d_axis_normal;
+       lis3_dev.pdata = spi->dev.platform_data;
        spi_set_drvdata(spi, &lis3_dev);
 
        ret = lis3lv02d_init_device(&lis3_dev);
index ba1488bd84307bb90850977573d2e80c9778af69..c14ca144cffe035d7722b6bb9fb8720652ad11bc 100644 (file)
@@ -3,7 +3,8 @@
 
 int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 {
-       ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+       ide_drive_t *drive = dev_get_drvdata(dev);
+       ide_drive_t *pair = ide_get_pair_dev(drive);
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
        struct request_pm_state rqpm;
@@ -34,7 +35,8 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 
 int generic_ide_resume(struct device *dev)
 {
-       ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+       ide_drive_t *drive = dev_get_drvdata(dev);
+       ide_drive_t *pair = ide_get_pair_dev(drive);
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq;
        struct request_pm_state rqpm;
index f371b0de314f75374a6728d206dabb3a24ef676e..79e0af3fd158a229d2e1f2945e9d270297f4cc4f 100644 (file)
@@ -535,7 +535,7 @@ static int ide_register_port(ide_hwif_t *hwif)
 
        /* register with global device tree */
        dev_set_name(&hwif->gendev, hwif->name);
-       hwif->gendev.driver_data = hwif;
+       dev_set_drvdata(&hwif->gendev, hwif);
        if (hwif->gendev.parent == NULL)
                hwif->gendev.parent = hwif->dev;
        hwif->gendev.release = hwif_release_dev;
@@ -987,9 +987,9 @@ static void hwif_register_devices(ide_hwif_t *hwif)
                int ret;
 
                dev_set_name(dev, "%u.%u", hwif->index, i);
+               dev_set_drvdata(dev, drive);
                dev->parent = &hwif->gendev;
                dev->bus = &ide_bus_type;
-               dev->driver_data = drive;
                dev->release = drive_release_dev;
 
                ret = device_register(dev);
index ee9b55ecc62b10741d1c7693d00ca8209f66ca78..b579fbe88370c6c254c47ad4e681946b3a902a9d 100644 (file)
@@ -112,7 +112,7 @@ out:
 
 static int __devexit plat_ide_remove(struct platform_device *pdev)
 {
-       struct ide_host *host = pdev->dev.driver_data;
+       struct ide_host *host = dev_get_drvdata(&pdev->dev);
 
        ide_host_remove(host);
 
index a6dfeb0b3372898cb7c4a59dd2604c71981edd92..e76cac64c53307852a442adac5264fe387c625e3 100644 (file)
@@ -35,6 +35,7 @@
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/string.h>
 #include <asm/bug.h>
 #include <asm/byteorder.h>
@@ -387,6 +388,7 @@ csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
        if (!kv)
                return NULL;
 
+       kmemcheck_annotate_variable(kv->value.leaf.data[0]);
        CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
        CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
 
index 4ca103577c0a3d809964cd519ec4d793f7c2213e..f5c586c2bba658cdfbef07d7e268431c0f64bfdb 100644 (file)
@@ -361,7 +361,7 @@ static int eth1394_new_node(struct eth1394_host_info *hi,
        node_info->pdg.sz = 0;
        node_info->fifo = CSR1212_INVALID_ADDR_SPACE;
 
-       ud->device.driver_data = node_info;
+       dev_set_drvdata(&ud->device, node_info);
        new_node->ud = ud;
 
        priv = netdev_priv(hi->dev);
@@ -406,7 +406,7 @@ static int eth1394_remove(struct device *dev)
        list_del(&old_node->list);
        kfree(old_node);
 
-       node_info = (struct eth1394_node_info*)ud->device.driver_data;
+       node_info = dev_get_drvdata(&ud->device);
 
        spin_lock_irqsave(&node_info->pdg.lock, flags);
        /* The partial datagram list should be empty, but we'll just
@@ -416,7 +416,7 @@ static int eth1394_remove(struct device *dev)
        spin_unlock_irqrestore(&node_info->pdg.lock, flags);
 
        kfree(node_info);
-       ud->device.driver_data = NULL;
+       dev_set_drvdata(&ud->device, NULL);
        return 0;
 }
 
@@ -688,7 +688,7 @@ static void ether1394_host_reset(struct hpsb_host *host)
        ether1394_reset_priv(dev, 0);
 
        list_for_each_entry(node, &priv->ip_node_list, list) {
-               node_info = node->ud->device.driver_data;
+               node_info = dev_get_drvdata(&node->ud->device);
 
                spin_lock_irqsave(&node_info->pdg.lock, flags);
 
@@ -872,8 +872,7 @@ static __be16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
                if (!node)
                        return cpu_to_be16(0);
 
-               node_info =
-                   (struct eth1394_node_info *)node->ud->device.driver_data;
+               node_info = dev_get_drvdata(&node->ud->device);
 
                /* Update our speed/payload/fifo_offset table */
                node_info->maxpayload = maxpayload;
@@ -1080,7 +1079,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
                priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
        }
 
-       node_info = (struct eth1394_node_info *)ud->device.driver_data;
+       node_info = dev_get_drvdata(&ud->device);
 
        /* First, did we receive a fragmented or unfragmented datagram? */
        hdr->words.word1 = ntohs(hdr->words.word1);
@@ -1617,8 +1616,7 @@ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev)
                if (!node)
                        goto fail;
 
-               node_info =
-                   (struct eth1394_node_info *)node->ud->device.driver_data;
+               node_info = dev_get_drvdata(&node->ud->device);
                if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE)
                        goto fail;
 
index a6d55bebe61ac1b3013f387d009116f4385cbfe0..5122b5a8aa2db1aea54221a70377a70d6e6ed021 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -39,7 +40,10 @@ struct nodemgr_csr_info {
        struct hpsb_host *host;
        nodeid_t nodeid;
        unsigned int generation;
+
+       kmemcheck_bitfield_begin(flags);
        unsigned int speed_unverified:1;
+       kmemcheck_bitfield_end(flags);
 };
 
 
@@ -1293,6 +1297,7 @@ static void nodemgr_node_scan_one(struct hpsb_host *host,
        u8 *speed;
 
        ci = kmalloc(sizeof(*ci), GFP_KERNEL);
+       kmemcheck_annotate_bitfield(ci, flags);
        if (!ci)
                return;
 
index a51ab233342de0f3e9fbf4be79d499ca3ab044cd..83b734aec923f795a5e4d79dcc162e68f5efaf5d 100644 (file)
@@ -718,7 +718,7 @@ static int sbp2_remove(struct device *dev)
        struct scsi_device *sdev;
 
        ud = container_of(dev, struct unit_directory, device);
-       lu = ud->device.driver_data;
+       lu = dev_get_drvdata(&ud->device);
        if (!lu)
                return 0;
 
@@ -746,7 +746,7 @@ static int sbp2_remove(struct device *dev)
 
 static int sbp2_update(struct unit_directory *ud)
 {
-       struct sbp2_lu *lu = ud->device.driver_data;
+       struct sbp2_lu *lu = dev_get_drvdata(&ud->device);
 
        if (sbp2_reconnect_device(lu) != 0) {
                /*
@@ -815,7 +815,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
        atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
        INIT_WORK(&lu->protocol_work, NULL);
 
-       ud->device.driver_data = lu;
+       dev_set_drvdata(&ud->device, lu);
 
        hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
        if (!hi) {
@@ -1051,7 +1051,7 @@ static void sbp2_remove_device(struct sbp2_lu *lu)
                hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
                                          lu->status_fifo_addr);
 
-       lu->ud->device.driver_data = NULL;
+       dev_set_drvdata(&lu->ud->device, NULL);
 
        module_put(hi->host->driver->owner);
 no_hi:
index 5c04cfb54cb9244dd93dd4f916a8be2538d7aa20..158a214da2f7fd36b3c215203edc368a1fdb1cb1 100644 (file)
@@ -760,9 +760,9 @@ int ib_device_register_sysfs(struct ib_device *device)
        int i;
 
        class_dev->class      = &ib_class;
-       class_dev->driver_data = device;
        class_dev->parent     = device->dma_device;
        dev_set_name(class_dev, device->name);
+       dev_set_drvdata(class_dev, device);
 
        INIT_LIST_HEAD(&device->port_list);
 
index 85905ab9391fec6703765117fcc29d0500d038d2..ce4e6eff4792519193ebe9aa29d6e8aff5aaf5c5 100644 (file)
@@ -636,7 +636,7 @@ static ssize_t  ehca_show_##name(struct device *dev,                       \
        struct hipz_query_hca *rblock;                                     \
        int data;                                                          \
                                                                           \
-       shca = dev->driver_data;                                           \
+       shca = dev_get_drvdata(dev);                                       \
                                                                           \
        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);                      \
        if (!rblock) {                                                     \
@@ -680,7 +680,7 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
 {
-       struct ehca_shca *shca = dev->driver_data;
+       struct ehca_shca *shca = dev_get_drvdata(dev);
 
        return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
 
@@ -749,7 +749,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 
        shca->ofdev = dev;
        shca->ipz_hca_handle.handle = *handle;
-       dev->dev.driver_data = shca;
+       dev_set_drvdata(&dev->dev, shca);
 
        ret = ehca_sense_attributes(shca);
        if (ret < 0) {
@@ -878,7 +878,7 @@ probe1:
 
 static int __devexit ehca_remove(struct of_device *dev)
 {
-       struct ehca_shca *shca = dev->dev.driver_data;
+       struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
        unsigned long flags;
        int ret;
 
index 5d445f48789b899528c1f30d1f1115f324d553a5..7c237e6ac7112f318c4a9fe941704948d612970e 100644 (file)
@@ -1265,8 +1265,14 @@ static struct device_type input_dev_type = {
        .uevent         = input_dev_uevent,
 };
 
+static char *input_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
+}
+
 struct class input_class = {
        .name           = "input",
+       .nodename       = input_nodename,
 };
 EXPORT_SYMBOL_GPL(input_class);
 
index 356b3a25efa24c4568a909c59dedaa7ebcbc040c..1c0b529c06aaa596b8fe4a663cd275ed4f39ead6 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/input.h>
 #include <linux/gameport.h>
 #include <linux/jiffies.h>
-#include <asm/timex.h>
+#include <linux/timex.h>
 
 #define DRIVER_DESC    "Analog joystick and gamepad driver"
 
index d6a30cee7bc79b63f5ed124d48dc36ac55bd9e05..6d67af5387adcfdff514133ca9f1ce9d0bf39f8b 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
+#include <linux/timex.h>
 #include <asm/io.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
index 69af8385ab141bc78329e103653d55103eb53558..2957d48e0045fa05d68bb38aafe6ec8c9d3462d8 100644 (file)
@@ -569,7 +569,7 @@ static int wm97xx_probe(struct device *dev)
        mutex_init(&wm->codec_mutex);
 
        wm->dev = dev;
-       dev->driver_data = wm;
+       dev_set_drvdata(dev, wm);
        wm->ac97 = to_ac97_t(dev);
 
        /* check that we have a supported codec */
index 928d2ed8865f6b296b2fe9550d599b016d06f606..b115726dc088b169003a725ba29de8a4171e58a9 100644 (file)
@@ -114,7 +114,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
                xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
                return -ENOMEM;
        }
-       dev->dev.driver_data = info;
+       dev_set_drvdata(&dev->dev, info);
        info->xbdev = dev;
        info->irq = -1;
        snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
@@ -186,7 +186,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
 
 static int xenkbd_resume(struct xenbus_device *dev)
 {
-       struct xenkbd_info *info = dev->dev.driver_data;
+       struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
 
        xenkbd_disconnect_backend(info);
        memset(info->page, 0, PAGE_SIZE);
@@ -195,7 +195,7 @@ static int xenkbd_resume(struct xenbus_device *dev)
 
 static int xenkbd_remove(struct xenbus_device *dev)
 {
-       struct xenkbd_info *info = dev->dev.driver_data;
+       struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
 
        xenkbd_disconnect_backend(info);
        if (info->kbd)
@@ -266,7 +266,7 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
 static void xenkbd_backend_changed(struct xenbus_device *dev,
                                   enum xenbus_state backend_state)
 {
-       struct xenkbd_info *info = dev->dev.driver_data;
+       struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
        int ret, val;
 
        switch (backend_state) {
index 823ceba6efa8dcccc95b14cf72986195c52b3ae8..1128d3fba797f38a89f39849eea012bc9daa22d1 100644 (file)
@@ -1513,6 +1513,7 @@ static const struct file_operations _ctl_fops = {
 static struct miscdevice _dm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = DM_NAME,
+       .devnode        = "mapper/control",
        .fops           = &_ctl_fops
 };
 
index bb42d99bc49de47163d90e01e6631aa8c1355aa8..b6da9c3873fef280ba89d00234053d3a62b11ec6 100644 (file)
@@ -48,7 +48,7 @@ MODULE_PARM_DESC(audio_std,
        "NICAM/A\n"
        "NICAM/B\n");
 
-static char firmware_name[FIRMWARE_NAME_MAX];
+static char firmware_name[30];
 module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
 MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
                                "default firmware name\n");
index a454ee8f1e438939ecfd584d7fa90cdd2ec9c7b1..479dd05762a5d57a3bbcccad3aeb14807f2e0665 100644 (file)
@@ -447,6 +447,15 @@ static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env)
        return 0;
 }
 
+static char *dvb_nodename(struct device *dev)
+{
+       struct dvb_device *dvbdev = dev_get_drvdata(dev);
+
+       return kasprintf(GFP_KERNEL, "dvb/adapter%d/%s%d",
+               dvbdev->adapter->num, dnames[dvbdev->type], dvbdev->id);
+}
+
+
 static int __init init_dvbdev(void)
 {
        int retval;
@@ -469,6 +478,7 @@ static int __init init_dvbdev(void)
                goto error;
        }
        dvb_class->dev_uevent = dvb_uevent;
+       dvb_class->nodename = dvb_nodename;
        return 0;
 
 error:
index 97495154f74632d3b9e557dd759c148d1a86cd69..e441d274e6c117010f1da2ee91f083782f0823e4 100644 (file)
@@ -196,7 +196,7 @@ struct dvb_usb_device_properties {
 #define CYPRESS_FX2     3
        int        usb_ctrl;
        int        (*download_firmware) (struct usb_device *, const struct firmware *);
-       const char firmware[FIRMWARE_NAME_MAX];
+       const char *firmware;
        int        no_reconnect;
 
        int size_of_priv;
index ba3709bec3f0a42d84d34d2ba88e8bd62feb104a..ec2f45dde1643d72a849aa15004bbde91a1ff2c3 100644 (file)
@@ -747,8 +747,14 @@ static const struct file_operations dabusb_fops =
        .release =      dabusb_release,
 };
 
+static char *dabusb_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 static struct usb_class_driver dabusb_class = {
        .name =         "dabusb%d",
+       .nodename =     dabusb_nodename,
        .fops =         &dabusb_fops,
        .minor_base =   DABUSB_MINOR,
 };
index 08d3e17c450a322e3a57cd2fc8a1877bea599d4c..d09ce83a9429af98060b7d3dd27e21923f2431ea 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/dma-mapping.h>
 #include <media/videobuf-dma-contig.h>
 
@@ -25,6 +26,7 @@ struct videobuf_dma_contig_memory {
        void *vaddr;
        dma_addr_t dma_handle;
        unsigned long size;
+       int is_userptr;
 };
 
 #define MAGIC_DC_MEM 0x0733ac61
@@ -108,6 +110,82 @@ static struct vm_operations_struct videobuf_vm_ops = {
        .close    = videobuf_vm_close,
 };
 
+/**
+ * videobuf_dma_contig_user_put() - reset pointer to user space buffer
+ * @mem: per-buffer private videobuf-dma-contig data
+ *
+ * This function resets the user space pointer
+ */
+static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
+{
+       mem->is_userptr = 0;
+       mem->dma_handle = 0;
+       mem->size = 0;
+}
+
+/**
+ * videobuf_dma_contig_user_get() - setup user space memory pointer
+ * @mem: per-buffer private videobuf-dma-contig data
+ * @vb: video buffer to map
+ *
+ * This function validates and sets up a pointer to user space memory.
+ * Only physically contiguous pfn-mapped memory is accepted.
+ *
+ * Returns 0 if successful.
+ */
+static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
+                                       struct videobuf_buffer *vb)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long prev_pfn, this_pfn;
+       unsigned long pages_done, user_address;
+       int ret;
+
+       mem->size = PAGE_ALIGN(vb->size);
+       mem->is_userptr = 0;
+       ret = -EINVAL;
+
+       down_read(&mm->mmap_sem);
+
+       vma = find_vma(mm, vb->baddr);
+       if (!vma)
+               goto out_up;
+
+       if ((vb->baddr + mem->size) > vma->vm_end)
+               goto out_up;
+
+       pages_done = 0;
+       prev_pfn = 0; /* kill warning */
+       user_address = vb->baddr;
+
+       while (pages_done < (mem->size >> PAGE_SHIFT)) {
+               ret = follow_pfn(vma, user_address, &this_pfn);
+               if (ret)
+                       break;
+
+               if (pages_done == 0)
+                       mem->dma_handle = this_pfn << PAGE_SHIFT;
+               else if (this_pfn != (prev_pfn + 1))
+                       ret = -EFAULT;
+
+               if (ret)
+                       break;
+
+               prev_pfn = this_pfn;
+               user_address += PAGE_SIZE;
+               pages_done++;
+       }
+
+       if (!ret)
+               mem->is_userptr = 1;
+
+ out_up:
+       up_read(&current->mm->mmap_sem);
+
+       return ret;
+}
+
 static void *__videobuf_alloc(size_t size)
 {
        struct videobuf_dma_contig_memory *mem;
@@ -154,12 +232,11 @@ static int __videobuf_iolock(struct videobuf_queue *q,
        case V4L2_MEMORY_USERPTR:
                dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
 
-               /* The only USERPTR currently supported is the one needed for
-                  read() method.
-                */
+               /* handle pointer from user space */
                if (vb->baddr)
-                       return -EINVAL;
+                       return videobuf_dma_contig_user_get(mem, vb);
 
+               /* allocate memory for the read() method */
                mem->size = PAGE_ALIGN(vb->size);
                mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
                                                &mem->dma_handle, GFP_KERNEL);
@@ -386,7 +463,7 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
           So, it should free memory only if the memory were allocated for
           read() operation.
         */
-       if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
+       if (buf->memory != V4L2_MEMORY_USERPTR)
                return;
 
        if (!mem)
@@ -394,6 +471,13 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
 
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
 
+       /* handle user space pointer case */
+       if (buf->baddr) {
+               videobuf_dma_contig_user_put(mem);
+               return;
+       }
+
+       /* read() method */
        dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
        mem->vaddr = NULL;
 }
index 386da1566fccb41f76a6828e3e5de2dde965ccfb..cb73051e43db8ca04e1eabbdafd65af4966f47ef 100644 (file)
@@ -35,7 +35,7 @@ struct pasic3_data {
  */
 void pasic3_write_register(struct device *dev, u32 reg, u8 val)
 {
-       struct pasic3_data *asic = dev->driver_data;
+       struct pasic3_data *asic = dev_get_drvdata(dev);
        int bus_shift = asic->bus_shift;
        void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
        void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
@@ -50,7 +50,7 @@ EXPORT_SYMBOL(pasic3_write_register); /* for leds-pasic3 */
  */
 u8 pasic3_read_register(struct device *dev, u32 reg)
 {
-       struct pasic3_data *asic = dev->driver_data;
+       struct pasic3_data *asic = dev_get_drvdata(dev);
        int bus_shift = asic->bus_shift;
        void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
        void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
index 11a6248cc1c1dd0af3a57ace1ec228ccdb3ed257..082c197ab9b861f3071f4dac1677bbcfb512dca4 100644 (file)
@@ -618,7 +618,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
 
                pdev->dev.parent = pcf->dev;
                pdev->dev.platform_data = &pdata->reg_init_data[i];
-               pdev->dev.driver_data = pcf;
+               dev_set_drvdata(&pdev->dev, pcf);
                pcf->regulator_pdev[i] = pdev;
 
                platform_device_add(pdev);
index cf30d06a0104a6ab4f6295a7abf2a8ad85700a9a..7c21bf7915696253b75064b3146a89bdf5a9c905 100644 (file)
@@ -265,7 +265,7 @@ static int wm8400_init(struct wm8400 *wm8400,
 
        mutex_init(&wm8400->io_lock);
 
-       wm8400->dev->driver_data = wm8400;
+       dev_set_drvdata(wm8400->dev, wm8400);
 
        /* Check that this is actually a WM8400 */
        ret = wm8400->read_dev(wm8400->io_data, WM8400_RESET_ID, 1, &reg);
index 0207dd59090d195b4d0d21027e511cf484e14121..b5346b4db91a2021d43c470b09bd53fbdc7a4170 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
 #include <linux/idr.h>
@@ -891,6 +892,7 @@ struct c2port_device *c2port_device_register(char *name,
                return ERR_PTR(-EINVAL);
 
        c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
+       kmemcheck_annotate_bitfield(c2dev, flags);
        if (unlikely(!c2dev))
                return ERR_PTR(-ENOMEM);
 
index bbefe77c67a93dd4f88ae2c5e139f2405986eb2c..3ce2920e2bf3fb4f37b0223ec2fa7f23bb23a0e0 100644 (file)
@@ -302,7 +302,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
                pnode = uv_node_to_pnode(nid);
                if (bid < 0 || gru_base[bid])
                        continue;
-               page = alloc_pages_node(nid, GFP_KERNEL, order);
+               page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
                if (!page)
                        goto fail;
                gru_base[bid] = page_address(page);
index 9172fcdee4e2f468e7780c965d8ffbf257cb75d8..c76677afda1b50591b855759fa711253d99458ee 100644 (file)
@@ -232,7 +232,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
        mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
        nid = cpu_to_node(cpu);
-       page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+       page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                pg_order);
        if (page == NULL) {
                dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
index 01f282cd0989d7123b0cb1b74477cded1571919d..3b6383168c6916cf57c41015d8a10c22dee0d015 100644 (file)
@@ -2206,7 +2206,7 @@ config SKGE_DEBUG
        depends on SKGE && DEBUG_FS
        help
         This option adds the ability to dump driver state for debugging.
-        The file debugfs/skge/ethX displays the state of the internal
+        The file /sys/kernel/debug/skge/ethX displays the state of the internal
         transmit and receive rings.
 
         If unsure, say N.
@@ -2232,7 +2232,7 @@ config SKY2_DEBUG
        depends on SKY2 && DEBUG_FS
        help
         This option adds the ability to dump driver state for debugging.
-        The file debugfs/sky2/ethX displays the state of the internal
+        The file /sys/kernel/debug/sky2/ethX displays the state of the internal
         transmit and receive rings.
 
         If unsure, say N.
index 811d3517fce0c99895b4738df64a60d934f86fcf..11a0ba47b67782d920f3bf5cb36d3e24d4874b63 100644 (file)
@@ -1366,6 +1366,7 @@ static const struct file_operations tun_fops = {
 static struct miscdevice tun_miscdev = {
        .minor = TUN_MINOR,
        .name = "tun",
+       .devnode = "net/tun",
        .fops = &tun_fops,
 };
 
index 1fe5da4cf0a02d918942e0f19f7a36aaf9ef1264..60330f313f2781ea65630a484564ef7750011487 100644 (file)
@@ -432,7 +432,7 @@ struct i2400m {
        unsigned ready:1;               /* all probing steps done */
        unsigned rx_reorder:1;          /* RX reorder is enabled */
        u8 trace_msg_from_user;         /* echo rx msgs to 'trace' pipe */
-                                       /* typed u8 so debugfs/u8 can tweak */
+                                       /* typed u8 so /sys/kernel/debug/u8 can tweak */
        enum i2400m_system_state state;
        wait_queue_head_t state_wq;     /* Woken up when on state updates */
 
index 509b6f94f73b94930ad95f90441e6b4002223ee9..daf0c83527d88cd0f6e64ff29e26ef7c5387fe4b 100644 (file)
@@ -28,11 +28,10 @@ config ATH5K_DEBUG
          Say Y, if and you will get debug options for ath5k.
          To use this, you need to mount debugfs:
 
-         mkdir /debug/
-         mount -t debugfs debug /debug/
+         mount -t debugfs debug /sys/kernel/debug
 
          You will get access to files under:
-         /debug/ath5k/phy0/
+         /sys/kernel/debug/ath5k/phy0/
 
          To enable debug, pass the debug level to the debug module
          parameter. For example:
index d860fc375752d928b3d51e2ec7e6cca1fdd416d8..ab6a2d518af0ff07f0cd1ff1aec21f777208a644 100644 (file)
@@ -72,7 +72,7 @@ rdrf
        location that is to be read.  This parameter must be specified in
        hexadecimal (its possible to preceed preceding the number with a "0x").
 
-       Path: /debugfs/libertas_wireless/ethX/registers/
+       Path: /sys/kernel/debug/libertas_wireless/ethX/registers/
 
        Usage:
                echo "0xa123" > rdmac ; cat rdmac
@@ -95,7 +95,7 @@ wrrf
 sleepparams
        This command is used to set the sleepclock configurations
 
-       Path: /debugfs/libertas_wireless/ethX/
+       Path: /sys/kernel/debug/libertas_wireless/ethX/
 
        Usage:
                cat sleepparams: reads the current sleepclock configuration
@@ -115,7 +115,7 @@ subscribed_events
        The subscribed_events directory contains the interface for the
        subscribed events API.
 
-       Path: /debugfs/libertas_wireless/ethX/subscribed_events/
+       Path: /sys/kernel/debug/libertas_wireless/ethX/subscribed_events/
 
        Each event is represented by a filename. Each filename consists of the
        following three fields:
@@ -165,7 +165,7 @@ subscribed_events
 extscan
        This command is used to do a specific scan.
 
-       Path: /debugfs/libertas_wireless/ethX/
+       Path: /sys/kernel/debug/libertas_wireless/ethX/
 
        Usage: echo "SSID" > extscan
 
@@ -179,7 +179,7 @@ getscantable
        Display the current contents of the driver scan table (ie. get the
        scan results).
 
-       Path: /debugfs/libertas_wireless/ethX/
+       Path: /sys/kernel/debug/libertas_wireless/ethX/
 
        Usage:
                cat getscantable
@@ -188,7 +188,7 @@ setuserscan
        Initiate a customized scan and retrieve the results
 
 
-       Path: /debugfs/libertas_wireless/ethX/
+       Path: /sys/kernel/debug/libertas_wireless/ethX/
 
     Usage:
        echo "[ARGS]" > setuserscan
index f8c2898d82b05b305e69c566e52e8291b5118348..06a46d7b3d6c2460aecd793d2827033c7c0348f6 100644 (file)
@@ -43,8 +43,8 @@ struct if_spi_card {
        struct lbs_private              *priv;
        struct libertas_spi_platform_data *pdata;
 
-       char                            helper_fw_name[FIRMWARE_NAME_MAX];
-       char                            main_fw_name[FIRMWARE_NAME_MAX];
+       char                            helper_fw_name[IF_SPI_FW_NAME_MAX];
+       char                            main_fw_name[IF_SPI_FW_NAME_MAX];
 
        /* The card ID and card revision, as reported by the hardware. */
        u16                             card_id;
@@ -1019,9 +1019,9 @@ static int if_spi_calculate_fw_names(u16 card_id,
                lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id);
                return -EAFNOSUPPORT;
        }
-       snprintf(helper_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d_hlp.bin",
+       snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d_hlp.bin",
                 chip_id_to_device_name[i].name);
-       snprintf(main_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d.bin",
+       snprintf(main_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d.bin",
                 chip_id_to_device_name[i].name);
        return 0;
 }
index 2103869cc5b06dc8940eb2a3c8c00cd89af6c8a5..f87eec410848fb023416170c0241a84938949f55 100644 (file)
@@ -22,6 +22,9 @@
 #define IF_SPI_CMD_BUF_SIZE 2400
 
 /***************** Firmware *****************/
+
+#define IF_SPI_FW_NAME_MAX 30
+
 struct chip_ident {
        u16 chip_id;
        u16 name;
index d649caebf08a1e93ffd9a136f7682340fe380f33..1844c5adf6e9fb1ad3807f5346174a75654a71c8 100644 (file)
@@ -61,11 +61,9 @@ static ssize_t if_usb_firmware_set(struct device *dev,
 {
        struct lbs_private *priv = to_net_dev(dev)->ml_priv;
        struct if_usb_card *cardp = priv->card;
-       char fwname[FIRMWARE_NAME_MAX];
        int ret;
 
-       sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
-       ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_FW);
+       ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
        if (ret == 0)
                return count;
 
@@ -88,11 +86,9 @@ static ssize_t if_usb_boot2_set(struct device *dev,
 {
        struct lbs_private *priv = to_net_dev(dev)->ml_priv;
        struct if_usb_card *cardp = priv->card;
-       char fwname[FIRMWARE_NAME_MAX];
        int ret;
 
-       sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
-       ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_BOOT2);
+       ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
        if (ret == 0)
                return count;
 
index f673253879024d18b90557be62b145feb7e99ec7..8d88daeed0c6e51c7b73046468a6e8a2606de77d 100644 (file)
@@ -1212,7 +1212,7 @@ static int __devinit netfront_probe(struct xenbus_device *dev,
        }
 
        info = netdev_priv(netdev);
-       dev->dev.driver_data = info;
+       dev_set_drvdata(&dev->dev, info);
 
        err = register_netdev(info->netdev);
        if (err) {
@@ -1233,7 +1233,7 @@ static int __devinit netfront_probe(struct xenbus_device *dev,
 
  fail:
        free_netdev(netdev);
-       dev->dev.driver_data = NULL;
+       dev_set_drvdata(&dev->dev, NULL);
        return err;
 }
 
@@ -1275,7 +1275,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
  */
 static int netfront_resume(struct xenbus_device *dev)
 {
-       struct netfront_info *info = dev->dev.driver_data;
+       struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
@@ -1600,7 +1600,7 @@ static int xennet_connect(struct net_device *dev)
 static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
 {
-       struct netfront_info *np = dev->dev.driver_data;
+       struct netfront_info *np = dev_get_drvdata(&dev->dev);
        struct net_device *netdev = np->netdev;
 
        dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
@@ -1774,7 +1774,7 @@ static struct xenbus_device_id netfront_ids[] = {
 
 static int __devexit xennet_remove(struct xenbus_device *dev)
 {
-       struct netfront_info *info = dev->dev.driver_data;
+       struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
index f415fdd9a88599296a9c5195854c25192b8fbc0b..5b89f404e668876f53da1f0278523763e3512e16 100644 (file)
@@ -373,7 +373,7 @@ static int __init eisa_probe(struct parisc_device *dev)
        if (result >= 0) {
                /* FIXME : Don't enumerate the bus twice. */
                eisa_dev.root.dev = &dev->dev;
-               dev->dev.driver_data = &eisa_dev.root;
+               dev_set_drvdata(&dev->dev, &eisa_dev.root);
                eisa_dev.root.bus_base_addr = 0;
                eisa_dev.root.res = &eisa_dev.hba.io_space;
                eisa_dev.root.slots = result;
index e5999c4cedc81d64aad819f25a9d38a78de06f78..d46dd57450acd151d4d97a7d7016e54cedab1f54 100644 (file)
@@ -2010,7 +2010,7 @@ void __init sba_init(void)
 void * sba_get_iommu(struct parisc_device *pci_hba)
 {
        struct parisc_device *sba_dev = parisc_parent(pci_hba);
-       struct sba_device *sba = sba_dev->dev.driver_data;
+       struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
        char t = sba_dev->id.hw_type;
        int iocnum = (pci_hba->hw_path >> 3);   /* rope # */
 
@@ -2031,7 +2031,7 @@ void * sba_get_iommu(struct parisc_device *pci_hba)
 void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
 {
        struct parisc_device *sba_dev = parisc_parent(pci_hba);
-       struct sba_device *sba = sba_dev->dev.driver_data;
+       struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
        char t = sba_dev->id.hw_type;
        int i;
        int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */
@@ -2073,7 +2073,7 @@ void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
 void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
 {
        struct parisc_device *sba_dev = parisc_parent(pci_hba);
-       struct sba_device *sba = sba_dev->dev.driver_data;
+       struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
        char t = sba_dev->id.hw_type;
        int base, size;
        int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */
index ea31a452b153e55e8ce378000e02ee952a5e8991..5d6de380e42ba87526cdfa2b87f658431223be86 100644 (file)
@@ -376,14 +376,14 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
                        /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL);
        if (p)
                parport_count++;
-       dev->dev.driver_data = p;
+       dev_set_drvdata(&dev->dev, p);
 
        return 0;
 }
 
 static int __devexit parport_remove_chip(struct parisc_device *dev)
 {
-       struct parport *p = dev->dev.driver_data;
+       struct parport *p = dev_get_drvdata(&dev->dev);
        if (p) {
                struct parport_gsc_private *priv = p->private_data;
                struct parport_operations *ops = p->ops;
index 1a91bf9687af969c69f26c8e67fca08747c0d426..07bbb9b3b93fe1a46ba4e15dcd4d309d31b54ba9 100644 (file)
 #include <asm/setup.h>
 #include "pci.h"
 
+const char *pci_power_names[] = {
+       "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
+};
+EXPORT_SYMBOL_GPL(pci_power_names);
+
 unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
 
 #ifdef CONFIG_PCI_DOMAINS
index e39982503863e4a1539d4f1f968a9dbf47bc3242..13ffdc35ea0eb2573f47d81953871ef61ffc8b48 100644 (file)
@@ -275,7 +275,7 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
        memset(device, 0, sizeof(struct device));
        device->bus = &pcie_port_bus_type;
        device->driver = NULL;
-       device->driver_data = NULL;
+       dev_set_drvdata(device, NULL);
        device->release = release_pcie_device;  /* callback to free pcie dev */
        dev_set_name(device, "%s:pcie%02x",
                 pci_name(parent), get_descriptor_id(port_type, service_type));
index 47cab31ff6e49c0780692ead159cf81fb4113c58..304ff6d5cf3b2ec13850a253bc908481027cd844 100644 (file)
@@ -394,7 +394,7 @@ static int pcmcia_device_probe(struct device * dev)
        p_drv = to_pcmcia_drv(dev->driver);
        s = p_dev->socket;
 
-       /* The PCMCIA code passes the match data in via dev->driver_data
+       /* The PCMCIA code passes the match data in via dev_set_drvdata(dev)
         * which is an ugly hack. Once the driver probe is called it may
         * and often will overwrite the match data so we must save it first
         *
@@ -404,7 +404,7 @@ static int pcmcia_device_probe(struct device * dev)
         * call which will then check whether there are two
         * pseudo devices, and if not, add the second one.
         */
-       did = p_dev->dev.driver_data;
+       did = dev_get_drvdata(&p_dev->dev);
 
        ds_dev_dbg(1, dev, "trying to bind to %s\n", p_drv->drv.name);
 
@@ -499,7 +499,7 @@ static int pcmcia_device_remove(struct device * dev)
         * pseudo multi-function card, we need to unbind
         * all devices
         */
-       did = p_dev->dev.driver_data;
+       did = dev_get_drvdata(&p_dev->dev);
        if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
            (p_dev->socket->device_count != 0) &&
            (p_dev->device_no == 0))
@@ -828,7 +828,6 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
 {
        struct pcmcia_socket *s = dev->socket;
        const struct firmware *fw;
-       char path[FIRMWARE_NAME_MAX];
        int ret = -ENOMEM;
        int no_funcs;
        int old_funcs;
@@ -839,16 +838,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
 
        ds_dev_dbg(1, &dev->dev, "trying to load CIS file %s\n", filename);
 
-       if (strlen(filename) > (FIRMWARE_NAME_MAX - 1)) {
-               dev_printk(KERN_WARNING, &dev->dev,
-                          "pcmcia: CIS filename is too long [%s]\n",
-                          filename);
-               return -EINVAL;
-       }
-
-       snprintf(path, sizeof(path), "%s", filename);
-
-       if (request_firmware(&fw, path, &dev->dev) == 0) {
+       if (request_firmware(&fw, filename, &dev->dev) == 0) {
                if (fw->size >= CISTPL_MAX_CIS_SIZE) {
                        ret = -EINVAL;
                        dev_printk(KERN_ERR, &dev->dev,
@@ -988,7 +978,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
                        return 0;
        }
 
-       dev->dev.driver_data = (void *) did;
+       dev_set_drvdata(&dev->dev, did);
 
        return 1;
 }
index 1703b20cad5d215ae1a2295329d87a2b253380f1..6095f8daecd7f1eb31fc1a3bc14626ad0df33db5 100644 (file)
@@ -915,12 +915,9 @@ static int ds_ioctl(struct inode * inode, struct file * file,
                err = -EPERM;
                goto free_out;
        } else {
-               static int printed = 0;
-               if (!printed) {
-                       printk(KERN_WARNING "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n");
-                       printk(KERN_WARNING "MTD handling any more.\n");
-                       printed++;
-               }
+                       printk_once(KERN_WARNING
+                               "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n");
+                       printk_once(KERN_WARNING "MTD handling any more.\n");
        }
        err = -EINVAL;
        goto free_out;
index b79f31add39cdcc6307a295d7bad69b9b493ddc5..04dc734805c61418353e2b68802a6621121c3317 100644 (file)
@@ -364,7 +364,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
        int cstat, dstat;
        int count;
 
-       raw = cdev->dev.driver_data;
+       raw = dev_get_drvdata(&cdev->dev);
        req = (struct raw3215_req *) intparm;
        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;
@@ -652,7 +652,7 @@ static int raw3215_probe (struct ccw_device *cdev)
        int line;
 
        /* Console is special. */
-       if (raw3215[0] && (cdev->dev.driver_data == raw3215[0]))
+       if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
                return 0;
        raw = kmalloc(sizeof(struct raw3215_info) +
                      RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
@@ -686,7 +686,7 @@ static int raw3215_probe (struct ccw_device *cdev)
        }
        init_waitqueue_head(&raw->empty_wait);
 
-       cdev->dev.driver_data = raw;
+       dev_set_drvdata(&cdev->dev, raw);
        cdev->handler = raw3215_irq;
 
        return 0;
@@ -697,9 +697,9 @@ static void raw3215_remove (struct ccw_device *cdev)
        struct raw3215_info *raw;
 
        ccw_device_set_offline(cdev);
-       raw = cdev->dev.driver_data;
+       raw = dev_get_drvdata(&cdev->dev);
        if (raw) {
-               cdev->dev.driver_data = NULL;
+               dev_set_drvdata(&cdev->dev, NULL);
                kfree(raw->buffer);
                kfree(raw);
        }
@@ -709,7 +709,7 @@ static int raw3215_set_online (struct ccw_device *cdev)
 {
        struct raw3215_info *raw;
 
-       raw = cdev->dev.driver_data;
+       raw = dev_get_drvdata(&cdev->dev);
        if (!raw)
                return -ENODEV;
 
@@ -720,7 +720,7 @@ static int raw3215_set_offline (struct ccw_device *cdev)
 {
        struct raw3215_info *raw;
 
-       raw = cdev->dev.driver_data;
+       raw = dev_get_drvdata(&cdev->dev);
        if (!raw)
                return -ENODEV;
 
@@ -898,7 +898,7 @@ static int __init con3215_init(void)
        raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
        raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
        raw->cdev = cdev;
-       cdev->dev.driver_data = raw;
+       dev_set_drvdata(&cdev->dev, raw);
        cdev->handler = raw3215_irq;
 
        raw->flags |= RAW3215_FIXED;
index 81c151b5f0ac5b9543a55f2df00aae844a3684f6..acab7b2dfe8ab2a23174487dda6658a1c7d2ac5d 100644 (file)
@@ -357,7 +357,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
        struct raw3270_request *rq;
        int rc;
 
-       rp = (struct raw3270 *) cdev->dev.driver_data;
+       rp = dev_get_drvdata(&cdev->dev);
        if (!rp)
                return;
        rq = (struct raw3270_request *) intparm;
@@ -831,7 +831,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
        if (rp->minor == -1)
                return -EUSERS;
        rp->cdev = cdev;
-       cdev->dev.driver_data = rp;
+       dev_set_drvdata(&cdev->dev, rp);
        cdev->handler = raw3270_irq;
        return 0;
 }
@@ -1112,7 +1112,7 @@ raw3270_delete_device(struct raw3270 *rp)
        /* Disconnect from ccw_device. */
        cdev = rp->cdev;
        rp->cdev = NULL;
-       cdev->dev.driver_data = NULL;
+       dev_set_drvdata(&cdev->dev, NULL);
        cdev->handler = NULL;
 
        /* Put ccw_device structure. */
@@ -1136,7 +1136,7 @@ static ssize_t
 raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        return snprintf(buf, PAGE_SIZE, "%i\n",
-                       ((struct raw3270 *) dev->driver_data)->model);
+                       ((struct raw3270 *) dev_get_drvdata(dev))->model);
 }
 static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
 
@@ -1144,7 +1144,7 @@ static ssize_t
 raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        return snprintf(buf, PAGE_SIZE, "%i\n",
-                       ((struct raw3270 *) dev->driver_data)->rows);
+                       ((struct raw3270 *) dev_get_drvdata(dev))->rows);
 }
 static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
 
@@ -1152,7 +1152,7 @@ static ssize_t
 raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        return snprintf(buf, PAGE_SIZE, "%i\n",
-                       ((struct raw3270 *) dev->driver_data)->cols);
+                       ((struct raw3270 *) dev_get_drvdata(dev))->cols);
 }
 static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
 
@@ -1289,7 +1289,7 @@ raw3270_remove (struct ccw_device *cdev)
        struct raw3270_view *v;
        struct raw3270_notifier *np;
 
-       rp = cdev->dev.driver_data;
+       rp = dev_get_drvdata(&cdev->dev);
        /*
         * _remove is the opposite of _probe; it's probe that
         * should set up rp.  raw3270_remove gets entered for
@@ -1337,7 +1337,7 @@ raw3270_set_offline (struct ccw_device *cdev)
 {
        struct raw3270 *rp;
 
-       rp = cdev->dev.driver_data;
+       rp = dev_get_drvdata(&cdev->dev);
        if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
                return -EBUSY;
        raw3270_remove(cdev);
index 144d2a5e1a92f78ce91791d2f3fa8a161ce281e8..5a519fac37b7187505499ff33e95103da31a9e9a 100644 (file)
@@ -1289,7 +1289,7 @@ static int
 tape_34xx_online(struct ccw_device *cdev)
 {
        return tape_generic_online(
-               cdev->dev.driver_data,
+               dev_get_drvdata(&cdev->dev),
                &tape_discipline_34xx
        );
 }
index 23e6598bc4b5f01bd0137e1e50aecd37d9f2e784..418f72dd39b4d4836457edd8e87a9b6b25808566 100644 (file)
@@ -1703,7 +1703,7 @@ static struct ccw_device_id tape_3590_ids[] = {
 static int
 tape_3590_online(struct ccw_device *cdev)
 {
-       return tape_generic_online(cdev->dev.driver_data,
+       return tape_generic_online(dev_get_drvdata(&cdev->dev),
                                   &tape_discipline_3590);
 }
 
index 3ebaa8eb5c862a284aa740d2de7df361b27f39d0..595aa04cfd019699e09cd03b99e9774c7f99a100 100644 (file)
@@ -92,7 +92,7 @@ tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *
 {
        struct tape_device *tdev;
 
-       tdev = (struct tape_device *) dev->driver_data;
+       tdev = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
 }
 
@@ -104,7 +104,7 @@ tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *b
 {
        struct tape_device *tdev;
 
-       tdev = (struct tape_device *) dev->driver_data;
+       tdev = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
 }
 
@@ -116,7 +116,7 @@ tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct tape_device *tdev;
 
-       tdev = (struct tape_device *) dev->driver_data;
+       tdev = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
                "OFFLINE" : tape_state_verbose[tdev->tape_state]);
 }
@@ -130,7 +130,7 @@ tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf
        struct tape_device *tdev;
        ssize_t rc;
 
-       tdev = (struct tape_device *) dev->driver_data;
+       tdev = dev_get_drvdata(dev);
        if (tdev->first_minor < 0)
                return scnprintf(buf, PAGE_SIZE, "N/A\n");
 
@@ -156,7 +156,7 @@ tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf
 {
        struct tape_device *tdev;
 
-       tdev = (struct tape_device *) dev->driver_data;
+       tdev = dev_get_drvdata(dev);
 
        return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
 }
@@ -440,7 +440,7 @@ tape_generic_offline(struct ccw_device *cdev)
 {
        struct tape_device *device;
 
-       device = cdev->dev.driver_data;
+       device = dev_get_drvdata(&cdev->dev);
        if (!device) {
                return -ENODEV;
        }
@@ -583,7 +583,7 @@ tape_generic_probe(struct ccw_device *cdev)
                tape_put_device(device);
                return ret;
        }
-       cdev->dev.driver_data = device;
+       dev_set_drvdata(&cdev->dev, device);
        cdev->handler = __tape_do_irq;
        device->cdev = cdev;
        ccw_device_get_id(cdev, &dev_id);
@@ -622,7 +622,7 @@ tape_generic_remove(struct ccw_device *cdev)
 {
        struct tape_device *    device;
 
-       device = cdev->dev.driver_data;
+       device = dev_get_drvdata(&cdev->dev);
        if (!device) {
                return;
        }
@@ -662,9 +662,9 @@ tape_generic_remove(struct ccw_device *cdev)
                        tape_cleanup_device(device);
        }
 
-       if (cdev->dev.driver_data != NULL) {
+       if (dev_get_drvdata(&cdev->dev) != NULL) {
                sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
-               cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
+               dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev)));
        }
 }
 
@@ -1060,7 +1060,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
        struct tape_request *request;
        int rc;
 
-       device = (struct tape_device *) cdev->dev.driver_data;
+       device = dev_get_drvdata(&cdev->dev);
        if (device == NULL) {
                return;
        }
index e925808c2149c9d45c9b2e64ede5de5b79798e4c..411cfa3c77196261c126b79356563505b8bb901e 100644 (file)
@@ -504,7 +504,7 @@ static ssize_t vmlogrdr_autopurge_store(struct device * dev,
                                        struct device_attribute *attr,
                                        const char * buf, size_t count)
 {
-       struct vmlogrdr_priv_t *priv = dev->driver_data;
+       struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret = count;
 
        switch (buf[0]) {
@@ -525,7 +525,7 @@ static ssize_t vmlogrdr_autopurge_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
 {
-       struct vmlogrdr_priv_t *priv = dev->driver_data;
+       struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", priv->autopurge);
 }
 
@@ -541,7 +541,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
 
        char cp_command[80];
        char cp_response[80];
-       struct vmlogrdr_priv_t *priv = dev->driver_data;
+       struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
 
        if (buf[0] != '1')
                return -EINVAL;
@@ -578,7 +578,7 @@ static ssize_t vmlogrdr_autorecording_store(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
 {
-       struct vmlogrdr_priv_t *priv = dev->driver_data;
+       struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret = count;
 
        switch (buf[0]) {
@@ -599,7 +599,7 @@ static ssize_t vmlogrdr_autorecording_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
 {
-       struct vmlogrdr_priv_t *priv = dev->driver_data;
+       struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", priv->autorecording);
 }
 
@@ -612,7 +612,7 @@ static ssize_t vmlogrdr_recording_store(struct device * dev,
                                        struct device_attribute *attr,
                                        const char * buf, size_t count)
 {
-       struct vmlogrdr_priv_t *priv = dev->driver_data;
+       struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret;
 
        switch (buf[0]) {
index 92458219a9e9a893592242df1c73e2a314131220..7d9e67cb64714494043bf62449632a27780fa0be 100644 (file)
@@ -80,11 +80,11 @@ static DEFINE_MUTEX(vmur_mutex);
  *
  * Each ur device (urd) contains a reference to its corresponding ccw device
  * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
- * ur device using the cdev->dev.driver_data pointer.
+ * ur device using the dev_get_drvdata(&cdev->dev) pointer.
  *
  * urd references:
  * - ur_probe gets a urd reference, ur_remove drops the reference
- *   (cdev->dev.driver_data)
+ *   dev_get_drvdata(&cdev->dev)
  * - ur_open gets a urd reference, ur_relase drops the reference
  *   (urf->urd)
  *
@@ -92,7 +92,7 @@ static DEFINE_MUTEX(vmur_mutex);
  * - urdev_alloc get a cdev reference (urd->cdev)
  * - urdev_free drops the cdev reference (urd->cdev)
  *
- * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev lock
+ * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
  */
 static struct urdev *urdev_alloc(struct ccw_device *cdev)
 {
@@ -131,7 +131,7 @@ static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
        unsigned long flags;
 
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-       urd = cdev->dev.driver_data;
+       urd = dev_get_drvdata(&cdev->dev);
        if (urd)
                urdev_get(urd);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
@@ -310,7 +310,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
                TRACE("ur_int_handler: unsolicited interrupt\n");
                return;
        }
-       urd = cdev->dev.driver_data;
+       urd = dev_get_drvdata(&cdev->dev);
        BUG_ON(!urd);
        /* On special conditions irb is an error pointer */
        if (IS_ERR(irb))
@@ -856,7 +856,7 @@ static int ur_probe(struct ccw_device *cdev)
                goto fail_remove_attr;
        }
        spin_lock_irq(get_ccwdev_lock(cdev));
-       cdev->dev.driver_data = urd;
+       dev_set_drvdata(&cdev->dev, urd);
        spin_unlock_irq(get_ccwdev_lock(cdev));
 
        mutex_unlock(&vmur_mutex);
@@ -996,8 +996,8 @@ static void ur_remove(struct ccw_device *cdev)
        ur_remove_attributes(&cdev->dev);
 
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-       urdev_put(cdev->dev.driver_data);
-       cdev->dev.driver_data = NULL;
+       urdev_put(dev_get_drvdata(&cdev->dev));
+       dev_set_drvdata(&cdev->dev, NULL);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
        mutex_unlock(&vmur_mutex);
index d40f7a934f94e217d9fc1db1a099a4953e0ad6fa..f370f8d460a721674f5537d79c8e528d23bb4b5e 100644 (file)
@@ -290,7 +290,7 @@ claw_probe(struct ccwgroup_device *cgdev)
        if (!get_device(&cgdev->dev))
                return -ENODEV;
        privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
-       cgdev->dev.driver_data = privptr;
+       dev_set_drvdata(&cgdev->dev, privptr);
        if (privptr == NULL) {
                probe_error(cgdev);
                put_device(&cgdev->dev);
@@ -597,14 +597,14 @@ claw_irq_handler(struct ccw_device *cdev,
 
        CLAW_DBF_TEXT(4, trace, "clawirq");
         /* Bypass all 'unsolicited interrupts' */
-       if (!cdev->dev.driver_data) {
+       privptr = dev_get_drvdata(&cdev->dev);
+       if (!privptr) {
                dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
                        " IRQ, c-%02x d-%02x\n",
                        irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
                CLAW_DBF_TEXT(2, trace, "badirq");
                 return;
         }
-       privptr = (struct claw_privbk *)cdev->dev.driver_data;
 
        /* Try to extract channel from driver data. */
        if (privptr->channel[READ].cdev == cdev)
@@ -1986,9 +1986,9 @@ probe_error( struct ccwgroup_device *cgdev)
        struct claw_privbk *privptr;
 
        CLAW_DBF_TEXT(4, trace, "proberr");
-       privptr = (struct claw_privbk *) cgdev->dev.driver_data;
+       privptr = dev_get_drvdata(&cgdev->dev);
        if (privptr != NULL) {
-               cgdev->dev.driver_data = NULL;
+               dev_set_drvdata(&cgdev->dev, NULL);
                kfree(privptr->p_env);
                kfree(privptr->p_mtc_envelope);
                kfree(privptr);
@@ -2917,9 +2917,9 @@ claw_new_device(struct ccwgroup_device *cgdev)
        dev_info(&cgdev->dev, "add for %s\n",
                 dev_name(&cgdev->cdev[READ]->dev));
        CLAW_DBF_TEXT(2, setup, "new_dev");
-       privptr = cgdev->dev.driver_data;
-       cgdev->cdev[READ]->dev.driver_data = privptr;
-       cgdev->cdev[WRITE]->dev.driver_data = privptr;
+       privptr = dev_get_drvdata(&cgdev->dev);
+       dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
        if (!privptr)
                return -ENODEV;
        p_env = privptr->p_env;
@@ -2956,9 +2956,9 @@ claw_new_device(struct ccwgroup_device *cgdev)
                goto out;
        }
        dev->ml_priv = privptr;
-       cgdev->dev.driver_data = privptr;
-        cgdev->cdev[READ]->dev.driver_data = privptr;
-        cgdev->cdev[WRITE]->dev.driver_data = privptr;
+       dev_set_drvdata(&cgdev->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
+       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
        /* sysfs magic */
         SET_NETDEV_DEV(dev, &cgdev->dev);
        if (register_netdev(dev) != 0) {
@@ -3024,7 +3024,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
        int     ret;
 
        CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
-       priv = cgdev->dev.driver_data;
+       priv = dev_get_drvdata(&cgdev->dev);
        if (!priv)
                return -ENODEV;
        ndev = priv->channel[READ].ndev;
@@ -3054,7 +3054,7 @@ claw_remove_device(struct ccwgroup_device *cgdev)
 
        BUG_ON(!cgdev);
        CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
-       priv = cgdev->dev.driver_data;
+       priv = dev_get_drvdata(&cgdev->dev);
        BUG_ON(!priv);
        dev_info(&cgdev->dev, " will be removed.\n");
        if (cgdev->state == CCWGROUP_ONLINE)
@@ -3069,9 +3069,9 @@ claw_remove_device(struct ccwgroup_device *cgdev)
        kfree(priv->channel[1].irb);
        priv->channel[1].irb=NULL;
        kfree(priv);
-       cgdev->dev.driver_data=NULL;
-       cgdev->cdev[READ]->dev.driver_data = NULL;
-       cgdev->cdev[WRITE]->dev.driver_data = NULL;
+       dev_set_drvdata(&cgdev->dev, NULL);
+       dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
+       dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
        put_device(&cgdev->dev);
 
        return;
@@ -3087,7 +3087,7 @@ claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct claw_privbk *priv;
        struct claw_env *  p_env;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3101,7 +3101,7 @@ claw_hname_write(struct device *dev, struct device_attribute *attr,
        struct claw_privbk *priv;
        struct claw_env *  p_env;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3125,7 +3125,7 @@ claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct claw_privbk *priv;
        struct claw_env *  p_env;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3139,7 +3139,7 @@ claw_adname_write(struct device *dev, struct device_attribute *attr,
        struct claw_privbk *priv;
        struct claw_env *  p_env;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3163,7 +3163,7 @@ claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct claw_privbk *priv;
        struct claw_env *  p_env;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3178,7 +3178,7 @@ claw_apname_write(struct device *dev, struct device_attribute *attr,
        struct claw_privbk *priv;
        struct claw_env *  p_env;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3212,7 +3212,7 @@ claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct claw_privbk *priv;
        struct claw_env * p_env;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3227,7 +3227,7 @@ claw_wbuff_write(struct device *dev, struct device_attribute *attr,
        struct claw_env *  p_env;
        int nnn,max;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3254,7 +3254,7 @@ claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
        struct claw_privbk *priv;
        struct claw_env *  p_env;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
@@ -3269,7 +3269,7 @@ claw_rbuff_write(struct device *dev, struct device_attribute *attr,
        struct claw_env *p_env;
        int nnn,max;
 
-       priv = dev->driver_data;
+       priv = dev_get_drvdata(dev);
        if (!priv)
                return -ENODEV;
        p_env = priv->p_env;
index 07a25c3f94b69e1164250bb1de9736fe41c80f69..8c675905448b74c33983c7e5038e4b8688a15ce0 100644 (file)
@@ -1936,7 +1936,7 @@ lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
 {
         struct lcs_card *card;
 
-       card = (struct lcs_card *)dev->driver_data;
+       card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
@@ -1953,7 +1953,7 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
         struct lcs_card *card;
         int value;
 
-       card = (struct lcs_card *)dev->driver_data;
+       card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
@@ -1987,7 +1987,7 @@ lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct lcs_card *card;
 
-       card = (struct lcs_card *)dev->driver_data;
+       card = dev_get_drvdata(dev);
 
        return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
 }
@@ -1998,7 +1998,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
         struct lcs_card *card;
         int value;
 
-       card = (struct lcs_card *)dev->driver_data;
+       card = dev_get_drvdata(dev);
 
         if (!card)
                 return 0;
@@ -2017,7 +2017,7 @@ static ssize_t
 lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
                      const char *buf, size_t count)
 {
-       struct lcs_card *card = dev->driver_data;
+       struct lcs_card *card = dev_get_drvdata(dev);
        char *tmp;
        int i;
 
@@ -2070,7 +2070,7 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
                put_device(&ccwgdev->dev);
                return ret;
         }
-       ccwgdev->dev.driver_data = card;
+       dev_set_drvdata(&ccwgdev->dev, card);
        ccwgdev->cdev[0]->handler = lcs_irq;
        ccwgdev->cdev[1]->handler = lcs_irq;
        card->gdev = ccwgdev;
@@ -2087,7 +2087,7 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev)
        struct lcs_card *card;
 
        LCS_DBF_TEXT(2, setup, "regnetdv");
-       card = (struct lcs_card *)ccwgdev->dev.driver_data;
+       card = dev_get_drvdata(&ccwgdev->dev);
        if (card->dev->reg_state != NETREG_UNINITIALIZED)
                return 0;
        SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
@@ -2120,7 +2120,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
        enum lcs_dev_states recover_state;
        int rc;
 
-       card = (struct lcs_card *)ccwgdev->dev.driver_data;
+       card = dev_get_drvdata(&ccwgdev->dev);
        if (!card)
                return -ENODEV;
 
@@ -2226,7 +2226,7 @@ __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
        int ret;
 
        LCS_DBF_TEXT(3, setup, "shtdndev");
-       card = (struct lcs_card *)ccwgdev->dev.driver_data;
+       card = dev_get_drvdata(&ccwgdev->dev);
        if (!card)
                return -ENODEV;
        if (recovery_mode == 0) {
@@ -2293,7 +2293,7 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
 {
        struct lcs_card *card;
 
-       card = (struct lcs_card *)ccwgdev->dev.driver_data;
+       card = dev_get_drvdata(&ccwgdev->dev);
        if (!card)
                return;
 
index d58fea52557dd4338afffcdf06fb93f4d88ae710..6d668642af270e27b6b376ce99c2b3276cbc63a8 100644 (file)
@@ -34,8 +34,8 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
  *     sysfs related stuff
  */
 #define CARD_FROM_DEV(cdev) \
-       (struct lcs_card *) \
-       ((struct ccwgroup_device *)cdev->dev.driver_data)->dev.driver_data;
+       (struct lcs_card *) dev_get_drvdata( \
+               &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
 /**
  * CCW commands used in this driver
  */
index fdb02d043d3ed844d8462977888d044797a21086..52574ce797b24f92e91edf49c20174d08fca28ed 100644 (file)
@@ -1452,7 +1452,7 @@ static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
@@ -1461,7 +1461,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->conn->netdev;
        char    *p;
        char    *tmp;
@@ -1518,7 +1518,8 @@ static DEVICE_ATTR(user, 0644, user_show, user_write);
 
 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
                            char *buf)
-{      struct netiucv_priv *priv = dev->driver_data;
+{
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%d\n", priv->conn->max_buffsize);
@@ -1527,7 +1528,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->conn->netdev;
        char         *e;
        int          bs1;
@@ -1575,7 +1576,7 @@ static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
@@ -1586,7 +1587,7 @@ static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
 static ssize_t conn_fsm_show (struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
@@ -1597,7 +1598,7 @@ static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
 static ssize_t maxmulti_show (struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
@@ -1607,7 +1608,7 @@ static ssize_t maxmulti_write (struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 4, __func__);
        priv->conn->prof.maxmulti = 0;
@@ -1619,7 +1620,7 @@ static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
@@ -1628,7 +1629,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 4, __func__);
        priv->conn->prof.maxcqueue = 0;
@@ -1640,7 +1641,7 @@ static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
@@ -1649,7 +1650,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 4, __func__);
        priv->conn->prof.doios_single = 0;
@@ -1661,7 +1662,7 @@ static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
@@ -1670,7 +1671,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        priv->conn->prof.doios_multi = 0;
@@ -1682,7 +1683,7 @@ static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
@@ -1691,7 +1692,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 4, __func__);
        priv->conn->prof.txlen = 0;
@@ -1703,7 +1704,7 @@ static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
@@ -1712,7 +1713,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 4, __func__);
        priv->conn->prof.tx_time = 0;
@@ -1724,7 +1725,7 @@ static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
@@ -1733,7 +1734,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 4, __func__);
        priv->conn->prof.tx_pending = 0;
@@ -1745,7 +1746,7 @@ static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 5, __func__);
        return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
@@ -1754,7 +1755,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
-       struct netiucv_priv *priv = dev->driver_data;
+       struct netiucv_priv *priv = dev_get_drvdata(dev);
 
        IUCV_DBF_TEXT(trace, 4, __func__);
        priv->conn->prof.tx_max_pending = 0;
@@ -1845,7 +1846,7 @@ static int netiucv_register_device(struct net_device *ndev)
        if (ret)
                goto out_unreg;
        priv->dev = dev;
-       dev->driver_data = priv;
+       dev_set_drvdata(dev, priv);
        return 0;
 
 out_unreg:
index ed0e3e55652afdec9c99d480458a5577e1e1a0d0..538135783aab017b0c2ba0ea782801912b0b93a1 100644 (file)
@@ -646,7 +646,7 @@ static int aha1740_probe (struct device *dev)
 
 static __devexit int aha1740_remove (struct device *dev)
 {
-       struct Scsi_Host *shpnt = dev->driver_data;
+       struct Scsi_Host *shpnt = dev_get_drvdata(dev);
        struct aha1740_hostdata *host = HOSTDATA (shpnt);
 
        scsi_remove_host(shpnt);
index 11d2602ae88ecd2ffd4c2ab8b759538dac8a02c8..869a11bdccbdf3cf63773b2bebc1ff1358d623a8 100644 (file)
@@ -1877,7 +1877,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        unsigned long wait_switch = 0;
        int rc;
 
-       vdev->dev.driver_data = NULL;
+       dev_set_drvdata(&vdev->dev, NULL);
 
        host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
        if (!host) {
@@ -1949,7 +1949,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
                        scsi_scan_host(host);
        }
 
-       vdev->dev.driver_data = hostdata;
+       dev_set_drvdata(&vdev->dev, hostdata);
        return 0;
 
       add_srp_port_failed:
@@ -1968,7 +1968,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
-       struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+       struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
        unmap_persist_bufs(hostdata);
        release_event_pool(&hostdata->pool, hostdata);
        ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
index e2dd6a45924a5f598f6dafa05eff545b2aa5292b..d5eaf972710978a9b24464c4e20fadbdac5babae 100644 (file)
@@ -892,7 +892,7 @@ free_vport:
 
 static int ibmvstgt_remove(struct vio_dev *dev)
 {
-       struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
+       struct srp_target *target = dev_get_drvdata(&dev->dev);
        struct Scsi_Host *shost = target->shost;
        struct vio_port *vport = target->ldata;
 
index 15e2d132e8b9e07b99d39c4079557a3d16604cc3..2742ae8a3d091a13b989fb4f5a7de57e4f7d0490 100644 (file)
@@ -135,7 +135,7 @@ int srp_target_alloc(struct srp_target *target, struct device *dev,
        INIT_LIST_HEAD(&target->cmd_queue);
 
        target->dev = dev;
-       target->dev->driver_data = target;
+       dev_set_drvdata(target->dev, target);
 
        target->srp_iu_size = iu_size;
        target->rx_ring_size = nr;
index 2b02b1fb39a09842a1b18216699ac31b83eef3bb..8d0f0de76b6336098e3c76411002d1f8529244ae 100644 (file)
@@ -53,8 +53,7 @@
  * debugfs interface
  *
  * To access this interface the user should:
- * # mkdir /debug
- * # mount -t debugfs none /debug
+ * # mount -t debugfs none /sys/kernel/debug
  *
  * The lpfc debugfs directory hierarchy is:
  * lpfc/lpfcX/vportY
index 14f8fa9135be49c664f64d80c9633bf1d405f85c..54483cd3529e00e85df152efb5cb95c3c654d924 100644 (file)
@@ -122,7 +122,7 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev,
 
        info->type = port_type;
        info->line = ret;
-       ofdev->dev.driver_data = info;
+       dev_set_drvdata(&ofdev->dev, info);
        return 0;
 out:
        kfree(info);
@@ -135,7 +135,7 @@ out:
  */
 static int of_platform_serial_remove(struct of_device *ofdev)
 {
-       struct of_serial_info *info = ofdev->dev.driver_data;
+       struct of_serial_info *info = dev_get_drvdata(&ofdev->dev);
        switch (info->type) {
 #ifdef CONFIG_SERIAL_8250
        case PORT_8250 ... PORT_MAX_8250:
index f4573a96af2459620b0b4b4d5f09c6f87acaf11a..a32ccb44065eb58df94d84dbfe11d24c7952723f 100644 (file)
@@ -711,12 +711,12 @@ static int of_mpc83xx_spi_get_chipselects(struct device *dev)
                return 0;
        }
 
-       pinfo->gpios = kmalloc(ngpios * sizeof(pinfo->gpios), GFP_KERNEL);
+       pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
        if (!pinfo->gpios)
                return -ENOMEM;
-       memset(pinfo->gpios, -1, ngpios * sizeof(pinfo->gpios));
+       memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
 
-       pinfo->alow_flags = kzalloc(ngpios * sizeof(pinfo->alow_flags),
+       pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
                                    GFP_KERNEL);
        if (!pinfo->alow_flags) {
                ret = -ENOMEM;
index 9c62f787cc9cde11ed7b866a9c53e8f82c5eff18..39d0926d1a9015394043091929bfdbce0ce1ed28 100644 (file)
@@ -2336,7 +2336,7 @@ static int ATEN2011_startup(struct usb_serial *serial)
        return 0;
 }
 
-static void ATEN2011_shutdown(struct usb_serial *serial)
+static void ATEN2011_release(struct usb_serial *serial)
 {
        int i;
        struct ATENINTL_port *ATEN2011_port;
@@ -2382,7 +2382,7 @@ static struct usb_serial_driver aten_serial_driver = {
        .tiocmget =             ATEN2011_tiocmget,
        .tiocmset =             ATEN2011_tiocmset,
        .attach =               ATEN2011_startup,
-       .shutdown =             ATEN2011_shutdown,
+       .release =              ATEN2011_release,
        .read_bulk_callback =   ATEN2011_bulk_in_callback,
        .read_int_callback =    ATEN2011_interrupt_callback,
 };
index 5e38ba10a3a90ed2a35687c4e73d636e1626a817..0a69672097a8195e3671e1d63b164c19fa0ababc 100644 (file)
@@ -417,7 +417,7 @@ static LIST_HEAD(thermal_hwmon_list);
 static ssize_t
 name_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct thermal_hwmon_device *hwmon = dev->driver_data;
+       struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
        return sprintf(buf, "%s\n", hwmon->type);
 }
 static DEVICE_ATTR(name, 0444, name_show, NULL);
@@ -488,7 +488,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
                result = PTR_ERR(hwmon->device);
                goto free_mem;
        }
-       hwmon->device->driver_data = hwmon;
+       dev_set_drvdata(hwmon->device, hwmon);
        result = device_create_file(hwmon->device, &dev_attr_name);
        if (result)
                goto unregister_hwmon_device;
index 5eee3f82be5d98d0cb1df531c9d78573c482ddbc..dcd49f1e96d006d1abb04271687d90f0f52a9ef5 100644 (file)
@@ -64,6 +64,7 @@ config USB_ARCH_HAS_EHCI
 config USB
        tristate "Support for Host-side USB"
        depends on USB_ARCH_HAS_HCD
+       select NLS  # for UTF-8 strings
        ---help---
          Universal Serial Bus (USB) is a specification for a serial bus
          subsystem which offers higher speeds and more features than the
index 0a3dc5ece634ba641167a738a1aed2cbe793e269..19cb7d5480d78636f3eb036dbd77c37fc99a16d9 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_USB_ISP116X_HCD) += host/
 obj-$(CONFIG_USB_OHCI_HCD)     += host/
 obj-$(CONFIG_USB_UHCI_HCD)     += host/
 obj-$(CONFIG_USB_FHCI_HCD)     += host/
+obj-$(CONFIG_USB_XHCI_HCD)     += host/
 obj-$(CONFIG_USB_SL811_HCD)    += host/
 obj-$(CONFIG_USB_U132_HCD)     += host/
 obj-$(CONFIG_USB_R8A66597_HCD) += host/
index 9cf9ff69e3e3505e08f9c2346b4d4095775a9981..d171b563e94c7e2bb10c092834799a434b6b8956 100644 (file)
@@ -306,6 +306,7 @@ enum {
 #define FW_GET_BYTE(p) *((__u8 *) (p))
 
 #define FW_DIR "ueagle-atm/"
+#define UEA_FW_NAME_MAX 30
 #define NB_MODEM 4
 
 #define BULK_TIMEOUT 300
@@ -1564,9 +1565,9 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
                file = cmv_file[sc->modem_index];
 
        strcpy(cmv_name, FW_DIR);
-       strlcat(cmv_name, file, FIRMWARE_NAME_MAX);
+       strlcat(cmv_name, file, UEA_FW_NAME_MAX);
        if (ver == 2)
-               strlcat(cmv_name, ".v2", FIRMWARE_NAME_MAX);
+               strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
 }
 
 static int request_cmvs_old(struct uea_softc *sc,
@@ -1574,7 +1575,7 @@ static int request_cmvs_old(struct uea_softc *sc,
 {
        int ret, size;
        u8 *data;
-       char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
+       char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
 
        cmvs_file_name(sc, cmv_name, 1);
        ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
@@ -1608,7 +1609,7 @@ static int request_cmvs(struct uea_softc *sc,
        int ret, size;
        u32 crc;
        u8 *data;
-       char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */
+       char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
 
        cmvs_file_name(sc, cmv_name, 2);
        ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
index ddeb6919253734ebcb3b7a92b6294b30bb305d52..38bfdb0f666058061081e6035512843cb0eaa850 100644 (file)
@@ -937,9 +937,9 @@ static int acm_probe(struct usb_interface *intf,
        int buflen = intf->altsetting->extralen;
        struct usb_interface *control_interface;
        struct usb_interface *data_interface;
-       struct usb_endpoint_descriptor *epctrl;
-       struct usb_endpoint_descriptor *epread;
-       struct usb_endpoint_descriptor *epwrite;
+       struct usb_endpoint_descriptor *epctrl = NULL;
+       struct usb_endpoint_descriptor *epread = NULL;
+       struct usb_endpoint_descriptor *epwrite = NULL;
        struct usb_device *usb_dev = interface_to_usbdev(intf);
        struct acm *acm;
        int minor;
@@ -952,6 +952,7 @@ static int acm_probe(struct usb_interface *intf,
        unsigned long quirks;
        int num_rx_buf;
        int i;
+       int combined_interfaces = 0;
 
        /* normal quirks */
        quirks = (unsigned long)id->driver_info;
@@ -1033,9 +1034,15 @@ next_desc:
                        data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
                        control_interface = intf;
                } else {
-                       dev_dbg(&intf->dev,
-                                       "No union descriptor, giving up\n");
-                       return -ENODEV;
+                       if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
+                               dev_dbg(&intf->dev,"No union descriptor, giving up\n");
+                               return -ENODEV;
+                       } else {
+                               dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n");
+                               combined_interfaces = 1;
+                               control_interface = data_interface = intf;
+                               goto look_for_collapsed_interface;
+                       }
                }
        } else {
                control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
@@ -1049,6 +1056,36 @@ next_desc:
        if (data_interface_num != call_interface_num)
                dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
 
+       if (control_interface == data_interface) {
+               /* some broken devices designed for windows work this way */
+               dev_warn(&intf->dev,"Control and data interfaces are not separated!\n");
+               combined_interfaces = 1;
+               /* a popular other OS doesn't use it */
+               quirks |= NO_CAP_LINE;
+               if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
+                       dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
+                       return -EINVAL;
+               }
+look_for_collapsed_interface:
+               for (i = 0; i < 3; i++) {
+                       struct usb_endpoint_descriptor *ep;
+                       ep = &data_interface->cur_altsetting->endpoint[i].desc;
+
+                       if (usb_endpoint_is_int_in(ep))
+                               epctrl = ep;
+                       else if (usb_endpoint_is_bulk_out(ep))
+                               epwrite = ep;
+                       else if (usb_endpoint_is_bulk_in(ep))
+                               epread = ep;
+                       else
+                               return -EINVAL;
+               }
+               if (!epctrl || !epread || !epwrite)
+                       return -ENODEV;
+               else
+                       goto made_compressed_probe;
+       }
+
 skip_normal_probe:
 
        /*workaround for switched interfaces */
@@ -1068,10 +1105,11 @@ skip_normal_probe:
        }
 
        /* Accept probe requests only for the control interface */
-       if (intf != control_interface)
+       if (!combined_interfaces && intf != control_interface)
                return -ENODEV;
 
-       if (usb_interface_claimed(data_interface)) { /* valid in this context */
+       if (!combined_interfaces && usb_interface_claimed(data_interface)) {
+               /* valid in this context */
                dev_dbg(&intf->dev, "The data interface isn't available\n");
                return -EBUSY;
        }
@@ -1095,6 +1133,7 @@ skip_normal_probe:
                epread = epwrite;
                epwrite = t;
        }
+made_compressed_probe:
        dbg("interfaces are valid");
        for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
 
@@ -1112,12 +1151,15 @@ skip_normal_probe:
        ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
        readsize = le16_to_cpu(epread->wMaxPacketSize) *
                                (quirks == SINGLE_RX_URB ? 1 : 2);
+       acm->combined_interfaces = combined_interfaces;
        acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
        acm->control = control_interface;
        acm->data = data_interface;
        acm->minor = minor;
        acm->dev = usb_dev;
        acm->ctrl_caps = ac_management_function;
+       if (quirks & NO_CAP_LINE)
+               acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
        acm->ctrlsize = ctrlsize;
        acm->readsize = readsize;
        acm->rx_buflimit = num_rx_buf;
@@ -1223,9 +1265,10 @@ skip_normal_probe:
 
 skip_countries:
        usb_fill_int_urb(acm->ctrlurb, usb_dev,
-                       usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
-                       acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
-                       epctrl->bInterval);
+                        usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
+                        acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
+                        /* works around buggy devices */
+                        epctrl->bInterval ? epctrl->bInterval : 0xff);
        acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        acm->ctrlurb->transfer_dma = acm->ctrl_dma;
 
@@ -1312,7 +1355,8 @@ static void acm_disconnect(struct usb_interface *intf)
                                                                acm->ctrl_dma);
        acm_read_buffers_free(acm);
 
-       usb_driver_release_interface(&acm_driver, intf == acm->control ?
+       if (!acm->combined_interfaces)
+               usb_driver_release_interface(&acm_driver, intf == acm->control ?
                                        acm->data : acm->control);
 
        if (acm->port.count == 0) {
@@ -1451,6 +1495,9 @@ static struct usb_device_id acm_ids[] = {
                                           Maybe we should define a new
                                           quirk for this. */
        },
+       { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+       .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+       },
 
        /* control interfaces with various AT-command sets */
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
index 4c3856420adde6c55761950e0697822d9e496d1b..1602324808bafb510e374520b9eb0a264617dc02 100644 (file)
@@ -125,6 +125,7 @@ struct acm {
        unsigned char clocal;                           /* termios CLOCAL */
        unsigned int ctrl_caps;                         /* control capabilities from the class specific header */
        unsigned int susp_count;                        /* number of suspended interfaces */
+       int combined_interfaces:1;                      /* control and data collapsed */
        struct acm_wb *delayed_wb;                      /* write queued for a device about to be woken */
 };
 
@@ -133,3 +134,4 @@ struct acm {
 /* constants describing various quirks and errors */
 #define NO_UNION_NORMAL                        1
 #define SINGLE_RX_URB                  2
+#define NO_CAP_LINE                    4
index d2747a49b9744bacf2962abbe2ea1b8bd3b087b5..26c09f0257dbeff29a9c8f25f39eaf15453570c0 100644 (file)
@@ -1057,8 +1057,14 @@ static const struct file_operations usblp_fops = {
        .release =      usblp_release,
 };
 
+static char *usblp_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 static struct usb_class_driver usblp_class = {
        .name =         "lp%d",
+       .nodename =     usblp_nodename,
        .fops =         &usblp_fops,
        .minor_base =   USBLP_MINOR_BASE,
 };
index c40a9b284cc94217d9a45c1b09d838904571fa67..3703789d0d2af8a7459138adb5903c548e704425 100644 (file)
@@ -927,21 +927,27 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        switch (cmd) {
        case USBTMC_IOCTL_CLEAR_OUT_HALT:
                retval = usbtmc_ioctl_clear_out_halt(data);
+               break;
 
        case USBTMC_IOCTL_CLEAR_IN_HALT:
                retval = usbtmc_ioctl_clear_in_halt(data);
+               break;
 
        case USBTMC_IOCTL_INDICATOR_PULSE:
                retval = usbtmc_ioctl_indicator_pulse(data);
+               break;
 
        case USBTMC_IOCTL_CLEAR:
                retval = usbtmc_ioctl_clear(data);
+               break;
 
        case USBTMC_IOCTL_ABORT_BULK_OUT:
                retval = usbtmc_ioctl_abort_bulk_out(data);
+               break;
 
        case USBTMC_IOCTL_ABORT_BULK_IN:
                retval = usbtmc_ioctl_abort_bulk_in(data);
+               break;
        }
 
        mutex_unlock(&data->io_mutex);
index e1759d17ac5d17ca9cafeea4d67094954af07462..69280c35b5cbc6d93cc50c99d7f4a7f747c1fc99 100644 (file)
@@ -28,7 +28,7 @@ comment "Miscellaneous USB options"
        depends on USB
 
 config USB_DEVICEFS
-       bool "USB device filesystem"
+       bool "USB device filesystem (DEPRECATED)" if EMBEDDED
        depends on USB
        ---help---
          If you say Y here (and to "/proc file system support" in the "File
@@ -46,11 +46,15 @@ config USB_DEVICEFS
          For the format of the various /proc/bus/usb/ files, please read
          <file:Documentation/usb/proc_usb_info.txt>.
 
-         Usbfs files can't handle Access Control Lists (ACL), which are the
-         default way to grant access to USB devices for untrusted users of a
-         desktop system. The usbfs functionality is replaced by real
-         device-nodes managed by udev. These nodes live in /dev/bus/usb and
-         are used by libusb.
+         Modern Linux systems do not use this.
+
+         Usbfs entries are files and not character devices; usbfs can't
+         handle Access Control Lists (ACL) which are the default way to
+         grant access to USB devices for untrusted users of a desktop
+         system.
+
+         The usbfs functionality is replaced by real device-nodes managed by
+         udev.  These nodes lived in /dev/bus/usb and are used by libusb.
 
 config USB_DEVICE_CLASS
        bool "USB device class-devices (DEPRECATED)"
index b6078706fb939d7f1d3aeb9466f6498f8857f366..ec16e60299050ab046f9a02dfb15e0207e264516 100644 (file)
@@ -4,14 +4,14 @@
 
 usbcore-objs   := usb.o hub.o hcd.o urb.o message.o driver.o \
                        config.o file.o buffer.o sysfs.o endpoint.o \
-                       devio.o notify.o generic.o quirks.o
+                       devio.o notify.o generic.o quirks.o devices.o
 
 ifeq ($(CONFIG_PCI),y)
        usbcore-objs    += hcd-pci.o
 endif
 
 ifeq ($(CONFIG_USB_DEVICEFS),y)
-       usbcore-objs    += inode.o devices.o
+       usbcore-objs    += inode.o
 endif
 
 obj-$(CONFIG_USB)      += usbcore.o
index 568244c99bdc0f5d7b0e03d2177d1153e9d49d49..24dfb33f90cb0fbc2268572ee0ed90ecc15efbb5 100644 (file)
@@ -19,6 +19,32 @@ static inline const char *plural(int n)
        return (n == 1 ? "" : "s");
 }
 
+/* FIXME: this is a kludge */
+static int find_next_descriptor_more(unsigned char *buffer, int size,
+    int dt1, int dt2, int dt3, int *num_skipped)
+{
+       struct usb_descriptor_header *h;
+       int n = 0;
+       unsigned char *buffer0 = buffer;
+
+       /* Find the next descriptor of type dt1 or dt2 or dt3 */
+       while (size > 0) {
+               h = (struct usb_descriptor_header *) buffer;
+               if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2 ||
+                               h->bDescriptorType == dt3)
+                       break;
+               buffer += h->bLength;
+               size -= h->bLength;
+               ++n;
+       }
+
+       /* Store the number of descriptors skipped and return the
+        * number of bytes skipped */
+       if (num_skipped)
+               *num_skipped = n;
+       return buffer - buffer0;
+}
+
 static int find_next_descriptor(unsigned char *buffer, int size,
     int dt1, int dt2, int *num_skipped)
 {
@@ -43,6 +69,129 @@ static int find_next_descriptor(unsigned char *buffer, int size,
        return buffer - buffer0;
 }
 
+static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+               int inum, int asnum, struct usb_host_endpoint *ep,
+               int num_ep, unsigned char *buffer, int size)
+{
+       unsigned char *buffer_start = buffer;
+       struct usb_ss_ep_comp_descriptor        *desc;
+       int retval;
+       int num_skipped;
+       int max_tx;
+       int i;
+
+       /* Allocate space for the SS endpoint companion descriptor */
+       ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+                       GFP_KERNEL);
+       if (!ep->ss_ep_comp)
+               return -ENOMEM;
+       desc = (struct usb_ss_ep_comp_descriptor *) buffer;
+       if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
+               dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
+                               " interface %d altsetting %d ep %d: "
+                               "using minimum values\n",
+                               cfgno, inum, asnum, ep->desc.bEndpointAddress);
+               ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+               ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+               ep->ss_ep_comp->desc.bMaxBurst = 0;
+               /*
+                * Leave bmAttributes as zero, which will mean no streams for
+                * bulk, and isoc won't support multiple bursts of packets.
+                * With bursts of only one packet, and a Mult of 1, the max
+                * amount of data moved per endpoint service interval is one
+                * packet.
+                */
+               if (usb_endpoint_xfer_isoc(&ep->desc) ||
+                               usb_endpoint_xfer_int(&ep->desc))
+                       ep->ss_ep_comp->desc.wBytesPerInterval =
+                               ep->desc.wMaxPacketSize;
+               /*
+                * The next descriptor is for an Endpoint or Interface,
+                * no extra descriptors to copy into the companion structure,
+                * and we didn't eat up any of the buffer.
+                */
+               retval = 0;
+               goto valid;
+       }
+       memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
+       desc = &ep->ss_ep_comp->desc;
+       buffer += desc->bLength;
+       size -= desc->bLength;
+
+       /* Eat up the other descriptors we don't care about */
+       ep->ss_ep_comp->extra = buffer;
+       i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
+                       USB_DT_INTERFACE, &num_skipped);
+       ep->ss_ep_comp->extralen = i;
+       buffer += i;
+       size -= i;
+       retval = buffer - buffer_start + i;
+       if (num_skipped > 0)
+               dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
+                               num_skipped, plural(num_skipped),
+                               "SuperSpeed endpoint companion");
+
+       /* Check the various values */
+       if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
+               dev_warn(ddev, "Control endpoint with bMaxBurst = %d in "
+                               "config %d interface %d altsetting %d ep %d: "
+                               "setting to zero\n", desc->bMaxBurst,
+                               cfgno, inum, asnum, ep->desc.bEndpointAddress);
+               desc->bMaxBurst = 0;
+       }
+       if (desc->bMaxBurst > 15) {
+               dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
+                               "config %d interface %d altsetting %d ep %d: "
+                               "setting to 15\n", desc->bMaxBurst,
+                               cfgno, inum, asnum, ep->desc.bEndpointAddress);
+               desc->bMaxBurst = 15;
+       }
+       if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc))
+                       && desc->bmAttributes != 0) {
+               dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
+                               "config %d interface %d altsetting %d ep %d: "
+                               "setting to zero\n",
+                               usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
+                               desc->bmAttributes,
+                               cfgno, inum, asnum, ep->desc.bEndpointAddress);
+               desc->bmAttributes = 0;
+       }
+       if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) {
+               dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
+                               "config %d interface %d altsetting %d ep %d: "
+                               "setting to max\n",
+                               cfgno, inum, asnum, ep->desc.bEndpointAddress);
+               desc->bmAttributes = 16;
+       }
+       if (usb_endpoint_xfer_isoc(&ep->desc) && desc->bmAttributes > 2) {
+               dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+                               "config %d interface %d altsetting %d ep %d: "
+                               "setting to 3\n", desc->bmAttributes + 1,
+                               cfgno, inum, asnum, ep->desc.bEndpointAddress);
+               desc->bmAttributes = 2;
+       }
+       if (usb_endpoint_xfer_isoc(&ep->desc)) {
+               max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) *
+                       (desc->bmAttributes + 1);
+       } else if (usb_endpoint_xfer_int(&ep->desc)) {
+               max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
+       } else {
+               goto valid;
+       }
+       if (desc->wBytesPerInterval > max_tx) {
+               dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
+                               "config %d interface %d altsetting %d ep %d: "
+                               "setting to %d\n",
+                               usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
+                               desc->wBytesPerInterval,
+                               cfgno, inum, asnum, ep->desc.bEndpointAddress,
+                               max_tx);
+               desc->wBytesPerInterval = max_tx;
+       }
+valid:
+       return retval;
+}
+
 static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
     int asnum, struct usb_host_interface *ifp, int num_ep,
     unsigned char *buffer, int size)
@@ -50,7 +199,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        unsigned char *buffer0 = buffer;
        struct usb_endpoint_descriptor *d;
        struct usb_host_endpoint *endpoint;
-       int n, i, j;
+       int n, i, j, retval;
 
        d = (struct usb_endpoint_descriptor *) buffer;
        buffer += d->bLength;
@@ -92,6 +241,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        if (usb_endpoint_xfer_int(d)) {
                i = 1;
                switch (to_usb_device(ddev)->speed) {
+               case USB_SPEED_SUPER:
                case USB_SPEED_HIGH:
                        /* Many device manufacturers are using full-speed
                         * bInterval values in high-speed interrupt endpoint
@@ -161,17 +311,39 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
                                cfgno, inum, asnum, d->bEndpointAddress,
                                maxp);
        }
-
-       /* Skip over any Class Specific or Vendor Specific descriptors;
-        * find the next endpoint or interface descriptor */
-       endpoint->extra = buffer;
-       i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
-           USB_DT_INTERFACE, &n);
-       endpoint->extralen = i;
+       /* Allocate room for and parse any SS endpoint companion descriptors */
+       if (to_usb_device(ddev)->speed == USB_SPEED_SUPER) {
+               endpoint->extra = buffer;
+               i = find_next_descriptor_more(buffer, size, USB_DT_SS_ENDPOINT_COMP,
+                               USB_DT_ENDPOINT, USB_DT_INTERFACE, &n);
+               endpoint->extralen = i;
+               buffer += i;
+               size -= i;
+
+               if (size > 0) {
+                       retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
+                                       inum, asnum, endpoint, num_ep, buffer,
+                                       size);
+                       if (retval >= 0) {
+                               buffer += retval;
+                               retval = buffer - buffer0;
+                       }
+               } else {
+                       retval = buffer - buffer0;
+               }
+       } else {
+               /* Skip over any Class Specific or Vendor Specific descriptors;
+                * find the next endpoint or interface descriptor */
+               endpoint->extra = buffer;
+               i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
+                               USB_DT_INTERFACE, &n);
+               endpoint->extralen = i;
+               retval = buffer - buffer0 + i;
+       }
        if (n > 0)
                dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
                    n, plural(n), "endpoint");
-       return buffer - buffer0 + i;
+       return retval;
 
 skip_to_next_endpoint_or_interface_descriptor:
        i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
@@ -452,6 +624,8 @@ static int usb_parse_configuration(struct device *ddev, int cfgidx,
                kref_init(&intfc->ref);
        }
 
+       /* FIXME: parse the BOS descriptor */
+
        /* Skip over any Class Specific or Vendor Specific descriptors;
         * find the first interface descriptor */
        config->extra = buffer;
index d0a21a5f82017bf246c62108009b5280c5d1c90b..69e5773abfce2459cd65d11a939f9f6250ab7468 100644 (file)
@@ -154,16 +154,11 @@ static const struct usb_device_id *usb_match_dynamic_id(struct usb_interface *in
 static int usb_probe_device(struct device *dev)
 {
        struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
-       struct usb_device *udev;
+       struct usb_device *udev = to_usb_device(dev);
        int error = -ENODEV;
 
        dev_dbg(dev, "%s\n", __func__);
 
-       if (!is_usb_device(dev))        /* Sanity check */
-               return error;
-
-       udev = to_usb_device(dev);
-
        /* TODO: Add real matching code */
 
        /* The device should always appear to be in use
@@ -203,18 +198,13 @@ static void usb_cancel_queued_reset(struct usb_interface *iface)
 static int usb_probe_interface(struct device *dev)
 {
        struct usb_driver *driver = to_usb_driver(dev->driver);
-       struct usb_interface *intf;
-       struct usb_device *udev;
+       struct usb_interface *intf = to_usb_interface(dev);
+       struct usb_device *udev = interface_to_usbdev(intf);
        const struct usb_device_id *id;
        int error = -ENODEV;
 
        dev_dbg(dev, "%s\n", __func__);
 
-       if (is_usb_device(dev))         /* Sanity check */
-               return error;
-
-       intf = to_usb_interface(dev);
-       udev = interface_to_usbdev(intf);
        intf->needs_binding = 0;
 
        if (udev->authorized == 0) {
@@ -385,7 +375,6 @@ void usb_driver_release_interface(struct usb_driver *driver,
                                        struct usb_interface *iface)
 {
        struct device *dev = &iface->dev;
-       struct usb_device *udev = interface_to_usbdev(iface);
 
        /* this should never happen, don't release something that's not ours */
        if (!dev->driver || dev->driver != &driver->drvwrap.driver)
@@ -394,23 +383,19 @@ void usb_driver_release_interface(struct usb_driver *driver,
        /* don't release from within disconnect() */
        if (iface->condition != USB_INTERFACE_BOUND)
                return;
+       iface->condition = USB_INTERFACE_UNBINDING;
 
-       /* don't release if the interface hasn't been added yet */
+       /* Release via the driver core only if the interface
+        * has already been registered
+        */
        if (device_is_registered(dev)) {
-               iface->condition = USB_INTERFACE_UNBINDING;
                device_release_driver(dev);
        } else {
-               iface->condition = USB_INTERFACE_UNBOUND;
-               usb_cancel_queued_reset(iface);
+               down(&dev->sem);
+               usb_unbind_interface(dev);
+               dev->driver = NULL;
+               up(&dev->sem);
        }
-       dev->driver = NULL;
-       usb_set_intfdata(iface, NULL);
-
-       usb_pm_lock(udev);
-       iface->condition = USB_INTERFACE_UNBOUND;
-       mark_quiesced(iface);
-       iface->needs_remote_wakeup = 0;
-       usb_pm_unlock(udev);
 }
 EXPORT_SYMBOL_GPL(usb_driver_release_interface);
 
@@ -598,7 +583,7 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
                /* TODO: Add real matching code */
                return 1;
 
-       } else {
+       } else if (is_usb_interface(dev)) {
                struct usb_interface *intf;
                struct usb_driver *usb_drv;
                const struct usb_device_id *id;
@@ -630,11 +615,14 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
        /* driver is often null here; dev_dbg() would oops */
        pr_debug("usb %s: uevent\n", dev_name(dev));
 
-       if (is_usb_device(dev))
+       if (is_usb_device(dev)) {
                usb_dev = to_usb_device(dev);
-       else {
+       } else if (is_usb_interface(dev)) {
                struct usb_interface *intf = to_usb_interface(dev);
+
                usb_dev = interface_to_usbdev(intf);
+       } else {
+               return 0;
        }
 
        if (usb_dev->devnum < 0) {
@@ -1762,6 +1750,7 @@ int usb_suspend(struct device *dev, pm_message_t msg)
 int usb_resume(struct device *dev, pm_message_t msg)
 {
        struct usb_device       *udev;
+       int                     status;
 
        udev = to_usb_device(dev);
 
@@ -1771,7 +1760,14 @@ int usb_resume(struct device *dev, pm_message_t msg)
         */
        if (udev->skip_sys_resume)
                return 0;
-       return usb_external_resume_device(udev, msg);
+       status = usb_external_resume_device(udev, msg);
+
+       /* Avoid PM error messages for devices disconnected while suspended
+        * as we'll display regular disconnect messages just a bit later.
+        */
+       if (status == -ENODEV)
+               return 0;
+       return status;
 }
 
 #endif /* CONFIG_PM */
index 40dee2ac0133740ee97e37932a6181247b94e22a..bc39fc40bbde20781d2467ec227ae48a9bdf46f0 100644 (file)
 #include <linux/usb.h>
 #include "usb.h"
 
-#define MAX_ENDPOINT_MINORS (64*128*32)
-static int usb_endpoint_major;
-static DEFINE_IDR(endpoint_idr);
-
 struct ep_device {
        struct usb_endpoint_descriptor *desc;
        struct usb_device *udev;
        struct device dev;
-       int minor;
 };
 #define to_ep_device(_dev) \
        container_of(_dev, struct ep_device, dev)
 
+struct device_type usb_ep_device_type = {
+       .name =         "usb_endpoint",
+};
+
 struct ep_attribute {
        struct attribute attr;
        ssize_t (*show)(struct usb_device *,
@@ -160,118 +159,10 @@ static struct attribute_group *ep_dev_groups[] = {
        NULL
 };
 
-static int usb_endpoint_major_init(void)
-{
-       dev_t dev;
-       int error;
-
-       error = alloc_chrdev_region(&dev, 0, MAX_ENDPOINT_MINORS,
-                                   "usb_endpoint");
-       if (error) {
-               printk(KERN_ERR "Unable to get a dynamic major for "
-                      "usb endpoints.\n");
-               return error;
-       }
-       usb_endpoint_major = MAJOR(dev);
-
-       return error;
-}
-
-static void usb_endpoint_major_cleanup(void)
-{
-       unregister_chrdev_region(MKDEV(usb_endpoint_major, 0),
-                                MAX_ENDPOINT_MINORS);
-}
-
-static int endpoint_get_minor(struct ep_device *ep_dev)
-{
-       static DEFINE_MUTEX(minor_lock);
-       int retval = -ENOMEM;
-       int id;
-
-       mutex_lock(&minor_lock);
-       if (idr_pre_get(&endpoint_idr, GFP_KERNEL) == 0)
-               goto exit;
-
-       retval = idr_get_new(&endpoint_idr, ep_dev, &id);
-       if (retval < 0) {
-               if (retval == -EAGAIN)
-                       retval = -ENOMEM;
-               goto exit;
-       }
-       ep_dev->minor = id & MAX_ID_MASK;
-exit:
-       mutex_unlock(&minor_lock);
-       return retval;
-}
-
-static void endpoint_free_minor(struct ep_device *ep_dev)
-{
-       idr_remove(&endpoint_idr, ep_dev->minor);
-}
-
-static struct endpoint_class {
-       struct kref kref;
-       struct class *class;
-} *ep_class;
-
-static int init_endpoint_class(void)
-{
-       int result = 0;
-
-       if (ep_class != NULL) {
-               kref_get(&ep_class->kref);
-               goto exit;
-       }
-
-       ep_class = kmalloc(sizeof(*ep_class), GFP_KERNEL);
-       if (!ep_class) {
-               result = -ENOMEM;
-               goto exit;
-       }
-
-       kref_init(&ep_class->kref);
-       ep_class->class = class_create(THIS_MODULE, "usb_endpoint");
-       if (IS_ERR(ep_class->class)) {
-               result = PTR_ERR(ep_class->class);
-               goto class_create_error;
-       }
-
-       result = usb_endpoint_major_init();
-       if (result)
-               goto endpoint_major_error;
-
-       goto exit;
-
-endpoint_major_error:
-       class_destroy(ep_class->class);
-class_create_error:
-       kfree(ep_class);
-       ep_class = NULL;
-exit:
-       return result;
-}
-
-static void release_endpoint_class(struct kref *kref)
-{
-       /* Ok, we cheat as we know we only have one ep_class */
-       class_destroy(ep_class->class);
-       kfree(ep_class);
-       ep_class = NULL;
-       usb_endpoint_major_cleanup();
-}
-
-static void destroy_endpoint_class(void)
-{
-       if (ep_class)
-               kref_put(&ep_class->kref, release_endpoint_class);
-}
-
 static void ep_device_release(struct device *dev)
 {
        struct ep_device *ep_dev = to_ep_device(dev);
 
-       endpoint_free_minor(ep_dev);
        kfree(ep_dev);
 }
 
@@ -279,62 +170,32 @@ int usb_create_ep_devs(struct device *parent,
                        struct usb_host_endpoint *endpoint,
                        struct usb_device *udev)
 {
-       char name[8];
        struct ep_device *ep_dev;
        int retval;
 
-       retval = init_endpoint_class();
-       if (retval)
-               goto exit;
-
        ep_dev = kzalloc(sizeof(*ep_dev), GFP_KERNEL);
        if (!ep_dev) {
                retval = -ENOMEM;
-               goto error_alloc;
-       }
-
-       retval = endpoint_get_minor(ep_dev);
-       if (retval) {
-               dev_err(parent, "can not allocate minor number for %s\n",
-                       dev_name(&ep_dev->dev));
-               goto error_register;
+               goto exit;
        }
 
        ep_dev->desc = &endpoint->desc;
        ep_dev->udev = udev;
        ep_dev->dev.groups = ep_dev_groups;
-       ep_dev->dev.devt = MKDEV(usb_endpoint_major, ep_dev->minor);
-       ep_dev->dev.class = ep_class->class;
+       ep_dev->dev.type = &usb_ep_device_type;
        ep_dev->dev.parent = parent;
        ep_dev->dev.release = ep_device_release;
-       dev_set_name(&ep_dev->dev, "usbdev%d.%d_ep%02x",
-                udev->bus->busnum, udev->devnum,
-                endpoint->desc.bEndpointAddress);
+       dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
 
        retval = device_register(&ep_dev->dev);
        if (retval)
-               goto error_chrdev;
+               goto error_register;
 
-       /* create the symlink to the old-style "ep_XX" directory */
-       sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
-       retval = sysfs_create_link(&parent->kobj, &ep_dev->dev.kobj, name);
-       if (retval)
-               goto error_link;
        endpoint->ep_dev = ep_dev;
        return retval;
 
-error_link:
-       device_unregister(&ep_dev->dev);
-       destroy_endpoint_class();
-       return retval;
-
-error_chrdev:
-       endpoint_free_minor(ep_dev);
-
 error_register:
        kfree(ep_dev);
-error_alloc:
-       destroy_endpoint_class();
 exit:
        return retval;
 }
@@ -344,12 +205,7 @@ void usb_remove_ep_devs(struct usb_host_endpoint *endpoint)
        struct ep_device *ep_dev = endpoint->ep_dev;
 
        if (ep_dev) {
-               char name[8];
-
-               sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
-               sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
                device_unregister(&ep_dev->dev);
                endpoint->ep_dev = NULL;
-               destroy_endpoint_class();
        }
 }
index 997e659ff693eeb5ed083c2a8e76bdce76a33f97..5cef88929b3ee609af666c0d3e18772be10b1039 100644 (file)
@@ -67,6 +67,16 @@ static struct usb_class {
        struct class *class;
 } *usb_class;
 
+static char *usb_nodename(struct device *dev)
+{
+       struct usb_class_driver *drv;
+
+       drv = dev_get_drvdata(dev);
+       if (!drv || !drv->nodename)
+               return NULL;
+       return drv->nodename(dev);
+}
+
 static int init_usb_class(void)
 {
        int result = 0;
@@ -90,6 +100,7 @@ static int init_usb_class(void)
                kfree(usb_class);
                usb_class = NULL;
        }
+       usb_class->class->nodename = usb_nodename;
 
 exit:
        return result;
@@ -198,7 +209,7 @@ int usb_register_dev(struct usb_interface *intf,
        else
                temp = name;
        intf->usb_dev = device_create(usb_class->class, &intf->dev,
-                                     MKDEV(USB_MAJOR, minor), NULL,
+                                     MKDEV(USB_MAJOR, minor), class_driver,
                                      "%s", temp);
        if (IS_ERR(intf->usb_dev)) {
                down_write(&minor_rwsem);
index a4301dc02d275c1f28c77cde42183d1a2d435792..91f2885b6ee10ca8f6ca396289e0b1ea2ee7060b 100644 (file)
@@ -185,194 +185,198 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(usb_hcd_pci_remove);
 
-
-#ifdef CONFIG_PM
-
 /**
- * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD
- * @dev: USB Host Controller being suspended
- * @message: Power Management message describing this state transition
- *
- * Store this function in the HCD's struct pci_driver as .suspend.
+ * usb_hcd_pci_shutdown - shutdown host controller
+ * @dev: USB Host Controller being shutdown
  */
-int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
+void usb_hcd_pci_shutdown(struct pci_dev *dev)
+{
+       struct usb_hcd          *hcd;
+
+       hcd = pci_get_drvdata(dev);
+       if (!hcd)
+               return;
+
+       if (hcd->driver->shutdown)
+               hcd->driver->shutdown(hcd);
+}
+EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
+
+#ifdef CONFIG_PM_SLEEP
+
+static int check_root_hub_suspended(struct device *dev)
+{
+       struct pci_dev          *pci_dev = to_pci_dev(dev);
+       struct usb_hcd          *hcd = pci_get_drvdata(pci_dev);
+
+       if (!(hcd->state == HC_STATE_SUSPENDED ||
+                       hcd->state == HC_STATE_HALT)) {
+               dev_warn(dev, "Root hub is not suspended\n");
+               return -EBUSY;
+       }
+       return 0;
+}
+
+static int hcd_pci_suspend(struct device *dev)
 {
-       struct usb_hcd          *hcd = pci_get_drvdata(dev);
-       int                     retval = 0;
-       int                     wake, w;
-       int                     has_pci_pm;
+       struct pci_dev          *pci_dev = to_pci_dev(dev);
+       struct usb_hcd          *hcd = pci_get_drvdata(pci_dev);
+       int                     retval;
 
        /* Root hub suspend should have stopped all downstream traffic,
         * and all bus master traffic.  And done so for both the interface
         * and the stub usb_device (which we check here).  But maybe it
         * didn't; writing sysfs power/state files ignores such rules...
-        *
-        * We must ignore the FREEZE vs SUSPEND distinction here, because
-        * otherwise the swsusp will save (and restore) garbage state.
         */
-       if (!(hcd->state == HC_STATE_SUSPENDED ||
-                       hcd->state == HC_STATE_HALT)) {
-               dev_warn(&dev->dev, "Root hub is not suspended\n");
-               retval = -EBUSY;
-               goto done;
-       }
+       retval = check_root_hub_suspended(dev);
+       if (retval)
+               return retval;
 
        /* We might already be suspended (runtime PM -- not yet written) */
-       if (dev->current_state != PCI_D0)
-               goto done;
+       if (pci_dev->current_state != PCI_D0)
+               return retval;
 
        if (hcd->driver->pci_suspend) {
-               retval = hcd->driver->pci_suspend(hcd, message);
+               retval = hcd->driver->pci_suspend(hcd);
                suspend_report_result(hcd->driver->pci_suspend, retval);
                if (retval)
-                       goto done;
+                       return retval;
        }
 
-       synchronize_irq(dev->irq);
+       synchronize_irq(pci_dev->irq);
 
        /* Downstream ports from this root hub should already be quiesced, so
         * there will be no DMA activity.  Now we can shut down the upstream
-        * link (except maybe for PME# resume signaling) and enter some PCI
-        * low power state, if the hardware allows.
+        * link (except maybe for PME# resume signaling).  We'll enter a
+        * low power state during suspend_noirq, if the hardware allows.
         */
-       pci_disable_device(dev);
+       pci_disable_device(pci_dev);
+       return retval;
+}
+
+static int hcd_pci_suspend_noirq(struct device *dev)
+{
+       struct pci_dev          *pci_dev = to_pci_dev(dev);
+       struct usb_hcd          *hcd = pci_get_drvdata(pci_dev);
+       int                     retval;
+
+       retval = check_root_hub_suspended(dev);
+       if (retval)
+               return retval;
 
-       pci_save_state(dev);
+       pci_save_state(pci_dev);
 
-       /* Don't fail on error to enable wakeup.  We rely on pci code
-        * to reject requests the hardware can't implement, rather
-        * than coding the same thing.
+       /* If the root hub is HALTed rather than SUSPENDed,
+        * disallow remote wakeup.
         */
-       wake = (hcd->state == HC_STATE_SUSPENDED &&
-                       device_may_wakeup(&dev->dev));
-       w = pci_wake_from_d3(dev, wake);
-       if (w < 0)
-               wake = w;
-       dev_dbg(&dev->dev, "wakeup: %d\n", wake);
-
-       /* Don't change state if we don't need to */
-       if (message.event == PM_EVENT_FREEZE ||
-                       message.event == PM_EVENT_PRETHAW) {
-               dev_dbg(&dev->dev, "--> no state change\n");
-               goto done;
-       }
+       if (hcd->state == HC_STATE_HALT)
+               device_set_wakeup_enable(dev, 0);
+       dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev));
 
-       has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM);
-       if (!has_pci_pm) {
-               dev_dbg(&dev->dev, "--> PCI D0 legacy\n");
+       /* Possibly enable remote wakeup,
+        * choose the appropriate low-power state, and go to that state.
+        */
+       retval = pci_prepare_to_sleep(pci_dev);
+       if (retval == -EIO) {           /* Low-power not supported */
+               dev_dbg(dev, "--> PCI D0 legacy\n");
+               retval = 0;
+       } else if (retval == 0) {
+               dev_dbg(dev, "--> PCI %s\n",
+                               pci_power_name(pci_dev->current_state));
        } else {
-
-               /* NOTE:  dev->current_state becomes nonzero only here, and
-                * only for devices that support PCI PM.  Also, exiting
-                * PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset
-                * some device state (e.g. as part of clock reinit).
-                */
-               retval = pci_set_power_state(dev, PCI_D3hot);
-               suspend_report_result(pci_set_power_state, retval);
-               if (retval == 0) {
-                       dev_dbg(&dev->dev, "--> PCI D3\n");
-               } else {
-                       dev_dbg(&dev->dev, "PCI D3 suspend fail, %d\n",
-                                       retval);
-                       pci_restore_state(dev);
-               }
+               suspend_report_result(pci_prepare_to_sleep, retval);
+               return retval;
        }
 
 #ifdef CONFIG_PPC_PMAC
-       if (retval == 0) {
-               /* Disable ASIC clocks for USB */
-               if (machine_is(powermac)) {
-                       struct device_node      *of_node;
-
-                       of_node = pci_device_to_OF_node(dev);
-                       if (of_node)
-                               pmac_call_feature(PMAC_FTR_USB_ENABLE,
-                                                       of_node, 0, 0);
-               }
+       /* Disable ASIC clocks for USB */
+       if (machine_is(powermac)) {
+               struct device_node      *of_node;
+
+               of_node = pci_device_to_OF_node(pci_dev);
+               if (of_node)
+                       pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
        }
 #endif
-
- done:
        return retval;
 }
-EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
 
-/**
- * usb_hcd_pci_resume - power management resume of a PCI-based HCD
- * @dev: USB Host Controller being resumed
- *
- * Store this function in the HCD's struct pci_driver as .resume.
- */
-int usb_hcd_pci_resume(struct pci_dev *dev)
+static int hcd_pci_resume_noirq(struct device *dev)
 {
-       struct usb_hcd          *hcd;
-       int                     retval;
+       struct pci_dev          *pci_dev = to_pci_dev(dev);
 
 #ifdef CONFIG_PPC_PMAC
        /* Reenable ASIC clocks for USB */
        if (machine_is(powermac)) {
                struct device_node *of_node;
 
-               of_node = pci_device_to_OF_node(dev);
+               of_node = pci_device_to_OF_node(pci_dev);
                if (of_node)
                        pmac_call_feature(PMAC_FTR_USB_ENABLE,
                                                of_node, 0, 1);
        }
 #endif
 
-       pci_restore_state(dev);
+       /* Go back to D0 and disable remote wakeup */
+       pci_back_from_sleep(pci_dev);
+       return 0;
+}
+
+static int resume_common(struct device *dev, bool hibernated)
+{
+       struct pci_dev          *pci_dev = to_pci_dev(dev);
+       struct usb_hcd          *hcd = pci_get_drvdata(pci_dev);
+       int                     retval;
 
-       hcd = pci_get_drvdata(dev);
        if (hcd->state != HC_STATE_SUSPENDED) {
-               dev_dbg(hcd->self.controller,
-                               "can't resume, not suspended!\n");
+               dev_dbg(dev, "can't resume, not suspended!\n");
                return 0;
        }
 
-       pci_enable_wake(dev, PCI_D0, false);
-
-       retval = pci_enable_device(dev);
+       retval = pci_enable_device(pci_dev);
        if (retval < 0) {
-               dev_err(&dev->dev, "can't re-enable after resume, %d!\n",
-                               retval);
+               dev_err(dev, "can't re-enable after resume, %d!\n", retval);
                return retval;
        }
 
-       pci_set_master(dev);
-
-       /* yes, ignore this result too... */
-       (void) pci_wake_from_d3(dev, 0);
+       pci_set_master(pci_dev);
 
        clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
 
        if (hcd->driver->pci_resume) {
-               retval = hcd->driver->pci_resume(hcd);
+               retval = hcd->driver->pci_resume(hcd, hibernated);
                if (retval) {
-                       dev_err(hcd->self.controller,
-                               "PCI post-resume error %d!\n", retval);
+                       dev_err(dev, "PCI post-resume error %d!\n", retval);
                        usb_hc_died(hcd);
                }
        }
        return retval;
 }
-EXPORT_SYMBOL_GPL(usb_hcd_pci_resume);
 
-#endif /* CONFIG_PM */
-
-/**
- * usb_hcd_pci_shutdown - shutdown host controller
- * @dev: USB Host Controller being shutdown
- */
-void usb_hcd_pci_shutdown(struct pci_dev *dev)
+static int hcd_pci_resume(struct device *dev)
 {
-       struct usb_hcd          *hcd;
-
-       hcd = pci_get_drvdata(dev);
-       if (!hcd)
-               return;
+       return resume_common(dev, false);
+}
 
-       if (hcd->driver->shutdown)
-               hcd->driver->shutdown(hcd);
+static int hcd_pci_restore(struct device *dev)
+{
+       return resume_common(dev, true);
 }
-EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
 
+struct dev_pm_ops usb_hcd_pci_pm_ops = {
+       .suspend        = hcd_pci_suspend,
+       .suspend_noirq  = hcd_pci_suspend_noirq,
+       .resume_noirq   = hcd_pci_resume_noirq,
+       .resume         = hcd_pci_resume,
+       .freeze         = check_root_hub_suspended,
+       .freeze_noirq   = check_root_hub_suspended,
+       .thaw_noirq     = NULL,
+       .thaw           = NULL,
+       .poweroff       = hcd_pci_suspend,
+       .poweroff_noirq = hcd_pci_suspend_noirq,
+       .restore_noirq  = hcd_pci_resume_noirq,
+       .restore        = hcd_pci_restore,
+};
+EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops);
+
+#endif /* CONFIG_PM_SLEEP */
index 42b93da1085d00ab52da7cd36843510945520d60..ce3f453f02ef5efe5b5034f3bedfa5a062e0db25 100644 (file)
@@ -128,6 +128,27 @@ static inline int is_root_hub(struct usb_device *udev)
 #define KERNEL_REL     ((LINUX_VERSION_CODE >> 16) & 0x0ff)
 #define KERNEL_VER     ((LINUX_VERSION_CODE >> 8) & 0x0ff)
 
+/* usb 3.0 root hub device descriptor */
+static const u8 usb3_rh_dev_descriptor[18] = {
+       0x12,       /*  __u8  bLength; */
+       0x01,       /*  __u8  bDescriptorType; Device */
+       0x00, 0x03, /*  __le16 bcdUSB; v3.0 */
+
+       0x09,       /*  __u8  bDeviceClass; HUB_CLASSCODE */
+       0x00,       /*  __u8  bDeviceSubClass; */
+       0x03,       /*  __u8  bDeviceProtocol; USB 3.0 hub */
+       0x09,       /*  __u8  bMaxPacketSize0; 2^9 = 512 Bytes */
+
+       0x6b, 0x1d, /*  __le16 idVendor; Linux Foundation */
+       0x02, 0x00, /*  __le16 idProduct; device 0x0002 */
+       KERNEL_VER, KERNEL_REL, /*  __le16 bcdDevice */
+
+       0x03,       /*  __u8  iManufacturer; */
+       0x02,       /*  __u8  iProduct; */
+       0x01,       /*  __u8  iSerialNumber; */
+       0x01        /*  __u8  bNumConfigurations; */
+};
+
 /* usb 2.0 root hub device descriptor */
 static const u8 usb2_rh_dev_descriptor [18] = {
        0x12,       /*  __u8  bLength; */
@@ -273,6 +294,47 @@ static const u8 hs_rh_config_descriptor [] = {
        0x0c        /*  __u8  ep_bInterval; (256ms -- usb 2.0 spec) */
 };
 
+static const u8 ss_rh_config_descriptor[] = {
+       /* one configuration */
+       0x09,       /*  __u8  bLength; */
+       0x02,       /*  __u8  bDescriptorType; Configuration */
+       0x19, 0x00, /*  __le16 wTotalLength; FIXME */
+       0x01,       /*  __u8  bNumInterfaces; (1) */
+       0x01,       /*  __u8  bConfigurationValue; */
+       0x00,       /*  __u8  iConfiguration; */
+       0xc0,       /*  __u8  bmAttributes;
+                                Bit 7: must be set,
+                                    6: Self-powered,
+                                    5: Remote wakeup,
+                                    4..0: resvd */
+       0x00,       /*  __u8  MaxPower; */
+
+       /* one interface */
+       0x09,       /*  __u8  if_bLength; */
+       0x04,       /*  __u8  if_bDescriptorType; Interface */
+       0x00,       /*  __u8  if_bInterfaceNumber; */
+       0x00,       /*  __u8  if_bAlternateSetting; */
+       0x01,       /*  __u8  if_bNumEndpoints; */
+       0x09,       /*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
+       0x00,       /*  __u8  if_bInterfaceSubClass; */
+       0x00,       /*  __u8  if_bInterfaceProtocol; */
+       0x00,       /*  __u8  if_iInterface; */
+
+       /* one endpoint (status change endpoint) */
+       0x07,       /*  __u8  ep_bLength; */
+       0x05,       /*  __u8  ep_bDescriptorType; Endpoint */
+       0x81,       /*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
+       0x03,       /*  __u8  ep_bmAttributes; Interrupt */
+                   /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
+                    * see hub.c:hub_configure() for details. */
+       (USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
+       0x0c        /*  __u8  ep_bInterval; (256ms -- usb 2.0 spec) */
+       /*
+        * All 3.0 hubs should have an endpoint companion descriptor,
+        * but we're ignoring that for now.  FIXME?
+        */
+};
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -426,23 +488,39 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
        case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
                switch (wValue & 0xff00) {
                case USB_DT_DEVICE << 8:
-                       if (hcd->driver->flags & HCD_USB2)
+                       switch (hcd->driver->flags & HCD_MASK) {
+                       case HCD_USB3:
+                               bufp = usb3_rh_dev_descriptor;
+                               break;
+                       case HCD_USB2:
                                bufp = usb2_rh_dev_descriptor;
-                       else if (hcd->driver->flags & HCD_USB11)
+                               break;
+                       case HCD_USB11:
                                bufp = usb11_rh_dev_descriptor;
-                       else
+                               break;
+                       default:
                                goto error;
+                       }
                        len = 18;
                        if (hcd->has_tt)
                                patch_protocol = 1;
                        break;
                case USB_DT_CONFIG << 8:
-                       if (hcd->driver->flags & HCD_USB2) {
+                       switch (hcd->driver->flags & HCD_MASK) {
+                       case HCD_USB3:
+                               bufp = ss_rh_config_descriptor;
+                               len = sizeof ss_rh_config_descriptor;
+                               break;
+                       case HCD_USB2:
                                bufp = hs_rh_config_descriptor;
                                len = sizeof hs_rh_config_descriptor;
-                       } else {
+                               break;
+                       case HCD_USB11:
                                bufp = fs_rh_config_descriptor;
                                len = sizeof fs_rh_config_descriptor;
+                               break;
+                       default:
+                               goto error;
                        }
                        if (device_can_wakeup(&hcd->self.root_hub->dev))
                                patch_wakeup = 1;
@@ -755,23 +833,6 @@ static struct attribute_group usb_bus_attr_group = {
 
 /*-------------------------------------------------------------------------*/
 
-static struct class *usb_host_class;
-
-int usb_host_init(void)
-{
-       int retval = 0;
-
-       usb_host_class = class_create(THIS_MODULE, "usb_host");
-       if (IS_ERR(usb_host_class))
-               retval = PTR_ERR(usb_host_class);
-       return retval;
-}
-
-void usb_host_cleanup(void)
-{
-       class_destroy(usb_host_class);
-}
-
 /**
  * usb_bus_init - shared initialization code
  * @bus: the bus structure being initialized
@@ -818,12 +879,6 @@ static int usb_register_bus(struct usb_bus *bus)
        set_bit (busnum, busmap.busmap);
        bus->busnum = busnum;
 
-       bus->dev = device_create(usb_host_class, bus->controller, MKDEV(0, 0),
-                                bus, "usb_host%d", busnum);
-       result = PTR_ERR(bus->dev);
-       if (IS_ERR(bus->dev))
-               goto error_create_class_dev;
-
        /* Add it to the local list of buses */
        list_add (&bus->bus_list, &usb_bus_list);
        mutex_unlock(&usb_bus_list_lock);
@@ -834,8 +889,6 @@ static int usb_register_bus(struct usb_bus *bus)
                  "number %d\n", bus->busnum);
        return 0;
 
-error_create_class_dev:
-       clear_bit(busnum, busmap.busmap);
 error_find_busnum:
        mutex_unlock(&usb_bus_list_lock);
        return result;
@@ -865,8 +918,6 @@ static void usb_deregister_bus (struct usb_bus *bus)
        usb_notify_remove_bus(bus);
 
        clear_bit (bus->busnum, busmap.busmap);
-
-       device_unregister(bus->dev);
 }
 
 /**
@@ -1199,7 +1250,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 
        /* Map the URB's buffers for DMA access.
         * Lower level HCD code should use *_dma exclusively,
-        * unless it uses pio or talks to another transport.
+        * unless it uses pio or talks to another transport,
+        * or uses the provided scatter gather list for bulk.
         */
        if (is_root_hub(urb->dev))
                return 0;
@@ -1520,6 +1572,92 @@ rescan:
        }
 }
 
+/* Check whether a new configuration or alt setting for an interface
+ * will exceed the bandwidth for the bus (or the host controller resources).
+ * Only pass in a non-NULL config or interface, not both!
+ * Passing NULL for both new_config and new_intf means the device will be
+ * de-configured by issuing a set configuration 0 command.
+ */
+int usb_hcd_check_bandwidth(struct usb_device *udev,
+               struct usb_host_config *new_config,
+               struct usb_interface *new_intf)
+{
+       int num_intfs, i, j;
+       struct usb_interface_cache *intf_cache;
+       struct usb_host_interface *alt = NULL;
+       int ret = 0;
+       struct usb_hcd *hcd;
+       struct usb_host_endpoint *ep;
+
+       hcd = bus_to_hcd(udev->bus);
+       if (!hcd->driver->check_bandwidth)
+               return 0;
+
+       /* Configuration is being removed - set configuration 0 */
+       if (!new_config && !new_intf) {
+               for (i = 1; i < 16; ++i) {
+                       ep = udev->ep_out[i];
+                       if (ep)
+                               hcd->driver->drop_endpoint(hcd, udev, ep);
+                       ep = udev->ep_in[i];
+                       if (ep)
+                               hcd->driver->drop_endpoint(hcd, udev, ep);
+               }
+               hcd->driver->check_bandwidth(hcd, udev);
+               return 0;
+       }
+       /* Check if the HCD says there's enough bandwidth.  Enable all endpoints
+        * each interface's alt setting 0 and ask the HCD to check the bandwidth
+        * of the bus.  There will always be bandwidth for endpoint 0, so it's
+        * ok to exclude it.
+        */
+       if (new_config) {
+               num_intfs = new_config->desc.bNumInterfaces;
+               /* Remove endpoints (except endpoint 0, which is always on the
+                * schedule) from the old config from the schedule
+                */
+               for (i = 1; i < 16; ++i) {
+                       ep = udev->ep_out[i];
+                       if (ep) {
+                               ret = hcd->driver->drop_endpoint(hcd, udev, ep);
+                               if (ret < 0)
+                                       goto reset;
+                       }
+                       ep = udev->ep_in[i];
+                       if (ep) {
+                               ret = hcd->driver->drop_endpoint(hcd, udev, ep);
+                               if (ret < 0)
+                                       goto reset;
+                       }
+               }
+               for (i = 0; i < num_intfs; ++i) {
+                       alt = NULL;
+                       /* Dig the endpoints for alt setting 0 out of the
+                        * interface cache for this interface
+                        */
+                       intf_cache = new_config->intf_cache[i];
+                       for (j = 0; j < intf_cache->num_altsetting; j++) {
+                               if (intf_cache->altsetting[j].desc.bAlternateSetting == 0)
+                                       alt = &intf_cache->altsetting[j];
+                       }
+                       if (!alt) {
+                               dev_dbg(&udev->dev, "Did not find alt setting 0 for intf %d\n", i);
+                               continue;
+                       }
+                       for (j = 0; j < alt->desc.bNumEndpoints; j++) {
+                               ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
+                               if (ret < 0)
+                                       goto reset;
+                       }
+               }
+       }
+       ret = hcd->driver->check_bandwidth(hcd, udev);
+reset:
+       if (ret < 0)
+               hcd->driver->reset_bandwidth(hcd, udev);
+       return ret;
+}
+
 /* Disables the endpoint: synchronizes with the hcd to make sure all
  * endpoint state is gone from hardware.  usb_hcd_flush_endpoint() must
  * have been called previously.  Use for set_configuration, set_interface,
@@ -1897,8 +2035,20 @@ int usb_add_hcd(struct usb_hcd *hcd,
                retval = -ENOMEM;
                goto err_allocate_root_hub;
        }
-       rhdev->speed = (hcd->driver->flags & HCD_USB2) ? USB_SPEED_HIGH :
-                       USB_SPEED_FULL;
+
+       switch (hcd->driver->flags & HCD_MASK) {
+       case HCD_USB11:
+               rhdev->speed = USB_SPEED_FULL;
+               break;
+       case HCD_USB2:
+               rhdev->speed = USB_SPEED_HIGH;
+               break;
+       case HCD_USB3:
+               rhdev->speed = USB_SPEED_SUPER;
+               break;
+       default:
+               goto err_allocate_root_hub;
+       }
        hcd->self.root_hub = rhdev;
 
        /* wakeup flag init defaults to "everything works" for root hubs,
index e7d4479de41cd04b692d1fe659275f9cadd8c69a..d397ecfd5b178d881f97c72e51989bfccbe2a064 100644 (file)
@@ -173,6 +173,8 @@ struct hc_driver {
 #define        HCD_LOCAL_MEM   0x0002          /* HC needs local memory */
 #define        HCD_USB11       0x0010          /* USB 1.1 */
 #define        HCD_USB2        0x0020          /* USB 2.0 */
+#define        HCD_USB3        0x0040          /* USB 3.0 */
+#define        HCD_MASK        0x0070
 
        /* called to init HCD and root hub */
        int     (*reset) (struct usb_hcd *hcd);
@@ -182,10 +184,10 @@ struct hc_driver {
         * a whole, not just the root hub; they're for PCI bus glue.
         */
        /* called after suspending the hub, before entering D3 etc */
-       int     (*pci_suspend) (struct usb_hcd *hcd, pm_message_t message);
+       int     (*pci_suspend)(struct usb_hcd *hcd);
 
        /* called after entering D0 (etc), before resuming the hub */
-       int     (*pci_resume) (struct usb_hcd *hcd);
+       int     (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
 
        /* cleanly make HCD stop writing memory and doing I/O */
        void    (*stop) (struct usb_hcd *hcd);
@@ -224,6 +226,43 @@ struct hc_driver {
        void    (*relinquish_port)(struct usb_hcd *, int);
                /* has a port been handed over to a companion? */
        int     (*port_handed_over)(struct usb_hcd *, int);
+
+       /* xHCI specific functions */
+               /* Called by usb_alloc_dev to alloc HC device structures */
+       int     (*alloc_dev)(struct usb_hcd *, struct usb_device *);
+               /* Called by usb_release_dev to free HC device structures */
+       void    (*free_dev)(struct usb_hcd *, struct usb_device *);
+
+       /* Bandwidth computation functions */
+       /* Note that add_endpoint() can only be called once per endpoint before
+        * check_bandwidth() or reset_bandwidth() must be called.
+        * drop_endpoint() can only be called once per endpoint also.
+        * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+        * add the endpoint to the schedule with possibly new parameters denoted by a
+        * different endpoint descriptor in usb_host_endpoint.
+        * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+        * not allowed.
+        */
+               /* Allocate endpoint resources and add them to a new schedule */
+       int     (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
+               /* Drop an endpoint from a new schedule */
+       int     (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
+               /* Check that a new hardware configuration, set using
+                * endpoint_enable and endpoint_disable, does not exceed bus
+                * bandwidth.  This must be called before any set configuration
+                * or set interface requests are sent to the device.
+                */
+       int     (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+               /* Reset the device schedule to the last known good schedule,
+                * which was set from a previous successful call to
+                * check_bandwidth().  This reverts any add_endpoint() and
+                * drop_endpoint() calls since that last successful call.
+                * Used for when a check_bandwidth() call fails due to resource
+                * or bandwidth constraints.
+                */
+       void    (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+               /* Returns the hardware-chosen device address */
+       int     (*address_device)(struct usb_hcd *, struct usb_device *udev);
 };
 
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -242,6 +281,9 @@ extern void usb_hcd_disable_endpoint(struct usb_device *udev,
 extern void usb_hcd_reset_endpoint(struct usb_device *udev,
                struct usb_host_endpoint *ep);
 extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
+extern int usb_hcd_check_bandwidth(struct usb_device *udev,
+               struct usb_host_config *new_config,
+               struct usb_interface *new_intf);
 extern int usb_hcd_get_frame_number(struct usb_device *udev);
 
 extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
@@ -261,14 +303,11 @@ struct pci_device_id;
 extern int usb_hcd_pci_probe(struct pci_dev *dev,
                                const struct pci_device_id *id);
 extern void usb_hcd_pci_remove(struct pci_dev *dev);
-
-#ifdef CONFIG_PM
-extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg);
-extern int usb_hcd_pci_resume(struct pci_dev *dev);
-#endif /* CONFIG_PM */
-
 extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
 
+#ifdef CONFIG_PM_SLEEP
+extern struct dev_pm_ops       usb_hcd_pci_pm_ops;
+#endif
 #endif /* CONFIG_PCI */
 
 /* pci-ish (pdev null is ok) buffer alloc/mapping support */
index be86ae3f40881516d529a894319ed52c71086b82..2af3b4f0605405dd723b1ccd4f853f7f85dc1848 100644 (file)
@@ -155,6 +155,8 @@ static inline char *portspeed(int portstatus)
                return "480 Mb/s";
        else if (portstatus & (1 << USB_PORT_FEAT_LOWSPEED))
                return "1.5 Mb/s";
+       else if (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED))
+               return "5.0 Gb/s";
        else
                return "12 Mb/s";
 }
@@ -457,13 +459,13 @@ static void hub_tt_kevent (struct work_struct *work)
 
        spin_lock_irqsave (&hub->tt.lock, flags);
        while (--limit && !list_empty (&hub->tt.clear_list)) {
-               struct list_head        *temp;
+               struct list_head        *next;
                struct usb_tt_clear     *clear;
                struct usb_device       *hdev = hub->hdev;
                int                     status;
 
-               temp = hub->tt.clear_list.next;
-               clear = list_entry (temp, struct usb_tt_clear, clear_list);
+               next = hub->tt.clear_list.next;
+               clear = list_entry (next, struct usb_tt_clear, clear_list);
                list_del (&clear->clear_list);
 
                /* drop lock so HCD can concurrently report other TT errors */
@@ -951,6 +953,9 @@ static int hub_configure(struct usb_hub *hub,
                                        ret);
                        hub->tt.hub = hdev;
                        break;
+               case 3:
+                       /* USB 3.0 hubs don't have a TT */
+                       break;
                default:
                        dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
                                hdev->descriptor.bDeviceProtocol);
@@ -1323,6 +1328,11 @@ EXPORT_SYMBOL_GPL(usb_set_device_state);
  * 0 is reserved by USB for default address; (b) Linux's USB stack
  * uses always #1 for the root hub of the controller. So USB stack's
  * port #1, which is wusb virtual-port #0 has address #2.
+ *
+ * Devices connected under xHCI are not as simple.  The host controller
+ * supports virtualization, so the hardware assigns device addresses and
+ * the HCD must setup data structures before issuing a set address
+ * command to the hardware.
  */
 static void choose_address(struct usb_device *udev)
 {
@@ -1642,6 +1652,9 @@ int usb_new_device(struct usb_device *udev)
        err = usb_configure_device(udev);       /* detect & probe dev/intfs */
        if (err < 0)
                goto fail;
+       dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
+                       udev->devnum, udev->bus->busnum,
+                       (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
        /* export the usbdev device-node for libusb */
        udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
                        (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
@@ -2395,19 +2408,29 @@ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
 static int hub_set_address(struct usb_device *udev, int devnum)
 {
        int retval;
+       struct usb_hcd *hcd = bus_to_hcd(udev->bus);
 
-       if (devnum <= 1)
+       /*
+        * The host controller will choose the device address,
+        * instead of the core having chosen it earlier
+        */
+       if (!hcd->driver->address_device && devnum <= 1)
                return -EINVAL;
        if (udev->state == USB_STATE_ADDRESS)
                return 0;
        if (udev->state != USB_STATE_DEFAULT)
                return -EINVAL;
-       retval = usb_control_msg(udev, usb_sndaddr0pipe(),
-               USB_REQ_SET_ADDRESS, 0, devnum, 0,
-               NULL, 0, USB_CTRL_SET_TIMEOUT);
+       if (hcd->driver->address_device) {
+               retval = hcd->driver->address_device(hcd, udev);
+       } else {
+               retval = usb_control_msg(udev, usb_sndaddr0pipe(),
+                               USB_REQ_SET_ADDRESS, 0, devnum, 0,
+                               NULL, 0, USB_CTRL_SET_TIMEOUT);
+               if (retval == 0)
+                       update_address(udev, devnum);
+       }
        if (retval == 0) {
                /* Device now using proper address. */
-               update_address(udev, devnum);
                usb_set_device_state(udev, USB_STATE_ADDRESS);
                usb_ep0_reinit(udev);
        }
@@ -2430,6 +2453,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
        static DEFINE_MUTEX(usb_address0_mutex);
 
        struct usb_device       *hdev = hub->hdev;
+       struct usb_hcd          *hcd = bus_to_hcd(hdev->bus);
        int                     i, j, retval;
        unsigned                delay = HUB_SHORT_RESET_TIME;
        enum usb_device_speed   oldspeed = udev->speed;
@@ -2452,11 +2476,24 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
 
        mutex_lock(&usb_address0_mutex);
 
-       /* Reset the device; full speed may morph to high speed */
-       retval = hub_port_reset(hub, port1, udev, delay);
-       if (retval < 0)         /* error or disconnect */
+       if ((hcd->driver->flags & HCD_USB3) && udev->config) {
+               /* FIXME this will need special handling by the xHCI driver. */
+               dev_dbg(&udev->dev,
+                               "xHCI reset of configured device "
+                               "not supported yet.\n");
+               retval = -EINVAL;
                goto fail;
-                               /* success, speed is known */
+       } else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
+               /* Don't reset USB 3.0 devices during an initial setup */
+               usb_set_device_state(udev, USB_STATE_DEFAULT);
+       } else {
+               /* Reset the device; full speed may morph to high speed */
+               /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+               retval = hub_port_reset(hub, port1, udev, delay);
+               if (retval < 0)         /* error or disconnect */
+                       goto fail;
+               /* success, speed is known */
+       }
        retval = -ENODEV;
 
        if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -2471,6 +2508,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
         * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
         */
        switch (udev->speed) {
+       case USB_SPEED_SUPER:
        case USB_SPEED_VARIABLE:        /* fixed at 512 */
                udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
                break;
@@ -2496,16 +2534,20 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
        case USB_SPEED_LOW:     speed = "low";  break;
        case USB_SPEED_FULL:    speed = "full"; break;
        case USB_SPEED_HIGH:    speed = "high"; break;
+       case USB_SPEED_SUPER:
+                               speed = "super";
+                               break;
        case USB_SPEED_VARIABLE:
                                speed = "variable";
                                type = "Wireless ";
                                break;
        default:                speed = "?";    break;
        }
-       dev_info (&udev->dev,
-                 "%s %s speed %sUSB device using %s and address %d\n",
-                 (udev->config) ? "reset" : "new", speed, type,
-                 udev->bus->controller->driver->name, devnum);
+       if (udev->speed != USB_SPEED_SUPER)
+               dev_info(&udev->dev,
+                               "%s %s speed %sUSB device using %s and address %d\n",
+                               (udev->config) ? "reset" : "new", speed, type,
+                               udev->bus->controller->driver->name, devnum);
 
        /* Set up TT records, if needed  */
        if (hdev->tt) {
@@ -2530,7 +2572,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
         * value.
         */
        for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
-               if (USE_NEW_SCHEME(retry_counter)) {
+               /*
+                * An xHCI controller cannot send any packets to a device until
+                * a set address command successfully completes.
+                */
+               if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
                        struct usb_device_descriptor *buf;
                        int r = 0;
 
@@ -2596,7 +2642,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
                 * unauthorized address in the Connect Ack sequence;
                 * authorization will assign the final address.
                 */
-               if (udev->wusb == 0) {
+               if (udev->wusb == 0) {
                        for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
                                retval = hub_set_address(udev, devnum);
                                if (retval >= 0)
@@ -2609,13 +2655,20 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
                                        devnum, retval);
                                goto fail;
                        }
+                       if (udev->speed == USB_SPEED_SUPER) {
+                               devnum = udev->devnum;
+                               dev_info(&udev->dev,
+                                               "%s SuperSpeed USB device using %s and address %d\n",
+                                               (udev->config) ? "reset" : "new",
+                                               udev->bus->controller->driver->name, devnum);
+                       }
 
                        /* cope with hardware quirkiness:
                         *  - let SET_ADDRESS settle, some device hardware wants it
                         *  - read ep0 maxpacket even for high and low speed,
                         */
                        msleep(10);
-                       if (USE_NEW_SCHEME(retry_counter))
+                       if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
                                break;
                }
 
@@ -2634,8 +2687,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
        if (retval)
                goto fail;
 
-       i = udev->descriptor.bMaxPacketSize0 == 0xff?   /* wusb device? */
-           512 : udev->descriptor.bMaxPacketSize0;
+       if (udev->descriptor.bMaxPacketSize0 == 0xff ||
+                       udev->speed == USB_SPEED_SUPER)
+               i = 512;
+       else
+               i = udev->descriptor.bMaxPacketSize0;
        if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
                if (udev->speed != USB_SPEED_FULL ||
                                !(i == 8 || i == 16 || i == 32 || i == 64)) {
@@ -2847,19 +2903,41 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
                }
 
                usb_set_device_state(udev, USB_STATE_POWERED);
-               udev->speed = USB_SPEED_UNKNOWN;
                udev->bus_mA = hub->mA_per_port;
                udev->level = hdev->level + 1;
                udev->wusb = hub_is_wusb(hub);
 
-               /* set the address */
-               choose_address(udev);
-               if (udev->devnum <= 0) {
-                       status = -ENOTCONN;     /* Don't retry */
-                       goto loop;
+               /*
+                * USB 3.0 devices are reset automatically before the connect
+                * port status change appears, and the root hub port status
+                * shows the correct speed.  We also get port change
+                * notifications for USB 3.0 devices from the USB 3.0 portion of
+                * an external USB 3.0 hub, but this isn't handled correctly yet
+                * FIXME.
+                */
+
+               if (!(hcd->driver->flags & HCD_USB3))
+                       udev->speed = USB_SPEED_UNKNOWN;
+               else if ((hdev->parent == NULL) &&
+                               (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED)))
+                       udev->speed = USB_SPEED_SUPER;
+               else
+                       udev->speed = USB_SPEED_UNKNOWN;
+
+               /*
+                * xHCI needs to issue an address device command later
+                * in the hub_port_init sequence for SS/HS/FS/LS devices.
+                */
+               if (!(hcd->driver->flags & HCD_USB3)) {
+                       /* set the address */
+                       choose_address(udev);
+                       if (udev->devnum <= 0) {
+                               status = -ENOTCONN;     /* Don't retry */
+                               goto loop;
+                       }
                }
 
-               /* reset and get descriptor */
+               /* reset (non-USB 3.0 devices) and get descriptor */
                status = hub_port_init(hub, udev, port1, i);
                if (status < 0)
                        goto loop;
index 2a116ce53c9b8e441f942b5b1b386a06a90cf738..889c0f32a40b729431f2dab51d13aead39f5f3e8 100644 (file)
 #define USB_PORT_FEAT_L1               5       /* L1 suspend */
 #define USB_PORT_FEAT_POWER            8
 #define USB_PORT_FEAT_LOWSPEED         9
+/* This value was never in Table 11-17 */
 #define USB_PORT_FEAT_HIGHSPEED                10
+/* This value is also fake */
+#define USB_PORT_FEAT_SUPERSPEED       11
 #define USB_PORT_FEAT_C_CONNECTION     16
 #define USB_PORT_FEAT_C_ENABLE         17
 #define USB_PORT_FEAT_C_SUSPEND                18
index b626283776541fe192c0442a46ab0e33415cc167..2bed83caacb1a0c714a2e05af21242bbb504dcad 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/timer.h>
 #include <linux/ctype.h>
+#include <linux/nls.h>
 #include <linux/device.h>
 #include <linux/scatterlist.h>
 #include <linux/usb/quirks.h>
@@ -364,6 +365,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
        int i;
        int urb_flags;
        int dma;
+       int use_sg;
 
        if (!io || !dev || !sg
                        || usb_pipecontrol(pipe)
@@ -391,7 +393,19 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
        if (io->entries <= 0)
                return io->entries;
 
-       io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+       /* If we're running on an xHCI host controller, queue the whole scatter
+        * gather list with one call to urb_enqueue().  This is only for bulk,
+        * as that endpoint type does not care how the data gets broken up
+        * across frames.
+        */
+       if (usb_pipebulk(pipe) &&
+                       (bus_to_hcd(dev->bus)->driver->flags & HCD_USB3)) {
+               io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
+               use_sg = true;
+       } else {
+               io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
+               use_sg = false;
+       }
        if (!io->urbs)
                goto nomem;
 
@@ -401,62 +415,92 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
        if (usb_pipein(pipe))
                urb_flags |= URB_SHORT_NOT_OK;
 
-       for_each_sg(sg, sg, io->entries, i) {
-               unsigned len;
-
-               io->urbs[i] = usb_alloc_urb(0, mem_flags);
-               if (!io->urbs[i]) {
-                       io->entries = i;
+       if (use_sg) {
+               io->urbs[0] = usb_alloc_urb(0, mem_flags);
+               if (!io->urbs[0]) {
+                       io->entries = 0;
                        goto nomem;
                }
 
-               io->urbs[i]->dev = NULL;
-               io->urbs[i]->pipe = pipe;
-               io->urbs[i]->interval = period;
-               io->urbs[i]->transfer_flags = urb_flags;
-
-               io->urbs[i]->complete = sg_complete;
-               io->urbs[i]->context = io;
-
-               /*
-                * Some systems need to revert to PIO when DMA is temporarily
-                * unavailable.  For their sakes, both transfer_buffer and
-                * transfer_dma are set when possible.  However this can only
-                * work on systems without:
-                *
-                *  - HIGHMEM, since DMA buffers located in high memory are
-                *    not directly addressable by the CPU for PIO;
-                *
-                *  - IOMMU, since dma_map_sg() is allowed to use an IOMMU to
-                *    make virtually discontiguous buffers be "dma-contiguous"
-                *    so that PIO and DMA need diferent numbers of URBs.
-                *
-                * So when HIGHMEM or IOMMU are in use, transfer_buffer is NULL
-                * to prevent stale pointers and to help spot bugs.
-                */
-               if (dma) {
-                       io->urbs[i]->transfer_dma = sg_dma_address(sg);
-                       len = sg_dma_len(sg);
+               io->urbs[0]->dev = NULL;
+               io->urbs[0]->pipe = pipe;
+               io->urbs[0]->interval = period;
+               io->urbs[0]->transfer_flags = urb_flags;
+
+               io->urbs[0]->complete = sg_complete;
+               io->urbs[0]->context = io;
+               /* A length of zero means transfer the whole sg list */
+               io->urbs[0]->transfer_buffer_length = length;
+               if (length == 0) {
+                       for_each_sg(sg, sg, io->entries, i) {
+                               io->urbs[0]->transfer_buffer_length +=
+                                       sg_dma_len(sg);
+                       }
+               }
+               io->urbs[0]->sg = io;
+               io->urbs[0]->num_sgs = io->entries;
+               io->entries = 1;
+       } else {
+               for_each_sg(sg, sg, io->entries, i) {
+                       unsigned len;
+
+                       io->urbs[i] = usb_alloc_urb(0, mem_flags);
+                       if (!io->urbs[i]) {
+                               io->entries = i;
+                               goto nomem;
+                       }
+
+                       io->urbs[i]->dev = NULL;
+                       io->urbs[i]->pipe = pipe;
+                       io->urbs[i]->interval = period;
+                       io->urbs[i]->transfer_flags = urb_flags;
+
+                       io->urbs[i]->complete = sg_complete;
+                       io->urbs[i]->context = io;
+
+                       /*
+                        * Some systems need to revert to PIO when DMA is
+                        * temporarily unavailable.  For their sakes, both
+                        * transfer_buffer and transfer_dma are set when
+                        * possible.  However this can only work on systems
+                        * without:
+                        *
+                        *  - HIGHMEM, since DMA buffers located in high memory
+                        *    are not directly addressable by the CPU for PIO;
+                        *
+                        *  - IOMMU, since dma_map_sg() is allowed to use an
+                        *    IOMMU to make virtually discontiguous buffers be
+                        *    "dma-contiguous" so that PIO and DMA need different
+                        *    numbers of URBs.
+                        *
+                        * So when HIGHMEM or IOMMU are in use, transfer_buffer
+                        * is NULL to prevent stale pointers and to help spot
+                        * bugs.
+                        */
+                       if (dma) {
+                               io->urbs[i]->transfer_dma = sg_dma_address(sg);
+                               len = sg_dma_len(sg);
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_GART_IOMMU)
-                       io->urbs[i]->transfer_buffer = NULL;
+                               io->urbs[i]->transfer_buffer = NULL;
 #else
-                       io->urbs[i]->transfer_buffer = sg_virt(sg);
+                               io->urbs[i]->transfer_buffer = sg_virt(sg);
 #endif
-               } else {
-                       /* hc may use _only_ transfer_buffer */
-                       io->urbs[i]->transfer_buffer = sg_virt(sg);
-                       len = sg->length;
-               }
+                       } else {
+                               /* hc may use _only_ transfer_buffer */
+                               io->urbs[i]->transfer_buffer = sg_virt(sg);
+                               len = sg->length;
+                       }
 
-               if (length) {
-                       len = min_t(unsigned, len, length);
-                       length -= len;
-                       if (length == 0)
-                               io->entries = i + 1;
+                       if (length) {
+                               len = min_t(unsigned, len, length);
+                               length -= len;
+                               if (length == 0)
+                                       io->entries = i + 1;
+                       }
+                       io->urbs[i]->transfer_buffer_length = len;
                }
-               io->urbs[i]->transfer_buffer_length = len;
+               io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
        }
-       io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
 
        /* transaction state */
        io->count = io->entries;
@@ -509,6 +553,10 @@ EXPORT_SYMBOL_GPL(usb_sg_init);
  * could be transferred.  That capability is less useful for low or full
  * speed interrupt endpoints, which allow at most one packet per millisecond,
  * of at most 8 or 64 bytes (respectively).
+ *
+ * It is not necessary to call this function to reserve bandwidth for devices
+ * under an xHCI host controller, as the bandwidth is reserved when the
+ * configuration or interface alt setting is selected.
  */
 void usb_sg_wait(struct usb_sg_request *io)
 {
@@ -759,7 +807,7 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid,
 }
 
 /**
- * usb_string - returns ISO 8859-1 version of a string descriptor
+ * usb_string - returns UTF-8 version of a string descriptor
  * @dev: the device whose string descriptor is being retrieved
  * @index: the number of the descriptor
  * @buf: where to put the string
@@ -767,17 +815,10 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid,
  * Context: !in_interrupt ()
  *
  * This converts the UTF-16LE encoded strings returned by devices, from
- * usb_get_string_descriptor(), to null-terminated ISO-8859-1 encoded ones
- * that are more usable in most kernel contexts.  Note that all characters
- * in the chosen descriptor that can't be encoded using ISO-8859-1
- * are converted to the question mark ("?") character, and this function
+ * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones
+ * that are more usable in most kernel contexts.  Note that this function
  * chooses strings in the first language supported by the device.
  *
- * The ASCII (or, redundantly, "US-ASCII") character set is the seven-bit
- * subset of ISO 8859-1. ISO-8859-1 is the eight-bit subset of Unicode,
- * and is appropriate for use many uses of English and several other
- * Western European languages.  (But it doesn't include the "Euro" symbol.)
- *
  * This call is synchronous, and may not be used in an interrupt context.
  *
  * Returns length of the string (>= 0) or usb_control_msg status (< 0).
@@ -786,7 +827,6 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
 {
        unsigned char *tbuf;
        int err;
-       unsigned int u, idx;
 
        if (dev->state == USB_STATE_SUSPENDED)
                return -EHOSTUNREACH;
@@ -821,16 +861,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
                goto errout;
 
        size--;         /* leave room for trailing NULL char in output buffer */
-       for (idx = 0, u = 2; u < err; u += 2) {
-               if (idx >= size)
-                       break;
-               if (tbuf[u+1])                  /* high byte */
-                       buf[idx++] = '?';  /* non ISO-8859-1 character */
-               else
-                       buf[idx++] = tbuf[u];
-       }
-       buf[idx] = 0;
-       err = idx;
+       err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2,
+                       UTF16_LITTLE_ENDIAN, buf, size);
+       buf[err] = 0;
 
        if (tbuf[1] != USB_DT_STRING)
                dev_dbg(&dev->dev,
@@ -843,6 +876,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
 }
 EXPORT_SYMBOL_GPL(usb_string);
 
+/* one UTF-8-encoded 16-bit character has at most three bytes */
+#define MAX_USB_STRING_SIZE (127 * 3 + 1)
+
 /**
  * usb_cache_string - read a string descriptor and cache it for later use
  * @udev: the device whose string descriptor is being read
@@ -860,9 +896,9 @@ char *usb_cache_string(struct usb_device *udev, int index)
        if (index <= 0)
                return NULL;
 
-       buf = kmalloc(256, GFP_KERNEL);
+       buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
        if (buf) {
-               len = usb_string(udev, index, buf, 256);
+               len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
                if (len > 0) {
                        smallbuf = kmalloc(++len, GFP_KERNEL);
                        if (!smallbuf)
@@ -1664,6 +1700,21 @@ free_interfaces:
        if (ret)
                goto free_interfaces;
 
+       /* Make sure we have bandwidth (and available HCD resources) for this
+        * configuration.  Remove endpoints from the schedule if we're dropping
+        * this configuration to set configuration 0.  After this point, the
+        * host controller will not allow submissions to dropped endpoints.  If
+        * this call fails, the device state is unchanged.
+        */
+       if (cp)
+               ret = usb_hcd_check_bandwidth(dev, cp, NULL);
+       else
+               ret = usb_hcd_check_bandwidth(dev, NULL, NULL);
+       if (ret < 0) {
+               usb_autosuspend_device(dev);
+               goto free_interfaces;
+       }
+
        /* if it's already configured, clear out old state first.
         * getting rid of old interfaces means unbinding their drivers.
         */
@@ -1686,6 +1737,7 @@ free_interfaces:
        dev->actconfig = cp;
        if (!cp) {
                usb_set_device_state(dev, USB_STATE_ADDRESS);
+               usb_hcd_check_bandwidth(dev, NULL, NULL);
                usb_autosuspend_device(dev);
                goto free_interfaces;
        }
index c6678919792729b0c0548f6aa15592efc50e1fd1..b5c72e458943f753d5981c20596de1ffe376fb4a 100644 (file)
@@ -552,8 +552,8 @@ static struct attribute *dev_string_attrs[] = {
 static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
 {
-       struct usb_device *udev = to_usb_device(
-                       container_of(kobj, struct device, kobj));
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct usb_device *udev = to_usb_device(dev);
 
        if (a == &dev_attr_manufacturer.attr) {
                if (udev->manufacturer == NULL)
@@ -585,8 +585,8 @@ static ssize_t
 read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
                char *buf, loff_t off, size_t count)
 {
-       struct usb_device *udev = to_usb_device(
-                       container_of(kobj, struct device, kobj));
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct usb_device *udev = to_usb_device(dev);
        size_t nleft = count;
        size_t srclen, n;
        int cfgno;
@@ -786,8 +786,8 @@ static struct attribute *intf_assoc_attrs[] = {
 static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
 {
-       struct usb_interface *intf = to_usb_interface(
-                       container_of(kobj, struct device, kobj));
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct usb_interface *intf = to_usb_interface(dev);
 
        if (intf->intf_assoc == NULL)
                return 0;
index 3376055f36e7349057531b41c080832c5bd04623..0885d4abdc6265d0b7775da9b9f5503fc2be0dff 100644 (file)
@@ -241,6 +241,12 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
  * If the USB subsystem can't allocate sufficient bandwidth to perform
  * the periodic request, submitting such a periodic request should fail.
  *
+ * For devices under xHCI, the bandwidth is reserved at configuration time, or
+ * when the alt setting is selected.  If there is not enough bus bandwidth, the
+ * configuration/alt setting request will fail.  Therefore, submissions to
+ * periodic endpoints on devices under xHCI should never fail due to bandwidth
+ * constraints.
+ *
  * Device drivers must explicitly request that repetition, by ensuring that
  * some URB is always on the endpoint's queue (except possibly for short
  * periods during completion callacks).  When there is no longer an urb
@@ -351,6 +357,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
        if (xfertype == USB_ENDPOINT_XFER_ISOC) {
                int     n, len;
 
+               /* FIXME SuperSpeed isoc endpoints have up to 16 bursts */
                /* "high bandwidth" mode, 1-3 packets/uframe? */
                if (dev->speed == USB_SPEED_HIGH) {
                        int     mult = 1 + ((max >> 11) & 0x03);
@@ -426,6 +433,11 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
                        return -EINVAL;
                /* too big? */
                switch (dev->speed) {
+               case USB_SPEED_SUPER:   /* units are 125us */
+                       /* Handle up to 2^(16-1) microframes */
+                       if (urb->interval > (1 << 15))
+                               return -EINVAL;
+                       max = 1 << 15;
                case USB_SPEED_HIGH:    /* units are microframes */
                        /* NOTE usb handles 2^15 */
                        if (urb->interval > (1024 * 8))
index 7eee400d3e32cac6486cb7809234792012ed4689..a26f73880c32388340fad1ef0639c8233d1c3be4 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/usb.h>
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
+#include <linux/debugfs.h>
 
 #include <asm/io.h>
 #include <linux/scatterlist.h>
@@ -139,8 +140,7 @@ static int __find_interface(struct device *dev, void *data)
        struct find_interface_arg *arg = data;
        struct usb_interface *intf;
 
-       /* can't look at usb devices, only interfaces */
-       if (is_usb_device(dev))
+       if (!is_usb_interface(dev))
                return 0;
 
        intf = to_usb_interface(dev);
@@ -184,11 +184,16 @@ EXPORT_SYMBOL_GPL(usb_find_interface);
 static void usb_release_dev(struct device *dev)
 {
        struct usb_device *udev;
+       struct usb_hcd *hcd;
 
        udev = to_usb_device(dev);
+       hcd = bus_to_hcd(udev->bus);
 
        usb_destroy_configuration(udev);
-       usb_put_hcd(bus_to_hcd(udev->bus));
+       /* Root hubs aren't real devices, so don't free HCD resources */
+       if (hcd->driver->free_dev && udev->parent)
+               hcd->driver->free_dev(hcd, udev);
+       usb_put_hcd(hcd);
        kfree(udev->product);
        kfree(udev->manufacturer);
        kfree(udev->serial);
@@ -305,10 +310,21 @@ static struct dev_pm_ops usb_device_pm_ops = {
 
 #endif /* CONFIG_PM */
 
+
+static char *usb_nodename(struct device *dev)
+{
+       struct usb_device *usb_dev;
+
+       usb_dev = to_usb_device(dev);
+       return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d",
+                        usb_dev->bus->busnum, usb_dev->devnum);
+}
+
 struct device_type usb_device_type = {
        .name =         "usb_device",
        .release =      usb_release_dev,
        .uevent =       usb_dev_uevent,
+       .nodename =     usb_nodename,
        .pm =           &usb_device_pm_ops,
 };
 
@@ -348,6 +364,13 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
                kfree(dev);
                return NULL;
        }
+       /* Root hubs aren't true devices, so don't allocate HCD resources */
+       if (usb_hcd->driver->alloc_dev && parent &&
+               !usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
+               usb_put_hcd(bus_to_hcd(bus));
+               kfree(dev);
+               return NULL;
+       }
 
        device_initialize(&dev->dev);
        dev->dev.bus = &usb_bus_type;
@@ -375,18 +398,24 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
         */
        if (unlikely(!parent)) {
                dev->devpath[0] = '0';
+               dev->route = 0;
 
                dev->dev.parent = bus->controller;
                dev_set_name(&dev->dev, "usb%d", bus->busnum);
                root_hub = 1;
        } else {
                /* match any labeling on the hubs; it's one-based */
-               if (parent->devpath[0] == '0')
+               if (parent->devpath[0] == '0') {
                        snprintf(dev->devpath, sizeof dev->devpath,
                                "%d", port1);
-               else
+                       /* Root ports are not counted in route string */
+                       dev->route = 0;
+               } else {
                        snprintf(dev->devpath, sizeof dev->devpath,
                                "%s.%d", parent->devpath, port1);
+                       dev->route = parent->route +
+                               (port1 << ((parent->level - 1)*4));
+               }
 
                dev->dev.parent = &parent->dev;
                dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
@@ -799,12 +828,12 @@ void usb_buffer_dmasync(struct urb *urb)
                return;
 
        if (controller->dma_mask) {
-               dma_sync_single(controller,
+               dma_sync_single_for_cpu(controller,
                        urb->transfer_dma, urb->transfer_buffer_length,
                        usb_pipein(urb->pipe)
                                ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
                if (usb_pipecontrol(urb->pipe))
-                       dma_sync_single(controller,
+                       dma_sync_single_for_cpu(controller,
                                        urb->setup_dma,
                                        sizeof(struct usb_ctrlrequest),
                                        DMA_TO_DEVICE);
@@ -922,8 +951,8 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
                        || !controller->dma_mask)
                return;
 
-       dma_sync_sg(controller, sg, n_hw_ents,
-                       is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+       dma_sync_sg_for_cpu(controller, sg, n_hw_ents,
+                           is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
 #endif
@@ -1001,6 +1030,35 @@ static struct notifier_block usb_bus_nb = {
        .notifier_call = usb_bus_notify,
 };
 
+struct dentry *usb_debug_root;
+EXPORT_SYMBOL_GPL(usb_debug_root);
+
+struct dentry *usb_debug_devices;
+
+static int usb_debugfs_init(void)
+{
+       usb_debug_root = debugfs_create_dir("usb", NULL);
+       if (!usb_debug_root)
+               return -ENOENT;
+
+       usb_debug_devices = debugfs_create_file("devices", 0444,
+                                               usb_debug_root, NULL,
+                                               &usbfs_devices_fops);
+       if (!usb_debug_devices) {
+               debugfs_remove(usb_debug_root);
+               usb_debug_root = NULL;
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
+static void usb_debugfs_cleanup(void)
+{
+       debugfs_remove(usb_debug_devices);
+       debugfs_remove(usb_debug_root);
+}
+
 /*
  * Init
  */
@@ -1012,6 +1070,10 @@ static int __init usb_init(void)
                return 0;
        }
 
+       retval = usb_debugfs_init();
+       if (retval)
+               goto out;
+
        retval = ksuspend_usb_init();
        if (retval)
                goto out;
@@ -1021,9 +1083,6 @@ static int __init usb_init(void)
        retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb);
        if (retval)
                goto bus_notifier_failed;
-       retval = usb_host_init();
-       if (retval)
-               goto host_init_failed;
        retval = usb_major_init();
        if (retval)
                goto major_init_failed;
@@ -1053,8 +1112,6 @@ usb_devio_init_failed:
 driver_register_failed:
        usb_major_cleanup();
 major_init_failed:
-       usb_host_cleanup();
-host_init_failed:
        bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
 bus_notifier_failed:
        bus_unregister(&usb_bus_type);
@@ -1079,10 +1136,10 @@ static void __exit usb_exit(void)
        usb_deregister(&usbfs_driver);
        usb_devio_cleanup();
        usb_hub_cleanup();
-       usb_host_cleanup();
        bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
        bus_unregister(&usb_bus_type);
        ksuspend_usb_cleanup();
+       usb_debugfs_cleanup();
 }
 
 subsys_initcall(usb_init);
index 79d8a9ea559ba7dc635f268419f9918bdfcf6c08..e2a8cfaade1ddbc7c5bb4c43c7d1f91f580c6230 100644 (file)
@@ -41,8 +41,6 @@ extern int  usb_hub_init(void);
 extern void usb_hub_cleanup(void);
 extern int usb_major_init(void);
 extern void usb_major_cleanup(void);
-extern int usb_host_init(void);
-extern void usb_host_cleanup(void);
 
 #ifdef CONFIG_PM
 
@@ -106,6 +104,7 @@ extern struct workqueue_struct *ksuspend_usb_wq;
 extern struct bus_type usb_bus_type;
 extern struct device_type usb_device_type;
 extern struct device_type usb_if_device_type;
+extern struct device_type usb_ep_device_type;
 extern struct usb_device_driver usb_generic_driver;
 
 static inline int is_usb_device(const struct device *dev)
@@ -113,6 +112,16 @@ static inline int is_usb_device(const struct device *dev)
        return dev->type == &usb_device_type;
 }
 
+static inline int is_usb_interface(const struct device *dev)
+{
+       return dev->type == &usb_if_device_type;
+}
+
+static inline int is_usb_endpoint(const struct device *dev)
+{
+       return dev->type == &usb_ep_device_type;
+}
+
 /* Do the same for device drivers and interface drivers. */
 
 static inline int is_usb_device_driver(struct device_driver *drv)
index 080bb1e4b847aebcd862b71480596edcf7f8b37e..5d1ddf485d1eb6d5d55e12a4eca7536e10a75838 100644 (file)
@@ -156,7 +156,7 @@ config USB_ATMEL_USBA
 
 config USB_GADGET_FSL_USB2
        boolean "Freescale Highspeed USB DR Peripheral Controller"
-       depends on FSL_SOC
+       depends on FSL_SOC || ARCH_MXC
        select USB_GADGET_DUALSPEED
        help
           Some of Freescale PowerPC processors have a High Speed
@@ -253,7 +253,7 @@ config USB_PXA25X_SMALL
 
 config USB_GADGET_PXA27X
        boolean "PXA 27x"
-       depends on ARCH_PXA && PXA27x
+       depends on ARCH_PXA && (PXA27x || PXA3xx)
        select USB_OTG_UTILS
        help
           Intel's PXA 27x series XScale ARM v5TE processors include
@@ -272,6 +272,20 @@ config USB_PXA27X
        default USB_GADGET
        select USB_GADGET_SELECTED
 
+config USB_GADGET_S3C_HSOTG
+       boolean "S3C HS/OtG USB Device controller"
+       depends on S3C_DEV_USB_HSOTG
+       select USB_GADGET_S3C_HSOTG_PIO
+       help
+         The Samsung S3C64XX USB2.0 high-speed gadget controller
+         integrated into the S3C64XX series SoC.
+
+config USB_S3C_HSOTG
+       tristate
+       depends on USB_GADGET_S3C_HSOTG
+       default USB_GADGET
+       select USB_GADGET_SELECTED
+
 config USB_GADGET_S3C2410
        boolean "S3C2410 USB Device Controller"
        depends on ARCH_S3C2410
@@ -460,6 +474,27 @@ config USB_GOKU
        default USB_GADGET
        select USB_GADGET_SELECTED
 
+config USB_GADGET_LANGWELL
+       boolean "Intel Langwell USB Device Controller"
+       depends on PCI
+       select USB_GADGET_DUALSPEED
+       help
+          Intel Langwell USB Device Controller is a High-Speed USB
+          On-The-Go device controller.
+
+          The number of programmable endpoints differs between
+          controller revisions.
+
+          Say "y" to link the driver statically, or "m" to build a
+          dynamically linked module called "langwell_udc" and force all
+          gadget drivers to also be dynamically linked.
+
+config USB_LANGWELL
+       tristate
+       depends on USB_GADGET_LANGWELL
+       default USB_GADGET
+       select USB_GADGET_SELECTED
+
 
 #
 # LAST -- dummy/emulated controller
@@ -566,6 +601,20 @@ config USB_ZERO_HNPTEST
          the "B-Peripheral" role, that device will use HNP to let this
          one serve as the USB host instead (in the "B-Host" role).
 
+config USB_AUDIO
+       tristate "Audio Gadget (EXPERIMENTAL)"
+       depends on SND
+       help
+         Gadget Audio is compatible with USB Audio Class specification 1.0.
+         It will include at least one AudioControl interface, zero or more
+         AudioStream interfaces and zero or more MIDIStream interfaces.
+
+         Gadget Audio will use on-board ALSA (CONFIG_SND) audio card to
+         playback or capture audio stream.
+
+         Say "y" to link the driver statically, or "m" to build a
+         dynamically linked module called "g_audio".
+
 config USB_ETH
        tristate "Ethernet Gadget (with CDC Ethernet support)"
        depends on NET
index 39a51d746cb76d1eb796fb9798e121b9409ab771..e6017e6bf6da2c7531dae908330231acfbb7649c 100644 (file)
@@ -18,14 +18,21 @@ obj-$(CONFIG_USB_S3C2410)   += s3c2410_udc.o
 obj-$(CONFIG_USB_AT91)         += at91_udc.o
 obj-$(CONFIG_USB_ATMEL_USBA)   += atmel_usba_udc.o
 obj-$(CONFIG_USB_FSL_USB2)     += fsl_usb2_udc.o
+fsl_usb2_udc-objs              := fsl_udc_core.o
+ifeq ($(CONFIG_ARCH_MXC),y)
+fsl_usb2_udc-objs              += fsl_mx3_udc.o
+endif
 obj-$(CONFIG_USB_M66592)       += m66592-udc.o
 obj-$(CONFIG_USB_FSL_QE)       += fsl_qe_udc.o
 obj-$(CONFIG_USB_CI13XXX)      += ci13xxx_udc.o
+obj-$(CONFIG_USB_S3C_HSOTG)    += s3c-hsotg.o
+obj-$(CONFIG_USB_LANGWELL)     += langwell_udc.o
 
 #
 # USB gadget drivers
 #
 g_zero-objs                    := zero.o
+g_audio-objs                   := audio.o
 g_ether-objs                   := ether.o
 g_serial-objs                  := serial.o
 g_midi-objs                    := gmidi.o
@@ -35,6 +42,7 @@ g_printer-objs                        := printer.o
 g_cdc-objs                     := cdc2.o
 
 obj-$(CONFIG_USB_ZERO)         += g_zero.o
+obj-$(CONFIG_USB_AUDIO)                += g_audio.o
 obj-$(CONFIG_USB_ETH)          += g_ether.o
 obj-$(CONFIG_USB_GADGETFS)     += gadgetfs.o
 obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
index 0b2bb8f0706df6ec99a6f357a6cb58a86ca25ba6..72bae8f39d814310ef477df5c987318e79000835 100644 (file)
@@ -485,7 +485,7 @@ static int at91_ep_enable(struct usb_ep *_ep,
                return -ESHUTDOWN;
        }
 
-       tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+       tmp = usb_endpoint_type(desc);
        switch (tmp) {
        case USB_ENDPOINT_XFER_CONTROL:
                DBG("only one control endpoint\n");
@@ -517,7 +517,7 @@ ok:
        local_irq_save(flags);
 
        /* initialize endpoint to match this descriptor */
-       ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
+       ep->is_in = usb_endpoint_dir_in(desc);
        ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
        ep->stopped = 0;
        if (ep->is_in)
@@ -1574,7 +1574,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
 
        udc->driver = driver;
        udc->gadget.dev.driver = &driver->driver;
-       udc->gadget.dev.driver_data = &driver->driver;
+       dev_set_drvdata(&udc->gadget.dev, &driver->driver);
        udc->enabled = 1;
        udc->selfpowered = 1;
 
@@ -1583,7 +1583,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
                DBG("driver->bind() returned %d\n", retval);
                udc->driver = NULL;
                udc->gadget.dev.driver = NULL;
-               udc->gadget.dev.driver_data = NULL;
+               dev_set_drvdata(&udc->gadget.dev, NULL);
                udc->enabled = 0;
                udc->selfpowered = 0;
                return retval;
@@ -1613,7 +1613,7 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
 
        driver->unbind(&udc->gadget);
        udc->gadget.dev.driver = NULL;
-       udc->gadget.dev.driver_data = NULL;
+       dev_set_drvdata(&udc->gadget.dev, NULL);
        udc->driver = NULL;
 
        DBG("unbound from %s\n", driver->driver.name);
index 05c913cc3658f0389ab0ace117a8f50c8a166785..4e970cf0e29ae364b34c5e7436906e95db313952 100644 (file)
@@ -326,13 +326,7 @@ static int vbus_is_present(struct usba_udc *udc)
        return 1;
 }
 
-#if defined(CONFIG_AVR32)
-
-static void toggle_bias(int is_on)
-{
-}
-
-#elif defined(CONFIG_ARCH_AT91)
+#if defined(CONFIG_ARCH_AT91SAM9RL)
 
 #include <mach/at91_pmc.h>
 
@@ -346,7 +340,13 @@ static void toggle_bias(int is_on)
                at91_sys_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
 }
 
-#endif /* CONFIG_ARCH_AT91 */
+#else
+
+static void toggle_bias(int is_on)
+{
+}
+
+#endif /* CONFIG_ARCH_AT91SAM9RL */
 
 static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
 {
@@ -550,12 +550,12 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
        DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
                        ep->ep.name, ept_cfg, maxpacket);
 
-       if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) {
+       if (usb_endpoint_dir_in(desc)) {
                ep->is_in = 1;
                ept_cfg |= USBA_EPT_DIR_IN;
        }
 
-       switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+       switch (usb_endpoint_type(desc)) {
        case USB_ENDPOINT_XFER_CONTROL:
                ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
                ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
new file mode 100644 (file)
index 0000000..94de7e8
--- /dev/null
@@ -0,0 +1,302 @@
+/*
+ * audio.c -- Audio gadget driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+#include "u_audio.h"
+
+#define DRIVER_DESC            "Linux USB Audio Gadget"
+#define DRIVER_VERSION         "Dec 18, 2008"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_audio.c"
+#include "f_audio.c"
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID. */
+#define AUDIO_VENDOR_NUM               0x0525  /* NetChip */
+#define AUDIO_PRODUCT_NUM              0xa4a1  /* Linux-USB Audio Gadget */
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+       .bLength =              sizeof device_desc,
+       .bDescriptorType =      USB_DT_DEVICE,
+
+       .bcdUSB =               __constant_cpu_to_le16(0x200),
+
+       .bDeviceClass =         USB_CLASS_PER_INTERFACE,
+       .bDeviceSubClass =      0,
+       .bDeviceProtocol =      0,
+       /* .bMaxPacketSize0 = f(hardware) */
+
+       /* Vendor and product id defaults change according to what configs
+        * we support.  (As does bNumConfigurations.)  These values can
+        * also be overridden by module parameters.
+        */
+       .idVendor =             __constant_cpu_to_le16(AUDIO_VENDOR_NUM),
+       .idProduct =            __constant_cpu_to_le16(AUDIO_PRODUCT_NUM),
+       /* .bcdDevice = f(hardware) */
+       /* .iManufacturer = DYNAMIC */
+       /* .iProduct = DYNAMIC */
+       /* NO SERIAL NUMBER */
+       .bNumConfigurations =   1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+       .bLength =              sizeof otg_descriptor,
+       .bDescriptorType =      USB_DT_OTG,
+
+       /* REVISIT SRP-only hardware is possible, although
+        * it would not be called "OTG" ...
+        */
+       .bmAttributes =         USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+       (struct usb_descriptor_header *) &otg_descriptor,
+       NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Handle USB audio endpoint set/get command in setup class request
+ */
+
+static int audio_set_endpoint_req(struct usb_configuration *c,
+               const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       int                     value = -EOPNOTSUPP;
+       u16                     ep = le16_to_cpu(ctrl->wIndex);
+       u16                     len = le16_to_cpu(ctrl->wLength);
+       u16                     w_value = le16_to_cpu(ctrl->wValue);
+
+       DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+                       ctrl->bRequest, w_value, len, ep);
+
+       switch (ctrl->bRequest) {
+       case SET_CUR:
+               value = 0;
+               break;
+
+       case SET_MIN:
+               break;
+
+       case SET_MAX:
+               break;
+
+       case SET_RES:
+               break;
+
+       case SET_MEM:
+               break;
+
+       default:
+               break;
+       }
+
+       return value;
+}
+
+static int audio_get_endpoint_req(struct usb_configuration *c,
+               const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       int value = -EOPNOTSUPP;
+       u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+       u16 len = le16_to_cpu(ctrl->wLength);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+
+       DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+                       ctrl->bRequest, w_value, len, ep);
+
+       switch (ctrl->bRequest) {
+       case GET_CUR:
+       case GET_MIN:
+       case GET_MAX:
+       case GET_RES:
+               value = 3;
+               break;
+       case GET_MEM:
+               break;
+       default:
+               break;
+       }
+
+       return value;
+}
+
+static int
+audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct usb_request *req = cdev->req;
+       int value = -EOPNOTSUPP;
+       u16 w_index = le16_to_cpu(ctrl->wIndex);
+       u16 w_value = le16_to_cpu(ctrl->wValue);
+       u16 w_length = le16_to_cpu(ctrl->wLength);
+
+       /* composite driver infrastructure handles everything except
+        * Audio class messages; interface activation uses set_alt().
+        */
+       switch (ctrl->bRequestType) {
+       case USB_AUDIO_SET_ENDPOINT:
+               value = audio_set_endpoint_req(c, ctrl);
+               break;
+
+       case USB_AUDIO_GET_ENDPOINT:
+               value = audio_get_endpoint_req(c, ctrl);
+               break;
+
+       default:
+               ERROR(cdev, "Invalid control req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+       }
+
+       /* respond with data transfer or status phase? */
+       if (value >= 0) {
+               DBG(cdev, "Audio req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+               req->zero = 0;
+               req->length = value;
+               value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+               if (value < 0)
+                       ERROR(cdev, "Audio response on err %d\n", value);
+       }
+
+       /* device either stalls (value < 0) or reports success */
+       return value;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init audio_do_config(struct usb_configuration *c)
+{
+       /* FIXME alloc iConfiguration string, set it in c->strings */
+
+       if (gadget_is_otg(c->cdev->gadget)) {
+               c->descriptors = otg_desc;
+               c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+       }
+
+       audio_bind_config(c);
+
+       return 0;
+}
+
+static struct usb_configuration audio_config_driver = {
+       .label                  = DRIVER_DESC,
+       .bind                   = audio_do_config,
+       .setup                  = audio_setup,
+       .bConfigurationValue    = 1,
+       /* .iConfiguration = DYNAMIC */
+       .bmAttributes           = USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init audio_bind(struct usb_composite_dev *cdev)
+{
+       int                     gcnum;
+       int                     status;
+
+       gcnum = usb_gadget_controller_number(cdev->gadget);
+       if (gcnum >= 0)
+               device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+       else {
+               ERROR(cdev, "controller '%s' not recognized; trying %s\n",
+                       cdev->gadget->name,
+                       audio_config_driver.label);
+               device_desc.bcdDevice =
+                       __constant_cpu_to_le16(0x0300 | 0x0099);
+       }
+
+       /* device descriptor strings: manufacturer, product */
+       snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+               init_utsname()->sysname, init_utsname()->release,
+               cdev->gadget->name);
+       status = usb_string_id(cdev);
+       if (status < 0)
+               goto fail;
+       strings_dev[STRING_MANUFACTURER_IDX].id = status;
+       device_desc.iManufacturer = status;
+
+       status = usb_string_id(cdev);
+       if (status < 0)
+               goto fail;
+       strings_dev[STRING_PRODUCT_IDX].id = status;
+       device_desc.iProduct = status;
+
+       status = usb_add_config(cdev, &audio_config_driver);
+       if (status < 0)
+               goto fail;
+
+       INFO(cdev, "%s, version: %s\n", DRIVER_DESC, DRIVER_VERSION);
+       return 0;
+
+fail:
+       return status;
+}
+
+static int __exit audio_unbind(struct usb_composite_dev *cdev)
+{
+       return 0;
+}
+
+static struct usb_composite_driver audio_driver = {
+       .name           = "g_audio",
+       .dev            = &device_desc,
+       .strings        = audio_strings,
+       .bind           = audio_bind,
+       .unbind         = __exit_p(audio_unbind),
+};
+
+static int __init init(void)
+{
+       return usb_composite_register(&audio_driver);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+       usb_composite_unregister(&audio_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Bryan Wu <cooloney@kernel.org>");
+MODULE_LICENSE("GPL");
+
index 38e531ecae4d310066e7813f2550420aa33e3ab3..c7cb87a6fee22a4519a0f720fcf3f3451eb22920 100644 (file)
@@ -1977,9 +1977,9 @@ static int ep_enable(struct usb_ep *ep,
        if (!list_empty(&mEp->qh[mEp->dir].queue))
                warn("enabling a non-empty endpoint!");
 
-       mEp->dir  = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? TX : RX;
-       mEp->num  =  desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-       mEp->type =  desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+       mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
+       mEp->num  = usb_endpoint_num(desc);
+       mEp->type = usb_endpoint_type(desc);
 
        mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
 
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
new file mode 100644 (file)
index 0000000..66527ba
--- /dev/null
@@ -0,0 +1,707 @@
+/*
+ * f_audio.c -- USB Audio class function driver
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+
+#include "u_audio.h"
+
+#define OUT_EP_MAX_PACKET_SIZE 200
+static int req_buf_size = OUT_EP_MAX_PACKET_SIZE;
+module_param(req_buf_size, int, S_IRUGO);
+MODULE_PARM_DESC(req_buf_size, "ISO OUT endpoint request buffer size");
+
+static int req_count = 256;
+module_param(req_count, int, S_IRUGO);
+MODULE_PARM_DESC(req_count, "ISO OUT endpoint request count");
+
+static int audio_buf_size = 48000;
+module_param(audio_buf_size, int, S_IRUGO);
+MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
+
+/*
+ * DESCRIPTORS ... most are static, but strings and full
+ * configuration descriptors are built on demand.
+ */
+
+/*
+ * We have two interfaces- AudioControl and AudioStreaming
+ * TODO: only support playback currently
+ */
+#define F_AUDIO_AC_INTERFACE   0
+#define F_AUDIO_AS_INTERFACE   1
+#define F_AUDIO_NUM_INTERFACES 2
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc __initdata = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bNumEndpoints =        0,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_USB_AC_HEADER_DESCRIPTOR(2);
+
+#define USB_DT_AC_HEADER_LENGH USB_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct usb_ac_header_descriptor_2 ac_header_desc = {
+       .bLength =              USB_DT_AC_HEADER_LENGH,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   HEADER,
+       .bcdADC =               __constant_cpu_to_le16(0x0100),
+       .wTotalLength =         __constant_cpu_to_le16(USB_DT_AC_HEADER_LENGH),
+       .bInCollection =        F_AUDIO_NUM_INTERFACES,
+       .baInterfaceNr = {
+               [0] =           F_AUDIO_AC_INTERFACE,
+               [1] =           F_AUDIO_AS_INTERFACE,
+       }
+};
+
+#define INPUT_TERMINAL_ID      1
+static struct usb_input_terminal_descriptor input_terminal_desc = {
+       .bLength =              USB_DT_AC_INPUT_TERMINAL_SIZE,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   INPUT_TERMINAL,
+       .bTerminalID =          INPUT_TERMINAL_ID,
+       .wTerminalType =        USB_AC_TERMINAL_STREAMING,
+       .bAssocTerminal =       0,
+       .wChannelConfig =       0x3,
+};
+
+DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID                2
+static struct usb_ac_feature_unit_descriptor_0 feature_unit_desc = {
+       .bLength                = USB_DT_AC_FEATURE_UNIT_SIZE(0),
+       .bDescriptorType        = USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype     = FEATURE_UNIT,
+       .bUnitID                = FEATURE_UNIT_ID,
+       .bSourceID              = INPUT_TERMINAL_ID,
+       .bControlSize           = 2,
+       .bmaControls[0]         = (FU_MUTE | FU_VOLUME),
+};
+
+static struct usb_audio_control mute_control = {
+       .list = LIST_HEAD_INIT(mute_control.list),
+       .name = "Mute Control",
+       .type = MUTE_CONTROL,
+       /* Todo: add real Mute control code */
+       .set = generic_set_cmd,
+       .get = generic_get_cmd,
+};
+
+static struct usb_audio_control volume_control = {
+       .list = LIST_HEAD_INIT(volume_control.list),
+       .name = "Volume Control",
+       .type = VOLUME_CONTROL,
+       /* Todo: add real Volume control code */
+       .set = generic_set_cmd,
+       .get = generic_get_cmd,
+};
+
+static struct usb_audio_control_selector feature_unit = {
+       .list = LIST_HEAD_INIT(feature_unit.list),
+       .id = FEATURE_UNIT_ID,
+       .name = "Mute & Volume Control",
+       .type = FEATURE_UNIT,
+       .desc = (struct usb_descriptor_header *)&feature_unit_desc,
+};
+
+#define OUTPUT_TERMINAL_ID     3
+static struct usb_output_terminal_descriptor output_terminal_desc = {
+       .bLength                = USB_DT_AC_OUTPUT_TERMINAL_SIZE,
+       .bDescriptorType        = USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype     = OUTPUT_TERMINAL,
+       .bTerminalID            = OUTPUT_TERMINAL_ID,
+       .wTerminalType          = USB_AC_OUTPUT_TERMINAL_SPEAKER,
+       .bAssocTerminal         = FEATURE_UNIT_ID,
+       .bSourceID              = FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bAlternateSetting =    0,
+       .bNumEndpoints =        0,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+       .bLength =              USB_DT_INTERFACE_SIZE,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       .bAlternateSetting =    1,
+       .bNumEndpoints =        1,
+       .bInterfaceClass =      USB_CLASS_AUDIO,
+       .bInterfaceSubClass =   USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct usb_as_header_descriptor as_header_desc = {
+       .bLength =              USB_DT_AS_HEADER_SIZE,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   AS_GENERAL,
+       .bTerminalLink =        INPUT_TERMINAL_ID,
+       .bDelay =               1,
+       .wFormatTag =           USB_AS_AUDIO_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct usb_as_formate_type_i_discrete_descriptor_1 as_type_i_desc = {
+       .bLength =              USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubtype =   FORMAT_TYPE,
+       .bFormatType =          USB_AS_FORMAT_TYPE_I,
+       .bSubframeSize =        2,
+       .bBitResolution =       16,
+       .bSamFreqType =         1,
+};
+
+/* Standard ISO OUT Endpoint Descriptor */
+static struct usb_endpoint_descriptor as_out_ep_desc __initdata = {
+       .bLength =              USB_DT_ENDPOINT_AUDIO_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_OUT,
+       .bmAttributes =         USB_AS_ENDPOINT_ADAPTIVE
+                               | USB_ENDPOINT_XFER_ISOC,
+       .wMaxPacketSize =       __constant_cpu_to_le16(OUT_EP_MAX_PACKET_SIZE),
+       .bInterval =            4,
+};
+
+/* Class-specific AS ISO OUT Endpoint Descriptor */
+static struct usb_as_iso_endpoint_descriptor as_iso_out_desc __initdata = {
+       .bLength =              USB_AS_ISO_ENDPOINT_DESC_SIZE,
+       .bDescriptorType =      USB_DT_CS_ENDPOINT,
+       .bDescriptorSubtype =   EP_GENERAL,
+       .bmAttributes =         1,
+       .bLockDelayUnits =      1,
+       .wLockDelay =           __constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *f_audio_desc[] __initdata = {
+       (struct usb_descriptor_header *)&ac_interface_desc,
+       (struct usb_descriptor_header *)&ac_header_desc,
+
+       (struct usb_descriptor_header *)&input_terminal_desc,
+       (struct usb_descriptor_header *)&output_terminal_desc,
+       (struct usb_descriptor_header *)&feature_unit_desc,
+
+       (struct usb_descriptor_header *)&as_interface_alt_0_desc,
+       (struct usb_descriptor_header *)&as_interface_alt_1_desc,
+       (struct usb_descriptor_header *)&as_header_desc,
+
+       (struct usb_descriptor_header *)&as_type_i_desc,
+
+       (struct usb_descriptor_header *)&as_out_ep_desc,
+       (struct usb_descriptor_header *)&as_iso_out_desc,
+       NULL,
+};
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX                0
+#define STRING_PRODUCT_IDX             1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+       [STRING_MANUFACTURER_IDX].s = manufacturer,
+       [STRING_PRODUCT_IDX].s = DRIVER_DESC,
+       {  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+       .language       = 0x0409,       /* en-us */
+       .strings        = strings_dev,
+};
+
+static struct usb_gadget_strings *audio_strings[] = {
+       &stringtab_dev,
+       NULL,
+};
+
+/*
+ * This function is an ALSA sound card following USB Audio Class Spec 1.0.
+ */
+
+/*-------------------------------------------------------------------------*/
+struct f_audio_buf {
+       u8 *buf;                /* backing storage for copied ISO OUT audio data */
+       int actual;             /* bytes currently stored in buf */
+       struct list_head list;  /* link on f_audio.play_queue */
+};
+
+static struct f_audio_buf *f_audio_buffer_alloc(int buf_size) /* allocate a zeroed copy buffer of buf_size bytes (atomic context) */
+{
+       struct f_audio_buf *copy_buf;
+
+       copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
+       if (!copy_buf)
+               return (struct f_audio_buf *)-ENOMEM; /* NOTE(review): errno cast into a pointer; prefer ERR_PTR(-ENOMEM) with IS_ERR() at callers */
+
+       copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
+       if (!copy_buf->buf) {
+               kfree(copy_buf);
+               return (struct f_audio_buf *)-ENOMEM; /* NOTE(review): same error-pointer idiom as above */
+       }
+
+       return copy_buf;
+}
+
+static void f_audio_buffer_free(struct f_audio_buf *audio_buf) /* release a buffer from f_audio_buffer_alloc() */
+{
+       kfree(audio_buf->buf);
+       kfree(audio_buf);
+}
+/*-------------------------------------------------------------------------*/
+
+struct f_audio {
+       struct gaudio                   card;
+
+       /* endpoints handle full and/or high speeds */
+       struct usb_ep                   *out_ep;
+       struct usb_endpoint_descriptor  *out_desc;
+
+       spinlock_t                      lock;           /* taken around play_queue in the work handler */
+       struct f_audio_buf *copy_buf;                   /* buffer currently being filled from ISO OUT */
+       struct work_struct playback_work;               /* drains play_queue via u_audio_playback() */
+       struct list_head play_queue;                    /* full buffers awaiting playback */
+
+       /* Control Set command */
+       struct list_head cs;                            /* registered usb_audio_control_selector list */
+       u8 set_cmd;                                     /* pending SET request command code */
+       struct usb_audio_control *set_con;              /* control targeted by the pending SET request */
+};
+
+static inline struct f_audio *func_to_audio(struct usb_function *f)
+{
+       return container_of(f, struct f_audio, card.func); /* f is embedded at card.func */
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_playback_work(struct work_struct *data) /* pops one queued buffer per invocation and plays it */
+{
+       struct f_audio *audio = container_of(data, struct f_audio,
+                                       playback_work);
+       struct f_audio_buf *play_buf;
+
+       spin_lock_irq(&audio->lock);
+       if (list_empty(&audio->play_queue)) {
+               spin_unlock_irq(&audio->lock);
+               return;
+       }
+       play_buf = list_first_entry(&audio->play_queue,
+                       struct f_audio_buf, list);
+       list_del(&play_buf->list);      /* detach under the lock; play outside it */
+       spin_unlock_irq(&audio->lock);
+
+       u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
+       f_audio_buffer_free(play_buf);
+
+       return;
+}
+
+static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) /* ISO OUT completion: accumulate req->buf into the copy buffer */
+{
+       struct f_audio *audio = req->context;
+       struct usb_composite_dev *cdev = audio->card.func.config->cdev;
+       struct f_audio_buf *copy_buf = audio->copy_buf;
+       int err;
+
+       if (!copy_buf)
+               return -EINVAL;
+
+       /* Copy buffer is full, add it to the play_queue */
+       if (audio_buf_size - copy_buf->actual < req->actual) {
+               list_add_tail(&copy_buf->list, &audio->play_queue); /* NOTE(review): play_queue modified without audio->lock here — the work handler does lock; confirm */
+               schedule_work(&audio->playback_work);
+               copy_buf = f_audio_buffer_alloc(audio_buf_size);
+               if (copy_buf < 0) /* BUG(review): ordered compare of a pointer against 0 is never true; should be IS_ERR(copy_buf) */
+                       return -ENOMEM; /* NOTE(review): audio->copy_buf still points at the buffer queued above — confirm no later double use */
+       }
+
+       memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
+       copy_buf->actual += req->actual;
+       audio->copy_buf = copy_buf;
+
+       err = usb_ep_queue(ep, req, GFP_ATOMIC); /* re-arm the request for the next packet */
+       if (err)
+               ERROR(cdev, "%s queue req: %d\n", ep->name, err);
+
+       return 0;
+
+}
+
+static void f_audio_complete(struct usb_ep *ep, struct usb_request *req) /* shared completion: ISO OUT packets and ep0 control data stages */
+{
+       struct f_audio *audio = req->context;
+       int status = req->status;
+       u32 data = 0;
+       struct usb_ep *out_ep = audio->out_ep;
+
+       switch (status) {
+
+       case 0:                         /* normal completion? */
+               if (ep == out_ep)
+                       f_audio_out_ep_complete(ep, req);
+               else if (audio->set_con) {
+                       memcpy(&data, req->buf, req->length); /* NOTE(review): data is a u32 — overruns if req->length > 4; confirm host length is bounded */
+                       audio->set_con->set(audio->set_con, audio->set_cmd,
+                                       le16_to_cpu(data)); /* NOTE(review): u32 through le16_to_cpu — confirm 16-bit control values intended */
+                       audio->set_con = NULL;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+static int audio_set_intf_req(struct usb_function *f,
+               const struct usb_ctrlrequest *ctrl) /* handle a class SET_* request: remember which control it targets */
+{
+       struct f_audio          *audio = func_to_audio(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       struct usb_request      *req = cdev->req;
+       u8                      id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+       u16                     len = le16_to_cpu(ctrl->wLength);
+       u16                     w_value = le16_to_cpu(ctrl->wValue);
+       u8                      con_sel = (w_value >> 8) & 0xFF;        /* control selector lives in the high byte of wValue */
+       u8                      cmd = (ctrl->bRequest & 0x0F);
+       struct usb_audio_control_selector *cs;
+       struct usb_audio_control *con;
+
+       DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+                       ctrl->bRequest, w_value, len, id);
+
+       list_for_each_entry(cs, &audio->cs, list) {
+               if (cs->id == id) {
+                       list_for_each_entry(con, &cs->control, list) {
+                               if (con->type == con_sel) {
+                                       audio->set_con = con;
+                                       break;
+                               }
+                       }
+                       break;
+               }
+       }
+
+       audio->set_cmd = cmd;   /* the value arrives in the data stage; applied in f_audio_complete() */
+       req->context = audio;
+       req->complete = f_audio_complete;
+
+       return len;
+}
+
+static int audio_get_intf_req(struct usb_function *f,
+               const struct usb_ctrlrequest *ctrl) /* handle a class GET_* request: look up the control and return its value */
+{
+       struct f_audio          *audio = func_to_audio(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       struct usb_request      *req = cdev->req;
+       int                     value = -EOPNOTSUPP;
+       u8                      id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+       u16                     len = le16_to_cpu(ctrl->wLength);
+       u16                     w_value = le16_to_cpu(ctrl->wValue);
+       u8                      con_sel = (w_value >> 8) & 0xFF;
+       u8                      cmd = (ctrl->bRequest & 0x0F);
+       struct usb_audio_control_selector *cs;
+       struct usb_audio_control *con;
+
+       DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
+                       ctrl->bRequest, w_value, len, id);
+
+       list_for_each_entry(cs, &audio->cs, list) {
+               if (cs->id == id) {
+                       list_for_each_entry(con, &cs->control, list) {
+                               if (con->type == con_sel && con->get) {
+                                       value = con->get(con, cmd);
+                                       break;
+                               }
+                       }
+                       break;
+               }
+       }
+
+       req->context = audio;
+       req->complete = f_audio_complete;
+       memcpy(req->buf, &value, len); /* NOTE(review): value is a 4-byte int but len comes from wLength — verify bound */
+
+       return len;
+}
+
+static int
+f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) /* dispatch class-specific ep0 requests to the SET/GET handlers */
+{
+       struct usb_composite_dev *cdev = f->config->cdev;
+       struct usb_request      *req = cdev->req;
+       int                     value = -EOPNOTSUPP;
+       u16                     w_index = le16_to_cpu(ctrl->wIndex);
+       u16                     w_value = le16_to_cpu(ctrl->wValue);
+       u16                     w_length = le16_to_cpu(ctrl->wLength);
+
+       /* composite driver infrastructure handles everything except
+        * Audio class messages; interface activation uses set_alt().
+        */
+       switch (ctrl->bRequestType) {
+       case USB_AUDIO_SET_INTF:
+               value = audio_set_intf_req(f, ctrl);
+               break;
+
+       case USB_AUDIO_GET_INTF:
+               value = audio_get_intf_req(f, ctrl);
+               break;
+
+       default:
+               ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+       }
+
+       /* respond with data transfer or status phase? */
+       if (value >= 0) {
+               DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+               req->zero = 0;
+               req->length = value;    /* handlers return wLength as the stage size */
+               value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+               if (value < 0)
+                       ERROR(cdev, "audio response on err %d\n", value);
+       }
+
+       /* device either stalls (value < 0) or reports success */
+       return value;
+}
+
+static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) /* alt 1 starts streaming on the ISO OUT ep; alt 0 flushes pending audio */
+{
+       struct f_audio          *audio = func_to_audio(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       struct usb_ep *out_ep = audio->out_ep;
+       struct usb_request *req;
+       int i = 0, err = 0;
+
+       DBG(cdev, "intf %d, alt %d\n", intf, alt);
+
+       if (intf == 1) {
+               if (alt == 1) {
+                       usb_ep_enable(out_ep, audio->out_desc); /* NOTE(review): return value ignored — confirm intentional */
+                       out_ep->driver_data = audio;
+                       audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); /* NOTE(review): unchecked — may be the cast -ENOMEM "pointer" on failure */
+
+                       /*
+                        * allocate a bunch of read buffers
+                        * and queue them all at once.
+                        */
+                       for (i = 0; i < req_count && err == 0; i++) {
+                               req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
+                               if (req) {
+                                       req->buf = kzalloc(req_buf_size,
+                                                       GFP_ATOMIC);
+                                       if (req->buf) {
+                                               req->length = req_buf_size;
+                                               req->context = audio;
+                                               req->complete =
+                                                       f_audio_complete;
+                                               err = usb_ep_queue(out_ep,
+                                                       req, GFP_ATOMIC);
+                                               if (err)
+                                                       ERROR(cdev,
+                                                       "%s queue req: %d\n",
+                                                       out_ep->name, err);
+                                       } else
+                                               err = -ENOMEM;
+                               } else
+                                       err = -ENOMEM;
+                       }
+
+               } else {
+                       struct f_audio_buf *copy_buf = audio->copy_buf;
+                       if (copy_buf) { /* flush the partially-filled buffer to the playback worker */
+                               list_add_tail(&copy_buf->list,
+                                               &audio->play_queue); /* NOTE(review): no audio->lock held — confirm */
+                               schedule_work(&audio->playback_work);
+                       }
+               }
+       }
+
+       return err;
+}
+
+static void f_audio_disable(struct usb_function *f)
+{
+       return; /* TODO(review): no-op — out_ep is never disabled here; confirm intentional */
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void f_audio_build_desc(struct f_audio *audio) /* patch channel count and sample rate into the static descriptors */
+{
+       struct gaudio *card = &audio->card;
+       u8 *sam_freq;
+       int rate;
+
+       /* Set channel numbers */
+       input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
+       as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
+
+       /* Set sample rates */
+       rate = u_audio_get_playback_rate(card);
+       sam_freq = as_type_i_desc.tSamFreq[0];
+       memcpy(sam_freq, &rate, 3); /* NOTE(review): takes the 3 low bytes of an int — assumes little-endian host; confirm */
+
+       /* Todo: Set Sample bits and other parameters */
+
+       return;
+}
+
+/* audio function driver setup/binding */
+static int __init
+f_audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+       struct usb_composite_dev *cdev = c->cdev;
+       struct f_audio          *audio = func_to_audio(f);
+       int                     status;
+       struct usb_ep           *ep;
+
+       f_audio_build_desc(audio);
+
+       /* allocate instance-specific interface IDs, and patch descriptors */
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       ac_interface_desc.bInterfaceNumber = status;
+
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       as_interface_alt_0_desc.bInterfaceNumber = status;
+       as_interface_alt_1_desc.bInterfaceNumber = status;
+
+       status = -ENODEV;
+
+       /* allocate instance-specific endpoints */
+       ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
+       if (!ep)
+               goto fail;
+       audio->out_ep = ep;
+       ep->driver_data = cdev; /* claim */
+
+       status = -ENOMEM;
+
+       /* supcard all relevant hardware speeds... we expect that when
+        * hardware is dual speed, all bulk-capable endpoints work at
+        * both speeds
+        */
+
+       /* copy descriptors, and track endpoint copies */
+       if (gadget_is_dualspeed(c->cdev->gadget)) {
+               c->highspeed = true;
+               f->hs_descriptors = usb_copy_descriptors(f_audio_desc);
+       } else
+               f->descriptors = usb_copy_descriptors(f_audio_desc);
+
+       return 0;
+
+fail:
+
+       return status;
+}
+
+static void
+f_audio_unbind(struct usb_configuration *c, struct usb_function *f) /* tear down the function instance */
+{
+       struct f_audio          *audio = func_to_audio(f);
+
+       usb_free_descriptors(f->descriptors); /* NOTE(review): hs_descriptors (dual-speed path in bind) is not freed — confirm leak */
+       kfree(audio);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Todo: add more control selectors dynamically */
+int __init control_selector_init(struct f_audio *audio) /* wire the feature unit and its mute/volume controls into audio->cs */
+{
+       INIT_LIST_HEAD(&audio->cs);
+       list_add(&feature_unit.list, &audio->cs);
+
+       INIT_LIST_HEAD(&feature_unit.control);
+       list_add(&mute_control.list, &feature_unit.control);
+       list_add(&volume_control.list, &feature_unit.control);
+
+       volume_control.data[_CUR] = 0xffc0;     /* initial CUR/MIN/MAX/RES volume values — units per UAC spec, TODO confirm */
+       volume_control.data[_MIN] = 0xe3a0;
+       volume_control.data[_MAX] = 0xfff0;
+       volume_control.data[_RES] = 0x0030;
+
+       return 0;
+}
+
+/**
+ * audio_bind_config - add USB audio function to a configuration
+ * @c: the configuration to support the USB audio function
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ */
+int __init audio_bind_config(struct usb_configuration *c)
+{
+       struct f_audio *audio;
+       int status;
+
+       /* allocate and initialize one new instance */
+       audio = kzalloc(sizeof *audio, GFP_KERNEL);
+       if (!audio)
+               return -ENOMEM;
+
+       audio->card.func.name = "g_audio";
+       audio->card.gadget = c->cdev->gadget;
+
+       INIT_LIST_HEAD(&audio->play_queue);
+       spin_lock_init(&audio->lock);
+
+       /* set up ALSA audio devices */
+       status = gaudio_setup(&audio->card);
+       if (status < 0)
+               goto setup_fail;
+
+       audio->card.func.strings = audio_strings;
+       audio->card.func.bind = f_audio_bind;
+       audio->card.func.unbind = f_audio_unbind;
+       audio->card.func.set_alt = f_audio_set_alt;
+       audio->card.func.setup = f_audio_setup;
+       audio->card.func.disable = f_audio_disable;
+       audio->out_desc = &as_out_ep_desc;
+
+       control_selector_init(audio);   /* return value ignored (always 0 today) */
+
+       INIT_WORK(&audio->playback_work, f_audio_playback_work);
+
+       status = usb_add_function(c, &audio->card.func);
+       if (status)
+               goto add_fail;
+
+       INFO(c->cdev, "audio_buf_size %d, req_buf_size %d, req_count %d\n",
+               audio_buf_size, req_buf_size, req_count);
+
+       return status;
+
+add_fail:
+       gaudio_cleanup(&audio->card);
+setup_fail:
+       kfree(audio);
+       return status;
+}
index 3279a47260428a1122cc409ae64ac8994f4c3eee..424a37c5773f5ac341b9659bc4480cb9ee3e9df6 100644 (file)
@@ -475,7 +475,9 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
                if (rndis->port.in_ep->driver_data) {
                        DBG(cdev, "reset rndis\n");
                        gether_disconnect(&rndis->port);
-               } else {
+               }
+
+               if (!rndis->port.in) {
                        DBG(cdev, "init rndis\n");
                        rndis->port.in = ep_choose(cdev->gadget,
                                        rndis->hs.in, rndis->fs.in);
index 381a53b3e11c4a4e4dcd3308bdfcbfd1f2323c6d..1e6aa504d58a9390d196a0b5c3823e3e4f299e69 100644 (file)
 #include <linux/freezer.h>
 #include <linux/utsname.h>
 
+#include <asm/unaligned.h>
+
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 
@@ -799,29 +801,9 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 
 /* Routines for unaligned data access */
 
-static u16 get_be16(u8 *buf)
-{
-       return ((u16) buf[0] << 8) | ((u16) buf[1]);
-}
-
-static u32 get_be32(u8 *buf)
-{
-       return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
-                       ((u32) buf[2] << 8) | ((u32) buf[3]);
-}
-
-static void put_be16(u8 *buf, u16 val)
-{
-       buf[0] = val >> 8;
-       buf[1] = val;
-}
-
-static void put_be32(u8 *buf, u32 val)
+static u32 get_unaligned_be24(u8 *buf)
 {
-       buf[0] = val >> 24;
-       buf[1] = val >> 16;
-       buf[2] = val >> 8;
-       buf[3] = val & 0xff;
+       return 0xffffff & (u32) get_unaligned_be32(buf - 1);
 }
 
 
@@ -1582,9 +1564,9 @@ static int do_read(struct fsg_dev *fsg)
        /* Get the starting Logical Block Address and check that it's
         * not too big */
        if (fsg->cmnd[0] == SC_READ_6)
-               lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+               lba = get_unaligned_be24(&fsg->cmnd[1]);
        else {
-               lba = get_be32(&fsg->cmnd[2]);
+               lba = get_unaligned_be32(&fsg->cmnd[2]);
 
                /* We allow DPO (Disable Page Out = don't save data in the
                 * cache) and FUA (Force Unit Access = don't read from the
@@ -1717,9 +1699,9 @@ static int do_write(struct fsg_dev *fsg)
        /* Get the starting Logical Block Address and check that it's
         * not too big */
        if (fsg->cmnd[0] == SC_WRITE_6)
-               lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
+               lba = get_unaligned_be24(&fsg->cmnd[1]);
        else {
-               lba = get_be32(&fsg->cmnd[2]);
+               lba = get_unaligned_be32(&fsg->cmnd[2]);
 
                /* We allow DPO (Disable Page Out = don't save data in the
                 * cache) and FUA (Force Unit Access = write directly to the
@@ -1940,7 +1922,7 @@ static int do_verify(struct fsg_dev *fsg)
 
        /* Get the starting Logical Block Address and check that it's
         * not too big */
-       lba = get_be32(&fsg->cmnd[2]);
+       lba = get_unaligned_be32(&fsg->cmnd[2]);
        if (lba >= curlun->num_sectors) {
                curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
                return -EINVAL;
@@ -1953,7 +1935,7 @@ static int do_verify(struct fsg_dev *fsg)
                return -EINVAL;
        }
 
-       verification_length = get_be16(&fsg->cmnd[7]);
+       verification_length = get_unaligned_be16(&fsg->cmnd[7]);
        if (unlikely(verification_length == 0))
                return -EIO;            // No default reply
 
@@ -2103,7 +2085,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
        memset(buf, 0, 18);
        buf[0] = valid | 0x70;                  // Valid, current error
        buf[2] = SK(sd);
-       put_be32(&buf[3], sdinfo);              // Sense information
+       put_unaligned_be32(sdinfo, &buf[3]);    /* Sense information */
        buf[7] = 18 - 8;                        // Additional sense length
        buf[12] = ASC(sd);
        buf[13] = ASCQ(sd);
@@ -2114,7 +2096,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
        struct lun      *curlun = fsg->curlun;
-       u32             lba = get_be32(&fsg->cmnd[2]);
+       u32             lba = get_unaligned_be32(&fsg->cmnd[2]);
        int             pmi = fsg->cmnd[8];
        u8              *buf = (u8 *) bh->buf;
 
@@ -2124,8 +2106,9 @@ static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
                return -EINVAL;
        }
 
-       put_be32(&buf[0], curlun->num_sectors - 1);     // Max logical block
-       put_be32(&buf[4], 512);                         // Block length
+       put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
+                                               /* Max logical block */
+       put_unaligned_be32(512, &buf[4]);       /* Block length */
        return 8;
 }
 
@@ -2144,7 +2127,7 @@ static void store_cdrom_address(u8 *dest, int msf, u32 addr)
                dest[0] = 0;            /* Reserved */
        } else {
                /* Absolute sector */
-               put_be32(dest, addr);
+               put_unaligned_be32(addr, dest);
        }
 }
 
@@ -2152,7 +2135,7 @@ static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
 {
        struct lun      *curlun = fsg->curlun;
        int             msf = fsg->cmnd[1] & 0x02;
-       u32             lba = get_be32(&fsg->cmnd[2]);
+       u32             lba = get_unaligned_be32(&fsg->cmnd[2]);
        u8              *buf = (u8 *) bh->buf;
 
        if ((fsg->cmnd[1] & ~0x02) != 0) {              /* Mask away MSF */
@@ -2252,10 +2235,13 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
                        buf[2] = 0x04;  // Write cache enable,
                                        // Read cache not disabled
                                        // No cache retention priorities
-                       put_be16(&buf[4], 0xffff);  // Don't disable prefetch
-                                       // Minimum prefetch = 0
-                       put_be16(&buf[8], 0xffff);  // Maximum prefetch
-                       put_be16(&buf[10], 0xffff); // Maximum prefetch ceiling
+                       put_unaligned_be16(0xffff, &buf[4]);
+                                       /* Don't disable prefetch */
+                                       /* Minimum prefetch = 0 */
+                       put_unaligned_be16(0xffff, &buf[8]);
+                                       /* Maximum prefetch */
+                       put_unaligned_be16(0xffff, &buf[10]);
+                                       /* Maximum prefetch ceiling */
                }
                buf += 12;
        }
@@ -2272,7 +2258,7 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
        if (mscmnd == SC_MODE_SENSE_6)
                buf0[0] = len - 1;
        else
-               put_be16(buf0, len - 2);
+               put_unaligned_be16(len - 2, buf0);
        return len;
 }
 
@@ -2360,9 +2346,10 @@ static int do_read_format_capacities(struct fsg_dev *fsg,
        buf[3] = 8;             // Only the Current/Maximum Capacity Descriptor
        buf += 4;
 
-       put_be32(&buf[0], curlun->num_sectors);         // Number of blocks
-       put_be32(&buf[4], 512);                         // Block length
-       buf[4] = 0x02;                                  // Current capacity
+       put_unaligned_be32(curlun->num_sectors, &buf[0]);
+                                               /* Number of blocks */
+       put_unaligned_be32(512, &buf[4]);       /* Block length */
+       buf[4] = 0x02;                          /* Current capacity */
        return 12;
 }
 
@@ -2882,7 +2869,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
                break;
 
        case SC_MODE_SELECT_10:
-               fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+               fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
                if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
                                (1<<1) | (3<<7), 0,
                                "MODE SELECT(10)")) == 0)
@@ -2898,7 +2885,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
                break;
 
        case SC_MODE_SENSE_10:
-               fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+               fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
                if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
                                (1<<1) | (1<<2) | (3<<7), 0,
                                "MODE SENSE(10)")) == 0)
@@ -2923,7 +2910,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
                break;
 
        case SC_READ_10:
-               fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+               fsg->data_size_from_cmnd =
+                               get_unaligned_be16(&fsg->cmnd[7]) << 9;
                if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
                                (1<<1) | (0xf<<2) | (3<<7), 1,
                                "READ(10)")) == 0)
@@ -2931,7 +2919,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
                break;
 
        case SC_READ_12:
-               fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+               fsg->data_size_from_cmnd =
+                               get_unaligned_be32(&fsg->cmnd[6]) << 9;
                if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
                                (1<<1) | (0xf<<2) | (0xf<<6), 1,
                                "READ(12)")) == 0)
@@ -2949,7 +2938,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
        case SC_READ_HEADER:
                if (!mod_data.cdrom)
                        goto unknown_cmnd;
-               fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+               fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
                if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
                                (3<<7) | (0x1f<<1), 1,
                                "READ HEADER")) == 0)
@@ -2959,7 +2948,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
        case SC_READ_TOC:
                if (!mod_data.cdrom)
                        goto unknown_cmnd;
-               fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+               fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
                if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
                                (7<<6) | (1<<1), 1,
                                "READ TOC")) == 0)
@@ -2967,7 +2956,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
                break;
 
        case SC_READ_FORMAT_CAPACITIES:
-               fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
+               fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
                if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
                                (3<<7), 1,
                                "READ FORMAT CAPACITIES")) == 0)
@@ -3025,7 +3014,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
                break;
 
        case SC_WRITE_10:
-               fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
+               fsg->data_size_from_cmnd =
+                               get_unaligned_be16(&fsg->cmnd[7]) << 9;
                if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
                                (1<<1) | (0xf<<2) | (3<<7), 1,
                                "WRITE(10)")) == 0)
@@ -3033,7 +3023,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
                break;
 
        case SC_WRITE_12:
-               fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
+               fsg->data_size_from_cmnd =
+                               get_unaligned_be32(&fsg->cmnd[6]) << 9;
                if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
                                (1<<1) | (0xf<<2) | (0xf<<6), 1,
                                "WRITE(12)")) == 0)
diff --git a/drivers/usb/gadget/fsl_mx3_udc.c b/drivers/usb/gadget/fsl_mx3_udc.c
new file mode 100644 (file)
index 0000000..4bc2bf3
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2009
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * Description:
+ * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
+ * driver to function correctly on these systems.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fsl_devices.h>
+#include <linux/platform_device.h>
+
+static struct clk *mxc_ahb_clk;
+static struct clk *mxc_usb_clk;
+
+int fsl_udc_clk_init(struct platform_device *pdev)
+{
+       struct fsl_usb2_platform_data *pdata;
+       unsigned long freq;
+       int ret;
+
+       pdata = pdev->dev.platform_data;
+
+       mxc_ahb_clk = clk_get(&pdev->dev, "usb_ahb");
+       if (IS_ERR(mxc_ahb_clk))
+               return PTR_ERR(mxc_ahb_clk);
+
+       ret = clk_enable(mxc_ahb_clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "clk_enable(\"usb_ahb\") failed\n");
+               goto eenahb;
+       }
+
+       /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
+       mxc_usb_clk = clk_get(&pdev->dev, "usb");
+       if (IS_ERR(mxc_usb_clk)) {
+               dev_err(&pdev->dev, "clk_get(\"usb\") failed\n");
+               ret = PTR_ERR(mxc_usb_clk);
+               goto egusb;
+       }
+
+       freq = clk_get_rate(mxc_usb_clk);
+       if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
+           (freq < 59999000 || freq > 60001000)) {
+               dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
+               ret = -EINVAL; goto eclkrate; /* don't return stale 0 */
+       }
+
+       ret = clk_enable(mxc_usb_clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "clk_enable(\"usb_clk\") failed\n");
+               goto eenusb;
+       }
+
+       return 0;
+
+eenusb:
+eclkrate:
+       clk_put(mxc_usb_clk);
+       mxc_usb_clk = NULL;
+egusb:
+       clk_disable(mxc_ahb_clk);
+eenahb:
+       clk_put(mxc_ahb_clk);
+       return ret;
+}
+
+void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+       struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+
+       /* ULPI transceivers don't need usbpll */
+       if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
+               clk_disable(mxc_usb_clk);
+               clk_put(mxc_usb_clk);
+               mxc_usb_clk = NULL;
+       }
+}
+
+void fsl_udc_clk_release(void)
+{
+       if (mxc_usb_clk) {
+               clk_disable(mxc_usb_clk);
+               clk_put(mxc_usb_clk);
+       }
+       clk_disable(mxc_ahb_clk);
+       clk_put(mxc_ahb_clk);
+}
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
new file mode 100644 (file)
index 0000000..42a74b8
--- /dev/null
@@ -0,0 +1,2491 @@
+/*
+ * Copyright (C) 2004-2007 Freescale Semicondutor, Inc. All rights reserved.
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ *         Jiang Bo <tanya.jiang@freescale.com>
+ *
+ * Description:
+ * Freescale high-speed USB SOC DR module device controller driver.
+ * This can be found on MPC8349E/MPC8313E cpus.
+ * The driver is previously named as mpc_udc.  Based on bare board
+ * code from Dave Liu and Shlomi Gridish.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#undef VERBOSE
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+#include <asm/dma.h>
+
+#include "fsl_usb2_udc.h"
+
+#define        DRIVER_DESC     "Freescale High-Speed USB SOC Device Controller driver"
+#define        DRIVER_AUTHOR   "Li Yang/Jiang Bo"
+#define        DRIVER_VERSION  "Apr 20, 2007"
+
+#define        DMA_ADDR_INVALID        (~(dma_addr_t)0)
+
+static const char driver_name[] = "fsl-usb2-udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+static struct usb_dr_device *dr_regs;
+#ifndef CONFIG_ARCH_MXC
+static struct usb_sys_interface *usb_sys_regs;
+#endif
+
+/* it is initialized in probe()  */
+static struct fsl_udc *udc_controller = NULL;
+
+static const struct usb_endpoint_descriptor
+fsl_ep0_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     0,
+       .bmAttributes =         USB_ENDPOINT_XFER_CONTROL,
+       .wMaxPacketSize =       USB_MAX_CTRL_PAYLOAD,
+};
+
+static void fsl_ep_fifo_flush(struct usb_ep *_ep);
+
+#ifdef CONFIG_PPC32
+#define fsl_readl(addr)                in_le32(addr)
+#define fsl_writel(val32, addr) out_le32(addr, val32)
+#else
+#define fsl_readl(addr)                readl(addr)
+#define fsl_writel(val32, addr) writel(val32, addr)
+#endif
+
+/********************************************************************
+ *     Internal Used Function
+********************************************************************/
+/*-----------------------------------------------------------------
+ * done() - retire a request; caller blocked irqs
+ * @status : request status to be set, only works when
+ *     request is still in progress.
+ *--------------------------------------------------------------*/
+static void done(struct fsl_ep *ep, struct fsl_req *req, int status)
+{
+       struct fsl_udc *udc = NULL;
+       unsigned char stopped = ep->stopped;
+       struct ep_td_struct *curr_td, *next_td;
+       int j;
+
+       udc = (struct fsl_udc *)ep->udc;
+       /* Removed the req from fsl_ep->queue */
+       list_del_init(&req->queue);
+
+       /* req.status should be set as -EINPROGRESS in ep_queue() */
+       if (req->req.status == -EINPROGRESS)
+               req->req.status = status;
+       else
+               status = req->req.status;
+
+       /* Free dtd for the request */
+       next_td = req->head;
+       for (j = 0; j < req->dtd_count; j++) {
+               curr_td = next_td;
+               if (j != req->dtd_count - 1) {
+                       next_td = curr_td->next_td_virt;
+               }
+               dma_pool_free(udc->td_pool, curr_td, curr_td->td_dma);
+       }
+
+       if (req->mapped) {
+               dma_unmap_single(ep->udc->gadget.dev.parent,
+                       req->req.dma, req->req.length,
+                       ep_is_in(ep)
+                               ? DMA_TO_DEVICE
+                               : DMA_FROM_DEVICE);
+               req->req.dma = DMA_ADDR_INVALID;
+               req->mapped = 0;
+       } else
+               dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
+                       req->req.dma, req->req.length,
+                       ep_is_in(ep)
+                               ? DMA_TO_DEVICE
+                               : DMA_FROM_DEVICE);
+
+       if (status && (status != -ESHUTDOWN))
+               VDBG("complete %s req %p stat %d len %u/%u",
+                       ep->ep.name, &req->req, status,
+                       req->req.actual, req->req.length);
+
+       ep->stopped = 1;
+
+       spin_unlock(&ep->udc->lock);
+       /* complete() is from gadget layer,
+        * eg fsg->bulk_in_complete() */
+       if (req->req.complete)
+               req->req.complete(&ep->ep, &req->req);
+
+       spin_lock(&ep->udc->lock);
+       ep->stopped = stopped;
+}
+
+/*-----------------------------------------------------------------
+ * nuke(): delete all requests related to this ep
+ * called with spinlock held
+ *--------------------------------------------------------------*/
+static void nuke(struct fsl_ep *ep, int status)
+{
+       ep->stopped = 1;
+
+       /* Flush fifo */
+       fsl_ep_fifo_flush(&ep->ep);
+
+       /* Whether this ep has requests linked */
+       while (!list_empty(&ep->queue)) {
+               struct fsl_req *req = NULL;
+
+               req = list_entry(ep->queue.next, struct fsl_req, queue);
+               done(ep, req, status);
+       }
+}
+
+/*------------------------------------------------------------------
+       Internal Hardware related function
+ ------------------------------------------------------------------*/
+
+static int dr_controller_setup(struct fsl_udc *udc)
+{
+       unsigned int tmp, portctrl;
+#ifndef CONFIG_ARCH_MXC
+       unsigned int ctrl;
+#endif
+       unsigned long timeout;
+#define FSL_UDC_RESET_TIMEOUT 1000
+
+       /* Config PHY interface */
+       portctrl = fsl_readl(&dr_regs->portsc1);
+       portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
+       switch (udc->phy_mode) {
+       case FSL_USB2_PHY_ULPI:
+               portctrl |= PORTSCX_PTS_ULPI;
+               break;
+       case FSL_USB2_PHY_UTMI_WIDE:
+               portctrl |= PORTSCX_PTW_16BIT;
+               /* fall through */
+       case FSL_USB2_PHY_UTMI:
+               portctrl |= PORTSCX_PTS_UTMI;
+               break;
+       case FSL_USB2_PHY_SERIAL:
+               portctrl |= PORTSCX_PTS_FSLS;
+               break;
+       default:
+               return -EINVAL;
+       }
+       fsl_writel(portctrl, &dr_regs->portsc1);
+
+       /* Stop and reset the usb controller */
+       tmp = fsl_readl(&dr_regs->usbcmd);
+       tmp &= ~USB_CMD_RUN_STOP;
+       fsl_writel(tmp, &dr_regs->usbcmd);
+
+       tmp = fsl_readl(&dr_regs->usbcmd);
+       tmp |= USB_CMD_CTRL_RESET;
+       fsl_writel(tmp, &dr_regs->usbcmd);
+
+       /* Wait for reset to complete */
+       timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
+       while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
+               if (time_after(jiffies, timeout)) {
+                       ERR("udc reset timeout!\n");
+                       return -ETIMEDOUT;
+               }
+               cpu_relax();
+       }
+
+       /* Set the controller as device mode */
+       tmp = fsl_readl(&dr_regs->usbmode);
+       tmp |= USB_MODE_CTRL_MODE_DEVICE;
+       /* Disable Setup Lockout */
+       tmp |= USB_MODE_SETUP_LOCK_OFF;
+       fsl_writel(tmp, &dr_regs->usbmode);
+
+       /* Clear the setup status */
+       fsl_writel(0, &dr_regs->usbsts);
+
+       tmp = udc->ep_qh_dma;
+       tmp &= USB_EP_LIST_ADDRESS_MASK;
+       fsl_writel(tmp, &dr_regs->endpointlistaddr);
+
+       VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
+               udc->ep_qh, (int)tmp,
+               fsl_readl(&dr_regs->endpointlistaddr));
+
+       /* Config control enable i/o output, cpu endian register */
+#ifndef CONFIG_ARCH_MXC
+       ctrl = __raw_readl(&usb_sys_regs->control);
+       ctrl |= USB_CTRL_IOENB;
+       __raw_writel(ctrl, &usb_sys_regs->control);
+#endif
+
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+       /* Turn on cache snooping hardware, since some PowerPC platforms
+        * wholly rely on hardware to deal with cache coherent. */
+
+       /* Setup Snooping for all the 4GB space */
+       tmp = SNOOP_SIZE_2GB;   /* starts from 0x0, size 2G */
+       __raw_writel(tmp, &usb_sys_regs->snoop1);
+       tmp |= 0x80000000;      /* starts from 0x8000000, size 2G */
+       __raw_writel(tmp, &usb_sys_regs->snoop2);
+#endif
+
+       return 0;
+}
+
+/* Enable DR irq and set controller to run state */
+static void dr_controller_run(struct fsl_udc *udc)
+{
+       u32 temp;
+
+       /* Enable DR irq reg */
+       temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
+               | USB_INTR_PTC_DETECT_EN | USB_INTR_RESET_EN
+               | USB_INTR_DEVICE_SUSPEND | USB_INTR_SYS_ERR_EN;
+
+       fsl_writel(temp, &dr_regs->usbintr);
+
+       /* Clear stopped bit */
+       udc->stopped = 0;
+
+       /* Set the controller as device mode */
+       temp = fsl_readl(&dr_regs->usbmode);
+       temp |= USB_MODE_CTRL_MODE_DEVICE;
+       fsl_writel(temp, &dr_regs->usbmode);
+
+       /* Set controller to Run */
+       temp = fsl_readl(&dr_regs->usbcmd);
+       temp |= USB_CMD_RUN_STOP;
+       fsl_writel(temp, &dr_regs->usbcmd);
+
+       return;
+}
+
+static void dr_controller_stop(struct fsl_udc *udc)
+{
+       unsigned int tmp;
+
+       /* disable all INTR */
+       fsl_writel(0, &dr_regs->usbintr);
+
+       /* Set stopped bit for isr */
+       udc->stopped = 1;
+
+       /* disable IO output */
+/*     usb_sys_regs->control = 0; */
+
+       /* set controller to Stop */
+       tmp = fsl_readl(&dr_regs->usbcmd);
+       tmp &= ~USB_CMD_RUN_STOP;
+       fsl_writel(tmp, &dr_regs->usbcmd);
+
+       return;
+}
+
+static void dr_ep_setup(unsigned char ep_num, unsigned char dir,
+                       unsigned char ep_type)
+{
+       unsigned int tmp_epctrl = 0;
+
+       tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+       if (dir) {
+               if (ep_num)
+                       tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
+               tmp_epctrl |= EPCTRL_TX_ENABLE;
+               tmp_epctrl |= ((unsigned int)(ep_type)
+                               << EPCTRL_TX_EP_TYPE_SHIFT);
+       } else {
+               if (ep_num)
+                       tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
+               tmp_epctrl |= EPCTRL_RX_ENABLE;
+               tmp_epctrl |= ((unsigned int)(ep_type)
+                               << EPCTRL_RX_EP_TYPE_SHIFT);
+       }
+
+       fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
+}
+
+static void
+dr_ep_change_stall(unsigned char ep_num, unsigned char dir, int value)
+{
+       u32 tmp_epctrl = 0;
+
+       tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+
+       if (value) {
+               /* set the stall bit */
+               if (dir)
+                       tmp_epctrl |= EPCTRL_TX_EP_STALL;
+               else
+                       tmp_epctrl |= EPCTRL_RX_EP_STALL;
+       } else {
+               /* clear the stall bit and reset data toggle */
+               if (dir) {
+                       tmp_epctrl &= ~EPCTRL_TX_EP_STALL;
+                       tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
+               } else {
+                       tmp_epctrl &= ~EPCTRL_RX_EP_STALL;
+                       tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
+               }
+       }
+       fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
+}
+
+/* Get stall status of a specific ep
+   Return: 0: not stalled; 1:stalled */
+static int dr_ep_get_stall(unsigned char ep_num, unsigned char dir)
+{
+       u32 epctrl;
+
+       epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+       if (dir)
+               return (epctrl & EPCTRL_TX_EP_STALL) ? 1 : 0;
+       else
+               return (epctrl & EPCTRL_RX_EP_STALL) ? 1 : 0;
+}
+
+/********************************************************************
+       Internal Structure Build up functions
+********************************************************************/
+
+/*------------------------------------------------------------------
+* struct_ep_qh_setup(): set the Endpoint Capabilities field of QH
+ * @zlt: Zero Length Termination Select (1: disable; 0: enable)
+ * @mult: Mult field
+ ------------------------------------------------------------------*/
+static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
+               unsigned char dir, unsigned char ep_type,
+               unsigned int max_pkt_len,
+               unsigned int zlt, unsigned char mult)
+{
+       struct ep_queue_head *p_QH = &udc->ep_qh[2 * ep_num + dir];
+       unsigned int tmp = 0;
+
+       /* set the Endpoint Capabilities in QH */
+       switch (ep_type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               /* Interrupt On Setup (IOS). for control ep  */
+               tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+                       | EP_QUEUE_HEAD_IOS;
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+                       | (mult << EP_QUEUE_HEAD_MULT_POS);
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+       case USB_ENDPOINT_XFER_INT:
+               tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
+               break;
+       default:
+               VDBG("error ep type is %d", ep_type);
+               return;
+       }
+       if (zlt)
+               tmp |= EP_QUEUE_HEAD_ZLT_SEL;
+
+       p_QH->max_pkt_length = cpu_to_le32(tmp);
+       p_QH->next_dtd_ptr = 1;
+       p_QH->size_ioc_int_sts = 0;
+
+       return;
+}
+
+/* Setup qh structure and ep register for ep0. */
+static void ep0_setup(struct fsl_udc *udc)
+{
+       /* the initialization of an ep includes: fields in QH, Regs,
+        * fsl_ep struct */
+       struct_ep_qh_setup(udc, 0, USB_RECV, USB_ENDPOINT_XFER_CONTROL,
+                       USB_MAX_CTRL_PAYLOAD, 0, 0);
+       struct_ep_qh_setup(udc, 0, USB_SEND, USB_ENDPOINT_XFER_CONTROL,
+                       USB_MAX_CTRL_PAYLOAD, 0, 0);
+       dr_ep_setup(0, USB_RECV, USB_ENDPOINT_XFER_CONTROL);
+       dr_ep_setup(0, USB_SEND, USB_ENDPOINT_XFER_CONTROL);
+
+       return;
+
+}
+
+/***********************************************************************
+               Endpoint Management Functions
+***********************************************************************/
+
+/*-------------------------------------------------------------------------
+ * when configurations are set, or when interface settings change
+ * for example the do_set_interface() in gadget layer,
+ * the driver will enable or disable the relevant endpoints
+ * ep0 doesn't use this routine. It is always enabled.
+-------------------------------------------------------------------------*/
+static int fsl_ep_enable(struct usb_ep *_ep,
+               const struct usb_endpoint_descriptor *desc)
+{
+       struct fsl_udc *udc = NULL;
+       struct fsl_ep *ep = NULL;
+       unsigned short max = 0;
+       unsigned char mult = 0, zlt;
+       int retval = -EINVAL;
+       unsigned long flags = 0;
+
+       ep = container_of(_ep, struct fsl_ep, ep);
+
+       /* catch various bogus parameters */
+       if (!_ep || !desc || ep->desc
+                       || (desc->bDescriptorType != USB_DT_ENDPOINT))
+               return -EINVAL;
+
+       udc = ep->udc;
+
+       if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
+               return -ESHUTDOWN;
+
+       max = le16_to_cpu(desc->wMaxPacketSize);
+
+       /* Disable automatic zlp generation.  Driver is responsible to indicate
+        * explicitly through req->req.zero.  This is needed to enable multi-td
+        * request. */
+       zlt = 1;
+
+       /* Assume the max packet size from gadget is always correct */
+       switch (desc->bmAttributes & 0x03) {
+       case USB_ENDPOINT_XFER_CONTROL:
+       case USB_ENDPOINT_XFER_BULK:
+       case USB_ENDPOINT_XFER_INT:
+               /* mult = 0.  Execute N Transactions as demonstrated by
+                * the USB variable length packet protocol where N is
+                * computed using the Maximum Packet Length (dQH) and
+                * the Total Bytes field (dTD) */
+               mult = 0;
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               /* Calculate transactions needed for high bandwidth iso */
+               mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+               max = max & 0x7ff;      /* bits 0~10: max packet size */
+               /* 3 transactions at most */
+               if (mult > 3)
+                       goto en_done;
+               break;
+       default:
+               goto en_done;
+       }
+
+       spin_lock_irqsave(&udc->lock, flags);
+       ep->ep.maxpacket = max;
+       ep->desc = desc;
+       ep->stopped = 0;
+
+       /* Controller related setup */
+       /* Init EPx Queue Head (Ep Capabilities field in QH
+        * according to max, zlt, mult) */
+       struct_ep_qh_setup(udc, (unsigned char) ep_index(ep),
+                       (unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
+                                       ?  USB_SEND : USB_RECV),
+                       (unsigned char) (desc->bmAttributes
+                                       & USB_ENDPOINT_XFERTYPE_MASK),
+                       max, zlt, mult);
+
+       /* Init endpoint ctrl register */
+       dr_ep_setup((unsigned char) ep_index(ep),
+                       (unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
+                                       ? USB_SEND : USB_RECV),
+                       (unsigned char) (desc->bmAttributes
+                                       & USB_ENDPOINT_XFERTYPE_MASK));
+
+       spin_unlock_irqrestore(&udc->lock, flags);
+       retval = 0;
+
+       VDBG("enabled %s (ep%d%s) maxpacket %d", ep->ep.name,
+                       ep->desc->bEndpointAddress & 0x0f,
+                       (desc->bEndpointAddress & USB_DIR_IN)
+                               ? "in" : "out", max);
+en_done:
+       return retval;
+}
+
+/*---------------------------------------------------------------------
+ * @ep : the ep being unconfigured. May not be ep0
+ * Any pending and incomplete req will complete with status (-ESHUTDOWN)
+*---------------------------------------------------------------------*/
+static int fsl_ep_disable(struct usb_ep *_ep)
+{
+       struct fsl_udc *udc = NULL;
+       struct fsl_ep *ep = NULL;
+       unsigned long flags = 0;
+       u32 epctrl;
+       int ep_num;
+
+       ep = container_of(_ep, struct fsl_ep, ep);
+       if (!_ep || !ep->desc) {
+               VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
+               return -EINVAL;
+       }
+
+       /* disable ep on controller */
+       ep_num = ep_index(ep);
+       epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+       if (ep_is_in(ep))
+               epctrl &= ~EPCTRL_TX_ENABLE;
+       else
+               epctrl &= ~EPCTRL_RX_ENABLE;
+       fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+
+       udc = (struct fsl_udc *)ep->udc;
+       spin_lock_irqsave(&udc->lock, flags);
+
+       /* nuke all pending requests (does flush) */
+       nuke(ep, -ESHUTDOWN);
+
+       ep->desc = NULL;
+       ep->stopped = 1;
+       spin_unlock_irqrestore(&udc->lock, flags);
+
+       VDBG("disabled %s OK", _ep->name);
+       return 0;
+}
+
+/*---------------------------------------------------------------------
+ * allocate a request object used by this endpoint
+ * the main operation is to insert the req->queue to the eq->queue
+ * Returns the request, or null if one could not be allocated
+*---------------------------------------------------------------------*/
+static struct usb_request *
+fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+       struct fsl_req *req = NULL;
+
+       req = kzalloc(sizeof *req, gfp_flags);
+       if (!req)
+               return NULL;
+
+       req->req.dma = DMA_ADDR_INVALID;
+       INIT_LIST_HEAD(&req->queue);
+
+       return &req->req;
+}
+
+static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+       struct fsl_req *req = NULL;
+
+       req = container_of(_req, struct fsl_req, req);
+
+       if (_req)
+               kfree(req);
+}
+
+/*-------------------------------------------------------------------------*/
+static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
+{
+       int i = ep_index(ep) * 2 + ep_is_in(ep);
+       u32 temp, bitmask, tmp_stat;
+       struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
+
+       /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
+       VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
+
+       bitmask = ep_is_in(ep)
+               ? (1 << (ep_index(ep) + 16))
+               : (1 << (ep_index(ep)));
+
+       /* check if the pipe is empty */
+       if (!(list_empty(&ep->queue))) {
+               /* Add td to the end */
+               struct fsl_req *lastreq;
+               lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
+               lastreq->tail->next_td_ptr =
+                       cpu_to_le32(req->head->td_dma & DTD_ADDR_MASK);
+               /* Read prime bit, if 1 goto done */
+               if (fsl_readl(&dr_regs->endpointprime) & bitmask)
+                       goto out;
+
+               do {
+                       /* Set ATDTW bit in USBCMD */
+                       temp = fsl_readl(&dr_regs->usbcmd);
+                       fsl_writel(temp | USB_CMD_ATDTW, &dr_regs->usbcmd);
+
+                       /* Read correct status bit */
+                       tmp_stat = fsl_readl(&dr_regs->endptstatus) & bitmask;
+
+               } while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_ATDTW));
+
+               /* Write ATDTW bit to 0 */
+               temp = fsl_readl(&dr_regs->usbcmd);
+               fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
+
+               if (tmp_stat)
+                       goto out;
+       }
+
+       /* Write dQH next pointer and terminate bit to 0 */
+       temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+       dQH->next_dtd_ptr = cpu_to_le32(temp);
+
+       /* Clear active and halt bit */
+       temp = cpu_to_le32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+                       | EP_QUEUE_HEAD_STATUS_HALT));
+       dQH->size_ioc_int_sts &= temp;
+
+       /* Ensure that updates to the QH will occur before priming. */
+       wmb();
+
+       /* Prime endpoint by writing 1 to ENDPTPRIME */
+       temp = ep_is_in(ep)
+               ? (1 << (ep_index(ep) + 16))
+               : (1 << (ep_index(ep)));
+       fsl_writel(temp, &dr_regs->endpointprime);
+out:
+       return;
+}
+
+/* Fill in one dTD (device transfer descriptor) for (part of) a request.
+ * A single dTD covers at most EP_MAX_LENGTH_TRANSFER bytes via its five
+ * 4K-page buffer pointers, so long requests need a chain of dTDs.
+ * @req: request that the transfer belongs to
+ * @length: return actually data length of the dTD
+ * @dma: return dma address of the dTD
+ * @is_last: return flag if it is the last dTD of the request
+ * return: pointer to the built dTD, or NULL if the dma_pool is exhausted */
+static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
+               dma_addr_t *dma, int *is_last)
+{
+       u32 swap_temp;
+       struct ep_td_struct *dtd;
+
+       /* how big will this transfer be? (remaining bytes, capped per dTD) */
+       *length = min(req->req.length - req->req.actual,
+                       (unsigned)EP_MAX_LENGTH_TRANSFER);
+
+       dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
+       if (dtd == NULL)
+               return dtd;
+
+       /* Remember our own bus address so chaining code need not recompute */
+       dtd->td_dma = *dma;
+       /* Clear reserved field.
+        * NOTE(review): reading a little-endian field is conventionally
+        * le32_to_cpu(); cpu_to_le32() performs the identical byteswap, so
+        * behavior is unaffected, but the direction is misleading. */
+       swap_temp = cpu_to_le32(dtd->size_ioc_sts);
+       swap_temp &= ~DTD_RESERVED_FIELDS;
+       dtd->size_ioc_sts = cpu_to_le32(swap_temp);
+
+       /* Init all of buffer page pointers: five consecutive 4K pages
+        * starting at the current offset into the request's DMA buffer */
+       swap_temp = (u32) (req->req.dma + req->req.actual);
+       dtd->buff_ptr0 = cpu_to_le32(swap_temp);
+       dtd->buff_ptr1 = cpu_to_le32(swap_temp + 0x1000);
+       dtd->buff_ptr2 = cpu_to_le32(swap_temp + 0x2000);
+       dtd->buff_ptr3 = cpu_to_le32(swap_temp + 0x3000);
+       dtd->buff_ptr4 = cpu_to_le32(swap_temp + 0x4000);
+
+       req->req.actual += *length;
+
+       /* zlp is needed if req->req.zero is set: a short (or zero) final
+        * packet terminates the transfer, so no extra dTD is required */
+       if (req->req.zero) {
+               if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+                       *is_last = 1;
+               else
+                       *is_last = 0;
+       } else if (req->req.length == req->req.actual)
+               *is_last = 1;
+       else
+               *is_last = 0;
+
+       if ((*is_last) == 0)
+               VDBG("multi-dtd request!");
+       /* Fill in the transfer size; set active bit */
+       swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
+
+       /* Enable interrupt for the last dtd of a request */
+       if (*is_last && !req->req.no_interrupt)
+               swap_temp |= DTD_IOC;
+
+       dtd->size_ioc_sts = cpu_to_le32(swap_temp);
+
+       /* Ensure descriptor writes are visible before the HW may fetch it */
+       mb();
+
+       VDBG("length = %d address= 0x%x", *length, (int)*dma);
+
+       return dtd;
+}
+
+/* Generate dtd chain for a request.
+ * Builds dTDs one at a time with fsl_build_dtd(), linking each new dTD
+ * onto the previous one (both by bus address and by virtual pointer),
+ * then terminates the chain.  On success req->head/req->tail bracket the
+ * chain and req->dtd_count holds its length.
+ * Returns 0 on success, -ENOMEM if a dTD allocation fails.
+ * NOTE(review): on -ENOMEM the dTDs already built are not freed here —
+ * presumably reclaimed via req later; confirm against the callers. */
+static int fsl_req_to_dtd(struct fsl_req *req)
+{
+       unsigned        count;
+       int             is_last;
+       int             is_first =1;
+       struct ep_td_struct     *last_dtd = NULL, *dtd;
+       dma_addr_t dma;
+
+       do {
+               dtd = fsl_build_dtd(req, &count, &dma, &is_last);
+               if (dtd == NULL)
+                       return -ENOMEM;
+
+               if (is_first) {
+                       is_first = 0;
+                       req->head = dtd;
+               } else {
+                       /* HW follows next_td_ptr; driver walks next_td_virt */
+                       last_dtd->next_td_ptr = cpu_to_le32(dma);
+                       last_dtd->next_td_virt = dtd;
+               }
+               last_dtd = dtd;
+
+               req->dtd_count++;
+       } while (!is_last);
+
+       /* Terminate bit tells the controller the chain ends here */
+       dtd->next_td_ptr = cpu_to_le32(DTD_NEXT_TERMINATE);
+
+       req->tail = dtd;
+
+       return 0;
+}
+
+/* queues (submits) an I/O request to an endpoint.
+ * Maps (or syncs) the request buffer for DMA, builds the dTD chain,
+ * primes the endpoint, and links the request on the endpoint queue.
+ * Returns 0 on success or a negative errno on bad parameters / no memory. */
+static int
+fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+       struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
+       struct fsl_req *req = container_of(_req, struct fsl_req, req);
+       struct fsl_udc *udc;
+       unsigned long flags;
+       int is_iso = 0;
+
+       /* catch various bogus parameters */
+       if (!_req || !req->req.complete || !req->req.buf
+                       || !list_empty(&req->queue)) {
+               VDBG("%s, bad params", __func__);
+               return -EINVAL;
+       }
+       if (unlikely(!_ep || !ep->desc)) {
+               VDBG("%s, bad ep", __func__);
+               return -EINVAL;
+       }
+       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+               /* ISO transfers are limited to one packet per request here */
+               if (req->req.length > ep->ep.maxpacket)
+                       return -EMSGSIZE;
+               is_iso = 1;
+       }
+
+       udc = ep->udc;
+       if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+               return -ESHUTDOWN;
+
+       req->ep = ep;
+
+       /* map virtual address to hardware: map the buffer ourselves if the
+        * gadget driver did not supply a DMA address, otherwise just sync it */
+       if (req->req.dma == DMA_ADDR_INVALID) {
+               req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+                                       req->req.buf,
+                                       req->req.length, ep_is_in(ep)
+                                               ? DMA_TO_DEVICE
+                                               : DMA_FROM_DEVICE);
+               req->mapped = 1;
+       } else {
+               dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+                                       req->req.dma, req->req.length,
+                                       ep_is_in(ep)
+                                               ? DMA_TO_DEVICE
+                                               : DMA_FROM_DEVICE);
+               req->mapped = 0;
+       }
+
+       req->req.status = -EINPROGRESS;
+       req->req.actual = 0;
+       req->dtd_count = 0;
+
+       spin_lock_irqsave(&udc->lock, flags);
+
+       /* build dtds and push them to device queue */
+       if (!fsl_req_to_dtd(req)) {
+               fsl_queue_td(ep, req);
+       } else {
+               spin_unlock_irqrestore(&udc->lock, flags);
+               return -ENOMEM;
+       }
+
+       /* Update ep0 state.
+        * NOTE(review): DATA_STATE_XMIT is set regardless of direction —
+        * presumably callers only queue ep0 data this way; confirm. */
+       if ((ep_index(ep) == 0))
+               udc->ep0_state = DATA_STATE_XMIT;
+
+       /* irq handler advances the queue.
+        * NOTE(review): req was already dereferenced above, so this NULL
+        * check can never fail here. */
+       if (req != NULL)
+               list_add_tail(&req->queue, &ep->queue);
+       spin_unlock_irqrestore(&udc->lock, flags);
+
+       return 0;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint.
+ * Temporarily disables the endpoint, removes the request from the queue
+ * (patching the dTD chain or the QH as needed), completes it with
+ * -ECONNRESET, then re-enables the endpoint.
+ * Returns 0 on success, -EINVAL if the request is not queued on this ep. */
+static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+       struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
+       struct fsl_req *req;
+       unsigned long flags;
+       int ep_num, stopped, ret = 0;
+       u32 epctrl;
+
+       if (!_ep || !_req)
+               return -EINVAL;
+
+       spin_lock_irqsave(&ep->udc->lock, flags);
+       stopped = ep->stopped;
+
+       /* Stop the ep before we deal with the queue */
+       ep->stopped = 1;
+       ep_num = ep_index(ep);
+       epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+       if (ep_is_in(ep))
+               epctrl &= ~EPCTRL_TX_ENABLE;
+       else
+               epctrl &= ~EPCTRL_RX_ENABLE;
+       fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+
+       /* make sure it's actually queued on this endpoint */
+       list_for_each_entry(req, &ep->queue, queue) {
+               if (&req->req == _req)
+                       break;
+       }
+       if (&req->req != _req) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* The request is in progress, or completed but not dequeued */
+       if (ep->queue.next == &req->queue) {
+               _req->status = -ECONNRESET;
+               fsl_ep_fifo_flush(_ep); /* flush current transfer */
+
+               /* The request isn't the last request in this ep queue */
+               if (req->queue.next != &ep->queue) {
+                       struct ep_queue_head *qh;
+                       struct fsl_req *next_req;
+
+                       qh = ep->qh;
+                       next_req = list_entry(req->queue.next, struct fsl_req,
+                                       queue);
+
+                       /* Point the QH to the first TD of next request.
+                        * NOTE(review): this writes the kernel virtual
+                        * address of the dTD; the controller presumably
+                        * needs the bus address (next_req->head->td_dma) —
+                        * verify against the hardware reference manual. */
+                       fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+               }
+
+               /* The request hasn't been processed, patch up the TD chain */
+       } else {
+               struct fsl_req *prev_req;
+
+               /* Splice this request's tail link into its predecessor */
+               prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
+               fsl_writel(fsl_readl(&req->tail->next_td_ptr),
+                               &prev_req->tail->next_td_ptr);
+
+       }
+
+       done(ep, req, -ECONNRESET);
+
+       /* Enable EP again and restore its previous stopped state */
+out:   epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
+       if (ep_is_in(ep))
+               epctrl |= EPCTRL_TX_ENABLE;
+       else
+               epctrl |= EPCTRL_RX_ENABLE;
+       fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
+       ep->stopped = stopped;
+
+       spin_unlock_irqrestore(&ep->udc->lock, flags);
+       return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------
+ * modify the endpoint halt feature
+ * @ep: the non-isochronous endpoint being stalled
+ * @value: 1--set halt  0--clear halt
+ * Returns zero, or a negative error code.
+*----------------------------------------------------------------*/
+static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
+{
+       struct fsl_ep *ep = NULL;
+       unsigned long flags = 0;
+       int status = -EOPNOTSUPP;       /* operation not supported */
+       unsigned char ep_dir = 0, ep_num = 0;
+       struct fsl_udc *udc = NULL;
+
+       /* NOTE(review): ep->udc (and below ep->desc) are read before the
+        * !_ep check — a NULL _ep would be dereferenced here. */
+       ep = container_of(_ep, struct fsl_ep, ep);
+       udc = ep->udc;
+       if (!_ep || !ep->desc) {
+               status = -EINVAL;
+               goto out;
+       }
+
+       /* ch9: halt is undefined for isochronous endpoints */
+       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+               status = -EOPNOTSUPP;
+               goto out;
+       }
+
+       /* Attempt to halt IN ep will fail if any transfer requests
+        * are still queued */
+       if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
+               status = -EAGAIN;
+               goto out;
+       }
+
+       status = 0;
+       ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
+       ep_num = (unsigned char)(ep_index(ep));
+       spin_lock_irqsave(&ep->udc->lock, flags);
+       dr_ep_change_stall(ep_num, ep_dir, value);
+       spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+       /* Stalling ep0 resets the control state machine */
+       if (ep_index(ep) == 0) {
+               udc->ep0_state = WAIT_FOR_SETUP;
+               udc->ep0_dir = 0;
+       }
+out:
+       VDBG(" %s %s halt stat %d", ep->ep.name,
+                       value ?  "set" : "clear", status);
+
+       return status;
+}
+
+/* Flush the hardware FIFO of an endpoint (both directions for ep0),
+ * retrying until the controller reports the endpoint idle or the
+ * timeout expires. */
+static void fsl_ep_fifo_flush(struct usb_ep *_ep)
+{
+       struct fsl_ep *ep;
+       int ep_num, ep_dir;
+       u32 bits;
+       unsigned long timeout;
+/* NOTE(review): used as a jiffies count (timeout = jiffies + 1000), so the
+ * wall-clock duration depends on HZ — presumably intended as ~1s; confirm. */
+#define FSL_UDC_FLUSH_TIMEOUT 1000
+
+       if (!_ep) {
+               return;
+       } else {
+               ep = container_of(_ep, struct fsl_ep, ep);
+               if (!ep->desc)
+                       return;
+       }
+       ep_num = ep_index(ep);
+       ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
+
+       /* ep0 flushes both RX (bit 0) and TX (bit 16) at once */
+       if (ep_num == 0)
+               bits = (1 << 16) | 1;
+       else if (ep_dir == USB_SEND)
+               bits = 1 << (16 + ep_num);
+       else
+               bits = 1 << ep_num;
+
+       timeout = jiffies + FSL_UDC_FLUSH_TIMEOUT;
+       do {
+               fsl_writel(bits, &dr_regs->endptflush);
+
+               /* Wait until flush complete */
+               while (fsl_readl(&dr_regs->endptflush)) {
+                       if (time_after(jiffies, timeout)) {
+                               ERR("ep flush timeout\n");
+                               return;
+                       }
+                       cpu_relax();
+               }
+               /* See if we need to flush again: the endpoint may have been
+                * re-primed by the hardware while we were flushing */
+       } while (fsl_readl(&dr_regs->endptstatus) & bits);
+}
+
+/* Endpoint operations exported to the gadget core (struct usb_ep_ops) */
+static struct usb_ep_ops fsl_ep_ops = {
+       .enable = fsl_ep_enable,
+       .disable = fsl_ep_disable,
+
+       .alloc_request = fsl_alloc_request,
+       .free_request = fsl_free_request,
+
+       .queue = fsl_ep_queue,
+       .dequeue = fsl_ep_dequeue,
+
+       .set_halt = fsl_ep_set_halt,
+       .fifo_flush = fsl_ep_fifo_flush,        /* flush fifo */
+};
+
+/*-------------------------------------------------------------------------
+               Gadget Driver Layer Operations
+-------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------
+ * Get the current frame number (from DR frame_index Reg )
+ *----------------------------------------------------------------------*/
+static int fsl_get_frame(struct usb_gadget *gadget)
+{
+       u32 frindex;
+
+       /* Only the low bits of FRINDEX carry the frame number */
+       frindex = fsl_readl(&dr_regs->frindex);
+       return (int)(frindex & USB_FRINDEX_MASKS);
+}
+
+/*-----------------------------------------------------------------------
+ * Tries to wake up the host connected to this gadget.
+ * Only allowed when the host has enabled the remote-wakeup feature and
+ * the port is currently suspended; triggers a force-resume on the port.
+ -----------------------------------------------------------------------*/
+static int fsl_wakeup(struct usb_gadget *gadget)
+{
+       struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);
+       u32 portsc;
+
+       /* Remote wakeup feature not enabled by host */
+       if (!udc->remote_wakeup)
+               return -ENOTSUPP;
+
+       portsc = fsl_readl(&dr_regs->portsc1);
+       /* not suspended? nothing to do */
+       if (!(portsc & PORTSCX_PORT_SUSPEND))
+               return 0;
+       /* trigger force resume */
+       portsc |= PORTSCX_PORT_FORCE_RESUME;
+       fsl_writel(portsc, &dr_regs->portsc1);
+       return 0;
+}
+
+/* True when the D+ pullup may be driven: a gadget driver is bound,
+ * software connect is requested, and a VBUS session is active. */
+static int can_pullup(struct fsl_udc *udc)
+{
+       if (!udc->driver)
+               return 0;
+       if (!udc->softconnect)
+               return 0;
+       return udc->vbus_active ? 1 : 0;
+}
+
+/* Notify controller that VBUS is powered, Called by whatever
+   detects VBUS sessions.  Starts or stops the controller (RUN/STOP bit)
+   according to the new VBUS state and the pullup policy. */
+static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+       struct fsl_udc  *udc;
+       unsigned long   flags;
+
+       udc = container_of(gadget, struct fsl_udc, gadget);
+       spin_lock_irqsave(&udc->lock, flags);
+       VDBG("VBUS %s", is_active ? "on" : "off");
+       udc->vbus_active = (is_active != 0);
+       /* Start or stop the controller depending on the combined state */
+       if (can_pullup(udc))
+               fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
+                               &dr_regs->usbcmd);
+       else
+               fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
+                               &dr_regs->usbcmd);
+       spin_unlock_irqrestore(&udc->lock, flags);
+       return 0;
+}
+
+/* constrain controller's VBUS power usage
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume.  For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+       struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);
+
+       /* Without a transceiver the controller cannot limit VBUS current */
+       if (!udc->transceiver)
+               return -ENOTSUPP;
+
+       return otg_set_power(udc->transceiver, mA);
+}
+
+/* Change Data+ pullup status
+ * this func is used by usb_gadget_connect/disconnect
+ */
+static int fsl_pullup(struct usb_gadget *gadget, int is_on)
+{
+       struct fsl_udc *udc;
+
+       udc = container_of(gadget, struct fsl_udc, gadget);
+       udc->softconnect = (is_on != 0);
+       /* Start or stop the controller depending on the combined state */
+       if (can_pullup(udc))
+               fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
+                               &dr_regs->usbcmd);
+       else
+               fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
+                               &dr_regs->usbcmd);
+
+       return 0;
+}
+
+/* Gadget-level operations exported to the gadget core (defined in gadget.h) */
+static struct usb_gadget_ops fsl_gadget_ops = {
+       .get_frame = fsl_get_frame,
+       .wakeup = fsl_wakeup,
+/*     .set_selfpowered = fsl_set_selfpowered, */ /* Always selfpowered */
+       .vbus_session = fsl_vbus_session,
+       .vbus_draw = fsl_vbus_draw,
+       .pullup = fsl_pullup,
+};
+
+/* Set protocol stall on ep0; the hardware clears the stall automatically
+   when the next SETUP transaction arrives. */
+static void ep0stall(struct fsl_udc *udc)
+{
+       u32 epctrl0;
+
+       /* tx and rx stall bits of ep0 must be set at the same time */
+       epctrl0 = fsl_readl(&dr_regs->endptctrl[0]);
+       epctrl0 |= EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL;
+       fsl_writel(epctrl0, &dr_regs->endptctrl[0]);
+
+       /* Fall back to waiting for the next SETUP packet */
+       udc->ep0_state = WAIT_FOR_SETUP;
+       udc->ep0_dir = 0;
+}
+
+/* Prime a status phase for ep0.
+ * Reuses the per-device status_req with a zero-length transfer in the
+ * given direction (EP_DIR_IN / EP_DIR_OUT).
+ * Returns 0 on success, -ENOMEM if the dTD cannot be built.
+ * NOTE(review): ep0_state is set to WAIT_FOR_OUT_STATUS for both
+ * directions — confirm this matches the ep0 state machine's intent. */
+static int ep0_prime_status(struct fsl_udc *udc, int direction)
+{
+       struct fsl_req *req = udc->status_req;
+       struct fsl_ep *ep;
+
+       if (direction == EP_DIR_IN)
+               udc->ep0_dir = USB_DIR_IN;
+       else
+               udc->ep0_dir = USB_DIR_OUT;
+
+       ep = &udc->eps[0];
+       udc->ep0_state = WAIT_FOR_OUT_STATUS;
+
+       /* Zero-length request: status phase carries no data */
+       req->ep = ep;
+       req->req.length = 0;
+       req->req.status = -EINPROGRESS;
+       req->req.actual = 0;
+       req->req.complete = NULL;
+       req->dtd_count = 0;
+
+       if (fsl_req_to_dtd(req) == 0)
+               fsl_queue_td(ep, req);
+       else
+               return -ENOMEM;
+
+       list_add_tail(&req->queue, &ep->queue);
+
+       return 0;
+}
+
+/* Abort every request queued on the endpoint behind @pipe (if that
+ * endpoint has been initialized), completing them with -ESHUTDOWN. */
+static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
+{
+       struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
+
+       /* An endpoint without a name was never set up — nothing to nuke */
+       if (!ep->name)
+               return;
+
+       nuke(ep, -ESHUTDOWN);
+}
+
+/*
+ * ch9 Set address
+ * Records the new device address and primes the IN status phase.  The
+ * DEVICEADDR register itself is written later, in ep0_req_complete(),
+ * once the status phase finishes (per ch9, the old address must remain
+ * in effect until then).
+ */
+static void ch9setaddress(struct fsl_udc *udc, u16 value, u16 index, u16 length)
+{
+       /* Save the new address to device struct */
+       udc->device_address = (u8) value;
+       /* Update usb state */
+       udc->usb_state = USB_STATE_ADDRESS;
+       /* Status phase */
+       if (ep0_prime_status(udc, EP_DIR_IN))
+               ep0stall(udc);
+}
+
+/*
+ * ch9 Get status
+ * Builds the 2-byte status reply (device / interface / endpoint) and
+ * primes the ep0 IN data phase with it, borrowing the per-device
+ * status_req.  Stalls ep0 on any failure.
+ */
+static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
+               u16 index, u16 length)
+{
+       u16 tmp = 0;            /* Status, cpu endian */
+       struct fsl_req *req;
+       struct fsl_ep *ep;
+
+       ep = &udc->eps[0];
+
+       if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+               /* Get device status: self-powered plus remote-wakeup flag */
+               tmp = 1 << USB_DEVICE_SELF_POWERED;
+               tmp |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+       } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+               /* Get interface status */
+               /* We don't have interface information in udc driver */
+               tmp = 0;
+       } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+               /* Get endpoint status */
+               struct fsl_ep *target_ep;
+
+               target_ep = get_ep_by_pipe(udc, get_pipe_by_windex(index));
+
+               /* stall if endpoint doesn't exist */
+               if (!target_ep->desc)
+                       goto stall;
+               tmp = dr_ep_get_stall(ep_index(target_ep), ep_is_in(target_ep))
+                               << USB_ENDPOINT_HALT;
+       }
+
+       udc->ep0_dir = USB_DIR_IN;
+       /* Borrow the per device status_req */
+       req = udc->status_req;
+       /* Fill in the request structure; status is sent little-endian */
+       *((u16 *) req->req.buf) = cpu_to_le16(tmp);
+       req->ep = ep;
+       req->req.length = 2;
+       req->req.status = -EINPROGRESS;
+       req->req.actual = 0;
+       req->req.complete = NULL;
+       req->dtd_count = 0;
+
+       /* prime the data phase */
+       if ((fsl_req_to_dtd(req) == 0))
+               fsl_queue_td(ep, req);
+       else                    /* no mem */
+               goto stall;
+
+       list_add_tail(&req->queue, &ep->queue);
+       udc->ep0_state = DATA_STATE_XMIT;
+       return;
+stall:
+       ep0stall(udc);
+}
+
+/* Handle a received SETUP packet.  Called with udc->lock held; the lock
+ * is dropped around calls out to fsl_ep_set_halt() and the gadget
+ * driver's setup() callback and re-acquired afterwards.  Standard
+ * requests the UDC can answer itself (GET_STATUS, SET_ADDRESS,
+ * CLEAR/SET_FEATURE) are handled here; everything else is forwarded to
+ * the gadget driver. */
+static void setup_received_irq(struct fsl_udc *udc,
+               struct usb_ctrlrequest *setup)
+{
+       u16 wValue = le16_to_cpu(setup->wValue);
+       u16 wIndex = le16_to_cpu(setup->wIndex);
+       u16 wLength = le16_to_cpu(setup->wLength);
+
+       /* A new SETUP supersedes whatever was pending on ep0 */
+       udc_reset_ep_queue(udc, 0);
+
+       /* We process some standard setup requests here */
+       switch (setup->bRequest) {
+       case USB_REQ_GET_STATUS:
+               /* Data+Status phase from udc */
+               if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+                                       != (USB_DIR_IN | USB_TYPE_STANDARD))
+                       break;
+               ch9getstatus(udc, setup->bRequestType, wValue, wIndex, wLength);
+               return;
+
+       case USB_REQ_SET_ADDRESS:
+               /* Status phase from udc */
+               if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
+                                               | USB_RECIP_DEVICE))
+                       break;
+               ch9setaddress(udc, wValue, wIndex, wLength);
+               return;
+
+       case USB_REQ_CLEAR_FEATURE:
+       case USB_REQ_SET_FEATURE:
+               /* Status phase from udc */
+       {
+               int rc = -EOPNOTSUPP;
+
+               if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+                               == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
+                       /* Endpoint halt feature: (un)stall the target ep */
+                       int pipe = get_pipe_by_windex(wIndex);
+                       struct fsl_ep *ep;
+
+                       if (wValue != 0 || wLength != 0 || pipe > udc->max_ep)
+                               break;
+                       ep = get_ep_by_pipe(udc, pipe);
+
+                       /* fsl_ep_set_halt takes the lock itself */
+                       spin_unlock(&udc->lock);
+                       rc = fsl_ep_set_halt(&ep->ep,
+                                       (setup->bRequest == USB_REQ_SET_FEATURE)
+                                               ? 1 : 0);
+                       spin_lock(&udc->lock);
+
+               } else if ((setup->bRequestType & (USB_RECIP_MASK
+                               | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+                               | USB_TYPE_STANDARD)) {
+                       /* Note: The driver has not include OTG support yet.
+                        * This will be set when OTG support is added */
+                       if (!gadget_is_otg(&udc->gadget))
+                               break;
+                       else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
+                               udc->gadget.b_hnp_enable = 1;
+                       else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
+                               udc->gadget.a_hnp_support = 1;
+                       else if (setup->bRequest ==
+                                       USB_DEVICE_A_ALT_HNP_SUPPORT)
+                               udc->gadget.a_alt_hnp_support = 1;
+                       else
+                               break;
+                       rc = 0;
+               } else
+                       break;
+
+               if (rc == 0) {
+                       /* Acknowledge with an IN status phase */
+                       if (ep0_prime_status(udc, EP_DIR_IN))
+                               ep0stall(udc);
+               }
+               return;
+       }
+
+       default:
+               break;
+       }
+
+       /* Requests handled by gadget */
+       if (wLength) {
+               /* Data phase from gadget, status phase from udc */
+               udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+                               ?  USB_DIR_IN : USB_DIR_OUT;
+               spin_unlock(&udc->lock);
+               if (udc->driver->setup(&udc->gadget,
+                               &udc->local_setup_buff) < 0)
+                       ep0stall(udc);
+               spin_lock(&udc->lock);
+               udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
+                               ?  DATA_STATE_XMIT : DATA_STATE_RECV;
+       } else {
+               /* No data phase, IN status from gadget */
+               udc->ep0_dir = USB_DIR_IN;
+               spin_unlock(&udc->lock);
+               if (udc->driver->setup(&udc->gadget,
+                               &udc->local_setup_buff) < 0)
+                       ep0stall(udc);
+               spin_lock(&udc->lock);
+               udc->ep0_state = WAIT_FOR_OUT_STATUS;
+       }
+}
+
+/* Process request for Data or Status phase of ep0;
+ * prime status phase if needed.  Also commits a pending SET_ADDRESS to
+ * the DEVICEADDR register now that the status phase has completed. */
+static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
+               struct fsl_req *req)
+{
+       if (udc->usb_state == USB_STATE_ADDRESS) {
+               /* Set the new address (recorded earlier by ch9setaddress) */
+               u32 new_address = (u32) udc->device_address;
+               fsl_writel(new_address << USB_DEVICE_ADDRESS_BIT_POS,
+                               &dr_regs->deviceaddr);
+       }
+
+       done(ep0, req, 0);
+
+       /* Advance the ep0 control state machine */
+       switch (udc->ep0_state) {
+       case DATA_STATE_XMIT:
+               /* receive status phase */
+               if (ep0_prime_status(udc, EP_DIR_OUT))
+                       ep0stall(udc);
+               break;
+       case DATA_STATE_RECV:
+               /* send status phase */
+               if (ep0_prime_status(udc, EP_DIR_IN))
+                       ep0stall(udc);
+               break;
+       case WAIT_FOR_OUT_STATUS:
+               udc->ep0_state = WAIT_FOR_SETUP;
+               break;
+       case WAIT_FOR_SETUP:
+               ERR("Unexpect ep0 packets\n");
+               break;
+       default:
+               ep0stall(udc);
+               break;
+       }
+}
+
+/* Tripwire mechanism to ensure a setup packet payload is extracted without
+ * being corrupted by another incoming setup packet: the SUTW bit is set
+ * before the copy and re-checked afterwards; the hardware clears it when
+ * a new SETUP arrives, forcing a re-read. */
+static void tripwire_handler(struct fsl_udc *udc, u8 ep_num, u8 *buffer_ptr)
+{
+       u32 temp;
+       struct ep_queue_head *qh;
+
+       /* SETUP packets always land on the OUT queue head of the endpoint */
+       qh = &udc->ep_qh[ep_num * 2 + EP_DIR_OUT];
+
+       /* Clear bit in ENDPTSETUPSTAT */
+       temp = fsl_readl(&dr_regs->endptsetupstat);
+       fsl_writel(temp | (1 << ep_num), &dr_regs->endptsetupstat);
+
+       /* while a hazard exists when setup package arrives */
+       do {
+               /* Set Setup Tripwire */
+               temp = fsl_readl(&dr_regs->usbcmd);
+               fsl_writel(temp | USB_CMD_SUTW, &dr_regs->usbcmd);
+
+               /* Copy the setup packet to local buffer (8 bytes per ch9) */
+               memcpy(buffer_ptr, (u8 *) qh->setup_buffer, 8);
+       } while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_SUTW));
+
+       /* Clear Setup Tripwire */
+       temp = fsl_readl(&dr_regs->usbcmd);
+       fsl_writel(temp & ~USB_CMD_SUTW, &dr_regs->usbcmd);
+}
+
+/* process_ep_req(): walk the completed dTDs of one request, accumulating
+ * the actual byte count and translating hardware error bits to errnos.
+ * Returns 0 on full completion, REQ_UNCOMPLETE if a dTD is still active,
+ * or a negative errno on error. */
+static int process_ep_req(struct fsl_udc *udc, int pipe,
+               struct fsl_req *curr_req)
+{
+       struct ep_td_struct *curr_td;
+       int     td_complete, actual, remaining_length, j, tmp;
+       int     status = 0;
+       int     errors = 0;
+       struct  ep_queue_head *curr_qh = &udc->ep_qh[pipe];
+       int direction = pipe % 2;
+
+       curr_td = curr_req->head;
+       td_complete = 0;
+       /* Start from the requested length and subtract what the hardware
+        * left untransferred in each dTD */
+       actual = curr_req->req.length;
+
+       for (j = 0; j < curr_req->dtd_count; j++) {
+               remaining_length = (le32_to_cpu(curr_td->size_ioc_sts)
+                                       & DTD_PACKET_SIZE)
+                               >> DTD_LENGTH_BIT_POS;
+               actual -= remaining_length;
+
+               if ((errors = le32_to_cpu(curr_td->size_ioc_sts) &
+                                               DTD_ERROR_MASK)) {
+                       if (errors & DTD_STATUS_HALTED) {
+                               ERR("dTD error %08x QH=%d\n", errors, pipe);
+                               /* Clear the errors and Halt condition */
+                               tmp = le32_to_cpu(curr_qh->size_ioc_int_sts);
+                               tmp &= ~errors;
+                               curr_qh->size_ioc_int_sts = cpu_to_le32(tmp);
+                               status = -EPIPE;
+                               /* FIXME: continue with next queued TD? */
+
+                               break;
+                       }
+                       if (errors & DTD_STATUS_DATA_BUFF_ERR) {
+                               VDBG("Transfer overflow");
+                               status = -EPROTO;
+                               break;
+                       } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
+                               VDBG("ISO error");
+                               status = -EILSEQ;
+                               break;
+                       } else
+                               ERR("Unknown error has occured (0x%x)!\n",
+                                       errors);
+
+               } else if (le32_to_cpu(curr_td->size_ioc_sts)
+                               & DTD_STATUS_ACTIVE) {
+                       /* Hardware hasn't finished this dTD yet */
+                       VDBG("Request not complete");
+                       status = REQ_UNCOMPLETE;
+                       return status;
+               } else if (remaining_length) {
+                       if (direction) {
+                               /* IN: leftover bytes mean the controller
+                                * failed to transmit everything */
+                               VDBG("Transmit dTD remaining length not zero");
+                               status = -EPROTO;
+                               break;
+                       } else {
+                               /* OUT: a short packet ends the transfer */
+                               td_complete++;
+                               break;
+                       }
+               } else {
+                       td_complete++;
+                       VDBG("dTD transmitted successful");
+               }
+
+               if (j != curr_req->dtd_count - 1)
+                       curr_td = (struct ep_td_struct *)curr_td->next_td_virt;
+       }
+
+       if (status)
+               return status;
+
+       curr_req->req.actual = actual;
+
+       return 0;
+}
+
+/* Process a DTD completion interrupt: for every endpoint flagged in
+ * ENDPTCOMPLETE, retire finished requests in queue order until one is
+ * found still in progress. */
+static void dtd_complete_irq(struct fsl_udc *udc)
+{
+       u32 bit_pos;
+       int i, ep_num, direction, bit_mask, status;
+       struct fsl_ep *curr_ep;
+       struct fsl_req *curr_req, *temp_req;
+
+       /* Read and clear the completion bits (write-1-to-clear) */
+       bit_pos = fsl_readl(&dr_regs->endptcomplete);
+       fsl_writel(bit_pos, &dr_regs->endptcomplete);
+
+       if (!bit_pos)
+               return;
+
+       /* Pipes are numbered ep0-OUT, ep0-IN, ep1-OUT, ep1-IN, ... */
+       for (i = 0; i < udc->max_ep * 2; i++) {
+               ep_num = i >> 1;
+               direction = i % 2;
+
+               /* OUT completions are bits 0..15, IN completions 16..31 */
+               bit_mask = 1 << (ep_num + 16 * direction);
+
+               if (!(bit_pos & bit_mask))
+                       continue;
+
+               curr_ep = get_ep_by_pipe(udc, i);
+
+               /* Skip endpoints that were never configured */
+               if (curr_ep->name == NULL) {
+                       WARNING("Invalid EP?");
+                       continue;
+               }
+
+               /* process the req queue until an uncomplete request */
+               list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue,
+                               queue) {
+                       status = process_ep_req(udc, i, curr_req);
+
+                       VDBG("status of process_ep_req= %d, ep = %d",
+                                       status, ep_num);
+                       if (status == REQ_UNCOMPLETE)
+                               break;
+                       /* write back status to req */
+                       curr_req->req.status = status;
+
+                       if (ep_num == 0) {
+                               ep0_req_complete(udc, curr_ep, curr_req);
+                               break;
+                       } else
+                               done(curr_ep, curr_req, status);
+               }
+       }
+}
+
+/* Process a port change interrupt: latch the negotiated bus speed once
+ * reset signaling ends, and update the USB state. */
+static void port_change_irq(struct fsl_udc *udc)
+{
+       u32 speed;
+
+       /* Bus resetting is finished */
+       if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET)) {
+               /* Get the speed the port negotiated during reset */
+               speed = (fsl_readl(&dr_regs->portsc1)
+                               & PORTSCX_PORT_SPEED_MASK);
+               switch (speed) {
+               case PORTSCX_PORT_SPEED_HIGH:
+                       udc->gadget.speed = USB_SPEED_HIGH;
+                       break;
+               case PORTSCX_PORT_SPEED_FULL:
+                       udc->gadget.speed = USB_SPEED_FULL;
+                       break;
+               case PORTSCX_PORT_SPEED_LOW:
+                       udc->gadget.speed = USB_SPEED_LOW;
+                       break;
+               default:
+                       udc->gadget.speed = USB_SPEED_UNKNOWN;
+                       break;
+               }
+       }
+
+       /* Update USB state (unless we are mid-resume) */
+       if (!udc->resume_state)
+               udc->usb_state = USB_STATE_DEFAULT;
+}
+
+/* Process suspend interrupt: remember the pre-suspend state so
+ * bus_resume() can restore it, then notify the gadget driver. */
+static void suspend_irq(struct fsl_udc *udc)
+{
+       udc->resume_state = udc->usb_state;
+       udc->usb_state = USB_STATE_SUSPENDED;
+
+       /* report suspend to the driver, serial.c does not support this */
+       if (udc->driver->suspend)
+               udc->driver->suspend(&udc->gadget);
+}
+
+/* Undo suspend_irq(): restore the saved USB state and notify the
+ * gadget driver (the resume callback is optional).
+ */
+static void bus_resume(struct fsl_udc *udc)
+{
+       udc->usb_state = udc->resume_state;
+       udc->resume_state = 0;
+
+       /* report resume to the driver, serial.c does not support this */
+       if (udc->driver->resume)
+               udc->driver->resume(&udc->gadget);
+}
+
+/* Clear up all ep queues.  Called with udc->lock held (as shown by the
+ * unlock/lock pair below); the lock is dropped around the gadget
+ * driver's disconnect() callback.  Always returns 0.
+ */
+static int reset_queues(struct fsl_udc *udc)
+{
+       u8 pipe;
+
+       for (pipe = 0; pipe < udc->max_pipes; pipe++)
+               udc_reset_ep_queue(udc, pipe);
+
+       /* report disconnect; the driver is already quiesced */
+       spin_unlock(&udc->lock);
+       udc->driver->disconnect(&udc->gadget);
+       spin_lock(&udc->lock);
+
+       return 0;
+}
+
+/* Process reset interrupt: clear the device address, pending setup
+ * semaphores and completion bits, flush all primed endpoints, then
+ * distinguish a bus reset (port still in reset) from a controller
+ * reset and re-initialise accordingly.
+ */
+static void reset_irq(struct fsl_udc *udc)
+{
+       u32 temp;
+       unsigned long timeout;
+
+       /* Clear the device address */
+       temp = fsl_readl(&dr_regs->deviceaddr);
+       fsl_writel(temp & ~USB_DEVICE_ADDRESS_MASK, &dr_regs->deviceaddr);
+
+       udc->device_address = 0;
+
+       /* Clear usb state */
+       udc->resume_state = 0;
+       udc->ep0_dir = 0;
+       udc->ep0_state = WAIT_FOR_SETUP;
+       udc->remote_wakeup = 0; /* default to 0 on reset */
+       udc->gadget.b_hnp_enable = 0;
+       udc->gadget.a_hnp_support = 0;
+       udc->gadget.a_alt_hnp_support = 0;
+
+       /* Clear all the setup token semaphores */
+       temp = fsl_readl(&dr_regs->endptsetupstat);
+       fsl_writel(temp, &dr_regs->endptsetupstat);
+
+       /* Clear all the endpoint complete status bits */
+       temp = fsl_readl(&dr_regs->endptcomplete);
+       fsl_writel(temp, &dr_regs->endptcomplete);
+
+       /* NOTE(review): timeout is a raw 100 jiffies, so the wall-clock
+        * bound depends on HZ; msecs_to_jiffies() would make it explicit */
+       timeout = jiffies + 100;
+       while (fsl_readl(&dr_regs->endpointprime)) {
+               /* Wait until all endptprime bits cleared */
+               if (time_after(jiffies, timeout)) {
+                       ERR("Timeout for reset\n");
+                       break;
+               }
+               cpu_relax();
+       }
+
+       /* Write 1s to the flush register */
+       fsl_writel(0xffffffff, &dr_regs->endptflush);
+
+       if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
+               VDBG("Bus reset");
+               /* Reset all the queues, include XD, dTD, EP queue
+                * head and TR Queue */
+               reset_queues(udc);
+               udc->usb_state = USB_STATE_DEFAULT;
+       } else {
+               VDBG("Controller reset");
+               /* initialize usb hw reg except for regs for EP, not
+                * touch usbintr reg */
+               dr_controller_setup(udc);
+
+               /* Reset all internal used Queues */
+               reset_queues(udc);
+
+               ep0_setup(udc);
+
+               /* Enable DR IRQ reg, Set Run bit, change udc state */
+               dr_controller_run(udc);
+               udc->usb_state = USB_STATE_ATTACHED;
+       }
+}
+
+/*
+ * USB device controller interrupt handler.  Reads the interrupt
+ * sources that are both pending and enabled, acknowledges them by
+ * writing the bits back to USBSTS, then dispatches to the helpers
+ * above.  Returns IRQ_HANDLED only if at least one known source fired.
+ */
+static irqreturn_t fsl_udc_irq(int irq, void *_udc)
+{
+       struct fsl_udc *udc = _udc;
+       u32 irq_src;
+       irqreturn_t status = IRQ_NONE;
+       unsigned long flags;
+
+       /* Disable ISR for OTG host mode */
+       if (udc->stopped)
+               return IRQ_NONE;
+       spin_lock_irqsave(&udc->lock, flags);
+       /* only consider sources that are both asserted and enabled */
+       irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
+       /* Clear notification bits */
+       fsl_writel(irq_src, &dr_regs->usbsts);
+
+       /* VDBG("irq_src [0x%8x]", irq_src); */
+
+       /* Need to resume? */
+       if (udc->usb_state == USB_STATE_SUSPENDED)
+               if ((fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_SUSPEND) == 0)
+                       bus_resume(udc);
+
+       /* USB Interrupt */
+       if (irq_src & USB_STS_INT) {
+               VDBG("Packet int");
+               /* Setup package, we only support ep0 as control ep */
+               if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
+                       tripwire_handler(udc, 0,
+                                       (u8 *) (&udc->local_setup_buff));
+                       setup_received_irq(udc, &udc->local_setup_buff);
+                       status = IRQ_HANDLED;
+               }
+
+               /* completion of dtd */
+               if (fsl_readl(&dr_regs->endptcomplete)) {
+                       dtd_complete_irq(udc);
+                       status = IRQ_HANDLED;
+               }
+       }
+
+       /* SOF (for ISO transfer) */
+       if (irq_src & USB_STS_SOF) {
+               status = IRQ_HANDLED;
+       }
+
+       /* Port Change */
+       if (irq_src & USB_STS_PORT_CHANGE) {
+               port_change_irq(udc);
+               status = IRQ_HANDLED;
+       }
+
+       /* Reset Received */
+       if (irq_src & USB_STS_RESET) {
+               reset_irq(udc);
+               status = IRQ_HANDLED;
+       }
+
+       /* Sleep Enable (Suspend) */
+       if (irq_src & USB_STS_SUSPEND) {
+               suspend_irq(udc);
+               status = IRQ_HANDLED;
+       }
+
+       /* error sources are only logged, not marked as handled */
+       if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
+               VDBG("Error IRQ %x", irq_src);
+       }
+
+       spin_unlock_irqrestore(&udc->lock, flags);
+       return status;
+}
+
+/*----------------------------------------------------------------*
+ * Hook to gadget drivers
+ * Called by initialization code of gadget drivers
+ *
+ * Validates the driver (full/high speed; bind, disconnect and setup
+ * callbacks required), hooks it to the single udc_controller, calls
+ * its bind() callback, and finally starts the controller.
+ * Returns 0 on success, -ENODEV/-EINVAL/-EBUSY or bind()'s error.
+*----------------------------------------------------------------*/
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+       int retval = -ENODEV;
+       unsigned long flags = 0;
+
+       if (!udc_controller)
+               return -ENODEV;
+
+       if (!driver || (driver->speed != USB_SPEED_FULL
+                               && driver->speed != USB_SPEED_HIGH)
+                       || !driver->bind || !driver->disconnect
+                       || !driver->setup)
+               return -EINVAL;
+
+       /* only one gadget driver can be bound at a time */
+       if (udc_controller->driver)
+               return -EBUSY;
+
+       /* lock is needed but whether should use this lock or another */
+       spin_lock_irqsave(&udc_controller->lock, flags);
+
+       driver->driver.bus = NULL;
+       /* hook up the driver */
+       udc_controller->driver = driver;
+       udc_controller->gadget.dev.driver = &driver->driver;
+       spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+       /* bind udc driver to gadget driver; unhook again on failure */
+       retval = driver->bind(&udc_controller->gadget);
+       if (retval) {
+               VDBG("bind to %s --> %d", driver->driver.name, retval);
+               udc_controller->gadget.dev.driver = NULL;
+               udc_controller->driver = NULL;
+               goto out;
+       }
+
+       /* Enable DR IRQ reg and Set usbcmd reg  Run bit */
+       dr_controller_run(udc_controller);
+       udc_controller->usb_state = USB_STATE_ATTACHED;
+       udc_controller->ep0_state = WAIT_FOR_SETUP;
+       udc_controller->ep0_dir = 0;
+       printk(KERN_INFO "%s: bind to driver %s\n",
+                       udc_controller->gadget.name, driver->driver.name);
+
+out:
+       if (retval)
+               printk(KERN_WARNING "gadget driver register failed %d\n",
+                      retval);
+       return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+/* Disconnect from gadget driver: detach the OTG transceiver, stop the
+ * controller, flush every endpoint queue with -ESHUTDOWN, then report
+ * disconnect and unbind.  The unbind callback is required here.
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+       struct fsl_ep *loop_ep;
+       unsigned long flags;
+
+       if (!udc_controller)
+               return -ENODEV;
+
+       if (!driver || driver != udc_controller->driver || !driver->unbind)
+               return -EINVAL;
+
+       if (udc_controller->transceiver)
+               otg_set_peripheral(udc_controller->transceiver, NULL);
+
+       /* stop DR, disable intr */
+       dr_controller_stop(udc_controller);
+
+       /* in fact, no needed */
+       udc_controller->usb_state = USB_STATE_ATTACHED;
+       udc_controller->ep0_state = WAIT_FOR_SETUP;
+       udc_controller->ep0_dir = 0;
+
+       /* stand operation: shut down ep0 and every listed endpoint */
+       spin_lock_irqsave(&udc_controller->lock, flags);
+       udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+       nuke(&udc_controller->eps[0], -ESHUTDOWN);
+       list_for_each_entry(loop_ep, &udc_controller->gadget.ep_list,
+                       ep.ep_list)
+               nuke(loop_ep, -ESHUTDOWN);
+       spin_unlock_irqrestore(&udc_controller->lock, flags);
+
+       /* report disconnect; the controller is already quiesced */
+       driver->disconnect(&udc_controller->gadget);
+
+       /* unbind gadget and unhook driver. */
+       driver->unbind(&udc_controller->gadget);
+       udc_controller->gadget.dev.driver = NULL;
+       udc_controller->driver = NULL;
+
+       printk(KERN_WARNING "unregistered gadget driver '%s'\n",
+              driver->driver.name);
+       return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/*-------------------------------------------------------------------------
+               PROC File System Support
+-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+#include <linux/seq_file.h>
+
+static const char proc_filename[] = "driver/fsl_usb2_udc";
+
+/* Dump driver info, controller registers and per-endpoint request
+ * queues into the proc page.  Single-shot read: any non-zero offset
+ * returns 0 (EOF).  Runs entirely under udc->lock.
+ */
+static int fsl_proc_read(char *page, char **start, off_t off, int count,
+               int *eof, void *_dev)
+{
+       char *buf = page;
+       char *next = buf;
+       unsigned size = count;
+       unsigned long flags;
+       int t, i;
+       u32 tmp_reg;
+       struct fsl_ep *ep = NULL;
+       struct fsl_req *req;
+
+       struct fsl_udc *udc = udc_controller;
+       if (off != 0)
+               return 0;
+
+       spin_lock_irqsave(&udc->lock, flags);
+
+       /* ------basic driver information ---- */
+       t = scnprintf(next, size,
+                       DRIVER_DESC "\n"
+                       "%s version: %s\n"
+                       "Gadget driver: %s\n\n",
+                       driver_name, DRIVER_VERSION,
+                       udc->driver ? udc->driver->driver.name : "(none)");
+       size -= t;
+       next += t;
+
+       /* ------ DR Registers ----- */
+       tmp_reg = fsl_readl(&dr_regs->usbcmd);
+       t = scnprintf(next, size,
+                       "USBCMD reg:\n"
+                       "SetupTW: %d\n"
+                       "Run/Stop: %s\n\n",
+                       (tmp_reg & USB_CMD_SUTW) ? 1 : 0,
+                       (tmp_reg & USB_CMD_RUN_STOP) ? "Run" : "Stop");
+       size -= t;
+       next += t;
+
+       tmp_reg = fsl_readl(&dr_regs->usbsts);
+       t = scnprintf(next, size,
+                       "USB Status Reg:\n"
+                       "Dr Suspend: %d Reset Received: %d System Error: %s "
+                       "USB Error Interrupt: %s\n\n",
+                       (tmp_reg & USB_STS_SUSPEND) ? 1 : 0,
+                       (tmp_reg & USB_STS_RESET) ? 1 : 0,
+                       (tmp_reg & USB_STS_SYS_ERR) ? "Err" : "Normal",
+                       (tmp_reg & USB_STS_ERR) ? "Err detected" : "No err");
+       size -= t;
+       next += t;
+
+       tmp_reg = fsl_readl(&dr_regs->usbintr);
+       t = scnprintf(next, size,
+                       "USB Intrrupt Enable Reg:\n"
+                       "Sleep Enable: %d SOF Received Enable: %d "
+                       "Reset Enable: %d\n"
+                       "System Error Enable: %d "
+                       "Port Change Dectected Enable: %d\n"
+                       "USB Error Intr Enable: %d USB Intr Enable: %d\n\n",
+                       (tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0,
+                       (tmp_reg & USB_INTR_SOF_EN) ? 1 : 0,
+                       (tmp_reg & USB_INTR_RESET_EN) ? 1 : 0,
+                       (tmp_reg & USB_INTR_SYS_ERR_EN) ? 1 : 0,
+                       (tmp_reg & USB_INTR_PTC_DETECT_EN) ? 1 : 0,
+                       (tmp_reg & USB_INTR_ERR_INT_EN) ? 1 : 0,
+                       (tmp_reg & USB_INTR_INT_EN) ? 1 : 0);
+       size -= t;
+       next += t;
+
+       tmp_reg = fsl_readl(&dr_regs->frindex);
+       t = scnprintf(next, size,
+                       "USB Frame Index Reg: Frame Number is 0x%x\n\n",
+                       (tmp_reg & USB_FRINDEX_MASKS));
+       size -= t;
+       next += t;
+
+       tmp_reg = fsl_readl(&dr_regs->deviceaddr);
+       t = scnprintf(next, size,
+                       "USB Device Address Reg: Device Addr is 0x%x\n\n",
+                       (tmp_reg & USB_DEVICE_ADDRESS_MASK));
+       size -= t;
+       next += t;
+
+       tmp_reg = fsl_readl(&dr_regs->endpointlistaddr);
+       t = scnprintf(next, size,
+                       "USB Endpoint List Address Reg: "
+                       "Device Addr is 0x%x\n\n",
+                       (tmp_reg & USB_EP_LIST_ADDRESS_MASK));
+       size -= t;
+       next += t;
+
+       /* PORTSC1 is decoded with GCC statement expressions below */
+       tmp_reg = fsl_readl(&dr_regs->portsc1);
+       t = scnprintf(next, size,
+               "USB Port Status&Control Reg:\n"
+               "Port Transceiver Type : %s Port Speed: %s\n"
+               "PHY Low Power Suspend: %s Port Reset: %s "
+               "Port Suspend Mode: %s\n"
+               "Over-current Change: %s "
+               "Port Enable/Disable Change: %s\n"
+               "Port Enabled/Disabled: %s "
+               "Current Connect Status: %s\n\n", ( {
+                       char *s;
+                       switch (tmp_reg & PORTSCX_PTS_FSLS) {
+                       case PORTSCX_PTS_UTMI:
+                               s = "UTMI"; break;
+                       case PORTSCX_PTS_ULPI:
+                               s = "ULPI "; break;
+                       case PORTSCX_PTS_FSLS:
+                               s = "FS/LS Serial"; break;
+                       default:
+                               s = "None"; break;
+                       }
+                       s;} ), ( {
+                       char *s;
+                       switch (tmp_reg & PORTSCX_PORT_SPEED_UNDEF) {
+                       case PORTSCX_PORT_SPEED_FULL:
+                               s = "Full Speed"; break;
+                       case PORTSCX_PORT_SPEED_LOW:
+                               s = "Low Speed"; break;
+                       case PORTSCX_PORT_SPEED_HIGH:
+                               s = "High Speed"; break;
+                       default:
+                               s = "Undefined"; break;
+                       }
+                       s;
+               } ),
+               (tmp_reg & PORTSCX_PHY_LOW_POWER_SPD) ?
+               "Normal PHY mode" : "Low power mode",
+               (tmp_reg & PORTSCX_PORT_RESET) ? "In Reset" :
+               "Not in Reset",
+               (tmp_reg & PORTSCX_PORT_SUSPEND) ? "In " : "Not in",
+               (tmp_reg & PORTSCX_OVER_CURRENT_CHG) ? "Dected" :
+               "No",
+               (tmp_reg & PORTSCX_PORT_EN_DIS_CHANGE) ? "Disable" :
+               "Not change",
+               (tmp_reg & PORTSCX_PORT_ENABLE) ? "Enable" :
+               "Not correct",
+               (tmp_reg & PORTSCX_CURRENT_CONNECT_STATUS) ?
+               "Attached" : "Not-Att");
+       size -= t;
+       next += t;
+
+       tmp_reg = fsl_readl(&dr_regs->usbmode);
+       t = scnprintf(next, size,
+                       "USB Mode Reg: Controller Mode is: %s\n\n", ( {
+                               char *s;
+                               switch (tmp_reg & USB_MODE_CTRL_MODE_HOST) {
+                               case USB_MODE_CTRL_MODE_IDLE:
+                                       s = "Idle"; break;
+                               case USB_MODE_CTRL_MODE_DEVICE:
+                                       s = "Device Controller"; break;
+                               case USB_MODE_CTRL_MODE_HOST:
+                                       s = "Host Controller"; break;
+                               default:
+                                       s = "None"; break;
+                               }
+                               s;
+                       } ));
+       size -= t;
+       next += t;
+
+       tmp_reg = fsl_readl(&dr_regs->endptsetupstat);
+       t = scnprintf(next, size,
+                       "Endpoint Setup Status Reg: SETUP on ep 0x%x\n\n",
+                       (tmp_reg & EP_SETUP_STATUS_MASK));
+       size -= t;
+       next += t;
+
+       /* one control register per endpoint pair */
+       for (i = 0; i < udc->max_ep / 2; i++) {
+               tmp_reg = fsl_readl(&dr_regs->endptctrl[i]);
+               t = scnprintf(next, size, "EP Ctrl Reg [0x%x]: = [0x%x]\n",
+                               i, tmp_reg);
+               size -= t;
+               next += t;
+       }
+       tmp_reg = fsl_readl(&dr_regs->endpointprime);
+       t = scnprintf(next, size, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
+       size -= t;
+       next += t;
+
+#ifndef CONFIG_ARCH_MXC
+       tmp_reg = usb_sys_regs->snoop1;
+       t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
+       size -= t;
+       next += t;
+
+       tmp_reg = usb_sys_regs->control;
+       t = scnprintf(next, size, "General Control Reg : = [0x%x]\n\n",
+                       tmp_reg);
+       size -= t;
+       next += t;
+#endif
+
+       /* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
+       ep = &udc->eps[0];
+       t = scnprintf(next, size, "For %s Maxpkt is 0x%x index is 0x%x\n",
+                       ep->ep.name, ep_maxpacket(ep), ep_index(ep));
+       size -= t;
+       next += t;
+
+       if (list_empty(&ep->queue)) {
+               t = scnprintf(next, size, "its req queue is empty\n\n");
+               size -= t;
+               next += t;
+       } else {
+               list_for_each_entry(req, &ep->queue, queue) {
+                       t = scnprintf(next, size,
+                               "req %p actual 0x%x length 0x%x buf %p\n",
+                               &req->req, req->req.actual,
+                               req->req.length, req->req.buf);
+                       size -= t;
+                       next += t;
+               }
+       }
+       /* other gadget->eplist ep (only those with an active descriptor) */
+       list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+               if (ep->desc) {
+                       t = scnprintf(next, size,
+                                       "\nFor %s Maxpkt is 0x%x "
+                                       "index is 0x%x\n",
+                                       ep->ep.name, ep_maxpacket(ep),
+                                       ep_index(ep));
+                       size -= t;
+                       next += t;
+
+                       if (list_empty(&ep->queue)) {
+                               t = scnprintf(next, size,
+                                               "its req queue is empty\n\n");
+                               size -= t;
+                               next += t;
+                       } else {
+                               list_for_each_entry(req, &ep->queue, queue) {
+                                       t = scnprintf(next, size,
+                                               "req %p actual 0x%x length "
+                                               "0x%x  buf %p\n",
+                                               &req->req, req->req.actual,
+                                               req->req.length, req->req.buf);
+                                       size -= t;
+                                       next += t;
+                                       }       /* end for each_entry of ep req */
+                               }       /* end for else */
+                       }       /* end if (ep->desc) */
+               }               /* end list_for_each ep */
+
+       spin_unlock_irqrestore(&udc->lock, flags);
+
+       *eof = 1;
+       return count - size;
+}
+
+/* With CONFIG_USB_GADGET_DEBUG_FILES the proc entry is created at
+ * probe time and removed on driver removal; otherwise both hooks
+ * compile away to no-ops.
+ */
+#define create_proc_file()     create_proc_read_entry(proc_filename, \
+                               0, NULL, fsl_proc_read, NULL)
+
+#define remove_proc_file()     remove_proc_entry(proc_filename, NULL)
+
+#else                          /* !CONFIG_USB_GADGET_DEBUG_FILES */
+
+#define create_proc_file()     do {} while (0)
+#define remove_proc_file()     do {} while (0)
+
+#endif                         /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/*-------------------------------------------------------------------------*/
+
+/* Release udc structures; invoked by the device core when the last
+ * reference to gadget.dev is dropped.  Signals udc_controller->done so
+ * fsl_udc_remove() can finish, then frees the QH DMA memory and the
+ * udc structure itself.
+ */
+static void fsl_udc_release(struct device *dev)
+{
+       complete(udc_controller->done);
+       dma_free_coherent(dev, udc_controller->ep_qh_size,
+                       udc_controller->ep_qh, udc_controller->ep_qh_dma);
+       kfree(udc_controller);
+}
+
+/******************************************************************
+       Internal structure setup functions
+*******************************************************************/
+/*------------------------------------------------------------------
+ * init resource for global controller: endpoint array, DMA-coherent
+ * queue heads, ep0 status request, and the initial soft state.
+ * Returns 0 on success or -1 on allocation failure.
+ ------------------------------------------------------------------*/
+static int __init struct_udc_setup(struct fsl_udc *udc,
+               struct platform_device *pdev)
+{
+       struct fsl_usb2_platform_data *pdata;
+       size_t size;
+
+       pdata = pdev->dev.platform_data;
+       udc->phy_mode = pdata->phy_mode;
+
+       udc->eps = kzalloc(sizeof(struct fsl_ep) * udc->max_ep, GFP_KERNEL);
+       if (!udc->eps) {
+               ERR("malloc fsl_ep failed\n");
+               return -1;
+       }
+
+       /* initialized QHs, take care of alignment */
+       size = udc->max_ep * sizeof(struct ep_queue_head);
+       if (size < QH_ALIGNMENT)
+               size = QH_ALIGNMENT;
+       else if ((size % QH_ALIGNMENT) != 0) {
+               /* round up to a QH_ALIGNMENT boundary; the "+ 1" can
+                * overshoot by one alignment unit but never undershoots */
+               size += QH_ALIGNMENT + 1;
+               size &= ~(QH_ALIGNMENT - 1);
+       }
+       udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
+                                       &udc->ep_qh_dma, GFP_KERNEL);
+       if (!udc->ep_qh) {
+               ERR("malloc QHs for udc failed\n");
+               kfree(udc->eps);
+               return -1;
+       }
+
+       udc->ep_qh_size = size;
+
+       /* Initialize ep0 status request structure */
+       /* FIXME: fsl_alloc_request() ignores ep argument */
+       udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
+                       struct fsl_req, req);
+       /* allocate a small amount of memory to get valid address */
+       /* NOTE(review): neither fsl_alloc_request() nor this kmalloc()
+        * result is checked for NULL before use */
+       udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+       udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
+
+       udc->resume_state = USB_STATE_NOTATTACHED;
+       udc->usb_state = USB_STATE_POWERED;
+       udc->ep0_dir = 0;
+       udc->remote_wakeup = 0; /* default to 0 on reset */
+
+       return 0;
+}
+
+/*----------------------------------------------------------------
+ * Setup the fsl_ep struct for eps
+ * Link fsl_ep->ep to gadget->ep_list (only when 'link' is set, so
+ * ep0 stays off the autoconfig list)
+ * ep0out is not used so do nothing here
+ * ep0in should be taken care
+ * Always returns 0.
+ *--------------------------------------------------------------*/
+static int __init struct_ep_setup(struct fsl_udc *udc, unsigned char index,
+               char *name, int link)
+{
+       struct fsl_ep *ep = &udc->eps[index];
+
+       ep->udc = udc;
+       strcpy(ep->name, name);
+       ep->ep.name = ep->name;
+
+       ep->ep.ops = &fsl_ep_ops;
+       ep->stopped = 0;
+
+       /* for ep0: maxP defined in desc
+        * for other eps, maxP is set by epautoconfig() called by gadget layer
+        */
+       ep->ep.maxpacket = (unsigned short) ~0;
+
+       /* the queue lists any req for this ep */
+       INIT_LIST_HEAD(&ep->queue);
+
+       /* gadget.ep_list used for ep_autoconfig so no ep0 */
+       if (link)
+               list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+       ep->gadget = &udc->gadget;
+       ep->qh = &udc->ep_qh[index];
+
+       return 0;
+}
+
+/* Driver probe function
+ * all initialization operations implemented here except enabling usb_intr reg
+ * board setup should have been done in the platform code
+ * On any failure all previously acquired resources are unwound via the
+ * goto labels at the bottom.
+ */
+static int __init fsl_udc_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       int ret = -ENODEV;
+       unsigned int i;
+       u32 dccparams;
+
+       if (strcmp(pdev->name, driver_name)) {
+               VDBG("Wrong device");
+               return -ENODEV;
+       }
+
+       udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
+       if (udc_controller == NULL) {
+               ERR("malloc udc failed\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_init(&udc_controller->lock);
+       udc_controller->stopped = 1;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               ret = -ENXIO;
+               goto err_kfree;
+       }
+
+       if (!request_mem_region(res->start, res->end - res->start + 1,
+                               driver_name)) {
+               ERR("request mem region for %s failed\n", pdev->name);
+               ret = -EBUSY;
+               goto err_kfree;
+       }
+
+       dr_regs = ioremap(res->start, resource_size(res));
+       if (!dr_regs) {
+               ret = -ENOMEM;
+               goto err_release_mem_region;
+       }
+
+#ifndef CONFIG_ARCH_MXC
+       usb_sys_regs = (struct usb_sys_interface *)
+                       ((u32)dr_regs + USB_DR_SYS_OFFSET);
+#endif
+
+       /* Initialize USB clocks */
+       ret = fsl_udc_clk_init(pdev);
+       if (ret < 0)
+               goto err_iounmap_noclk;
+
+       /* Read Device Controller Capability Parameters register */
+       dccparams = fsl_readl(&dr_regs->dccparams);
+       if (!(dccparams & DCCPARAMS_DC)) {
+               ERR("This SOC doesn't support device role\n");
+               ret = -ENODEV;
+               goto err_iounmap;
+       }
+       /* Get max device endpoints */
+       /* DEN is bidirectional ep number, max_ep doubles the number */
+       udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;
+
+       udc_controller->irq = platform_get_irq(pdev, 0);
+       /* NOTE(review): platform_get_irq() may return a negative errno;
+        * only the zero case is rejected here */
+       if (!udc_controller->irq) {
+               ret = -ENODEV;
+               goto err_iounmap;
+       }
+
+       ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
+                       driver_name, udc_controller);
+       if (ret != 0) {
+               ERR("cannot request irq %d err %d\n",
+                               udc_controller->irq, ret);
+               goto err_iounmap;
+       }
+
+       /* Initialize the udc structure including QH member and other member */
+       if (struct_udc_setup(udc_controller, pdev)) {
+               ERR("Can't initialize udc data structure\n");
+               ret = -ENOMEM;
+               goto err_free_irq;
+       }
+
+       /* initialize usb hw reg except for regs for EP,
+        * leave usbintr reg untouched */
+       dr_controller_setup(udc_controller);
+
+       fsl_udc_clk_finalize(pdev);
+
+       /* Setup gadget structure */
+       udc_controller->gadget.ops = &fsl_gadget_ops;
+       udc_controller->gadget.is_dualspeed = 1;
+       udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
+       INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
+       udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
+       udc_controller->gadget.name = driver_name;
+
+       /* Setup gadget.dev and register with kernel; fsl_udc_release()
+        * runs when this device is finally unregistered and released */
+       dev_set_name(&udc_controller->gadget.dev, "gadget");
+       udc_controller->gadget.dev.release = fsl_udc_release;
+       udc_controller->gadget.dev.parent = &pdev->dev;
+       ret = device_register(&udc_controller->gadget.dev);
+       if (ret < 0)
+               goto err_free_irq;
+
+       /* setup QH and epctrl for ep0 */
+       ep0_setup(udc_controller);
+
+       /* setup udc->eps[] for ep0 */
+       struct_ep_setup(udc_controller, 0, "ep0", 0);
+       /* for ep0: the desc defined here;
+        * for other eps, gadget layer called ep_enable with defined desc
+        */
+       udc_controller->eps[0].desc = &fsl_ep0_desc;
+       udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
+
+       /* setup the udc->eps[] for non-control endpoints and link
+        * to gadget.ep_list */
+       for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
+               char name[14];
+
+               sprintf(name, "ep%dout", i);
+               struct_ep_setup(udc_controller, i * 2, name, 1);
+               sprintf(name, "ep%din", i);
+               struct_ep_setup(udc_controller, i * 2 + 1, name, 1);
+       }
+
+       /* use dma_pool for TD management */
+       udc_controller->td_pool = dma_pool_create("udc_td", &pdev->dev,
+                       sizeof(struct ep_td_struct),
+                       DTD_ALIGNMENT, UDC_DMA_BOUNDARY);
+       if (udc_controller->td_pool == NULL) {
+               ret = -ENOMEM;
+               goto err_unregister;
+       }
+       create_proc_file();
+       return 0;
+
+err_unregister:
+       device_unregister(&udc_controller->gadget.dev);
+err_free_irq:
+       free_irq(udc_controller->irq, udc_controller);
+err_iounmap:
+       fsl_udc_clk_release();
+err_iounmap_noclk:
+       iounmap(dr_regs);
+err_release_mem_region:
+       release_mem_region(res->start, res->end - res->start + 1);
+err_kfree:
+       kfree(udc_controller);
+       udc_controller = NULL;
+       return ret;
+}
+
+/* Driver removal function
+ * Free resources and finish pending transactions.  The on-stack
+ * completion is signalled by fsl_udc_release() (the gadget.dev release
+ * callback), which also frees the udc structure itself.
+ */
+static int __exit fsl_udc_remove(struct platform_device *pdev)
+{
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       DECLARE_COMPLETION(done);
+
+       if (!udc_controller)
+               return -ENODEV;
+       udc_controller->done = &done;
+
+       fsl_udc_clk_release();
+
+       /* DR has been stopped in usb_gadget_unregister_driver() */
+       remove_proc_file();
+
+       /* Free allocated memory */
+       kfree(udc_controller->status_req->req.buf);
+       kfree(udc_controller->status_req);
+       kfree(udc_controller->eps);
+
+       dma_pool_destroy(udc_controller->td_pool);
+       free_irq(udc_controller->irq, udc_controller);
+       iounmap(dr_regs);
+       release_mem_region(res->start, res->end - res->start + 1);
+
+       device_unregister(&udc_controller->gadget.dev);
+       /* free udc --wait for the release() finished */
+       wait_for_completion(&done);
+
+       return 0;
+}
+
+/*-----------------------------------------------------------------
+ * Modify Power management attributes
+ * Used by OTG statemachine to disable gadget temporarily
+ * (simply stops the DR controller; state is restored on resume)
+ -----------------------------------------------------------------*/
+static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       dr_controller_stop(udc_controller);
+       return 0;
+}
+
+/*-----------------------------------------------------------------
+ * Invoked on USB resume. May be called in_interrupt.
+ * Here we start the DR controller and enable the irq
+ * (re-running setup only if the controller was actually stopped),
+ * then reset the ep0 soft state.
+ *-----------------------------------------------------------------*/
+static int fsl_udc_resume(struct platform_device *pdev)
+{
+       /* Enable DR irq reg and set controller Run */
+       if (udc_controller->stopped) {
+               dr_controller_setup(udc_controller);
+               dr_controller_run(udc_controller);
+       }
+       udc_controller->usb_state = USB_STATE_ATTACHED;
+       udc_controller->ep0_state = WAIT_FOR_SETUP;
+       udc_controller->ep0_dir = 0;
+       return 0;
+}
+
+/*-------------------------------------------------------------------------
+       Register entry point for the peripheral controller driver
+--------------------------------------------------------------------------*/
+
+/* .probe is intentionally absent: udc_init() uses
+ * platform_driver_probe(), which passes fsl_udc_probe directly so the
+ * __init probe code can be discarded after boot. */
+static struct platform_driver udc_driver = {
+       .remove  = __exit_p(fsl_udc_remove),
+       /* these suspend and resume are not usb suspend and resume */
+       .suspend = fsl_udc_suspend,
+       .resume  = fsl_udc_resume,
+       .driver  = {
+               .name = (char *)driver_name,
+               .owner = THIS_MODULE,
+       },
+};
+
+/* Module entry: announce the driver and register it, binding the
+ * one-shot __init probe via platform_driver_probe(). */
+static int __init udc_init(void)
+{
+       printk(KERN_INFO "%s (%s)\n", driver_desc, DRIVER_VERSION);
+       return platform_driver_probe(&udc_driver, fsl_udc_probe);
+}
+
+module_init(udc_init);
+
+/* Module exit: unregister the platform driver (triggering
+ * fsl_udc_remove for a bound device) and log the teardown. */
+static void __exit udc_exit(void)
+{
+       platform_driver_unregister(&udc_driver);
+       printk(KERN_WARNING "%s unregistered\n", driver_desc);
+}
+
+module_exit(udc_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:fsl-usb2-udc");
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
deleted file mode 100644 (file)
index 9d7b95d..0000000
+++ /dev/null
@@ -1,2468 +0,0 @@
-/*
- * Copyright (C) 2004-2007 Freescale Semicondutor, Inc. All rights reserved.
- *
- * Author: Li Yang <leoli@freescale.com>
- *         Jiang Bo <tanya.jiang@freescale.com>
- *
- * Description:
- * Freescale high-speed USB SOC DR module device controller driver.
- * This can be found on MPC8349E/MPC8313E cpus.
- * The driver is previously named as mpc_udc.  Based on bare board
- * code from Dave Liu and Shlomi Gridish.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#undef VERBOSE
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/mm.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/otg.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/fsl_devices.h>
-#include <linux/dmapool.h>
-
-#include <asm/byteorder.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/unaligned.h>
-#include <asm/dma.h>
-
-#include "fsl_usb2_udc.h"
-
-#define        DRIVER_DESC     "Freescale High-Speed USB SOC Device Controller driver"
-#define        DRIVER_AUTHOR   "Li Yang/Jiang Bo"
-#define        DRIVER_VERSION  "Apr 20, 2007"
-
-#define        DMA_ADDR_INVALID        (~(dma_addr_t)0)
-
-static const char driver_name[] = "fsl-usb2-udc";
-static const char driver_desc[] = DRIVER_DESC;
-
-static struct usb_dr_device *dr_regs;
-static struct usb_sys_interface *usb_sys_regs;
-
-/* it is initialized in probe()  */
-static struct fsl_udc *udc_controller = NULL;
-
-static const struct usb_endpoint_descriptor
-fsl_ep0_desc = {
-       .bLength =              USB_DT_ENDPOINT_SIZE,
-       .bDescriptorType =      USB_DT_ENDPOINT,
-       .bEndpointAddress =     0,
-       .bmAttributes =         USB_ENDPOINT_XFER_CONTROL,
-       .wMaxPacketSize =       USB_MAX_CTRL_PAYLOAD,
-};
-
-static void fsl_ep_fifo_flush(struct usb_ep *_ep);
-
-#ifdef CONFIG_PPC32
-#define fsl_readl(addr)                in_le32(addr)
-#define fsl_writel(val32, addr) out_le32(addr, val32)
-#else
-#define fsl_readl(addr)                readl(addr)
-#define fsl_writel(val32, addr) writel(val32, addr)
-#endif
-
-/********************************************************************
- *     Internal Used Function
-********************************************************************/
-/*-----------------------------------------------------------------
- * done() - retire a request; caller blocked irqs
- * @status : request status to be set, only works when
- *     request is still in progress.
- *--------------------------------------------------------------*/
-static void done(struct fsl_ep *ep, struct fsl_req *req, int status)
-{
-       struct fsl_udc *udc = NULL;
-       unsigned char stopped = ep->stopped;
-       struct ep_td_struct *curr_td, *next_td;
-       int j;
-
-       udc = (struct fsl_udc *)ep->udc;
-       /* Removed the req from fsl_ep->queue */
-       list_del_init(&req->queue);
-
-       /* req.status should be set as -EINPROGRESS in ep_queue() */
-       if (req->req.status == -EINPROGRESS)
-               req->req.status = status;
-       else
-               status = req->req.status;
-
-       /* Free dtd for the request */
-       next_td = req->head;
-       for (j = 0; j < req->dtd_count; j++) {
-               curr_td = next_td;
-               if (j != req->dtd_count - 1) {
-                       next_td = curr_td->next_td_virt;
-               }
-               dma_pool_free(udc->td_pool, curr_td, curr_td->td_dma);
-       }
-
-       if (req->mapped) {
-               dma_unmap_single(ep->udc->gadget.dev.parent,
-                       req->req.dma, req->req.length,
-                       ep_is_in(ep)
-                               ? DMA_TO_DEVICE
-                               : DMA_FROM_DEVICE);
-               req->req.dma = DMA_ADDR_INVALID;
-               req->mapped = 0;
-       } else
-               dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
-                       req->req.dma, req->req.length,
-                       ep_is_in(ep)
-                               ? DMA_TO_DEVICE
-                               : DMA_FROM_DEVICE);
-
-       if (status && (status != -ESHUTDOWN))
-               VDBG("complete %s req %p stat %d len %u/%u",
-                       ep->ep.name, &req->req, status,
-                       req->req.actual, req->req.length);
-
-       ep->stopped = 1;
-
-       spin_unlock(&ep->udc->lock);
-       /* complete() is from gadget layer,
-        * eg fsg->bulk_in_complete() */
-       if (req->req.complete)
-               req->req.complete(&ep->ep, &req->req);
-
-       spin_lock(&ep->udc->lock);
-       ep->stopped = stopped;
-}
-
-/*-----------------------------------------------------------------
- * nuke(): delete all requests related to this ep
- * called with spinlock held
- *--------------------------------------------------------------*/
-static void nuke(struct fsl_ep *ep, int status)
-{
-       ep->stopped = 1;
-
-       /* Flush fifo */
-       fsl_ep_fifo_flush(&ep->ep);
-
-       /* Whether this eq has request linked */
-       while (!list_empty(&ep->queue)) {
-               struct fsl_req *req = NULL;
-
-               req = list_entry(ep->queue.next, struct fsl_req, queue);
-               done(ep, req, status);
-       }
-}
-
-/*------------------------------------------------------------------
-       Internal Hardware related function
- ------------------------------------------------------------------*/
-
-static int dr_controller_setup(struct fsl_udc *udc)
-{
-       unsigned int tmp = 0, portctrl = 0, ctrl = 0;
-       unsigned long timeout;
-#define FSL_UDC_RESET_TIMEOUT 1000
-
-       /* Stop and reset the usb controller */
-       tmp = fsl_readl(&dr_regs->usbcmd);
-       tmp &= ~USB_CMD_RUN_STOP;
-       fsl_writel(tmp, &dr_regs->usbcmd);
-
-       tmp = fsl_readl(&dr_regs->usbcmd);
-       tmp |= USB_CMD_CTRL_RESET;
-       fsl_writel(tmp, &dr_regs->usbcmd);
-
-       /* Wait for reset to complete */
-       timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
-       while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
-               if (time_after(jiffies, timeout)) {
-                       ERR("udc reset timeout!\n");
-                       return -ETIMEDOUT;
-               }
-               cpu_relax();
-       }
-
-       /* Set the controller as device mode */
-       tmp = fsl_readl(&dr_regs->usbmode);
-       tmp |= USB_MODE_CTRL_MODE_DEVICE;
-       /* Disable Setup Lockout */
-       tmp |= USB_MODE_SETUP_LOCK_OFF;
-       fsl_writel(tmp, &dr_regs->usbmode);
-
-       /* Clear the setup status */
-       fsl_writel(0, &dr_regs->usbsts);
-
-       tmp = udc->ep_qh_dma;
-       tmp &= USB_EP_LIST_ADDRESS_MASK;
-       fsl_writel(tmp, &dr_regs->endpointlistaddr);
-
-       VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
-               udc->ep_qh, (int)tmp,
-               fsl_readl(&dr_regs->endpointlistaddr));
-
-       /* Config PHY interface */
-       portctrl = fsl_readl(&dr_regs->portsc1);
-       portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
-       switch (udc->phy_mode) {
-       case FSL_USB2_PHY_ULPI:
-               portctrl |= PORTSCX_PTS_ULPI;
-               break;
-       case FSL_USB2_PHY_UTMI_WIDE:
-               portctrl |= PORTSCX_PTW_16BIT;
-               /* fall through */
-       case FSL_USB2_PHY_UTMI:
-               portctrl |= PORTSCX_PTS_UTMI;
-               break;
-       case FSL_USB2_PHY_SERIAL:
-               portctrl |= PORTSCX_PTS_FSLS;
-               break;
-       default:
-               return -EINVAL;
-       }
-       fsl_writel(portctrl, &dr_regs->portsc1);
-
-       /* Config control enable i/o output, cpu endian register */
-       ctrl = __raw_readl(&usb_sys_regs->control);
-       ctrl |= USB_CTRL_IOENB;
-       __raw_writel(ctrl, &usb_sys_regs->control);
-
-#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
-       /* Turn on cache snooping hardware, since some PowerPC platforms
-        * wholly rely on hardware to deal with cache coherent. */
-
-       /* Setup Snooping for all the 4GB space */
-       tmp = SNOOP_SIZE_2GB;   /* starts from 0x0, size 2G */
-       __raw_writel(tmp, &usb_sys_regs->snoop1);
-       tmp |= 0x80000000;      /* starts from 0x8000000, size 2G */
-       __raw_writel(tmp, &usb_sys_regs->snoop2);
-#endif
-
-       return 0;
-}
-
-/* Enable DR irq and set controller to run state */
-static void dr_controller_run(struct fsl_udc *udc)
-{
-       u32 temp;
-
-       /* Enable DR irq reg */
-       temp = USB_INTR_INT_EN | USB_INTR_ERR_INT_EN
-               | USB_INTR_PTC_DETECT_EN | USB_INTR_RESET_EN
-               | USB_INTR_DEVICE_SUSPEND | USB_INTR_SYS_ERR_EN;
-
-       fsl_writel(temp, &dr_regs->usbintr);
-
-       /* Clear stopped bit */
-       udc->stopped = 0;
-
-       /* Set the controller as device mode */
-       temp = fsl_readl(&dr_regs->usbmode);
-       temp |= USB_MODE_CTRL_MODE_DEVICE;
-       fsl_writel(temp, &dr_regs->usbmode);
-
-       /* Set controller to Run */
-       temp = fsl_readl(&dr_regs->usbcmd);
-       temp |= USB_CMD_RUN_STOP;
-       fsl_writel(temp, &dr_regs->usbcmd);
-
-       return;
-}
-
-static void dr_controller_stop(struct fsl_udc *udc)
-{
-       unsigned int tmp;
-
-       /* disable all INTR */
-       fsl_writel(0, &dr_regs->usbintr);
-
-       /* Set stopped bit for isr */
-       udc->stopped = 1;
-
-       /* disable IO output */
-/*     usb_sys_regs->control = 0; */
-
-       /* set controller to Stop */
-       tmp = fsl_readl(&dr_regs->usbcmd);
-       tmp &= ~USB_CMD_RUN_STOP;
-       fsl_writel(tmp, &dr_regs->usbcmd);
-
-       return;
-}
-
-static void dr_ep_setup(unsigned char ep_num, unsigned char dir,
-                       unsigned char ep_type)
-{
-       unsigned int tmp_epctrl = 0;
-
-       tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
-       if (dir) {
-               if (ep_num)
-                       tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
-               tmp_epctrl |= EPCTRL_TX_ENABLE;
-               tmp_epctrl |= ((unsigned int)(ep_type)
-                               << EPCTRL_TX_EP_TYPE_SHIFT);
-       } else {
-               if (ep_num)
-                       tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
-               tmp_epctrl |= EPCTRL_RX_ENABLE;
-               tmp_epctrl |= ((unsigned int)(ep_type)
-                               << EPCTRL_RX_EP_TYPE_SHIFT);
-       }
-
-       fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
-}
-
-static void
-dr_ep_change_stall(unsigned char ep_num, unsigned char dir, int value)
-{
-       u32 tmp_epctrl = 0;
-
-       tmp_epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
-
-       if (value) {
-               /* set the stall bit */
-               if (dir)
-                       tmp_epctrl |= EPCTRL_TX_EP_STALL;
-               else
-                       tmp_epctrl |= EPCTRL_RX_EP_STALL;
-       } else {
-               /* clear the stall bit and reset data toggle */
-               if (dir) {
-                       tmp_epctrl &= ~EPCTRL_TX_EP_STALL;
-                       tmp_epctrl |= EPCTRL_TX_DATA_TOGGLE_RST;
-               } else {
-                       tmp_epctrl &= ~EPCTRL_RX_EP_STALL;
-                       tmp_epctrl |= EPCTRL_RX_DATA_TOGGLE_RST;
-               }
-       }
-       fsl_writel(tmp_epctrl, &dr_regs->endptctrl[ep_num]);
-}
-
-/* Get stall status of a specific ep
-   Return: 0: not stalled; 1:stalled */
-static int dr_ep_get_stall(unsigned char ep_num, unsigned char dir)
-{
-       u32 epctrl;
-
-       epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
-       if (dir)
-               return (epctrl & EPCTRL_TX_EP_STALL) ? 1 : 0;
-       else
-               return (epctrl & EPCTRL_RX_EP_STALL) ? 1 : 0;
-}
-
-/********************************************************************
-       Internal Structure Build up functions
-********************************************************************/
-
-/*------------------------------------------------------------------
-* struct_ep_qh_setup(): set the Endpoint Capabilites field of QH
- * @zlt: Zero Length Termination Select (1: disable; 0: enable)
- * @mult: Mult field
- ------------------------------------------------------------------*/
-static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
-               unsigned char dir, unsigned char ep_type,
-               unsigned int max_pkt_len,
-               unsigned int zlt, unsigned char mult)
-{
-       struct ep_queue_head *p_QH = &udc->ep_qh[2 * ep_num + dir];
-       unsigned int tmp = 0;
-
-       /* set the Endpoint Capabilites in QH */
-       switch (ep_type) {
-       case USB_ENDPOINT_XFER_CONTROL:
-               /* Interrupt On Setup (IOS). for control ep  */
-               tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
-                       | EP_QUEUE_HEAD_IOS;
-               break;
-       case USB_ENDPOINT_XFER_ISOC:
-               tmp = (max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
-                       | (mult << EP_QUEUE_HEAD_MULT_POS);
-               break;
-       case USB_ENDPOINT_XFER_BULK:
-       case USB_ENDPOINT_XFER_INT:
-               tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
-               break;
-       default:
-               VDBG("error ep type is %d", ep_type);
-               return;
-       }
-       if (zlt)
-               tmp |= EP_QUEUE_HEAD_ZLT_SEL;
-
-       p_QH->max_pkt_length = cpu_to_le32(tmp);
-       p_QH->next_dtd_ptr = 1;
-       p_QH->size_ioc_int_sts = 0;
-
-       return;
-}
-
-/* Setup qh structure and ep register for ep0. */
-static void ep0_setup(struct fsl_udc *udc)
-{
-       /* the intialization of an ep includes: fields in QH, Regs,
-        * fsl_ep struct */
-       struct_ep_qh_setup(udc, 0, USB_RECV, USB_ENDPOINT_XFER_CONTROL,
-                       USB_MAX_CTRL_PAYLOAD, 0, 0);
-       struct_ep_qh_setup(udc, 0, USB_SEND, USB_ENDPOINT_XFER_CONTROL,
-                       USB_MAX_CTRL_PAYLOAD, 0, 0);
-       dr_ep_setup(0, USB_RECV, USB_ENDPOINT_XFER_CONTROL);
-       dr_ep_setup(0, USB_SEND, USB_ENDPOINT_XFER_CONTROL);
-
-       return;
-
-}
-
-/***********************************************************************
-               Endpoint Management Functions
-***********************************************************************/
-
-/*-------------------------------------------------------------------------
- * when configurations are set, or when interface settings change
- * for example the do_set_interface() in gadget layer,
- * the driver will enable or disable the relevant endpoints
- * ep0 doesn't use this routine. It is always enabled.
--------------------------------------------------------------------------*/
-static int fsl_ep_enable(struct usb_ep *_ep,
-               const struct usb_endpoint_descriptor *desc)
-{
-       struct fsl_udc *udc = NULL;
-       struct fsl_ep *ep = NULL;
-       unsigned short max = 0;
-       unsigned char mult = 0, zlt;
-       int retval = -EINVAL;
-       unsigned long flags = 0;
-
-       ep = container_of(_ep, struct fsl_ep, ep);
-
-       /* catch various bogus parameters */
-       if (!_ep || !desc || ep->desc
-                       || (desc->bDescriptorType != USB_DT_ENDPOINT))
-               return -EINVAL;
-
-       udc = ep->udc;
-
-       if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
-               return -ESHUTDOWN;
-
-       max = le16_to_cpu(desc->wMaxPacketSize);
-
-       /* Disable automatic zlp generation.  Driver is reponsible to indicate
-        * explicitly through req->req.zero.  This is needed to enable multi-td
-        * request. */
-       zlt = 1;
-
-       /* Assume the max packet size from gadget is always correct */
-       switch (desc->bmAttributes & 0x03) {
-       case USB_ENDPOINT_XFER_CONTROL:
-       case USB_ENDPOINT_XFER_BULK:
-       case USB_ENDPOINT_XFER_INT:
-               /* mult = 0.  Execute N Transactions as demonstrated by
-                * the USB variable length packet protocol where N is
-                * computed using the Maximum Packet Length (dQH) and
-                * the Total Bytes field (dTD) */
-               mult = 0;
-               break;
-       case USB_ENDPOINT_XFER_ISOC:
-               /* Calculate transactions needed for high bandwidth iso */
-               mult = (unsigned char)(1 + ((max >> 11) & 0x03));
-               max = max & 0x8ff;      /* bit 0~10 */
-               /* 3 transactions at most */
-               if (mult > 3)
-                       goto en_done;
-               break;
-       default:
-               goto en_done;
-       }
-
-       spin_lock_irqsave(&udc->lock, flags);
-       ep->ep.maxpacket = max;
-       ep->desc = desc;
-       ep->stopped = 0;
-
-       /* Controller related setup */
-       /* Init EPx Queue Head (Ep Capabilites field in QH
-        * according to max, zlt, mult) */
-       struct_ep_qh_setup(udc, (unsigned char) ep_index(ep),
-                       (unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
-                                       ?  USB_SEND : USB_RECV),
-                       (unsigned char) (desc->bmAttributes
-                                       & USB_ENDPOINT_XFERTYPE_MASK),
-                       max, zlt, mult);
-
-       /* Init endpoint ctrl register */
-       dr_ep_setup((unsigned char) ep_index(ep),
-                       (unsigned char) ((desc->bEndpointAddress & USB_DIR_IN)
-                                       ? USB_SEND : USB_RECV),
-                       (unsigned char) (desc->bmAttributes
-                                       & USB_ENDPOINT_XFERTYPE_MASK));
-
-       spin_unlock_irqrestore(&udc->lock, flags);
-       retval = 0;
-
-       VDBG("enabled %s (ep%d%s) maxpacket %d",ep->ep.name,
-                       ep->desc->bEndpointAddress & 0x0f,
-                       (desc->bEndpointAddress & USB_DIR_IN)
-                               ? "in" : "out", max);
-en_done:
-       return retval;
-}
-
-/*---------------------------------------------------------------------
- * @ep : the ep being unconfigured. May not be ep0
- * Any pending and uncomplete req will complete with status (-ESHUTDOWN)
-*---------------------------------------------------------------------*/
-static int fsl_ep_disable(struct usb_ep *_ep)
-{
-       struct fsl_udc *udc = NULL;
-       struct fsl_ep *ep = NULL;
-       unsigned long flags = 0;
-       u32 epctrl;
-       int ep_num;
-
-       ep = container_of(_ep, struct fsl_ep, ep);
-       if (!_ep || !ep->desc) {
-               VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
-               return -EINVAL;
-       }
-
-       /* disable ep on controller */
-       ep_num = ep_index(ep);
-       epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
-       if (ep_is_in(ep))
-               epctrl &= ~EPCTRL_TX_ENABLE;
-       else
-               epctrl &= ~EPCTRL_RX_ENABLE;
-       fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
-
-       udc = (struct fsl_udc *)ep->udc;
-       spin_lock_irqsave(&udc->lock, flags);
-
-       /* nuke all pending requests (does flush) */
-       nuke(ep, -ESHUTDOWN);
-
-       ep->desc = NULL;
-       ep->stopped = 1;
-       spin_unlock_irqrestore(&udc->lock, flags);
-
-       VDBG("disabled %s OK", _ep->name);
-       return 0;
-}
-
-/*---------------------------------------------------------------------
- * allocate a request object used by this endpoint
- * the main operation is to insert the req->queue to the eq->queue
- * Returns the request, or null if one could not be allocated
-*---------------------------------------------------------------------*/
-static struct usb_request *
-fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
-{
-       struct fsl_req *req = NULL;
-
-       req = kzalloc(sizeof *req, gfp_flags);
-       if (!req)
-               return NULL;
-
-       req->req.dma = DMA_ADDR_INVALID;
-       INIT_LIST_HEAD(&req->queue);
-
-       return &req->req;
-}
-
-static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
-{
-       struct fsl_req *req = NULL;
-
-       req = container_of(_req, struct fsl_req, req);
-
-       if (_req)
-               kfree(req);
-}
-
-/*-------------------------------------------------------------------------*/
-static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
-{
-       int i = ep_index(ep) * 2 + ep_is_in(ep);
-       u32 temp, bitmask, tmp_stat;
-       struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
-
-       /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
-       VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
-
-       bitmask = ep_is_in(ep)
-               ? (1 << (ep_index(ep) + 16))
-               : (1 << (ep_index(ep)));
-
-       /* check if the pipe is empty */
-       if (!(list_empty(&ep->queue))) {
-               /* Add td to the end */
-               struct fsl_req *lastreq;
-               lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
-               lastreq->tail->next_td_ptr =
-                       cpu_to_le32(req->head->td_dma & DTD_ADDR_MASK);
-               /* Read prime bit, if 1 goto done */
-               if (fsl_readl(&dr_regs->endpointprime) & bitmask)
-                       goto out;
-
-               do {
-                       /* Set ATDTW bit in USBCMD */
-                       temp = fsl_readl(&dr_regs->usbcmd);
-                       fsl_writel(temp | USB_CMD_ATDTW, &dr_regs->usbcmd);
-
-                       /* Read correct status bit */
-                       tmp_stat = fsl_readl(&dr_regs->endptstatus) & bitmask;
-
-               } while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_ATDTW));
-
-               /* Write ATDTW bit to 0 */
-               temp = fsl_readl(&dr_regs->usbcmd);
-               fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
-
-               if (tmp_stat)
-                       goto out;
-       }
-
-       /* Write dQH next pointer and terminate bit to 0 */
-       temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
-       dQH->next_dtd_ptr = cpu_to_le32(temp);
-
-       /* Clear active and halt bit */
-       temp = cpu_to_le32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
-                       | EP_QUEUE_HEAD_STATUS_HALT));
-       dQH->size_ioc_int_sts &= temp;
-
-       /* Ensure that updates to the QH will occure before priming. */
-       wmb();
-
-       /* Prime endpoint by writing 1 to ENDPTPRIME */
-       temp = ep_is_in(ep)
-               ? (1 << (ep_index(ep) + 16))
-               : (1 << (ep_index(ep)));
-       fsl_writel(temp, &dr_regs->endpointprime);
-out:
-       return;
-}
-
-/* Fill in the dTD structure
- * @req: request that the transfer belongs to
- * @length: return actually data length of the dTD
- * @dma: return dma address of the dTD
- * @is_last: return flag if it is the last dTD of the request
- * return: pointer to the built dTD */
-static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
-               dma_addr_t *dma, int *is_last)
-{
-       u32 swap_temp;
-       struct ep_td_struct *dtd;
-
-       /* how big will this transfer be? */
-       *length = min(req->req.length - req->req.actual,
-                       (unsigned)EP_MAX_LENGTH_TRANSFER);
-
-       dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
-       if (dtd == NULL)
-               return dtd;
-
-       dtd->td_dma = *dma;
-       /* Clear reserved field */
-       swap_temp = cpu_to_le32(dtd->size_ioc_sts);
-       swap_temp &= ~DTD_RESERVED_FIELDS;
-       dtd->size_ioc_sts = cpu_to_le32(swap_temp);
-
-       /* Init all of buffer page pointers */
-       swap_temp = (u32) (req->req.dma + req->req.actual);
-       dtd->buff_ptr0 = cpu_to_le32(swap_temp);
-       dtd->buff_ptr1 = cpu_to_le32(swap_temp + 0x1000);
-       dtd->buff_ptr2 = cpu_to_le32(swap_temp + 0x2000);
-       dtd->buff_ptr3 = cpu_to_le32(swap_temp + 0x3000);
-       dtd->buff_ptr4 = cpu_to_le32(swap_temp + 0x4000);
-
-       req->req.actual += *length;
-
-       /* zlp is needed if req->req.zero is set */
-       if (req->req.zero) {
-               if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
-                       *is_last = 1;
-               else
-                       *is_last = 0;
-       } else if (req->req.length == req->req.actual)
-               *is_last = 1;
-       else
-               *is_last = 0;
-
-       if ((*is_last) == 0)
-               VDBG("multi-dtd request!");
-       /* Fill in the transfer size; set active bit */
-       swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
-
-       /* Enable interrupt for the last dtd of a request */
-       if (*is_last && !req->req.no_interrupt)
-               swap_temp |= DTD_IOC;
-
-       dtd->size_ioc_sts = cpu_to_le32(swap_temp);
-
-       mb();
-
-       VDBG("length = %d address= 0x%x", *length, (int)*dma);
-
-       return dtd;
-}
-
-/* Generate dtd chain for a request */
-static int fsl_req_to_dtd(struct fsl_req *req)
-{
-       unsigned        count;
-       int             is_last;
-       int             is_first =1;
-       struct ep_td_struct     *last_dtd = NULL, *dtd;
-       dma_addr_t dma;
-
-       do {
-               dtd = fsl_build_dtd(req, &count, &dma, &is_last);
-               if (dtd == NULL)
-                       return -ENOMEM;
-
-               if (is_first) {
-                       is_first = 0;
-                       req->head = dtd;
-               } else {
-                       last_dtd->next_td_ptr = cpu_to_le32(dma);
-                       last_dtd->next_td_virt = dtd;
-               }
-               last_dtd = dtd;
-
-               req->dtd_count++;
-       } while (!is_last);
-
-       dtd->next_td_ptr = cpu_to_le32(DTD_NEXT_TERMINATE);
-
-       req->tail = dtd;
-
-       return 0;
-}
-
-/* queues (submits) an I/O request to an endpoint */
-static int
-fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
-{
-       struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
-       struct fsl_req *req = container_of(_req, struct fsl_req, req);
-       struct fsl_udc *udc;
-       unsigned long flags;
-       int is_iso = 0;
-
-       /* catch various bogus parameters */
-       if (!_req || !req->req.complete || !req->req.buf
-                       || !list_empty(&req->queue)) {
-               VDBG("%s, bad params", __func__);
-               return -EINVAL;
-       }
-       if (unlikely(!_ep || !ep->desc)) {
-               VDBG("%s, bad ep", __func__);
-               return -EINVAL;
-       }
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
-               if (req->req.length > ep->ep.maxpacket)
-                       return -EMSGSIZE;
-               is_iso = 1;
-       }
-
-       udc = ep->udc;
-       if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
-               return -ESHUTDOWN;
-
-       req->ep = ep;
-
-       /* map virtual address to hardware */
-       if (req->req.dma == DMA_ADDR_INVALID) {
-               req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
-                                       req->req.buf,
-                                       req->req.length, ep_is_in(ep)
-                                               ? DMA_TO_DEVICE
-                                               : DMA_FROM_DEVICE);
-               req->mapped = 1;
-       } else {
-               dma_sync_single_for_device(ep->udc->gadget.dev.parent,
-                                       req->req.dma, req->req.length,
-                                       ep_is_in(ep)
-                                               ? DMA_TO_DEVICE
-                                               : DMA_FROM_DEVICE);
-               req->mapped = 0;
-       }
-
-       req->req.status = -EINPROGRESS;
-       req->req.actual = 0;
-       req->dtd_count = 0;
-
-       spin_lock_irqsave(&udc->lock, flags);
-
-       /* build dtds and push them to device queue */
-       if (!fsl_req_to_dtd(req)) {
-               fsl_queue_td(ep, req);
-       } else {
-               spin_unlock_irqrestore(&udc->lock, flags);
-               return -ENOMEM;
-       }
-
-       /* Update ep0 state */
-       if ((ep_index(ep) == 0))
-               udc->ep0_state = DATA_STATE_XMIT;
-
-       /* irq handler advances the queue */
-       if (req != NULL)
-               list_add_tail(&req->queue, &ep->queue);
-       spin_unlock_irqrestore(&udc->lock, flags);
-
-       return 0;
-}
-
-/* dequeues (cancels, unlinks) an I/O request from an endpoint */
-static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
-       struct fsl_ep *ep = container_of(_ep, struct fsl_ep, ep);
-       struct fsl_req *req;
-       unsigned long flags;
-       int ep_num, stopped, ret = 0;
-       u32 epctrl;
-
-       if (!_ep || !_req)
-               return -EINVAL;
-
-       spin_lock_irqsave(&ep->udc->lock, flags);
-       stopped = ep->stopped;
-
-       /* Stop the ep before we deal with the queue */
-       ep->stopped = 1;
-       ep_num = ep_index(ep);
-       epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
-       if (ep_is_in(ep))
-               epctrl &= ~EPCTRL_TX_ENABLE;
-       else
-               epctrl &= ~EPCTRL_RX_ENABLE;
-       fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
-
-       /* make sure it's actually queued on this endpoint */
-       list_for_each_entry(req, &ep->queue, queue) {
-               if (&req->req == _req)
-                       break;
-       }
-       if (&req->req != _req) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* The request is in progress, or completed but not dequeued */
-       if (ep->queue.next == &req->queue) {
-               _req->status = -ECONNRESET;
-               fsl_ep_fifo_flush(_ep); /* flush current transfer */
-
-               /* The request isn't the last request in this ep queue */
-               if (req->queue.next != &ep->queue) {
-                       struct ep_queue_head *qh;
-                       struct fsl_req *next_req;
-
-                       qh = ep->qh;
-                       next_req = list_entry(req->queue.next, struct fsl_req,
-                                       queue);
-
-                       /* Point the QH to the first TD of next request */
-                       fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
-               }
-
-               /* The request hasn't been processed, patch up the TD chain */
-       } else {
-               struct fsl_req *prev_req;
-
-               prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
-               fsl_writel(fsl_readl(&req->tail->next_td_ptr),
-                               &prev_req->tail->next_td_ptr);
-
-       }
-
-       done(ep, req, -ECONNRESET);
-
-       /* Enable EP */
-out:   epctrl = fsl_readl(&dr_regs->endptctrl[ep_num]);
-       if (ep_is_in(ep))
-               epctrl |= EPCTRL_TX_ENABLE;
-       else
-               epctrl |= EPCTRL_RX_ENABLE;
-       fsl_writel(epctrl, &dr_regs->endptctrl[ep_num]);
-       ep->stopped = stopped;
-
-       spin_unlock_irqrestore(&ep->udc->lock, flags);
-       return ret;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*-----------------------------------------------------------------
- * modify the endpoint halt feature
- * @ep: the non-isochronous endpoint being stalled
- * @value: 1--set halt  0--clear halt
- * Returns zero, or a negative error code.
-*----------------------------------------------------------------*/
-static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
-{
-       struct fsl_ep *ep = NULL;
-       unsigned long flags = 0;
-       int status = -EOPNOTSUPP;       /* operation not supported */
-       unsigned char ep_dir = 0, ep_num = 0;
-       struct fsl_udc *udc = NULL;
-
-       ep = container_of(_ep, struct fsl_ep, ep);
-       udc = ep->udc;
-       if (!_ep || !ep->desc) {
-               status = -EINVAL;
-               goto out;
-       }
-
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
-               status = -EOPNOTSUPP;
-               goto out;
-       }
-
-       /* Attempt to halt IN ep will fail if any transfer requests
-        * are still queue */
-       if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
-               status = -EAGAIN;
-               goto out;
-       }
-
-       status = 0;
-       ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
-       ep_num = (unsigned char)(ep_index(ep));
-       spin_lock_irqsave(&ep->udc->lock, flags);
-       dr_ep_change_stall(ep_num, ep_dir, value);
-       spin_unlock_irqrestore(&ep->udc->lock, flags);
-
-       if (ep_index(ep) == 0) {
-               udc->ep0_state = WAIT_FOR_SETUP;
-               udc->ep0_dir = 0;
-       }
-out:
-       VDBG(" %s %s halt stat %d", ep->ep.name,
-                       value ?  "set" : "clear", status);
-
-       return status;
-}
-
-static void fsl_ep_fifo_flush(struct usb_ep *_ep)
-{
-       struct fsl_ep *ep;
-       int ep_num, ep_dir;
-       u32 bits;
-       unsigned long timeout;
-#define FSL_UDC_FLUSH_TIMEOUT 1000
-
-       if (!_ep) {
-               return;
-       } else {
-               ep = container_of(_ep, struct fsl_ep, ep);
-               if (!ep->desc)
-                       return;
-       }
-       ep_num = ep_index(ep);
-       ep_dir = ep_is_in(ep) ? USB_SEND : USB_RECV;
-
-       if (ep_num == 0)
-               bits = (1 << 16) | 1;
-       else if (ep_dir == USB_SEND)
-               bits = 1 << (16 + ep_num);
-       else
-               bits = 1 << ep_num;
-
-       timeout = jiffies + FSL_UDC_FLUSH_TIMEOUT;
-       do {
-               fsl_writel(bits, &dr_regs->endptflush);
-
-               /* Wait until flush complete */
-               while (fsl_readl(&dr_regs->endptflush)) {
-                       if (time_after(jiffies, timeout)) {
-                               ERR("ep flush timeout\n");
-                               return;
-                       }
-                       cpu_relax();
-               }
-               /* See if we need to flush again */
-       } while (fsl_readl(&dr_regs->endptstatus) & bits);
-}
-
-static struct usb_ep_ops fsl_ep_ops = {
-       .enable = fsl_ep_enable,
-       .disable = fsl_ep_disable,
-
-       .alloc_request = fsl_alloc_request,
-       .free_request = fsl_free_request,
-
-       .queue = fsl_ep_queue,
-       .dequeue = fsl_ep_dequeue,
-
-       .set_halt = fsl_ep_set_halt,
-       .fifo_flush = fsl_ep_fifo_flush,        /* flush fifo */
-};
-
-/*-------------------------------------------------------------------------
-               Gadget Driver Layer Operations
--------------------------------------------------------------------------*/
-
-/*----------------------------------------------------------------------
- * Get the current frame number (from DR frame_index Reg )
- *----------------------------------------------------------------------*/
-static int fsl_get_frame(struct usb_gadget *gadget)
-{
-       return (int)(fsl_readl(&dr_regs->frindex) & USB_FRINDEX_MASKS);
-}
-
-/*-----------------------------------------------------------------------
- * Tries to wake up the host connected to this gadget
- -----------------------------------------------------------------------*/
-static int fsl_wakeup(struct usb_gadget *gadget)
-{
-       struct fsl_udc *udc = container_of(gadget, struct fsl_udc, gadget);
-       u32 portsc;
-
-       /* Remote wakeup feature not enabled by host */
-       if (!udc->remote_wakeup)
-               return -ENOTSUPP;
-
-       portsc = fsl_readl(&dr_regs->portsc1);
-       /* not suspended? */
-       if (!(portsc & PORTSCX_PORT_SUSPEND))
-               return 0;
-       /* trigger force resume */
-       portsc |= PORTSCX_PORT_FORCE_RESUME;
-       fsl_writel(portsc, &dr_regs->portsc1);
-       return 0;
-}
-
-static int can_pullup(struct fsl_udc *udc)
-{
-       return udc->driver && udc->softconnect && udc->vbus_active;
-}
-
-/* Notify controller that VBUS is powered, Called by whatever
-   detects VBUS sessions */
-static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
-{
-       struct fsl_udc  *udc;
-       unsigned long   flags;
-
-       udc = container_of(gadget, struct fsl_udc, gadget);
-       spin_lock_irqsave(&udc->lock, flags);
-       VDBG("VBUS %s", is_active ? "on" : "off");
-       udc->vbus_active = (is_active != 0);
-       if (can_pullup(udc))
-               fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
-                               &dr_regs->usbcmd);
-       else
-               fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
-                               &dr_regs->usbcmd);
-       spin_unlock_irqrestore(&udc->lock, flags);
-       return 0;
-}
-
-/* constrain controller's VBUS power usage
- * This call is used by gadget drivers during SET_CONFIGURATION calls,
- * reporting how much power the device may consume.  For example, this
- * could affect how quickly batteries are recharged.
- *
- * Returns zero on success, else negative errno.
- */
-static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
-{
-       struct fsl_udc *udc;
-
-       udc = container_of(gadget, struct fsl_udc, gadget);
-       if (udc->transceiver)
-               return otg_set_power(udc->transceiver, mA);
-       return -ENOTSUPP;
-}
-
-/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
- */
-static int fsl_pullup(struct usb_gadget *gadget, int is_on)
-{
-       struct fsl_udc *udc;
-
-       udc = container_of(gadget, struct fsl_udc, gadget);
-       udc->softconnect = (is_on != 0);
-       if (can_pullup(udc))
-               fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
-                               &dr_regs->usbcmd);
-       else
-               fsl_writel((fsl_readl(&dr_regs->usbcmd) & ~USB_CMD_RUN_STOP),
-                               &dr_regs->usbcmd);
-
-       return 0;
-}
-
-/* defined in gadget.h */
-static struct usb_gadget_ops fsl_gadget_ops = {
-       .get_frame = fsl_get_frame,
-       .wakeup = fsl_wakeup,
-/*     .set_selfpowered = fsl_set_selfpowered, */ /* Always selfpowered */
-       .vbus_session = fsl_vbus_session,
-       .vbus_draw = fsl_vbus_draw,
-       .pullup = fsl_pullup,
-};
-
-/* Set protocol stall on ep0, protocol stall will automatically be cleared
-   on new transaction */
-static void ep0stall(struct fsl_udc *udc)
-{
-       u32 tmp;
-
-       /* must set tx and rx to stall at the same time */
-       tmp = fsl_readl(&dr_regs->endptctrl[0]);
-       tmp |= EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL;
-       fsl_writel(tmp, &dr_regs->endptctrl[0]);
-       udc->ep0_state = WAIT_FOR_SETUP;
-       udc->ep0_dir = 0;
-}
-
-/* Prime a status phase for ep0 */
-static int ep0_prime_status(struct fsl_udc *udc, int direction)
-{
-       struct fsl_req *req = udc->status_req;
-       struct fsl_ep *ep;
-
-       if (direction == EP_DIR_IN)
-               udc->ep0_dir = USB_DIR_IN;
-       else
-               udc->ep0_dir = USB_DIR_OUT;
-
-       ep = &udc->eps[0];
-       udc->ep0_state = WAIT_FOR_OUT_STATUS;
-
-       req->ep = ep;
-       req->req.length = 0;
-       req->req.status = -EINPROGRESS;
-       req->req.actual = 0;
-       req->req.complete = NULL;
-       req->dtd_count = 0;
-
-       if (fsl_req_to_dtd(req) == 0)
-               fsl_queue_td(ep, req);
-       else
-               return -ENOMEM;
-
-       list_add_tail(&req->queue, &ep->queue);
-
-       return 0;
-}
-
-static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
-{
-       struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
-
-       if (ep->name)
-               nuke(ep, -ESHUTDOWN);
-}
-
-/*
- * ch9 Set address
- */
-static void ch9setaddress(struct fsl_udc *udc, u16 value, u16 index, u16 length)
-{
-       /* Save the new address to device struct */
-       udc->device_address = (u8) value;
-       /* Update usb state */
-       udc->usb_state = USB_STATE_ADDRESS;
-       /* Status phase */
-       if (ep0_prime_status(udc, EP_DIR_IN))
-               ep0stall(udc);
-}
-
-/*
- * ch9 Get status
- */
-static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
-               u16 index, u16 length)
-{
-       u16 tmp = 0;            /* Status, cpu endian */
-       struct fsl_req *req;
-       struct fsl_ep *ep;
-
-       ep = &udc->eps[0];
-
-       if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
-               /* Get device status */
-               tmp = 1 << USB_DEVICE_SELF_POWERED;
-               tmp |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
-       } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
-               /* Get interface status */
-               /* We don't have interface information in udc driver */
-               tmp = 0;
-       } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
-               /* Get endpoint status */
-               struct fsl_ep *target_ep;
-
-               target_ep = get_ep_by_pipe(udc, get_pipe_by_windex(index));
-
-               /* stall if endpoint doesn't exist */
-               if (!target_ep->desc)
-                       goto stall;
-               tmp = dr_ep_get_stall(ep_index(target_ep), ep_is_in(target_ep))
-                               << USB_ENDPOINT_HALT;
-       }
-
-       udc->ep0_dir = USB_DIR_IN;
-       /* Borrow the per device status_req */
-       req = udc->status_req;
-       /* Fill in the reqest structure */
-       *((u16 *) req->req.buf) = cpu_to_le16(tmp);
-       req->ep = ep;
-       req->req.length = 2;
-       req->req.status = -EINPROGRESS;
-       req->req.actual = 0;
-       req->req.complete = NULL;
-       req->dtd_count = 0;
-
-       /* prime the data phase */
-       if ((fsl_req_to_dtd(req) == 0))
-               fsl_queue_td(ep, req);
-       else                    /* no mem */
-               goto stall;
-
-       list_add_tail(&req->queue, &ep->queue);
-       udc->ep0_state = DATA_STATE_XMIT;
-       return;
-stall:
-       ep0stall(udc);
-}
-
-static void setup_received_irq(struct fsl_udc *udc,
-               struct usb_ctrlrequest *setup)
-{
-       u16 wValue = le16_to_cpu(setup->wValue);
-       u16 wIndex = le16_to_cpu(setup->wIndex);
-       u16 wLength = le16_to_cpu(setup->wLength);
-
-       udc_reset_ep_queue(udc, 0);
-
-       /* We process some stardard setup requests here */
-       switch (setup->bRequest) {
-       case USB_REQ_GET_STATUS:
-               /* Data+Status phase from udc */
-               if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
-                                       != (USB_DIR_IN | USB_TYPE_STANDARD))
-                       break;
-               ch9getstatus(udc, setup->bRequestType, wValue, wIndex, wLength);
-               return;
-
-       case USB_REQ_SET_ADDRESS:
-               /* Status phase from udc */
-               if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
-                                               | USB_RECIP_DEVICE))
-                       break;
-               ch9setaddress(udc, wValue, wIndex, wLength);
-               return;
-
-       case USB_REQ_CLEAR_FEATURE:
-       case USB_REQ_SET_FEATURE:
-               /* Status phase from udc */
-       {
-               int rc = -EOPNOTSUPP;
-
-               if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
-                               == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
-                       int pipe = get_pipe_by_windex(wIndex);
-                       struct fsl_ep *ep;
-
-                       if (wValue != 0 || wLength != 0 || pipe > udc->max_ep)
-                               break;
-                       ep = get_ep_by_pipe(udc, pipe);
-
-                       spin_unlock(&udc->lock);
-                       rc = fsl_ep_set_halt(&ep->ep,
-                                       (setup->bRequest == USB_REQ_SET_FEATURE)
-                                               ? 1 : 0);
-                       spin_lock(&udc->lock);
-
-               } else if ((setup->bRequestType & (USB_RECIP_MASK
-                               | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
-                               | USB_TYPE_STANDARD)) {
-                       /* Note: The driver has not include OTG support yet.
-                        * This will be set when OTG support is added */
-                       if (!gadget_is_otg(&udc->gadget))
-                               break;
-                       else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
-                               udc->gadget.b_hnp_enable = 1;
-                       else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
-                               udc->gadget.a_hnp_support = 1;
-                       else if (setup->bRequest ==
-                                       USB_DEVICE_A_ALT_HNP_SUPPORT)
-                               udc->gadget.a_alt_hnp_support = 1;
-                       else
-                               break;
-                       rc = 0;
-               } else
-                       break;
-
-               if (rc == 0) {
-                       if (ep0_prime_status(udc, EP_DIR_IN))
-                               ep0stall(udc);
-               }
-               return;
-       }
-
-       default:
-               break;
-       }
-
-       /* Requests handled by gadget */
-       if (wLength) {
-               /* Data phase from gadget, status phase from udc */
-               udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
-                               ?  USB_DIR_IN : USB_DIR_OUT;
-               spin_unlock(&udc->lock);
-               if (udc->driver->setup(&udc->gadget,
-                               &udc->local_setup_buff) < 0)
-                       ep0stall(udc);
-               spin_lock(&udc->lock);
-               udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
-                               ?  DATA_STATE_XMIT : DATA_STATE_RECV;
-       } else {
-               /* No data phase, IN status from gadget */
-               udc->ep0_dir = USB_DIR_IN;
-               spin_unlock(&udc->lock);
-               if (udc->driver->setup(&udc->gadget,
-                               &udc->local_setup_buff) < 0)
-                       ep0stall(udc);
-               spin_lock(&udc->lock);
-               udc->ep0_state = WAIT_FOR_OUT_STATUS;
-       }
-}
-
-/* Process request for Data or Status phase of ep0
- * prime status phase if needed */
-static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
-               struct fsl_req *req)
-{
-       if (udc->usb_state == USB_STATE_ADDRESS) {
-               /* Set the new address */
-               u32 new_address = (u32) udc->device_address;
-               fsl_writel(new_address << USB_DEVICE_ADDRESS_BIT_POS,
-                               &dr_regs->deviceaddr);
-       }
-
-       done(ep0, req, 0);
-
-       switch (udc->ep0_state) {
-       case DATA_STATE_XMIT:
-               /* receive status phase */
-               if (ep0_prime_status(udc, EP_DIR_OUT))
-                       ep0stall(udc);
-               break;
-       case DATA_STATE_RECV:
-               /* send status phase */
-               if (ep0_prime_status(udc, EP_DIR_IN))
-                       ep0stall(udc);
-               break;
-       case WAIT_FOR_OUT_STATUS:
-               udc->ep0_state = WAIT_FOR_SETUP;
-               break;
-       case WAIT_FOR_SETUP:
-               ERR("Unexpect ep0 packets\n");
-               break;
-       default:
-               ep0stall(udc);
-               break;
-       }
-}
-
-/* Tripwire mechanism to ensure a setup packet payload is extracted without
- * being corrupted by another incoming setup packet */
-static void tripwire_handler(struct fsl_udc *udc, u8 ep_num, u8 *buffer_ptr)
-{
-       u32 temp;
-       struct ep_queue_head *qh;
-
-       qh = &udc->ep_qh[ep_num * 2 + EP_DIR_OUT];
-
-       /* Clear bit in ENDPTSETUPSTAT */
-       temp = fsl_readl(&dr_regs->endptsetupstat);
-       fsl_writel(temp | (1 << ep_num), &dr_regs->endptsetupstat);
-
-       /* while a hazard exists when setup package arrives */
-       do {
-               /* Set Setup Tripwire */
-               temp = fsl_readl(&dr_regs->usbcmd);
-               fsl_writel(temp | USB_CMD_SUTW, &dr_regs->usbcmd);
-
-               /* Copy the setup packet to local buffer */
-               memcpy(buffer_ptr, (u8 *) qh->setup_buffer, 8);
-       } while (!(fsl_readl(&dr_regs->usbcmd) & USB_CMD_SUTW));
-
-       /* Clear Setup Tripwire */
-       temp = fsl_readl(&dr_regs->usbcmd);
-       fsl_writel(temp & ~USB_CMD_SUTW, &dr_regs->usbcmd);
-}
-
-/* process-ep_req(): free the completed Tds for this req */
-static int process_ep_req(struct fsl_udc *udc, int pipe,
-               struct fsl_req *curr_req)
-{
-       struct ep_td_struct *curr_td;
-       int     td_complete, actual, remaining_length, j, tmp;
-       int     status = 0;
-       int     errors = 0;
-       struct  ep_queue_head *curr_qh = &udc->ep_qh[pipe];
-       int direction = pipe % 2;
-
-       curr_td = curr_req->head;
-       td_complete = 0;
-       actual = curr_req->req.length;
-
-       for (j = 0; j < curr_req->dtd_count; j++) {
-               remaining_length = (le32_to_cpu(curr_td->size_ioc_sts)
-                                       & DTD_PACKET_SIZE)
-                               >> DTD_LENGTH_BIT_POS;
-               actual -= remaining_length;
-
-               if ((errors = le32_to_cpu(curr_td->size_ioc_sts) &
-                                               DTD_ERROR_MASK)) {
-                       if (errors & DTD_STATUS_HALTED) {
-                               ERR("dTD error %08x QH=%d\n", errors, pipe);
-                               /* Clear the errors and Halt condition */
-                               tmp = le32_to_cpu(curr_qh->size_ioc_int_sts);
-                               tmp &= ~errors;
-                               curr_qh->size_ioc_int_sts = cpu_to_le32(tmp);
-                               status = -EPIPE;
-                               /* FIXME: continue with next queued TD? */
-
-                               break;
-                       }
-                       if (errors & DTD_STATUS_DATA_BUFF_ERR) {
-                               VDBG("Transfer overflow");
-                               status = -EPROTO;
-                               break;
-                       } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
-                               VDBG("ISO error");
-                               status = -EILSEQ;
-                               break;
-                       } else
-                               ERR("Unknown error has occured (0x%x)!\n",
-                                       errors);
-
-               } else if (le32_to_cpu(curr_td->size_ioc_sts)
-                               & DTD_STATUS_ACTIVE) {
-                       VDBG("Request not complete");
-                       status = REQ_UNCOMPLETE;
-                       return status;
-               } else if (remaining_length) {
-                       if (direction) {
-                               VDBG("Transmit dTD remaining length not zero");
-                               status = -EPROTO;
-                               break;
-                       } else {
-                               td_complete++;
-                               break;
-                       }
-               } else {
-                       td_complete++;
-                       VDBG("dTD transmitted successful");
-               }
-
-               if (j != curr_req->dtd_count - 1)
-                       curr_td = (struct ep_td_struct *)curr_td->next_td_virt;
-       }
-
-       if (status)
-               return status;
-
-       curr_req->req.actual = actual;
-
-       return 0;
-}
-
-/* Process a DTD completion interrupt */
-static void dtd_complete_irq(struct fsl_udc *udc)
-{
-       u32 bit_pos;
-       int i, ep_num, direction, bit_mask, status;
-       struct fsl_ep *curr_ep;
-       struct fsl_req *curr_req, *temp_req;
-
-       /* Clear the bits in the register */
-       bit_pos = fsl_readl(&dr_regs->endptcomplete);
-       fsl_writel(bit_pos, &dr_regs->endptcomplete);
-
-       if (!bit_pos)
-               return;
-
-       for (i = 0; i < udc->max_ep * 2; i++) {
-               ep_num = i >> 1;
-               direction = i % 2;
-
-               bit_mask = 1 << (ep_num + 16 * direction);
-
-               if (!(bit_pos & bit_mask))
-                       continue;
-
-               curr_ep = get_ep_by_pipe(udc, i);
-
-               /* If the ep is configured */
-               if (curr_ep->name == NULL) {
-                       WARNING("Invalid EP?");
-                       continue;
-               }
-
-               /* process the req queue until an uncomplete request */
-               list_for_each_entry_safe(curr_req, temp_req, &curr_ep->queue,
-                               queue) {
-                       status = process_ep_req(udc, i, curr_req);
-
-                       VDBG("status of process_ep_req= %d, ep = %d",
-                                       status, ep_num);
-                       if (status == REQ_UNCOMPLETE)
-                               break;
-                       /* write back status to req */
-                       curr_req->req.status = status;
-
-                       if (ep_num == 0) {
-                               ep0_req_complete(udc, curr_ep, curr_req);
-                               break;
-                       } else
-                               done(curr_ep, curr_req, status);
-               }
-       }
-}
-
-/* Process a port change interrupt */
-static void port_change_irq(struct fsl_udc *udc)
-{
-       u32 speed;
-
-       /* Bus resetting is finished */
-       if (!(fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET)) {
-               /* Get the speed */
-               speed = (fsl_readl(&dr_regs->portsc1)
-                               & PORTSCX_PORT_SPEED_MASK);
-               switch (speed) {
-               case PORTSCX_PORT_SPEED_HIGH:
-                       udc->gadget.speed = USB_SPEED_HIGH;
-                       break;
-               case PORTSCX_PORT_SPEED_FULL:
-                       udc->gadget.speed = USB_SPEED_FULL;
-                       break;
-               case PORTSCX_PORT_SPEED_LOW:
-                       udc->gadget.speed = USB_SPEED_LOW;
-                       break;
-               default:
-                       udc->gadget.speed = USB_SPEED_UNKNOWN;
-                       break;
-               }
-       }
-
-       /* Update USB state */
-       if (!udc->resume_state)
-               udc->usb_state = USB_STATE_DEFAULT;
-}
-
-/* Process suspend interrupt */
-static void suspend_irq(struct fsl_udc *udc)
-{
-       udc->resume_state = udc->usb_state;
-       udc->usb_state = USB_STATE_SUSPENDED;
-
-       /* report suspend to the driver, serial.c does not support this */
-       if (udc->driver->suspend)
-               udc->driver->suspend(&udc->gadget);
-}
-
-static void bus_resume(struct fsl_udc *udc)
-{
-       udc->usb_state = udc->resume_state;
-       udc->resume_state = 0;
-
-       /* report resume to the driver, serial.c does not support this */
-       if (udc->driver->resume)
-               udc->driver->resume(&udc->gadget);
-}
-
-/* Clear up all ep queues */
-static int reset_queues(struct fsl_udc *udc)
-{
-       u8 pipe;
-
-       for (pipe = 0; pipe < udc->max_pipes; pipe++)
-               udc_reset_ep_queue(udc, pipe);
-
-       /* report disconnect; the driver is already quiesced */
-       spin_unlock(&udc->lock);
-       udc->driver->disconnect(&udc->gadget);
-       spin_lock(&udc->lock);
-
-       return 0;
-}
-
-/* Process reset interrupt */
-static void reset_irq(struct fsl_udc *udc)
-{
-       u32 temp;
-       unsigned long timeout;
-
-       /* Clear the device address */
-       temp = fsl_readl(&dr_regs->deviceaddr);
-       fsl_writel(temp & ~USB_DEVICE_ADDRESS_MASK, &dr_regs->deviceaddr);
-
-       udc->device_address = 0;
-
-       /* Clear usb state */
-       udc->resume_state = 0;
-       udc->ep0_dir = 0;
-       udc->ep0_state = WAIT_FOR_SETUP;
-       udc->remote_wakeup = 0; /* default to 0 on reset */
-       udc->gadget.b_hnp_enable = 0;
-       udc->gadget.a_hnp_support = 0;
-       udc->gadget.a_alt_hnp_support = 0;
-
-       /* Clear all the setup token semaphores */
-       temp = fsl_readl(&dr_regs->endptsetupstat);
-       fsl_writel(temp, &dr_regs->endptsetupstat);
-
-       /* Clear all the endpoint complete status bits */
-       temp = fsl_readl(&dr_regs->endptcomplete);
-       fsl_writel(temp, &dr_regs->endptcomplete);
-
-       timeout = jiffies + 100;
-       while (fsl_readl(&dr_regs->endpointprime)) {
-               /* Wait until all endptprime bits cleared */
-               if (time_after(jiffies, timeout)) {
-                       ERR("Timeout for reset\n");
-                       break;
-               }
-               cpu_relax();
-       }
-
-       /* Write 1s to the flush register */
-       fsl_writel(0xffffffff, &dr_regs->endptflush);
-
-       if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
-               VDBG("Bus reset");
-               /* Reset all the queues, include XD, dTD, EP queue
-                * head and TR Queue */
-               reset_queues(udc);
-               udc->usb_state = USB_STATE_DEFAULT;
-       } else {
-               VDBG("Controller reset");
-               /* initialize usb hw reg except for regs for EP, not
-                * touch usbintr reg */
-               dr_controller_setup(udc);
-
-               /* Reset all internal used Queues */
-               reset_queues(udc);
-
-               ep0_setup(udc);
-
-               /* Enable DR IRQ reg, Set Run bit, change udc state */
-               dr_controller_run(udc);
-               udc->usb_state = USB_STATE_ATTACHED;
-       }
-}
-
-/*
- * USB device controller interrupt handler
- */
-static irqreturn_t fsl_udc_irq(int irq, void *_udc)
-{
-       struct fsl_udc *udc = _udc;
-       u32 irq_src;
-       irqreturn_t status = IRQ_NONE;
-       unsigned long flags;
-
-       /* Disable ISR for OTG host mode */
-       if (udc->stopped)
-               return IRQ_NONE;
-       spin_lock_irqsave(&udc->lock, flags);
-       irq_src = fsl_readl(&dr_regs->usbsts) & fsl_readl(&dr_regs->usbintr);
-       /* Clear notification bits */
-       fsl_writel(irq_src, &dr_regs->usbsts);
-
-       /* VDBG("irq_src [0x%8x]", irq_src); */
-
-       /* Need to resume? */
-       if (udc->usb_state == USB_STATE_SUSPENDED)
-               if ((fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_SUSPEND) == 0)
-                       bus_resume(udc);
-
-       /* USB Interrupt */
-       if (irq_src & USB_STS_INT) {
-               VDBG("Packet int");
-               /* Setup package, we only support ep0 as control ep */
-               if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
-                       tripwire_handler(udc, 0,
-                                       (u8 *) (&udc->local_setup_buff));
-                       setup_received_irq(udc, &udc->local_setup_buff);
-                       status = IRQ_HANDLED;
-               }
-
-               /* completion of dtd */
-               if (fsl_readl(&dr_regs->endptcomplete)) {
-                       dtd_complete_irq(udc);
-                       status = IRQ_HANDLED;
-               }
-       }
-
-       /* SOF (for ISO transfer) */
-       if (irq_src & USB_STS_SOF) {
-               status = IRQ_HANDLED;
-       }
-
-       /* Port Change */
-       if (irq_src & USB_STS_PORT_CHANGE) {
-               port_change_irq(udc);
-               status = IRQ_HANDLED;
-       }
-
-       /* Reset Received */
-       if (irq_src & USB_STS_RESET) {
-               reset_irq(udc);
-               status = IRQ_HANDLED;
-       }
-
-       /* Sleep Enable (Suspend) */
-       if (irq_src & USB_STS_SUSPEND) {
-               suspend_irq(udc);
-               status = IRQ_HANDLED;
-       }
-
-       if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
-               VDBG("Error IRQ %x", irq_src);
-       }
-
-       spin_unlock_irqrestore(&udc->lock, flags);
-       return status;
-}
-
-/*----------------------------------------------------------------*
- * Hook to gadget drivers
- * Called by initialization code of gadget drivers
-*----------------------------------------------------------------*/
-int usb_gadget_register_driver(struct usb_gadget_driver *driver)
-{
-       int retval = -ENODEV;
-       unsigned long flags = 0;
-
-       if (!udc_controller)
-               return -ENODEV;
-
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                               && driver->speed != USB_SPEED_HIGH)
-                       || !driver->bind || !driver->disconnect
-                       || !driver->setup)
-               return -EINVAL;
-
-       if (udc_controller->driver)
-               return -EBUSY;
-
-       /* lock is needed but whether should use this lock or another */
-       spin_lock_irqsave(&udc_controller->lock, flags);
-
-       driver->driver.bus = NULL;
-       /* hook up the driver */
-       udc_controller->driver = driver;
-       udc_controller->gadget.dev.driver = &driver->driver;
-       spin_unlock_irqrestore(&udc_controller->lock, flags);
-
-       /* bind udc driver to gadget driver */
-       retval = driver->bind(&udc_controller->gadget);
-       if (retval) {
-               VDBG("bind to %s --> %d", driver->driver.name, retval);
-               udc_controller->gadget.dev.driver = NULL;
-               udc_controller->driver = NULL;
-               goto out;
-       }
-
-       /* Enable DR IRQ reg and Set usbcmd reg  Run bit */
-       dr_controller_run(udc_controller);
-       udc_controller->usb_state = USB_STATE_ATTACHED;
-       udc_controller->ep0_state = WAIT_FOR_SETUP;
-       udc_controller->ep0_dir = 0;
-       printk(KERN_INFO "%s: bind to driver %s\n",
-                       udc_controller->gadget.name, driver->driver.name);
-
-out:
-       if (retval)
-               printk(KERN_WARNING "gadget driver register failed %d\n",
-                      retval);
-       return retval;
-}
-EXPORT_SYMBOL(usb_gadget_register_driver);
-
-/* Disconnect from gadget driver */
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
-{
-       struct fsl_ep *loop_ep;
-       unsigned long flags;
-
-       if (!udc_controller)
-               return -ENODEV;
-
-       if (!driver || driver != udc_controller->driver || !driver->unbind)
-               return -EINVAL;
-
-       if (udc_controller->transceiver)
-               otg_set_peripheral(udc_controller->transceiver, NULL);
-
-       /* stop DR, disable intr */
-       dr_controller_stop(udc_controller);
-
-       /* in fact, no needed */
-       udc_controller->usb_state = USB_STATE_ATTACHED;
-       udc_controller->ep0_state = WAIT_FOR_SETUP;
-       udc_controller->ep0_dir = 0;
-
-       /* stand operation */
-       spin_lock_irqsave(&udc_controller->lock, flags);
-       udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
-       nuke(&udc_controller->eps[0], -ESHUTDOWN);
-       list_for_each_entry(loop_ep, &udc_controller->gadget.ep_list,
-                       ep.ep_list)
-               nuke(loop_ep, -ESHUTDOWN);
-       spin_unlock_irqrestore(&udc_controller->lock, flags);
-
-       /* report disconnect; the controller is already quiesced */
-       driver->disconnect(&udc_controller->gadget);
-
-       /* unbind gadget and unhook driver. */
-       driver->unbind(&udc_controller->gadget);
-       udc_controller->gadget.dev.driver = NULL;
-       udc_controller->driver = NULL;
-
-       printk(KERN_WARNING "unregistered gadget driver '%s'\n",
-              driver->driver.name);
-       return 0;
-}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
-/*-------------------------------------------------------------------------
-               PROC File System Support
--------------------------------------------------------------------------*/
-#ifdef CONFIG_USB_GADGET_DEBUG_FILES
-
-#include <linux/seq_file.h>
-
-static const char proc_filename[] = "driver/fsl_usb2_udc";
-
-static int fsl_proc_read(char *page, char **start, off_t off, int count,
-               int *eof, void *_dev)
-{
-       char *buf = page;
-       char *next = buf;
-       unsigned size = count;
-       unsigned long flags;
-       int t, i;
-       u32 tmp_reg;
-       struct fsl_ep *ep = NULL;
-       struct fsl_req *req;
-
-       struct fsl_udc *udc = udc_controller;
-       if (off != 0)
-               return 0;
-
-       spin_lock_irqsave(&udc->lock, flags);
-
-       /* ------basic driver information ---- */
-       t = scnprintf(next, size,
-                       DRIVER_DESC "\n"
-                       "%s version: %s\n"
-                       "Gadget driver: %s\n\n",
-                       driver_name, DRIVER_VERSION,
-                       udc->driver ? udc->driver->driver.name : "(none)");
-       size -= t;
-       next += t;
-
-       /* ------ DR Registers ----- */
-       tmp_reg = fsl_readl(&dr_regs->usbcmd);
-       t = scnprintf(next, size,
-                       "USBCMD reg:\n"
-                       "SetupTW: %d\n"
-                       "Run/Stop: %s\n\n",
-                       (tmp_reg & USB_CMD_SUTW) ? 1 : 0,
-                       (tmp_reg & USB_CMD_RUN_STOP) ? "Run" : "Stop");
-       size -= t;
-       next += t;
-
-       tmp_reg = fsl_readl(&dr_regs->usbsts);
-       t = scnprintf(next, size,
-                       "USB Status Reg:\n"
-                       "Dr Suspend: %d Reset Received: %d System Error: %s "
-                       "USB Error Interrupt: %s\n\n",
-                       (tmp_reg & USB_STS_SUSPEND) ? 1 : 0,
-                       (tmp_reg & USB_STS_RESET) ? 1 : 0,
-                       (tmp_reg & USB_STS_SYS_ERR) ? "Err" : "Normal",
-                       (tmp_reg & USB_STS_ERR) ? "Err detected" : "No err");
-       size -= t;
-       next += t;
-
-       tmp_reg = fsl_readl(&dr_regs->usbintr);
-       t = scnprintf(next, size,
-                       "USB Intrrupt Enable Reg:\n"
-                       "Sleep Enable: %d SOF Received Enable: %d "
-                       "Reset Enable: %d\n"
-                       "System Error Enable: %d "
-                       "Port Change Dectected Enable: %d\n"
-                       "USB Error Intr Enable: %d USB Intr Enable: %d\n\n",
-                       (tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0,
-                       (tmp_reg & USB_INTR_SOF_EN) ? 1 : 0,
-                       (tmp_reg & USB_INTR_RESET_EN) ? 1 : 0,
-                       (tmp_reg & USB_INTR_SYS_ERR_EN) ? 1 : 0,
-                       (tmp_reg & USB_INTR_PTC_DETECT_EN) ? 1 : 0,
-                       (tmp_reg & USB_INTR_ERR_INT_EN) ? 1 : 0,
-                       (tmp_reg & USB_INTR_INT_EN) ? 1 : 0);
-       size -= t;
-       next += t;
-
-       tmp_reg = fsl_readl(&dr_regs->frindex);
-       t = scnprintf(next, size,
-                       "USB Frame Index Reg: Frame Number is 0x%x\n\n",
-                       (tmp_reg & USB_FRINDEX_MASKS));
-       size -= t;
-       next += t;
-
-       tmp_reg = fsl_readl(&dr_regs->deviceaddr);
-       t = scnprintf(next, size,
-                       "USB Device Address Reg: Device Addr is 0x%x\n\n",
-                       (tmp_reg & USB_DEVICE_ADDRESS_MASK));
-       size -= t;
-       next += t;
-
-       tmp_reg = fsl_readl(&dr_regs->endpointlistaddr);
-       t = scnprintf(next, size,
-                       "USB Endpoint List Address Reg: "
-                       "Device Addr is 0x%x\n\n",
-                       (tmp_reg & USB_EP_LIST_ADDRESS_MASK));
-       size -= t;
-       next += t;
-
-       tmp_reg = fsl_readl(&dr_regs->portsc1);
-       t = scnprintf(next, size,
-               "USB Port Status&Control Reg:\n"
-               "Port Transceiver Type : %s Port Speed: %s\n"
-               "PHY Low Power Suspend: %s Port Reset: %s "
-               "Port Suspend Mode: %s\n"
-               "Over-current Change: %s "
-               "Port Enable/Disable Change: %s\n"
-               "Port Enabled/Disabled: %s "
-               "Current Connect Status: %s\n\n", ( {
-                       char *s;
-                       switch (tmp_reg & PORTSCX_PTS_FSLS) {
-                       case PORTSCX_PTS_UTMI:
-                               s = "UTMI"; break;
-                       case PORTSCX_PTS_ULPI:
-                               s = "ULPI "; break;
-                       case PORTSCX_PTS_FSLS:
-                               s = "FS/LS Serial"; break;
-                       default:
-                               s = "None"; break;
-                       }
-                       s;} ), ( {
-                       char *s;
-                       switch (tmp_reg & PORTSCX_PORT_SPEED_UNDEF) {
-                       case PORTSCX_PORT_SPEED_FULL:
-                               s = "Full Speed"; break;
-                       case PORTSCX_PORT_SPEED_LOW:
-                               s = "Low Speed"; break;
-                       case PORTSCX_PORT_SPEED_HIGH:
-                               s = "High Speed"; break;
-                       default:
-                               s = "Undefined"; break;
-                       }
-                       s;
-               } ),
-               (tmp_reg & PORTSCX_PHY_LOW_POWER_SPD) ?
-               "Normal PHY mode" : "Low power mode",
-               (tmp_reg & PORTSCX_PORT_RESET) ? "In Reset" :
-               "Not in Reset",
-               (tmp_reg & PORTSCX_PORT_SUSPEND) ? "In " : "Not in",
-               (tmp_reg & PORTSCX_OVER_CURRENT_CHG) ? "Dected" :
-               "No",
-               (tmp_reg & PORTSCX_PORT_EN_DIS_CHANGE) ? "Disable" :
-               "Not change",
-               (tmp_reg & PORTSCX_PORT_ENABLE) ? "Enable" :
-               "Not correct",
-               (tmp_reg & PORTSCX_CURRENT_CONNECT_STATUS) ?
-               "Attached" : "Not-Att");
-       size -= t;
-       next += t;
-
-       tmp_reg = fsl_readl(&dr_regs->usbmode);
-       t = scnprintf(next, size,
-                       "USB Mode Reg: Controller Mode is: %s\n\n", ( {
-                               char *s;
-                               switch (tmp_reg & USB_MODE_CTRL_MODE_HOST) {
-                               case USB_MODE_CTRL_MODE_IDLE:
-                                       s = "Idle"; break;
-                               case USB_MODE_CTRL_MODE_DEVICE:
-                                       s = "Device Controller"; break;
-                               case USB_MODE_CTRL_MODE_HOST:
-                                       s = "Host Controller"; break;
-                               default:
-                                       s = "None"; break;
-                               }
-                               s;
-                       } ));
-       size -= t;
-       next += t;
-
-       tmp_reg = fsl_readl(&dr_regs->endptsetupstat);
-       t = scnprintf(next, size,
-                       "Endpoint Setup Status Reg: SETUP on ep 0x%x\n\n",
-                       (tmp_reg & EP_SETUP_STATUS_MASK));
-       size -= t;
-       next += t;
-
-       for (i = 0; i < udc->max_ep / 2; i++) {
-               tmp_reg = fsl_readl(&dr_regs->endptctrl[i]);
-               t = scnprintf(next, size, "EP Ctrl Reg [0x%x]: = [0x%x]\n",
-                               i, tmp_reg);
-               size -= t;
-               next += t;
-       }
-       tmp_reg = fsl_readl(&dr_regs->endpointprime);
-       t = scnprintf(next, size, "EP Prime Reg = [0x%x]\n\n", tmp_reg);
-       size -= t;
-       next += t;
-
-       tmp_reg = usb_sys_regs->snoop1;
-       t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
-       size -= t;
-       next += t;
-
-       tmp_reg = usb_sys_regs->control;
-       t = scnprintf(next, size, "General Control Reg : = [0x%x]\n\n",
-                       tmp_reg);
-       size -= t;
-       next += t;
-
-       /* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
-       ep = &udc->eps[0];
-       t = scnprintf(next, size, "For %s Maxpkt is 0x%x index is 0x%x\n",
-                       ep->ep.name, ep_maxpacket(ep), ep_index(ep));
-       size -= t;
-       next += t;
-
-       if (list_empty(&ep->queue)) {
-               t = scnprintf(next, size, "its req queue is empty\n\n");
-               size -= t;
-               next += t;
-       } else {
-               list_for_each_entry(req, &ep->queue, queue) {
-                       t = scnprintf(next, size,
-                               "req %p actual 0x%x length 0x%x buf %p\n",
-                               &req->req, req->req.actual,
-                               req->req.length, req->req.buf);
-                       size -= t;
-                       next += t;
-               }
-       }
-       /* other gadget->eplist ep */
-       list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
-               if (ep->desc) {
-                       t = scnprintf(next, size,
-                                       "\nFor %s Maxpkt is 0x%x "
-                                       "index is 0x%x\n",
-                                       ep->ep.name, ep_maxpacket(ep),
-                                       ep_index(ep));
-                       size -= t;
-                       next += t;
-
-                       if (list_empty(&ep->queue)) {
-                               t = scnprintf(next, size,
-                                               "its req queue is empty\n\n");
-                               size -= t;
-                               next += t;
-                       } else {
-                               list_for_each_entry(req, &ep->queue, queue) {
-                                       t = scnprintf(next, size,
-                                               "req %p actual 0x%x length "
-                                               "0x%x  buf %p\n",
-                                               &req->req, req->req.actual,
-                                               req->req.length, req->req.buf);
-                                       size -= t;
-                                       next += t;
-                                       }       /* end for each_entry of ep req */
-                               }       /* end for else */
-                       }       /* end for if(ep->queue) */
-               }               /* end (ep->desc) */
-
-       spin_unlock_irqrestore(&udc->lock, flags);
-
-       *eof = 1;
-       return count - size;
-}
-
-#define create_proc_file()     create_proc_read_entry(proc_filename, \
-                               0, NULL, fsl_proc_read, NULL)
-
-#define remove_proc_file()     remove_proc_entry(proc_filename, NULL)
-
-#else                          /* !CONFIG_USB_GADGET_DEBUG_FILES */
-
-#define create_proc_file()     do {} while (0)
-#define remove_proc_file()     do {} while (0)
-
-#endif                         /* CONFIG_USB_GADGET_DEBUG_FILES */
-
-/*-------------------------------------------------------------------------*/
-
-/* Release udc structures */
-static void fsl_udc_release(struct device *dev)
-{
-       complete(udc_controller->done);
-       dma_free_coherent(dev, udc_controller->ep_qh_size,
-                       udc_controller->ep_qh, udc_controller->ep_qh_dma);
-       kfree(udc_controller);
-}
-
-/******************************************************************
-       Internal structure setup functions
-*******************************************************************/
-/*------------------------------------------------------------------
- * init resource for globle controller
- * Return the udc handle on success or NULL on failure
- ------------------------------------------------------------------*/
-static int __init struct_udc_setup(struct fsl_udc *udc,
-               struct platform_device *pdev)
-{
-       struct fsl_usb2_platform_data *pdata;
-       size_t size;
-
-       pdata = pdev->dev.platform_data;
-       udc->phy_mode = pdata->phy_mode;
-
-       udc->eps = kzalloc(sizeof(struct fsl_ep) * udc->max_ep, GFP_KERNEL);
-       if (!udc->eps) {
-               ERR("malloc fsl_ep failed\n");
-               return -1;
-       }
-
-       /* initialized QHs, take care of alignment */
-       size = udc->max_ep * sizeof(struct ep_queue_head);
-       if (size < QH_ALIGNMENT)
-               size = QH_ALIGNMENT;
-       else if ((size % QH_ALIGNMENT) != 0) {
-               size += QH_ALIGNMENT + 1;
-               size &= ~(QH_ALIGNMENT - 1);
-       }
-       udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
-                                       &udc->ep_qh_dma, GFP_KERNEL);
-       if (!udc->ep_qh) {
-               ERR("malloc QHs for udc failed\n");
-               kfree(udc->eps);
-               return -1;
-       }
-
-       udc->ep_qh_size = size;
-
-       /* Initialize ep0 status request structure */
-       /* FIXME: fsl_alloc_request() ignores ep argument */
-       udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
-                       struct fsl_req, req);
-       /* allocate a small amount of memory to get valid address */
-       udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
-       udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
-
-       udc->resume_state = USB_STATE_NOTATTACHED;
-       udc->usb_state = USB_STATE_POWERED;
-       udc->ep0_dir = 0;
-       udc->remote_wakeup = 0; /* default to 0 on reset */
-
-       return 0;
-}
-
-/*----------------------------------------------------------------
- * Setup the fsl_ep struct for eps
- * Link fsl_ep->ep to gadget->ep_list
- * ep0out is not used so do nothing here
- * ep0in should be taken care
- *--------------------------------------------------------------*/
-static int __init struct_ep_setup(struct fsl_udc *udc, unsigned char index,
-               char *name, int link)
-{
-       struct fsl_ep *ep = &udc->eps[index];
-
-       ep->udc = udc;
-       strcpy(ep->name, name);
-       ep->ep.name = ep->name;
-
-       ep->ep.ops = &fsl_ep_ops;
-       ep->stopped = 0;
-
-       /* for ep0: maxP defined in desc
-        * for other eps, maxP is set by epautoconfig() called by gadget layer
-        */
-       ep->ep.maxpacket = (unsigned short) ~0;
-
-       /* the queue lists any req for this ep */
-       INIT_LIST_HEAD(&ep->queue);
-
-       /* gagdet.ep_list used for ep_autoconfig so no ep0 */
-       if (link)
-               list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
-       ep->gadget = &udc->gadget;
-       ep->qh = &udc->ep_qh[index];
-
-       return 0;
-}
-
-/* Driver probe function
- * all intialization operations implemented here except enabling usb_intr reg
- * board setup should have been done in the platform code
- */
-static int __init fsl_udc_probe(struct platform_device *pdev)
-{
-       struct resource *res;
-       int ret = -ENODEV;
-       unsigned int i;
-       u32 dccparams;
-
-       if (strcmp(pdev->name, driver_name)) {
-               VDBG("Wrong device");
-               return -ENODEV;
-       }
-
-       udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
-       if (udc_controller == NULL) {
-               ERR("malloc udc failed\n");
-               return -ENOMEM;
-       }
-
-       spin_lock_init(&udc_controller->lock);
-       udc_controller->stopped = 1;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               ret = -ENXIO;
-               goto err_kfree;
-       }
-
-       if (!request_mem_region(res->start, res->end - res->start + 1,
-                               driver_name)) {
-               ERR("request mem region for %s failed\n", pdev->name);
-               ret = -EBUSY;
-               goto err_kfree;
-       }
-
-       dr_regs = ioremap(res->start, res->end - res->start + 1);
-       if (!dr_regs) {
-               ret = -ENOMEM;
-               goto err_release_mem_region;
-       }
-
-       usb_sys_regs = (struct usb_sys_interface *)
-                       ((u32)dr_regs + USB_DR_SYS_OFFSET);
-
-       /* Read Device Controller Capability Parameters register */
-       dccparams = fsl_readl(&dr_regs->dccparams);
-       if (!(dccparams & DCCPARAMS_DC)) {
-               ERR("This SOC doesn't support device role\n");
-               ret = -ENODEV;
-               goto err_iounmap;
-       }
-       /* Get max device endpoints */
-       /* DEN is bidirectional ep number, max_ep doubles the number */
-       udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;
-
-       udc_controller->irq = platform_get_irq(pdev, 0);
-       if (!udc_controller->irq) {
-               ret = -ENODEV;
-               goto err_iounmap;
-       }
-
-       ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
-                       driver_name, udc_controller);
-       if (ret != 0) {
-               ERR("cannot request irq %d err %d\n",
-                               udc_controller->irq, ret);
-               goto err_iounmap;
-       }
-
-       /* Initialize the udc structure including QH member and other member */
-       if (struct_udc_setup(udc_controller, pdev)) {
-               ERR("Can't initialize udc data structure\n");
-               ret = -ENOMEM;
-               goto err_free_irq;
-       }
-
-       /* initialize usb hw reg except for regs for EP,
-        * leave usbintr reg untouched */
-       dr_controller_setup(udc_controller);
-
-       /* Setup gadget structure */
-       udc_controller->gadget.ops = &fsl_gadget_ops;
-       udc_controller->gadget.is_dualspeed = 1;
-       udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
-       INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
-       udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
-       udc_controller->gadget.name = driver_name;
-
-       /* Setup gadget.dev and register with kernel */
-       dev_set_name(&udc_controller->gadget.dev, "gadget");
-       udc_controller->gadget.dev.release = fsl_udc_release;
-       udc_controller->gadget.dev.parent = &pdev->dev;
-       ret = device_register(&udc_controller->gadget.dev);
-       if (ret < 0)
-               goto err_free_irq;
-
-       /* setup QH and epctrl for ep0 */
-       ep0_setup(udc_controller);
-
-       /* setup udc->eps[] for ep0 */
-       struct_ep_setup(udc_controller, 0, "ep0", 0);
-       /* for ep0: the desc defined here;
-        * for other eps, gadget layer called ep_enable with defined desc
-        */
-       udc_controller->eps[0].desc = &fsl_ep0_desc;
-       udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
-
-       /* setup the udc->eps[] for non-control endpoints and link
-        * to gadget.ep_list */
-       for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
-               char name[14];
-
-               sprintf(name, "ep%dout", i);
-               struct_ep_setup(udc_controller, i * 2, name, 1);
-               sprintf(name, "ep%din", i);
-               struct_ep_setup(udc_controller, i * 2 + 1, name, 1);
-       }
-
-       /* use dma_pool for TD management */
-       udc_controller->td_pool = dma_pool_create("udc_td", &pdev->dev,
-                       sizeof(struct ep_td_struct),
-                       DTD_ALIGNMENT, UDC_DMA_BOUNDARY);
-       if (udc_controller->td_pool == NULL) {
-               ret = -ENOMEM;
-               goto err_unregister;
-       }
-       create_proc_file();
-       return 0;
-
-err_unregister:
-       device_unregister(&udc_controller->gadget.dev);
-err_free_irq:
-       free_irq(udc_controller->irq, udc_controller);
-err_iounmap:
-       iounmap(dr_regs);
-err_release_mem_region:
-       release_mem_region(res->start, res->end - res->start + 1);
-err_kfree:
-       kfree(udc_controller);
-       udc_controller = NULL;
-       return ret;
-}
-
-/* Driver removal function
- * Free resources and finish pending transactions
- */
-static int __exit fsl_udc_remove(struct platform_device *pdev)
-{
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       DECLARE_COMPLETION(done);
-
-       if (!udc_controller)
-               return -ENODEV;
-       udc_controller->done = &done;
-
-       /* DR has been stopped in usb_gadget_unregister_driver() */
-       remove_proc_file();
-
-       /* Free allocated memory */
-       kfree(udc_controller->status_req->req.buf);
-       kfree(udc_controller->status_req);
-       kfree(udc_controller->eps);
-
-       dma_pool_destroy(udc_controller->td_pool);
-       free_irq(udc_controller->irq, udc_controller);
-       iounmap(dr_regs);
-       release_mem_region(res->start, res->end - res->start + 1);
-
-       device_unregister(&udc_controller->gadget.dev);
-       /* free udc --wait for the release() finished */
-       wait_for_completion(&done);
-
-       return 0;
-}
-
-/*-----------------------------------------------------------------
- * Modify Power management attributes
- * Used by OTG statemachine to disable gadget temporarily
- -----------------------------------------------------------------*/
-static int fsl_udc_suspend(struct platform_device *pdev, pm_message_t state)
-{
-       dr_controller_stop(udc_controller);
-       return 0;
-}
-
-/*-----------------------------------------------------------------
- * Invoked on USB resume. May be called in_interrupt.
- * Here we start the DR controller and enable the irq
- *-----------------------------------------------------------------*/
-static int fsl_udc_resume(struct platform_device *pdev)
-{
-       /* Enable DR irq reg and set controller Run */
-       if (udc_controller->stopped) {
-               dr_controller_setup(udc_controller);
-               dr_controller_run(udc_controller);
-       }
-       udc_controller->usb_state = USB_STATE_ATTACHED;
-       udc_controller->ep0_state = WAIT_FOR_SETUP;
-       udc_controller->ep0_dir = 0;
-       return 0;
-}
-
-/*-------------------------------------------------------------------------
-       Register entry point for the peripheral controller driver
---------------------------------------------------------------------------*/
-
-static struct platform_driver udc_driver = {
-       .remove  = __exit_p(fsl_udc_remove),
-       /* these suspend and resume are not usb suspend and resume */
-       .suspend = fsl_udc_suspend,
-       .resume  = fsl_udc_resume,
-       .driver  = {
-               .name = (char *)driver_name,
-               .owner = THIS_MODULE,
-       },
-};
-
-static int __init udc_init(void)
-{
-       printk(KERN_INFO "%s (%s)\n", driver_desc, DRIVER_VERSION);
-       return platform_driver_probe(&udc_driver, fsl_udc_probe);
-}
-
-module_init(udc_init);
-
-static void __exit udc_exit(void)
-{
-       platform_driver_unregister(&udc_driver);
-       printk(KERN_WARNING "%s unregistered\n", driver_desc);
-}
-
-module_exit(udc_exit);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:fsl-usb2-udc");
index e63ef12645f5524582a65059875f404f438a6554..20aeceed48c712e1db819b0047f970787923205f 100644 (file)
@@ -563,4 +563,22 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
                                        * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
 #define get_pipe_by_ep(EP)     (ep_index(EP) * 2 + ep_is_in(EP))
 
+struct platform_device;
+#ifdef CONFIG_ARCH_MXC
+int fsl_udc_clk_init(struct platform_device *pdev);
+void fsl_udc_clk_finalize(struct platform_device *pdev);
+void fsl_udc_clk_release(void);
+#else
+static inline int fsl_udc_clk_init(struct platform_device *pdev)
+{
+       return 0;
+}
+static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
+{
+}
+static inline void fsl_udc_clk_release(void)
+{
+}
+#endif
+
 #endif
index ec6d439a2aa588c0872e86335ff22079f6463f6c..8e0e9a0b736479a77aa59a8e73bce9a2c28aa512 100644 (file)
 #define gadget_is_musbhdrc(g)  0
 #endif
 
+#ifdef CONFIG_USB_GADGET_LANGWELL
+#define gadget_is_langwell(g)  (!strcmp("langwell_udc", (g)->name))
+#else
+#define gadget_is_langwell(g)  0
+#endif
+
 /* from Montavista kernel (?) */
 #ifdef CONFIG_USB_GADGET_MPC8272
 #define gadget_is_mpc8272(g)   !strcmp("mpc8272_udc", (g)->name)
@@ -231,6 +237,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
                return 0x22;
        else if (gadget_is_ci13xxx(gadget))
                return 0x23;
+       else if (gadget_is_langwell(gadget))
+               return 0x24;
        return -ENOENT;
 }
 
index de010c939dbbca672c5f992f85471aab5d0b6dec..112bb40a427cd4a217ed69af61d73effc8863959 100644 (file)
@@ -110,10 +110,10 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
                return -EINVAL;
        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;
-       if (ep->num != (desc->bEndpointAddress & 0x0f))
+       if (ep->num != usb_endpoint_num(desc))
                return -EINVAL;
 
-       switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+       switch (usb_endpoint_type(desc)) {
        case USB_ENDPOINT_XFER_BULK:
        case USB_ENDPOINT_XFER_INT:
                break;
@@ -142,7 +142,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
        /* ep1/ep2 dma direction is chosen early; it works in the other
         * direction, with pio.  be cautious with out-dma.
         */
-       ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0;
+       ep->is_in = usb_endpoint_dir_in(desc);
        if (ep->is_in) {
                mode |= 1;
                ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
index 168658b4b4e26d2bc7b2614cf5b8cb891a3614e5..c52a681f376cba495303fe843e9cd0839a15a234 100644 (file)
@@ -415,6 +415,13 @@ static int write_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
        u8      *buf;
        int     length, count, temp;
 
+       if (unlikely(__raw_readl(imx_ep->imx_usb->base +
+                                USB_EP_STAT(EP_NO(imx_ep))) & EPSTAT_ZLPS)) {
+               D_TRX(imx_ep->imx_usb->dev, "<%s> zlp still queued in EP %s\n",
+                       __func__, imx_ep->ep.name);
+               return -1;
+       }
+
        buf = req->req.buf + req->req.actual;
        prefetch(buf);
 
@@ -734,9 +741,12 @@ static struct usb_request *imx_ep_alloc_request
 {
        struct imx_request *req;
 
+       if (!usb_ep)
+               return NULL;
+
        req = kzalloc(sizeof *req, gfp_flags);
-       if (!req || !usb_ep)
-               return 0;
+       if (!req)
+               return NULL;
 
        INIT_LIST_HEAD(&req->queue);
        req->in_use = 0;
index d20937f28a1988209557a356f061900ec2706d35..7d33f50b5874bc683ed42db17aa87c8b59544035 100644 (file)
@@ -384,9 +384,8 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
                return value;
 
        /* halt any endpoint by doing a "wrong direction" i/o call */
-       if (data->desc.bEndpointAddress & USB_DIR_IN) {
-               if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-                               == USB_ENDPOINT_XFER_ISOC)
+       if (usb_endpoint_dir_in(&data->desc)) {
+               if (usb_endpoint_xfer_isoc(&data->desc))
                        return -EINVAL;
                DBG (data->dev, "%s halt\n", data->name);
                spin_lock_irq (&data->dev->lock);
@@ -428,9 +427,8 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                return value;
 
        /* halt any endpoint by doing a "wrong direction" i/o call */
-       if (!(data->desc.bEndpointAddress & USB_DIR_IN)) {
-               if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-                               == USB_ENDPOINT_XFER_ISOC)
+       if (!usb_endpoint_dir_in(&data->desc)) {
+               if (usb_endpoint_xfer_isoc(&data->desc))
                        return -EINVAL;
                DBG (data->dev, "%s halt\n", data->name);
                spin_lock_irq (&data->dev->lock);
@@ -691,7 +689,7 @@ ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
        struct ep_data          *epdata = iocb->ki_filp->private_data;
        char                    *buf;
 
-       if (unlikely(epdata->desc.bEndpointAddress & USB_DIR_IN))
+       if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
                return -EINVAL;
 
        buf = kmalloc(iocb->ki_left, GFP_KERNEL);
@@ -711,7 +709,7 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
        size_t                  len = 0;
        int                     i = 0;
 
-       if (unlikely(!(epdata->desc.bEndpointAddress & USB_DIR_IN)))
+       if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
                return -EINVAL;
 
        buf = kmalloc(iocb->ki_left, GFP_KERNEL);
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
new file mode 100644 (file)
index 0000000..6829d59
--- /dev/null
@@ -0,0 +1,3373 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+
+/* #undef      DEBUG */
+/* #undef      VERBOSE */
+
+#if defined(CONFIG_USB_LANGWELL_OTG)
+#define        OTG_TRANSCEIVER
+#endif
+
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include "langwell_udc.h"
+
+
+#define        DRIVER_DESC             "Intel Langwell USB Device Controller driver"
+#define        DRIVER_VERSION          "16 May 2009"
+
+static const char driver_name[] = "langwell_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+
+/* controller device global variable */
+static struct langwell_udc     *the_controller;
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor
+langwell_ep0_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     0,
+       .bmAttributes =         USB_ENDPOINT_XFER_CONTROL,
+       .wMaxPacketSize =       EP0_MAX_PKT_SIZE,
+};
+
+
+/*-------------------------------------------------------------------------*/
+/* debugging */
+
+#ifdef DEBUG
+#define        DBG(dev, fmt, args...) \
+       pr_debug("%s %s: " fmt , driver_name, \
+                       pci_name(dev->pdev), ## args)
+#else
+#define        DBG(dev, fmt, args...) \
+       do { } while (0)
+#endif /* DEBUG */
+
+
+#ifdef VERBOSE
+#define        VDBG DBG
+#else
+#define        VDBG(dev, fmt, args...) \
+       do { } while (0)
+#endif /* VERBOSE */
+
+
+#define        ERROR(dev, fmt, args...) \
+       pr_err("%s %s: " fmt , driver_name, \
+                       pci_name(dev->pdev), ## args)
+
+#define        WARNING(dev, fmt, args...) \
+       pr_warning("%s %s: " fmt , driver_name, \
+                       pci_name(dev->pdev), ## args)
+
+#define        INFO(dev, fmt, args...) \
+       pr_info("%s %s: " fmt , driver_name, \
+                       pci_name(dev->pdev), ## args)
+
+
+#ifdef VERBOSE
+static inline void print_all_registers(struct langwell_udc *dev)
+{
+       int     i;
+
+       /* Capability Registers */
+       printk(KERN_DEBUG "Capability Registers (offset: "
+                       "0x%04x, length: 0x%08x)\n",
+                       CAP_REG_OFFSET,
+                       (u32)sizeof(struct langwell_cap_regs));
+       printk(KERN_DEBUG "caplength=0x%02x\n",
+                       readb(&dev->cap_regs->caplength));
+       printk(KERN_DEBUG "hciversion=0x%04x\n",
+                       readw(&dev->cap_regs->hciversion));
+       printk(KERN_DEBUG "hcsparams=0x%08x\n",
+                       readl(&dev->cap_regs->hcsparams));
+       printk(KERN_DEBUG "hccparams=0x%08x\n",
+                       readl(&dev->cap_regs->hccparams));
+       printk(KERN_DEBUG "dciversion=0x%04x\n",
+                       readw(&dev->cap_regs->dciversion));
+       printk(KERN_DEBUG "dccparams=0x%08x\n",
+                       readl(&dev->cap_regs->dccparams));
+
+       /* Operational Registers */
+       printk(KERN_DEBUG "Operational Registers (offset: "
+                       "0x%04x, length: 0x%08x)\n",
+                       OP_REG_OFFSET,
+                       (u32)sizeof(struct langwell_op_regs));
+       printk(KERN_DEBUG "extsts=0x%08x\n",
+                       readl(&dev->op_regs->extsts));
+       printk(KERN_DEBUG "extintr=0x%08x\n",
+                       readl(&dev->op_regs->extintr));
+       printk(KERN_DEBUG "usbcmd=0x%08x\n",
+                       readl(&dev->op_regs->usbcmd));
+       printk(KERN_DEBUG "usbsts=0x%08x\n",
+                       readl(&dev->op_regs->usbsts));
+       printk(KERN_DEBUG "usbintr=0x%08x\n",
+                       readl(&dev->op_regs->usbintr));
+       printk(KERN_DEBUG "frindex=0x%08x\n",
+                       readl(&dev->op_regs->frindex));
+       printk(KERN_DEBUG "ctrldssegment=0x%08x\n",
+                       readl(&dev->op_regs->ctrldssegment));
+       printk(KERN_DEBUG "deviceaddr=0x%08x\n",
+                       readl(&dev->op_regs->deviceaddr));
+       printk(KERN_DEBUG "endpointlistaddr=0x%08x\n",
+                       readl(&dev->op_regs->endpointlistaddr));
+       printk(KERN_DEBUG "ttctrl=0x%08x\n",
+                       readl(&dev->op_regs->ttctrl));
+       printk(KERN_DEBUG "burstsize=0x%08x\n",
+                       readl(&dev->op_regs->burstsize));
+       printk(KERN_DEBUG "txfilltuning=0x%08x\n",
+                       readl(&dev->op_regs->txfilltuning));
+       printk(KERN_DEBUG "txttfilltuning=0x%08x\n",
+                       readl(&dev->op_regs->txttfilltuning));
+       printk(KERN_DEBUG "ic_usb=0x%08x\n",
+                       readl(&dev->op_regs->ic_usb));
+       printk(KERN_DEBUG "ulpi_viewport=0x%08x\n",
+                       readl(&dev->op_regs->ulpi_viewport));
+       printk(KERN_DEBUG "configflag=0x%08x\n",
+                       readl(&dev->op_regs->configflag));
+       printk(KERN_DEBUG "portsc1=0x%08x\n",
+                       readl(&dev->op_regs->portsc1));
+       printk(KERN_DEBUG "devlc=0x%08x\n",
+                       readl(&dev->op_regs->devlc));
+       printk(KERN_DEBUG "otgsc=0x%08x\n",
+                       readl(&dev->op_regs->otgsc));
+       printk(KERN_DEBUG "usbmode=0x%08x\n",
+                       readl(&dev->op_regs->usbmode));
+       printk(KERN_DEBUG "endptnak=0x%08x\n",
+                       readl(&dev->op_regs->endptnak));
+       printk(KERN_DEBUG "endptnaken=0x%08x\n",
+                       readl(&dev->op_regs->endptnaken));
+       printk(KERN_DEBUG "endptsetupstat=0x%08x\n",
+                       readl(&dev->op_regs->endptsetupstat));
+       printk(KERN_DEBUG "endptprime=0x%08x\n",
+                       readl(&dev->op_regs->endptprime));
+       printk(KERN_DEBUG "endptflush=0x%08x\n",
+                       readl(&dev->op_regs->endptflush));
+       printk(KERN_DEBUG "endptstat=0x%08x\n",
+                       readl(&dev->op_regs->endptstat));
+       printk(KERN_DEBUG "endptcomplete=0x%08x\n",
+                       readl(&dev->op_regs->endptcomplete));
+
+       for (i = 0; i < dev->ep_max / 2; i++) {
+               printk(KERN_DEBUG "endptctrl[%d]=0x%08x\n",
+                               i, readl(&dev->op_regs->endptctrl[i]));
+       }
+}
+#endif /* VERBOSE */
+
+
+/*-------------------------------------------------------------------------*/
+
+#define        DIR_STRING(bAddress)    (((bAddress) & USB_DIR_IN) ? "in" : "out")
+
+#define is_in(ep)      (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
+                       USB_DIR_IN) : ((ep)->desc->bEndpointAddress \
+                       & USB_DIR_IN) == USB_DIR_IN)
+
+
+#ifdef DEBUG
+static char *type_string(u8 bmAttributes)
+{
+       switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+       case USB_ENDPOINT_XFER_BULK:
+               return "bulk";
+       case USB_ENDPOINT_XFER_ISOC:
+               return "iso";
+       case USB_ENDPOINT_XFER_INT:
+               return "int";
+       }
+
+       return "control";
+}
+#endif
+
+
+/* configure endpoint control registers */
+static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
+               unsigned char is_in, unsigned char ep_type)
+{
+       struct langwell_udc     *dev;
+       u32                     endptctrl;
+
+       dev = ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+       if (is_in) {    /* TX */
+               if (ep_num)
+                       endptctrl |= EPCTRL_TXR;
+               endptctrl |= EPCTRL_TXE;
+               endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
+       } else {        /* RX */
+               if (ep_num)
+                       endptctrl |= EPCTRL_RXR;
+               endptctrl |= EPCTRL_RXE;
+               endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
+       }
+
+       writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* reset ep0 dQH and endptctrl */
+static void ep0_reset(struct langwell_udc *dev)
+{
+       struct langwell_ep      *ep;
+       int                     i;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* ep0 in and out */
+       for (i = 0; i < 2; i++) {
+               ep = &dev->ep[i];
+               ep->dev = dev;
+
+               /* ep0 dQH */
+               ep->dqh = &dev->ep_dqh[i];
+
+               /* configure ep0 endpoint capabilities in dQH */
+               ep->dqh->dqh_ios = 1;
+               ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
+
+               /* FIXME: enable ep0-in HW zero length termination select */
+               if (is_in(ep))
+                       ep->dqh->dqh_zlt = 0;
+               ep->dqh->dqh_mult = 0;
+
+               /* configure ep0 control registers */
+               ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
+       }
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoints operations */
+
+/* configure endpoint, making it usable */
+static int langwell_ep_enable(struct usb_ep *_ep,
+               const struct usb_endpoint_descriptor *desc)
+{
+       struct langwell_udc     *dev;
+       struct langwell_ep      *ep;
+       u16                     max = 0;
+       unsigned long           flags;
+       int                     retval = 0;
+       unsigned char           zlt, ios = 0, mult = 0;
+
+       ep = container_of(_ep, struct langwell_ep, ep);
+       dev = ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (!_ep || !desc || ep->desc
+                       || desc->bDescriptorType != USB_DT_ENDPOINT)
+               return -EINVAL;
+
+       if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+               return -ESHUTDOWN;
+
+       max = le16_to_cpu(desc->wMaxPacketSize);
+
+       /*
+        * disable HW zero length termination select
+        * driver handles zero length packet through req->req.zero
+        */
+       zlt = 1;
+
+       /*
+        * sanity check type, direction, address, and then
+        * initialize the endpoint capabilities fields in dQH
+        */
+       switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               ios = 1;
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+               if ((dev->gadget.speed == USB_SPEED_HIGH
+                                       && max != 512)
+                               || (dev->gadget.speed == USB_SPEED_FULL
+                                       && max > 64)) {
+                       goto done;
+               }
+               break;
+       case USB_ENDPOINT_XFER_INT:
+               if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
+                       goto done;
+
+               switch (dev->gadget.speed) {
+               case USB_SPEED_HIGH:
+                       if (max <= 1024)
+                               break;
+               case USB_SPEED_FULL:
+                       if (max <= 64)
+                               break;
+               default:
+                       if (max <= 8)
+                               break;
+                       goto done;
+               }
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               if (strstr(ep->ep.name, "-bulk")
+                               || strstr(ep->ep.name, "-int"))
+                       goto done;
+
+               switch (dev->gadget.speed) {
+               case USB_SPEED_HIGH:
+                       if (max <= 1024)
+                               break;
+               case USB_SPEED_FULL:
+                       if (max <= 1023)
+                               break;
+               default:
+                       goto done;
+               }
+               /*
+                * FIXME:
+                * calculate transactions needed for high bandwidth iso
+                */
+               mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+       max = max & 0x7ff;      /* bit 0~10 */
+               /* 3 transactions at most */
+               if (mult > 3)
+                       goto done;
+               break;
+       default:
+               goto done;
+       }
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* configure endpoint capabilities in dQH */
+       ep->dqh->dqh_ios = ios;
+       ep->dqh->dqh_mpl = cpu_to_le16(max);
+       ep->dqh->dqh_zlt = zlt;
+       ep->dqh->dqh_mult = mult;
+
+       ep->ep.maxpacket = max;
+       ep->desc = desc;
+       ep->stopped = 0;
+       ep->ep_num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+
+       /* ep_type */
+       ep->ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+       /* configure endpoint control registers */
+       ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
+
+       DBG(dev, "enabled %s (ep%d%s-%s), max %04x\n",
+                       _ep->name,
+                       ep->ep_num,
+                       DIR_STRING(desc->bEndpointAddress),
+                       type_string(desc->bmAttributes),
+                       max);
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+done:
+       VDBG(dev, "<--- %s()\n", __func__);
+       return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* retire a request */
+static void done(struct langwell_ep *ep, struct langwell_request *req,
+               int status)
+{
+       struct langwell_udc     *dev = ep->dev;
+       unsigned                stopped = ep->stopped;
+       struct langwell_dtd     *curr_dtd, *next_dtd;
+       int                     i;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* remove the req from ep->queue */
+       list_del_init(&req->queue);
+
+       if (req->req.status == -EINPROGRESS)
+               req->req.status = status;
+       else
+               status = req->req.status;
+
+       /* free dTD for the request */
+       next_dtd = req->head;
+       for (i = 0; i < req->dtd_count; i++) {
+               curr_dtd = next_dtd;
+               if (i != req->dtd_count - 1)
+                       next_dtd = curr_dtd->next_dtd_virt;
+               dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
+       }
+
+       if (req->mapped) {
+               dma_unmap_single(&dev->pdev->dev, req->req.dma, req->req.length,
+                       is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+               req->req.dma = DMA_ADDR_INVALID;
+               req->mapped = 0;
+       } else
+               dma_sync_single_for_cpu(&dev->pdev->dev, req->req.dma,
+                               req->req.length,
+                               is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+       if (status != -ESHUTDOWN)
+               DBG(dev, "complete %s, req %p, stat %d, len %u/%u\n",
+                       ep->ep.name, &req->req, status,
+                       req->req.actual, req->req.length);
+
+       /* don't modify queue heads during completion callback */
+       ep->stopped = 1;
+
+       spin_unlock(&dev->lock);
+       /* complete routine from gadget driver */
+       if (req->req.complete)
+               req->req.complete(&ep->ep, &req->req);
+
+       spin_lock(&dev->lock);
+       ep->stopped = stopped;
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+static void langwell_ep_fifo_flush(struct usb_ep *_ep);
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct langwell_ep *ep, int status)
+{
+       /* called with spinlock held */
+       ep->stopped = 1;
+
+       /* endpoint fifo flush */
+       if (&ep->ep && ep->desc)
+               langwell_ep_fifo_flush(&ep->ep);
+
+       while (!list_empty(&ep->queue)) {
+               struct langwell_request *req = NULL;
+               req = list_entry(ep->queue.next, struct langwell_request,
+                               queue);
+               done(ep, req, status);
+       }
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint is no longer usable */
+static int langwell_ep_disable(struct usb_ep *_ep)
+{
+       struct langwell_ep      *ep;
+       unsigned long           flags;
+       struct langwell_udc     *dev;
+       int                     ep_num;
+       u32                     endptctrl;
+
+       ep = container_of(_ep, struct langwell_ep, ep);
+       dev = ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (!_ep || !ep->desc)
+               return -EINVAL;
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* disable endpoint control register */
+       ep_num = ep->ep_num;
+       endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+       if (is_in(ep))
+               endptctrl &= ~EPCTRL_TXE;
+       else
+               endptctrl &= ~EPCTRL_RXE;
+       writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+       /* nuke all pending requests (does flush) */
+       nuke(ep, -ESHUTDOWN);
+
+       ep->desc = NULL;
+       ep->stopped = 1;
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       DBG(dev, "disabled %s\n", _ep->name);
+       VDBG(dev, "<--- %s()\n", __func__);
+
+       return 0;
+}
+
+
+/* allocate a request object to use with this endpoint */
+static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
+               gfp_t gfp_flags)
+{
+       struct langwell_ep      *ep;
+       struct langwell_udc     *dev;
+       struct langwell_request *req = NULL;
+
+       if (!_ep)
+               return NULL;
+
+       ep = container_of(_ep, struct langwell_ep, ep);
+       dev = ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       req = kzalloc(sizeof(*req), gfp_flags);
+       if (!req)
+               return NULL;
+
+       req->req.dma = DMA_ADDR_INVALID;
+       INIT_LIST_HEAD(&req->queue);
+
+       VDBG(dev, "alloc request for %s\n", _ep->name);
+       VDBG(dev, "<--- %s()\n", __func__);
+       return &req->req;
+}
+
+
+/* free a request object */
+static void langwell_free_request(struct usb_ep *_ep,
+               struct usb_request *_req)
+{
+       struct langwell_ep      *ep;
+       struct langwell_udc     *dev;
+       struct langwell_request *req = NULL;
+
+       ep = container_of(_ep, struct langwell_ep, ep);
+       dev = ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (!_ep || !_req)
+               return;
+
+       req = container_of(_req, struct langwell_request, req);
+       WARN_ON(!list_empty(&req->queue));
+
+       /* _req already validated non-NULL above */
+       kfree(req);
+
+       VDBG(dev, "free request for %s\n", _ep->name);
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* queue dTD and PRIME endpoint */
+static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
+{
+       u32                     bit_mask, usbcmd, endptstat, dtd_dma;
+       u8                      dtd_status;
+       int                     i;
+       struct langwell_dqh     *dqh;
+       struct langwell_udc     *dev;
+
+       dev = ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       i = ep->ep_num * 2 + is_in(ep);
+       dqh = &dev->ep_dqh[i];
+
+       if (ep->ep_num)
+               VDBG(dev, "%s\n", ep->name);
+       else
+               /* ep0 */
+               VDBG(dev, "%s-%s\n", ep->name, is_in(ep) ? "in" : "out");
+
+       VDBG(dev, "ep_dqh[%d] addr: 0x%08x\n", i, (u32)&(dev->ep_dqh[i]));
+
+       bit_mask = is_in(ep) ?
+               (1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
+
+       VDBG(dev, "bit_mask = 0x%08x\n", bit_mask);
+
+       /* check if the pipe is empty */
+       if (!(list_empty(&ep->queue))) {
+               /* add dTD to the end of linked list */
+               struct langwell_request *lastreq;
+               lastreq = list_entry(ep->queue.prev,
+                               struct langwell_request, queue);
+
+               lastreq->tail->dtd_next =
+                       cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);
+
+               /* read prime bit, if 1 goto out */
+               if (readl(&dev->op_regs->endptprime) & bit_mask)
+                       goto out;
+
+               do {
+                       /* set ATDTW bit in USBCMD */
+                       usbcmd = readl(&dev->op_regs->usbcmd);
+                       writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);
+
+                       /* read correct status bit */
+                       endptstat = readl(&dev->op_regs->endptstat) & bit_mask;
+
+               } while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));
+
+               /* write ATDTW bit to 0 */
+               usbcmd = readl(&dev->op_regs->usbcmd);
+               writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);
+
+               if (endptstat)
+                       goto out;
+       }
+
+       /* write dQH next pointer and terminate bit to 0 */
+       dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
+       dqh->dtd_next = cpu_to_le32(dtd_dma);
+
+       /* clear active and halt bit */
+       dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
+       dqh->dtd_status &= dtd_status;
+       VDBG(dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
+
+       /* write 1 to endptprime register to PRIME endpoint */
+       bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
+       VDBG(dev, "endprime bit_mask = 0x%08x\n", bit_mask);
+       writel(bit_mask, &dev->op_regs->endptprime);
+out:
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/* fill in the dTD structure to build a transfer descriptor */
+static struct langwell_dtd *build_dtd(struct langwell_request *req,
+               unsigned *length, dma_addr_t *dma, int *is_last)
+{
+       u32                      buf_ptr;
+       struct langwell_dtd     *dtd;
+       struct langwell_udc     *dev;
+       int                     i;
+
+       dev = req->ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* the maximum transfer length, up to 16k bytes */
+       *length = min(req->req.length - req->req.actual,
+                       (unsigned)DTD_MAX_TRANSFER_LENGTH);
+
+       /* create dTD dma_pool resource */
+       dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
+       if (dtd == NULL)
+               return dtd;
+       dtd->dtd_dma = *dma;
+
+       /* initialize buffer page pointers */
+       buf_ptr = (u32)(req->req.dma + req->req.actual);
+       for (i = 0; i < 5; i++)
+               dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);
+
+       req->req.actual += *length;
+
+       /* fill in total bytes with transfer size */
+       dtd->dtd_total = cpu_to_le16(*length);
+       VDBG(dev, "dtd->dtd_total = %d\n", dtd->dtd_total);
+
+       /* set is_last flag if req->req.zero is set or not */
+       if (req->req.zero) {
+               if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+                       *is_last = 1;
+               else
+                       *is_last = 0;
+       } else if (req->req.length == req->req.actual) {
+               *is_last = 1;
+       } else
+               *is_last = 0;
+
+       if (*is_last == 0)
+               VDBG(dev, "multi-dtd request!\n");
+
+       /* set interrupt on complete bit for the last dTD */
+       if (*is_last && !req->req.no_interrupt)
+               dtd->dtd_ioc = 1;
+
+       /* set multiplier override 0 for non-ISO and non-TX endpoint */
+       dtd->dtd_multo = 0;
+
+       /* set the active bit of status field to 1 */
+       dtd->dtd_status = DTD_STS_ACTIVE;
+       VDBG(dev, "dtd->dtd_status = 0x%02x\n", dtd->dtd_status);
+
+       VDBG(dev, "length = %d, dma addr= 0x%08x\n", *length, (int)*dma);
+       VDBG(dev, "<--- %s()\n", __func__);
+       return dtd;
+}
+
+
+/* generate dTD linked list for a request */
+static int req_to_dtd(struct langwell_request *req)
+{
+       unsigned                count;
+       int                     is_last, is_first = 1;
+       struct langwell_dtd     *dtd, *last_dtd = NULL;
+       struct langwell_udc     *dev;
+       dma_addr_t              dma;
+
+       dev = req->ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+       do {
+               dtd = build_dtd(req, &count, &dma, &is_last);
+               if (dtd == NULL)
+                       return -ENOMEM;
+
+               if (is_first) {
+                       is_first = 0;
+                       req->head = dtd;
+               } else {
+                       last_dtd->dtd_next = cpu_to_le32(dma);
+                       last_dtd->next_dtd_virt = dtd;
+               }
+               last_dtd = dtd;
+               req->dtd_count++;
+       } while (!is_last);
+
+       /* set terminate bit to 1 for the last dTD */
+       dtd->dtd_next = DTD_TERM;
+
+       req->tail = dtd;
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* queue (submits) an I/O requests to an endpoint */
+static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+               gfp_t gfp_flags)
+{
+       struct langwell_request *req;
+       struct langwell_ep      *ep;
+       struct langwell_udc     *dev;
+       unsigned long           flags;
+       int                     is_iso = 0, zlflag = 0;
+
+       /* always require a cpu-view buffer */
+       req = container_of(_req, struct langwell_request, req);
+       ep = container_of(_ep, struct langwell_ep, ep);
+
+       if (!_req || !_req->complete || !_req->buf
+                       || !list_empty(&req->queue)) {
+               return -EINVAL;
+       }
+
+       if (unlikely(!_ep || !ep->desc))
+               return -EINVAL;
+
+       dev = ep->dev;
+       req->ep = ep;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if ((ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC) {
+               if (req->req.length > ep->ep.maxpacket)
+                       return -EMSGSIZE;
+               is_iso = 1;
+       }
+
+       if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
+               return -ESHUTDOWN;
+
+       /* set up dma mapping in case the caller didn't */
+       if (_req->dma == DMA_ADDR_INVALID) {
+               /* WORKAROUND: WARN_ON(size == 0) */
+               if (_req->length == 0) {
+                       VDBG(dev, "req->length: 0->1\n");
+                       zlflag = 1;
+                       _req->length++;
+               }
+
+               _req->dma = dma_map_single(&dev->pdev->dev,
+                               _req->buf, _req->length,
+                               is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               if (zlflag && (_req->length == 1)) {
+                       VDBG(dev, "req->length: 1->0\n");
+                       zlflag = 0;
+                       _req->length = 0;
+               }
+
+               req->mapped = 1;
+               VDBG(dev, "req->mapped = 1\n");
+       } else {
+               dma_sync_single_for_device(&dev->pdev->dev,
+                               _req->dma, _req->length,
+                               is_in(ep) ?  DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               req->mapped = 0;
+               VDBG(dev, "req->mapped = 0\n");
+       }
+
+       DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
+                       _ep->name,
+                       _req, _req->length, _req->buf, _req->dma);
+
+       _req->status = -EINPROGRESS;
+       _req->actual = 0;
+       req->dtd_count = 0;
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* build and put dTDs to endpoint queue */
+       if (!req_to_dtd(req)) {
+               queue_dtd(ep, req);
+       } else {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return -ENOMEM;
+       }
+
+       /* update ep0 state */
+       if (ep->ep_num == 0)
+               dev->ep0_state = DATA_STATE_XMIT;
+
+       if (likely(req != NULL)) {
+               list_add_tail(&req->queue, &ep->queue);
+               VDBG(dev, "list_add_tail() \n");
+       }
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/* dequeue (cancels, unlinks) an I/O request from an endpoint */
+static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+       struct langwell_ep      *ep;
+       struct langwell_udc     *dev;
+       struct langwell_request *req;
+       unsigned long           flags;
+       int                     stopped, ep_num, retval = 0;
+       u32                     endptctrl;
+
+       ep = container_of(_ep, struct langwell_ep, ep);
+       dev = ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (!_ep || !ep->desc || !_req)
+               return -EINVAL;
+
+       if (!dev->driver)
+               return -ESHUTDOWN;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       stopped = ep->stopped;
+
+       /* quiesce dma while we patch the queue */
+       ep->stopped = 1;
+       ep_num = ep->ep_num;
+
+       /* disable endpoint control register */
+       endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+       if (is_in(ep))
+               endptctrl &= ~EPCTRL_TXE;
+       else
+               endptctrl &= ~EPCTRL_RXE;
+       writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+       /* make sure it's still queued on this endpoint */
+       list_for_each_entry(req, &ep->queue, queue) {
+               if (&req->req == _req)
+                       break;
+       }
+
+       if (&req->req != _req) {
+               retval = -EINVAL;
+               goto done;
+       }
+
+       /* queue head may be partially complete. */
+       if (ep->queue.next == &req->queue) {
+               DBG(dev, "unlink (%s) dma\n", _ep->name);
+               _req->status = -ECONNRESET;
+               langwell_ep_fifo_flush(&ep->ep);
+
+               /* not the last request in endpoint queue */
+               if (likely(ep->queue.next == &req->queue)) {
+                       struct langwell_dqh     *dqh;
+                       struct langwell_request *next_req;
+
+                       dqh = ep->dqh;
+                       next_req = list_entry(req->queue.next,
+                                       struct langwell_request, queue);
+
+                       /* point the dQH to the first dTD of next request */
+                       writel((u32) next_req->head, &dqh->dqh_current);
+               }
+       } else {
+               struct langwell_request *prev_req;
+
+               prev_req = list_entry(req->queue.prev,
+                               struct langwell_request, queue);
+               writel(readl(&req->tail->dtd_next),
+                               &prev_req->tail->dtd_next);
+       }
+
+       done(ep, req, -ECONNRESET);
+
+done:
+       /* enable endpoint again */
+       endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+       if (is_in(ep))
+               endptctrl |= EPCTRL_TXE;
+       else
+               endptctrl |= EPCTRL_RXE;
+       writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+       ep->stopped = stopped;
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return retval;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * ep_set_halt - program the stall bits of one endpoint's control register
+ * @ep:    endpoint to modify (callers only use this for non-ep0 endpoints;
+ *         see langwell_ep_set_halt)
+ * @value: 1 - set halt (stall), 0 - clear halt and reset the data toggle
+ *
+ * Read-modify-write of endptctrl[ep_num]; the TX or RX bits are chosen
+ * by the endpoint direction (is_in()).  Called with dev->lock held by
+ * langwell_ep_set_halt.
+ */
+static void ep_set_halt(struct langwell_ep *ep, int value)
+{
+       u32                     endptctrl = 0;
+       int                     ep_num;
+       struct langwell_udc     *dev = ep->dev;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       ep_num = ep->ep_num;
+       endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
+
+       /* value: 1 - set halt, 0 - clear halt */
+       if (value) {
+               /* set the stall bit */
+               if (is_in(ep))
+                       endptctrl |= EPCTRL_TXS;
+               else
+                       endptctrl |= EPCTRL_RXS;
+       } else {
+               /* clear the stall bit and reset data toggle */
+               if (is_in(ep)) {
+                       endptctrl &= ~EPCTRL_TXS;
+                       endptctrl |= EPCTRL_TXR;
+               } else {
+                       endptctrl &= ~EPCTRL_RXS;
+                       endptctrl |= EPCTRL_RXR;
+               }
+       }
+
+       writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * langwell_ep_set_halt - usb_ep_ops set_halt hook
+ * @_ep:   endpoint to (un)stall
+ * @value: 1 - set halt, 0 - clear halt
+ *
+ * Returns 0 on success, -EINVAL for a NULL/unconfigured endpoint,
+ * -ESHUTDOWN when no driver is bound or the link speed is unknown,
+ * -EOPNOTSUPP for isochronous endpoints, and -EAGAIN when an IN
+ * endpoint still has queued transfers (FIFO holds bytes).
+ *
+ * Fix: validate @_ep before dereferencing it -- the original did
+ * container_of()/ep->dev/ep->desc reads before the NULL check.
+ */
+static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
+{
+       struct langwell_ep      *ep;
+       struct langwell_udc     *dev;
+       unsigned long           flags;
+       int                     retval = 0;
+
+       if (!_ep)
+               return -EINVAL;
+
+       ep = container_of(_ep, struct langwell_ep, ep);
+       if (!ep->desc)
+               return -EINVAL;
+
+       dev = ep->dev;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+               return -ESHUTDOWN;
+
+       /* halt is not defined for isochronous endpoints */
+       if ((ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+                       == USB_ENDPOINT_XFER_ISOC)
+               return  -EOPNOTSUPP;
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /*
+        * attempt to halt IN ep will fail if any transfer requests
+        * are still queue
+        */
+       if (!list_empty(&ep->queue) && is_in(ep) && value) {
+               /* IN endpoint FIFO holds bytes */
+               DBG(dev, "%s FIFO holds bytes\n", _ep->name);
+               retval = -EAGAIN;
+               goto done;
+       }
+
+       /* endpoint set/clear halt */
+       if (ep->ep_num) {
+               ep_set_halt(ep, value);
+       } else { /* endpoint 0: restart the control state machine instead */
+               dev->ep0_state = WAIT_FOR_SETUP;
+               dev->ep0_dir = USB_DIR_OUT;
+       }
+done:
+       spin_unlock_irqrestore(&dev->lock, flags);
+       DBG(dev, "%s %s halt\n", _ep->name, value ? "set" : "clear");
+       VDBG(dev, "<--- %s()\n", __func__);
+       return retval;
+}
+
+
+/*
+ * langwell_ep_set_wedge - usb_ep_ops set_wedge hook
+ *
+ * Sets the halt feature; the core treats a wedged endpoint as one
+ * whose halt must not be cleared by ClearFeature(HALT).  Delegates
+ * the actual stall to usb_ep_set_halt().
+ *
+ * Fix: validate @_ep before dereferencing it -- the original read
+ * ep->dev/ep->desc (via container_of) before the NULL check.
+ */
+static int langwell_ep_set_wedge(struct usb_ep *_ep)
+{
+       struct langwell_ep      *ep;
+       struct langwell_udc     *dev;
+
+       if (!_ep)
+               return -EINVAL;
+
+       ep = container_of(_ep, struct langwell_ep, ep);
+       if (!ep->desc)
+               return -EINVAL;
+
+       dev = ep->dev;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return usb_ep_set_halt(_ep);
+}
+
+
+/*
+ * langwell_ep_fifo_flush - usb_ep_ops fifo_flush hook
+ *
+ * Writes the endpoint's bit(s) to ENDPTFLUSH and waits (bounded by
+ * FLUSH_TIMEOUT) for the hardware to clear them; retries while
+ * ENDPTSTAT still shows the endpoint primed.
+ *
+ * Fix: validate @_ep before dereferencing it -- the original did
+ * container_of()/ep->dev first, so the debug print inside the NULL
+ * check used a garbage dev pointer when _ep was NULL.
+ */
+static void langwell_ep_fifo_flush(struct usb_ep *_ep)
+{
+       struct langwell_ep      *ep;
+       struct langwell_udc     *dev;
+       u32                     flush_bit;
+       unsigned long           timeout;
+
+       if (!_ep)
+               return;
+
+       ep = container_of(_ep, struct langwell_ep, ep);
+       dev = ep->dev;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (!ep->desc) {
+               VDBG(dev, "ep->desc is NULL\n");
+               VDBG(dev, "<--- %s()\n", __func__);
+               return;
+       }
+
+       VDBG(dev, "%s-%s fifo flush\n", _ep->name, is_in(ep) ? "in" : "out");
+
+       /* flush endpoint buffer: ep0 needs both TX (bit 16) and RX (bit 0) */
+       if (ep->ep_num == 0)
+               flush_bit = (1 << 16) | 1;
+       else if (is_in(ep))
+               flush_bit = 1 << (ep->ep_num + 16);     /* TX */
+       else
+               flush_bit = 1 << ep->ep_num;            /* RX */
+
+       /* wait until flush complete */
+       timeout = jiffies + FLUSH_TIMEOUT;
+       do {
+               writel(flush_bit, &dev->op_regs->endptflush);
+               /* hardware clears the flush bit when the flush is done */
+               while (readl(&dev->op_regs->endptflush)) {
+                       if (time_after(jiffies, timeout)) {
+                               ERROR(dev, "ep flush timeout\n");
+                               goto done;
+                       }
+                       cpu_relax();
+               }
+               /* re-flush if the endpoint got primed again meanwhile */
+       } while (readl(&dev->op_regs->endptstat) & flush_bit);
+done:
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* endpoint operations structure, handed to the gadget core via ep.ops */
+static const struct usb_ep_ops langwell_ep_ops = {
+
+       /* configure endpoint, making it usable */
+       .enable         = langwell_ep_enable,
+
+       /* endpoint is no longer usable */
+       .disable        = langwell_ep_disable,
+
+       /* allocate a request object to use with this endpoint */
+       .alloc_request  = langwell_alloc_request,
+
+       /* free a request object */
+       .free_request   = langwell_free_request,
+
+       /* queue (submit) an I/O request to an endpoint */
+       .queue          = langwell_ep_queue,
+
+       /* dequeue (cancel, unlink) an I/O request from an endpoint */
+       .dequeue        = langwell_ep_dequeue,
+
+       /* set the endpoint halt feature */
+       .set_halt       = langwell_ep_set_halt,
+
+       /* set the halt feature and ignore clear requests */
+       .set_wedge      = langwell_ep_set_wedge,
+
+       /* flush contents of a fifo */
+       .fifo_flush     = langwell_ep_fifo_flush,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller usb_gadget_ops structure */
+
+/*
+ * langwell_get_frame - usb_gadget_ops get_frame hook
+ *
+ * Reads the frame index register and returns the masked frame number,
+ * or -ENODEV for a NULL gadget.
+ */
+static int langwell_get_frame(struct usb_gadget *_gadget)
+{
+       struct langwell_udc     *dev;
+       u16                     frame;
+
+       if (!_gadget)
+               return -ENODEV;
+
+       dev = container_of(_gadget, struct langwell_udc, gadget);
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* frame index register, masked down to the frame number bits */
+       frame = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return frame;
+}
+
+
+/*
+ * langwell_wakeup - usb_gadget_ops wakeup hook (remote wakeup / resume)
+ *
+ * Returns 0 when there is nothing to do (NULL gadget, or port not
+ * suspended) and -ENOTSUPP when the host has not enabled the
+ * remote-wakeup feature.  Note that the remote-wakeup check happens
+ * before dev->lock is taken.
+ */
+static int langwell_wakeup(struct usb_gadget *_gadget)
+{
+       struct langwell_udc     *dev;
+       u32                     portsc1, devlc;
+       unsigned long           flags;
+
+       if (!_gadget)
+               return 0;
+
+       dev = container_of(_gadget, struct langwell_udc, gadget);
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* Remote Wakeup feature not enabled by host */
+       if (!dev->remote_wakeup)
+               return -ENOTSUPP;
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* nothing to resume if the port is not suspended */
+       portsc1 = readl(&dev->op_regs->portsc1);
+       if (!(portsc1 & PORTS_SUSP)) {
+               spin_unlock_irqrestore(&dev->lock, flags);
+               return 0;
+       }
+
+       /* LPM L1 to L0, remote wakeup */
+       if (dev->lpm && dev->lpm_state == LPM_L1) {
+               portsc1 |= PORTS_SLP;
+               writel(portsc1, &dev->op_regs->portsc1);
+       }
+
+       /* force port resume */
+       if (dev->usb_state == USB_STATE_SUSPENDED) {
+               portsc1 |= PORTS_FPR;
+               writel(portsc1, &dev->op_regs->portsc1);
+       }
+
+       /* exit PHY low power suspend */
+       devlc = readl(&dev->op_regs->devlc);
+       VDBG(dev, "devlc = 0x%08x\n", devlc);
+       devlc &= ~LPM_PHCD;
+       writel(devlc, &dev->op_regs->devlc);
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/*
+ * langwell_vbus_session - usb_gadget_ops vbus_session hook
+ * @is_active: nonzero when VBUS power is present
+ *
+ * Records the VBUS state and runs the controller only when a gadget
+ * driver is bound, software connect is enabled and VBUS is present;
+ * otherwise the controller is stopped.
+ */
+static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+       struct langwell_udc     *dev;
+       unsigned long           flags;
+       u32                     usbcmd;
+
+       if (!_gadget)
+               return -ENODEV;
+
+       dev = container_of(_gadget, struct langwell_udc, gadget);
+       VDBG(dev, "---> %s()\n", __func__);
+
+       spin_lock_irqsave(&dev->lock, flags);
+       VDBG(dev, "VBUS status: %s\n", is_active ? "on" : "off");
+
+       dev->vbus_active = (is_active != 0);
+
+       usbcmd = readl(&dev->op_regs->usbcmd);
+       if (dev->driver && dev->softconnected && dev->vbus_active)
+               usbcmd |= CMD_RUNSTOP;
+       else
+               usbcmd &= ~CMD_RUNSTOP;
+       writel(usbcmd, &dev->op_regs->usbcmd);
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/*
+ * langwell_vbus_draw - usb_gadget_ops vbus_draw hook
+ * @mA: current budget granted by the host configuration
+ *
+ * Delegates to the OTG transceiver when one is present; otherwise
+ * reports -ENOTSUPP.  Returns -ENODEV for a NULL gadget.
+ */
+static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
+{
+       struct langwell_udc     *dev;
+       int                     retval = -ENOTSUPP;
+
+       if (!_gadget)
+               return -ENODEV;
+
+       dev = container_of(_gadget, struct langwell_udc, gadget);
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (dev->transceiver) {
+               VDBG(dev, "otg_set_power\n");
+               retval = otg_set_power(dev->transceiver, mA);
+       }
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return retval;
+}
+
+
+/*
+ * langwell_pullup - usb_gadget_ops pullup hook
+ * @is_on: nonzero to connect the D+ pullup, zero to disconnect
+ *
+ * Records the software-connect state and runs the controller only
+ * when a gadget driver is bound, software connect is enabled and
+ * VBUS is present; otherwise the controller is stopped.
+ */
+static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
+{
+       struct langwell_udc     *dev;
+       u32                     usbcmd;
+       unsigned long           flags;
+
+       if (!_gadget)
+               return -ENODEV;
+
+       dev = container_of(_gadget, struct langwell_udc, gadget);
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       spin_lock_irqsave(&dev->lock, flags);
+       dev->softconnected = (is_on != 0);
+
+       usbcmd = readl(&dev->op_regs->usbcmd);
+       if (dev->driver && dev->softconnected && dev->vbus_active)
+               usbcmd |= CMD_RUNSTOP;
+       else
+               usbcmd &= ~CMD_RUNSTOP;
+       writel(usbcmd, &dev->op_regs->usbcmd);
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/* device controller usb_gadget_ops structure, handed to the gadget core */
+static const struct usb_gadget_ops langwell_ops = {
+
+       /* returns the current frame number */
+       .get_frame      = langwell_get_frame,
+
+       /* tries to wake up the host connected to this gadget */
+       .wakeup         = langwell_wakeup,
+
+       /* set the device selfpowered feature, always selfpowered */
+       /* .set_selfpowered = langwell_set_selfpowered, */
+
+       /* notify controller that VBUS is powered or not */
+       .vbus_session   = langwell_vbus_session,
+
+       /* constrain controller's VBUS power usage */
+       .vbus_draw      = langwell_vbus_draw,
+
+       /* D+ pullup, software-controlled connect/disconnect to USB host */
+       .pullup         = langwell_pullup,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* device controller operations */
+
+/*
+ * langwell_udc_reset - stop, reset and reconfigure the controller core
+ *
+ * Halts the controller, issues a core reset and waits (bounded by
+ * RESET_TIMEOUT) for the reset bit to self-clear, switches the core
+ * into device mode with setup lockout disabled (setup tripwire will
+ * be used instead), and programs the endpoint list base address.
+ *
+ * Returns 0 on success, -EINVAL for a NULL @dev, -ETIMEDOUT if the
+ * reset bit never clears.
+ */
+static int langwell_udc_reset(struct langwell_udc *dev)
+{
+       u32             usbcmd, usbmode, devlc, endpointlistaddr;
+       unsigned long   timeout;
+
+       if (!dev)
+               return -EINVAL;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       /* set controller to stop state */
+       usbcmd = readl(&dev->op_regs->usbcmd);
+       usbcmd &= ~CMD_RUNSTOP;
+       writel(usbcmd, &dev->op_regs->usbcmd);
+
+       /* reset device controller */
+       usbcmd = readl(&dev->op_regs->usbcmd);
+       usbcmd |= CMD_RST;
+       writel(usbcmd, &dev->op_regs->usbcmd);
+
+       /* wait for reset to complete; hardware self-clears CMD_RST */
+       timeout = jiffies + RESET_TIMEOUT;
+       while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
+               if (time_after(jiffies, timeout)) {
+                       ERROR(dev, "device reset timeout\n");
+                       return -ETIMEDOUT;
+               }
+               cpu_relax();
+       }
+
+       /* set controller to device mode */
+       usbmode = readl(&dev->op_regs->usbmode);
+       usbmode |= MODE_DEVICE;
+
+       /* turn setup lockout off, require setup tripwire in usbcmd */
+       usbmode |= MODE_SLOM;
+
+       writel(usbmode, &dev->op_regs->usbmode);
+       /* read back to confirm the mode switch took effect */
+       usbmode = readl(&dev->op_regs->usbmode);
+       VDBG(dev, "usbmode=0x%08x\n", usbmode);
+
+       /* Write-Clear setup status */
+       writel(0, &dev->op_regs->usbsts);
+
+       /* if support USB LPM, ACK all LPM token */
+       if (dev->lpm) {
+               devlc = readl(&dev->op_regs->devlc);
+               devlc &= ~LPM_STL;      /* don't STALL LPM token */
+               devlc &= ~LPM_NYT_ACK;  /* ACK LPM token */
+               writel(devlc, &dev->op_regs->devlc);
+       }
+
+       /* fill endpointlistaddr register with the dQH array's DMA address */
+       endpointlistaddr = dev->ep_dqh_dma;
+       endpointlistaddr &= ENDPOINTLISTADDR_MASK;
+       writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);
+
+       VDBG(dev, "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
+                       dev->ep_dqh, endpointlistaddr,
+                       readl(&dev->op_regs->endpointlistaddr));
+       DBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/*
+ * eps_reinit - (re)initialize the driver's endpoint bookkeeping
+ *
+ * Sets up the ep0 entry and then every other endpoint slot, wiring
+ * each one to langwell_ep_ops and adding the non-ep0 endpoints to
+ * the gadget's ep_list.  Always returns 0.
+ */
+static int eps_reinit(struct langwell_udc *dev)
+{
+       struct langwell_ep      *ep;
+       char                    name[14];
+       int                     i;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* initialize ep0 */
+       ep = &dev->ep[0];
+       ep->dev = dev;
+       strncpy(ep->name, "ep0", sizeof(ep->name));
+       ep->ep.name = ep->name;
+       ep->ep.ops = &langwell_ep_ops;
+       ep->stopped = 0;
+       ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+       ep->ep_num = 0;
+       ep->desc = &langwell_ep0_desc;
+       INIT_LIST_HEAD(&ep->queue);
+
+       ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+       /*
+        * initialize other endpoints; odd slots are IN, even slots are
+        * OUT, and slot i maps to endpoint number i / 2.  The loop starts
+        * at 2, presumably because slots 0/1 both belong to bidirectional
+        * ep0 -- TODO confirm against the dQH layout.
+        */
+       for (i = 2; i < dev->ep_max; i++) {
+               ep = &dev->ep[i];
+               if (i % 2)
+                       snprintf(name, sizeof(name), "ep%din", i / 2);
+               else
+                       snprintf(name, sizeof(name), "ep%dout", i / 2);
+               ep->dev = dev;
+               /* NOTE(review): strncpy may leave ep->name unterminated
+                * if the name fills the buffer -- confirm buffer sizes */
+               strncpy(ep->name, name, sizeof(ep->name));
+               ep->ep.name = ep->name;
+
+               ep->ep.ops = &langwell_ep_ops;
+               ep->stopped = 0;
+               /* ~0 marks "not configured"; presumably fixed up at
+                * ep_enable time -- verify */
+               ep->ep.maxpacket = (unsigned short) ~0;
+               ep->ep_num = i / 2;
+
+               INIT_LIST_HEAD(&ep->queue);
+               list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+
+               ep->dqh = &dev->ep_dqh[i];
+       }
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/*
+ * langwell_udc_start - unmask interrupts and set the controller running
+ *
+ * Enables every interrupt source the driver handles (SOF stays
+ * disabled), clears the driver's stopped flag, then sets the
+ * run/stop bit.
+ */
+static void langwell_udc_start(struct langwell_udc *dev)
+{
+       u32     usbintr, usbcmd;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       /* enable interrupts */
+       usbintr = INTR_ULPIE    /* ULPI */
+               | INTR_SLE      /* suspend */
+               /* | INTR_SRE   SOF received */
+               | INTR_URE      /* USB reset */
+               | INTR_AAE      /* async advance */
+               | INTR_SEE      /* system error */
+               | INTR_FRE      /* frame list rollover */
+               | INTR_PCE      /* port change detect */
+               | INTR_UEE      /* USB error interrupt */
+               | INTR_UE;      /* USB interrupt */
+       writel(usbintr, &dev->op_regs->usbintr);
+
+       /* clear stopped bit */
+       dev->stopped = 0;
+
+       /* set controller to run */
+       usbcmd = readl(&dev->op_regs->usbcmd);
+       writel(usbcmd | CMD_RUNSTOP, &dev->op_regs->usbcmd);
+
+       DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * langwell_udc_stop - mask all interrupts and halt the controller
+ *
+ * Mirror image of langwell_udc_start(): disables every interrupt
+ * source, sets the driver's stopped flag, then clears the run/stop
+ * bit.
+ */
+static void langwell_udc_stop(struct langwell_udc *dev)
+{
+       u32     usbcmd;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       /* disable all interrupts */
+       writel(0, &dev->op_regs->usbintr);
+
+       /* set stopped bit */
+       dev->stopped = 1;
+
+       /* set controller to stop state */
+       usbcmd = readl(&dev->op_regs->usbcmd);
+       writel(usbcmd & ~CMD_RUNSTOP, &dev->op_regs->usbcmd);
+
+       DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * stop_activity - abort every pending request and report disconnect
+ * @dev:    controller state
+ * @driver: bound gadget driver, or NULL to skip the disconnect callback
+ *
+ * Completes all outstanding requests with -ESHUTDOWN on ep0 and every
+ * listed endpoint.  Called with dev->lock held (see
+ * usb_gadget_unregister_driver); the lock is dropped around the
+ * driver's disconnect() callback.
+ */
+static void stop_activity(struct langwell_udc *dev,
+               struct usb_gadget_driver *driver)
+{
+       struct langwell_ep      *ep;
+       DBG(dev, "---> %s()\n", __func__);
+
+       nuke(&dev->ep[0], -ESHUTDOWN);
+
+       list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+               nuke(ep, -ESHUTDOWN);
+       }
+
+       /* report disconnect; the driver is already quiesced */
+       if (driver) {
+               /* drop the lock around the callback to avoid deadlock */
+               spin_unlock(&dev->lock);
+               driver->disconnect(&dev->gadget);
+               spin_lock(&dev->lock);
+       }
+
+       DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * show_function - device "function" sysfs attribute file
+ *
+ * Reports the bound gadget driver's function name, or an empty read
+ * when no driver is bound, it has no function string, or the string
+ * would not fit in one page.
+ */
+static ssize_t show_function(struct device *_dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct langwell_udc     *dev = the_controller;
+
+       if (!dev->driver || !dev->driver->function
+                       || strlen(dev->driver->function) > PAGE_SIZE)
+               return 0;
+
+       return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
+}
+static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+
+
+/*
+ * show_langwell_udc - device "langwell_udc" sysfs attribute file
+ *
+ * Debug dump: prints driver identification, a decoded view of the
+ * main operational registers, and the request queue of every
+ * endpoint, all while holding dev->lock.  Returns the number of
+ * bytes written into the PAGE_SIZE buffer @buf.
+ */
+static ssize_t show_langwell_udc(struct device *_dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct langwell_udc     *dev = the_controller;
+       struct langwell_request *req;
+       struct langwell_ep      *ep = NULL;
+       char                    *next;
+       unsigned                size;
+       unsigned                t;
+       unsigned                i;
+       unsigned long           flags;
+       u32                     tmp_reg;
+
+       next = buf;
+       size = PAGE_SIZE;
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* driver basic information */
+       t = scnprintf(next, size,
+                       DRIVER_DESC "\n"
+                       "%s version: %s\n"
+                       "Gadget driver: %s\n\n",
+                       driver_name, DRIVER_VERSION,
+                       dev->driver ? dev->driver->driver.name : "(none)");
+       size -= t;
+       next += t;
+
+       /* device registers */
+       tmp_reg = readl(&dev->op_regs->usbcmd);
+       t = scnprintf(next, size,
+                       "USBCMD reg:\n"
+                       "SetupTW: %d\n"
+                       "Run/Stop: %s\n\n",
+                       (tmp_reg & CMD_SUTW) ? 1 : 0,
+                       (tmp_reg & CMD_RUNSTOP) ? "Run" : "Stop");
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->usbsts);
+       t = scnprintf(next, size,
+                       "USB Status Reg:\n"
+                       "Device Suspend: %d\n"
+                       "Reset Received: %d\n"
+                       "System Error: %s\n"
+                       "USB Error Interrupt: %s\n\n",
+                       (tmp_reg & STS_SLI) ? 1 : 0,
+                       (tmp_reg & STS_URI) ? 1 : 0,
+                       (tmp_reg & STS_SEI) ? "Error" : "No error",
+                       (tmp_reg & STS_UEI) ? "Error detected" : "No error");
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->usbintr);
+       t = scnprintf(next, size,
+                       "USB Intrrupt Enable Reg:\n"
+                       "Sleep Enable: %d\n"
+                       "SOF Received Enable: %d\n"
+                       "Reset Enable: %d\n"
+                       "System Error Enable: %d\n"
+                       "Port Change Dectected Enable: %d\n"
+                       "USB Error Intr Enable: %d\n"
+                       "USB Intr Enable: %d\n\n",
+                       (tmp_reg & INTR_SLE) ? 1 : 0,
+                       (tmp_reg & INTR_SRE) ? 1 : 0,
+                       (tmp_reg & INTR_URE) ? 1 : 0,
+                       (tmp_reg & INTR_SEE) ? 1 : 0,
+                       (tmp_reg & INTR_PCE) ? 1 : 0,
+                       (tmp_reg & INTR_UEE) ? 1 : 0,
+                       (tmp_reg & INTR_UE) ? 1 : 0);
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->frindex);
+       t = scnprintf(next, size,
+                       "USB Frame Index Reg:\n"
+                       "Frame Number is 0x%08x\n\n",
+                       (tmp_reg & FRINDEX_MASK));
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->deviceaddr);
+       t = scnprintf(next, size,
+                       "USB Device Address Reg:\n"
+                       "Device Addr is 0x%x\n\n",
+                       USBADR(tmp_reg));
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->endpointlistaddr);
+       t = scnprintf(next, size,
+                       "USB Endpoint List Address Reg:\n"
+                       "Endpoint List Pointer is 0x%x\n\n",
+                       EPBASE(tmp_reg));
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->portsc1);
+       t = scnprintf(next, size,
+               "USB Port Status & Control Reg:\n"
+               "Port Reset: %s\n"
+               "Port Suspend Mode: %s\n"
+               "Over-current Change: %s\n"
+               "Port Enable/Disable Change: %s\n"
+               "Port Enabled/Disabled: %s\n"
+               "Current Connect Status: %s\n\n",
+               (tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
+               (tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend",
+               (tmp_reg & PORTS_OCC) ? "Detected" : "No",
+               (tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
+               (tmp_reg & PORTS_PE) ? "Enable" : "Not Correct",
+               (tmp_reg & PORTS_CCS) ?  "Attached" : "Not Attached");
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->devlc);
+       t = scnprintf(next, size,
+               "Device LPM Control Reg:\n"
+               "Parallel Transceiver : %d\n"
+               "Serial Transceiver : %d\n"
+               "Port Speed: %s\n"
+               "Port Force Full Speed Connenct: %s\n"
+               "PHY Low Power Suspend Clock Disable: %s\n"
+               "BmAttributes: %d\n\n",
+               LPM_PTS(tmp_reg),
+               (tmp_reg & LPM_STS) ? 1 : 0,
+               ({
+                       /* statement expression decoding the port speed */
+                       char    *s;
+                       switch (LPM_PSPD(tmp_reg)) {
+                       case LPM_SPEED_FULL:
+                               s = "Full Speed"; break;
+                       case LPM_SPEED_LOW:
+                               s = "Low Speed"; break;
+                       case LPM_SPEED_HIGH:
+                               s = "High Speed"; break;
+                       default:
+                               s = "Unknown Speed"; break;
+                       }
+                       s;
+               }),
+               (tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
+               (tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
+               LPM_BA(tmp_reg));
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->usbmode);
+       t = scnprintf(next, size,
+                       "USB Mode Reg:\n"
+                       "Controller Mode is : %s\n\n", ({
+                               char *s;
+                               switch (MODE_CM(tmp_reg)) {
+                               case MODE_IDLE:
+                                       s = "Idle"; break;
+                               case MODE_DEVICE:
+                                       s = "Device Controller"; break;
+                               case MODE_HOST:
+                                       s = "Host Controller"; break;
+                               default:
+                                       s = "None"; break;
+                               }
+                               s;
+                       }));
+       size -= t;
+       next += t;
+
+       tmp_reg = readl(&dev->op_regs->endptsetupstat);
+       t = scnprintf(next, size,
+                       "Endpoint Setup Status Reg:\n"
+                       "SETUP on ep 0x%04x\n\n",
+                       tmp_reg & SETUPSTAT_MASK);
+       size -= t;
+       next += t;
+
+       /* one endptctrl register covers an IN/OUT endpoint pair */
+       for (i = 0; i < dev->ep_max / 2; i++) {
+               tmp_reg = readl(&dev->op_regs->endptctrl[i]);
+               t = scnprintf(next, size, "EP Ctrl Reg [%d]: 0x%08x\n",
+                               i, tmp_reg);
+               size -= t;
+               next += t;
+       }
+       tmp_reg = readl(&dev->op_regs->endptprime);
+       t = scnprintf(next, size, "EP Prime Reg: 0x%08x\n\n", tmp_reg);
+       size -= t;
+       next += t;
+
+       /* langwell_udc, langwell_ep, langwell_request structure information */
+       ep = &dev->ep[0];
+       t = scnprintf(next, size, "%s MaxPacketSize: 0x%x, ep_num: %d\n",
+                       ep->ep.name, ep->ep.maxpacket, ep->ep_num);
+       size -= t;
+       next += t;
+
+       if (list_empty(&ep->queue)) {
+               t = scnprintf(next, size, "its req queue is empty\n\n");
+               size -= t;
+               next += t;
+       } else {
+               list_for_each_entry(req, &ep->queue, queue) {
+                       t = scnprintf(next, size,
+                               "req %p actual 0x%x length 0x%x  buf %p\n",
+                               &req->req, req->req.actual,
+                               req->req.length, req->req.buf);
+                       size -= t;
+                       next += t;
+               }
+       }
+       /* other gadget->eplist ep; only configured endpoints are shown */
+       list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+               if (ep->desc) {
+                       t = scnprintf(next, size,
+                                       "\n%s MaxPacketSize: 0x%x, "
+                                       "ep_num: %d\n",
+                                       ep->ep.name, ep->ep.maxpacket,
+                                       ep->ep_num);
+                       size -= t;
+                       next += t;
+
+                       if (list_empty(&ep->queue)) {
+                               t = scnprintf(next, size,
+                                               "its req queue is empty\n\n");
+                               size -= t;
+                               next += t;
+                       } else {
+                               list_for_each_entry(req, &ep->queue, queue) {
+                                       t = scnprintf(next, size,
+                                               "req %p actual 0x%x length "
+                                               "0x%x  buf %p\n",
+                                               &req->req, req->req.actual,
+                                               req->req.length, req->req.buf);
+                                       size -= t;
+                                       next += t;
+                               }
+                       }
+               }
+       }
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests.  then usb traffic follows until a
+ * disconnect is reported.  then a host may connect again, or
+ * the driver might get unbound.
+ */
+
+/*
+ * usb_gadget_register_driver - bind a gadget driver to this controller
+ * @driver: gadget driver to bind
+ *
+ * Hooks the driver up, calls its bind() callback, creates the
+ * "function" sysfs file and starts the controller.  Returns 0 on
+ * success, -ENODEV when no controller exists, -EBUSY when a driver
+ * is already bound, or the error from bind()/device_create_file()
+ * (with the binding rolled back).
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+       struct langwell_udc     *dev = the_controller;
+       unsigned long           flags;
+       int                     retval;
+
+       if (!dev)
+               return -ENODEV;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       /* NOTE(review): dev->driver is tested outside dev->lock; two
+        * concurrent registrations could race -- confirm callers
+        * serialize registration */
+       if (dev->driver)
+               return -EBUSY;
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* hook up the driver ... */
+       driver->driver.bus = NULL;
+       dev->driver = driver;
+       dev->gadget.dev.driver = &driver->driver;
+
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       /* bind() may sleep, so it runs without the lock held */
+       retval = driver->bind(&dev->gadget);
+       if (retval) {
+               DBG(dev, "bind to driver %s --> %d\n",
+                               driver->driver.name, retval);
+               dev->driver = NULL;
+               dev->gadget.dev.driver = NULL;
+               return retval;
+       }
+
+       retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
+       if (retval)
+               goto err_unbind;
+
+       dev->usb_state = USB_STATE_ATTACHED;
+       dev->ep0_state = WAIT_FOR_SETUP;
+       dev->ep0_dir = USB_DIR_OUT;
+
+       /* enable interrupt and set controller to run state */
+       if (dev->got_irq)
+               langwell_udc_start(dev);
+
+       VDBG(dev, "After langwell_udc_start(), print all registers:\n");
+#ifdef VERBOSE
+       print_all_registers(dev);
+#endif
+
+       INFO(dev, "register driver: %s\n", driver->driver.name);
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+
+err_unbind:
+       /* roll back the successful bind() above */
+       driver->unbind(&dev->gadget);
+       dev->gadget.dev.driver = NULL;
+       dev->driver = NULL;
+
+       DBG(dev, "<--- %s()\n", __func__);
+       return retval;
+}
+
+
+/*
+ * usb_gadget_unregister_driver - unbind a gadget driver
+ * @driver: the driver previously passed to usb_gadget_register_driver
+ *
+ * Detaches the OTG transceiver, stops the controller, aborts all
+ * pending requests (reporting disconnect), calls the driver's
+ * unbind() callback and removes the "function" sysfs file.
+ * Returns 0 on success, -ENODEV when no controller exists, -EINVAL
+ * for a NULL/incomplete driver.
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+       struct langwell_udc     *dev = the_controller;
+       unsigned long           flags;
+
+       if (!dev)
+               return -ENODEV;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       if (unlikely(!driver || !driver->bind || !driver->unbind))
+               return -EINVAL;
+
+       /* unbind OTG transceiver */
+       if (dev->transceiver)
+               (void)otg_set_peripheral(dev->transceiver, 0);
+
+       /* disable interrupt and set controller to stop state */
+       langwell_udc_stop(dev);
+
+       dev->usb_state = USB_STATE_ATTACHED;
+       dev->ep0_state = WAIT_FOR_SETUP;
+       dev->ep0_dir = USB_DIR_OUT;
+
+       spin_lock_irqsave(&dev->lock, flags);
+
+       /* stop all usb activities; stop_activity temporarily drops
+        * dev->lock around the driver's disconnect() callback */
+       dev->gadget.speed = USB_SPEED_UNKNOWN;
+       stop_activity(dev, driver);
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       /* unbind gadget driver */
+       driver->unbind(&dev->gadget);
+       dev->gadget.dev.driver = NULL;
+       dev->driver = NULL;
+
+       device_remove_file(&dev->pdev->dev, &dev_attr_function);
+
+       INFO(dev, "unregistered driver '%s'\n", driver->driver.name);
+       DBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * setup tripwire is used as a semaphore to ensure that the setup data
+ * payload is extracted from a dQH without being corrupted.
+ *
+ * The hardware clears the USBCMD.SUTW bit when a new setup packet
+ * arrives, so the copy loop below retries until an uninterrupted copy
+ * is observed (SUTW still set after the memcpy).
+ */
+static void setup_tripwire(struct langwell_udc *dev)
+{
+       u32                     usbcmd,
+                               endptsetupstat;
+       unsigned long           timeout;
+       struct langwell_dqh     *dqh;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* ep0 OUT dQH -- holds the 8-byte setup payload */
+       dqh = &dev->ep_dqh[EP_DIR_OUT];
+
+       /* Write-Clear endptsetupstat */
+       endptsetupstat = readl(&dev->op_regs->endptsetupstat);
+       writel(endptsetupstat, &dev->op_regs->endptsetupstat);
+
+       /* wait until endptsetupstat is cleared */
+       timeout = jiffies + SETUPSTAT_TIMEOUT;
+       while (readl(&dev->op_regs->endptsetupstat)) {
+               if (time_after(jiffies, timeout)) {
+                       ERROR(dev, "setup_tripwire timeout\n");
+                       break;
+               }
+               cpu_relax();
+       }
+
+       /* while a hazard exists when setup packet arrives */
+       do {
+               /* set setup tripwire bit */
+               usbcmd = readl(&dev->op_regs->usbcmd);
+               writel(usbcmd | CMD_SUTW, &dev->op_regs->usbcmd);
+
+               /* copy the setup packet to local buffer */
+               memcpy(&dev->local_setup_buff, &dqh->dqh_setup, 8);
+       } while (!(readl(&dev->op_regs->usbcmd) & CMD_SUTW));
+
+       /* Write-Clear setup tripwire bit */
+       usbcmd = readl(&dev->op_regs->usbcmd);
+       writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * protocol ep0 stall, will automatically be cleared on new transaction.
+ * Stalls both directions of endpoint 0 and rewinds the ep0 state machine
+ * to wait for the next SETUP packet.
+ */
+static void ep0_stall(struct langwell_udc *dev)
+{
+       u32     endptctrl;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* set TX and RX to stall */
+       endptctrl = readl(&dev->op_regs->endptctrl[0]);
+       endptctrl |= EPCTRL_TXS | EPCTRL_RXS;
+       writel(endptctrl, &dev->op_regs->endptctrl[0]);
+
+       /* update ep0 state */
+       dev->ep0_state = WAIT_FOR_SETUP;
+       dev->ep0_dir = USB_DIR_OUT;
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * PRIME a status phase for ep0 -- queue a zero-length request on
+ * endpoint 0 in the given direction (EP_DIR_IN/EP_DIR_OUT) using the
+ * per-device status_req.
+ *
+ * Returns 0 on success, -ENOMEM when no dTD could be built, or the
+ * error from queue_dtd().
+ *
+ * Fix vs. original: the request is linked onto ep->queue only when
+ * queue_dtd() succeeded; previously a failed prime still queued the
+ * request, leaving a dangling entry for later completion/nuke paths.
+ */
+static int prime_status_phase(struct langwell_udc *dev, int dir)
+{
+       struct langwell_request *req;
+       struct langwell_ep      *ep;
+       int                     status;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (dir == EP_DIR_IN)
+               dev->ep0_dir = USB_DIR_IN;
+       else
+               dev->ep0_dir = USB_DIR_OUT;
+
+       ep = &dev->ep[0];
+       dev->ep0_state = WAIT_FOR_OUT_STATUS;
+
+       /* borrow the per device status_req; zero-length, no completion */
+       req = dev->status_req;
+
+       req->ep = ep;
+       req->req.length = 0;
+       req->req.status = -EINPROGRESS;
+       req->req.actual = 0;
+       req->req.complete = NULL;
+       req->dtd_count = 0;
+
+       if (req_to_dtd(req))
+               return -ENOMEM;
+
+       status = queue_dtd(ep, req);
+       if (status) {
+               ERROR(dev, "can't queue ep0 status request\n");
+               return status;
+       }
+
+       list_add_tail(&req->queue, &ep->queue);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/*
+ * SET_ADDRESS request routine -- latch the new device address and prime
+ * the IN status phase; index and length are unused for this request.
+ */
+static void set_address(struct langwell_udc *dev, u16 value,
+               u16 index, u16 length)
+{
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* save the new address to device struct */
+       dev->dev_addr = (u8) value;
+       VDBG(dev, "dev->dev_addr = %d\n", dev->dev_addr);
+
+       /* update usb state */
+       dev->usb_state = USB_STATE_ADDRESS;
+
+       /* STATUS phase; on failure stall ep0 */
+       if (prime_status_phase(dev, EP_DIR_IN))
+               ep0_stall(dev);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * return endpoint by windex -- map a control-request wIndex value to the
+ * matching langwell_ep. Endpoint number 0 always resolves to ep0; other
+ * endpoints must be configured (have a descriptor) and match both the
+ * endpoint number and the direction bit. Returns NULL when no endpoint
+ * matches.
+ */
+static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
+               u16 wIndex)
+{
+       struct langwell_ep      *ep;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
+               return &dev->ep[0];
+
+       list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+               u8      addr;
+
+               /* skip endpoints that are not configured */
+               if (!ep->desc)
+                       continue;
+
+               addr = ep->desc->bEndpointAddress;
+               if (((wIndex ^ addr) & USB_DIR_IN) == 0 &&
+                   (wIndex & USB_ENDPOINT_NUMBER_MASK) ==
+                               (addr & USB_ENDPOINT_NUMBER_MASK))
+                       return ep;
+       }
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return NULL;
+}
+
+
+/* return whether endpoint is stalled, 0: not stalled; 1: stalled */
+static int ep_is_stall(struct langwell_ep *ep)
+{
+       struct langwell_udc     *dev = ep->dev;
+       u32                     ctrl;
+       int                     stalled;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* the stall flag lives in TXS for IN endpoints, RXS for OUT */
+       ctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
+       stalled = !!(ctrl & (is_in(ep) ? EPCTRL_TXS : EPCTRL_RXS));
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return stalled;
+}
+
+
+/*
+ * GET_STATUS request routine -- build the 2-byte status reply for a
+ * device, interface, or endpoint recipient and prime the ep0 IN DATA
+ * phase with it. Stalls ep0 on any failure (unknown endpoint, no
+ * memory, queue error).
+ */
+static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
+               u16 index, u16 length)
+{
+       struct langwell_request *req;
+       struct langwell_ep      *ep;
+       u16     status_data = 0;        /* 16 bits cpu view status data */
+       int     status = 0;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       ep = &dev->ep[0];
+
+       if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+               /* get device status: self-powered plus remote-wakeup bit */
+               status_data = 1 << USB_DEVICE_SELF_POWERED;
+               status_data |= dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+       } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+               /* get interface status: always zero per USB 2.0 ch9 */
+               status_data = 0;
+       } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
+               /* get endpoint status */
+               struct langwell_ep      *epn;
+               epn = get_ep_by_windex(dev, index);
+               /* stall if endpoint doesn't exist */
+               if (!epn)
+                       goto stall;
+
+               status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
+       }
+
+       dev->ep0_dir = USB_DIR_IN;
+
+       /* borrow the per device status_req */
+       req = dev->status_req;
+
+       /* fill in the request structure; reply is little-endian on the wire */
+       *((u16 *) req->req.buf) = cpu_to_le16(status_data);
+       req->ep = ep;
+       req->req.length = 2;
+       req->req.status = -EINPROGRESS;
+       req->req.actual = 0;
+       req->req.complete = NULL;
+       req->dtd_count = 0;
+
+       /* prime the data phase */
+       if (!req_to_dtd(req))
+               status = queue_dtd(ep, req);
+       else                    /* no mem */
+               goto stall;
+
+       if (status) {
+               ERROR(dev, "response error on GET_STATUS request\n");
+               goto stall;
+       }
+
+       list_add_tail(&req->queue, &ep->queue);
+       dev->ep0_state = DATA_STATE_XMIT;
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return;
+stall:
+       ep0_stall(dev);
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * setup packet interrupt handler -- decode an ep0 SETUP packet, handle
+ * the standard requests the UDC owns (GET_STATUS, SET_ADDRESS,
+ * SET/CLEAR_FEATURE) locally, and delegate everything else to the
+ * gadget driver's ->setup() callback.
+ *
+ * Called with dev->lock held; the lock is dropped around gadget-driver
+ * callbacks and re-taken afterwards.
+ */
+static void handle_setup_packet(struct langwell_udc *dev,
+               struct usb_ctrlrequest *setup)
+{
+       u16     wValue = le16_to_cpu(setup->wValue);
+       u16     wIndex = le16_to_cpu(setup->wIndex);
+       u16     wLength = le16_to_cpu(setup->wLength);
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* ep0 fifo flush: abort any transfer left over from a prior request */
+       nuke(&dev->ep[0], -ESHUTDOWN);
+
+       DBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+                       setup->bRequestType, setup->bRequest,
+                       wValue, wIndex, wLength);
+
+       /* RNDIS gadget delegate */
+       if ((setup->bRequestType == 0x21) && (setup->bRequest == 0x00)) {
+               /* USB_CDC_SEND_ENCAPSULATED_COMMAND */
+               goto delegate;
+       }
+
+       if ((setup->bRequestType == 0xa1) && (setup->bRequest == 0x01)) {
+               /* USB_CDC_GET_ENCAPSULATED_RESPONSE */
+               goto delegate;
+       }
+
+       /* We process some standard setup requests here */
+       switch (setup->bRequest) {
+       case USB_REQ_GET_STATUS:
+               DBG(dev, "SETUP: USB_REQ_GET_STATUS\n");
+               /* get status, DATA and STATUS phase */
+               if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+                                       != (USB_DIR_IN | USB_TYPE_STANDARD))
+                       break;
+               get_status(dev, setup->bRequestType, wValue, wIndex, wLength);
+               goto end;
+
+       case USB_REQ_SET_ADDRESS:
+               DBG(dev, "SETUP: USB_REQ_SET_ADDRESS\n");
+               /* STATUS phase */
+               if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
+                                               | USB_RECIP_DEVICE))
+                       break;
+               set_address(dev, wValue, wIndex, wLength);
+               goto end;
+
+       case USB_REQ_CLEAR_FEATURE:
+       case USB_REQ_SET_FEATURE:
+               /* STATUS phase */
+       {
+               int rc = -EOPNOTSUPP;
+               if (setup->bRequest == USB_REQ_SET_FEATURE)
+                       DBG(dev, "SETUP: USB_REQ_SET_FEATURE\n");
+               else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
+                       DBG(dev, "SETUP: USB_REQ_CLEAR_FEATURE\n");
+
+               if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
+                               == (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
+                       /* endpoint halt feature: set or clear the stall */
+                       struct langwell_ep      *epn;
+                       epn = get_ep_by_windex(dev, wIndex);
+                       /* stall if endpoint doesn't exist */
+                       if (!epn) {
+                               ep0_stall(dev);
+                               goto end;
+                       }
+
+                       if (wValue != 0 || wLength != 0
+                                       || epn->ep_num > dev->ep_max)
+                               break;
+
+                       /* drop the lock around the (possibly sleeping) halt op */
+                       spin_unlock(&dev->lock);
+                       rc = langwell_ep_set_halt(&epn->ep,
+                                       (setup->bRequest == USB_REQ_SET_FEATURE)
+                                               ? 1 : 0);
+                       spin_lock(&dev->lock);
+
+               } else if ((setup->bRequestType & (USB_RECIP_MASK
+                               | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+                               | USB_TYPE_STANDARD)) {
+                       /* OTG HNP feature flags (USB OTG supplement) */
+                       if (!gadget_is_otg(&dev->gadget))
+                               break;
+                       else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
+                               dev->gadget.b_hnp_enable = 1;
+#ifdef OTG_TRANSCEIVER
+                               if (!dev->lotg->otg.default_a)
+                                       dev->lotg->hsm.b_hnp_enable = 1;
+#endif
+                       } else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
+                               dev->gadget.a_hnp_support = 1;
+                       else if (setup->bRequest ==
+                                       USB_DEVICE_A_ALT_HNP_SUPPORT)
+                               dev->gadget.a_alt_hnp_support = 1;
+                       else
+                               break;
+                       rc = 0;
+               } else
+                       break;
+
+               /* acknowledge with an IN status phase on success */
+               if (rc == 0) {
+                       if (prime_status_phase(dev, EP_DIR_IN))
+                               ep0_stall(dev);
+               }
+               goto end;
+       }
+
+       case USB_REQ_GET_DESCRIPTOR:
+               DBG(dev, "SETUP: USB_REQ_GET_DESCRIPTOR\n");
+               goto delegate;
+
+       case USB_REQ_SET_DESCRIPTOR:
+               DBG(dev, "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
+               goto delegate;
+
+       case USB_REQ_GET_CONFIGURATION:
+               DBG(dev, "SETUP: USB_REQ_GET_CONFIGURATION\n");
+               goto delegate;
+
+       case USB_REQ_SET_CONFIGURATION:
+               DBG(dev, "SETUP: USB_REQ_SET_CONFIGURATION\n");
+               goto delegate;
+
+       case USB_REQ_GET_INTERFACE:
+               DBG(dev, "SETUP: USB_REQ_GET_INTERFACE\n");
+               goto delegate;
+
+       case USB_REQ_SET_INTERFACE:
+               DBG(dev, "SETUP: USB_REQ_SET_INTERFACE\n");
+               goto delegate;
+
+       case USB_REQ_SYNCH_FRAME:
+               DBG(dev, "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
+               goto delegate;
+
+       default:
+               /* delegate USB standard requests to the gadget driver */
+               goto delegate;
+delegate:
+               /* USB requests handled by gadget */
+               if (wLength) {
+                       /* DATA phase from gadget, STATUS phase from udc */
+                       dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+                                       ?  USB_DIR_IN : USB_DIR_OUT;
+                       VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
+                                       dev->ep0_dir, wLength);
+                       spin_unlock(&dev->lock);
+                       if (dev->driver->setup(&dev->gadget,
+                                       &dev->local_setup_buff) < 0)
+                               ep0_stall(dev);
+                       spin_lock(&dev->lock);
+                       dev->ep0_state = (setup->bRequestType & USB_DIR_IN)
+                                       ?  DATA_STATE_XMIT : DATA_STATE_RECV;
+               } else {
+                       /* no DATA phase, IN STATUS phase from gadget */
+                       dev->ep0_dir = USB_DIR_IN;
+                       VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
+                                       dev->ep0_dir, wLength);
+                       spin_unlock(&dev->lock);
+                       if (dev->driver->setup(&dev->gadget,
+                                       &dev->local_setup_buff) < 0)
+                               ep0_stall(dev);
+                       spin_lock(&dev->lock);
+                       dev->ep0_state = WAIT_FOR_OUT_STATUS;
+               }
+               break;
+       }
+end:
+       VDBG(dev, "<--- %s()\n", __func__);
+       return;
+}
+
+
+/* transfer completion, process endpoint request and free the completed dTDs
+ * for this request
+ *
+ * Walks the request's dTD chain checking each dTD's status field.
+ * Returns 0 when the request completed successfully (req.actual is
+ * updated), 1 when a dTD is still active (request not finished yet),
+ * or a negative errno for protocol/halt/buffer/transaction errors.
+ */
+static int process_ep_req(struct langwell_udc *dev, int index,
+               struct langwell_request *curr_req)
+{
+       struct langwell_dtd     *curr_dtd;
+       struct langwell_dqh     *curr_dqh;
+       int                     td_complete, actual, remaining_length;
+       int                     i, dir;
+       u8                      dtd_status = 0;
+       int                     retval = 0;
+
+       curr_dqh = &dev->ep_dqh[index];
+       /* even dQH index = OUT (RX), odd = IN (TX) */
+       dir = index % 2;
+
+       curr_dtd = curr_req->head;
+       td_complete = 0;
+       /* actual starts at the requested length; remaining bytes are
+        * subtracted per dTD below */
+       actual = curr_req->req.length;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       for (i = 0; i < curr_req->dtd_count; i++) {
+               remaining_length = le16_to_cpu(curr_dtd->dtd_total);
+               actual -= remaining_length;
+
+               /* command execution states by dTD */
+               dtd_status = curr_dtd->dtd_status;
+
+               if (!dtd_status) {
+                       /* transfers completed successfully */
+                       if (!remaining_length) {
+                               td_complete++;
+                               VDBG(dev, "dTD transmitted successfully\n");
+                       } else {
+                               if (dir) {
+                                       /* IN dTD finished with data left over */
+                                       VDBG(dev, "TX dTD remains data\n");
+                                       retval = -EPROTO;
+                                       break;
+
+                               } else {
+                                       /* short OUT packet: normal completion */
+                                       td_complete++;
+                                       break;
+                               }
+                       }
+               } else {
+                       /* transfers completed with errors */
+                       if (dtd_status & DTD_STS_ACTIVE) {
+                               DBG(dev, "request not completed\n");
+                               retval = 1;
+                               return retval;
+                       } else if (dtd_status & DTD_STS_HALTED) {
+                               ERROR(dev, "dTD error %08x dQH[%d]\n",
+                                               dtd_status, index);
+                               /* clear the errors and halt condition */
+                               curr_dqh->dtd_status = 0;
+                               retval = -EPIPE;
+                               break;
+                       } else if (dtd_status & DTD_STS_DBE) {
+                               DBG(dev, "data buffer (overflow) error\n");
+                               retval = -EPROTO;
+                               break;
+                       } else if (dtd_status & DTD_STS_TRE) {
+                               DBG(dev, "transaction(ISO) error\n");
+                               retval = -EILSEQ;
+                               break;
+                       } else
+                               ERROR(dev, "unknown error (0x%x)!\n",
+                                               dtd_status);
+               }
+
+               if (i != curr_req->dtd_count - 1)
+                       curr_dtd = (struct langwell_dtd *)
+                               curr_dtd->next_dtd_virt;
+       }
+
+       if (retval)
+               return retval;
+
+       curr_req->req.actual = actual;
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/*
+ * complete DATA or STATUS phase of ep0, prime status phase if needed.
+ * If a SET_ADDRESS status phase just finished, commit the latched
+ * address to the deviceaddr register, then advance the ep0 state
+ * machine; any unexpected state stalls ep0.
+ */
+static void ep0_req_complete(struct langwell_udc *dev,
+               struct langwell_ep *ep0, struct langwell_request *req)
+{
+       u32     new_addr;
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (dev->usb_state == USB_STATE_ADDRESS) {
+               /* set the new address */
+               new_addr = (u32)dev->dev_addr;
+               writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);
+
+               /* read back for debug verification only */
+               new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
+               VDBG(dev, "new_addr = %d\n", new_addr);
+       }
+
+       done(ep0, req, 0);
+
+       switch (dev->ep0_state) {
+       case DATA_STATE_XMIT:
+               /* receive status phase */
+               if (prime_status_phase(dev, EP_DIR_OUT))
+                       ep0_stall(dev);
+               break;
+       case DATA_STATE_RECV:
+               /* send status phase */
+               if (prime_status_phase(dev, EP_DIR_IN))
+                       ep0_stall(dev);
+               break;
+       case WAIT_FOR_OUT_STATUS:
+               dev->ep0_state = WAIT_FOR_SETUP;
+               break;
+       case WAIT_FOR_SETUP:
+               ERROR(dev, "unexpect ep0 packets\n");
+               break;
+       default:
+               ep0_stall(dev);
+               break;
+       }
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * USB transfer completion interrupt -- scan the endptcomplete register,
+ * and for each completed endpoint walk its request queue, completing
+ * finished requests via done() (or ep0_req_complete() for ep0).
+ */
+static void handle_trans_complete(struct langwell_udc *dev)
+{
+       u32                     complete_bits;
+       int                     i, ep_num, dir, bit_mask, status;
+       struct langwell_ep      *epn;
+       struct langwell_request *curr_req, *temp_req;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       complete_bits = readl(&dev->op_regs->endptcomplete);
+       VDBG(dev, "endptcomplete register: 0x%08x\n", complete_bits);
+
+       /* Write-Clear the bits in endptcomplete register */
+       writel(complete_bits, &dev->op_regs->endptcomplete);
+
+       if (!complete_bits) {
+               DBG(dev, "complete_bits = 0\n");
+               goto done;
+       }
+
+       for (i = 0; i < dev->ep_max; i++) {
+               /* dQH index i -> endpoint number and direction:
+                * even = OUT (bits 0..15), odd = IN (bits 16..31) */
+               ep_num = i / 2;
+               dir = i % 2;
+
+               bit_mask = 1 << (ep_num + 16 * dir);
+
+               if (!(complete_bits & bit_mask))
+                       continue;
+
+               /* ep0 */
+               if (i == 1)
+                       epn = &dev->ep[0];
+               else
+                       epn = &dev->ep[i];
+
+               if (epn->name == NULL) {
+                       WARNING(dev, "invalid endpoint\n");
+                       continue;
+               }
+
+               if (i < 2)
+                       /* ep0 in and out */
+                       DBG(dev, "%s-%s transfer completed\n",
+                                       epn->name,
+                                       is_in(epn) ? "in" : "out");
+               else
+                       DBG(dev, "%s transfer completed\n", epn->name);
+
+               /* process the req queue until an incomplete request */
+               list_for_each_entry_safe(curr_req, temp_req,
+                               &epn->queue, queue) {
+                       status = process_ep_req(dev, i, curr_req);
+                       VDBG(dev, "%s req status: %d\n", epn->name, status);
+
+                       /* NOTE(review): any nonzero status breaks out, so
+                        * requests that finished WITH an error (status < 0)
+                        * are never completed via done() -- confirm this is
+                        * intended and not a leak of errored requests */
+                       if (status)
+                               break;
+
+                       /* write back status to req */
+                       curr_req->req.status = status;
+
+                       /* ep0 request completion */
+                       if (ep_num == 0) {
+                               ep0_req_complete(dev, epn, curr_req);
+                               break;
+                       } else {
+                               done(epn, curr_req, status);
+                       }
+               }
+       }
+done:
+       VDBG(dev, "<--- %s()\n", __func__);
+       return;
+}
+
+
+/*
+ * port change detect interrupt handler -- read back the negotiated bus
+ * speed once a bus reset finishes, and track LPM L0/L1 link-state
+ * transitions from portsc1.
+ */
+static void handle_port_change(struct langwell_udc *dev)
+{
+       u32     portsc1, devlc;
+       u32     speed;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (dev->bus_reset)
+               dev->bus_reset = 0;
+
+       portsc1 = readl(&dev->op_regs->portsc1);
+       devlc = readl(&dev->op_regs->devlc);
+       VDBG(dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
+                       portsc1, devlc);
+
+       /* bus reset is finished */
+       if (!(portsc1 & PORTS_PR)) {
+               /* get the speed */
+               speed = LPM_PSPD(devlc);
+               switch (speed) {
+               case LPM_SPEED_HIGH:
+                       dev->gadget.speed = USB_SPEED_HIGH;
+                       break;
+               case LPM_SPEED_FULL:
+                       dev->gadget.speed = USB_SPEED_FULL;
+                       break;
+               case LPM_SPEED_LOW:
+                       dev->gadget.speed = USB_SPEED_LOW;
+                       break;
+               default:
+                       dev->gadget.speed = USB_SPEED_UNKNOWN;
+                       break;
+               }
+               VDBG(dev, "speed = %d, dev->gadget.speed = %d\n",
+                               speed, dev->gadget.speed);
+       }
+
+       /* LPM L0 to L1 */
+       if (dev->lpm && dev->lpm_state == LPM_L0)
+               if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
+                               INFO(dev, "LPM L0 to L1\n");
+                               dev->lpm_state = LPM_L1;
+               }
+
+       /* LPM L1 to L0, force resume or remote wakeup finished */
+       if (dev->lpm && dev->lpm_state == LPM_L1)
+               if (!(portsc1 & PORTS_SUSP)) {
+                       if (portsc1 & PORTS_SLP)
+                               INFO(dev, "LPM L1 to L0, force resume\n");
+                       else
+                               INFO(dev, "LPM L1 to L0, remote wakeup\n");
+
+                       dev->lpm_state = LPM_L0;
+               }
+
+       /* update USB state */
+       if (!dev->resume_state)
+               dev->usb_state = USB_STATE_DEFAULT;
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * USB reset interrupt handler -- clear the device address, ep0 state
+ * and setup/complete semaphores, flush primed endpoints, then either
+ * note an in-progress bus reset or (if the port reset already ended,
+ * meaning the reset IRQ was serviced late) fully reset and restart the
+ * controller.
+ */
+static void handle_usb_reset(struct langwell_udc *dev)
+{
+       u32             deviceaddr,
+                       endptsetupstat,
+                       endptcomplete;
+       unsigned long   timeout;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       /* Write-Clear the device address */
+       deviceaddr = readl(&dev->op_regs->deviceaddr);
+       writel(deviceaddr & ~USBADR_MASK, &dev->op_regs->deviceaddr);
+
+       dev->dev_addr = 0;
+
+       /* clear usb state */
+       dev->resume_state = 0;
+
+       /* LPM L1 to L0, reset */
+       if (dev->lpm)
+               dev->lpm_state = LPM_L0;
+
+       /* rewind ep0 state machine and OTG feature flags */
+       dev->ep0_dir = USB_DIR_OUT;
+       dev->ep0_state = WAIT_FOR_SETUP;
+       dev->remote_wakeup = 0;         /* default to 0 on reset */
+       dev->gadget.b_hnp_enable = 0;
+       dev->gadget.a_hnp_support = 0;
+       dev->gadget.a_alt_hnp_support = 0;
+
+       /* Write-Clear all the setup token semaphores */
+       endptsetupstat = readl(&dev->op_regs->endptsetupstat);
+       writel(endptsetupstat, &dev->op_regs->endptsetupstat);
+
+       /* Write-Clear all the endpoint complete status bits */
+       endptcomplete = readl(&dev->op_regs->endptcomplete);
+       writel(endptcomplete, &dev->op_regs->endptcomplete);
+
+       /* wait until all endptprime bits cleared */
+       timeout = jiffies + PRIME_TIMEOUT;
+       while (readl(&dev->op_regs->endptprime)) {
+               if (time_after(jiffies, timeout)) {
+                       ERROR(dev, "USB reset timeout\n");
+                       break;
+               }
+               cpu_relax();
+       }
+
+       /* write 1s to endptflush register to clear any primed buffers */
+       writel((u32) ~0, &dev->op_regs->endptflush);
+
+       if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
+               VDBG(dev, "USB bus reset\n");
+               /* bus is resetting */
+               dev->bus_reset = 1;
+
+               /* reset all the queues, stop all USB activities */
+               stop_activity(dev, dev->driver);
+               dev->usb_state = USB_STATE_DEFAULT;
+       } else {
+               VDBG(dev, "device controller reset\n");
+               /* controller reset */
+               langwell_udc_reset(dev);
+
+               /* reset all the queues, stop all USB activities */
+               stop_activity(dev, dev->driver);
+
+               /* reset ep0 dQH and endptctrl */
+               ep0_reset(dev);
+
+               /* enable interrupt and set controller to run state */
+               langwell_udc_start(dev);
+
+               dev->usb_state = USB_STATE_ATTACHED;
+       }
+
+#ifdef OTG_TRANSCEIVER
+       /* refer to USB OTG 6.6.2.3 b_hnp_en is cleared */
+       if (!dev->lotg->otg.default_a)
+               dev->lotg->hsm.b_hnp_enable = 0;
+#endif
+
+       VDBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * USB bus suspend/resume interrupt -- record the suspended state,
+ * notify the OTG state machine (when built in), call the gadget
+ * driver's ->suspend() without dev->lock held, and put the PHY into
+ * low power suspend.
+ */
+static void handle_bus_suspend(struct langwell_udc *dev)
+{
+       u32             devlc;
+       DBG(dev, "---> %s()\n", __func__);
+
+       /* remember the pre-suspend state so resume can restore it */
+       dev->resume_state = dev->usb_state;
+       dev->usb_state = USB_STATE_SUSPENDED;
+
+#ifdef OTG_TRANSCEIVER
+       if (dev->lotg->otg.default_a) {
+               if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
+                       dev->lotg->hsm.b_bus_suspend = 1;
+                       /* notify transceiver the state changes */
+                       if (spin_trylock(&dev->lotg->wq_lock)) {
+                               langwell_update_transceiver();
+                               spin_unlock(&dev->lotg->wq_lock);
+                       }
+               }
+               dev->lotg->hsm.b_bus_suspend_vld++;
+       } else {
+               if (!dev->lotg->hsm.a_bus_suspend) {
+                       dev->lotg->hsm.a_bus_suspend = 1;
+                       /* notify transceiver the state changes */
+                       if (spin_trylock(&dev->lotg->wq_lock)) {
+                               langwell_update_transceiver();
+                               spin_unlock(&dev->lotg->wq_lock);
+                       }
+               }
+       }
+#endif
+
+       /* report suspend to the driver; callback runs unlocked */
+       if (dev->driver) {
+               if (dev->driver->suspend) {
+                       spin_unlock(&dev->lock);
+                       dev->driver->suspend(&dev->gadget);
+                       spin_lock(&dev->lock);
+                       DBG(dev, "suspend %s\n", dev->driver->driver.name);
+               }
+       }
+
+       /* enter PHY low power suspend */
+       devlc = readl(&dev->op_regs->devlc);
+       VDBG(dev, "devlc = 0x%08x\n", devlc);
+       devlc |= LPM_PHCD;
+       writel(devlc, &dev->op_regs->devlc);
+
+       DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/*
+ * Bus resume handling: restore the pre-suspend USB state, wake the PHY
+ * out of low power suspend, update the OTG state machine (when built
+ * in), and call the gadget driver's ->resume() without dev->lock held.
+ */
+static void handle_bus_resume(struct langwell_udc *dev)
+{
+       u32             devlc;
+       DBG(dev, "---> %s()\n", __func__);
+
+       dev->usb_state = dev->resume_state;
+       dev->resume_state = 0;
+
+       /* exit PHY low power suspend */
+       devlc = readl(&dev->op_regs->devlc);
+       VDBG(dev, "devlc = 0x%08x\n", devlc);
+       devlc &= ~LPM_PHCD;
+       writel(devlc, &dev->op_regs->devlc);
+
+#ifdef OTG_TRANSCEIVER
+       if (dev->lotg->otg.default_a == 0)
+               dev->lotg->hsm.a_bus_suspend = 0;
+#endif
+
+       /* report resume to the driver; callback runs unlocked */
+       if (dev->driver) {
+               if (dev->driver->resume) {
+                       spin_unlock(&dev->lock);
+                       dev->driver->resume(&dev->gadget);
+                       spin_lock(&dev->lock);
+                       DBG(dev, "resume %s\n", dev->driver->driver.name);
+               }
+       }
+
+       DBG(dev, "<--- %s()\n", __func__);
+}
+
+
+/* USB device controller interrupt handler */
+static irqreturn_t langwell_irq(int irq, void *_dev)
+{
+       struct langwell_udc     *dev = _dev;
+       u32                     usbsts,
+                               usbintr,
+                               irq_sts,
+                               portsc1;
+
+       VDBG(dev, "---> %s()\n", __func__);
+
+       if (dev->stopped) {
+               VDBG(dev, "handle IRQ_NONE\n");
+               VDBG(dev, "<--- %s()\n", __func__);
+               return IRQ_NONE;
+       }
+
+       spin_lock(&dev->lock);
+
+       /* USB status */
+       usbsts = readl(&dev->op_regs->usbsts);
+
+       /* USB interrupt enable */
+       usbintr = readl(&dev->op_regs->usbintr);
+
+       irq_sts = usbsts & usbintr;
+       VDBG(dev, "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
+                       usbsts, usbintr, irq_sts);
+
+       if (!irq_sts) {
+               VDBG(dev, "handle IRQ_NONE\n");
+               VDBG(dev, "<--- %s()\n", __func__);
+               spin_unlock(&dev->lock);
+               return IRQ_NONE;
+       }
+
+       /* Write-Clear interrupt status bits */
+       writel(irq_sts, &dev->op_regs->usbsts);
+
+       /* resume from suspend */
+       portsc1 = readl(&dev->op_regs->portsc1);
+       if (dev->usb_state == USB_STATE_SUSPENDED)
+               if (!(portsc1 & PORTS_SUSP))
+                       handle_bus_resume(dev);
+
+       /* USB interrupt */
+       if (irq_sts & STS_UI) {
+               VDBG(dev, "USB interrupt\n");
+
+               /* setup packet received from ep0 */
+               if (readl(&dev->op_regs->endptsetupstat)
+                               & EP0SETUPSTAT_MASK) {
+                       VDBG(dev, "USB SETUP packet received interrupt\n");
+                       /* setup tripwire semaphore */
+                       setup_tripwire(dev);
+                       handle_setup_packet(dev, &dev->local_setup_buff);
+               }
+
+               /* USB transfer completion */
+               if (readl(&dev->op_regs->endptcomplete)) {
+                       VDBG(dev, "USB transfer completion interrupt\n");
+                       handle_trans_complete(dev);
+               }
+       }
+
+       /* SOF received interrupt (for ISO transfer) */
+       if (irq_sts & STS_SRI) {
+               /* FIXME */
+               /* VDBG(dev, "SOF received interrupt\n"); */
+       }
+
+       /* port change detect interrupt */
+       if (irq_sts & STS_PCI) {
+               VDBG(dev, "port change detect interrupt\n");
+               handle_port_change(dev);
+       }
+
+       /* suspend interrupt */
+       if (irq_sts & STS_SLI) {
+               VDBG(dev, "suspend interrupt\n");
+               handle_bus_suspend(dev);
+       }
+
+       /* USB reset interrupt */
+       if (irq_sts & STS_URI) {
+               VDBG(dev, "USB reset interrupt\n");
+               handle_usb_reset(dev);
+       }
+
+       /* USB error or system error interrupt */
+       if (irq_sts & (STS_UEI | STS_SEI)) {
+               /* FIXME */
+               WARNING(dev, "error IRQ, irq_sts: %x\n", irq_sts);
+       }
+
+       spin_unlock(&dev->lock);
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return IRQ_HANDLED;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* release device structure */
+static void gadget_release(struct device *_dev)
+{
+       struct langwell_udc     *dev = the_controller;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       complete(dev->done);
+
+       DBG(dev, "<--- %s()\n", __func__);
+       kfree(dev);
+}
+
+
+/* tear down the binding between this driver and the pci device */
+static void langwell_udc_remove(struct pci_dev *pdev)
+{
+       struct langwell_udc     *dev = the_controller;
+
+       DECLARE_COMPLETION(done);
+
+       BUG_ON(dev->driver);
+       DBG(dev, "---> %s()\n", __func__);
+
+       dev->done = &done;
+
+       /* free memory allocated in probe */
+       if (dev->dtd_pool)
+               dma_pool_destroy(dev->dtd_pool);
+
+       if (dev->status_req) {
+               kfree(dev->status_req->req.buf);
+               kfree(dev->status_req);
+       }
+
+       if (dev->ep_dqh)
+               dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
+                       dev->ep_dqh, dev->ep_dqh_dma);
+
+       kfree(dev->ep);
+
+       /* disable IRQ handler */
+       if (dev->got_irq)
+               free_irq(pdev->irq, dev);
+
+#ifndef        OTG_TRANSCEIVER
+       if (dev->cap_regs)
+               iounmap(dev->cap_regs);
+
+       if (dev->region)
+               release_mem_region(pci_resource_start(pdev, 0),
+                               pci_resource_len(pdev, 0));
+
+       if (dev->enabled)
+               pci_disable_device(pdev);
+#else
+       if (dev->transceiver) {
+               otg_put_transceiver(dev->transceiver);
+               dev->transceiver = NULL;
+               dev->lotg = NULL;
+       }
+#endif
+
+       dev->cap_regs = NULL;
+
+       INFO(dev, "unbind\n");
+       DBG(dev, "<--- %s()\n", __func__);
+
+       device_unregister(&dev->gadget.dev);
+       device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
+
+#ifndef        OTG_TRANSCEIVER
+       pci_set_drvdata(pdev, NULL);
+#endif
+
+       /* free dev, wait for the release() finished */
+       wait_for_completion(&done);
+
+       the_controller = NULL;
+}
+
+
+/*
+ * wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us.
+ */
+static int langwell_udc_probe(struct pci_dev *pdev,
+               const struct pci_device_id *id)
+{
+       struct langwell_udc     *dev;
+#ifndef        OTG_TRANSCEIVER
+       unsigned long           resource, len;
+#endif
+       void                    __iomem *base = NULL;
+       size_t                  size;
+       int                     retval;
+
+       if (the_controller) {
+               dev_warn(&pdev->dev, "ignoring\n");
+               return -EBUSY;
+       }
+
+       /* alloc, and start init */
+       dev = kzalloc(sizeof *dev, GFP_KERNEL);
+       if (dev == NULL) {
+               retval = -ENOMEM;
+               goto error;
+       }
+
+       /* initialize device spinlock */
+       spin_lock_init(&dev->lock);
+
+       dev->pdev = pdev;
+       DBG(dev, "---> %s()\n", __func__);
+
+#ifdef OTG_TRANSCEIVER
+       /* PCI device is already enabled by otg_transceiver driver */
+       dev->enabled = 1;
+
+       /* mem region and register base */
+       dev->region = 1;
+       dev->transceiver = otg_get_transceiver();
+       dev->lotg = otg_to_langwell(dev->transceiver);
+       base = dev->lotg->regs;
+#else
+       pci_set_drvdata(pdev, dev);
+
+       /* now all the pci goodies ... */
+       if (pci_enable_device(pdev) < 0) {
+               retval = -ENODEV;
+               goto error;
+       }
+       dev->enabled = 1;
+
+       /* control register: BAR 0 */
+       resource = pci_resource_start(pdev, 0);
+       len = pci_resource_len(pdev, 0);
+       if (!request_mem_region(resource, len, driver_name)) {
+               ERROR(dev, "controller already in use\n");
+               retval = -EBUSY;
+               goto error;
+       }
+       dev->region = 1;
+
+       base = ioremap_nocache(resource, len);
+#endif
+       if (base == NULL) {
+               ERROR(dev, "can't map memory\n");
+               retval = -EFAULT;
+               goto error;
+       }
+
+       dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
+       VDBG(dev, "dev->cap_regs: %p\n", dev->cap_regs);
+       dev->op_regs = (struct langwell_op_regs __iomem *)
+               (base + OP_REG_OFFSET);
+       VDBG(dev, "dev->op_regs: %p\n", dev->op_regs);
+
+       /* irq setup after old hardware is cleaned up */
+       if (!pdev->irq) {
+               ERROR(dev, "No IRQ. Check PCI setup!\n");
+               retval = -ENODEV;
+               goto error;
+       }
+
+#ifndef        OTG_TRANSCEIVER
+       INFO(dev, "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
+                       pdev->irq, resource, len, base);
+       /* enables bus-mastering for device dev */
+       pci_set_master(pdev);
+
+       if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
+                               driver_name, dev) != 0) {
+               ERROR(dev, "request interrupt %d failed\n", pdev->irq);
+               retval = -EBUSY;
+               goto error;
+       }
+       dev->got_irq = 1;
+#endif
+
+       /* set stopped bit */
+       dev->stopped = 1;
+
+       /* capabilities and endpoint number */
+       dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
+       dev->dciversion = readw(&dev->cap_regs->dciversion);
+       dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
+       VDBG(dev, "dev->lpm: %d\n", dev->lpm);
+       VDBG(dev, "dev->dciversion: 0x%04x\n", dev->dciversion);
+       VDBG(dev, "dccparams: 0x%08x\n", readl(&dev->cap_regs->dccparams));
+       VDBG(dev, "dev->devcap: %d\n", dev->devcap);
+       if (!dev->devcap) {
+               ERROR(dev, "can't support device mode\n");
+               retval = -ENODEV;
+               goto error;
+       }
+
+       /* a pair of endpoints (out/in) for each address */
+       dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
+       VDBG(dev, "dev->ep_max: %d\n", dev->ep_max);
+
+       /* allocate endpoints memory */
+       dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
+                       GFP_KERNEL);
+       if (!dev->ep) {
+               ERROR(dev, "allocate endpoints memory failed\n");
+               retval = -ENOMEM;
+               goto error;
+       }
+
+       /* allocate device dQH memory */
+       size = dev->ep_max * sizeof(struct langwell_dqh);
+       VDBG(dev, "orig size = %d\n", size);
+       if (size < DQH_ALIGNMENT)
+               size = DQH_ALIGNMENT;
+       else if ((size % DQH_ALIGNMENT) != 0) {
+               size += DQH_ALIGNMENT + 1;
+               size &= ~(DQH_ALIGNMENT - 1);
+       }
+       dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
+                                       &dev->ep_dqh_dma, GFP_KERNEL);
+       if (!dev->ep_dqh) {
+               ERROR(dev, "allocate dQH memory failed\n");
+               retval = -ENOMEM;
+               goto error;
+       }
+       dev->ep_dqh_size = size;
+       VDBG(dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
+
+       /* initialize ep0 status request structure */
+       dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
+       if (!dev->status_req) {
+               ERROR(dev, "allocate status_req memory failed\n");
+               retval = -ENOMEM;
+               goto error;
+       }
+       INIT_LIST_HEAD(&dev->status_req->queue);
+
+       /* allocate a small amount of memory to get valid address */
+       dev->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+       dev->status_req->req.dma = virt_to_phys(dev->status_req->req.buf);
+
+       dev->resume_state = USB_STATE_NOTATTACHED;
+       dev->usb_state = USB_STATE_POWERED;
+       dev->ep0_dir = USB_DIR_OUT;
+       dev->remote_wakeup = 0; /* default to 0 on reset */
+
+#ifndef        OTG_TRANSCEIVER
+       /* reset device controller */
+       langwell_udc_reset(dev);
+#endif
+
+       /* initialize gadget structure */
+       dev->gadget.ops = &langwell_ops;        /* usb_gadget_ops */
+       dev->gadget.ep0 = &dev->ep[0].ep;       /* gadget ep0 */
+       INIT_LIST_HEAD(&dev->gadget.ep_list);   /* ep_list */
+       dev->gadget.speed = USB_SPEED_UNKNOWN;  /* speed */
+       dev->gadget.is_dualspeed = 1;           /* support dual speed */
+#ifdef OTG_TRANSCEIVER
+       dev->gadget.is_otg = 1;                 /* support otg mode */
+#endif
+
+       /* the "gadget" abstracts/virtualizes the controller */
+       dev_set_name(&dev->gadget.dev, "gadget");
+       dev->gadget.dev.parent = &pdev->dev;
+       dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+       dev->gadget.dev.release = gadget_release;
+       dev->gadget.name = driver_name;         /* gadget name */
+
+       /* controller endpoints reinit */
+       eps_reinit(dev);
+
+#ifndef        OTG_TRANSCEIVER
+       /* reset ep0 dQH and endptctrl */
+       ep0_reset(dev);
+#endif
+
+       /* create dTD dma_pool resource */
+       dev->dtd_pool = dma_pool_create("langwell_dtd",
+                       &dev->pdev->dev,
+                       sizeof(struct langwell_dtd),
+                       DTD_ALIGNMENT,
+                       DMA_BOUNDARY);
+
+       if (!dev->dtd_pool) {
+               retval = -ENOMEM;
+               goto error;
+       }
+
+       /* done */
+       INFO(dev, "%s\n", driver_desc);
+       INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
+       INFO(dev, "Driver version: " DRIVER_VERSION "\n");
+       INFO(dev, "Support (max) %d endpoints\n", dev->ep_max);
+       INFO(dev, "Device interface version: 0x%04x\n", dev->dciversion);
+       INFO(dev, "Controller mode: %s\n", dev->devcap ? "Device" : "Host");
+       INFO(dev, "Support USB LPM: %s\n", dev->lpm ? "Yes" : "No");
+
+       VDBG(dev, "After langwell_udc_probe(), print all registers:\n");
+#ifdef VERBOSE
+       print_all_registers(dev);
+#endif
+
+       the_controller = dev;
+
+       retval = device_register(&dev->gadget.dev);
+       if (retval)
+               goto error;
+
+       retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
+       if (retval)
+               goto error;
+
+       VDBG(dev, "<--- %s()\n", __func__);
+       return 0;
+
+error:
+       if (dev) {
+               DBG(dev, "<--- %s()\n", __func__);
+               langwell_udc_remove(pdev);
+       }
+
+       return retval;
+}
+
+
+/* device controller suspend */
+static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct langwell_udc     *dev = the_controller;
+       u32                     devlc;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       /* disable interrupt and set controller to stop state */
+       langwell_udc_stop(dev);
+
+       /* disable IRQ handler */
+       if (dev->got_irq)
+               free_irq(pdev->irq, dev);
+       dev->got_irq = 0;
+
+
+       /* save PCI state */
+       pci_save_state(pdev);
+
+       /* set device power state */
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       /* enter PHY low power suspend */
+       devlc = readl(&dev->op_regs->devlc);
+       VDBG(dev, "devlc = 0x%08x\n", devlc);
+       devlc |= LPM_PHCD;
+       writel(devlc, &dev->op_regs->devlc);
+
+       DBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/* device controller resume */
+static int langwell_udc_resume(struct pci_dev *pdev)
+{
+       struct langwell_udc     *dev = the_controller;
+       u32                     devlc;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       /* exit PHY low power suspend */
+       devlc = readl(&dev->op_regs->devlc);
+       VDBG(dev, "devlc = 0x%08x\n", devlc);
+       devlc &= ~LPM_PHCD;
+       writel(devlc, &dev->op_regs->devlc);
+
+       /* set device D0 power state */
+       pci_set_power_state(pdev, PCI_D0);
+
+       /* restore PCI state */
+       pci_restore_state(pdev);
+
+       /* enable IRQ handler */
+       if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED, driver_name, dev)
+                       != 0) {
+               ERROR(dev, "request interrupt %d failed\n", pdev->irq);
+               return -1;
+       }
+       dev->got_irq = 1;
+
+       /* reset and start controller to run state */
+       if (dev->stopped) {
+               /* reset device controller */
+               langwell_udc_reset(dev);
+
+               /* reset ep0 dQH and endptctrl */
+               ep0_reset(dev);
+
+               /* start device if gadget is loaded */
+               if (dev->driver)
+                       langwell_udc_start(dev);
+       }
+
+       /* reset USB status */
+       dev->usb_state = USB_STATE_ATTACHED;
+       dev->ep0_state = WAIT_FOR_SETUP;
+       dev->ep0_dir = USB_DIR_OUT;
+
+       DBG(dev, "<--- %s()\n", __func__);
+       return 0;
+}
+
+
+/* pci driver shutdown */
+static void langwell_udc_shutdown(struct pci_dev *pdev)
+{
+       struct langwell_udc     *dev = the_controller;
+       u32                     usbmode;
+
+       DBG(dev, "---> %s()\n", __func__);
+
+       /* reset controller mode to IDLE */
+       usbmode = readl(&dev->op_regs->usbmode);
+       DBG(dev, "usbmode = 0x%08x\n", usbmode);
+       usbmode &= (~3 | MODE_IDLE);
+       writel(usbmode, &dev->op_regs->usbmode);
+
+       DBG(dev, "<--- %s()\n", __func__);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct pci_device_id pci_ids[] = { {
+       .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+       .class_mask =   ~0,
+       .vendor =       0x8086,
+       .device =       0x0811,
+       .subvendor =    PCI_ANY_ID,
+       .subdevice =    PCI_ANY_ID,
+}, { /* end: all zeroes */ }
+};
+
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+
+static struct pci_driver langwell_pci_driver = {
+       .name =         (char *) driver_name,
+       .id_table =     pci_ids,
+
+       .probe =        langwell_udc_probe,
+       .remove =       langwell_udc_remove,
+
+       /* device controller suspend/resume */
+       .suspend =      langwell_udc_suspend,
+       .resume =       langwell_udc_resume,
+
+       .shutdown =     langwell_udc_shutdown,
+};
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+
+static int __init init(void)
+{
+#ifdef OTG_TRANSCEIVER
+       return langwell_register_peripheral(&langwell_pci_driver);
+#else
+       return pci_register_driver(&langwell_pci_driver);
+#endif
+}
+module_init(init);
+
+
+static void __exit cleanup(void)
+{
+#ifdef OTG_TRANSCEIVER
+       return langwell_unregister_peripheral(&langwell_pci_driver);
+#else
+       pci_unregister_driver(&langwell_pci_driver);
+#endif
+}
+module_exit(cleanup);
+
diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h
new file mode 100644 (file)
index 0000000..9719934
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/usb/langwell_udc.h>
+
+#if defined(CONFIG_USB_LANGWELL_OTG)
+#include <linux/usb/langwell_otg.h>
+#endif
+
+
+/*-------------------------------------------------------------------------*/
+
+/* driver data structures and utilities */
+
+/*
+ * dTD: Device Endpoint Transfer Descriptor
+ * describe to the device controller the location and quantity of
+ * data to be send/received for given transfer
+ */
+struct langwell_dtd {
+       u32     dtd_next;
+/* bits 31:5, next transfer element pointer */
+#define        DTD_NEXT(d)     (((d)>>5)&0x7ffffff)
+#define        DTD_NEXT_MASK   (0x7ffffff << 5)
+/* terminate */
+#define        DTD_TERM        BIT(0)
+       /* bits 7:0, execution back states */
+       u32     dtd_status:8;
+#define        DTD_STATUS(d)   (((d)>>0)&0xff)
+#define        DTD_STS_ACTIVE  BIT(7)  /* active */
+#define        DTD_STS_HALTED  BIT(6)  /* halted */
+#define        DTD_STS_DBE     BIT(5)  /* data buffer error */
+#define        DTD_STS_TRE     BIT(3)  /* transaction error  */
+       /* bits 9:8 */
+       u32     dtd_res0:2;
+       /* bits 11:10, multiplier override */
+       u32     dtd_multo:2;
+#define        DTD_MULTO       (BIT(11) | BIT(10))
+       /* bits 14:12 */
+       u32     dtd_res1:3;
+       /* bit 15, interrupt on complete */
+       u32     dtd_ioc:1;
+#define        DTD_IOC         BIT(15)
+       /* bits 30:16, total bytes */
+       u32     dtd_total:15;
+#define        DTD_TOTAL(d)    (((d)>>16)&0x7fff)
+#define        DTD_MAX_TRANSFER_LENGTH 0x4000
+       /* bit 31 */
+       u32     dtd_res2:1;
+       /* dTD buffer pointer page 0 to 4 */
+       u32     dtd_buf[5];
+#define        DTD_OFFSET_MASK 0xfff
+/* bits 31:12, buffer pointer */
+#define        DTD_BUFFER(d)   (((d)>>12)&0x3ff)
+/* bits 11:0, current offset */
+#define        DTD_C_OFFSET(d) (((d)>>0)&0xfff)
+/* bits 10:0, frame number */
+#define        DTD_FRAME(d)    (((d)>>0)&0x7ff)
+
+       /* driver-private parts */
+
+       /* dtd dma address */
+       dma_addr_t              dtd_dma;
+       /* next dtd virtual address */
+       struct langwell_dtd     *next_dtd_virt;
+};
+
+
+/*
+ * dQH: Device Endpoint Queue Head
+ * describe where all transfers are managed
+ * 48-byte data structure, aligned on 64-byte boundary
+ *
+ * These are associated with dTD structure
+ */
+struct langwell_dqh {
+       /* endpoint capabilities and characteristics */
+       u32     dqh_res0:15;    /* bits 14:0 */
+       u32     dqh_ios:1;      /* bit 15, interrupt on setup */
+#define        DQH_IOS         BIT(15)
+       u32     dqh_mpl:11;     /* bits 26:16, maximum packet length */
+#define        DQH_MPL         (0x7ff << 16)
+       u32     dqh_res1:2;     /* bits 28:27 */
+       u32     dqh_zlt:1;      /* bit 29, zero length termination */
+#define        DQH_ZLT         BIT(29)
+       u32     dqh_mult:2;     /* bits 31:30 */
+#define        DQH_MULT        (BIT(30) | BIT(31))
+
+       /* current dTD pointer */
+       u32     dqh_current;    /* locate the transfer in progress */
+#define DQH_C_DTD(e)   \
+       (((e)>>5)&0x7ffffff)    /* bits 31:5, current dTD pointer */
+
+       /* transfer overlay, hardware parts of a struct langwell_dtd */
+       u32     dtd_next;
+       u32     dtd_status:8;   /* bits 7:0, execution back states */
+       u32     dtd_res0:2;     /* bits 9:8 */
+       u32     dtd_multo:2;    /* bits 11:10, multiplier override */
+       u32     dtd_res1:3;     /* bits 14:12 */
+       u32     dtd_ioc:1;      /* bit 15, interrupt on complete */
+       u32     dtd_total:15;   /* bits 30:16, total bytes */
+       u32     dtd_res2:1;     /* bit 31 */
+       u32     dtd_buf[5];     /* dTD buffer pointer page 0 to 4 */
+
+       u32     dqh_res2;
+       struct usb_ctrlrequest  dqh_setup;      /* setup packet buffer */
+} __attribute__ ((aligned(64)));
+
+
+/* endpoint data structure */
+struct langwell_ep {
+       struct usb_ep           ep;
+       dma_addr_t              dma;
+       struct langwell_udc     *dev;
+       unsigned long           irqs;
+       struct list_head        queue;
+       struct langwell_dqh     *dqh;
+       const struct usb_endpoint_descriptor    *desc;
+       char                    name[14];
+       unsigned                stopped:1,
+                               ep_type:2,
+                               ep_num:8;
+};
+
+
+/* request data structure */
+struct langwell_request {
+       struct usb_request      req;
+       struct langwell_dtd     *dtd, *head, *tail;
+       struct langwell_ep      *ep;
+       dma_addr_t              dtd_dma;
+       struct list_head        queue;
+       unsigned                dtd_count;
+       unsigned                mapped:1;
+};
+
+
+/* ep0 transfer state */
+enum ep0_state {
+       WAIT_FOR_SETUP,
+       DATA_STATE_XMIT,
+       DATA_STATE_NEED_ZLP,
+       WAIT_FOR_OUT_STATUS,
+       DATA_STATE_RECV,
+};
+
+
+/* device suspend state */
+enum lpm_state {
+       LPM_L0, /* on */
+       LPM_L1, /* LPM L1 sleep */
+       LPM_L2, /* suspend */
+       LPM_L3, /* off */
+};
+
+
+/* device data structure */
+struct langwell_udc {
+       /* each pci device provides one gadget, several endpoints */
+       struct usb_gadget       gadget;
+       spinlock_t              lock;   /* device lock */
+       struct langwell_ep      *ep;
+       struct usb_gadget_driver        *driver;
+       struct otg_transceiver  *transceiver;
+       u8                      dev_addr;
+       u32                     usb_state;
+       u32                     resume_state;
+       u32                     bus_reset;
+       enum lpm_state          lpm_state;
+       enum ep0_state          ep0_state;
+       u32                     ep0_dir;
+       u16                     dciversion;
+       unsigned                ep_max;
+       unsigned                devcap:1,
+                               enabled:1,
+                               region:1,
+                               got_irq:1,
+                               powered:1,
+                               remote_wakeup:1,
+                               rate:1,
+                               is_reset:1,
+                               softconnected:1,
+                               vbus_active:1,
+                               suspended:1,
+                               stopped:1,
+                               lpm:1;  /* LPM capability */
+
+       /* pci state used to access those endpoints */
+       struct pci_dev          *pdev;
+
+       /* Langwell otg transceiver */
+       struct langwell_otg     *lotg;
+
+       /* control registers */
+       struct langwell_cap_regs        __iomem *cap_regs;
+       struct langwell_op_regs         __iomem *op_regs;
+
+       struct usb_ctrlrequest  local_setup_buff;
+       struct langwell_dqh     *ep_dqh;
+       size_t                  ep_dqh_size;
+       dma_addr_t              ep_dqh_dma;
+
+       /* ep0 status request */
+       struct langwell_request *status_req;
+
+       /* dma pool */
+       struct dma_pool         *dtd_pool;
+
+       /* make sure release() is done */
+       struct completion       *done;
+};
+
index 8cc676ecbb23ca28f2200575271be80e7b83faee..1937d8c7b433201dc0d31474f99014fc25ce2f04 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/usb.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
-#include <mach/pxa2xx-regs.h> /* FIXME: for PSSR */
 #include <mach/udc.h>
 
 #include "pxa27x_udc.h"
@@ -473,6 +472,23 @@ static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
                        (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
 }
 
+/**
+ * ep_write_UDCCSR - set bits in UDCCSR
+ * @ep: udc endpoint
+ * @mask: bits to set in UDCCSR
+ *
+ * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
+ *
+ * A specific case is applied to ep0 : the ACM bit is always set to 1, for
+ * SET_INTERFACE and SET_CONFIGURATION.
+ */
+static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
+{
+       if (is_ep0(ep))
+               mask |= UDCCSR0_ACM;
+       udc_ep_writel(ep, UDCCSR, mask);
+}
+
 /**
  * ep_count_bytes_remain - get how many bytes in udc endpoint
  * @ep: udc endpoint
@@ -861,7 +877,7 @@ static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
                *buf++ = udc_ep_readl(ep, UDCDR);
        req->req.actual += count;
 
-       udc_ep_writel(ep, UDCCSR, UDCCSR_PC);
+       ep_write_UDCCSR(ep, UDCCSR_PC);
 
        return count;
 }
@@ -969,12 +985,12 @@ static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
                if (udccsr & UDCCSR_PC) {
                        ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
                                udccsr);
-                       udc_ep_writel(ep, UDCCSR, UDCCSR_PC);
+                       ep_write_UDCCSR(ep, UDCCSR_PC);
                }
                if (udccsr & UDCCSR_TRN) {
                        ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
                                udccsr);
-                       udc_ep_writel(ep, UDCCSR, UDCCSR_TRN);
+                       ep_write_UDCCSR(ep, UDCCSR_TRN);
                }
 
                count = write_packet(ep, req, max);
@@ -996,7 +1012,7 @@ static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
                }
 
                if (is_short)
-                       udc_ep_writel(ep, UDCCSR, UDCCSR_SP);
+                       ep_write_UDCCSR(ep, UDCCSR_SP);
 
                /* requests complete when all IN data is in the FIFO */
                if (is_last) {
@@ -1029,7 +1045,7 @@ static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
 
        while (epout_has_pkt(ep)) {
                count = read_packet(ep, req);
-               udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC);
+               ep_write_UDCCSR(ep, UDCCSR0_OPC);
                inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
 
                is_short = (count < ep->fifo_size);
@@ -1074,7 +1090,7 @@ static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
 
        /* Sends either a short packet or a 0 length packet */
        if (unlikely(is_short))
-               udc_ep_writel(ep, UDCCSR, UDCCSR0_IPR);
+               ep_write_UDCCSR(ep, UDCCSR0_IPR);
 
        ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
                count, is_short ? "/S" : "", is_last ? "/L" : "",
@@ -1277,7 +1293,7 @@ static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
 
        /* FST, FEF bits are the same for control and non control endpoints */
        rc = 0;
-       udc_ep_writel(ep, UDCCSR, UDCCSR_FST | UDCCSR_FEF);
+       ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
        if (is_ep0(ep))
                set_ep0state(ep->dev, STALL);
 
@@ -1343,7 +1359,7 @@ static void pxa_ep_fifo_flush(struct usb_ep *_ep)
                        udc_ep_readl(ep, UDCDR);
        } else {
                /* most IN status is the same, but ISO can't stall */
-               udc_ep_writel(ep, UDCCSR,
+               ep_write_UDCCSR(ep,
                                UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
                                | (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
        }
@@ -1728,6 +1744,7 @@ static void udc_enable(struct pxa_udc *udc)
        memset(&udc->stats, 0, sizeof(udc->stats));
 
        udc_set_mask_UDCCR(udc, UDCCR_UDE);
+       ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
        udelay(2);
        if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
                dev_err(udc->dev, "Configuration errors, udc disabled\n");
@@ -1893,6 +1910,15 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
 
        nuke(ep, -EPROTO);
 
+       /*
+        * In the PXA320 manual, in the section about Back-to-Back setup
+        * packets, it describes this situation.  The solution is to set OPC to
+        * get rid of the status packet, and then continue with the setup
+        * packet. Generalize to pxa27x CPUs.
+        */
+       if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
+               ep_write_UDCCSR(ep, UDCCSR0_OPC);
+
        /* read SETUP packet */
        for (i = 0; i < 2; i++) {
                if (unlikely(ep_is_empty(ep)))
@@ -1919,7 +1945,7 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
                set_ep0state(udc, OUT_DATA_STAGE);
 
        /* Tell UDC to enter Data Stage */
-       udc_ep_writel(ep, UDCCSR, UDCCSR0_SA | UDCCSR0_OPC);
+       ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
 
        i = udc->driver->setup(&udc->gadget, &u.r);
        if (i < 0)
@@ -1929,7 +1955,7 @@ out:
 stall:
        ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
                udc_ep_readl(ep, UDCCSR), i);
-       udc_ep_writel(ep, UDCCSR, UDCCSR0_FST | UDCCSR0_FTF);
+       ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
        set_ep0state(udc, STALL);
        goto out;
 }
@@ -1966,6 +1992,8 @@ stall:
  *     cleared by software.
  *   - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
  *     before reading ep0.
+ *     This is true only for PXA27x. This is not true anymore for PXA3xx family
+ *     (check Back-to-Back setup packet in developers guide).
  *   - irq can be called on a "packet complete" event (opc_irq=1), while
  *     UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
  *     from experimentation).
@@ -1998,7 +2026,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
        if (udccsr0 & UDCCSR0_SST) {
                ep_dbg(ep, "clearing stall status\n");
                nuke(ep, -EPIPE);
-               udc_ep_writel(ep, UDCCSR, UDCCSR0_SST);
+               ep_write_UDCCSR(ep, UDCCSR0_SST);
                ep0_idle(udc);
        }
 
@@ -2023,7 +2051,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
                break;
        case IN_DATA_STAGE:                     /* GET_DESCRIPTOR */
                if (epout_has_pkt(ep))
-                       udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC);
+                       ep_write_UDCCSR(ep, UDCCSR0_OPC);
                if (req && !ep_is_full(ep))
                        completed = write_ep0_fifo(ep, req);
                if (completed)
@@ -2036,7 +2064,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
                        ep0_end_out_req(ep, req);
                break;
        case STALL:
-               udc_ep_writel(ep, UDCCSR, UDCCSR0_FST);
+               ep_write_UDCCSR(ep, UDCCSR0_FST);
                break;
        case IN_STATUS_STAGE:
                /*
@@ -2131,6 +2159,7 @@ static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
 
        set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
        udc->driver->setup(&udc->gadget, &req);
+       ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
 }
 
 /**
@@ -2159,6 +2188,7 @@ static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
 
        set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
        udc->driver->setup(&udc->gadget, &req);
+       ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
 }
 
 /*
@@ -2280,7 +2310,7 @@ static void irq_udc_reset(struct pxa_udc *udc)
        memset(&udc->stats, 0, sizeof udc->stats);
 
        nuke(ep, -EPROTO);
-       udc_ep_writel(ep, UDCCSR, UDCCSR0_FTF | UDCCSR0_OPC);
+       ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
        ep0_idle(udc);
 }
 
@@ -2479,6 +2509,12 @@ static void pxa_udc_shutdown(struct platform_device *_dev)
                udc_disable(udc);
 }
 
+#ifdef CONFIG_CPU_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph()   do {} while (0)
+#endif
+
 #ifdef CONFIG_PM
 /**
  * pxa_udc_suspend - Suspend udc device
@@ -2546,8 +2582,7 @@ static int pxa_udc_resume(struct platform_device *_dev)
         * Software must configure the USB OTG pad, UDC, and UHC
         * to the state they were in before entering sleep mode.
         */
-       if (cpu_is_pxa27x())
-               PSSR |= PSSR_OTGPH;
+       pxa27x_clear_otgph();
 
        return 0;
 }
@@ -2571,7 +2606,7 @@ static struct platform_driver udc_driver = {
 
 static int __init udc_init(void)
 {
-       if (!cpu_is_pxa27x())
+       if (!cpu_is_pxa27x() && !cpu_is_pxa3xx())
                return -ENODEV;
 
        printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
index db58125331dad5d3788d5b3774a34f1637c893df..e25225e26586698ea1ac7ceb5e042b38c4245574 100644 (file)
 #define UP2OCR_HXOE    (1 << 17)       /* Transceiver Output Enable */
 #define UP2OCR_SEOS    (1 << 24)       /* Single-Ended Output Select */
 
+#define UDCCSR0_ACM    (1 << 9)        /* Ack Control Mode */
+#define UDCCSR0_AREN   (1 << 8)        /* Ack Response Enable */
 #define UDCCSR0_SA     (1 << 7)        /* Setup Active */
 #define UDCCSR0_RNE    (1 << 6)        /* Receive FIFO Not Empty */
 #define UDCCSR0_FST    (1 << 5)        /* Force Stall */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
new file mode 100644 (file)
index 0000000..50c71aa
--- /dev/null
@@ -0,0 +1,3269 @@
+/* linux/drivers/usb/gadget/s3c-hsotg.c
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *      Ben Dooks <ben@simtec.co.uk>
+ *      http://armlinux.simtec.co.uk/
+ *
+ * S3C USB2.0 High-speed / OtG driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <mach/map.h>
+
+#include <plat/regs-usb-hsotg-phy.h>
+#include <plat/regs-usb-hsotg.h>
+#include <plat/regs-sys.h>
+#include <plat/udc-hs.h>
+
+#define DMA_ADDR_INVALID (~((dma_addr_t)0))
+
+/* EP0_MPS_LIMIT
+ *
+ * Unfortunately there seems to be a limit of the amount of data that can
+ * be transferred by IN transactions on EP0. This is either 127 bytes or 3
+ * packets (which practically means 1 packet and 63 bytes of data) when the
+ * MPS is set to 64.
+ *
+ * This means if we are wanting to move >127 bytes of data, we need to
+ * split the transactions up, but just doing one packet at a time does
+ * not work (this may be an implicit DATA0 PID on first packet of the
+ * transaction) and doing 2 packets is outside the controller's limits.
+ *
+ * If we try to lower the MPS size for EP0, then no transfers work properly
+ * for EP0, and the system will fail basic enumeration. As no cause for this
+ * has currently been found, we cannot support any large IN transfers for
+ * EP0.
+ */
+#define EP0_MPS_LIMIT  64
+
+struct s3c_hsotg;
+struct s3c_hsotg_req;
+
+/**
+ * struct s3c_hsotg_ep - driver endpoint definition.
+ * @ep: The gadget layer representation of the endpoint.
+ * @name: The driver generated name for the endpoint.
+ * @queue: Queue of requests for this endpoint.
+ * @parent: Reference back to the parent device structure.
+ * @req: The current request that the endpoint is processing. This is
+ *       used to indicate a request has been loaded onto the endpoint
+ *       and has yet to be completed (maybe due to data move, or simply
+ *      awaiting an ack from the core all the data has been completed).
+ * @debugfs: File entry for debugfs file for this endpoint.
+ * @lock: State lock to protect contents of endpoint.
+ * @dir_in: Set to true if this endpoint is of the IN direction, which
+ *         means that it is sending data to the Host.
+ * @index: The index for the endpoint registers.
+ * @name: The name array passed to the USB core.
+ * @halted: Set if the endpoint has been halted.
+ * @periodic: Set if this is a periodic ep, such as Interrupt
+ * @sent_zlp: Set if we've sent a zero-length packet.
+ * @total_data: The total number of data bytes done.
+ * @fifo_size: The size of the FIFO (for periodic IN endpoints)
+ * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
+ * @last_load: The offset of data for the last start of request.
+ * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ *
+ * This is the driver's state for each registered endpoint, allowing it
+ * to keep track of transactions that need doing. Each endpoint has a
+ * lock to protect the state, to try and avoid using an overall lock
+ * for the host controller as much as possible.
+ *
+ * For periodic IN endpoints, we have fifo_size and fifo_load to try
+ * and keep track of the amount of data in the periodic FIFO for each
+ * of these as we don't have a status register that tells us how much
+ * is in each of them.
+ */
+struct s3c_hsotg_ep {
+       struct usb_ep           ep;
+       struct list_head        queue;
+       struct s3c_hsotg        *parent;
+       struct s3c_hsotg_req    *req;
+       struct dentry           *debugfs;
+
+       spinlock_t              lock;
+
+       unsigned long           total_data;
+       unsigned int            size_loaded;
+       unsigned int            last_load;
+       unsigned int            fifo_load;
+       unsigned short          fifo_size;
+
+       unsigned char           dir_in;
+       unsigned char           index;
+
+       unsigned int            halted:1;
+       unsigned int            periodic:1;
+       unsigned int            sent_zlp:1;
+
+       char                    name[10];
+};
+
+#define S3C_HSOTG_EPS  (8+1)   /* limit to 9 for the moment */
+
+/**
+ * struct s3c_hsotg - driver state.
+ * @dev: The parent device supplied to the probe function
+ * @driver: USB gadget driver
+ * @plat: The platform specific configuration data.
+ * @regs: The memory area mapped for accessing registers.
+ * @regs_res: The resource that was allocated when claiming register space.
+ * @irq: The IRQ number we are using
+ * @debug_root: root directory for debugfs.
+ * @debug_file: main status file for debugfs.
+ * @debug_fifo: FIFO status file for debugfs.
+ * @ep0_reply: Request used for ep0 reply.
+ * @ep0_buff: Buffer for EP0 reply data, if needed.
+ * @ctrl_buff: Buffer for EP0 control requests.
+ * @ctrl_req: Request for EP0 control packets.
+ * @eps: The endpoints being supplied to the gadget framework
+ */
+struct s3c_hsotg {
+       struct device            *dev;
+       struct usb_gadget_driver *driver;
+       struct s3c_hsotg_plat    *plat;
+
+       void __iomem            *regs;
+       struct resource         *regs_res;
+       int                     irq;
+
+       struct dentry           *debug_root;
+       struct dentry           *debug_file;
+       struct dentry           *debug_fifo;
+
+       struct usb_request      *ep0_reply;
+       struct usb_request      *ctrl_req;
+       u8                      ep0_buff[8];
+       u8                      ctrl_buff[8];
+
+       struct usb_gadget       gadget;
+       struct s3c_hsotg_ep     eps[];
+};
+
+/**
+ * struct s3c_hsotg_req - data transfer request
+ * @req: The USB gadget request
+ * @queue: The list of requests for the endpoint this is queued for.
+ * @in_progress: Has already had size/packets written to core
+ * @mapped: DMA buffer for this request has been mapped via dma_map_single().
+ */
+struct s3c_hsotg_req {
+       struct usb_request      req;
+       struct list_head        queue;
+       unsigned char           in_progress;
+       unsigned char           mapped;
+};
+
+/* conversion functions */
+static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
+{
+       return container_of(req, struct s3c_hsotg_req, req);
+}
+
+static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
+{
+       return container_of(ep, struct s3c_hsotg_ep, ep);
+}
+
+static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
+{
+       return container_of(gadget, struct s3c_hsotg, gadget);
+}
+
+static inline void __orr32(void __iomem *ptr, u32 val)
+{
+       writel(readl(ptr) | val, ptr);
+}
+
+static inline void __bic32(void __iomem *ptr, u32 val)
+{
+       writel(readl(ptr) & ~val, ptr);
+}
+
+/* forward declaration of functions */
+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
+
+/**
+ * using_dma - return the DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using DMA.
+ *
+ * Currently, we have the DMA support code worked into everywhere
+ * that needs it, but the AMBA DMA implementation in the hardware can
+ * only DMA from 32bit aligned addresses. This means that gadgets such
+ * as the CDC Ethernet cannot work as they often pass packets which are
+ * not 32bit aligned.
+ *
+ * Unfortunately the choice to use DMA or not is global to the controller
+ * and seems to be only settable when the controller is being put through
+ * a core reset. This means we either need to fix the gadgets to take
+ * account of DMA alignment, or add bounce buffers (yuerk).
+ *
+ * Until this issue is sorted out, we always return 'false'.
+ */
+static inline bool using_dma(struct s3c_hsotg *hsotg)
+{
+       return false;   /* support is not complete */
+}
+
+/**
+ * s3c_hsotg_en_gsint - enable one or more of the general interrupt
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to enable
+ */
+static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
+{
+       u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
+       u32 new_gsintmsk;
+
+       new_gsintmsk = gsintmsk | ints;
+
+       if (new_gsintmsk != gsintmsk) {
+               dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
+               writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
+       }
+}
+
+/**
+ * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
+ * @hsotg: The device state
+ * @ints: A bitmask of the interrupts to enable
+ */
+static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
+{
+       u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
+       u32 new_gsintmsk;
+
+       new_gsintmsk = gsintmsk & ~ints;
+
+       if (new_gsintmsk != gsintmsk)
+               writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
+}
+
+/**
+ * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
+ * @hsotg: The device state
+ * @ep: The endpoint index
+ * @dir_in: True if direction is in.
+ * @en: The enable value, true to enable
+ *
+ * Set or clear the mask for an individual endpoint's interrupt
+ * request.
+ */
+static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
+                                unsigned int ep, unsigned int dir_in,
+                                unsigned int en)
+{
+       unsigned long flags;
+       u32 bit = 1 << ep;
+       u32 daint;
+
+       if (!dir_in)
+               bit <<= 16;
+
+       local_irq_save(flags);
+       daint = readl(hsotg->regs + S3C_DAINTMSK);
+       if (en)
+               daint |= bit;
+       else
+               daint &= ~bit;
+       writel(daint, hsotg->regs + S3C_DAINTMSK);
+       local_irq_restore(flags);
+}
+
+/**
+ * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
+ * @hsotg: The device instance.
+ */
+static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
+{
+       /* the ryu 2.6.24 release has
+          writel(0x1C0, hsotg->regs + S3C_GRXFSIZ);
+          writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) |
+               S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
+               hsotg->regs + S3C_GNPTXFSIZ);
+       */
+
+       /* set FIFO sizes to 2048/0x1C0 */
+
+       writel(2048, hsotg->regs + S3C_GRXFSIZ);
+       writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) |
+              S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
+              hsotg->regs + S3C_GNPTXFSIZ);
+}
+
+/**
+ * @ep: USB endpoint to allocate request for.
+ * @flags: Allocation flags
+ *
+ * Allocate a new USB request structure appropriate for the specified endpoint
+ */
+struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags)
+{
+       struct s3c_hsotg_req *req;
+
+       req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
+       if (!req)
+               return NULL;
+
+       INIT_LIST_HEAD(&req->queue);
+
+       req->req.dma = DMA_ADDR_INVALID;
+       return &req->req;
+}
+
+/**
+ * is_ep_periodic - return true if the endpoint is in periodic mode.
+ * @hs_ep: The endpoint to query.
+ *
+ * Returns true if the endpoint is in periodic mode, meaning it is being
+ * used for an Interrupt or ISO transfer.
+ */
+static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
+{
+       return hs_ep->periodic;
+}
+
+/**
+ * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint for the request
+ * @hs_req: The request being processed.
+ *
+ * This is the reverse of s3c_hsotg_map_dma(), called for the completion
+ * of a request to ensure the buffer is ready for access by the caller.
+*/
+static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
+                               struct s3c_hsotg_ep *hs_ep,
+                               struct s3c_hsotg_req *hs_req)
+{
+       struct usb_request *req = &hs_req->req;
+       enum dma_data_direction dir;
+
+       dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+       /* ignore this if we're not moving any data */
+       if (hs_req->req.length == 0)
+               return;
+
+       if (hs_req->mapped) {
+               /* we mapped this, so unmap and remove the dma */
+
+               dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
+
+               req->dma = DMA_ADDR_INVALID;
+               hs_req->mapped = 0;
+       } else {
+               dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+       }
+}
+
+/**
+ * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint we're going to write for.
+ * @hs_req: The request to write data for.
+ *
+ * This is called when the TxFIFO has some space in it to hold a new
+ * transmission and we have something to give it. The actual setup of
+ * the data size is done elsewhere, so all we have to do is to actually
+ * write the data.
+ *
+ * The return value is zero if there is more space (or nothing was done)
+ * otherwise -ENOSPC is returned if the FIFO space was used up.
+ *
+ * This routine is only needed for PIO
+*/
+static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
+                               struct s3c_hsotg_ep *hs_ep,
+                               struct s3c_hsotg_req *hs_req)
+{
+       bool periodic = is_ep_periodic(hs_ep);
+       u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS);
+       int buf_pos = hs_req->req.actual;
+       int to_write = hs_ep->size_loaded;
+       void *data;
+       int can_write;
+       int pkt_round;
+
+       to_write -= (buf_pos - hs_ep->last_load);
+
+       /* if there's nothing to write, get out early */
+       if (to_write == 0)
+               return 0;
+
+       if (periodic) {
+               u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
+               int size_left;
+               int size_done;
+
+               /* work out how much data was loaded so we can calculate
+                * how much data is left in the fifo. */
+
+               size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+               dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
+                       __func__, size_left,
+                       hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
+
+               /* how much of the data has moved */
+               size_done = hs_ep->size_loaded - size_left;
+
+               /* how much data is left in the fifo */
+               can_write = hs_ep->fifo_load - size_done;
+               dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
+                       __func__, can_write);
+
+               can_write = hs_ep->fifo_size - can_write;
+               dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
+                       __func__, can_write);
+
+               if (can_write <= 0) {
+                       s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+                       return -ENOSPC;
+               }
+       } else {
+               if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
+                       dev_dbg(hsotg->dev,
+                               "%s: no queue slots available (0x%08x)\n",
+                               __func__, gnptxsts);
+
+                       s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
+                       return -ENOSPC;
+               }
+
+               can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
+       }
+
+       dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
+                __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
+
+       /* limit to 512 bytes of data, it seems at least on the non-periodic
+        * FIFO, requests of >512 cause the endpoint to get stuck with a
+        * fragment of the end of the transfer in it.
+        */
+       if (can_write > 512)
+               can_write = 512;
+
+       /* see if we can write data */
+
+       if (to_write > can_write) {
+               to_write = can_write;
+               pkt_round = to_write % hs_ep->ep.maxpacket;
+
+               /* Not sure, but we probably shouldn't be writing partial
+                * packets into the FIFO, so round the write down to an
+                * exact number of packets.
+                *
+                * Note, we do not currently check to see if we can ever
+                * write a full packet or not to the FIFO.
+                */
+
+               if (pkt_round)
+                       to_write -= pkt_round;
+
+               /* enable correct FIFO interrupt to alert us when there
+                * is more room left. */
+
+               s3c_hsotg_en_gsint(hsotg,
+                                  periodic ? S3C_GINTSTS_PTxFEmp :
+                                  S3C_GINTSTS_NPTxFEmp);
+       }
+
+       dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
+                to_write, hs_req->req.length, can_write, buf_pos);
+
+       if (to_write <= 0)
+               return -ENOSPC;
+
+       hs_req->req.actual = buf_pos + to_write;
+       hs_ep->total_data += to_write;
+
+       if (periodic)
+               hs_ep->fifo_load += to_write;
+
+       to_write = DIV_ROUND_UP(to_write, 4);
+       data = hs_req->req.buf + buf_pos;
+
+       writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write);
+
+       return (to_write >= can_write) ? -ENOSPC : 0;
+}
+
+/**
+ * get_ep_limit - get the maximum data length for this endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * so that transfers that are too long can be split.
+ */
+static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
+{
+       int index = hs_ep->index;
+       unsigned maxsize;
+       unsigned maxpkt;
+
+       if (index != 0) {
+               maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;
+               maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;
+       } else {
+               if (hs_ep->dir_in) {
+                       /* maxsize = S3C_DIEPTSIZ0_XferSize_LIMIT + 1; */
+                       maxsize = 64+64+1;
+                       maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;
+               } else {
+                       maxsize = 0x3f;
+                       maxpkt = 2;
+               }
+       }
+
+       /* we made the constant loading easier above by using +1 */
+       maxpkt--;
+       maxsize--;
+
+       /* constrain by packet count if maxpkts*pktsize is greater
+        * than the length register size. */
+
+       if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
+               maxsize = maxpkt * hs_ep->ep.maxpacket;
+
+       return maxsize;
+}
+
+/**
+ * s3c_hsotg_start_req - start a USB request from an endpoint's queue
+ * @hsotg: The controller state.
+ * @hs_ep: The endpoint to process a request for
+ * @hs_req: The request to start.
+ * @continuing: True if we are doing more for the current request.
+ *
+ * Start the given request running by setting the endpoint registers
+ * appropriately, and writing any data to the FIFOs.
+ */
+static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
+                               struct s3c_hsotg_ep *hs_ep,
+                               struct s3c_hsotg_req *hs_req,
+                               bool continuing)
+{
+       struct usb_request *ureq = &hs_req->req;
+       int index = hs_ep->index;
+       int dir_in = hs_ep->dir_in;
+       u32 epctrl_reg;
+       u32 epsize_reg;
+       u32 epsize;
+       u32 ctrl;
+       unsigned length;
+       unsigned packets;
+       unsigned maxreq;
+
+       if (index != 0) {
+               if (hs_ep->req && !continuing) {
+                       dev_err(hsotg->dev, "%s: active request\n", __func__);
+                       WARN_ON(1);
+                       return;
+               } else if (hs_ep->req != hs_req && continuing) {
+                       dev_err(hsotg->dev,
+                               "%s: continue different req\n", __func__);
+                       WARN_ON(1);
+                       return;
+               }
+       }
+
+       epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+       epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index);
+
+       dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
+               __func__, readl(hsotg->regs + epctrl_reg), index,
+               hs_ep->dir_in ? "in" : "out");
+
+       length = ureq->length - ureq->actual;
+
+       if (0)
+               dev_dbg(hsotg->dev,
+                       "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
+                       ureq->buf, length, ureq->dma,
+                       ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
+
+       maxreq = get_ep_limit(hs_ep);
+       if (length > maxreq) {
+               int round = maxreq % hs_ep->ep.maxpacket;
+
+               dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
+                       __func__, length, maxreq, round);
+
+               /* round down to multiple of packets */
+               if (round)
+                       maxreq -= round;
+
+               length = maxreq;
+       }
+
+       if (length)
+               packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
+       else
+               packets = 1;    /* send one packet if length is zero. */
+
+       if (dir_in && index != 0)
+               epsize = S3C_DxEPTSIZ_MC(1);
+       else
+               epsize = 0;
+
+       if (index != 0 && ureq->zero) {
+               /* test for the packets being exactly right for the
+                * transfer */
+
+               if (length == (packets * hs_ep->ep.maxpacket))
+                       packets++;
+       }
+
+       epsize |= S3C_DxEPTSIZ_PktCnt(packets);
+       epsize |= S3C_DxEPTSIZ_XferSize(length);
+
+       dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
+               __func__, packets, length, ureq->length, epsize, epsize_reg);
+
+       /* store the request as the current one we're doing */
+       hs_ep->req = hs_req;
+
+       /* write size / packets */
+       writel(epsize, hsotg->regs + epsize_reg);
+
+       ctrl = readl(hsotg->regs + epctrl_reg);
+
+       if (ctrl & S3C_DxEPCTL_Stall) {
+               dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
+
+               /* not sure what we can do here, if it is EP0 then we should
+                * get this cleared once the endpoint has transmitted the
+                * STALL packet, otherwise it needs to be cleared by the
+                * host.
+                */
+       }
+
+       if (using_dma(hsotg)) {
+               unsigned int dma_reg;
+
+               /* write DMA address to control register, buffer already
+                * synced by s3c_hsotg_ep_queue().  */
+
+               dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index);
+               writel(ureq->dma, hsotg->regs + dma_reg);
+
+               dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
+                       __func__, ureq->dma, dma_reg);
+       }
+
+       ctrl |= S3C_DxEPCTL_EPEna;      /* ensure ep enabled */
+       ctrl |= S3C_DxEPCTL_USBActEp;
+       ctrl |= S3C_DxEPCTL_CNAK;       /* clear NAK set by core */
+
+       dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+       writel(ctrl, hsotg->regs + epctrl_reg);
+
+       /* set these, it seems that DMA support increments past the end
+        * of the packet buffer so we need to calculate the length from
+        * this information. */
+       hs_ep->size_loaded = length;
+       hs_ep->last_load = ureq->actual;
+
+       if (dir_in && !using_dma(hsotg)) {
+               /* set these anyway, we may need them for non-periodic in */
+               hs_ep->fifo_load = 0;
+
+               s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+       }
+
+       /* clear the INTknTXFEmpMsk when we start request, more as an aide
+        * to debugging to see what is going on. */
+       if (dir_in)
+               writel(S3C_DIEPMSK_INTknTXFEmpMsk,
+                      hsotg->regs + S3C_DIEPINT(index));
+
+       /* Note, trying to clear the NAK here causes problems with transmit
+        * on the S3C6400 ending up with the TXFIFO becoming full. */
+
+       /* check ep is enabled */
+       if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna))
+               dev_warn(hsotg->dev,
+                        "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
+                        index, readl(hsotg->regs + epctrl_reg));
+
+       dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
+               __func__, readl(hsotg->regs + epctrl_reg));
+}
+
+/**
+ * s3c_hsotg_map_dma - map the DMA memory being used for the request
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request is on.
+ * @req: The request being processed.
+ *
+ * We've been asked to queue a request, so ensure that the memory buffer
+ * is correctly setup for DMA. If we've been passed an extant DMA address
+ * then ensure the buffer has been synced to memory. If our buffer has no
+ * DMA memory, then we map the memory and mark our request to allow us to
+ * cleanup on completion.
+*/
+static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
+                            struct s3c_hsotg_ep *hs_ep,
+                            struct usb_request *req)
+{
+       enum dma_data_direction dir;
+       struct s3c_hsotg_req *hs_req = our_req(req);
+
+       dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+       /* if the length is zero, ignore the DMA data */
+       if (hs_req->req.length == 0)
+               return 0;
+
+       if (req->dma == DMA_ADDR_INVALID) {
+               dma_addr_t dma;
+
+               dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
+
+               if (unlikely(dma_mapping_error(hsotg->dev, dma)))
+                       goto dma_error;
+
+               if (dma & 3) {
+                       dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
+                               __func__);
+
+                       dma_unmap_single(hsotg->dev, dma, req->length, dir);
+                       return -EINVAL;
+               }
+
+               hs_req->mapped = 1;
+               req->dma = dma;
+       } else {
+               dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+               hs_req->mapped = 0;
+       }
+
+       return 0;
+
+dma_error:
+       dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
+               __func__, req->buf, req->length);
+
+       return -EIO;
+}
+
+static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+                             gfp_t gfp_flags)
+{
+       struct s3c_hsotg_req *hs_req = our_req(req);
+       struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+       struct s3c_hsotg *hs = hs_ep->parent;
+       unsigned long irqflags;
+       bool first;
+
+       dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
+               ep->name, req, req->length, req->buf, req->no_interrupt,
+               req->zero, req->short_not_ok);
+
+       /* initialise status of the request */
+       INIT_LIST_HEAD(&hs_req->queue);
+       req->actual = 0;
+       req->status = -EINPROGRESS;
+
+       /* if we're using DMA, sync the buffers as necessary */
+       if (using_dma(hs)) {
+               int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
+               if (ret)
+                       return ret;
+       }
+
+       spin_lock_irqsave(&hs_ep->lock, irqflags);
+
+       first = list_empty(&hs_ep->queue);
+       list_add_tail(&hs_req->queue, &hs_ep->queue);
+
+       if (first)
+               s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
+
+       spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+
+       return 0;
+}
+
+static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
+                                     struct usb_request *req)
+{
+       struct s3c_hsotg_req *hs_req = our_req(req);
+
+       kfree(hs_req);
+}
+
+/**
+ * s3c_hsotg_complete_oursetup - setup completion callback
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Called on completion of any requests the driver itself
+ * submitted that need cleaning up.
+ */
+static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
+                                       struct usb_request *req)
+{
+       struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+       struct s3c_hsotg *hsotg = hs_ep->parent;
+
+       dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
+
+       s3c_hsotg_ep_free_request(ep, req);
+}
+
+/**
+ * ep_from_windex - convert control wIndex value to endpoint
+ * @hsotg: The driver state.
+ * @windex: The control request wIndex field (in host order).
+ *
+ * Convert the given wIndex into a pointer to a driver endpoint
+ * structure, or return NULL if it is not a valid endpoint.
+*/
+static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
+                                          u32 windex)
+{
+       struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
+       int dir = (windex & USB_DIR_IN) ? 1 : 0;
+       int idx = windex & 0x7F;
+
+       if (windex >= 0x100)
+               return NULL;
+
+       if (idx > S3C_HSOTG_EPS)
+               return NULL;
+
+       if (idx && ep->dir_in != dir)
+               return NULL;
+
+       return ep;
+}
+
+/**
+ * s3c_hsotg_send_reply - send reply to control request
+ * @hsotg: The device state
+ * @ep: Endpoint 0
+ * @buff: Buffer for request
+ * @length: Length of reply.
+ *
+ * Create a request and queue it on the given endpoint. This is useful as
+ * an internal method of sending replies to certain control requests, etc.
+ *
+ * Returns 0 on success or a negative errno; on failure no request is
+ * left allocated or recorded in @hsotg->ep0_reply.
+ */
+static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
+                               struct s3c_hsotg_ep *ep,
+                               void *buff,
+                               int length)
+{
+       struct usb_request *req;
+       int ret;
+
+       dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
+
+       req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
+       if (!req) {
+               dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
+               return -ENOMEM;
+       }
+
+       /* only record the reply once the allocation has succeeded */
+       hsotg->ep0_reply = req;
+
+       req->buf = hsotg->ep0_buff;
+       req->length = length;
+       req->zero = 1; /* always do zero-length final transfer */
+       req->complete = s3c_hsotg_complete_oursetup;
+
+       if (length)
+               memcpy(req->buf, buff, length);
+       else
+               ep->sent_zlp = 1;
+
+       ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
+       if (ret) {
+               dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
+               /* don't leak the request if it could not be queued */
+               s3c_hsotg_ep_free_request(&ep->ep, req);
+               hsotg->ep0_reply = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * s3c_hsotg_process_req_status - process request GET_STATUS
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ *
+ * Returns 1 when a reply has been queued, 0 if the recipient is not
+ * handled here, or a negative errno on failure.
+ */
+static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
+                                       struct usb_ctrlrequest *ctrl)
+{
+       struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+       struct s3c_hsotg_ep *target;
+       __le16 status;
+       int ret;
+
+       dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
+
+       if (!ep0->dir_in) {
+               dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
+               return -EINVAL;
+       }
+
+       switch (ctrl->bRequestType & USB_RECIP_MASK) {
+       case USB_RECIP_DEVICE:
+               /* bit 0 => self powered, bit 1 => remote wakeup;
+                * neither is currently reported */
+               status = cpu_to_le16(0);
+               break;
+
+       case USB_RECIP_INTERFACE:
+               /* currently, the data result should be zero */
+               status = cpu_to_le16(0);
+               break;
+
+       case USB_RECIP_ENDPOINT:
+               target = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+               if (!target)
+                       return -ENOENT;
+
+               status = cpu_to_le16(target->halted ? 1 : 0);
+               break;
+
+       default:
+               return 0;
+       }
+
+       if (le16_to_cpu(ctrl->wLength) != 2)
+               return -EINVAL;
+
+       ret = s3c_hsotg_send_reply(hsotg, ep0, &status, 2);
+       if (ret) {
+               dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
+               return ret;
+       }
+
+       return 1;
+}
+
+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+
+/**
+ * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
+ * @hsotg: The device state
+ * @ctrl: USB control request
+ */
+static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
+                                        struct usb_ctrlrequest *ctrl)
+{
+       bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+       struct s3c_hsotg_ep *ep;
+
+       dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
+               __func__, set ? "SET" : "CLEAR");
+
+       /* currently only deal with endpoint-recipient features */
+       if (ctrl->bRequestType != USB_RECIP_ENDPOINT)
+               return -ENOENT;
+
+       ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
+       if (!ep) {
+               dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
+                       __func__, le16_to_cpu(ctrl->wIndex));
+               return -ENOENT;
+       }
+
+       /* ENDPOINT_HALT is the only feature handled so far */
+       if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
+               return -ENOENT;
+
+       s3c_hsotg_ep_sethalt(&ep->ep, set);
+
+       return 1;
+}
+
+/**
+ * s3c_hsotg_process_control - process a control request
+ * @hsotg: The device state
+ * @ctrl: The control request received
+ *
+ * The controller has received the SETUP phase of a control request, and
+ * needs to work out what to do next (and whether to pass it on to the
+ * gadget driver).
+ */
+static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
+                                     struct usb_ctrlrequest *ctrl)
+{
+       struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
+       int ret = 0;
+       u32 dcfg;
+
+       ep0->sent_zlp = 0;
+
+       /* wValue/wLength are little-endian fields in the SETUP packet */
+       dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
+                ctrl->bRequest, ctrl->bRequestType,
+                le16_to_cpu(ctrl->wValue), le16_to_cpu(ctrl->wLength));
+
+       /* record the direction of the request, for later use when enquing
+        * packets onto EP0. */
+
+       ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
+       dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
+
+       /* if we've no data with this request, then the last part of the
+        * transaction is going to implicitly be IN. */
+       if (ctrl->wLength == 0)
+               ep0->dir_in = 1;
+
+       if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+               switch (ctrl->bRequest) {
+               case USB_REQ_SET_ADDRESS:
+                       dcfg = readl(hsotg->regs + S3C_DCFG);
+                       dcfg &= ~S3C_DCFG_DevAddr_MASK;
+                       dcfg |= le16_to_cpu(ctrl->wValue) <<
+                               S3C_DCFG_DevAddr_SHIFT;
+                       writel(dcfg, hsotg->regs + S3C_DCFG);
+
+                       dev_info(hsotg->dev, "new address %d\n",
+                                le16_to_cpu(ctrl->wValue));
+
+                       ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+                       return;
+
+               case USB_REQ_GET_STATUS:
+                       ret = s3c_hsotg_process_req_status(hsotg, ctrl);
+                       break;
+
+               case USB_REQ_CLEAR_FEATURE:
+               case USB_REQ_SET_FEATURE:
+                       ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
+                       break;
+               }
+       }
+
+       /* as a fallback, try delivering it to the driver to deal with */
+
+       if (ret == 0 && hsotg->driver) {
+               ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
+               if (ret < 0)
+                       dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
+       }
+
+       if (ret > 0) {
+               if (!ep0->dir_in) {
+                       /* need to generate zlp in reply or take data */
+                       /* todo - deal with any data we might be sent? */
+                       ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
+               }
+       }
+
+       /* the request is either unhandlable, or is not formatted correctly
+        * so respond with a STALL for the status stage to indicate failure.
+        */
+
+       if (ret < 0) {
+               u32 reg;
+               u32 epctl;  /* renamed from 'ctrl': was shadowing the
+                            * usb_ctrlrequest parameter */
+
+               dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
+               reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0;
+
+               /* S3C_DxEPCTL_Stall will be cleared by EP once it has
+                * taken effect, so no need to clear later. */
+
+               epctl = readl(hsotg->regs + reg);
+               epctl |= S3C_DxEPCTL_Stall;
+               epctl |= S3C_DxEPCTL_CNAK;
+               writel(epctl, hsotg->regs + reg);
+
+               dev_dbg(hsotg->dev,
+                       "written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
+                       epctl, reg, readl(hsotg->regs + reg));
+
+               /* don't believe we need to do anything more to get the EP
+                * to reply with a STALL packet */
+       }
+}
+
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
+
+/**
+ * s3c_hsotg_complete_setup - completion of a setup transfer
+ * @ep: The endpoint the request was on.
+ * @req: The request completed.
+ *
+ * Completion handler for the driver's own EP0 setup requests: a
+ * zero-length result simply re-arms the setup request, otherwise the
+ * received packet is passed on for control processing.
+ */
+static void s3c_hsotg_complete_setup(struct usb_ep *ep,
+                                    struct usb_request *req)
+{
+       struct s3c_hsotg *hsotg = our_ep(ep)->parent;
+
+       if (req->status < 0) {
+               dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
+               return;
+       }
+
+       if (req->actual)
+               s3c_hsotg_process_control(hsotg, req->buf);
+       else
+               s3c_hsotg_enqueue_setup(hsotg);
+}
+
+/**
+ * s3c_hsotg_enqueue_setup - start a request for EP0 packets
+ * @hsotg: The device state.
+ *
+ * Make sure the driver's control request is queued on EP0 so that any
+ * SETUP packet sent by the host can be received.
+ */
+static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
+{
+       struct usb_request *req = hsotg->ctrl_req;
+       struct s3c_hsotg_req *hs_req = our_req(req);
+       int result;
+
+       dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
+
+       req->zero = 0;
+       req->length = 8;        /* a SETUP packet is always 8 bytes */
+       req->buf = hsotg->ctrl_buff;
+       req->complete = s3c_hsotg_complete_setup;
+
+       if (!list_empty(&hs_req->queue)) {
+               dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
+               return;
+       }
+
+       /* SETUP packets always arrive on the OUT side of EP0 */
+       hsotg->eps[0].dir_in = 0;
+
+       result = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
+       if (result < 0) {
+               dev_err(hsotg->dev, "%s: failed queue (%d)\n",
+                       __func__, result);
+               /* Don't think there's much we can do other than watch the
+                * driver fail. */
+       }
+}
+
+/**
+ * get_ep_head - return the first request on the endpoint
+ * @hs_ep: The controller endpoint to get
+ *
+ * Return the request at the head of the endpoint's queue, or NULL if
+ * the queue is empty.
+*/
+static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
+{
+       struct list_head *queue = &hs_ep->queue;
+
+       return list_empty(queue) ? NULL :
+               list_first_entry(queue, struct s3c_hsotg_req, queue);
+}
+
+/**
+ * s3c_hsotg_complete_request - complete a request given to us
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * The given request has finished, so call the necessary completion
+ * if it has one and then look to see if we can start a new request
+ * on the endpoint.
+ *
+ * Note, expects the ep to already be locked as appropriate.
+*/
+static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
+                                      struct s3c_hsotg_ep *hs_ep,
+                                      struct s3c_hsotg_req *hs_req,
+                                      int result)
+{
+       bool restart;
+
+       if (!hs_req) {
+               dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
+               return;
+       }
+
+       dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
+               hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
+
+       /* only replace the status if we've not already set an error
+        * from a previous transaction */
+
+       if (hs_req->req.status == -EINPROGRESS)
+               hs_req->req.status = result;
+
+       /* detach the request from the endpoint before any callback runs */
+       hs_ep->req = NULL;
+       list_del_init(&hs_req->queue);
+
+       if (using_dma(hsotg))
+               s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
+
+       /* call the complete request with the locks off, just in case the
+        * request tries to queue more work for this endpoint. */
+
+       if (hs_req->req.complete) {
+               spin_unlock(&hs_ep->lock);
+               hs_req->req.complete(&hs_ep->ep, &hs_req->req);
+               spin_lock(&hs_ep->lock);
+       }
+
+       /* Look to see if there is anything else to do. Note, the completion
+        * of the previous request may have caused a new request to be started
+        * so be careful when doing this. */
+
+       if (!hs_ep->req && result >= 0) {
+               restart = !list_empty(&hs_ep->queue);
+               if (restart) {
+                       hs_req = get_ep_head(hs_ep);
+                       s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
+               }
+       }
+}
+
+/**
+ * s3c_hsotg_complete_request_lock - complete a request, taking the ep lock
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint the request was on.
+ * @hs_req: The request to complete.
+ * @result: The result code (0 => Ok, otherwise errno)
+ *
+ * Wrapper around s3c_hsotg_complete_request() that acquires the
+ * endpoint lock around the call (the inner function expects it held).
+*/
+static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
+                                           struct s3c_hsotg_ep *hs_ep,
+                                           struct s3c_hsotg_req *hs_req,
+                                           int result)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&hs_ep->lock, irqflags);
+       s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
+       spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+}
+
+/**
+ * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
+ * @hsotg: The device state.
+ * @ep_idx: The endpoint index for the data
+ * @size: The size of data in the fifo, in bytes
+ *
+ * The FIFO status shows there is data to read from the FIFO for a given
+ * endpoint, so sort out whether we need to read the data into a request
+ * that has been made for that endpoint.
+ */
+static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
+{
+       struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
+       struct s3c_hsotg_req *hs_req = hs_ep->req;
+       void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);
+       int to_read;
+       int max_req;
+       int read_ptr;
+       int words;
+       int copy_words;
+
+       if (!hs_req) {
+               u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));
+               int ptr;
+
+               dev_warn(hsotg->dev,
+                        "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
+                        __func__, size, ep_idx, epctl);
+
+               /* dump the data from the FIFO, we've nothing we can do */
+               for (ptr = 0; ptr < size; ptr += 4)
+                       (void)readl(fifo);
+
+               return;
+       }
+
+       spin_lock(&hs_ep->lock);
+
+       to_read = size;
+       read_ptr = hs_req->req.actual;
+       max_req = hs_req->req.length - read_ptr;
+
+       dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
+               __func__, to_read, max_req, read_ptr, hs_req->req.length);
+
+       if (to_read > max_req) {
+               /* More data arrived than the request has room for. We
+                * cannot split packets across requests yet, so copy only
+                * what fits (previously the full packet was written,
+                * overrunning the request buffer) and drain the rest so
+                * the FIFO stays in step with the status already popped. */
+               WARN_ON_ONCE(1);
+               to_read = max_req;
+       }
+
+       hs_ep->total_data += to_read;
+       hs_req->req.actual += to_read;
+
+       words = DIV_ROUND_UP(size, 4);
+       copy_words = DIV_ROUND_UP(to_read, 4);
+       if (copy_words > words)
+               copy_words = words;
+
+       /* note, we might over-write the buffer end by 3 bytes depending on
+        * alignment of the data. */
+       readsl(fifo, hs_req->req.buf + read_ptr, copy_words);
+
+       /* throw away any part of the packet the request could not take */
+       for (; words > copy_words; words--)
+               (void)readl(fifo);
+
+       spin_unlock(&hs_ep->lock);
+}
+
+/**
+ * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
+ * @hsotg: The device instance
+ * @req: The request currently on this endpoint
+ *
+ * Generate a zero-length IN packet request for terminating a SETUP
+ * transaction.
+ *
+ * Note, since we don't write any data to the TxFIFO, then it is
+ * currently believed that we do not need to wait for any space in
+ * the TxFIFO.
+ */
+static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
+                              struct s3c_hsotg_req *req)
+{
+       u32 ctrl;
+
+       if (!req) {
+               dev_warn(hsotg->dev, "%s: no request?\n", __func__);
+               return;
+       }
+
+       if (req->req.length == 0) {
+               /* request was itself zero-length, so just note the ZLP
+                * as sent and re-arm EP0 for the next SETUP packet */
+               hsotg->eps[0].sent_zlp = 1;
+               s3c_hsotg_enqueue_setup(hsotg);
+               return;
+       }
+
+       hsotg->eps[0].dir_in = 1;
+       hsotg->eps[0].sent_zlp = 1;
+
+       dev_dbg(hsotg->dev, "sending zero-length packet\n");
+
+       /* issue a zero-sized packet to terminate this */
+       writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
+              S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));
+
+       ctrl = readl(hsotg->regs + S3C_DIEPCTL0);
+       ctrl |= S3C_DxEPCTL_CNAK;  /* clear NAK set by core */
+       ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
+       ctrl |= S3C_DxEPCTL_USBActEp;
+       writel(ctrl, hsotg->regs + S3C_DIEPCTL0);
+}
+
+/**
+ * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
+ * @hsotg: The device instance
+ * @epnum: The endpoint received from
+ * @was_setup: Set if processing a SetupDone event.
+ *
+ * The RXFIFO has delivered an OutDone event, which means that the data
+ * transfer for an OUT endpoint has been completed, either by a short
+ * packet or by the finish of a transfer.
+*/
+static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
+                                    int epnum, bool was_setup)
+{
+       struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
+       struct s3c_hsotg_req *hs_req = hs_ep->req;
+       struct usb_request *req;
+       int result = 0;
+
+       if (!hs_req) {
+               dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
+               return;
+       }
+
+       /* only dereference the request once we know one is active
+        * (previously &hs_req->req was formed before the NULL check) */
+       req = &hs_req->req;
+
+       if (using_dma(hsotg)) {
+               u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));
+               unsigned size_done;
+               unsigned size_left;
+
+               /* Calculate the size of the transfer by checking how much
+                * is left in the endpoint size register and then working it
+                * out from the amount we loaded for the transfer.
+                *
+                * We need to do this as DMA pointers are always 32bit aligned
+                * so may overshoot/undershoot the transfer.
+                */
+
+               size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+               size_done = hs_ep->size_loaded - size_left;
+               size_done += hs_ep->last_load;
+
+               req->actual = size_done;
+       }
+
+       if (req->actual < req->length && req->short_not_ok) {
+               dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
+                       __func__, req->actual, req->length);
+
+               /* todo - what should we return here? there's no one else
+                * even bothering to check the status. */
+       }
+
+       if (epnum == 0) {
+               if (!was_setup && req->complete != s3c_hsotg_complete_setup)
+                       s3c_hsotg_send_zlp(hsotg, hs_req);
+       }
+
+       s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);
+}
+
+/**
+ * s3c_hsotg_read_frameno - read current frame number
+ * @hsotg: The device instance
+ *
+ * Return the SOF frame-number field extracted from the DSTS register.
+*/
+static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
+{
+       u32 dsts = readl(hsotg->regs + S3C_DSTS);
+
+       return (dsts & S3C_DSTS_SOFFN_MASK) >> S3C_DSTS_SOFFN_SHIFT;
+}
+
+/**
+ * s3c_hsotg_handle_rx - RX FIFO has data
+ * @hsotg: The device instance
+ *
+ * The IRQ handler has detected that the RX FIFO has some data in it
+ * that requires processing, so find out what is in there and do the
+ * appropriate read.
+ *
+ * The RXFIFO is a true FIFO, the packets comming out are still in packet
+ * chunks, so if you have x packets received on an endpoint you'll get x
+ * FIFO events delivered, each with a packet's worth of data in it.
+ *
+ * When using DMA, we should not be processing events from the RXFIFO
+ * as the actual data should be sent to the memory directly and we turn
+ * on the completion interrupts to get notifications of transfer completion.
+ */
+void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
+{
+       u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
+       u32 epnum, status, size;
+
+       WARN_ON(using_dma(hsotg));
+
+       epnum = grxstsr & S3C_GRXSTS_EPNum_MASK;
+       status = grxstsr & S3C_GRXSTS_PktSts_MASK;
+
+       size = grxstsr & S3C_GRXSTS_ByteCnt_MASK;
+       size >>= S3C_GRXSTS_ByteCnt_SHIFT;
+
+       if (1)
+               dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
+                       __func__, grxstsr, size, epnum);
+
+#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT)
+
+       switch (status >> S3C_GRXSTS_PktSts_SHIFT) {
+       case __status(S3C_GRXSTS_PktSts_GlobalOutNAK):
+               dev_dbg(hsotg->dev, "GlobalOutNAK\n");
+               break;
+
+       case __status(S3C_GRXSTS_PktSts_OutDone):
+               dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
+                       s3c_hsotg_read_frameno(hsotg));
+
+               if (!using_dma(hsotg))
+                       s3c_hsotg_handle_outdone(hsotg, epnum, false);
+               break;
+
+       case __status(S3C_GRXSTS_PktSts_SetupDone):
+               dev_dbg(hsotg->dev,
+                       "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
+                       s3c_hsotg_read_frameno(hsotg),
+                       readl(hsotg->regs + S3C_DOEPCTL(0)));
+
+               s3c_hsotg_handle_outdone(hsotg, epnum, true);
+               break;
+
+       case __status(S3C_GRXSTS_PktSts_OutRX):
+               s3c_hsotg_rx_data(hsotg, epnum, size);
+               break;
+
+       case __status(S3C_GRXSTS_PktSts_SetupRX):
+               dev_dbg(hsotg->dev,
+                       "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
+                       s3c_hsotg_read_frameno(hsotg),
+                       readl(hsotg->regs + S3C_DOEPCTL(0)));
+
+               s3c_hsotg_rx_data(hsotg, epnum, size);
+               break;
+
+       default:
+               dev_warn(hsotg->dev, "%s: unknown status %08x\n",
+                        __func__, grxstsr);
+
+               s3c_hsotg_dump(hsotg);
+               break;
+       }
+}
+
+/**
+ * s3c_hsotg_ep0_mps - turn max packet size into register setting
+ * @mps: The maximum packet size in bytes.
+ *
+ * Map an EP0 maximum-packet-size value to the matching D0EPCTL MPS
+ * field encoding; warns and returns (u32)-1 for unsupported sizes.
+*/
+static u32 s3c_hsotg_ep0_mps(unsigned int mps)
+{
+       switch (mps) {
+       case 8:
+               return S3C_D0EPCTL_MPS_8;
+       case 16:
+               return S3C_D0EPCTL_MPS_16;
+       case 32:
+               return S3C_D0EPCTL_MPS_32;
+       case 64:
+               return S3C_D0EPCTL_MPS_64;
+       default:
+               /* bad max packet size, warn and return invalid result */
+               WARN_ON(1);
+               return (u32)-1;
+       }
+}
+
+/**
+ * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
+ * @hsotg: The driver state.
+ * @ep: The index number of the endpoint
+ * @mps: The maximum packet size in bytes
+ *
+ * Configure the maximum packet size for the given endpoint, updating
+ * the hardware control registers to reflect this.
+ */
+static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
+                                      unsigned int ep, unsigned int mps)
+{
+       struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
+       void __iomem *regs = hsotg->regs;
+       u32 mpsval;
+       u32 reg;
+
+       if (ep == 0) {
+               /* EP0 is a special case */
+               mpsval = s3c_hsotg_ep0_mps(mps);
+               /* NOTE(review): assumes valid D0EPCTL MPS encodings are
+                * 0..3 so that the (u32)-1 error value trips this test --
+                * confirm against the register definitions */
+               if (mpsval > 3)
+                       goto bad_mps;
+       } else {
+               /* other endpoints take the packet size directly */
+               if (mps >= S3C_DxEPCTL_MPS_LIMIT+1)
+                       goto bad_mps;
+
+               mpsval = mps;
+       }
+
+       hs_ep->ep.maxpacket = mps;
+
+       /* update both the in and out endpoint controldir_ registers, even
+        * if one of the directions may not be in use. */
+
+       reg = readl(regs + S3C_DIEPCTL(ep));
+       reg &= ~S3C_DxEPCTL_MPS_MASK;
+       reg |= mpsval;
+       writel(reg, regs + S3C_DIEPCTL(ep));
+
+       reg = readl(regs + S3C_DOEPCTL(ep));
+       reg &= ~S3C_DxEPCTL_MPS_MASK;
+       reg |= mpsval;
+       writel(reg, regs + S3C_DOEPCTL(ep));
+
+       return;
+
+bad_mps:
+       dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
+}
+
+
+/**
+ * s3c_hsotg_trytx - check to see if anything needs transmitting
+ * @hsotg: The driver state
+ * @hs_ep: The driver endpoint to check.
+ *
+ * Check to see if there is a request that has data to send, and if so
+ * make an attempt to write data into the FIFO.
+ */
+static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
+                          struct s3c_hsotg_ep *hs_ep)
+{
+       struct s3c_hsotg_req *hs_req = hs_ep->req;
+
+       /* nothing to do for OUT endpoints or without an active request */
+       if (!hs_ep->dir_in || !hs_req)
+               return 0;
+
+       /* everything already written? */
+       if (hs_req->req.actual >= hs_req->req.length)
+               return 0;
+
+       dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
+               hs_ep->index);
+
+       return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
+}
+
+/**
+ * s3c_hsotg_complete_in - complete IN transfer
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint that has just completed.
+ *
+ * An IN transfer has been completed, update the transfer's state and then
+ * call the relevant completion routines.
+ */
+static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
+                                 struct s3c_hsotg_ep *hs_ep)
+{
+       struct s3c_hsotg_req *hs_req = hs_ep->req;
+       u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
+       int size_left, size_done;
+
+       if (!hs_req) {
+               dev_dbg(hsotg->dev, "XferCompl but no req\n");
+               return;
+       }
+
+       /* Calculate the size of the transfer by checking how much is left
+        * in the endpoint size register and then working it out from
+        * the amount we loaded for the transfer.
+        *
+        * We do this even for DMA, as the transfer may have incremented
+        * past the end of the buffer (DMA transfers are always 32bit
+        * aligned).
+        */
+
+       size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
+
+       size_done = hs_ep->size_loaded - size_left;
+       size_done += hs_ep->last_load;
+
+       if (hs_req->req.actual != size_done)
+               dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
+                       __func__, hs_req->req.actual, size_done);
+
+       hs_req->req.actual = size_done;
+
+       /* if we did all of the transfer, and there is more data left
+        * around, then try restarting the rest of the request */
+
+       if (!size_left && hs_req->req.actual < hs_req->req.length) {
+               dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
+               s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
+       } else
+               /* transfer finished (or ended short): complete the request */
+               s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
+}
+
+/**
+ * s3c_hsotg_epint - handle an in/out endpoint interrupt
+ * @hsotg: The driver state
+ * @idx: The index for the endpoint (0..15)
+ * @dir_in: Set if this is an IN endpoint
+ *
+ * Process and clear any interrupt pending for an individual endpoint
+*/
+static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
+                           int dir_in)
+{
+       struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
+       u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);
+       u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);
+       u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);
+       u32 ints;
+       /* bits accumulated here are written back to the interrupt
+        * register in one go at the end of the function */
+       u32 clear = 0;
+
+       ints = readl(hsotg->regs + epint_reg);
+
+       dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
+               __func__, idx, dir_in ? "in" : "out", ints);
+
+       if (ints & S3C_DxEPINT_XferCompl) {
+               dev_dbg(hsotg->dev,
+                       "%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
+                       __func__, readl(hsotg->regs + epctl_reg),
+                       readl(hsotg->regs + epsiz_reg));
+
+               /* we get OutDone from the FIFO, so we only need to look
+                * at completing IN requests here */
+               if (dir_in) {
+                       s3c_hsotg_complete_in(hsotg, hs_ep);
+
+                       /* re-arm EP0 for the next SETUP after a completion */
+                       if (idx == 0)
+                               s3c_hsotg_enqueue_setup(hsotg);
+               } else if (using_dma(hsotg)) {
+                       /* We're using DMA, we need to fire an OutDone here
+                        * as we ignore the RXFIFO. */
+
+                       s3c_hsotg_handle_outdone(hsotg, idx, false);
+               }
+
+               clear |= S3C_DxEPINT_XferCompl;
+       }
+
+       if (ints & S3C_DxEPINT_EPDisbld) {
+               dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+               clear |= S3C_DxEPINT_EPDisbld;
+       }
+
+       if (ints & S3C_DxEPINT_AHBErr) {
+               dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
+               clear |= S3C_DxEPINT_AHBErr;
+       }
+
+       if (ints & S3C_DxEPINT_Setup) {  /* Setup or Timeout */
+               dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
+
+               if (using_dma(hsotg) && idx == 0) {
+                       /* this is the notification we've received a
+                        * setup packet. In non-DMA mode we'd get this
+                        * from the RXFIFO, instead we need to process
+                        * the setup here. */
+
+                       if (dir_in)
+                               WARN_ON_ONCE(1)
+                       else
+                               s3c_hsotg_handle_outdone(hsotg, 0, true);
+               }
+
+               clear |= S3C_DxEPINT_Setup;
+       }
+
+       if (ints & S3C_DxEPINT_Back2BackSetup) {
+               dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
+               clear |= S3C_DxEPINT_Back2BackSetup;
+       }
+
+       if (dir_in) {
+               /* not sure if this is important, but we'll clear it anyway
+                */
+               if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {
+                       dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
+                               __func__, idx);
+                       clear |= S3C_DIEPMSK_INTknTXFEmpMsk;
+               }
+
+               /* this probably means something bad is happening */
+               if (ints & S3C_DIEPMSK_INTknEPMisMsk) {
+                       dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
+                                __func__, idx);
+                       clear |= S3C_DIEPMSK_INTknEPMisMsk;
+               }
+       }
+
+       /* acknowledge all the handled interrupt sources */
+       writel(clear, hsotg->regs + epint_reg);
+}
+
+/**
+ * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
+ * @hsotg: The device state.
+ *
+ * Handle updating the device settings after the enumeration phase has
+ * been completed.
+*/
+static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
+{
+       u32 dsts = readl(hsotg->regs + S3C_DSTS);
+       /* ep_mps is now initialised too: previously it was only safe
+        * because the low-speed case left ep0_mps at zero, which some
+        * compilers flag as a maybe-uninitialized use */
+       int ep0_mps = 0, ep_mps = 0;
+
+       /* This should signal the finish of the enumeration phase
+        * of the USB handshaking, so we should now know what rate
+        * we connected at. */
+
+       dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
+
+       /* note, since we're limited by the size of transfer on EP0, and
+        * it seems IN transfers must be a even number of packets we do
+        * not advertise a 64byte MPS on EP0. */
+
+       /* catch both EnumSpd_FS and EnumSpd_FS48 */
+       switch (dsts & S3C_DSTS_EnumSpd_MASK) {
+       case S3C_DSTS_EnumSpd_FS:
+       case S3C_DSTS_EnumSpd_FS48:
+               hsotg->gadget.speed = USB_SPEED_FULL;
+               dev_info(hsotg->dev, "new device is full-speed\n");
+
+               ep0_mps = EP0_MPS_LIMIT;
+               ep_mps = 64;
+               break;
+
+       case S3C_DSTS_EnumSpd_HS:
+               dev_info(hsotg->dev, "new device is high-speed\n");
+               hsotg->gadget.speed = USB_SPEED_HIGH;
+
+               ep0_mps = EP0_MPS_LIMIT;
+               ep_mps = 512;
+               break;
+
+       case S3C_DSTS_EnumSpd_LS:
+               hsotg->gadget.speed = USB_SPEED_LOW;
+               dev_info(hsotg->dev, "new device is low-speed\n");
+
+               /* note, we don't actually support LS in this driver at the
+                * moment, and the documentation seems to imply that it isn't
+                * supported by the PHYs on some of the devices.
+                */
+               break;
+       }
+
+       /* we should now know the maximum packet size for an
+        * endpoint, so set the endpoints to a default value. */
+
+       if (ep0_mps) {
+               int i;
+               s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
+               for (i = 1; i < S3C_HSOTG_EPS; i++)
+                       s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
+       }
+
+       /* ensure after enumeration our EP0 is active */
+
+       s3c_hsotg_enqueue_setup(hsotg);
+
+       dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+               readl(hsotg->regs + S3C_DIEPCTL0),
+               readl(hsotg->regs + S3C_DOEPCTL0));
+}
+
+/**
+ * kill_all_requests - complete every queued request with a result code
+ * @hsotg: The device state.
+ * @ep: The endpoint whose queue is to be emptied.
+ * @result: The status code handed to each completed request.
+ * @force: Force removal of any current requests
+ *
+ * Walk the endpoint's request queue under its lock and complete each
+ * request with @result. An in-progress IN transfer is left untouched
+ * unless @force is set.
+ */
+static void kill_all_requests(struct s3c_hsotg *hsotg,
+                             struct s3c_hsotg_ep *ep,
+                             int result, bool force)
+{
+       struct s3c_hsotg_req *cur, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ep->lock, flags);
+
+       list_for_each_entry_safe(cur, tmp, &ep->queue, queue) {
+               /* an IN request the hardware is already working on can't
+                * usefully be retired here unless we're forced to */
+               if (!force && ep->dir_in && ep->req == cur)
+                       continue;
+
+               s3c_hsotg_complete_request(hsotg, ep, cur, result);
+       }
+
+       spin_unlock_irqrestore(&ep->lock, flags);
+}
+
+#define call_gadget(_hs, _entry) \
+       if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
+           (_hs)->driver && (_hs)->driver->_entry)     \
+               (_hs)->driver->_entry(&(_hs)->gadget);
+
+/**
+ * s3c_hsotg_disconnect_irq - disconnect irq service
+ * @hsotg: The device state.
+ *
+ * A disconnect IRQ has been received, meaning that the host has
+ * lost contact with the bus. Remove all current transactions
+ * and signal the gadget driver that this has happened.
+ */
+static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg)
+{
+       unsigned index;
+
+       /* retire everything still queued on every endpoint */
+       for (index = 0; index < S3C_HSOTG_EPS; index++)
+               kill_all_requests(hsotg, &hsotg->eps[index],
+                                 -ESHUTDOWN, true);
+
+       /* let the gadget driver know the host has gone away */
+       call_gadget(hsotg, disconnect);
+}
+
+/**
+ * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
+ * @hsotg: The device state.
+ * @periodic: True if this is a periodic FIFO interrupt
+ *
+ * Scan the IN endpoints of the matching periodic class and try to
+ * push more data out for each, stopping at the first that reports
+ * an error.
+ */
+static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
+{
+       struct s3c_hsotg_ep *hs_ep;
+       int index;
+       int ret;
+
+       for (index = 0; index < S3C_HSOTG_EPS; index++) {
+               hs_ep = &hsotg->eps[index];
+
+               /* OUT endpoints never feed the TX FIFOs */
+               if (!hs_ep->dir_in)
+                       continue;
+
+               /* only endpoints matching this FIFO's periodic class */
+               if (periodic) {
+                       if (!hs_ep->periodic)
+                               continue;
+               } else {
+                       if (hs_ep->periodic)
+                               continue;
+               }
+
+               ret = s3c_hsotg_trytx(hsotg, hs_ep);
+               if (ret < 0)
+                       break;
+       }
+}
+
+/* file-scope handle to the controller state, read by
+ * usb_gadget_register_driver()/usb_gadget_unregister_driver();
+ * presumably set at probe time — only one controller instance is
+ * supported this way (TODO confirm against the probe code) */
+static struct s3c_hsotg *our_hsotg;
+
+/* IRQ flags which will trigger a retry around the IRQ loop */
+#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \
+                       S3C_GINTSTS_PTxFEmp |  \
+                       S3C_GINTSTS_RxFLvl)
+
+/**
+ * s3c_hsotg_irq - handle device interrupt
+ * @irq: The IRQ number triggered
+ * @pw: The pw value when registered the handler.
+ *
+ * Main interrupt dispatcher: reads GINTSTS, masks it with GINTMSK and
+ * services each pending source in turn, acking each by writing its bit
+ * back into GINTSTS. FIFO-level sources may cause a loop back to
+ * irq_retry (up to 8 times) since servicing them can raise further
+ * events. Always returns IRQ_HANDLED.
+ */
+static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
+{
+       struct s3c_hsotg *hsotg = pw;
+       int retry_count = 8;
+       u32 gintsts;
+       u32 gintmsk;
+
+irq_retry:
+       gintsts = readl(hsotg->regs + S3C_GINTSTS);
+       gintmsk = readl(hsotg->regs + S3C_GINTMSK);
+
+       dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
+               __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
+
+       /* only service sources that are currently unmasked */
+       gintsts &= gintmsk;
+
+       if (gintsts & S3C_GINTSTS_OTGInt) {
+               u32 otgint = readl(hsotg->regs + S3C_GOTGINT);
+
+               dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
+
+               /* ack both the OTG-specific status and the summary bit */
+               writel(otgint, hsotg->regs + S3C_GOTGINT);
+               writel(S3C_GINTSTS_OTGInt, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_DisconnInt) {
+               dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);
+               writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);
+
+               s3c_hsotg_disconnect_irq(hsotg);
+       }
+
+       if (gintsts & S3C_GINTSTS_SessReqInt) {
+               dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
+               writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_EnumDone) {
+               s3c_hsotg_irq_enumdone(hsotg);
+               writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_ConIDStsChng) {
+               dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
+                       readl(hsotg->regs + S3C_DSTS),
+                       readl(hsotg->regs + S3C_GOTGCTL));
+
+               writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {
+               u32 daint = readl(hsotg->regs + S3C_DAINT);
+               /* OUT endpoint bits sit above S3C_DAINT_OutEP_SHIFT;
+                * the remaining low bits are the IN endpoints */
+               u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;
+               u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);
+               int ep;
+
+               dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
+
+               for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
+                       if (daint_out & 1)
+                               s3c_hsotg_epint(hsotg, ep, 0);
+               }
+
+               for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
+                       if (daint_in & 1)
+                               s3c_hsotg_epint(hsotg, ep, 1);
+               }
+
+               writel(daint, hsotg->regs + S3C_DAINT);
+               writel(gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt),
+                      hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_USBRst) {
+               dev_info(hsotg->dev, "%s: USBRst\n", __func__);
+               dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
+                       readl(hsotg->regs + S3C_GNPTXSTS));
+
+               /* host reset: retire everything pending on EP0 */
+               kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);
+
+               /* it seems after a reset we can end up with a situation
+                * where the TXFIFO still has data in it... try flushing
+                * it to remove anything that may still be in it.
+                */
+
+               /* NOTE(review): 'if (1)' is unconditional; presumably
+                * kept as a block for easy disabling while debugging */
+               if (1) {
+                       writel(S3C_GRSTCTL_TxFNum(0) | S3C_GRSTCTL_TxFFlsh,
+                              hsotg->regs + S3C_GRSTCTL);
+
+                       dev_info(hsotg->dev, "GNPTXSTS=%08x\n",
+                                readl(hsotg->regs + S3C_GNPTXSTS));
+               }
+
+               s3c_hsotg_enqueue_setup(hsotg);
+
+               writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);
+       }
+
+       /* check both FIFOs */
+
+       if (gintsts & S3C_GINTSTS_NPTxFEmp) {
+               dev_dbg(hsotg->dev, "NPTxFEmp\n");
+
+               /* Disable the interrupt to stop it happening again
+                * unless one of these endpoint routines decides that
+                * it needs re-enabling */
+
+               s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
+               s3c_hsotg_irq_fifoempty(hsotg, false);
+
+               writel(S3C_GINTSTS_NPTxFEmp, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_PTxFEmp) {
+               dev_dbg(hsotg->dev, "PTxFEmp\n");
+
+               /* See note in S3C_GINTSTS_NPTxFEmp */
+
+               s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
+               s3c_hsotg_irq_fifoempty(hsotg, true);
+
+               writel(S3C_GINTSTS_PTxFEmp, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_RxFLvl) {
+               /* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
+                * we need to retry s3c_hsotg_handle_rx if this is still
+                * set. */
+
+               s3c_hsotg_handle_rx(hsotg);
+               writel(S3C_GINTSTS_RxFLvl, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_ModeMis) {
+               dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
+               writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_USBSusp) {
+               dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");
+               writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);
+
+               call_gadget(hsotg, suspend);
+       }
+
+       if (gintsts & S3C_GINTSTS_WkUpInt) {
+               dev_info(hsotg->dev, "S3C_GINTSTS_WkUpIn\n");
+               writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);
+
+               call_gadget(hsotg, resume);
+       }
+
+       if (gintsts & S3C_GINTSTS_ErlySusp) {
+               dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");
+               writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);
+       }
+
+       /* these next two seem to crop-up occasionally causing the core
+        * to shutdown the USB transfer, so try clearing them and logging
+        * the occurence. */
+
+       if (gintsts & S3C_GINTSTS_GOUTNakEff) {
+               dev_info(hsotg->dev, "GOUTNakEff triggered\n");
+
+               s3c_hsotg_dump(hsotg);
+
+               writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);
+               writel(S3C_GINTSTS_GOUTNakEff, hsotg->regs + S3C_GINTSTS);
+       }
+
+       if (gintsts & S3C_GINTSTS_GINNakEff) {
+               dev_info(hsotg->dev, "GINNakEff triggered\n");
+
+               s3c_hsotg_dump(hsotg);
+
+               writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);
+               writel(S3C_GINTSTS_GINNakEff, hsotg->regs + S3C_GINTSTS);
+       }
+
+       /* if we've had fifo events, we should try and go around the
+        * loop again to see if there's any point in returning yet. */
+
+       if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
+                       goto irq_retry;
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * s3c_hsotg_ep_enable - enable the given endpoint
+ * @ep: The USB endpint to configure
+ * @desc: The USB endpoint descriptor to configure with.
+ *
+ * This is called from the USB gadget code's usb_ep_enable().
+ *
+ * Returns 0 on success, -EINVAL if the descriptor direction does not
+ * match the endpoint, or if the transfer type is ISOC (unsupported).
+ */
+static int s3c_hsotg_ep_enable(struct usb_ep *ep,
+                              const struct usb_endpoint_descriptor *desc)
+{
+       struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+       struct s3c_hsotg *hsotg = hs_ep->parent;
+       unsigned long flags;
+       int index = hs_ep->index;
+       u32 epctrl_reg;
+       u32 epctrl;
+       u32 mps;
+       int dir_in;
+       int ret = 0;
+
+       dev_dbg(hsotg->dev,
+               "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
+               __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
+               desc->wMaxPacketSize, desc->bInterval);
+
+       /* not to be called for EP0 */
+       WARN_ON(index == 0);
+
+       dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
+       if (dir_in != hs_ep->dir_in) {
+               dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
+               return -EINVAL;
+       }
+
+       mps = le16_to_cpu(desc->wMaxPacketSize);
+
+       /* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
+
+       epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+       epctrl = readl(hsotg->regs + epctrl_reg);
+
+       dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
+               __func__, epctrl, epctrl_reg);
+
+       spin_lock_irqsave(&hs_ep->lock, flags);
+
+       epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);
+       epctrl |= S3C_DxEPCTL_MPS(mps);
+
+       /* mark the endpoint as active, otherwise the core may ignore
+        * transactions entirely for this endpoint */
+       epctrl |= S3C_DxEPCTL_USBActEp;
+
+       /* set the NAK status on the endpoint, otherwise we might try and
+        * do something with data that we've yet got a request to process
+        * since the RXFIFO will take data for an endpoint even if the
+        * size register hasn't been set.
+        */
+
+       epctrl |= S3C_DxEPCTL_SNAK;
+
+       /* update the endpoint state */
+       hs_ep->ep.maxpacket = mps;
+
+       /* default, set to non-periodic */
+       hs_ep->periodic = 0;
+
+       switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+       case USB_ENDPOINT_XFER_ISOC:
+               dev_err(hsotg->dev, "no current ISOC support\n");
+               /* fix: bail out via the unlock path — the original
+                * returned here with hs_ep->lock still held */
+               ret = -EINVAL;
+               goto out;
+
+       case USB_ENDPOINT_XFER_BULK:
+               epctrl |= S3C_DxEPCTL_EPType_Bulk;
+               break;
+
+       case USB_ENDPOINT_XFER_INT:
+               if (dir_in) {
+                       /* Allocate our TxFNum by simply using the index
+                        * of the endpoint for the moment. We could do
+                        * something better if the host indicates how
+                        * many FIFOs we are expecting to use. */
+
+                       hs_ep->periodic = 1;
+                       epctrl |= S3C_DxEPCTL_TxFNum(index);
+               }
+
+               epctrl |= S3C_DxEPCTL_EPType_Intterupt;
+               break;
+
+       case USB_ENDPOINT_XFER_CONTROL:
+               epctrl |= S3C_DxEPCTL_EPType_Control;
+               break;
+       }
+
+       /* for non control endpoints, set PID to D0 */
+       if (index)
+               epctrl |= S3C_DxEPCTL_SetD0PID;
+
+       dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
+               __func__, epctrl);
+
+       writel(epctrl, hsotg->regs + epctrl_reg);
+       dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
+               __func__, readl(hsotg->regs + epctrl_reg));
+
+       /* enable the endpoint interrupt */
+       s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
+
+out:
+       spin_unlock_irqrestore(&hs_ep->lock, flags);
+       return ret;
+}
+
+/**
+ * s3c_hsotg_ep_disable - disable the given endpoint
+ * @ep: The endpoint to disable (must not be EP0).
+ *
+ * Retire all queued requests with -ESHUTDOWN, then deactivate the
+ * endpoint in the hardware (clear EPEna/USBActEp, set NAK) and mask
+ * its interrupts. Returns 0, or -EINVAL when called for EP0.
+ */
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+       struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+       struct s3c_hsotg *hsotg = hs_ep->parent;
+       int dir_in = hs_ep->dir_in;
+       int index = hs_ep->index;
+       unsigned long flags;
+       u32 epctrl_reg;
+       u32 ctrl;
+
+       dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
+
+       /* EP0 must stay usable for control traffic */
+       if (ep == &hsotg->eps[0].ep) {
+               dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
+               return -EINVAL;
+       }
+
+       epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
+
+       /* terminate all requests with shutdown */
+       kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
+
+       spin_lock_irqsave(&hs_ep->lock, flags);
+
+       /* deactivate the endpoint and NAK anything the host sends */
+       ctrl = readl(hsotg->regs + epctrl_reg);
+       ctrl &= ~S3C_DxEPCTL_EPEna;
+       ctrl &= ~S3C_DxEPCTL_USBActEp;
+       ctrl |= S3C_DxEPCTL_SNAK;
+
+       dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
+       writel(ctrl, hsotg->regs + epctrl_reg);
+
+       /* disable endpoint interrupts */
+       s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
+
+       spin_unlock_irqrestore(&hs_ep->lock, flags);
+       return 0;
+}
+
+/**
+ * on_list - check request is on the given endpoint
+ * @ep: The endpoint to check.
+ * @test: The request to test if it is on the endpoint.
+ *
+ * Returns true when @test is currently linked into @ep's queue.
+ */
+static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
+{
+       struct s3c_hsotg_req *cursor, *backup;
+
+       list_for_each_entry_safe(cursor, backup, &ep->queue, queue)
+               if (cursor == test)
+                       return true;
+
+       return false;
+}
+
+/**
+ * s3c_hsotg_ep_dequeue - remove a queued request from an endpoint
+ * @ep: The endpoint the request was queued on.
+ * @req: The request to remove.
+ *
+ * Completes @req with -ECONNRESET if it is still queued; returns
+ * -EINPROGRESS if the hardware is already working on it, or -EINVAL
+ * if it is not on this endpoint's queue at all.
+ */
+static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+       struct s3c_hsotg_req *hs_req = our_req(req);
+       struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+       struct s3c_hsotg *hs = hs_ep->parent;
+       unsigned long flags;
+       int ret = 0;
+
+       dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
+
+       /* a request the hardware is already processing can't be pulled */
+       if (hs_ep->req == hs_req) {
+               dev_dbg(hs->dev, "%s: already in progress\n", __func__);
+               return -EINPROGRESS;
+       }
+
+       spin_lock_irqsave(&hs_ep->lock, flags);
+
+       if (on_list(hs_ep, hs_req))
+               s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
+       else
+               ret = -EINVAL;
+
+       spin_unlock_irqrestore(&hs_ep->lock, flags);
+
+       return ret;
+}
+
+/**
+ * s3c_hsotg_ep_sethalt - set or clear the halt (stall) state
+ * @ep: The endpoint to change.
+ * @value: Non-zero to stall, zero to un-stall.
+ *
+ * Mirrors the stall bit into both the IN and OUT control registers
+ * for this endpoint index, under the endpoint lock. Always returns 0.
+ */
+static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+{
+       struct s3c_hsotg_ep *hs_ep = our_ep(ep);
+       struct s3c_hsotg *hs = hs_ep->parent;
+       int index = hs_ep->index;
+       unsigned long irqflags;
+       u32 ctl_regs[2];
+       u32 epctl;
+       int i;
+
+       dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
+
+       /* write both IN and OUT control registers, IN first */
+       ctl_regs[0] = S3C_DIEPCTL(index);
+       ctl_regs[1] = S3C_DOEPCTL(index);
+
+       spin_lock_irqsave(&hs_ep->lock, irqflags);
+
+       for (i = 0; i < 2; i++) {
+               epctl = readl(hs->regs + ctl_regs[i]);
+
+               if (value)
+                       epctl |= S3C_DxEPCTL_Stall;
+               else
+                       epctl &= ~S3C_DxEPCTL_Stall;
+
+               writel(epctl, hs->regs + ctl_regs[i]);
+       }
+
+       spin_unlock_irqrestore(&hs_ep->lock, irqflags);
+
+       return 0;
+}
+
+/* endpoint operations handed to the gadget core through each
+ * endpoint's ops pointer */
+static struct usb_ep_ops s3c_hsotg_ep_ops = {
+       .enable         = s3c_hsotg_ep_enable,
+       .disable        = s3c_hsotg_ep_disable,
+       .alloc_request  = s3c_hsotg_ep_alloc_request,
+       .free_request   = s3c_hsotg_ep_free_request,
+       .queue          = s3c_hsotg_ep_queue,
+       .dequeue        = s3c_hsotg_ep_dequeue,
+       .set_halt       = s3c_hsotg_ep_sethalt,
+       /* note, don't believe we have any call for the fifo routines */
+};
+
+/**
+ * s3c_hsotg_corereset - issue softreset to the core
+ * @hsotg: The device state
+ *
+ * Issue a soft reset to the core, and await the core finishing it.
+ *
+ * Returns 0 on success, -EINVAL if the CSftRst bit never reads back
+ * as asserted, or -ETIMEDOUT if the reset never completes.
+ */
+static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
+{
+       int timeout;
+       u32 grstctl;
+
+       dev_dbg(hsotg->dev, "resetting core\n");
+
+       /* issue soft reset */
+       writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);
+
+       /* wait for the written CSftRst bit to read back as asserted */
+       timeout = 1000;
+       do {
+               grstctl = readl(hsotg->regs + S3C_GRSTCTL);
+       } while (!(grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);
+
+       /* fix: was 'if (!grstctl & S3C_GRSTCTL_CSftRst)', which due to
+        * operator precedence evaluated '(!grstctl) & bit' and so only
+        * triggered when the whole register happened to read as zero */
+       if (!(grstctl & S3C_GRSTCTL_CSftRst)) {
+               dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
+               return -EINVAL;
+       }
+
+       timeout = 1000;
+
+       /* now wait for the core to clear CSftRst and for the AHB to
+        * return to the idle state */
+       while (1) {
+               u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);
+
+               if (timeout-- < 0) {
+                       dev_info(hsotg->dev,
+                                "%s: reset failed, GRSTCTL=%08x\n",
+                                __func__, grstctl);
+                       return -ETIMEDOUT;
+               }
+
+               if (grstctl & S3C_GRSTCTL_CSftRst)
+                       continue;
+
+               if (!(grstctl & S3C_GRSTCTL_AHBIdle))
+                       continue;
+
+               break;          /* reset done */
+       }
+
+       dev_dbg(hsotg->dev, "reset successful\n");
+       return 0;
+}
+
+/**
+ * usb_gadget_register_driver - bind a gadget driver to this controller
+ * @driver: The gadget driver to register.
+ *
+ * Validate and bind @driver, then reset and fully initialise the
+ * controller hardware (FIFOs, interrupt masks, EP0) and remove the
+ * soft-disconnect so the host can enumerate us. Returns 0 on success
+ * or a negative error code.
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+       struct s3c_hsotg *hsotg = our_hsotg;
+       int ret;
+
+       if (!hsotg) {
+               printk(KERN_ERR "%s: called with no device\n", __func__);
+               return -ENODEV;
+       }
+
+       if (!driver) {
+               dev_err(hsotg->dev, "%s: no driver\n", __func__);
+               return -EINVAL;
+       }
+
+       /* NOTE(review): a bad speed only warns and carries on here;
+        * confirm whether this should reject with -EINVAL instead */
+       if (driver->speed != USB_SPEED_HIGH &&
+           driver->speed != USB_SPEED_FULL) {
+               dev_err(hsotg->dev, "%s: bad speed\n", __func__);
+       }
+
+       if (!driver->bind || !driver->setup) {
+               dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
+               return -EINVAL;
+       }
+
+       WARN_ON(hsotg->driver);
+
+       driver->driver.bus = NULL;
+       hsotg->driver = driver;
+       hsotg->gadget.dev.driver = &driver->driver;
+       hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
+       hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+       ret = device_add(&hsotg->gadget.dev);
+       if (ret) {
+               dev_err(hsotg->dev, "failed to register gadget device\n");
+               goto err;
+       }
+
+       ret = driver->bind(&hsotg->gadget);
+       if (ret) {
+               dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name);
+               /* fix: undo the earlier device_add() on this path; the
+                * original left the gadget device registered */
+               goto err_unreg_device;
+       }
+
+       /* we must now enable ep0 ready for host detection and then
+        * set configuration. */
+
+       s3c_hsotg_corereset(hsotg);
+
+       /* set the PLL on, remove the HNP/SRP and set the PHY */
+       writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) |
+              (0x5 << 10), hsotg->regs + S3C_GUSBCFG);
+
+       /* looks like soft-reset changes state of FIFOs */
+       s3c_hsotg_init_fifo(hsotg);
+
+       /* stay soft-disconnected until everything is programmed */
+       __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+       writel(1 << 18 | S3C_DCFG_DevSpd_HS,  hsotg->regs + S3C_DCFG);
+
+       writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt |
+              S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst |
+              S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt |
+              S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt |
+              S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff |
+              S3C_GINTSTS_ErlySusp,
+              hsotg->regs + S3C_GINTMSK);
+
+       if (using_dma(hsotg))
+               writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn |
+                      S3C_GAHBCFG_HBstLen_Incr4,
+                      hsotg->regs + S3C_GAHBCFG);
+       else
+               writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG);
+
+       /* Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
+        * up being flooded with interrupts if the host is polling the
+        * endpoint to try and read data. */
+
+       writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
+              S3C_DIEPMSK_INTknEPMisMsk |
+              S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
+              hsotg->regs + S3C_DIEPMSK);
+
+       /* don't need XferCompl, we get that from RXFIFO in slave mode. In
+        * DMA mode we may need this.
+        * fix: the conditional expression is now parenthesized — '?:'
+        * binds looser than '|', so the previous form used the whole
+        * OR-chain as the condition and discarded the Setup/AHBErr/
+        * EPDisbld mask bits entirely. */
+       writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
+              S3C_DOEPMSK_EPDisbldMsk |
+              (using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
+                                   S3C_DIEPMSK_TimeOUTMsk) : 0),
+              hsotg->regs + S3C_DOEPMSK);
+
+       writel(0, hsotg->regs + S3C_DAINTMSK);
+
+       dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+                readl(hsotg->regs + S3C_DIEPCTL0),
+                readl(hsotg->regs + S3C_DOEPCTL0));
+
+       /* enable in and out endpoint interrupts */
+       s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
+
+       /* Enable the RXFIFO when in slave mode, as this is how we collect
+        * the data. In DMA mode, we get events from the FIFO but also
+        * things we cannot process, so do not use it. */
+       if (!using_dma(hsotg))
+               s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl);
+
+       /* Enable interrupts for EP0 in and out */
+       s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
+       s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
+
+       __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
+       udelay(10);  /* see openiboot */
+       __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
+
+       dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
+
+       /* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by
+          writing to the EPCTL register.. */
+
+       /* set to read 1 8byte packet */
+       writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
+              S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
+
+       writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+              S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna |
+              S3C_DxEPCTL_USBActEp,
+              hsotg->regs + S3C_DOEPCTL0);
+
+       /* enable, but don't activate EP0in */
+       writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
+              S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0);
+
+       s3c_hsotg_enqueue_setup(hsotg);
+
+       dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+                readl(hsotg->regs + S3C_DIEPCTL0),
+                readl(hsotg->regs + S3C_DOEPCTL0));
+
+       /* clear global NAKs */
+       writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
+              hsotg->regs + S3C_DCTL);
+
+       /* remove the soft-disconnect and let's go */
+       __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
+
+       /* report to the user, and return */
+
+       dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
+       return 0;
+
+err_unreg_device:
+       device_del(&hsotg->gadget.dev);
+err:
+       hsotg->driver = NULL;
+       hsotg->gadget.dev.driver = NULL;
+       return ret;
+}
+
+/**
+ * usb_gadget_unregister_driver - detach a gadget driver
+ * @driver: The driver being removed; must match the bound driver.
+ *
+ * Shut down every endpoint, notify and unbind the driver, then remove
+ * the gadget device. Returns 0, -ENODEV with no controller, or
+ * -EINVAL for a mismatched/incomplete driver.
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+       struct s3c_hsotg *hsotg = our_hsotg;
+       int index;
+
+       if (hsotg == NULL)
+               return -ENODEV;
+
+       if (driver == NULL || driver != hsotg->driver || driver->unbind == NULL)
+               return -EINVAL;
+
+       /* all endpoints should be shutdown */
+       for (index = 0; index < S3C_HSOTG_EPS; index++)
+               s3c_hsotg_ep_disable(&hsotg->eps[index].ep);
+
+       call_gadget(hsotg, disconnect);
+
+       driver->unbind(&hsotg->gadget);
+       hsotg->driver = NULL;
+       hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+
+       device_del(&hsotg->gadget.dev);
+
+       dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
+                driver->driver.name);
+
+       return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/* report the current USB frame number to the gadget core */
+static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
+{
+       struct s3c_hsotg *hsotg = to_hsotg(gadget);
+
+       return s3c_hsotg_read_frameno(hsotg);
+}
+
+/* gadget-level operations exposed to the gadget core; only get_frame
+ * is implemented by this driver */
+static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
+       .get_frame      = s3c_hsotg_gadget_getframe,
+};
+
+/**
+ * s3c_hsotg_initep - initialise a single endpoint
+ * @hsotg: The device state.
+ * @hs_ep: The endpoint to be initialised.
+ * @epnum: The endpoint number
+ *
+ * Initialise the given endpoint (as part of the probe and device state
+ * creation) to give to the gadget driver. Setup the endpoint name, any
+ * direction information and other state that may be required.
+ */
+static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
+                                      struct s3c_hsotg_ep *hs_ep,
+                                      int epnum)
+{
+       u32 ptxfifo;
+       char *dir;
+
+       /* ep0 is bidirectional; otherwise odd numbers are IN, even OUT */
+       if (epnum == 0) {
+               dir = "";
+       } else if ((epnum % 2) != 0) {
+               dir = "in";
+               hs_ep->dir_in = 1;
+       } else {
+               dir = "out";
+       }
+
+       hs_ep->index = epnum;
+
+       snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
+
+       INIT_LIST_HEAD(&hs_ep->queue);
+       INIT_LIST_HEAD(&hs_ep->ep.ep_list);
+
+       spin_lock_init(&hs_ep->lock);
+
+       /* every endpoint except ep0 is advertised on the gadget's list */
+       if (epnum)
+               list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
+
+       hs_ep->parent = hsotg;
+       hs_ep->ep.name = hs_ep->name;
+       hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
+       hs_ep->ep.ops = &s3c_hsotg_ep_ops;
+
+       /* Read the FIFO size for the Periodic TX FIFO, even if we're
+        * an OUT endpoint, we may as well do this if in future the
+        * code is changed to make each endpoint's direction changeable.
+        */
+
+       ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));
+       hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo);
+
+       /* if we're using dma, we need to set the next-endpoint pointer
+        * to be something valid.
+        */
+
+       if (using_dma(hsotg)) {
+               u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15);
+
+               writel(next, hsotg->regs + S3C_DIEPCTL(epnum));
+               writel(next, hsotg->regs + S3C_DOEPCTL(epnum));
+       }
+}
+
+/**
+ * s3c_hsotg_otgreset - reset the OtG phy block
+ * @hsotg: The host state.
+ *
+ * Power up the phy, set the basic configuration and start the PHY.
+ */
+static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
+{
+       u32 osc;
+
+       /* NOTE(review): S3C_PHYPWR/S3C_PHYCLK/S3C_RSTCON are written
+        * without an hsotg->regs offset, so they appear to be absolute
+        * mapped platform addresses — confirm against the platform
+        * register definitions */
+       writel(0, S3C_PHYPWR);
+       mdelay(1);
+
+       /* pick the external oscillator if the platform data requests it */
+       osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;
+
+       writel(osc | 0x10, S3C_PHYCLK);
+
+       /* issue a full set of resets to the otg and core */
+
+       writel(S3C_RSTCON_PHY, S3C_RSTCON);
+       udelay(20);     /* at-least 10uS */
+       writel(0, S3C_RSTCON);
+}
+
+
+/**
+ * s3c_hsotg_init - basic one-off initialisation of the core
+ * @hsotg: The device state.
+ *
+ * Program the default endpoint interrupt masks, set up the FIFOs and
+ * configure the basic PHY and AHB settings.
+ */
+static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
+{
+       /* unmask subset of endpoint interrupts */
+
+       writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
+              S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
+              hsotg->regs + S3C_DIEPMSK);
+
+       writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
+              S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk,
+              hsotg->regs + S3C_DOEPMSK);
+
+       /* no per-endpoint interrupts enabled yet */
+       writel(0, hsotg->regs + S3C_DAINTMSK);
+
+       /* (an 'if (0)' block that posted global NAKs here was dead code
+        * and has been removed) */
+
+       /* setup fifos */
+
+       dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+                readl(hsotg->regs + S3C_GRXFSIZ),
+                readl(hsotg->regs + S3C_GNPTXFSIZ));
+
+       s3c_hsotg_init_fifo(hsotg);
+
+       /* set the PLL on, remove the HNP/SRP and set the PHY */
+       writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10),
+              hsotg->regs + S3C_GUSBCFG);
+
+       /* only enable AHB DMA when the driver is actually using DMA */
+       writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,
+              hsotg->regs + S3C_GAHBCFG);
+}
+
+/**
+ * s3c_hsotg_dump - dump the core's register state to the kernel log
+ * @hsotg: The device state.
+ *
+ * Debug aid: print the main configuration, FIFO-sizing and
+ * per-endpoint control/size/DMA registers. Read-only, no side effects
+ * on driver state.
+ */
+static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
+{
+       struct device *dev = hsotg->dev;
+       void __iomem *regs = hsotg->regs;
+       u32 val;
+       int idx;
+
+       dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
+                readl(regs + S3C_DCFG), readl(regs + S3C_DCTL),
+                readl(regs + S3C_DIEPMSK));
+
+       /* NOTE(review): 0x44 is a raw register offset with no named
+        * constant here — check the datasheet for its meaning */
+       dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
+                readl(regs + S3C_GAHBCFG), readl(regs + 0x44));
+
+       dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
+                readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ));
+
+       /* show periodic fifo settings */
+
+       for (idx = 1; idx <= 15; idx++) {
+               val = readl(regs + S3C_DPTXFSIZn(idx));
+               dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
+                        val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
+                        val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
+       }
+
+       /* per-endpoint control, transfer-size and DMA registers */
+       for (idx = 0; idx < 15; idx++) {
+               dev_info(dev,
+                        "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
+                        readl(regs + S3C_DIEPCTL(idx)),
+                        readl(regs + S3C_DIEPTSIZ(idx)),
+                        readl(regs + S3C_DIEPDMA(idx)));
+
+               val = readl(regs + S3C_DOEPCTL(idx));
+               dev_info(dev,
+                        "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
+                        idx, readl(regs + S3C_DOEPCTL(idx)),
+                        readl(regs + S3C_DOEPTSIZ(idx)),
+                        readl(regs + S3C_DOEPDMA(idx)));
+
+       }
+
+       dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
+                readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
+}
+
+
+/**
+ * state_show - debugfs: show overall driver and device state.
+ * @seq: The seq file to write to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the overall state of the hardware and
+ * some general information about each of the endpoints available
+ * to the system.
+ */
+static int state_show(struct seq_file *seq, void *v)
+{
+       struct s3c_hsotg *hsotg = seq->private;
+       void __iomem *regs = hsotg->regs;
+       int idx;
+
+       seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
+                readl(regs + S3C_DCFG),
+                readl(regs + S3C_DCTL),
+                readl(regs + S3C_DSTS));
+
+       /* label corrected: register is DOEPMSK, was printed as "DOEPMASK" */
+       seq_printf(seq, "DIEPMSK=0x%08x, DOEPMSK=0x%08x\n",
+                  readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK));
+
+       seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
+                  readl(regs + S3C_GINTMSK),
+                  readl(regs + S3C_GINTSTS));
+
+       seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
+                  readl(regs + S3C_DAINTMSK),
+                  readl(regs + S3C_DAINT));
+
+       seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
+                  readl(regs + S3C_GNPTXSTS),
+                  readl(regs + S3C_GRXSTSR));
+
+       seq_printf(seq, "\nEndpoint status:\n");
+
+       /* show the control and transfer-size registers for each endpoint */
+       for (idx = 0; idx < 15; idx++) {
+               u32 in, out;
+
+               in = readl(regs + S3C_DIEPCTL(idx));
+               out = readl(regs + S3C_DOEPCTL(idx));
+
+               seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
+                          idx, in, out);
+
+               in = readl(regs + S3C_DIEPTSIZ(idx));
+               out = readl(regs + S3C_DOEPTSIZ(idx));
+
+               seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
+                          in, out);
+
+               seq_printf(seq, "\n");
+       }
+
+       return 0;
+}
+
+/* state_open - debugfs open; hand the driver state to state_show() */
+static int state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, state_show, inode->i_private);
+}
+
+/* file operations for the "state" debugfs entry (read-only seq file) */
+static const struct file_operations state_fops = {
+       .owner          = THIS_MODULE,
+       .open           = state_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+/**
+ * fifo_show - debugfs: show the fifo information
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * Show the FIFO information for the overall fifo and all the
+ * periodic transmission FIFOs.
+*/
+static int fifo_show(struct seq_file *seq, void *v)
+{
+       struct s3c_hsotg *hsotg = seq->private;
+       void __iomem *regs = hsotg->regs;
+       u32 val;
+       int idx;
+
+       seq_printf(seq, "Non-periodic FIFOs:\n");
+       seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ));
+
+       /* the non-periodic TX FIFO register packs depth and start address */
+       val = readl(regs + S3C_GNPTXFSIZ);
+       seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
+                  val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT,
+                  val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK);
+
+       seq_printf(seq, "\nPeriodic TXFIFOs:\n");
+
+       /* periodic TX FIFOs are numbered from 1 */
+       for (idx = 1; idx <= 15; idx++) {
+               val = readl(regs + S3C_DPTXFSIZn(idx));
+
+               seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
+                          val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
+                          val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
+       }
+
+       return 0;
+}
+
+/* fifo_open - debugfs open; hand the driver state to fifo_show() */
+static int fifo_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, fifo_show, inode->i_private);
+}
+
+/* file operations for the "fifo" debugfs entry (read-only seq file) */
+static const struct file_operations fifo_fops = {
+       .owner          = THIS_MODULE,
+       .open           = fifo_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+
+/* decode_direction - map an endpoint direction flag to a printable name */
+static const char *decode_direction(int is_in)
+{
+       if (is_in)
+               return "in";
+
+       return "out";
+}
+
+/**
+ * ep_show - debugfs: show the state of an endpoint.
+ * @seq: The seq_file to write data to.
+ * @v: Unused parameter.
+ *
+ * This debugfs entry shows the state of the given endpoint (one is
+ * registered for each available).
+*/
+static int ep_show(struct seq_file *seq, void *v)
+{
+       struct s3c_hsotg_ep *ep = seq->private;
+       struct s3c_hsotg *hsotg = ep->parent;
+       struct s3c_hsotg_req *req;
+       void __iomem *regs = hsotg->regs;
+       int index = ep->index;
+       int show_limit = 15;    /* cap on queued requests printed */
+       unsigned long flags;
+
+       seq_printf(seq, "Endpoint index %d, named %s,  dir %s:\n",
+                  ep->index, ep->ep.name, decode_direction(ep->dir_in));
+
+       /* first show the register state */
+
+       seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
+                  readl(regs + S3C_DIEPCTL(index)),
+                  readl(regs + S3C_DOEPCTL(index)));
+
+       seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
+                  readl(regs + S3C_DIEPDMA(index)),
+                  readl(regs + S3C_DOEPDMA(index)));
+
+       seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
+                  readl(regs + S3C_DIEPINT(index)),
+                  readl(regs + S3C_DOEPINT(index)));
+
+       seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
+                  readl(regs + S3C_DIEPTSIZ(index)),
+                  readl(regs + S3C_DOEPTSIZ(index)));
+
+       seq_printf(seq, "\n");
+       seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
+       seq_printf(seq, "total_data=%ld\n", ep->total_data);
+
+       seq_printf(seq, "request list (%p,%p):\n",
+                  ep->queue.next, ep->queue.prev);
+
+       /* hold the endpoint lock while walking the request queue so the
+        * list cannot change under us; requests marked '*' are current */
+       spin_lock_irqsave(&ep->lock, flags);
+
+       list_for_each_entry(req, &ep->queue, queue) {
+               if (--show_limit < 0) {
+                       seq_printf(seq, "not showing more requests...\n");
+                       break;
+               }
+
+               seq_printf(seq, "%c req %p: %d bytes @%p, ",
+                          req == ep->req ? '*' : ' ',
+                          req, req->req.length, req->req.buf);
+               seq_printf(seq, "%d done, res %d\n",
+                          req->req.actual, req->req.status);
+       }
+
+       spin_unlock_irqrestore(&ep->lock, flags);
+
+       return 0;
+}
+
+/* ep_open - debugfs open; hand the endpoint state to ep_show() */
+static int ep_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ep_show, inode->i_private);
+}
+
+/* file operations for the per-endpoint debugfs entries */
+static const struct file_operations ep_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ep_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+/**
+ * s3c_hsotg_create_debug - create debugfs directory and files
+ * @hsotg: The driver state
+ *
+ * Create the debugfs files to allow the user to get information
+ * about the state of the system. The directory name is created
+ * with the same name as the device itself, in case we end up
+ * with multiple blocks in future systems.
+*/
+static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
+{
+       struct dentry *root;
+       unsigned epidx;
+
+       /* NOTE(review): debugfs_create_dir/file return NULL on failure
+        * when debugfs is compiled in, and ERR_PTR only when it is
+        * disabled - the IS_ERR() checks below will not catch the NULL
+        * case; confirm whether IS_ERR_OR_NULL semantics were intended. */
+       root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
+       hsotg->debug_root = root;
+       if (IS_ERR(root)) {
+               dev_err(hsotg->dev, "cannot create debug root\n");
+               return;
+       }
+
+       /* create general state file */
+
+       hsotg->debug_file = debugfs_create_file("state", 0444, root,
+                                               hsotg, &state_fops);
+
+       if (IS_ERR(hsotg->debug_file))
+               dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
+
+       hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
+                                               hsotg, &fifo_fops);
+
+       if (IS_ERR(hsotg->debug_fifo))
+               dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
+
+       /* create one file for each endpoint */
+
+       for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
+               struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
+
+               ep->debugfs = debugfs_create_file(ep->name, 0444,
+                                                 root, ep, &ep_fops);
+
+               if (IS_ERR(ep->debugfs))
+                       dev_err(hsotg->dev, "failed to create %s debug file\n",
+                               ep->name);
+       }
+}
+
+/**
+ * s3c_hsotg_delete_debug - cleanup debugfs entries
+ * @hsotg: The driver state
+ *
+ * Remove the debugfs files created by s3c_hsotg_create_debug() when
+ * the driver is unbound or the module removed.
+ */
+static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
+{
+       unsigned idx;
+
+       /* drop the per-endpoint files first, then the shared entries
+        * and finally the directory itself */
+       for (idx = 0; idx < S3C_HSOTG_EPS; idx++)
+               debugfs_remove(hsotg->eps[idx].debugfs);
+
+       debugfs_remove(hsotg->debug_file);
+       debugfs_remove(hsotg->debug_fifo);
+       debugfs_remove(hsotg->debug_root);
+}
+
+/**
+ * s3c_hsotg_gate - set the hardware gate for the block
+ * @pdev: The device we bound to
+ * @on: On or off.
+ *
+ * Set the hardware gate setting into the block. If we end up on
+ * something other than an S3C64XX, then we might need to change this
+ * to using a platform data callback, or some other mechanism.
+ */
+static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
+{
+       unsigned long flags;
+       u32 others;
+
+       /* read-modify-write of the shared OTHERS register must not be
+        * interrupted, so run it with local interrupts disabled.
+        * @pdev is currently unused; kept for the future callback form. */
+       local_irq_save(flags);
+
+       others = __raw_readl(S3C64XX_OTHERS);
+       if (on)
+               others |= S3C64XX_OTHERS_USBMASK;
+       else
+               others &= ~S3C64XX_OTHERS_USBMASK;
+       __raw_writel(others, S3C64XX_OTHERS);
+
+       local_irq_restore(flags);
+}
+
+/* fallback platform data for boards that do not supply any */
+struct s3c_hsotg_plat s3c_hsotg_default_pdata;
+
+/**
+ * s3c_hsotg_probe - bind the driver to the platform device
+ * @pdev: The platform device to bind to.
+ *
+ * Map the registers, claim the interrupt, reset and initialise the
+ * core, set up the endpoints and register the debugfs entries.
+ *
+ * Returns 0 on success or a negative errno code on failure.
+ */
+static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
+{
+       struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
+       struct device *dev = &pdev->dev;
+       struct s3c_hsotg *hsotg;
+       struct resource *res;
+       int epnum;
+       int ret;
+
+       if (!plat)
+               plat = &s3c_hsotg_default_pdata;
+
+       /* the endpoint array is allocated as a tail of the main state */
+       hsotg = kzalloc(sizeof(struct s3c_hsotg) +
+                       sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS,
+                       GFP_KERNEL);
+       if (!hsotg) {
+               dev_err(dev, "cannot get memory\n");
+               return -ENOMEM;
+       }
+
+       hsotg->dev = dev;
+       hsotg->plat = plat;
+
+       platform_set_drvdata(pdev, hsotg);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "cannot find register resource 0\n");
+               ret = -EINVAL;
+               goto err_mem;
+       }
+
+       hsotg->regs_res = request_mem_region(res->start, resource_size(res),
+                                            dev_name(dev));
+       if (!hsotg->regs_res) {
+               dev_err(dev, "cannot reserve registers\n");
+               ret = -ENOENT;
+               goto err_mem;
+       }
+
+       hsotg->regs = ioremap(res->start, resource_size(res));
+       if (!hsotg->regs) {
+               dev_err(dev, "cannot map registers\n");
+               ret = -ENXIO;
+               goto err_regs_res;
+       }
+
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err(dev, "cannot find IRQ\n");
+               goto err_regs;
+       }
+
+       hsotg->irq = ret;
+
+       ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg);
+       if (ret < 0) {
+               dev_err(dev, "cannot claim IRQ\n");
+               goto err_regs;
+       }
+
+       dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
+
+       device_initialize(&hsotg->gadget.dev);
+
+       dev_set_name(&hsotg->gadget.dev, "gadget");
+
+       hsotg->gadget.is_dualspeed = 1;
+       hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
+       hsotg->gadget.name = dev_name(dev);
+
+       hsotg->gadget.dev.parent = dev;
+       hsotg->gadget.dev.dma_mask = dev->dma_mask;
+
+       /* setup endpoint information */
+
+       INIT_LIST_HEAD(&hsotg->gadget.ep_list);
+       hsotg->gadget.ep0 = &hsotg->eps[0].ep;
+
+       /* allocate EP0 request */
+
+       hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
+                                                    GFP_KERNEL);
+       if (!hsotg->ctrl_req) {
+               dev_err(dev, "failed to allocate ctrl req\n");
+               /* fix: ret was left at 0 here (probe reported success),
+                * and the error path skipped freeing the claimed IRQ */
+               ret = -ENOMEM;
+               goto err_irq;
+       }
+
+       /* reset the system */
+
+       s3c_hsotg_gate(pdev, true);
+
+       s3c_hsotg_otgreset(hsotg);
+       s3c_hsotg_corereset(hsotg);
+       s3c_hsotg_init(hsotg);
+
+       /* initialise the endpoints now the core has been initialised */
+       for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++)
+               s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
+
+       s3c_hsotg_create_debug(hsotg);
+
+       s3c_hsotg_dump(hsotg);
+
+       our_hsotg = hsotg;
+       return 0;
+
+err_irq:
+       free_irq(hsotg->irq, hsotg);
+
+err_regs:
+       iounmap(hsotg->regs);
+
+err_regs_res:
+       release_resource(hsotg->regs_res);
+       kfree(hsotg->regs_res);
+
+err_mem:
+       kfree(hsotg);
+       return ret;
+}
+
+/**
+ * s3c_hsotg_remove - unbind the driver from the platform device
+ * @pdev: The platform device being removed.
+ *
+ * Tear down debugfs, drop any bound gadget driver, release the IRQ,
+ * register mapping and memory region, gate the block off and free
+ * the driver state. Always returns 0.
+ */
+static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
+{
+       struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+
+       s3c_hsotg_delete_debug(hsotg);
+
+       usb_gadget_unregister_driver(hsotg->driver);
+
+       free_irq(hsotg->irq, hsotg);
+       iounmap(hsotg->regs);
+
+       release_resource(hsotg->regs_res);
+       kfree(hsotg->regs_res);
+
+       /* power the block back down now nothing is using it */
+       s3c_hsotg_gate(pdev, false);
+
+       kfree(hsotg);
+       return 0;
+}
+
+/* suspend/resume are not implemented yet; the NULL definitions keep
+ * the platform_driver structure below valid until PM support lands */
+#if 1
+#define s3c_hsotg_suspend NULL
+#define s3c_hsotg_resume NULL
+#endif
+
+static struct platform_driver s3c_hsotg_driver = {
+       .driver         = {
+               .name   = "s3c-hsotg",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = s3c_hsotg_probe,
+       .remove         = __devexit_p(s3c_hsotg_remove),
+       .suspend        = s3c_hsotg_suspend,
+       .resume         = s3c_hsotg_resume,
+};
+
+/* module entry point: register the platform driver */
+static int __init s3c_hsotg_modinit(void)
+{
+       return platform_driver_register(&s3c_hsotg_driver);
+}
+
+/* module exit point: unregister the platform driver */
+static void __exit s3c_hsotg_modexit(void)
+{
+       platform_driver_unregister(&s3c_hsotg_driver);
+}
+
+module_init(s3c_hsotg_modinit);
+module_exit(s3c_hsotg_modexit);
+
+MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsotg");
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
new file mode 100644 (file)
index 0000000..0f3d22f
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * u_audio.c -- ALSA audio utilities for Gadget stack
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/syscalls.h>
+
+#include "u_audio.h"
+
+/*
+ * This component encapsulates the ALSA devices for USB audio gadget
+ */
+
+/* default ALSA device nodes used when no module parameters are given */
+#define FILE_PCM_PLAYBACK      "/dev/snd/pcmC0D0p"
+#define FILE_PCM_CAPTURE       "/dev/snd/pcmC0D0c"
+#define FILE_CONTROL           "/dev/snd/controlC0"
+
+static char *fn_play = FILE_PCM_PLAYBACK;
+module_param(fn_play, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_play, "Playback PCM device file name");
+
+static char *fn_cap = FILE_PCM_CAPTURE;
+module_param(fn_cap, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_cap, "Capture PCM device file name");
+
+static char *fn_cntl = FILE_CONTROL;
+module_param(fn_cntl, charp, S_IRUGO);
+MODULE_PARM_DESC(fn_cntl, "Control device file name");
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Some ALSA internal helper functions
+ *
+ * snd_interval_refine_set: constrain an interval to the single
+ * integer value @val.
+ */
+static int snd_interval_refine_set(struct snd_interval *i, unsigned int val)
+{
+       struct snd_interval t;
+
+       t.empty = 0;
+       t.min = val;
+       t.max = val;
+       t.openmin = 0;
+       t.openmax = 0;
+       t.integer = 1;
+
+       return snd_interval_refine(i, &t);
+}
+
+/* _snd_pcm_hw_param_set - constrain one hw_params field to a value
+ *
+ * @var selects which parameter; @dir gives rounding direction
+ * (<0 round down, 0 exact, >0 round up), mirroring the ALSA
+ * user-space refine helpers.  Returns >0 if the parameter changed,
+ * 0 if unchanged, negative errno on an impossible constraint.
+ */
+static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
+                                snd_pcm_hw_param_t var, unsigned int val,
+                                int dir)
+{
+       int changed;
+       if (hw_is_mask(var)) {
+               struct snd_mask *m = hw_param_mask(params, var);
+               if (val == 0 && dir < 0) {
+                       /* nothing below zero exists: empty the mask */
+                       changed = -EINVAL;
+                       snd_mask_none(m);
+               } else {
+                       /* apply the rounding direction to the bit index */
+                       if (dir > 0)
+                               val++;
+                       else if (dir < 0)
+                               val--;
+                       changed = snd_mask_refine_set(
+                                       hw_param_mask(params, var), val);
+               }
+       } else if (hw_is_interval(var)) {
+               struct snd_interval *i = hw_param_interval(params, var);
+               if (val == 0 && dir < 0) {
+                       changed = -EINVAL;
+                       snd_interval_none(i);
+               } else if (dir == 0)
+                       changed = snd_interval_refine_set(i, val);
+               else {
+                       /* build a half-open interval around val for the
+                        * requested rounding direction */
+                       struct snd_interval t;
+                       t.openmin = 1;
+                       t.openmax = 1;
+                       t.empty = 0;
+                       t.integer = 0;
+                       if (dir < 0) {
+                               t.min = val - 1;
+                               t.max = val;
+                       } else {
+                               t.min = val;
+                               t.max = val+1;
+                       }
+                       changed = snd_interval_refine(i, &t);
+               }
+       } else
+               return -EINVAL;
+       if (changed) {
+               /* record which parameter was touched for dependents */
+               params->cmask |= 1 << var;
+               params->rmask |= 1 << var;
+       }
+       return changed;
+}
+/*-------------------------------------------------------------------------*/
+
+/**
+ * Set default hardware params
+ *
+ * Requests interleaved S16_LE stereo at 48 kHz on the playback
+ * substream, then reads back what the hardware actually accepted
+ * into @snd.  Returns 0 on success or a negative errno code.
+ */
+static int playback_default_hw_params(struct gaudio_snd_dev *snd)
+{
+       struct snd_pcm_substream *substream = snd->substream;
+       struct snd_pcm_hw_params *params;
+       snd_pcm_sframes_t result;
+
+       /*
+       * SNDRV_PCM_ACCESS_RW_INTERLEAVED,
+       * SNDRV_PCM_FORMAT_S16_LE
+       * CHANNELS: 2
+       * RATE: 48000
+       */
+       snd->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED;
+       snd->format = SNDRV_PCM_FORMAT_S16_LE;
+       snd->channels = 2;
+       snd->rate = 48000;
+
+       params = kzalloc(sizeof(*params), GFP_KERNEL);
+       if (!params)
+               return -ENOMEM;
+
+       _snd_pcm_hw_params_any(params);
+       _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
+                       snd->access, 0);
+       _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
+                       snd->format, 0);
+       _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
+                       snd->channels, 0);
+       _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
+                       snd->rate, 0);
+
+       /* NOTE(review): the DROP and HW_PARAMS ioctl results are not
+        * checked here; only PREPARE failure is reported - confirm a
+        * HW_PARAMS failure cannot leave the substream unusable. */
+       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
+
+       result = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
+       if (result < 0) {
+               ERROR(snd->card,
+                       "Preparing sound card failed: %d\n", (int)result);
+               kfree(params);
+               return result;
+       }
+
+       /* Store the hardware parameters */
+       snd->access = params_access(params);
+       snd->format = params_format(params);
+       snd->channels = params_channels(params);
+       snd->rate = params_rate(params);
+
+       kfree(params);
+
+       INFO(snd->card,
+               "Hardware params: access %x, format %x, channels %d, rate %d\n",
+               snd->access, snd->format, snd->channels, snd->rate);
+
+       return 0;
+}
+
+/**
+ * Playback audio buffer data by ALSA PCM device
+ *
+ * Writes @count bytes from @buf to the playback substream, recovering
+ * from underrun/suspend by re-preparing the stream.  Returns 0 on
+ * success or a negative errno from stream preparation.
+ */
+static size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
+{
+       struct gaudio_snd_dev   *snd = &card->playback;
+       struct snd_pcm_substream *substream = snd->substream;
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       mm_segment_t old_fs;
+       ssize_t result;
+       snd_pcm_sframes_t frames;
+
+try_again:
+       /* re-prepare the stream after an underrun or suspend */
+       if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
+               runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
+               result = snd_pcm_kernel_ioctl(substream,
+                               SNDRV_PCM_IOCTL_PREPARE, NULL);
+               if (result < 0) {
+                       ERROR(card, "Preparing sound card failed: %d\n",
+                                       (int)result);
+                       return result;
+               }
+       }
+
+       frames = bytes_to_frames(runtime, count);
+       /* snd_pcm_lib_write expects a user-space pointer; switch the
+        * address limit so a kernel buffer is accepted */
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+       result = snd_pcm_lib_write(snd->substream, buf, frames);
+       if (result != frames) {
+               /* NOTE(review): any short/failed write retries forever via
+                * try_again - a persistent error (e.g. device unplugged)
+                * would loop indefinitely; confirm a retry bound is not
+                * needed here. */
+               ERROR(card, "Playback error: %d\n", (int)result);
+               set_fs(old_fs);
+               goto try_again;
+       }
+       set_fs(old_fs);
+
+       return 0;
+}
+
+/* report the channel count negotiated for the playback stream */
+static int u_audio_get_playback_channels(struct gaudio *card)
+{
+       struct gaudio_snd_dev *snd = &card->playback;
+
+       return snd->channels;
+}
+
+/* report the sample rate negotiated for the playback stream */
+static int u_audio_get_playback_rate(struct gaudio *card)
+{
+       struct gaudio_snd_dev *snd = &card->playback;
+
+       return snd->rate;
+}
+
+/**
+ * Open ALSA PCM and control device files
+ * Initialise the PCM or control device
+ *
+ * Returns zero on success or a negative errno code.  On failure any
+ * files opened earlier in the sequence are left for
+ * gaudio_close_snd_dev() to release.
+ */
+static int gaudio_open_snd_dev(struct gaudio *card)
+{
+       struct snd_pcm_file *pcm_file;
+       struct gaudio_snd_dev *snd;
+
+       if (!card)
+               return -ENODEV;
+
+       /* Open control device */
+       snd = &card->control;
+       snd->filp = filp_open(fn_cntl, O_RDWR, 0);
+       if (IS_ERR(snd->filp)) {
+               int ret = PTR_ERR(snd->filp);
+               ERROR(card, "unable to open sound control device file: %s\n",
+                               fn_cntl);
+               snd->filp = NULL;
+               return ret;
+       }
+       snd->card = card;
+
+       /* Open PCM playback device and setup substream */
+       snd = &card->playback;
+       snd->filp = filp_open(fn_play, O_WRONLY, 0);
+       if (IS_ERR(snd->filp)) {
+               /* fix: the error pointer was previously dereferenced via
+                * snd->filp->private_data below despite the failure */
+               int ret = PTR_ERR(snd->filp);
+               ERROR(card, "No such PCM playback device: %s\n", fn_play);
+               snd->filp = NULL;
+               return ret;
+       }
+       pcm_file = snd->filp->private_data;
+       snd->substream = pcm_file->substream;
+       snd->card = card;
+       playback_default_hw_params(snd);
+
+       /* Open PCM capture device and setup substream */
+       snd = &card->capture;
+       snd->filp = filp_open(fn_cap, O_RDONLY, 0);
+       if (IS_ERR(snd->filp)) {
+               /* fix: same NULL-dereference hazard as the playback path */
+               int ret = PTR_ERR(snd->filp);
+               ERROR(card, "No such PCM capture device: %s\n", fn_cap);
+               snd->filp = NULL;
+               return ret;
+       }
+       pcm_file = snd->filp->private_data;
+       snd->substream = pcm_file->substream;
+       snd->card = card;
+
+       return 0;
+}
+
+/**
+ * Close ALSA PCM and control device files
+ *
+ * Safe to call after a partially-failed gaudio_open_snd_dev(): any
+ * device that failed to open has a NULL filp and is skipped.
+ * Always returns 0.
+ */
+static int gaudio_close_snd_dev(struct gaudio *gau)
+{
+       struct gaudio_snd_dev   *snd;
+
+       /* Close control device */
+       snd = &gau->control;
+       if (snd->filp && !IS_ERR(snd->filp))
+               filp_close(snd->filp, current->files);
+
+       /* Close PCM playback device and setup substream */
+       snd = &gau->playback;
+       if (snd->filp && !IS_ERR(snd->filp))
+               filp_close(snd->filp, current->files);
+
+       /* Close PCM capture device and setup substream */
+       snd = &gau->capture;
+       if (snd->filp && !IS_ERR(snd->filp))
+               filp_close(snd->filp, current->files);
+
+       return 0;
+}
+
+/**
+ * gaudio_setup - setup ALSA interface and preparing for USB transfer
+ *
+ * This sets up the PCM, mixer or MIDI ALSA devices for USB gadget use.
+ *
+ * Returns negative errno, or zero on success
+ */
+int __init gaudio_setup(struct gaudio *card)
+{
+       int status = gaudio_open_snd_dev(card);
+
+       if (status)
+               ERROR(card, "we need at least one control device\n");
+
+       return status;
+}
+
+/**
+ * gaudio_cleanup - remove ALSA device interface
+ *
+ * This is called to free all resources allocated by @gaudio_setup().
+ * A NULL @card is tolerated and ignored.
+ */
+void gaudio_cleanup(struct gaudio *card)
+{
+       if (!card)
+               return;
+
+       gaudio_close_snd_dev(card);
+}
+
diff --git a/drivers/usb/gadget/u_audio.h b/drivers/usb/gadget/u_audio.h
new file mode 100644 (file)
index 0000000..cc8d159
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * u_audio.h -- interface to USB gadget "ALSA AUDIO" utilities
+ *
+ * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
+ * Copyright (C) 2008 Analog Devices, Inc
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __U_AUDIO_H
+#define __U_AUDIO_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/composite.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "gadget_chips.h"
+
+/*
+ * This represents the USB side of an audio card device, managed by a USB
+ * function which provides control and stream interfaces.
+ */
+
+/* one opened ALSA device node (control, playback or capture) */
+struct gaudio_snd_dev {
+       struct gaudio                   *card;          /* owning card */
+       struct file                     *filp;          /* opened device node */
+       struct snd_pcm_substream        *substream;     /* PCM substream, unused for control */
+       int                             access;         /* negotiated SNDRV_PCM_ACCESS_* */
+       int                             format;         /* negotiated SNDRV_PCM_FORMAT_* */
+       int                             channels;       /* negotiated channel count */
+       int                             rate;           /* negotiated sample rate (Hz) */
+};
+
+/* state of the whole audio gadget function */
+struct gaudio {
+       struct usb_function             func;
+       struct usb_gadget               *gadget;
+
+       /* ALSA sound device interfaces */
+       struct gaudio_snd_dev           control;
+       struct gaudio_snd_dev           playback;
+       struct gaudio_snd_dev           capture;
+
+       /* TODO */
+};
+
+/* open the ALSA devices; returns 0 or negative errno */
+int gaudio_setup(struct gaudio *card);
+/* release everything gaudio_setup() opened */
+void gaudio_cleanup(struct gaudio *card);
+
+#endif /* __U_AUDIO_H */
index 0a4d99ab40d8c0c224a78d060e93574b63fcbd2e..fc6e709f45b1b184891b0201cffcbeef17daf09f 100644 (file)
@@ -371,6 +371,7 @@ __acquires(&port->port_lock)
 
                req->length = len;
                list_del(&req->list);
+               req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
 
                pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
                                port->port_num, len, *((u8 *)req->buf),
index 845479f7c70773cacff061d3a04c0f3ca0b21d86..1576a0520adf35ba813ef9ebd30cd8fb28f16e62 100644 (file)
@@ -17,6 +17,26 @@ config USB_C67X00_HCD
          To compile this driver as a module, choose M here: the
          module will be called c67x00.
 
+config USB_XHCI_HCD
+       tristate "xHCI HCD (USB 3.0) support (EXPERIMENTAL)"
+       depends on USB && PCI && EXPERIMENTAL
+       ---help---
+         The eXtensible Host Controller Interface (xHCI) is the standard
+         for USB 3.0 "SuperSpeed" host controller hardware.
+
+         To compile this driver as a module, choose M here: the
+         module will be called xhci-hcd.
+
+config USB_XHCI_HCD_DEBUGGING
+       bool "Debugging for the xHCI host controller"
+       depends on USB_XHCI_HCD
+       ---help---
+         Say 'Y' to turn on debugging for the xHCI host controller driver.
+         This will spew debugging output, even in interrupt context.
+         This should only be used for debugging xHCI driver bugs.
+
+         If unsure, say N.
+
 config USB_EHCI_HCD
        tristate "EHCI HCD (USB 2.0) support"
        depends on USB && USB_ARCH_HAS_EHCI
index f163571e33d8dc0e61f10ec1addf50d4956baf39..289d748bb41422ae6d607130a09fe3c410037049 100644 (file)
@@ -12,6 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
 ifeq ($(CONFIG_FHCI_DEBUG),y)
 fhci-objs += fhci-dbg.o
 endif
+xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
 
 obj-$(CONFIG_USB_WHCI_HCD)     += whci/
 
@@ -23,6 +24,7 @@ obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
 obj-$(CONFIG_USB_OHCI_HCD)     += ohci-hcd.o
 obj-$(CONFIG_USB_UHCI_HCD)     += uhci-hcd.o
 obj-$(CONFIG_USB_FHCI_HCD)     += fhci.o
+obj-$(CONFIG_USB_XHCI_HCD)     += xhci.o
 obj-$(CONFIG_USB_SL811_HCD)    += sl811-hcd.o
 obj-$(CONFIG_USB_SL811_CS)     += sl811_cs.o
 obj-$(CONFIG_USB_U132_HCD)     += u132-hcd.o
index bf69f473910785465c8c20ef4be46ea7acab2946..c3a778bd359c9b883bd6e361e78951818fd3a280 100644 (file)
@@ -97,6 +97,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
        .urb_enqueue            = ehci_urb_enqueue,
        .urb_dequeue            = ehci_urb_dequeue,
        .endpoint_disable       = ehci_endpoint_disable,
+       .endpoint_reset         = ehci_endpoint_reset,
 
        /*
         * scheduling support
index 01c3da34f678e4a52107f64bca1674bb7e64c807..bf86809c5120795eac8521b68e2159d2fef37127 100644 (file)
@@ -309,6 +309,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
        .urb_enqueue = ehci_urb_enqueue,
        .urb_dequeue = ehci_urb_dequeue,
        .endpoint_disable = ehci_endpoint_disable,
+       .endpoint_reset = ehci_endpoint_reset,
 
        /*
         * scheduling support
index c637207a1c80b63eab4bfa5587d4250bfe0d3c86..2b72473544d31d798034ed5aa92cb555384a8055 100644 (file)
@@ -1024,6 +1024,51 @@ done:
        return;
 }
 
+static void
+ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+       struct ehci_hcd         *ehci = hcd_to_ehci(hcd);
+       struct ehci_qh          *qh;
+       int                     eptype = usb_endpoint_type(&ep->desc);
+
+       if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+               return;
+
+ rescan:
+       spin_lock_irq(&ehci->lock);
+       qh = ep->hcpriv;
+
+       /* For Bulk and Interrupt endpoints we maintain the toggle state
+        * in the hardware; the toggle bits in udev aren't used at all.
+        * When an endpoint is reset by usb_clear_halt() we must reset
+        * the toggle bit in the QH.
+        */
+       if (qh) {
+               if (!list_empty(&qh->qtd_list)) {
+                       WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+               } else if (qh->qh_state == QH_STATE_IDLE) {
+                       qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+               } else {
+                       /* It's not safe to write into the overlay area
+                        * while the QH is active.  Unlink it first and
+                        * wait for the unlink to complete.
+                        */
+                       if (qh->qh_state == QH_STATE_LINKED) {
+                               if (eptype == USB_ENDPOINT_XFER_BULK) {
+                                       unlink_async(ehci, qh);
+                               } else {
+                                       intr_deschedule(ehci, qh);
+                                       (void) qh_schedule(ehci, qh);
+                               }
+                       }
+                       spin_unlock_irq(&ehci->lock);
+                       schedule_timeout_uninterruptible(1);
+                       goto rescan;
+               }
+       }
+       spin_unlock_irq(&ehci->lock);
+}
+
 static int ehci_get_frame (struct usb_hcd *hcd)
 {
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
@@ -1097,7 +1142,7 @@ static int __init ehci_hcd_init(void)
                 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
 
 #ifdef DEBUG
-       ehci_debug_root = debugfs_create_dir("ehci", NULL);
+       ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
        if (!ehci_debug_root) {
                retval = -ENOENT;
                goto err_debug;
index 97a53a48a3d8e24383e5f14cc2590bc36b0408a9..f46ad27c9a90e2cda2e8bbb8b9b45c861af765c5 100644 (file)
@@ -391,7 +391,7 @@ static inline void create_companion_file(struct ehci_hcd *ehci)
 
        /* with integrated TT there is no companion! */
        if (!ehci_is_TDI(ehci))
-               i = device_create_file(ehci_to_hcd(ehci)->self.dev,
+               i = device_create_file(ehci_to_hcd(ehci)->self.controller,
                                       &dev_attr_companion);
 }
 
@@ -399,7 +399,7 @@ static inline void remove_companion_file(struct ehci_hcd *ehci)
 {
        /* with integrated TT there is no companion! */
        if (!ehci_is_TDI(ehci))
-               device_remove_file(ehci_to_hcd(ehci)->self.dev,
+               device_remove_file(ehci_to_hcd(ehci)->self.controller,
                                   &dev_attr_companion);
 }
 
index 9c32063a0c2f6611fc938760ea84906d1f3f9666..a44bb4a949543d797f336827edb8ac6fa088d8ac 100644 (file)
@@ -51,6 +51,7 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
        .urb_enqueue            = ehci_urb_enqueue,
        .urb_dequeue            = ehci_urb_dequeue,
        .endpoint_disable       = ehci_endpoint_disable,
+       .endpoint_reset         = ehci_endpoint_reset,
        .get_frame_number       = ehci_get_frame,
        .hub_status_data        = ehci_hub_status_data,
        .hub_control            = ehci_hub_control,
index 9d487908012e56fb515d82576ccdbd5db71fa480..770dd9aba62a9f07aabd9863e59fc4acae6a81dd 100644 (file)
@@ -149,6 +149,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
        .urb_enqueue = ehci_urb_enqueue,
        .urb_dequeue = ehci_urb_dequeue,
        .endpoint_disable = ehci_endpoint_disable,
+       .endpoint_reset = ehci_endpoint_reset,
 
        /*
         * scheduling support
@@ -187,7 +188,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
        }
 }
 
-static int __init ehci_orion_drv_probe(struct platform_device *pdev)
+static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
 {
        struct orion_ehci_data *pd = pdev->dev.platform_data;
        struct resource *res;
index 5aa8bce90e1f3856d7195db5614394a6c709ba9f..f3683e1da16134b3c20a6239b5edfd406af923aa 100644 (file)
@@ -268,7 +268,7 @@ done:
  * Also they depend on separate root hub suspend/resume.
  */
 
-static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int ehci_pci_suspend(struct usb_hcd *hcd)
 {
        struct ehci_hcd         *ehci = hcd_to_ehci(hcd);
        unsigned long           flags;
@@ -293,12 +293,6 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
        ehci_writel(ehci, 0, &ehci->regs->intr_enable);
        (void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
-       /* make sure snapshot being resumed re-enumerates everything */
-       if (message.event == PM_EVENT_PRETHAW) {
-               ehci_halt(ehci);
-               ehci_reset(ehci);
-       }
-
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
  bail:
        spin_unlock_irqrestore (&ehci->lock, flags);
@@ -309,7 +303,7 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
        return rc;
 }
 
-static int ehci_pci_resume(struct usb_hcd *hcd)
+static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
        struct ehci_hcd         *ehci = hcd_to_ehci(hcd);
        struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
@@ -322,10 +316,12 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
        /* Mark hardware accessible again as we are out of D3 state by now */
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 
-       /* If CF is still set, we maintained PCI Vaux power.
+       /* If CF is still set and we aren't resuming from hibernation
+        * then we maintained PCI Vaux power.
         * Just undo the effect of ehci_pci_suspend().
         */
-       if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
+       if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
+                               !hibernated) {
                int     mask = INTR_MASK;
 
                if (!hcd->self.root_hub->do_remote_wakeup)
@@ -335,7 +331,6 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
                return 0;
        }
 
-       ehci_dbg(ehci, "lost power, restarting\n");
        usb_root_hub_lost_power(hcd->self.root_hub);
 
        /* Else reset, to cope with power loss or flush-to-storage
@@ -393,6 +388,7 @@ static const struct hc_driver ehci_pci_hc_driver = {
        .urb_enqueue =          ehci_urb_enqueue,
        .urb_dequeue =          ehci_urb_dequeue,
        .endpoint_disable =     ehci_endpoint_disable,
+       .endpoint_reset =       ehci_endpoint_reset,
 
        /*
         * scheduling support
@@ -429,10 +425,11 @@ static struct pci_driver ehci_pci_driver = {
 
        .probe =        usb_hcd_pci_probe,
        .remove =       usb_hcd_pci_remove,
+       .shutdown =     usb_hcd_pci_shutdown,
 
-#ifdef CONFIG_PM
-       .suspend =      usb_hcd_pci_suspend,
-       .resume =       usb_hcd_pci_resume,
+#ifdef CONFIG_PM_SLEEP
+       .driver =       {
+               .pm =   &usb_hcd_pci_pm_ops
+       },
 #endif
-       .shutdown =     usb_hcd_pci_shutdown,
 };
index ef732b704f53d996aed213bc1563c10063e89978..fbd272288fc2cf799ef1d075adea282dd01aa863 100644 (file)
@@ -61,6 +61,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
        .urb_enqueue            = ehci_urb_enqueue,
        .urb_dequeue            = ehci_urb_dequeue,
        .endpoint_disable       = ehci_endpoint_disable,
+       .endpoint_reset         = ehci_endpoint_reset,
 
        /*
         * scheduling support
index bb870b8f81bc53c9a05202f7d5ce76e7e9c2ad82..eecd2a0680a216bd68a26a3eba3459cd16037ac3 100644 (file)
@@ -65,6 +65,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
        .urb_enqueue            = ehci_urb_enqueue,
        .urb_dequeue            = ehci_urb_dequeue,
        .endpoint_disable       = ehci_endpoint_disable,
+       .endpoint_reset         = ehci_endpoint_reset,
        .get_frame_number       = ehci_get_frame,
        .hub_status_data        = ehci_hub_status_data,
        .hub_control            = ehci_hub_control,
index 1976b1b3778cd3de2d7946733fbda37edb6a7108..3192f683f8073293a6366b532ced3ff0dd5007f1 100644 (file)
@@ -93,22 +93,6 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
        qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
        qh->hw_alt_next = EHCI_LIST_END(ehci);
 
-       /* Except for control endpoints, we make hardware maintain data
-        * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
-        * and set the pseudo-toggle in udev. Only usb_clear_halt() will
-        * ever clear it.
-        */
-       if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
-               unsigned        is_out, epnum;
-
-               is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
-               epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
-               if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
-                       qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
-                       usb_settoggle (qh->dev, epnum, is_out, 1);
-               }
-       }
-
        /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
        wmb ();
        qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
@@ -850,7 +834,6 @@ done:
        qh->qh_state = QH_STATE_IDLE;
        qh->hw_info1 = cpu_to_hc32(ehci, info1);
        qh->hw_info2 = cpu_to_hc32(ehci, info2);
-       usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
        qh_refresh (ehci, qh);
        return qh;
 }
@@ -881,7 +864,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
                }
        }
 
-       /* clear halt and/or toggle; and maybe recover from silicon quirk */
+       /* clear halt and maybe recover from silicon quirk */
        if (qh->qh_state == QH_STATE_IDLE)
                qh_refresh (ehci, qh);
 
index 556d0ec0c1f81048044a7b6e51019a5a1d4443e5..9d1babc7ff6553c64de8cec32fcfcff3f5ce5967 100644 (file)
@@ -760,8 +760,10 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
        if (status) {
                /* "normal" case, uframing flexible except with splits */
                if (qh->period) {
-                       frame = qh->period - 1;
-                       do {
+                       int             i;
+
+                       for (i = qh->period; status && i > 0; --i) {
+                               frame = ++ehci->random_frame % qh->period;
                                for (uframe = 0; uframe < 8; uframe++) {
                                        status = check_intr_schedule (ehci,
                                                        frame, uframe, qh,
@@ -769,7 +771,7 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
                                        if (status == 0)
                                                break;
                                }
-                       } while (status && frame--);
+                       }
 
                /* qh->period == 0 means every uframe */
                } else {
index 6cff195e1a365e46d739bc42263b57381365bb3b..90ad3395bb21f0f96923ab21431dc7c44165badb 100644 (file)
@@ -116,6 +116,7 @@ struct ehci_hcd {                   /* one per controller */
        struct timer_list       watchdog;
        unsigned long           actions;
        unsigned                stamp;
+       unsigned                random_frame;
        unsigned long           next_statechange;
        u32                     command;
 
index ea8a4255c5da25b29136da57b8849c7bdd3f4285..e799f86dab1169c3542dc4c7c8eb5411feb2c2c8 100644 (file)
@@ -108,7 +108,7 @@ void fhci_dfs_create(struct fhci_hcd *fhci)
 {
        struct device *dev = fhci_to_hcd(fhci)->self.controller;
 
-       fhci->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
+       fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
        if (!fhci->dfs_root) {
                WARN_ON(1);
                return;
index cbf30e515f29f2e7f7668f1143c8f09d02a45738..88b03214622b96683f3c0f6d9b5db73e8d98f2d5 100644 (file)
@@ -172,25 +172,6 @@ error_cluster_id_get:
 
 }
 
-static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
-{
-       struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-       struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-       dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
-               usb_hcd, hwahc, *(unsigned long *) &msg);
-       return -ENOSYS;
-}
-
-static int hwahc_op_resume(struct usb_hcd *usb_hcd)
-{
-       struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-       struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-
-       dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
-               usb_hcd, hwahc);
-       return -ENOSYS;
-}
-
 /*
  * No need to abort pipes, as when this is called, all the children
  * has been disconnected and that has done it [through
@@ -598,8 +579,6 @@ static struct hc_driver hwahc_hc_driver = {
        .flags = HCD_USB2,              /* FIXME */
        .reset = hwahc_op_reset,
        .start = hwahc_op_start,
-       .pci_suspend = hwahc_op_suspend,
-       .pci_resume = hwahc_op_resume,
        .stop = hwahc_op_stop,
        .get_frame_number = hwahc_op_get_frame_number,
        .urb_enqueue = hwahc_op_urb_enqueue,
index d3269656aa4d8c81036b6e5880123c42a254c821..811f5dfdc582447c18716a79e1bb055735975fff 100644 (file)
@@ -431,7 +431,7 @@ static struct dentry *ohci_debug_root;
 
 struct debug_buffer {
        ssize_t (*fill_func)(struct debug_buffer *);    /* fill method */
-       struct device *dev;
+       struct ohci_hcd *ohci;
        struct mutex mutex;     /* protect filling of buffer */
        size_t count;           /* number of characters filled into buffer */
        char *page;
@@ -505,15 +505,11 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
 
 static ssize_t fill_async_buffer(struct debug_buffer *buf)
 {
-       struct usb_bus          *bus;
-       struct usb_hcd          *hcd;
        struct ohci_hcd         *ohci;
        size_t                  temp;
        unsigned long           flags;
 
-       bus = dev_get_drvdata(buf->dev);
-       hcd = bus_to_hcd(bus);
-       ohci = hcd_to_ohci(hcd);
+       ohci = buf->ohci;
 
        /* display control and bulk lists together, for simplicity */
        spin_lock_irqsave (&ohci->lock, flags);
@@ -529,8 +525,6 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
 
 static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
 {
-       struct usb_bus          *bus;
-       struct usb_hcd          *hcd;
        struct ohci_hcd         *ohci;
        struct ed               **seen, *ed;
        unsigned long           flags;
@@ -542,9 +536,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
                return 0;
        seen_count = 0;
 
-       bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
-       hcd = bus_to_hcd(bus);
-       ohci = hcd_to_ohci(hcd);
+       ohci = buf->ohci;
        next = buf->page;
        size = PAGE_SIZE;
 
@@ -626,7 +618,6 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
 
 static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 {
-       struct usb_bus          *bus;
        struct usb_hcd          *hcd;
        struct ohci_hcd         *ohci;
        struct ohci_regs __iomem *regs;
@@ -635,9 +626,8 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
        char                    *next;
        u32                     rdata;
 
-       bus = (struct usb_bus *)dev_get_drvdata(buf->dev);
-       hcd = bus_to_hcd(bus);
-       ohci = hcd_to_ohci(hcd);
+       ohci = buf->ohci;
+       hcd = ohci_to_hcd(ohci);
        regs = ohci->regs;
        next = buf->page;
        size = PAGE_SIZE;
@@ -710,7 +700,7 @@ done:
        return PAGE_SIZE - size;
 }
 
-static struct debug_buffer *alloc_buffer(struct device *dev,
+static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
                                ssize_t (*fill_func)(struct debug_buffer *))
 {
        struct debug_buffer *buf;
@@ -718,7 +708,7 @@ static struct debug_buffer *alloc_buffer(struct device *dev,
        buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
 
        if (buf) {
-               buf->dev = dev;
+               buf->ohci = ohci;
                buf->fill_func = fill_func;
                mutex_init(&buf->mutex);
        }
@@ -810,26 +800,25 @@ static int debug_registers_open(struct inode *inode, struct file *file)
 static inline void create_debug_files (struct ohci_hcd *ohci)
 {
        struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
-       struct device *dev = bus->dev;
 
        ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root);
        if (!ohci->debug_dir)
                goto dir_error;
 
        ohci->debug_async = debugfs_create_file("async", S_IRUGO,
-                                               ohci->debug_dir, dev,
+                                               ohci->debug_dir, ohci,
                                                &debug_async_fops);
        if (!ohci->debug_async)
                goto async_error;
 
        ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
-                                                  ohci->debug_dir, dev,
+                                                  ohci->debug_dir, ohci,
                                                   &debug_periodic_fops);
        if (!ohci->debug_periodic)
                goto periodic_error;
 
        ohci->debug_registers = debugfs_create_file("registers", S_IRUGO,
-                                                   ohci->debug_dir, dev,
+                                                   ohci->debug_dir, ohci,
                                                    &debug_registers_fops);
        if (!ohci->debug_registers)
                goto registers_error;
index 25db704f3a2aa8c7d60ae1c48eab54270abab45e..58151687d3518ef8da2938ae1fb34babe1a3caca 100644 (file)
@@ -571,7 +571,7 @@ static int ohci_init (struct ohci_hcd *ohci)
  */
 static int ohci_run (struct ohci_hcd *ohci)
 {
-       u32                     mask, temp;
+       u32                     mask, val;
        int                     first = ohci->fminterval == 0;
        struct usb_hcd          *hcd = ohci_to_hcd(ohci);
 
@@ -580,8 +580,8 @@ static int ohci_run (struct ohci_hcd *ohci)
        /* boot firmware should have set this up (5.1.1.3.1) */
        if (first) {
 
-               temp = ohci_readl (ohci, &ohci->regs->fminterval);
-               ohci->fminterval = temp & 0x3fff;
+               val = ohci_readl (ohci, &ohci->regs->fminterval);
+               ohci->fminterval = val & 0x3fff;
                if (ohci->fminterval != FI)
                        ohci_dbg (ohci, "fminterval delta %d\n",
                                ohci->fminterval - FI);
@@ -600,25 +600,25 @@ static int ohci_run (struct ohci_hcd *ohci)
 
        switch (ohci->hc_control & OHCI_CTRL_HCFS) {
        case OHCI_USB_OPER:
-               temp = 0;
+               val = 0;
                break;
        case OHCI_USB_SUSPEND:
        case OHCI_USB_RESUME:
                ohci->hc_control &= OHCI_CTRL_RWC;
                ohci->hc_control |= OHCI_USB_RESUME;
-               temp = 10 /* msec wait */;
+               val = 10 /* msec wait */;
                break;
        // case OHCI_USB_RESET:
        default:
                ohci->hc_control &= OHCI_CTRL_RWC;
                ohci->hc_control |= OHCI_USB_RESET;
-               temp = 50 /* msec wait */;
+               val = 50 /* msec wait */;
                break;
        }
        ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
        // flush the writes
        (void) ohci_readl (ohci, &ohci->regs->control);
-       msleep(temp);
+       msleep(val);
 
        memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
 
@@ -628,9 +628,9 @@ static int ohci_run (struct ohci_hcd *ohci)
 retry:
        /* HC Reset requires max 10 us delay */
        ohci_writel (ohci, OHCI_HCR,  &ohci->regs->cmdstatus);
-       temp = 30;      /* ... allow extra time */
+       val = 30;       /* ... allow extra time */
        while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
-               if (--temp == 0) {
+               if (--val == 0) {
                        spin_unlock_irq (&ohci->lock);
                        ohci_err (ohci, "USB HC reset timed out!\n");
                        return -1;
@@ -699,23 +699,23 @@ retry:
        ohci_writel (ohci, mask, &ohci->regs->intrenable);
 
        /* handle root hub init quirks ... */
-       temp = roothub_a (ohci);
-       temp &= ~(RH_A_PSM | RH_A_OCPM);
+       val = roothub_a (ohci);
+       val &= ~(RH_A_PSM | RH_A_OCPM);
        if (ohci->flags & OHCI_QUIRK_SUPERIO) {
                /* NSC 87560 and maybe others */
-               temp |= RH_A_NOCP;
-               temp &= ~(RH_A_POTPGT | RH_A_NPS);
-               ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+               val |= RH_A_NOCP;
+               val &= ~(RH_A_POTPGT | RH_A_NPS);
+               ohci_writel (ohci, val, &ohci->regs->roothub.a);
        } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
                        (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
                /* hub power always on; required for AMD-756 and some
                 * Mac platforms.  ganged overcurrent reporting, if any.
                 */
-               temp |= RH_A_NPS;
-               ohci_writel (ohci, temp, &ohci->regs->roothub.a);
+               val |= RH_A_NPS;
+               ohci_writel (ohci, val, &ohci->regs->roothub.a);
        }
        ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
-       ohci_writel (ohci, (temp & RH_A_NPS) ? 0 : RH_B_PPCM,
+       ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
                                                &ohci->regs->roothub.b);
        // flush those writes
        (void) ohci_readl (ohci, &ohci->regs->control);
@@ -724,7 +724,7 @@ retry:
        spin_unlock_irq (&ohci->lock);
 
        // POTPGT delay is bits 24-31, in 2 ms units.
-       mdelay ((temp >> 23) & 0x1fe);
+       mdelay ((val >> 23) & 0x1fe);
        hcd->state = HC_STATE_RUNNING;
 
        if (quirk_zfmicro(ohci)) {
@@ -1105,7 +1105,7 @@ static int __init ohci_hcd_mod_init(void)
        set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
 
 #ifdef DEBUG
-       ohci_debug_root = debugfs_create_dir("ohci", NULL);
+       ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
        if (!ohci_debug_root) {
                retval = -ENOENT;
                goto error_debug;
index f9961b4c0da3e5065f054aa7dc34a619e00b8187..d2ba04dd785e4942d69ad28f4b2a3a727d3efd54 100644 (file)
@@ -372,7 +372,7 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
 
 #ifdef CONFIG_PM
 
-static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
+static int ohci_pci_suspend(struct usb_hcd *hcd)
 {
        struct ohci_hcd *ohci = hcd_to_ohci (hcd);
        unsigned long   flags;
@@ -394,10 +394,6 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
        ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
        (void)ohci_readl(ohci, &ohci->regs->intrdisable);
 
-       /* make sure snapshot being resumed re-enumerates everything */
-       if (message.event == PM_EVENT_PRETHAW)
-               ohci_usb_reset(ohci);
-
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
  bail:
        spin_unlock_irqrestore (&ohci->lock, flags);
@@ -406,9 +402,14 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
 }
 
 
-static int ohci_pci_resume (struct usb_hcd *hcd)
+static int ohci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+       /* Make sure resume from hibernation re-enumerates everything */
+       if (hibernated)
+               ohci_usb_reset(hcd_to_ohci(hcd));
+
        ohci_finish_controller_resume(hcd);
        return 0;
 }
@@ -484,12 +485,11 @@ static struct pci_driver ohci_pci_driver = {
 
        .probe =        usb_hcd_pci_probe,
        .remove =       usb_hcd_pci_remove,
+       .shutdown =     usb_hcd_pci_shutdown,
 
-#ifdef CONFIG_PM
-       .suspend =      usb_hcd_pci_suspend,
-       .resume =       usb_hcd_pci_resume,
+#ifdef CONFIG_PM_SLEEP
+       .driver =       {
+               .pm =   &usb_hcd_pci_pm_ops
+       },
 #endif
-
-       .shutdown =     usb_hcd_pci_shutdown,
 };
-
index 033c2846ce5923e05f0805bff889ab45268ea056..83b5f9cea85ac9f43af8d3b1eb87a259517cc05c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/acpi.h>
 #include "pci-quirks.h"
+#include "xhci-ext-caps.h"
 
 
 #define UHCI_USBLEGSUP         0xc0            /* legacy support */
@@ -341,7 +342,127 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
        return;
 }
 
+/*
+ * handshake - spin reading a register until handshake completes
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @wait_usec: timeout in microseconds
+ * @delay_usec: delay in microseconds to wait between polling
+ *
+ * Polls a register every delay_usec microseconds.
+ * Returns 0 when the mask bits have the value done.
+ * Returns -ETIMEDOUT if this condition is not true after
+ * wait_usec microseconds have passed.
+ */
+static int handshake(void __iomem *ptr, u32 mask, u32 done,
+               int wait_usec, int delay_usec)
+{
+       u32     result;
+
+       do {
+               result = readl(ptr);
+               result &= mask;
+               if (result == done)
+                       return 0;
+               udelay(delay_usec);
+               wait_usec -= delay_usec;
+       } while (wait_usec > 0);
+       return -ETIMEDOUT;
+}
+
+/**
+ * PCI Quirks for xHCI.
+ *
+ * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
+ * It signals to the BIOS that the OS wants control of the host controller,
+ * and then waits 5 seconds for the BIOS to hand over control.
+ * If we timeout, assume the BIOS is broken and take control anyway.
+ */
+static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+{
+       void __iomem *base;
+       int ext_cap_offset;
+       void __iomem *op_reg_base;
+       u32 val;
+       int timeout;
+
+       if (!mmio_resource_enabled(pdev, 0))
+               return;
+
+       base = ioremap_nocache(pci_resource_start(pdev, 0),
+                               pci_resource_len(pdev, 0));
+       if (base == NULL)
+               return;
 
+       /*
+        * Find the Legacy Support Capability register -
+        * this is optional for xHCI host controllers.
+        */
+       ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
+       do {
+               if (!ext_cap_offset)
+                       /* We've reached the end of the extended capabilities */
+                       goto hc_init;
+               val = readl(base + ext_cap_offset);
+               if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
+                       break;
+               ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
+       } while (1);
+
+       /* If the BIOS owns the HC, signal that the OS wants it, and wait */
+       if (val & XHCI_HC_BIOS_OWNED) {
+               writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset);
+
+               /* Wait for 5 seconds with 10 microsecond polling interval */
+               timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+                               0, 5000, 10);
+
+               /* Assume a buggy BIOS and take HC ownership anyway */
+               if (timeout) {
+                       dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
+                                       " (BIOS bug ?) %08x\n", val);
+                       writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
+               }
+       }
+
+       /* Disable any BIOS SMIs */
+       writel(XHCI_LEGACY_DISABLE_SMI,
+                       base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
+hc_init:
+       op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+       /* Wait for the host controller to be ready before writing any
+        * operational or runtime registers.  Wait 5 seconds and no more.
+        */
+       timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
+                       5000, 10);
+       /* Assume a buggy HC and start HC initialization anyway */
+       if (timeout) {
+               val = readl(op_reg_base + XHCI_STS_OFFSET);
+               dev_warn(&pdev->dev,
+                               "xHCI HW not ready after 5 sec (HC bug?) "
+                               "status = 0x%x\n", val);
+       }
+
+       /* Send the halt and disable interrupts command */
+       val = readl(op_reg_base + XHCI_CMD_OFFSET);
+       val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
+       writel(val, op_reg_base + XHCI_CMD_OFFSET);
+
+       /* Wait for the HC to halt - poll every 125 usec (one microframe). */
+       timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
+                       XHCI_MAX_HALT_USEC, 125);
+       if (timeout) {
+               val = readl(op_reg_base + XHCI_STS_OFFSET);
+               dev_warn(&pdev->dev,
+                               "xHCI HW did not halt within %d usec "
+                               "status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
+       }
+
+       iounmap(base);
+}
 
 static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 {
@@ -351,5 +472,7 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
                quirk_usb_handoff_ohci(pdev);
        else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
                quirk_usb_disable_ehci(pdev);
+       else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
+               quirk_usb_handoff_xhci(pdev);
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
index f1626e58c141ff207fac18c57d3ce58212f86b72..56976cc0352a9ae9e87a0a1cb681135410e0e581 100644 (file)
@@ -46,31 +46,10 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Yoshihiro Shimoda");
 MODULE_ALIAS("platform:r8a66597_hcd");
 
-#define DRIVER_VERSION "10 Apr 2008"
+#define DRIVER_VERSION "2009-05-26"
 
 static const char hcd_name[] = "r8a66597_hcd";
 
-/* module parameters */
-#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597)
-static unsigned short clock = XTAL12;
-module_param(clock, ushort, 0644);
-MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
-               "(default=0)");
-#endif
-
-static unsigned short vif = LDRV;
-module_param(vif, ushort, 0644);
-MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)");
-
-static unsigned short endian;
-module_param(endian, ushort, 0644);
-MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
-
-static unsigned short irq_sense = 0xff;
-module_param(irq_sense, ushort, 0644);
-MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 "
-               "(default=32)");
-
 static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
 static int r8a66597_get_frame(struct usb_hcd *hcd);
 
@@ -136,7 +115,8 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
                }
        } while ((tmp & USBE) != USBE);
        r8a66597_bclr(r8a66597, USBE, SYSCFG0);
-       r8a66597_mdfy(r8a66597, clock, XTAL, SYSCFG0);
+       r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL,
+                       SYSCFG0);
 
        i = 0;
        r8a66597_bset(r8a66597, XCKE, SYSCFG0);
@@ -203,6 +183,9 @@ static void r8a66597_disable_port(struct r8a66597 *r8a66597, int port)
 static int enable_controller(struct r8a66597 *r8a66597)
 {
        int ret, port;
+       u16 vif = r8a66597->pdata->vif ? LDRV : 0;
+       u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
+       u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
 
        ret = r8a66597_clock_enable(r8a66597);
        if (ret < 0)
@@ -2373,7 +2356,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int __init r8a66597_probe(struct platform_device *pdev)
+static int __devinit r8a66597_probe(struct platform_device *pdev)
 {
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
        char clk_name[8];
@@ -2418,6 +2401,12 @@ static int __init r8a66597_probe(struct platform_device *pdev)
                goto clean_up;
        }
 
+       if (pdev->dev.platform_data == NULL) {
+               dev_err(&pdev->dev, "no platform data\n");
+               ret = -ENODEV;
+               goto clean_up;
+       }
+
        /* initialize hcd */
        hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
        if (!hcd) {
@@ -2428,6 +2417,8 @@ static int __init r8a66597_probe(struct platform_device *pdev)
        r8a66597 = hcd_to_r8a66597(hcd);
        memset(r8a66597, 0, sizeof(struct r8a66597));
        dev_set_drvdata(&pdev->dev, r8a66597);
+       r8a66597->pdata = pdev->dev.platform_data;
+       r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
 
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
        snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
@@ -2458,29 +2449,6 @@ static int __init r8a66597_probe(struct platform_device *pdev)
 
        hcd->rsrc_start = res->start;
 
-       /* irq_sense setting on cmdline takes precedence over resource
-        * settings, so the introduction of irqflags in IRQ resourse
-        * won't disturb existing setups */
-       switch (irq_sense) {
-               case INTL:
-                       irq_trigger = IRQF_TRIGGER_LOW;
-                       break;
-               case 0:
-                       irq_trigger = IRQF_TRIGGER_FALLING;
-                       break;
-               case 0xff:
-                       if (irq_trigger)
-                               irq_sense = (irq_trigger & IRQF_TRIGGER_LOW) ?
-                                           INTL : 0;
-                       else {
-                               irq_sense = INTL;
-                               irq_trigger = IRQF_TRIGGER_LOW;
-                       }
-                       break;
-               default:
-                       dev_err(&pdev->dev, "Unknown irq_sense value.\n");
-       }
-
        ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
        if (ret != 0) {
                dev_err(&pdev->dev, "Failed to add hcd\n");
index f49208f1bb74d80003c9a79b1e87090d298ac5f4..d72680b433f93c2309d7c17acbee40cab8281c45 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/clk.h>
 #endif
 
+#include <linux/usb/r8a66597.h>
+
 #define SYSCFG0                0x00
 #define SYSCFG1                0x02
 #define SYSSTS0                0x04
@@ -488,6 +490,7 @@ struct r8a66597 {
 #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
        struct clk *clk;
 #endif
+       struct r8a66597_platdata        *pdata;
        struct r8a66597_device          device0;
        struct r8a66597_root_hub        root_hub[R8A66597_MAX_ROOT_HUB];
        struct list_head                pipe_queue[R8A66597_MAX_NUM_PIPE];
@@ -506,6 +509,7 @@ struct r8a66597 {
        unsigned long child_connect_map[4];
 
        unsigned bus_suspended:1;
+       unsigned irq_sense_low:1;
 };
 
 static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd)
@@ -660,10 +664,36 @@ static inline void r8a66597_port_power(struct r8a66597 *r8a66597, int port,
 {
        unsigned long dvstctr_reg = get_dvstctr_reg(port);
 
-       if (power)
-               r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
-       else
-               r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+       if (r8a66597->pdata->port_power) {
+               r8a66597->pdata->port_power(port, power);
+       } else {
+               if (power)
+                       r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
+               else
+                       r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+       }
+}
+
+static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
+{
+       u16 clock = 0;
+
+       switch (pdata->xtal) {
+       case R8A66597_PLATDATA_XTAL_12MHZ:
+               clock = XTAL12;
+               break;
+       case R8A66597_PLATDATA_XTAL_24MHZ:
+               clock = XTAL24;
+               break;
+       case R8A66597_PLATDATA_XTAL_48MHZ:
+               clock = XTAL48;
+               break;
+       default:
+               printk(KERN_ERR "r8a66597: platdata clock is wrong.\n");
+               break;
+       }
+
+       return clock;
 }
 
 #define get_pipectr_addr(pipenum)      (PIPE1CTR + (pipenum - 1) * 2)
index cf5e4cf7ea425828ca099af27dc6d2639ded8f9a..274751b4409c2bc2f71b7ea17a49a374d4494190 100644 (file)
@@ -769,7 +769,7 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
        return rc;
 }
 
-static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
+static int uhci_pci_suspend(struct usb_hcd *hcd)
 {
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        int rc = 0;
@@ -795,10 +795,6 @@ static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
 
        /* FIXME: Enable non-PME# remote wakeup? */
 
-       /* make sure snapshot being resumed re-enumerates everything */
-       if (message.event == PM_EVENT_PRETHAW)
-               uhci_hc_died(uhci);
-
 done_okay:
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 done:
@@ -806,7 +802,7 @@ done:
        return rc;
 }
 
-static int uhci_pci_resume(struct usb_hcd *hcd)
+static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
 {
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 
@@ -820,6 +816,10 @@ static int uhci_pci_resume(struct usb_hcd *hcd)
 
        spin_lock_irq(&uhci->lock);
 
+       /* Make sure resume from hibernation re-enumerates everything */
+       if (hibernated)
+               uhci_hc_died(uhci);
+
        /* FIXME: Disable non-PME# remote wakeup? */
 
        /* The firmware or a boot kernel may have changed the controller
@@ -940,10 +940,11 @@ static struct pci_driver uhci_pci_driver = {
        .remove =       usb_hcd_pci_remove,
        .shutdown =     uhci_shutdown,
 
-#ifdef CONFIG_PM
-       .suspend =      usb_hcd_pci_suspend,
-       .resume =       usb_hcd_pci_resume,
-#endif /* PM */
+#ifdef CONFIG_PM_SLEEP
+       .driver =       {
+               .pm =   &usb_hcd_pci_pm_ops
+       },
+#endif
 };
  
 static int __init uhci_hcd_init(void)
@@ -961,7 +962,7 @@ static int __init uhci_hcd_init(void)
                errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
                if (!errbuf)
                        goto errbuf_failed;
-               uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
+               uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
                if (!uhci_debugfs_root)
                        goto debug_failed;
        }
index 3e5807d14ffb5ed89d765b9b82087cb2e2d5fb30..64e57bfe236ba89019c0884432cb44e9f2d72fd8 100644 (file)
@@ -260,7 +260,7 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
        INIT_LIST_HEAD(&qh->node);
 
        if (udev) {             /* Normal QH */
-               qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+               qh->type = usb_endpoint_type(&hep->desc);
                if (qh->type != USB_ENDPOINT_XFER_ISOC) {
                        qh->dummy_td = uhci_alloc_td(uhci);
                        if (!qh->dummy_td) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
new file mode 100644 (file)
index 0000000..2501c57
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "xhci.h"
+
+#define XHCI_INIT_VALUE 0x0
+
+/* Add verbose debugging later, just print everything for now */
+
+void xhci_dbg_regs(struct xhci_hcd *xhci)
+{
+       u32 temp;
+
+       xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
+                       xhci->cap_regs);
+       temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+       xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
+                       &xhci->cap_regs->hc_capbase, temp);
+       xhci_dbg(xhci, "//   CAPLENGTH: 0x%x\n",
+                       (unsigned int) HC_LENGTH(temp));
+#if 0
+       xhci_dbg(xhci, "//   HCIVERSION: 0x%x\n",
+                       (unsigned int) HC_VERSION(temp));
+#endif
+
+       xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
+
+       temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+       xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
+                       &xhci->cap_regs->run_regs_off,
+                       (unsigned int) temp & RTSOFF_MASK);
+       xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
+
+       temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
+       xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
+       xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
+}
+
+static void xhci_print_cap_regs(struct xhci_hcd *xhci)
+{
+       u32 temp;
+
+       xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
+
+       temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
+       xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
+                       (unsigned int) temp);
+       xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
+                       (unsigned int) HC_LENGTH(temp));
+       xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
+                       (unsigned int) HC_VERSION(temp));
+
+       temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+       xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
+                       (unsigned int) temp);
+       xhci_dbg(xhci, "  Max device slots: %u\n",
+                       (unsigned int) HCS_MAX_SLOTS(temp));
+       xhci_dbg(xhci, "  Max interrupters: %u\n",
+                       (unsigned int) HCS_MAX_INTRS(temp));
+       xhci_dbg(xhci, "  Max ports: %u\n",
+                       (unsigned int) HCS_MAX_PORTS(temp));
+
+       temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+       xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
+                       (unsigned int) temp);
+       xhci_dbg(xhci, "  Isoc scheduling threshold: %u\n",
+                       (unsigned int) HCS_IST(temp));
+       xhci_dbg(xhci, "  Maximum allowed segments in event ring: %u\n",
+                       (unsigned int) HCS_ERST_MAX(temp));
+
+       temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+       xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
+                       (unsigned int) temp);
+       xhci_dbg(xhci, "  Worst case U1 device exit latency: %u\n",
+                       (unsigned int) HCS_U1_LATENCY(temp));
+       xhci_dbg(xhci, "  Worst case U2 device exit latency: %u\n",
+                       (unsigned int) HCS_U2_LATENCY(temp));
+
+       temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+       xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
+       xhci_dbg(xhci, "  HC generates %s bit addresses\n",
+                       HCC_64BIT_ADDR(temp) ? "64" : "32");
+       /* FIXME */
+       xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");
+
+       temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
+       xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
+}
+
+static void xhci_print_command_reg(struct xhci_hcd *xhci)
+{
+       u32 temp;
+
+       temp = xhci_readl(xhci, &xhci->op_regs->command);
+       xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
+       xhci_dbg(xhci, "  HC is %s\n",
+                       (temp & CMD_RUN) ? "running" : "being stopped");
+       xhci_dbg(xhci, "  HC has %sfinished hard reset\n",
+                       (temp & CMD_RESET) ? "not " : "");
+       xhci_dbg(xhci, "  Event Interrupts %s\n",
+                       (temp & CMD_EIE) ? "enabled " : "disabled");
+       xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
+                       (temp & CMD_HSEIE) ? "enabled " : "disabled");
+       xhci_dbg(xhci, "  HC has %sfinished light reset\n",
+                       (temp & CMD_LRESET) ? "not " : "");
+}
+
+static void xhci_print_status(struct xhci_hcd *xhci)
+{
+       u32 temp;
+
+       temp = xhci_readl(xhci, &xhci->op_regs->status);
+       xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
+       xhci_dbg(xhci, "  Event ring is %sempty\n",
+                       (temp & STS_EINT) ? "not " : "");
+       xhci_dbg(xhci, "  %sHost System Error\n",
+                       (temp & STS_FATAL) ? "WARNING: " : "No ");
+       xhci_dbg(xhci, "  HC is %s\n",
+                       (temp & STS_HALT) ? "halted" : "running");
+}
+
+static void xhci_print_op_regs(struct xhci_hcd *xhci)
+{
+       xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
+       xhci_print_command_reg(xhci);
+       xhci_print_status(xhci);
+}
+
+static void xhci_print_ports(struct xhci_hcd *xhci)
+{
+       u32 __iomem *addr;
+       int i, j;
+       int ports;
+       char *names[NUM_PORT_REGS] = {
+               "status",
+               "power",
+               "link",
+               "reserved",
+       };
+
+       ports = HCS_MAX_PORTS(xhci->hcs_params1);
+       addr = &xhci->op_regs->port_status_base;
+       for (i = 0; i < ports; i++) {
+               for (j = 0; j < NUM_PORT_REGS; ++j) {
+                       xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
+                                       addr, names[j],
+                                       (unsigned int) xhci_readl(xhci, addr));
+                       addr++;
+               }
+       }
+}
+
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+{
+       void *addr;
+       u32 temp;
+
+       addr = &ir_set->irq_pending;
+       temp = xhci_readl(xhci, addr);
+       if (temp == XHCI_INIT_VALUE)
+               return;
+
+       xhci_dbg(xhci, "  %p: ir_set[%i]\n", ir_set, set_num);
+
+       xhci_dbg(xhci, "  %p: ir_set.pending = 0x%x\n", addr,
+                       (unsigned int)temp);
+
+       addr = &ir_set->irq_control;
+       temp = xhci_readl(xhci, addr);
+       xhci_dbg(xhci, "  %p: ir_set.control = 0x%x\n", addr,
+                       (unsigned int)temp);
+
+       addr = &ir_set->erst_size;
+       temp = xhci_readl(xhci, addr);
+       xhci_dbg(xhci, "  %p: ir_set.erst_size = 0x%x\n", addr,
+                       (unsigned int)temp);
+
+       addr = &ir_set->rsvd;
+       temp = xhci_readl(xhci, addr);
+       if (temp != XHCI_INIT_VALUE)
+               xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
+                               addr, (unsigned int)temp);
+
+       addr = &ir_set->erst_base[0];
+       temp = xhci_readl(xhci, addr);
+       xhci_dbg(xhci, "  %p: ir_set.erst_base[0] = 0x%x\n",
+                       addr, (unsigned int) temp);
+
+       addr = &ir_set->erst_base[1];
+       temp = xhci_readl(xhci, addr);
+       xhci_dbg(xhci, "  %p: ir_set.erst_base[1] = 0x%x\n",
+                       addr, (unsigned int) temp);
+
+       addr = &ir_set->erst_dequeue[0];
+       temp = xhci_readl(xhci, addr);
+       xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[0] = 0x%x\n",
+                       addr, (unsigned int) temp);
+
+       addr = &ir_set->erst_dequeue[1];
+       temp = xhci_readl(xhci, addr);
+       xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[1] = 0x%x\n",
+                       addr, (unsigned int) temp);
+}
+
+void xhci_print_run_regs(struct xhci_hcd *xhci)
+{
+       u32 temp;
+       int i;
+
+       xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
+       temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+       xhci_dbg(xhci, "  %p: Microframe index = 0x%x\n",
+                       &xhci->run_regs->microframe_index,
+                       (unsigned int) temp);
+       for (i = 0; i < 7; ++i) {
+               temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
+               if (temp != XHCI_INIT_VALUE)
+                       xhci_dbg(xhci, "  WARN: %p: Rsvd[%i] = 0x%x\n",
+                                       &xhci->run_regs->rsvd[i],
+                                       i, (unsigned int) temp);
+       }
+}
+
+void xhci_print_registers(struct xhci_hcd *xhci)
+{
+       xhci_print_cap_regs(xhci);
+       xhci_print_op_regs(xhci);
+       xhci_print_ports(xhci);
+}
+
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+       int i;
+       for (i = 0; i < 4; ++i)
+               xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
+                               i*4, trb->generic.field[i]);
+}
+
+/**
+ * Debug a transfer request block (TRB).
+ */
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
+{
+       u64     address;
+       u32     type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+
+       switch (type) {
+       case TRB_TYPE(TRB_LINK):
+               xhci_dbg(xhci, "Link TRB:\n");
+               xhci_print_trb_offsets(xhci, trb);
+
+               address = trb->link.segment_ptr[0] +
+                       (((u64) trb->link.segment_ptr[1]) << 32);
+               xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
+
+               xhci_dbg(xhci, "Interrupter target = 0x%x\n",
+                               GET_INTR_TARGET(trb->link.intr_target));
+               xhci_dbg(xhci, "Cycle bit = %u\n",
+                               (unsigned int) (trb->link.control & TRB_CYCLE));
+               xhci_dbg(xhci, "Toggle cycle bit = %u\n",
+                               (unsigned int) (trb->link.control & LINK_TOGGLE));
+               xhci_dbg(xhci, "No Snoop bit = %u\n",
+                               (unsigned int) (trb->link.control & TRB_NO_SNOOP));
+               break;
+       case TRB_TYPE(TRB_TRANSFER):
+               address = trb->trans_event.buffer[0] +
+                       (((u64) trb->trans_event.buffer[1]) << 32);
+               /*
+                * FIXME: look at flags to figure out if it's an address or if
+                * the data is directly in the buffer field.
+                */
+               xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
+               break;
+       case TRB_TYPE(TRB_COMPLETION):
+               address = trb->event_cmd.cmd_trb[0] +
+                       (((u64) trb->event_cmd.cmd_trb[1]) << 32);
+               xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
+               xhci_dbg(xhci, "Completion status = %u\n",
+                               (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
+               xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+               break;
+       default:
+               xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
+                               (unsigned int) type>>10);
+               xhci_print_trb_offsets(xhci, trb);
+               break;
+       }
+}
+
+/**
+ * Debug a segment with an xHCI ring.
+ *
+ * @return The Link TRB of the segment, or NULL if there is no Link TRB
+ * (which is a bug, since all segments must have a Link TRB).
+ *
+ * Prints out all TRBs in the segment, even those after the Link TRB.
+ *
+ * XXX: should we print out TRBs that the HC owns?  As long as we don't
+ * write, that should be fine...  We shouldn't expect that the memory pointed to
+ * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
+ * for HC debugging.
+ */
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+       int i;
+       u32 addr = (u32) seg->dma;
+       union xhci_trb *trb = seg->trbs;
+
+       for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
+               trb = &seg->trbs[i];
+               xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
+                               (unsigned int) trb->link.segment_ptr[0],
+                               (unsigned int) trb->link.segment_ptr[1],
+                               (unsigned int) trb->link.intr_target,
+                               (unsigned int) trb->link.control);
+               addr += sizeof(*trb);
+       }
+}
+
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+       xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
+                       ring->dequeue,
+                       (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
+                                                           ring->dequeue));
+       xhci_dbg(xhci, "Ring deq updated %u times\n",
+                       ring->deq_updates);
+       xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
+                       ring->enqueue,
+                       (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
+                                                           ring->enqueue));
+       xhci_dbg(xhci, "Ring enq updated %u times\n",
+                       ring->enq_updates);
+}
+
+/**
+ * Debugging for an xHCI ring, which is a queue broken into multiple segments.
+ *
+ * Print out each segment in the ring.  Check that the DMA address in
+ * each link segment actually matches the segment's stored DMA address.
+ * Check that the link end bit is only set at the end of the ring.
+ * Check that the dequeue and enqueue pointers point to real data in this ring
+ * (not some other ring).
+ */
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+       /* FIXME: Throw an error if any segment doesn't have a Link TRB */
+       struct xhci_segment *seg;
+       struct xhci_segment *first_seg = ring->first_seg;
+       xhci_debug_segment(xhci, first_seg);
+
+       if (!ring->enq_updates && !ring->deq_updates) {
+               xhci_dbg(xhci, "  Ring has not been updated\n");
+               return;
+       }
+       for (seg = first_seg->next; seg != first_seg; seg = seg->next)
+               xhci_debug_segment(xhci, seg);
+}
+
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+       u32 addr = (u32) erst->erst_dma_addr;
+       int i;
+       struct xhci_erst_entry *entry;
+
+       for (i = 0; i < erst->num_entries; ++i) {
+               entry = &erst->entries[i];
+               xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
+                               (unsigned int) addr,
+                               (unsigned int) entry->seg_addr[0],
+                               (unsigned int) entry->seg_addr[1],
+                               (unsigned int) entry->seg_size,
+                               (unsigned int) entry->rsvd);
+               addr += sizeof(*entry);
+       }
+}
+
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
+{
+       u32 val;
+
+       val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+       xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
+       val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
+       xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+{
+       int i, j;
+       int last_ep_ctx = 31;
+       /* Fields are 32 bits wide, DMA addresses are in bytes */
+       int field_size = 32 / 8;
+
+       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+                       &ctx->drop_flags, (unsigned long long)dma,
+                       ctx->drop_flags);
+       dma += field_size;
+       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+                       &ctx->add_flags, (unsigned long long)dma,
+                       ctx->add_flags);
+       dma += field_size;
+       for (i = 0; i < 6; ++i) {
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+                               &ctx->rsvd[i], (unsigned long long)dma,
+                               ctx->rsvd[i], i);
+               dma += field_size;
+       }
+
+       xhci_dbg(xhci, "Slot Context:\n");
+       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
+                       &ctx->slot.dev_info,
+                       (unsigned long long)dma, ctx->slot.dev_info);
+       dma += field_size;
+       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
+                       &ctx->slot.dev_info2,
+                       (unsigned long long)dma, ctx->slot.dev_info2);
+       dma += field_size;
+       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
+                       &ctx->slot.tt_info,
+                       (unsigned long long)dma, ctx->slot.tt_info);
+       dma += field_size;
+       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
+                       &ctx->slot.dev_state,
+                       (unsigned long long)dma, ctx->slot.dev_state);
+       dma += field_size;
+       for (i = 0; i < 4; ++i) {
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+                               &ctx->slot.reserved[i], (unsigned long long)dma,
+                               ctx->slot.reserved[i], i);
+               dma += field_size;
+       }
+
+       if (last_ep < 31)
+               last_ep_ctx = last_ep + 1;
+       for (i = 0; i < last_ep_ctx; ++i) {
+               xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
+                               &ctx->ep[i].ep_info,
+                               (unsigned long long)dma, ctx->ep[i].ep_info);
+               dma += field_size;
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
+                               &ctx->ep[i].ep_info2,
+                               (unsigned long long)dma, ctx->ep[i].ep_info2);
+               dma += field_size;
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
+                               &ctx->ep[i].deq[0],
+                               (unsigned long long)dma, ctx->ep[i].deq[0]);
+               dma += field_size;
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
+                               &ctx->ep[i].deq[1],
+                               (unsigned long long)dma, ctx->ep[i].deq[1]);
+               dma += field_size;
+               xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
+                               &ctx->ep[i].tx_info,
+                               (unsigned long long)dma, ctx->ep[i].tx_info);
+               dma += field_size;
+               for (j = 0; j < 3; ++j) {
+                       xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
+                                       &ctx->ep[i].reserved[j],
+                                       (unsigned long long)dma,
+                                       ctx->ep[i].reserved[j], j);
+                       dma += field_size;
+               }
+       }
+}
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
new file mode 100644 (file)
index 0000000..ecc131c
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/* Up to 16 microframes to halt an HC - one microframe is 125 microseconds */
+#define XHCI_MAX_HALT_USEC     (16*125)
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define XHCI_STS_HALT          (1<<0)
+
+/* HCCPARAMS offset from PCI base address */
+#define XHCI_HCC_PARAMS_OFFSET 0x10
+/* HCCPARAMS contains the first extended capability pointer */
+#define XHCI_HCC_EXT_CAPS(p)   (((p)>>16)&0xffff)
+
+/* Command and Status registers offset from the Operational Registers address */
+#define XHCI_CMD_OFFSET                0x00
+#define XHCI_STS_OFFSET                0x04
+
+#define XHCI_MAX_EXT_CAPS              50
+
+/* Capability Register */
+/* bits 7:0 - how long is the Capabilities register */
+#define XHCI_HC_LENGTH(p)      (((p)>>00)&0x00ff)
+
+/* Extended capability register fields */
+#define XHCI_EXT_CAPS_ID(p)    (((p)>>0)&0xff)
+#define XHCI_EXT_CAPS_NEXT(p)  (((p)>>8)&0xff)
+#define        XHCI_EXT_CAPS_VAL(p)    ((p)>>16)
+/* Extended capability IDs - ID 0 reserved */
+#define XHCI_EXT_CAPS_LEGACY   1
+#define XHCI_EXT_CAPS_PROTOCOL 2
+#define XHCI_EXT_CAPS_PM       3
+#define XHCI_EXT_CAPS_VIRT     4
+#define XHCI_EXT_CAPS_ROUTE    5
+/* IDs 6-9 reserved */
+#define XHCI_EXT_CAPS_DEBUG    10
+/* USB Legacy Support Capability - section 7.1.1 */
+#define XHCI_HC_BIOS_OWNED     (1 << 16)
+#define XHCI_HC_OS_OWNED       (1 << 24)
+
+/* USB Legacy Support Capability - section 7.1.1 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_SUPPORT_OFFSET     (0x00)
+
+/* USB Legacy Support Control and Status Register  - section 7.1.2 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_CONTROL_OFFSET     (0x04)
+/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define        XHCI_LEGACY_DISABLE_SMI         ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
+
+/* command register values to disable interrupts and halt the HC */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define XHCI_CMD_RUN           (1 << 0)
+/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
+#define XHCI_CMD_EIE           (1 << 2)
+/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
+#define XHCI_CMD_HSEIE         (1 << 3)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define XHCI_CMD_EWE           (1 << 10)
+
+#define XHCI_IRQS              (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
+
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define XHCI_STS_CNR           (1 << 11)
+
+#include <linux/io.h>
+
+/**
+ * Return the next extended capability pointer register.
+ *
+ * @base       PCI register base address.
+ *
+ * @ext_offset Offset of the 32-bit register that contains the extended
+ * capabilities pointer.  If searching for the first extended capability, pass
+ * in XHCI_HCC_PARAMS_OFFSET.  If searching for the next extended capability,
+ * pass in the offset of the current extended capability register.
+ *
+ * Returns 0 if there is no next extended capability register or returns the register offset
+ * from the PCI registers base address.
+ */
+static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
+{
+       u32 next;
+
+       next = readl(base + ext_offset);
+
+       if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+               /* Find the first extended capability */
+               next = XHCI_HCC_EXT_CAPS(next);
+       else
+               /* Find the next extended capability */
+               next = XHCI_EXT_CAPS_NEXT(next);
+       if (!next)
+               return 0;
+       /*
+        * Address calculation from offset of extended capabilities
+        * (or HCCPARAMS) register - see section 5.3.6 and section 7.
+        */
+       return ext_offset + (next << 2);
+}
+
+/**
+ * Find the offset of the extended capabilities with capability ID id.
+ *
+ * @base PCI MMIO registers base address.
+ * @ext_offset Offset from base of the first extended capability to look at,
+ *             or the address of HCCPARAMS.
+ * @id Extended capability ID to search for.
+ *
+ * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities
+ * to make sure that the list doesn't contain a loop.
+ */
+static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id)
+{
+       u32 val;
+       int limit = XHCI_MAX_EXT_CAPS;
+
+       while (ext_offset && limit > 0) {
+               val = readl(base + ext_offset);
+               if (XHCI_EXT_CAPS_ID(val) == id)
+                       break;
+               ext_offset = xhci_find_next_cap_offset(base, ext_offset);
+               limit--;
+       }
+       if (limit > 0)
+               return ext_offset;
+       return 0;
+}
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
new file mode 100644 (file)
index 0000000..dba3e07
--- /dev/null
@@ -0,0 +1,1274 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include "xhci.h"
+
+#define DRIVER_AUTHOR "Sarah Sharp"
+#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+
+/* TODO: copied from ehci-hcd.c - can this be refactored? */
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done).  There are two failure modes:  "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ */
+static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+                     u32 mask, u32 done, int usec)
+{
+       u32     val;
+
+       do {
+               val = xhci_readl(xhci, ptr);
+               if (val == ~(u32)0)
+                       return -ENODEV; /* reads as all-ones: hardware gone */
+               if ((val & mask) == done)
+                       return 0;       /* handshake complete */
+               udelay(1);
+       } while (--usec > 0);
+       /* "usec" microseconds elapsed without the bits reaching "done" */
+       return -ETIMEDOUT;
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 microframes of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+       u32 halted;
+       u32 cmd;
+       u32 mask;
+
+       xhci_dbg(xhci, "// Halt the HC\n");
+       /* Disable all interrupts from the host controller */
+       mask = ~(XHCI_IRQS);
+       halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
+       /* Only clear the run/stop bit if the HC is not already halted */
+       if (!halted)
+               mask &= ~CMD_RUN;
+
+       /* Read-modify-write the command register with the combined mask */
+       cmd = xhci_readl(xhci, &xhci->op_regs->command);
+       cmd &= mask;
+       xhci_writel(xhci, cmd, &xhci->op_regs->command);
+
+       /* Poll for STS_HALT to assert, up to XHCI_MAX_HALT_USEC microseconds */
+       return handshake(xhci, &xhci->op_regs->status,
+                       STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+}
+
+/*
+ * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int xhci_reset(struct xhci_hcd *xhci)
+{
+       u32 command;
+       u32 state;
+
+       /* The caller must have halted the HC first; resetting a running
+        * controller is a driver bug.
+        */
+       state = xhci_readl(xhci, &xhci->op_regs->status);
+       BUG_ON((state & STS_HALT) == 0);
+
+       xhci_dbg(xhci, "// Reset the HC\n");
+       command = xhci_readl(xhci, &xhci->op_regs->command);
+       command |= CMD_RESET;
+       xhci_writel(xhci, command, &xhci->op_regs->command);
+       /* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
+       xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+
+       /* Wait up to 250 ms for the controller to clear CMD_RESET */
+       return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
+}
+
+/*
+ * Stop the HC from processing the endpoint queues.
+ */
+/* Placeholder: quiescing is not implemented yet; this only sanity-checks
+ * that the HC is still running and logs that fact.
+ */
+static void xhci_quiesce(struct xhci_hcd *xhci)
+{
+       /*
+        * Queues are per endpoint, so we need to disable an endpoint or slot.
+        *
+        * To disable a slot, we need to insert a disable slot command on the
+        * command ring and ring the doorbell.  This will also free any internal
+        * resources associated with the slot (which might not be what we want).
+        *
+        * A Release Endpoint command sounds better - doesn't free internal HC
+        * memory, but removes the endpoints from the schedule and releases the
+        * bandwidth, disables the doorbells, and clears the endpoint enable
+        * flag.  Usually used prior to a set interface command.
+        *
+        * TODO: Implement after command ring code is done.
+        */
+       BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
+       xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
+}
+
+#if 0
+/* Set up MSI-X table for entry 0 (may claim other entries later) */
+/* NOTE: compiled out by the surrounding #if 0 -- MSI-X support is not wired
+ * into xhci_run() yet.  Allocates a single MSI-X entry, enables MSI-X on the
+ * PCI device, and requests the vector's irq.  Returns 0 or a negative errno,
+ * undoing partial setup on failure.
+ */
+static int xhci_setup_msix(struct xhci_hcd *xhci)
+{
+       int ret;
+       struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+       xhci->msix_count = 0;
+       /* XXX: did I do this right?  ixgbe does kcalloc for more than one */
+       xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
+       if (!xhci->msix_entries) {
+               xhci_err(xhci, "Failed to allocate MSI-X entries\n");
+               return -ENOMEM;
+       }
+       xhci->msix_entries[0].entry = 0;
+
+       ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
+       if (ret) {
+               xhci_err(xhci, "Failed to enable MSI-X\n");
+               goto free_entries;
+       }
+
+       /*
+        * Pass the xhci pointer value as the request_irq "cookie".
+        * If more irqs are added, this will need to be unique for each one.
+        */
+       ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
+                       "xHCI", xhci_to_hcd(xhci));
+       if (ret) {
+               xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
+               goto disable_msix;
+       }
+       xhci_dbg(xhci, "Finished setting up MSI-X\n");
+       return 0;
+
+disable_msix:
+       pci_disable_msix(pdev);
+free_entries:
+       kfree(xhci->msix_entries);
+       xhci->msix_entries = NULL;
+       return ret;
+}
+
+/* XXX: code duplication; can xhci_setup_msix call this? */
+/* Free any IRQs and disable MSI-X.  Safe to call if setup never ran:
+ * bails out when no entries were allocated.  Also compiled out by #if 0.
+ */
+static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+{
+       struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+       if (!xhci->msix_entries)
+               return;
+
+       free_irq(xhci->msix_entries[0].vector, xhci);
+       pci_disable_msix(pdev);
+       kfree(xhci->msix_entries);
+       xhci->msix_entries = NULL;
+       xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
+}
+#endif
+
+/*
+ * Initialize memory for HCD and xHC (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts (?), set up a command ring segment (or two?), create event
+ * ring (one for now).
+ */
+int xhci_init(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       int ret;
+
+       xhci_dbg(xhci, "xhci_init\n");
+       /* One-time init: set up the lock, then all HC data structures */
+       spin_lock_init(&xhci->lock);
+       ret = xhci_mem_init(xhci, GFP_KERNEL);
+       xhci_dbg(xhci, "Finished xhci_init\n");
+
+       return ret;
+}
+
+/*
+ * Called in interrupt context when there might be work
+ * queued on the event ring
+ *
+ * xhci->lock must be held by caller.
+ */
+static void xhci_work(struct xhci_hcd *xhci)
+{
+       u32 temp;
+
+       /*
+        * Clear the op reg interrupt status first,
+        * so we can receive interrupts from other MSI-X interrupters.
+        * Write 1 to clear the interrupt status.
+        */
+       temp = xhci_readl(xhci, &xhci->op_regs->status);
+       temp |= STS_EINT;
+       xhci_writel(xhci, temp, &xhci->op_regs->status);
+       /* FIXME when MSI-X is supported and there are multiple vectors */
+       /* Clear the MSI-X event interrupt status */
+
+       /* Acknowledge the interrupt (IP and EN bits of irq_pending) */
+       temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+       temp |= 0x3;
+       xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
+       /* Flush posted writes */
+       xhci_readl(xhci, &xhci->ir_set->irq_pending);
+
+       /* FIXME this should be a delayed service routine that clears the EHB */
+       xhci_handle_event(xhci);
+
+       /* Clear the event handler busy flag; the event ring should be empty.
+        * erst_dequeue[0] is the low half of the 64-bit dequeue pointer.
+        */
+       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+       xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+       /* Flush posted writes -- FIXME is this necessary? */
+       xhci_readl(xhci, &xhci->ir_set->irq_pending);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+ * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
+ * indicators of an event TRB error, but we check the status *first* to be safe.
+ */
+irqreturn_t xhci_irq(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       u32 temp, temp2;
+
+       spin_lock(&xhci->lock);
+       /* Check if the xHC generated the interrupt, or the irq is shared */
+       temp = xhci_readl(xhci, &xhci->op_regs->status);
+       temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+       if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
+               spin_unlock(&xhci->lock);
+               return IRQ_NONE;
+       }
+
+       if (temp & STS_FATAL) {
+               xhci_warn(xhci, "WARNING: Host System Error\n");
+               xhci_halt(xhci);
+               xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+               spin_unlock(&xhci->lock);
+               /* An irqreturn_t must be IRQ_NONE/IRQ_HANDLED, never a
+                * negative errno (-ESHUTDOWN was wrong here).  The interrupt
+                * was ours, so report it handled even though the HC died.
+                */
+               return IRQ_HANDLED;
+       }
+
+       xhci_work(xhci);
+       spin_unlock(&xhci->lock);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+/* Debug-only timer callback: dump HC status, event/command/endpoint rings,
+ * optionally queue a test no-op command, then re-arm the poll timer unless
+ * the driver is being torn down (xhci->zombie set by xhci_stop()).
+ */
+void xhci_event_ring_work(unsigned long arg)
+{
+       unsigned long flags;
+       int temp;
+       struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
+       int i, j;
+
+       xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
+
+       spin_lock_irqsave(&xhci->lock, flags);
+       temp = xhci_readl(xhci, &xhci->op_regs->status);
+       xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
+       temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+       xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
+       xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
+       xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
+       /* Errors are reported once per poll interval, then cleared */
+       xhci->error_bitmask = 0;
+       xhci_dbg(xhci, "Event ring:\n");
+       xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
+       xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+       temp &= ERST_PTR_MASK;
+       xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+       xhci_dbg(xhci, "Command ring:\n");
+       xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
+       xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+       xhci_dbg_cmd_ptrs(xhci);
+       /* Dump the first segment of every active endpoint ring */
+       for (i = 0; i < MAX_HC_SLOTS; ++i) {
+               if (xhci->devs[i]) {
+                       for (j = 0; j < 31; ++j) {
+                               if (xhci->devs[i]->ep_rings[j]) {
+                                       xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
+                                       xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
+                               }
+                       }
+               }
+       }
+
+       /* Keep exercising the command ring until NUM_TEST_NOOPS were sent */
+       if (xhci->noops_submitted != NUM_TEST_NOOPS)
+               if (xhci_setup_one_noop(xhci))
+                       xhci_ring_cmd_db(xhci);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
+       if (!xhci->zombie)
+               mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
+       else
+               xhci_dbg(xhci, "Quit polling the event ring.\n");
+}
+#endif
+
+/*
+ * Start the HC after it was halted.
+ *
+ * This function is called by the USB core when the HC driver is added.
+ * Its opposite is xhci_stop().
+ *
+ * xhci_init() must be called once before this function can be called.
+ * Reset the HC, enable device slot contexts, program DCBAAP, and
+ * set command ring pointer and event ring pointer.
+ *
+ * Setup MSI-X vectors and enable interrupts.
+ */
+int xhci_run(struct usb_hcd *hcd)
+{
+       u32 temp;
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       void (*doorbell)(struct xhci_hcd *) = NULL;
+
+       hcd->uses_new_polling = 1;
+       hcd->poll_rh = 0;
+
+       xhci_dbg(xhci, "xhci_run\n");
+#if 0  /* FIXME: MSI not setup yet */
+       /* Do this at the very last minute */
+       ret = xhci_setup_msix(xhci);
+       if (!ret)
+               return ret;
+
+       return -ENOSYS;
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+       /* Debug builds poll the event ring periodically via a timer */
+       init_timer(&xhci->event_ring_timer);
+       xhci->event_ring_timer.data = (unsigned long) xhci;
+       xhci->event_ring_timer.function = xhci_event_ring_work;
+       /* Poll the event ring */
+       xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
+       xhci->zombie = 0;
+       xhci_dbg(xhci, "Setting event ring polling timer\n");
+       add_timer(&xhci->event_ring_timer);
+#endif
+
+       xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+       temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+       temp &= ~ER_IRQ_INTERVAL_MASK;
+       /* 160 is the moderation interval value; presumably in IMOD units
+        * per the xHCI spec - TODO confirm units.
+        */
+       temp |= (u32) 160;
+       xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+
+       /* Set the HCD state before we enable the irqs */
+       hcd->state = HC_STATE_RUNNING;
+       temp = xhci_readl(xhci, &xhci->op_regs->command);
+       temp |= (CMD_EIE);
+       xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
+                       temp);
+       xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+       /* Enable the event ring interrupter */
+       temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+       xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+                       xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
+       xhci_writel(xhci, ER_IRQ_ENABLE(temp),
+                       &xhci->ir_set->irq_pending);
+       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+       /* Optionally queue a test no-op; its doorbell is rung after CMD_RUN */
+       if (NUM_TEST_NOOPS > 0)
+               doorbell = xhci_setup_one_noop(xhci);
+
+       xhci_dbg(xhci, "Command ring memory map follows:\n");
+       xhci_debug_ring(xhci, xhci->cmd_ring);
+       xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+       xhci_dbg_cmd_ptrs(xhci);
+
+       xhci_dbg(xhci, "ERST memory map follows:\n");
+       xhci_dbg_erst(xhci, &xhci->erst);
+       xhci_dbg(xhci, "Event ring:\n");
+       xhci_debug_ring(xhci, xhci->event_ring);
+       xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+       temp &= ERST_PTR_MASK;
+       xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
+       xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
+
+       /* Finally set run/stop to start the controller */
+       temp = xhci_readl(xhci, &xhci->op_regs->command);
+       temp |= (CMD_RUN);
+       xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+                       temp);
+       xhci_writel(xhci, temp, &xhci->op_regs->command);
+       /* Flush PCI posted writes */
+       temp = xhci_readl(xhci, &xhci->op_regs->command);
+       xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
+       if (doorbell)
+               (*doorbell)(xhci);
+
+       xhci_dbg(xhci, "Finished xhci_run\n");
+       return 0;
+}
+
+/*
+ * Stop xHCI driver.
+ *
+ * This function is called by the USB core when the HC driver is removed.
+ * Its opposite is xhci_run().
+ *
+ * Disable device contexts, disable IRQs, and quiesce the HC.
+ * Reset the HC, finish any completed transactions, and cleanup memory.
+ */
+void xhci_stop(struct usb_hcd *hcd)
+{
+       u32 temp;
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+       /* Quiesce (if still running), halt, and reset under the lock */
+       spin_lock_irq(&xhci->lock);
+       if (HC_IS_RUNNING(hcd->state))
+               xhci_quiesce(xhci);
+       xhci_halt(xhci);
+       xhci_reset(xhci);
+       spin_unlock_irq(&xhci->lock);
+
+#if 0  /* No MSI yet */
+       xhci_cleanup_msix(xhci);
+#endif
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+       /* Tell the event ring poll function not to reschedule */
+       xhci->zombie = 1;
+       del_timer_sync(&xhci->event_ring_timer);
+#endif
+
+       xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+       temp = xhci_readl(xhci, &xhci->op_regs->status);
+       xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
+       temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+       xhci_writel(xhci, ER_IRQ_DISABLE(temp),
+                       &xhci->ir_set->irq_pending);
+       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+       /* Free all driver-allocated HC data structures */
+       xhci_dbg(xhci, "cleaning up memory\n");
+       xhci_mem_cleanup(xhci);
+       xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
+                   xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+/*
+ * Shutdown HC (not bus-specific)
+ *
+ * This is called when the machine is rebooting or halting.  We assume that the
+ * machine will be powered off, and the HC's internal state will be reset.
+ * Don't bother to free memory.
+ */
+void xhci_shutdown(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+       /* Halt only; no reset or memory cleanup since we're powering off */
+       spin_lock_irq(&xhci->lock);
+       xhci_halt(xhci);
+       spin_unlock_irq(&xhci->lock);
+
+#if 0
+       xhci_cleanup_msix(xhci);
+#endif
+
+       xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+                   xhci_readl(xhci, &xhci->op_regs->status));
+}
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
+ * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
+ * value to right shift 1 for the bitmask.
+ *
+ * Index  = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
+{
+       unsigned int index = (unsigned int) usb_endpoint_num(desc) * 2;
+
+       /* Non-control endpoints: index = (epnum * 2) + direction - 1,
+        * where direction is 1 for IN and 0 for OUT.  Control endpoints
+        * use the IN index, which is simply (epnum * 2).
+        */
+       if (!usb_endpoint_xfer_control(desc))
+               index += (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+       return index;
+}
+
+/* Find the flag for this endpoint (for use in the control context).  Use the
+ * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+{
+       unsigned int ep_index = xhci_get_endpoint_index(desc);
+
+       /* Shift by one because bit 0 belongs to the slot context */
+       return 1 << (ep_index + 1);
+}
+
+/* Compute the last valid endpoint context index.  Basically, this is the
+ * endpoint index plus one.  For slot contexts with more than one valid
+ * endpoint, we find the most significant bit set in the added contexts flags.
+ * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
+ * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
+ */
+static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
+{
+       return fls(added_ctxs) - 1;
+}
+
+/* Validate the common arguments passed into HCD entry points.
+ * Returns 1 if the arguments are OK;
+ * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
+ */
+int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+               struct usb_host_endpoint *ep, int check_ep, const char *func)
+{
+       if (!hcd || (check_ep && !ep) || !udev) {
+               printk(KERN_DEBUG "xHCI %s called with invalid args\n", func);
+               return -EINVAL;
+       }
+       /* A device with no parent is the root hub */
+       if (!udev->parent) {
+               printk(KERN_DEBUG "xHCI %s called for root hub\n", func);
+               return 0;
+       }
+       /* Slot ID 0 means no Address Device command has completed yet */
+       if (!udev->slot_id) {
+               printk(KERN_DEBUG "xHCI %s called with unaddressed device\n", func);
+               return -EINVAL;
+       }
+       return 1;
+}
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ */
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       unsigned long flags;
+       int ret = 0;
+       unsigned int slot_id, ep_index;
+
+       if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
+               return -EINVAL;
+
+       slot_id = urb->dev->slot_id;
+       ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+
+       spin_lock_irqsave(&xhci->lock, flags);
+       /* The device must have been assigned a slot by Address Device */
+       if (!xhci->devs || !xhci->devs[slot_id]) {
+               if (!in_interrupt())
+                       dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
+               ret = -EINVAL;
+               goto exit;
+       }
+       if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+               if (!in_interrupt())
+                       xhci_dbg(xhci, "urb submitted during PCI suspend\n");
+               ret = -ESHUTDOWN;
+               goto exit;
+       }
+       /* Only control and bulk transfers are implemented so far */
+       if (usb_endpoint_xfer_control(&urb->ep->desc))
+               ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+                               slot_id, ep_index);
+       else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
+               ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+                               slot_id, ep_index);
+       else
+               ret = -EINVAL;
+exit:
+       spin_unlock_irqrestore(&xhci->lock, flags);
+       return ret;
+}
+
+/*
+ * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
+ * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
+ * should pick up where it left off in the TD, unless a Set Transfer Ring
+ * Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled URB will be "removed" from
+ * the ring.  Since the ring is a contiguous structure, they can't be physically
+ * removed.  Instead, there are two options:
+ *
+ *  1) If the HC is in the middle of processing the URB to be canceled, we
+ *     simply move the ring's dequeue pointer past those TRBs using the Set
+ *     Transfer Ring Dequeue Pointer command.  This will be the common case,
+ *     when drivers timeout on the last submitted URB and attempt to cancel.
+ *
+ *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
+ *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
+ *     HC will need to invalidate the any TRBs it has cached after the stop
+ *     endpoint command, as noted in the xHCI 0.95 errata.
+ *
+ *  3) The TD may have completed by the time the Stop Endpoint Command
+ *     completes, so software needs to handle that case too.
+ *
+ * This function should protect against the TD enqueueing code ringing the
+ * doorbell while this code is waiting for a Stop Endpoint command to complete.
+ * It also needs to account for multiple cancellations happening at the same
+ * time for the same endpoint.
+ *
+ * Note that this function can be called in any context, or so says
+ * usb_hcd_unlink_urb()
+ */
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+       unsigned long flags;
+       int ret;
+       struct xhci_hcd *xhci;
+       struct xhci_td *td;
+       unsigned int ep_index;
+       struct xhci_ring *ep_ring;
+
+       xhci = hcd_to_xhci(hcd);
+       spin_lock_irqsave(&xhci->lock, flags);
+       /* Make sure the URB hasn't completed or been unlinked already */
+       ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+       if (ret || !urb->hcpriv)
+               goto done;
+
+       xhci_dbg(xhci, "Cancel URB %p\n", urb);
+       ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+       ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+       /* urb->hcpriv holds the TD set up at enqueue time */
+       td = (struct xhci_td *) urb->hcpriv;
+
+       ep_ring->cancels_pending++;
+       list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
+       /* Queue a stop endpoint command, but only if this is
+        * the first cancellation to be handled.
+        */
+       if (ep_ring->cancels_pending == 1) {
+               xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+               xhci_ring_cmd_db(xhci);
+       }
+done:
+       spin_unlock_irqrestore(&xhci->lock, flags);
+       return ret;
+}
+
+/* Drop an endpoint from a new bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint that is being
+ * disabled, so there's no need for mutual exclusion to protect
+ * the xhci->devs[slot_id] structure.
+ */
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+               struct usb_host_endpoint *ep)
+{
+       struct xhci_hcd *xhci;
+       struct xhci_device_control *in_ctx;
+       unsigned int last_ctx;
+       unsigned int ep_index;
+       struct xhci_ep_ctx *ep_ctx;
+       u32 drop_flag;
+       u32 new_add_flags, new_drop_flags, new_slot_info;
+       int ret;
+
+       ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+       if (ret <= 0)
+               return ret;
+       xhci = hcd_to_xhci(hcd);
+       xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+
+       /* The slot context and endpoint 0 can never be dropped */
+       drop_flag = xhci_get_endpoint_flag(&ep->desc);
+       if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
+               xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
+                               __func__, drop_flag);
+               return 0;
+       }
+
+       if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+               xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+       ep_index = xhci_get_endpoint_index(&ep->desc);
+       ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+       /* If the HC already knows the endpoint is disabled,
+        * or the HCD has noted it is disabled, ignore this request
+        */
+       if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
+                       in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+               xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+                               __func__, ep);
+               return 0;
+       }
+
+       in_ctx->drop_flags |= drop_flag;
+       new_drop_flags = in_ctx->drop_flags;
+
+       /* Clear only this endpoint's add flag.  The previous plain
+        * assignment (add_flags = ~drop_flag) turned ON the add flag of
+        * every other context, telling the HC to add endpoints that were
+        * never requested.
+        */
+       in_ctx->add_flags &= ~drop_flag;
+       new_add_flags = in_ctx->add_flags;
+
+       last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+       /* Update the last valid endpoint context, if we deleted the last one */
+       if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+               in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+               in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+       }
+       new_slot_info = in_ctx->slot.dev_info;
+
+       /* Wipe the input endpoint context so stale state isn't reused */
+       xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
+
+       xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+                       (unsigned int) ep->desc.bEndpointAddress,
+                       udev->slot_id,
+                       (unsigned int) new_drop_flags,
+                       (unsigned int) new_add_flags,
+                       (unsigned int) new_slot_info);
+       return 0;
+}
+
+/* Add an endpoint to a new possible bandwidth configuration for this device.
+ * Only one call to this function is allowed per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
+ * add the endpoint to the schedule with possibly new parameters denoted by a
+ * different endpoint descriptor in usb_host_endpoint.
+ * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
+ * not allowed.
+ *
+ * The USB core will not allow URBs to be queued to an endpoint until the
+ * configuration or alt setting is installed in the device, so there's no need
+ * for mutual exclusion to protect the xhci->devs[slot_id] structure.
+ */
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+               struct usb_host_endpoint *ep)
+{
+       struct xhci_hcd *xhci;
+       struct xhci_device_control *in_ctx;
+       unsigned int ep_index;
+       struct xhci_ep_ctx *ep_ctx;
+       u32 added_ctxs;
+       unsigned int last_ctx;
+       u32 new_add_flags, new_drop_flags, new_slot_info;
+       int ret = 0;
+
+       ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+       if (ret <= 0)
+               return ret;
+       xhci = hcd_to_xhci(hcd);
+
+       added_ctxs = xhci_get_endpoint_flag(&ep->desc);
+       last_ctx = xhci_last_valid_endpoint(added_ctxs);
+       /* The slot context and endpoint 0 can never be added this way */
+       if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+               /* FIXME when we have to issue an evaluate endpoint command to
+                * deal with ep0 max packet size changing once we get the
+                * descriptors
+                */
+               xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
+                               __func__, added_ctxs);
+               return 0;
+       }
+
+       if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+               xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+                               __func__);
+               return -EINVAL;
+       }
+
+       in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+       ep_index = xhci_get_endpoint_index(&ep->desc);
+       ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+       /* If the HCD has already noted the endpoint is enabled,
+        * ignore this request.
+        */
+       if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+               xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+                               __func__, ep);
+               return 0;
+       }
+
+       /*
+        * Configuration and alternate setting changes must be done in
+        * process context, not interrupt context (or so documentation
+        * for usb_set_interface() and usb_set_configuration() claim).
+        */
+       if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
+                               udev, ep, GFP_KERNEL) < 0) {
+               dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+                               __func__, ep->desc.bEndpointAddress);
+               return -ENOMEM;
+       }
+
+       in_ctx->add_flags |= added_ctxs;
+       new_add_flags = in_ctx->add_flags;
+
+       /* If xhci_endpoint_disable() was called for this endpoint, but the
+        * xHC hasn't been notified yet through the check_bandwidth() call,
+        * this re-adds a new state for the endpoint from the new endpoint
+        * descriptors.  We must drop and re-add this endpoint, so we leave the
+        * drop flags alone.
+        */
+       new_drop_flags = in_ctx->drop_flags;
+
+       /* Update the last valid endpoint context, if we just added one past */
+       if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+               in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+               in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+       }
+       new_slot_info = in_ctx->slot.dev_info;
+
+       xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
+                       (unsigned int) ep->desc.bEndpointAddress,
+                       udev->slot_id,
+                       (unsigned int) new_drop_flags,
+                       (unsigned int) new_add_flags,
+                       (unsigned int) new_slot_info);
+       return 0;
+}
+
+static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+{
+       int ep_num;
+
+       /* Reset the input control context so that a later configure
+        * endpoint command leaves every endpoint untouched: when both the
+        * add flag and the drop flag for an endpoint are zero, the xHC
+        * ignores that endpoint's input context entirely.
+        */
+       virt_dev->in_ctx->add_flags = 0;
+       virt_dev->in_ctx->drop_flags = 0;
+       /* Endpoint 0 is always valid, so it stays the last valid context */
+       virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+       virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+       /* Clear any stale state out of the input endpoint contexts */
+       for (ep_num = 1; ep_num < 31; ep_num++) {
+               struct xhci_ep_ctx *ep_ctx = &virt_dev->in_ctx->ep[ep_num];
+
+               ep_ctx->ep_info = 0;
+               ep_ctx->ep_info2 = 0;
+               ep_ctx->deq[0] = 0;
+               ep_ctx->deq[1] = 0;
+               ep_ctx->tx_info = 0;
+       }
+}
+
+/* Called after one or more calls to xhci_add_endpoint() or
+ * xhci_drop_endpoint().  If this call fails, the USB core is expected
+ * to call xhci_reset_bandwidth().
+ *
+ * Since we are in the middle of changing either configuration or
+ * installing a new alt setting, the USB core won't allow URBs to be
+ * enqueued for any endpoint on the old config or interface.  Nothing
+ * else should be touching the xhci->devs[slot_id] structure, so we
+ * don't need to take the xhci->lock for manipulating that.
+ *
+ * Issues a Configure Endpoint command to commit the accumulated
+ * add/drop flags to the hardware and waits for it to complete.
+ * Returns 0 on success or a negative errno; on failure the caller is
+ * expected to call xhci_reset_bandwidth() to drop pending changes.
+ */
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+       int i;
+       int ret = 0;
+       int timeleft;
+       unsigned long flags;
+       struct xhci_hcd *xhci;
+       struct xhci_virt_device *virt_dev;
+
+       ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+       if (ret <= 0)
+               return ret;
+       xhci = hcd_to_xhci(hcd);
+
+       if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
+               xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+                               __func__);
+               return -EINVAL;
+       }
+       xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+       virt_dev = xhci->devs[udev->slot_id];
+
+       /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
+       virt_dev->in_ctx->add_flags |= SLOT_FLAG;
+       virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
+       virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
+       virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+       xhci_dbg(xhci, "New Input Control Context:\n");
+       xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
+                       LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+       /* Queue the command and ring the doorbell under the lock */
+       spin_lock_irqsave(&xhci->lock, flags);
+       ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+                       udev->slot_id);
+       if (ret < 0) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+               return -ENOMEM;
+       }
+       xhci_ring_cmd_db(xhci);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
+       /* Wait for the configure endpoint command to complete */
+       timeleft = wait_for_completion_interruptible_timeout(
+                       &virt_dev->cmd_completion,
+                       USB_CTRL_SET_TIMEOUT);
+       if (timeleft <= 0) {
+               xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
+                               timeleft == 0 ? "Timeout" : "Signal");
+               /* FIXME cancel the configure endpoint command */
+               return -ETIME;
+       }
+
+       /* cmd_status was filled in by the command completion handler */
+       switch (virt_dev->cmd_status) {
+       case COMP_ENOMEM:
+               dev_warn(&udev->dev, "Not enough host controller resources "
+                               "for new device state.\n");
+               ret = -ENOMEM;
+               /* FIXME: can we allocate more resources for the HC? */
+               break;
+       case COMP_BW_ERR:
+               dev_warn(&udev->dev, "Not enough bandwidth "
+                               "for new device state.\n");
+               ret = -ENOSPC;
+               /* FIXME: can we go back to the old state? */
+               break;
+       case COMP_TRB_ERR:
+               /* the HCD set up something wrong */
+               dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
+                               "and endpoint is not disabled.\n");
+               ret = -EINVAL;
+               break;
+       case COMP_SUCCESS:
+               dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+               break;
+       default:
+               xhci_err(xhci, "ERROR: unexpected command completion "
+                               "code 0x%x.\n", virt_dev->cmd_status);
+               ret = -EINVAL;
+               break;
+       }
+       if (ret) {
+               /* Caller should call reset_bandwidth() */
+               return ret;
+       }
+
+       xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
+       xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
+                       LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+
+       xhci_zero_in_ctx(virt_dev);
+       /* Free any old rings; the new rings were installed by the command */
+       for (i = 1; i < 31; ++i) {
+               if (virt_dev->new_ep_rings[i]) {
+                       xhci_ring_free(xhci, virt_dev->ep_rings[i]);
+                       virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
+                       virt_dev->new_ep_rings[i] = NULL;
+               }
+       }
+
+       return ret;
+}
+
+/* Undo any endpoint add/drop changes that were staged but never
+ * committed to the hardware with a configure endpoint command.
+ */
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+       struct xhci_virt_device *virt_dev;
+       struct xhci_hcd *xhci;
+       int ep_num;
+       int ret;
+
+       ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+       if (ret <= 0)
+               return;
+       xhci = hcd_to_xhci(hcd);
+
+       if (!xhci->devs || !xhci->devs[udev->slot_id]) {
+               xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
+                               __func__);
+               return;
+       }
+       xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+       virt_dev = xhci->devs[udev->slot_id];
+
+       /* Throw away rings allocated for endpoints that were added but
+        * never handed to the xHC.
+        */
+       for (ep_num = 0; ep_num < 31; ep_num++) {
+               if (!virt_dev->new_ep_rings[ep_num])
+                       continue;
+               xhci_ring_free(xhci, virt_dev->new_ep_rings[ep_num]);
+               virt_dev->new_ep_rings[ep_num] = NULL;
+       }
+       /* Forget the pending add/drop flags as well */
+       xhci_zero_in_ctx(virt_dev);
+}
+
+/*
+ * Free the xHC's data structures for a device that is going away.  By
+ * the time we get here the device has disconnected, all traffic has
+ * been stopped, and the endpoints have been disabled.  The actual
+ * teardown happens in the command completion handler once the Disable
+ * Slot command finishes.
+ */
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       unsigned long flags;
+       int queued;
+
+       /* Slot ID 0 means the device was never assigned a slot */
+       if (udev->slot_id == 0)
+               return;
+
+       spin_lock_irqsave(&xhci->lock, flags);
+       queued = !xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id);
+       if (queued)
+               xhci_ring_cmd_db(xhci);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+       if (!queued) {
+               xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+               return;
+       }
+       /*
+        * Event command completion handler will free any data structures
+        * associated with the slot.  XXX Can free sleep?
+        */
+}
+
+/*
+ * Issue an Enable Slot command to reserve an xHC device slot for udev
+ * and allocate the driver's per-slot bookkeeping for it.
+ *
+ * Returns 0 if the xHC ran out of device slots, the Enable Slot command
+ * timed out, or allocating memory failed.  Returns 1 on success.
+ */
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       unsigned long flags;
+       int timeleft;
+       int ret;
+
+       spin_lock_irqsave(&xhci->lock, flags);
+       ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+       if (ret) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+               return 0;
+       }
+       xhci_ring_cmd_db(xhci);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
+       /* XXX: how much time for xHC slot assignment? */
+       timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+                       USB_CTRL_SET_TIMEOUT);
+       if (timeleft <= 0) {
+               xhci_warn(xhci, "%s while waiting for a slot\n",
+                               timeleft == 0 ? "Timeout" : "Signal");
+               /* FIXME cancel the enable slot request */
+               return 0;
+       }
+
+       /* xhci->slot_id was filled in by the command completion handler */
+       if (!xhci->slot_id) {
+               xhci_err(xhci, "Error while assigning device slot ID\n");
+               return 0;
+       }
+       /* xhci_alloc_virt_device() does not touch rings; no need to lock */
+       if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+               /* Disable slot, if we can do it without mem alloc */
+               xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
+               spin_lock_irqsave(&xhci->lock, flags);
+               if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+                       xhci_ring_cmd_db(xhci);
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               return 0;
+       }
+       udev->slot_id = xhci->slot_id;
+       /* Is this a LS or FS device under a HS hub? */
+       /* Hub or peripheral? */
+       return 1;
+}
+
+/*
+ * Issue an Address Device command (which will issue a SetAddress request to
+ * the device).
+ * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
+ * we should only issue and wait on one address command at the same time.
+ *
+ * We add one to the device address issued by the hardware because the USB core
+ * uses address 1 for the root hubs (even though they're not really devices).
+ *
+ * Returns 0 on success, a negative errno on failure.
+ */
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+       unsigned long flags;
+       int timeleft;
+       struct xhci_virt_device *virt_dev;
+       int ret = 0;
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       u32 temp;
+
+       if (!udev->slot_id) {
+               xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+               return -EINVAL;
+       }
+
+       virt_dev = xhci->devs[udev->slot_id];
+
+       /* If this is a Set Address to an unconfigured device, setup ep 0 */
+       if (!udev->config)
+               xhci_setup_addressable_virt_dev(xhci, udev);
+       /* Otherwise, assume the core has the device configured how it wants */
+
+       spin_lock_irqsave(&xhci->lock, flags);
+       ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+                       udev->slot_id);
+       if (ret) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+               return ret;
+       }
+       xhci_ring_cmd_db(xhci);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
+       /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+       timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+                       USB_CTRL_SET_TIMEOUT);
+       /* FIXME: From section 4.3.4: "Software shall be responsible for timing
+        * the SetAddress() "recovery interval" required by USB and aborting the
+        * command on a timeout.
+        */
+       if (timeleft <= 0) {
+               xhci_warn(xhci, "%s while waiting for address device command\n",
+                               timeleft == 0 ? "Timeout" : "Signal");
+               /* FIXME cancel the address device command */
+               return -ETIME;
+       }
+
+       /* cmd_status was filled in by the command completion handler */
+       switch (virt_dev->cmd_status) {
+       case COMP_CTX_STATE:
+       case COMP_EBADSLT:
+               xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
+                               udev->slot_id);
+               ret = -EINVAL;
+               break;
+       case COMP_TX_ERR:
+               dev_warn(&udev->dev, "Device not responding to set address.\n");
+               ret = -EPROTO;
+               break;
+       case COMP_SUCCESS:
+               xhci_dbg(xhci, "Successful Address Device command\n");
+               break;
+       default:
+               xhci_err(xhci, "ERROR: unexpected command completion "
+                               "code 0x%x.\n", virt_dev->cmd_status);
+               ret = -EINVAL;
+               break;
+       }
+       if (ret)
+               return ret;
+       temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
+       xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
+       temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
+       xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
+       xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
+                       udev->slot_id,
+                       &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
+                       xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
+       xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
+                       udev->slot_id,
+                       &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
+                       xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+       xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+                       (unsigned long long)virt_dev->out_ctx_dma);
+       xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+       xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+       xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+       xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+       /*
+        * USB core uses address 1 for the roothubs, so we add one to the
+        * address given back to us by the HC.
+        */
+       udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+       /* Zero the input context control for later use */
+       virt_dev->in_ctx->add_flags = 0;
+       virt_dev->in_ctx->drop_flags = 0;
+       /* Mirror flags in the output context for future ep enable/disable */
+       virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+       virt_dev->out_ctx->drop_flags = 0;
+
+       xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
+       /* XXX Meh, not sure if anyone else but choose_address uses this. */
+       set_bit(udev->devnum, udev->bus->devmap.devicemap);
+
+       return 0;
+}
+
+/* Report the current frame number to the USB core. */
+int xhci_get_frame(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       u32 mfindex;
+
+       /* EHCI mods by the periodic size.  Why? */
+       mfindex = xhci_readl(xhci, &xhci->run_regs->microframe_index);
+       /* Eight microframes per frame */
+       return mfindex >> 3;
+}
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/* Module init: register the PCI glue and sanity-check the layout of the
+ * hardware-defined structures.
+ */
+static int __init xhci_hcd_init(void)
+{
+#ifdef CONFIG_PCI
+       int retval = 0;
+
+       retval = xhci_register_pci();
+
+       if (retval < 0) {
+               printk(KERN_DEBUG "Problem registering PCI driver.\n");
+               return retval;
+       }
+#endif
+       /*
+        * Check the compiler generated sizes of structures that must be laid
+        * out in specific ways for hardware access.
+        */
+       BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
+       BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
+       BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
+       /* xhci_device_control has eight fields, and also
+        * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
+        */
+       BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
+       BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
+       BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
+       BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+       BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
+       BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+       /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+       BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+       return 0;
+}
+module_init(xhci_hcd_init);
+
+/* Module exit: unregister the PCI glue registered in xhci_hcd_init(). */
+static void __exit xhci_hcd_cleanup(void)
+{
+#ifdef CONFIG_PCI
+       xhci_unregister_pci();
+#endif
+}
+module_exit(xhci_hcd_cleanup);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
new file mode 100644 (file)
index 0000000..eac5b53
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+
+/* Fill in a hub descriptor for the xHCI root hub. */
+static void xhci_hub_descriptor(struct xhci_hcd *xhci,
+               struct usb_hub_descriptor *desc)
+{
+       u16 bitmap_len;
+       u16 characteristics;
+       int ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+       /* USB 3.0 hubs have a different descriptor, but we fake this for now */
+       desc->bDescriptorType = 0x29;
+       desc->bPwrOn2PwrGood = 10;      /* xhci section 5.4.9 says 20ms max */
+       desc->bHubContrCurrent = 0;
+       desc->bNbrPorts = ports;
+
+       /* One bit per port, rounded up to whole bytes, for each bitmap */
+       bitmap_len = 1 + (ports / 8);
+       desc->bDescLength = 7 + 2 * bitmap_len;
+       /* Why does core/hcd.h define bitmap?  It's just confusing. */
+       memset(&desc->DeviceRemovable[0], 0, bitmap_len);
+       memset(&desc->DeviceRemovable[bitmap_len], 0xff, bitmap_len);
+
+       /* Hub characteristics, per table 11-13 in USB 2.0 spec.
+        * Ugh, these should be #defines, FIXME */
+       /* Bits 1:0 - support port power switching, or power always on */
+       characteristics = HCC_PPC(xhci->hcc_params) ? 0x0001 : 0x0002;
+       /* Bit  2 - root hubs are not part of a compound device */
+       /* Bits 4:3 - individual port over current protection */
+       characteristics |= 0x0008;
+       /* Bits 6:5 - no TTs in root ports */
+       /* Bit  7 - no port indicators */
+       desc->wHubCharacteristics = (__force __u16) cpu_to_le16(characteristics);
+}
+
+/* Translate the speed bits of a port status register value into the
+ * USB core's port feature bits.  Returns 0 (full speed) if none match.
+ */
+static unsigned int xhci_port_speed(unsigned int port_status)
+{
+       unsigned int speed = 0;
+
+       if (DEV_LOWSPEED(port_status))
+               speed = 1 << USB_PORT_FEAT_LOWSPEED;
+       else if (DEV_HIGHSPEED(port_status))
+               speed = 1 << USB_PORT_FEAT_HIGHSPEED;
+       else if (DEV_SUPERSPEED(port_status))
+               speed = 1 << USB_PORT_FEAT_SUPERSPEED;
+       /*
+        * FIXME: Yes, we should check for full speed, but the core uses that as
+        * a default in portspeed() in usb/core/hub.c (which is the only place
+        * USB_PORT_FEAT_*SPEED is used).
+        */
+       return speed;
+}
+
+/* Classification of the PORTSC register bits by write semantics; used by
+ * xhci_port_state_to_neutral() below to build a value that can be safely
+ * written back without perturbing the port state.
+ */
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0, 3, 10:13, 30
+ * connect status, over-current status, port speed, and device removable.
+ * connect status and port speed are also sticky - meaning they're in
+ * the AUX well and they aren't changed by a hot, warm, or cold reset.
+ */
+#define        XHCI_PORT_RO    ((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8, 9, 14:15, 25:27
+ * link state, port power, port indicator state, "wake on" enable state
+ */
+#define XHCI_PORT_RWS  ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
+/*
+ * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
+ * bit 4 (port reset)
+ */
+#define        XHCI_PORT_RW1S  ((1<<4))
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bits 1, 17, 18, 19, 20, 21, 22, 23
+ * port enable/disable, and
+ * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
+ * over-current, reset, link state, and L1 change
+ */
+#define XHCI_PORT_RW1CS        ((1<<1) | (0x7f<<17))
+/*
+ * Bit 16 is RW, and writing a '1' to it causes the link state control to be
+ * latched in
+ */
+#define        XHCI_PORT_RW    ((1<<16))
+/*
+ * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
+ * bits 2, 24, 28:31
+ */
+#define        XHCI_PORT_RZ    ((1<<2) | (1<<24) | (0xf<<28))
+
+/*
+ * Given a port state, return a value that, if written back to the port
+ * status control register, would leave the port in the same state.
+ * Only the Read Only (RO) bits and the read/write-status (RWS) bits —
+ * where writing 0 clears and 1 sets — need to be preserved; writing a
+ * '0' to the remaining bit classes (RW1S, RW1CS, RW, RZ) has no effect.
+ */
+static u32 xhci_port_state_to_neutral(u32 state)
+{
+       /* Save read-only status and port state */
+       return state & (XHCI_PORT_RO | XHCI_PORT_RWS);
+}
+
+/*
+ * Root hub control request handler.  Services the hub-class requests
+ * the USB core issues against the xHCI root hub: hub status/descriptor
+ * queries and per-port status query and feature set/clear.  All register
+ * access happens under xhci->lock.  Unsupported requests "stall" by
+ * returning -EPIPE.
+ */
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+               u16 wIndex, char *buf, u16 wLength)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       int ports;
+       unsigned long flags;
+       u32 temp, status;
+       int retval = 0;
+       u32 __iomem *addr;
+       char *port_change_bit;
+
+       ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+       spin_lock_irqsave(&xhci->lock, flags);
+       switch (typeReq) {
+       case GetHubStatus:
+               /* No power source, over-current reported per port */
+               memset(buf, 0, 4);
+               break;
+       case GetHubDescriptor:
+               xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
+               break;
+       case GetPortStatus:
+               /* wIndex is 1-based from the core's point of view */
+               if (!wIndex || wIndex > ports)
+                       goto error;
+               wIndex--;
+               status = 0;
+               addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+               temp = xhci_readl(xhci, addr);
+               xhci_dbg(xhci, "get port status, actual port %d status  = 0x%x\n", wIndex, temp);
+
+               /* wPortChange bits */
+               if (temp & PORT_CSC)
+                       status |= 1 << USB_PORT_FEAT_C_CONNECTION;
+               if (temp & PORT_PEC)
+                       status |= 1 << USB_PORT_FEAT_C_ENABLE;
+               if ((temp & PORT_OCC))
+                       status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+               /*
+                * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
+                * changes
+                */
+               /* wPortStatus bits */
+               if (temp & PORT_CONNECT) {
+                       status |= 1 << USB_PORT_FEAT_CONNECTION;
+                       status |= xhci_port_speed(temp);
+               }
+               if (temp & PORT_PE)
+                       status |= 1 << USB_PORT_FEAT_ENABLE;
+               if (temp & PORT_OC)
+                       status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
+               if (temp & PORT_RESET)
+                       status |= 1 << USB_PORT_FEAT_RESET;
+               if (temp & PORT_POWER)
+                       status |= 1 << USB_PORT_FEAT_POWER;
+               xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+               /* buf may not be 4-byte aligned */
+               put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+               break;
+       case SetPortFeature:
+               wIndex &= 0xff;
+               if (!wIndex || wIndex > ports)
+                       goto error;
+               wIndex--;
+               addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
+               temp = xhci_readl(xhci, addr);
+               /* Mask off bits where writing could change the port state */
+               temp = xhci_port_state_to_neutral(temp);
+               switch (wValue) {
+               case USB_PORT_FEAT_POWER:
+                       /*
+                        * Turn on ports, even if there isn't per-port switching.
+                        * HC will report connect events even before this is set.
+                        * However, khubd will ignore the roothub events until
+                        * the roothub is registered.
+                        */
+                       xhci_writel(xhci, temp | PORT_POWER, addr);
+
+                       temp = xhci_readl(xhci, addr);
+                       xhci_dbg(xhci, "set port power, actual port %d status  = 0x%x\n", wIndex, temp);
+                       break;
+               case USB_PORT_FEAT_RESET:
+                       temp = (temp | PORT_RESET);
+                       xhci_writel(xhci, temp, addr);
+
+                       temp = xhci_readl(xhci, addr);
+                       xhci_dbg(xhci, "set port reset, actual port %d status  = 0x%x\n", wIndex, temp);
+                       break;
+               default:
+                       goto error;
+               }
+               temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+               break;
+       case ClearPortFeature:
+               if (!wIndex || wIndex > ports)
+                       goto error;
+               wIndex--;
+               addr = &xhci->op_regs->port_status_base +
+                       NUM_PORT_REGS*(wIndex & 0xff);
+               temp = xhci_readl(xhci, addr);
+               temp = xhci_port_state_to_neutral(temp);
+               switch (wValue) {
+               case USB_PORT_FEAT_C_RESET:
+                       status = PORT_RC;
+                       port_change_bit = "reset";
+                       break;
+               case USB_PORT_FEAT_C_CONNECTION:
+                       status = PORT_CSC;
+                       port_change_bit = "connect";
+                       break;
+               case USB_PORT_FEAT_C_OVER_CURRENT:
+                       status = PORT_OCC;
+                       port_change_bit = "over-current";
+                       break;
+               default:
+                       goto error;
+               }
+               /* Change bits are all write 1 to clear */
+               xhci_writel(xhci, temp | status, addr);
+               temp = xhci_readl(xhci, addr);
+               xhci_dbg(xhci, "clear port %s change, actual port %d status  = 0x%x\n",
+                               port_change_bit, wIndex, temp);
+               temp = xhci_readl(xhci, addr); /* unblock any posted writes */
+               break;
+       default:
+error:
+               /* "stall" on error */
+               retval = -EPIPE;
+       }
+       spin_unlock_irqrestore(&xhci->lock, flags);
+       return retval;
+}
+
+/*
+ * Returns 0 if the status hasn't changed, or the number of bytes in buf.
+ * Ports are 0-indexed from the HCD point of view,
+ * and 1-indexed from the USB core pointer of view.
+ * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
+ *
+ * Note that the status change bits will be cleared as soon as a port status
+ * change event is generated, so we use the saved status from that event.
+ */
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+       unsigned long flags;
+       u32 temp, status;
+       int i, retval;
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       int ports;
+       u32 __iomem *addr;
+
+       ports = HCS_MAX_PORTS(xhci->hcs_params1);
+
+       /* Initial status is no changes */
+       buf[0] = 0;
+       status = 0;
+       /* Bit 0 of buf[0] is reserved for the hub itself, so port i maps to
+        * core bit i+1; a second byte is needed once more than 7 ports exist.
+        */
+       if (ports > 7) {
+               buf[1] = 0;
+               retval = 2;
+       } else {
+               retval = 1;
+       }
+
+       spin_lock_irqsave(&xhci->lock, flags);
+       /* For each port, did anything change?  If so, set that bit in buf. */
+       for (i = 0; i < ports; i++) {
+               addr = &xhci->op_regs->port_status_base +
+                       NUM_PORT_REGS*i;
+               temp = xhci_readl(xhci, addr);
+               /* Connect, port-enable, or over-current change pending? */
+               if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
+                       if (i < 7)
+                               buf[0] |= 1 << (i + 1);
+                       else
+                               buf[1] |= 1 << (i - 7);
+                       status = 1;
+               }
+       }
+       spin_unlock_irqrestore(&xhci->lock, flags);
+       return status ? retval : 0;
+}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
new file mode 100644 (file)
index 0000000..c8a72de
--- /dev/null
@@ -0,0 +1,769 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+
+#include "xhci.h"
+
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * Section 4.11.1.1:
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+       struct xhci_segment *seg;
+       dma_addr_t      dma;
+
+       seg = kzalloc(sizeof *seg, flags);
+       if (!seg)
+               return NULL;
+       xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
+
+       seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
+       if (!seg->trbs) {
+               kfree(seg);
+               return NULL;
+       }
+       xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
+                       seg->trbs, (unsigned long long)dma);
+
+       /* Pool memory is not zeroed; spec 4.11.1.1 requires all-zero TRBs. */
+       memset(seg->trbs, 0, SEGMENT_SIZE);
+       seg->dma = dma;
+       seg->next = NULL;
+
+       return seg;
+}
+
+/* Free one ring segment: return its TRBs to the DMA pool, then free the
+ * private struct.  Tolerates NULL seg and NULL seg->trbs so callers can
+ * tear down partially built rings.
+ */
+static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+       if (!seg)
+               return;
+       if (seg->trbs) {
+               xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
+                               seg->trbs, (unsigned long long)seg->dma);
+               dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
+               seg->trbs = NULL;
+       }
+       xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
+       kfree(seg);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment.  The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
+               struct xhci_segment *next, bool link_trbs)
+{
+       u32 val;
+
+       if (!prev || !next)
+               return;
+       prev->next = next;
+       if (link_trbs) {
+               /* Only the low 32 bits of the next segment's DMA address are
+                * written; NOTE(review): assumes segment_ptr[1] is already 0
+                * from the segment memset -- confirm for 64-bit DMA addresses.
+                */
+               prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+
+               /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
+               val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+               val &= ~TRB_TYPE_BITMASK;
+               val |= TRB_TYPE(TRB_LINK);
+               prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+       }
+       xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+                       (unsigned long long)prev->dma,
+                       (unsigned long long)next->dma);
+}
+
+/* XXX: Do we need the hcd structure in all these functions? */
+/* Free every segment of a (possibly circular) ring, then the ring itself.
+ * The walk starts at first_seg->next and stops when it wraps back around,
+ * so both linked rings and single-segment rings are handled.
+ */
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+       struct xhci_segment *seg;
+       struct xhci_segment *first_seg;
+
+       if (!ring || !ring->first_seg)
+               return;
+       first_seg = ring->first_seg;
+       seg = first_seg->next;
+       xhci_dbg(xhci, "Freeing ring at %p\n", ring);
+       while (seg != first_seg) {
+               struct xhci_segment *next = seg->next;
+               xhci_segment_free(xhci, seg);
+               seg = next;
+       }
+       xhci_segment_free(xhci, first_seg);
+       ring->first_seg = NULL;
+       kfree(ring);
+}
+
+/*
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ *
+ * Returns NULL on allocation failure (any partially built ring is freed).
+ */
+static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+               unsigned int num_segs, bool link_trbs, gfp_t flags)
+{
+       struct xhci_ring        *ring;
+       struct xhci_segment     *prev;
+
+       ring = kzalloc(sizeof *(ring), flags);
+       xhci_dbg(xhci, "Allocating ring at %p\n", ring);
+       if (!ring)
+               return NULL;
+
+       INIT_LIST_HEAD(&ring->td_list);
+       INIT_LIST_HEAD(&ring->cancelled_td_list);
+       if (num_segs == 0)
+               return ring;
+
+       ring->first_seg = xhci_segment_alloc(xhci, flags);
+       if (!ring->first_seg)
+               goto fail;
+       num_segs--;
+
+       prev = ring->first_seg;
+       while (num_segs > 0) {
+               struct xhci_segment     *next;
+
+               next = xhci_segment_alloc(xhci, flags);
+               if (!next)
+                       goto fail;
+               xhci_link_segments(xhci, prev, next, link_trbs);
+
+               prev = next;
+               num_segs--;
+       }
+       /* Close the circle: last segment links back to the first. */
+       xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
+
+       if (link_trbs) {
+               /* See section 4.9.2.1 and 6.4.4.1 */
+               prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+               xhci_dbg(xhci, "Wrote link toggle flag to"
+                               " segment %p (virtual), 0x%llx (DMA)\n",
+                               prev, (unsigned long long)prev->dma);
+       }
+       /* The ring is empty, so the enqueue pointer == dequeue pointer */
+       ring->enqueue = ring->first_seg->trbs;
+       ring->enq_seg = ring->first_seg;
+       ring->dequeue = ring->enqueue;
+       ring->deq_seg = ring->first_seg;
+       /* The ring is initialized to 0. The producer must write 1 to the cycle
+        * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
+        * compare CCS to the cycle bit to check ownership, so CCS = 1.
+        */
+       ring->cycle_state = 1;
+
+       return ring;
+
+fail:
+       xhci_ring_free(xhci, ring);
+       return NULL;
+}
+
+/* All the xhci_tds in the ring's TD list should be freed at this point.
+ * Clears the slot's DCBAA entry, frees its endpoint rings and input/output
+ * contexts, then frees and NULLs the virt device itself.  Safe to call on
+ * an unallocated slot (no-op).
+ */
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+{
+       struct xhci_virt_device *dev;
+       int i;
+
+       /* Slot ID 0 is reserved */
+       if (slot_id == 0 || !xhci->devs[slot_id])
+               return;
+
+       dev = xhci->devs[slot_id];
+       xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
+       xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+       if (!dev)
+               return;
+
+       for (i = 0; i < 31; ++i)
+               if (dev->ep_rings[i])
+                       xhci_ring_free(xhci, dev->ep_rings[i]);
+
+       if (dev->in_ctx)
+               dma_pool_free(xhci->device_pool,
+                               dev->in_ctx, dev->in_ctx_dma);
+       if (dev->out_ctx)
+               dma_pool_free(xhci->device_pool,
+                               dev->out_ctx, dev->out_ctx_dma);
+       kfree(xhci->devs[slot_id]);
+       xhci->devs[slot_id] = NULL;
+}
+
+/* Allocate per-slot state: the virt device struct, its input and output
+ * device contexts (from the device DMA pool), and the endpoint 0 ring.
+ * Publishes the output context's DMA address in the DCBAA for the HC.
+ * Returns 1 on success, 0 on failure (everything allocated so far is freed).
+ */
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+               struct usb_device *udev, gfp_t flags)
+{
+       dma_addr_t      dma;
+       struct xhci_virt_device *dev;
+
+       /* Slot ID 0 is reserved */
+       if (slot_id == 0 || xhci->devs[slot_id]) {
+               xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
+               return 0;
+       }
+
+       xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
+       if (!xhci->devs[slot_id])
+               return 0;
+       dev = xhci->devs[slot_id];
+
+       /* Allocate the (output) device context that will be used in the HC */
+       dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+       if (!dev->out_ctx)
+               goto fail;
+       dev->out_ctx_dma = dma;
+       xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
+                       (unsigned long long)dma);
+       memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+
+       /* Allocate the (input) device context for address device command */
+       dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+       if (!dev->in_ctx)
+               goto fail;
+       dev->in_ctx_dma = dma;
+       xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
+                       (unsigned long long)dma);
+       memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+
+       /* Allocate endpoint 0 ring */
+       dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
+       if (!dev->ep_rings[0])
+               goto fail;
+
+       init_completion(&dev->cmd_completion);
+
+       /*
+        * Point to output device context in dcbaa; skip the output control
+        * context, which is eight 32 bit fields (or 32 bytes long)
+        */
+       xhci->dcbaa->dev_context_ptrs[2*slot_id] =
+               (u32) dev->out_ctx_dma + (32);
+       xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
+                       slot_id,
+                       &xhci->dcbaa->dev_context_ptrs[2*slot_id],
+                       (unsigned long long)dev->out_ctx_dma);
+       /* High dword of the 64-bit DCBAA pointer is zero (32-bit DMA here). */
+       xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+
+       return 1;
+fail:
+       xhci_free_virt_device(xhci, slot_id);
+       return 0;
+}
+
+/* Setup an xHCI virtual device for a Set Address command.
+ * Fills in the input context's slot context (speed, route string, root hub
+ * port, TT info) and endpoint 0 context (type, max packet, dequeue pointer).
+ * Returns 0 on success, -EINVAL for a bad slot ID or unsupported speed.
+ */
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
+{
+       struct xhci_virt_device *dev;
+       struct xhci_ep_ctx      *ep0_ctx;
+       struct usb_device       *top_dev;
+
+       dev = xhci->devs[udev->slot_id];
+       /* Slot ID 0 is reserved */
+       if (udev->slot_id == 0 || !dev) {
+               xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
+                               udev->slot_id);
+               return -EINVAL;
+       }
+       ep0_ctx = &dev->in_ctx->ep[0];
+
+       /* 2) New slot context and endpoint 0 context are valid*/
+       dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+
+       /* 3) Only the control endpoint is valid - one endpoint context */
+       dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+
+       switch (udev->speed) {
+       case USB_SPEED_SUPER:
+               /* Only SuperSpeed devices carry a route string here. */
+               dev->in_ctx->slot.dev_info |= (u32) udev->route;
+               dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
+               break;
+       case USB_SPEED_HIGH:
+               dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
+               break;
+       case USB_SPEED_FULL:
+               dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
+               break;
+       case USB_SPEED_LOW:
+               dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
+               break;
+       case USB_SPEED_VARIABLE:
+               xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
+               return -EINVAL;
+               break;
+       default:
+               /* Speed was set earlier, this shouldn't happen. */
+               BUG();
+       }
+       /* Find the root hub port this device is under */
+       for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
+                       top_dev = top_dev->parent)
+               /* Found device below root hub */;
+       dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+       xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
+
+       /* Is this a LS/FS device under a HS hub? */
+       /*
+        * FIXME: I don't think this is right, where does the TT info for the
+        * roothub or parent hub come from?
+        */
+       if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
+                       udev->tt) {
+               dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
+               dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+       }
+       xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
+       xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
+
+       /* Step 4 - ring already allocated */
+       /* Step 5 */
+       ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+       /*
+        * See section 4.3 bullet 6:
+        * The default Max Packet size for ep0 is "8 bytes for a USB2
+        * LS/FS/HS device or 512 bytes for a USB3 SS device"
+        * XXX: Not sure about wireless USB devices.
+        */
+       if (udev->speed == USB_SPEED_SUPER)
+               ep0_ctx->ep_info2 |= MAX_PACKET(512);
+       else
+               ep0_ctx->ep_info2 |= MAX_PACKET(8);
+       /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
+       ep0_ctx->ep_info2 |= MAX_BURST(0);
+       ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+
+       /* Dequeue pointer starts at the ring's first TRB; the low bit carries
+        * the consumer cycle state (1 for a fresh ring).
+        */
+       ep0_ctx->deq[0] =
+               dev->ep_rings[0]->first_seg->dma;
+       ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
+       ep0_ctx->deq[1] = 0;
+
+       /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
+
+       return 0;
+}
+
+/* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes".  If xHCI's Interval field
+ * is set to N, it will service the endpoint every 2^(Interval)*125us.
+ *
+ * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
+ * is set to 0.
+ *
+ * Returns the value pre-shifted into the EP context's Interval field.
+ */
+static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+               struct usb_host_endpoint *ep)
+{
+       unsigned int interval = 0;
+
+       switch (udev->speed) {
+       case USB_SPEED_HIGH:
+               /* Max NAK rate */
+               if (usb_endpoint_xfer_control(&ep->desc) ||
+                               usb_endpoint_xfer_bulk(&ep->desc))
+                       interval = ep->desc.bInterval;
+               /* Fall through - SS and HS isoc/int have same decoding */
+       case USB_SPEED_SUPER:
+               if (usb_endpoint_xfer_int(&ep->desc) ||
+                               usb_endpoint_xfer_isoc(&ep->desc)) {
+                       /* bInterval is 2^(bInterval - 1) microframes, so the
+                        * xHCI exponent is bInterval - 1, clamped to 15.
+                        */
+                       if (ep->desc.bInterval == 0)
+                               interval = 0;
+                       else
+                               interval = ep->desc.bInterval - 1;
+                       if (interval > 15)
+                               interval = 15;
+                       /* Warn only when clamping actually changed the value
+                        * (was "bInterval + 1", which triggered almost always).
+                        */
+                       if (interval != ep->desc.bInterval - 1)
+                               dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+                                               ep->desc.bEndpointAddress, 1 << interval);
+               }
+               break;
+       /* Convert bInterval (in 1-255 frames) to microframes and round down to
+        * nearest power of 2.
+        */
+       case USB_SPEED_FULL:
+       case USB_SPEED_LOW:
+               if (usb_endpoint_xfer_int(&ep->desc) ||
+                               usb_endpoint_xfer_isoc(&ep->desc)) {
+                       interval = fls(8*ep->desc.bInterval) - 1;
+                       if (interval > 10)
+                               interval = 10;
+                       if (interval < 3)
+                               interval = 3;
+                       if ((1 << interval) != 8*ep->desc.bInterval)
+                               dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
+                                               ep->desc.bEndpointAddress, 1 << interval);
+               }
+               break;
+       default:
+               BUG();
+       }
+       return EP_INTERVAL(interval);
+}
+
+/* Map a USB endpoint descriptor's transfer type and direction to the
+ * xHCI EP Type field value (control endpoints have no direction).
+ */
+static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+               struct usb_host_endpoint *ep)
+{
+       int in;
+       u32 type;
+
+       in = usb_endpoint_dir_in(&ep->desc);
+       if (usb_endpoint_xfer_control(&ep->desc)) {
+               type = EP_TYPE(CTRL_EP);
+       } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
+               if (in)
+                       type = EP_TYPE(BULK_IN_EP);
+               else
+                       type = EP_TYPE(BULK_OUT_EP);
+       } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
+               if (in)
+                       type = EP_TYPE(ISOC_IN_EP);
+               else
+                       type = EP_TYPE(ISOC_OUT_EP);
+       } else if (usb_endpoint_xfer_int(&ep->desc)) {
+               if (in)
+                       type = EP_TYPE(INT_IN_EP);
+               else
+                       type = EP_TYPE(INT_OUT_EP);
+       } else {
+               /* The four transfer types above are exhaustive. */
+               BUG();
+       }
+       return type;
+}
+
+/* Allocate a new ring for this endpoint and fill in its input endpoint
+ * context (dequeue pointer, interval, error count, type, max packet/burst).
+ * The new ring is parked in new_ep_rings[] until the config request succeeds.
+ * Returns 0 on success, -ENOMEM if the ring allocation fails.
+ */
+int xhci_endpoint_init(struct xhci_hcd *xhci,
+               struct xhci_virt_device *virt_dev,
+               struct usb_device *udev,
+               struct usb_host_endpoint *ep,
+               gfp_t mem_flags)
+{
+       unsigned int ep_index;
+       struct xhci_ep_ctx *ep_ctx;
+       struct xhci_ring *ep_ring;
+       unsigned int max_packet;
+       unsigned int max_burst;
+
+       ep_index = xhci_get_endpoint_index(&ep->desc);
+       ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+       /* Set up the endpoint ring */
+       virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
+       if (!virt_dev->new_ep_rings[ep_index])
+               return -ENOMEM;
+       ep_ring = virt_dev->new_ep_rings[ep_index];
+       /* Low bit of the dequeue pointer carries the consumer cycle state. */
+       ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
+       ep_ctx->deq[1] = 0;
+
+       ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+
+       /* FIXME dig Mult and streams info out of ep companion desc */
+
+       /* Allow 3 retries for everything but isoc */
+       if (!usb_endpoint_xfer_isoc(&ep->desc))
+               ep_ctx->ep_info2 = ERROR_COUNT(3);
+       else
+               ep_ctx->ep_info2 = ERROR_COUNT(0);
+
+       ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+
+       /* Set the max packet size and max burst */
+       /* NOTE(review): wMaxPacketSize is used without le16_to_cpu throughout
+        * this switch -- confirm on big-endian hosts.
+        */
+       switch (udev->speed) {
+       case USB_SPEED_SUPER:
+               max_packet = ep->desc.wMaxPacketSize;
+               ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+               /* dig out max burst from ep companion desc */
+               /* (max_packet is reused here to hold bMaxBurst) */
+               max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+               ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+               break;
+       case USB_SPEED_HIGH:
+               /* bits 11:12 specify the number of additional transaction
+                * opportunities per microframe (USB 2.0, section 9.6.6)
+                */
+               if (usb_endpoint_xfer_isoc(&ep->desc) ||
+                               usb_endpoint_xfer_int(&ep->desc)) {
+                       max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+                       ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+               }
+               /* Fall through */
+       case USB_SPEED_FULL:
+       case USB_SPEED_LOW:
+               /* Bits 10:0 hold the max packet size proper. */
+               max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+               ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+               break;
+       default:
+               BUG();
+       }
+       /* FIXME Debug endpoint context */
+       return 0;
+}
+
+/* Zero out an endpoint's input context so the HC treats it as dropped.
+ * Only the context is cleared; the ring is kept (see note below).
+ */
+void xhci_endpoint_zero(struct xhci_hcd *xhci,
+               struct xhci_virt_device *virt_dev,
+               struct usb_host_endpoint *ep)
+{
+       unsigned int ep_index;
+       struct xhci_ep_ctx *ep_ctx;
+
+       ep_index = xhci_get_endpoint_index(&ep->desc);
+       ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+
+       ep_ctx->ep_info = 0;
+       ep_ctx->ep_info2 = 0;
+       ep_ctx->deq[0] = 0;
+       ep_ctx->deq[1] = 0;
+       ep_ctx->tx_info = 0;
+       /* Don't free the endpoint ring until the set interface or configuration
+        * request succeeds.
+        */
+}
+
+/* Tear down everything xhci_mem_init() set up, in reverse order, zeroing
+ * the corresponding HC registers first so the hardware no longer references
+ * the memory being freed.  Safe to call on a partially initialized xhci
+ * (every free is guarded by a NULL check).
+ */
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+       struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+       int size;
+       int i;
+
+       /* Free the Event Ring Segment Table and the actual Event Ring */
+       xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+       xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
+       xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+       xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
+       xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+       size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+       if (xhci->erst.entries)
+               pci_free_consistent(pdev, size,
+                               xhci->erst.entries, xhci->erst.erst_dma_addr);
+       xhci->erst.entries = NULL;
+       xhci_dbg(xhci, "Freed ERST\n");
+       if (xhci->event_ring)
+               xhci_ring_free(xhci, xhci->event_ring);
+       xhci->event_ring = NULL;
+       xhci_dbg(xhci, "Freed event ring\n");
+
+       xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
+       xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+       if (xhci->cmd_ring)
+               xhci_ring_free(xhci, xhci->cmd_ring);
+       xhci->cmd_ring = NULL;
+       xhci_dbg(xhci, "Freed command ring\n");
+
+       /* Slot 0 is reserved, so start at 1. */
+       for (i = 1; i < MAX_HC_SLOTS; ++i)
+               xhci_free_virt_device(xhci, i);
+
+       if (xhci->segment_pool)
+               dma_pool_destroy(xhci->segment_pool);
+       xhci->segment_pool = NULL;
+       xhci_dbg(xhci, "Freed segment pool\n");
+
+       if (xhci->device_pool)
+               dma_pool_destroy(xhci->device_pool);
+       xhci->device_pool = NULL;
+       xhci_dbg(xhci, "Freed device context pool\n");
+
+       xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
+       xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+       if (xhci->dcbaa)
+               pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
+                               xhci->dcbaa, xhci->dcbaa->dma);
+       xhci->dcbaa = NULL;
+
+       xhci->page_size = 0;
+       xhci->page_shift = 0;
+}
+
+/* One-time memory setup for the HC: page size, slot count, DCBAA, DMA
+ * pools, command ring, event ring + ERST, and the per-slot device array.
+ * Returns 0 on success, -ENOMEM on any allocation failure (everything
+ * allocated so far is torn down via xhci_mem_cleanup()).
+ */
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+       dma_addr_t      dma;
+       struct device   *dev = xhci_to_hcd(xhci)->self.controller;
+       unsigned int    val, val2;
+       struct xhci_segment     *seg;
+       u32 page_size;
+       int i;
+
+       page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+       xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+       /* Find the lowest set bit; bit i means 2^(i+12) byte pages. */
+       for (i = 0; i < 16; i++) {
+               if ((0x1 & page_size) != 0)
+                       break;
+               page_size = page_size >> 1;
+       }
+       if (i < 16)
+               xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+       else
+               xhci_warn(xhci, "WARN: no supported page size\n");
+       /* Use 4K pages, since that's common and the minimum the HC supports */
+       xhci->page_shift = 12;
+       xhci->page_size = 1 << xhci->page_shift;
+       xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+
+       /*
+        * Program the Number of Device Slots Enabled field in the CONFIG
+        * register with the max value of slots the HC can handle.
+        */
+       val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
+       xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
+                       (unsigned int) val);
+       val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
+       val |= (val2 & ~HCS_SLOTS_MASK);
+       xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
+                       (unsigned int) val);
+       xhci_writel(xhci, val, &xhci->op_regs->config_reg);
+
+       /*
+        * Section 5.4.8 - doorbell array must be
+        * "physically contiguous and 64-byte (cache line) aligned".
+        */
+       xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
+                       sizeof(*xhci->dcbaa), &dma);
+       if (!xhci->dcbaa)
+               goto fail;
+       memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
+       xhci->dcbaa->dma = dma;
+       xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+                       (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
+       xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
+       xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+
+       /*
+        * Initialize the ring segment pool.  The ring must be a contiguous
+        * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
+        * however, the command ring segment needs 64-byte aligned segments,
+        * so we pick the greater alignment need.
+        */
+       xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
+                       SEGMENT_SIZE, 64, xhci->page_size);
+       /* See Table 46 and Note on Figure 55 */
+       /* FIXME support 64-byte contexts */
+       xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
+                       sizeof(struct xhci_device_control),
+                       64, xhci->page_size);
+       if (!xhci->segment_pool || !xhci->device_pool)
+               goto fail;
+
+       /* Set up the command ring to have one segments for now. */
+       xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
+       if (!xhci->cmd_ring)
+               goto fail;
+       xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+       xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+                       (unsigned long long)xhci->cmd_ring->first_seg->dma);
+
+       /* Set the address in the Command Ring Control register */
+       val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
+       val = (val & ~CMD_RING_ADDR_MASK) |
+               (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+               xhci->cmd_ring->cycle_state;
+       xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
+       xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
+       xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
+       xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+       xhci_dbg_cmd_ptrs(xhci);
+
+       val = xhci_readl(xhci, &xhci->cap_regs->db_off);
+       val &= DBOFF_MASK;
+       xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
+                       " from cap regs base addr\n", val);
+       xhci->dba = (void *) xhci->cap_regs + val;
+       xhci_dbg_regs(xhci);
+       xhci_print_run_regs(xhci);
+       /* Set ir_set to interrupt register set 0 */
+       xhci->ir_set = (void *) xhci->run_regs->ir_set;
+
+       /*
+        * Event ring setup: Allocate a normal ring, but also setup
+        * the event ring segment table (ERST).  Section 4.9.3.
+        */
+       xhci_dbg(xhci, "// Allocating event ring\n");
+       xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
+       if (!xhci->event_ring)
+               goto fail;
+
+       xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
+                       sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
+       if (!xhci->erst.entries)
+               goto fail;
+       xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+                       (unsigned long long)dma);
+
+       memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+       xhci->erst.num_entries = ERST_NUM_SEGS;
+       xhci->erst.erst_dma_addr = dma;
+       xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+                       xhci->erst.num_entries,
+                       xhci->erst.entries,
+                       (unsigned long long)xhci->erst.erst_dma_addr);
+
+       /* set ring base address and size for each segment table entry */
+       for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
+               struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+               entry->seg_addr[0] = seg->dma;
+               entry->seg_addr[1] = 0;
+               entry->seg_size = TRBS_PER_SEGMENT;
+               entry->rsvd = 0;
+               seg = seg->next;
+       }
+
+       /* set ERST count with the number of entries in the segment table */
+       val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+       val &= ERST_SIZE_MASK;
+       val |= ERST_NUM_SEGS;
+       xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+                       val);
+       xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+       xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+       /* set the segment table base address */
+       xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+                       (unsigned long long)xhci->erst.erst_dma_addr);
+       val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
+       val &= ERST_PTR_MASK;
+       val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
+       xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
+       xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+
+       /* Set the event ring dequeue address */
+       xhci_set_hc_event_deq(xhci);
+       xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+       xhci_print_ir_set(xhci, xhci->ir_set, 0);
+
+       /*
+        * XXX: Might need to set the Interrupter Moderation Register to
+        * something other than the default (~1ms minimum between interrupts).
+        * See section 5.5.1.2.
+        */
+       init_completion(&xhci->addr_dev);
+       for (i = 0; i < MAX_HC_SLOTS; ++i)
+               xhci->devs[i] = NULL;
+
+       return 0;
+fail:
+       xhci_warn(xhci, "Couldn't initialize memory\n");
+       xhci_mem_cleanup(xhci);
+       return -ENOMEM;
+}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
new file mode 100644 (file)
index 0000000..1462709
--- /dev/null
@@ -0,0 +1,166 @@
+/*
+ * xHCI host controller driver PCI Bus Glue.
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pci.h>
+
+#include "xhci.h"
+
+static const char hcd_name[] = "xhci_hcd";
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
+{
+       /*
+        * TODO: Implement finding debug ports later.
+        * TODO: see if there are any quirks that need to be added to handle
+        * new extended capabilities.
+        */
+
+       /* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
+       if (!pci_set_mwi(pdev))
+               xhci_dbg(xhci, "MWI active\n");
+
+       /* Nothing else to re-initialize yet; this path always succeeds. */
+       xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
+       return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_pci_setup(struct usb_hcd *hcd)
+{
+       struct xhci_hcd         *xhci = hcd_to_xhci(hcd);
+       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
+       int                     retval;
+
+       /* Locate the capability, operational, and runtime register sets
+        * inside the MMIO region the USB core mapped at hcd->regs.  The
+        * operational and runtime offsets come from the capability regs.
+        */
+       xhci->cap_regs = hcd->regs;
+       xhci->op_regs = hcd->regs +
+               HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
+       xhci->run_regs = hcd->regs +
+               (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
+       /* Cache read-only capability registers */
+       xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
+       xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
+       xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
+       xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
+       xhci_print_registers(xhci);
+
+       /* Make sure the HC is halted. */
+       retval = xhci_halt(xhci);
+       if (retval)
+               return retval;
+
+       xhci_dbg(xhci, "Resetting HCD\n");
+       /* Reset the internal HC memory state and registers. */
+       retval = xhci_reset(xhci);
+       if (retval)
+               return retval;
+       xhci_dbg(xhci, "Reset complete\n");
+
+       xhci_dbg(xhci, "Calling HCD init\n");
+       /* Initialize HCD and host controller data structures. */
+       retval = xhci_init(hcd);
+       if (retval)
+               return retval;
+       xhci_dbg(xhci, "Called HCD init\n");
+
+       /* SBRN = Serial Bus Release Number config register */
+       pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
+       xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+
+       /* Find any debug ports */
+       return xhci_pci_reinit(xhci, pdev);
+}
+
+/* hc_driver operations for xHCI controllers behind PCI */
+static const struct hc_driver xhci_pci_hc_driver = {
+       .description =          hcd_name,
+       .product_desc =         "xHCI Host Controller",
+       .hcd_priv_size =        sizeof(struct xhci_hcd),
+
+       /*
+        * generic hardware linkage
+        */
+       .irq =                  xhci_irq,
+       .flags =                HCD_MEMORY | HCD_USB3,
+
+       /*
+        * basic lifecycle operations
+        */
+       .reset =                xhci_pci_setup,
+       .start =                xhci_run,
+       /* suspend and resume implemented later */
+       .stop =                 xhci_stop,
+       .shutdown =             xhci_shutdown,
+
+       /*
+        * managing i/o requests and associated device resources
+        */
+       .urb_enqueue =          xhci_urb_enqueue,
+       .urb_dequeue =          xhci_urb_dequeue,
+       .alloc_dev =            xhci_alloc_dev,
+       .free_dev =             xhci_free_dev,
+       .add_endpoint =         xhci_add_endpoint,
+       .drop_endpoint =        xhci_drop_endpoint,
+       .check_bandwidth =      xhci_check_bandwidth,
+       .reset_bandwidth =      xhci_reset_bandwidth,
+       .address_device =       xhci_address_device,
+
+       /*
+        * scheduling support
+        */
+       .get_frame_number =     xhci_get_frame,
+
+       /* Root hub support */
+       .hub_control =          xhci_hub_control,
+       .hub_status_data =      xhci_hub_status_data,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* PCI driver selection metadata; PCI hotplugging uses this */
+static const struct pci_device_id pci_ids[] = { {
+       /* handle any USB 3.0 xHCI controller */
+       PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
+       /* driver_data carries the hc_driver ops for the matched device */
+       .driver_data =  (unsigned long) &xhci_pci_hc_driver,
+       },
+       { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+/* pci driver glue; this is a "new style" PCI driver module */
+static struct pci_driver xhci_pci_driver = {
+       .name =         (char *) hcd_name,
+       .id_table =     pci_ids,
+
+       /* probe/remove are the generic USB-core PCI HCD helpers */
+       .probe =        usb_hcd_pci_probe,
+       .remove =       usb_hcd_pci_remove,
+       /* suspend and resume implemented later */
+
+       .shutdown =     usb_hcd_pci_shutdown,
+};
+
+/* Register the xHCI PCI driver; returns 0 on success or a negative errno. */
+int xhci_register_pci(void)
+{
+       return pci_register_driver(&xhci_pci_driver);
+}
+
+/* Unregister the xHCI PCI driver on module unload. */
+void xhci_unregister_pci(void)
+{
+       pci_unregister_driver(&xhci_pci_driver);
+}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644 (file)
index 0000000..02d8198
--- /dev/null
@@ -0,0 +1,1648 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
+ *    Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
+ *    least one free TRB in the ring.  This is useful if you want to turn that
+ *    into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ *    link TRB, then load the pointer with the address in the link TRB.  If the
+ *    link TRB had its toggle bit set, you may need to update the ring cycle
+ *    state (see cycle bit rules).  You may have to do this multiple times
+ *    until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ *    equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ *    Update enqueue pointer between each write (which may update the ring
+ *    cycle state).
+ * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
+ *    and endpoint rings.  HC is the producer for the event ring, and it
+ *    generates an interrupt according to interrupt modulation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
+ *    the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ *    continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer.  SW is the consumer for the event ring, and it
+ *   updates event ring dequeue pointer.  HC is the consumer for the command and
+ *   endpoint rings; it generates events on the event ring for these.
+ */
+
+#include <linux/scatterlist.h>
+#include "xhci.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+               union xhci_trb *trb)
+{
+       unsigned long segment_offset;
+
+       if (!seg || !trb || trb < seg->trbs)
+               return 0;
+       /* offset in TRBs */
+       segment_offset = trb - seg->trbs;
+       /* Valid indices are 0..TRBS_PER_SEGMENT-1; the previous '>' test
+        * wrongly accepted the one-past-the-end pointer and would have
+        * returned a DMA address just outside this segment.
+        */
+       if (segment_offset >= TRBS_PER_SEGMENT)
+               return 0;
+       return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+/* Does this link TRB point to the first segment in a ring,
+ * or was the previous TRB the last TRB on the last segment in the ERST?
+ */
+static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+               struct xhci_segment *seg, union xhci_trb *trb)
+{
+       if (ring == xhci->event_ring)
+               /* one-past-the-end address compare is valid C; event ring
+                * segments carry no link TRBs, so wrap is detected by address */
+               return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
+                       (seg->next == xhci->event_ring->first_seg);
+       else
+               return trb->link.control & LINK_TOGGLE;
+}
+
+/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
+ * segment?  I.e. would the updated event TRB pointer step off the end of the
+ * event seg?
+ */
+static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+               struct xhci_segment *seg, union xhci_trb *trb)
+{
+       if (ring == xhci->event_ring)
+               return trb == &seg->trbs[TRBS_PER_SEGMENT];
+       else
+               /* non-event rings mark segment ends with an explicit link TRB */
+               return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
+}
+
+/* Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment.  This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void next_trb(struct xhci_hcd *xhci,
+               struct xhci_ring *ring,
+               struct xhci_segment **seg,
+               union xhci_trb **trb)
+{
+       if (last_trb(xhci, ring, *seg, *trb)) {
+               *seg = (*seg)->next;
+               *trb = ((*seg)->trbs);
+       } else {
+               /* The old '*trb = (*trb)++' modified *trb twice with no
+                * intervening sequence point (undefined behavior, C99 6.5p2);
+                * a plain post-increment is what was intended.
+                */
+               (*trb)++;
+       }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ */
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+       union xhci_trb *next = ++(ring->dequeue);
+
+       ring->deq_updates++;
+       /* Update the dequeue pointer further if that was a link TRB or we're at
+        * the end of an event ring segment (which doesn't have link TRBs)
+        */
+       while (last_trb(xhci, ring, ring->deq_seg, next)) {
+               if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+                       /* Wrapped past the last segment: flip our cycle state */
+                       ring->cycle_state = (ring->cycle_state ? 0 : 1);
+                       if (!in_interrupt())
+                               xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+                                               ring,
+                                               (unsigned int) ring->cycle_state);
+               }
+               ring->deq_seg = ring->deq_seg->next;
+               ring->dequeue = ring->deq_seg->trbs;
+               next = ring->dequeue;
+       }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set.
+ * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
+ */
+static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+{
+       u32 chain;
+       union xhci_trb *next;
+
+       /* Propagate the chain bit of the TRB just enqueued to any link TRBs */
+       chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
+       next = ++(ring->enqueue);
+
+       ring->enq_updates++;
+       /* Update the dequeue pointer further if that was a link TRB or we're at
+        * the end of an event ring segment (which doesn't have link TRBs)
+        */
+       while (last_trb(xhci, ring, ring->enq_seg, next)) {
+               if (!consumer) {
+                       if (ring != xhci->event_ring) {
+                               next->link.control &= ~TRB_CHAIN;
+                               next->link.control |= chain;
+                               /* Give this link TRB to the hardware */
+                               wmb();
+                               /* Flip the cycle bit last, after wmb(), so the
+                                * HC only sees a fully-written link TRB */
+                               if (next->link.control & TRB_CYCLE)
+                                       next->link.control &= (u32) ~TRB_CYCLE;
+                               else
+                                       next->link.control |= (u32) TRB_CYCLE;
+                       }
+                       /* Toggle the cycle bit after the last ring segment. */
+                       if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
+                               ring->cycle_state = (ring->cycle_state ? 0 : 1);
+                               if (!in_interrupt())
+                                       xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+                                                       ring,
+                                                       (unsigned int) ring->cycle_state);
+                       }
+               }
+               ring->enq_seg = ring->enq_seg->next;
+               ring->enqueue = ring->enq_seg->trbs;
+               next = ring->enqueue;
+       }
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring.  See rules
+ * above.
+ * FIXME: this would be simpler and faster if we just kept track of the number
+ * of free TRBs in a ring.
+ */
+static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+               unsigned int num_trbs)
+{
+       int i;
+       union xhci_trb *enq = ring->enqueue;
+       struct xhci_segment *enq_seg = ring->enq_seg;
+
+       /* Check if ring is empty */
+       if (enq == ring->dequeue)
+               return 1;
+       /* Make sure there's an extra empty TRB available */
+       /* <= walks num_trbs + 1 slots: one TRB must stay free so that
+        * enqueue == dequeue keeps meaning "empty", not "full" */
+       for (i = 0; i <= num_trbs; ++i) {
+               if (enq == ring->dequeue)
+                       return 0;
+               enq++;
+               while (last_trb(xhci, ring, enq_seg, enq)) {
+                       enq_seg = enq_seg->next;
+                       enq = enq_seg->trbs;
+               }
+       }
+       return 1;
+}
+
+/* Write SW's current event ring dequeue pointer into the HC's ERST dequeue
+ * register, preserving the register bits covered by ERST_PTR_MASK.
+ */
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+       u32 temp;
+       dma_addr_t deq;
+
+       deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+                       xhci->event_ring->dequeue);
+       if (deq == 0 && !in_interrupt())
+               xhci_warn(xhci, "WARN something wrong with SW event ring "
+                               "dequeue ptr.\n");
+       /* Update HC event ring dequeue pointer */
+       temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+       temp &= ERST_PTR_MASK;
+       if (!in_interrupt())
+               xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
+       /* high dword first, then the low dword that carries the masked bits */
+       xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+       xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
+                       &xhci->ir_set->erst_dequeue[0]);
+}
+
+/* Ring the host controller doorbell after placing a command on the ring */
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+{
+       u32 temp;
+
+       xhci_dbg(xhci, "// Ding dong!\n");
+       /* doorbell 0 targets the host controller (command ring) */
+       temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
+       xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+       /* Flush PCI posted writes */
+       xhci_readl(xhci, &xhci->dba->doorbell[0]);
+}
+
+/* Ring the doorbell for one endpoint of the given slot, unless cancellation
+ * work makes restarting the ring undesirable right now.
+ */
+static void ring_ep_doorbell(struct xhci_hcd *xhci,
+               unsigned int slot_id,
+               unsigned int ep_index)
+{
+       struct xhci_ring *ep_ring;
+       u32 field;
+       __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+
+       ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+       /* Don't ring the doorbell for this endpoint if there are pending
+        * cancellations because we don't want to interrupt processing.
+        */
+       if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
+               field = xhci_readl(xhci, db_addr) & DB_MASK;
+               xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
+               /* Flush PCI posted writes - FIXME Matthew Wilcox says this
+                * isn't time-critical and we shouldn't make the CPU wait for
+                * the flush.
+                */
+               xhci_readl(xhci, db_addr);
+       }
+}
+
+/*
+ * Find the segment that trb is in.  Start searching in start_seg.
+ * If we must move past a segment that has a link TRB with a toggle cycle state
+ * bit set, then we will toggle the value pointed at by cycle_state.
+ */
+static struct xhci_segment *find_trb_seg(
+               struct xhci_segment *start_seg,
+               union xhci_trb  *trb, int *cycle_state)
+{
+       struct xhci_segment *cur_seg = start_seg;
+       struct xhci_generic_trb *generic_trb;
+
+       /* Walk segments until trb's address falls inside cur_seg's TRB array */
+       while (cur_seg->trbs > trb ||
+                       &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
+               /* the last TRB of each segment is its link TRB */
+               generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
+               if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
+                               (generic_trb->field[3] & LINK_TOGGLE))
+                       *cycle_state = ~(*cycle_state) & 0x1;
+               cur_seg = cur_seg->next;
+               if (cur_seg == start_seg)
+                       /* Looped over the entire list.  Oops! */
+                       return 0;
+       }
+       return cur_seg;
+}
+
+/* New hardware dequeue state computed when cancelling a TD. */
+struct dequeue_state {
+       /* segment containing the new dequeue pointer */
+       struct xhci_segment *new_deq_seg;
+       /* first TRB the xHC should process after the cancelled TD */
+       union xhci_trb *new_deq_ptr;
+       /* consumer cycle state the xHC should resume with */
+       int new_cycle_state;
+};
+
+/*
+ * Move the xHC's endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the xHC's endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update our internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ *  - First we update our new ring state to be the same as when the xHC stopped.
+ *  - Then we traverse the ring to find the segment that contains
+ *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
+ *    any link TRBs with the toggle cycle bit set.
+ *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ *    if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+static void find_new_dequeue_state(struct xhci_hcd *xhci,
+               unsigned int slot_id, unsigned int ep_index,
+               struct xhci_td *cur_td, struct dequeue_state *state)
+{
+       struct xhci_virt_device *dev = xhci->devs[slot_id];
+       struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
+       struct xhci_generic_trb *trb;
+
+       state->new_cycle_state = 0;
+       state->new_deq_seg = find_trb_seg(cur_td->start_seg,
+                       ep_ring->stopped_trb,
+                       &state->new_cycle_state);
+       /* stopped_trb must lie on this ring; anything else is a driver bug */
+       if (!state->new_deq_seg)
+               BUG();
+       /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+       state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+
+       state->new_deq_ptr = cur_td->last_trb;
+       state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+                       state->new_deq_ptr,
+                       &state->new_cycle_state);
+       if (!state->new_deq_seg)
+               BUG();
+
+       trb = &state->new_deq_ptr->generic;
+       if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
+                               (trb->field[3] & LINK_TOGGLE))
+               state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
+       next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+
+       /* Don't update the ring cycle state for the producer (us). */
+       ep_ring->dequeue = state->new_deq_ptr;
+       ep_ring->deq_seg = state->new_deq_seg;
+}
+
+/* Turn every TRB of a cancelled TD into a no-op TRB so the hardware skips
+ * the TD; link TRBs are only unchained, their pointers are left intact.
+ */
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+               struct xhci_td *cur_td)
+{
+       struct xhci_segment *cur_seg;
+       union xhci_trb *cur_trb;
+
+       for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
+                       true;
+                       next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+               if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
+                               TRB_TYPE(TRB_LINK)) {
+                       /* Unchain any chained Link TRBs, but
+                        * leave the pointers intact.
+                        */
+                       cur_trb->generic.field[3] &= ~TRB_CHAIN;
+                       xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
+                       xhci_dbg(xhci, "Address = %p (0x%llx dma); "
+                                       "in seg %p (0x%llx dma)\n",
+                                       cur_trb,
+                                       (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+                                       cur_seg,
+                                       (unsigned long long)cur_seg->dma);
+               } else {
+                       cur_trb->generic.field[0] = 0;
+                       cur_trb->generic.field[1] = 0;
+                       cur_trb->generic.field[2] = 0;
+                       /* Preserve only the cycle bit of this TRB */
+                       cur_trb->generic.field[3] &= TRB_CYCLE;
+                       cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
+                       xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
+                                       "in seg %p (0x%llx dma)\n",
+                                       cur_trb,
+                                       (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
+                                       cur_seg,
+                                       (unsigned long long)cur_seg->dma);
+               }
+               if (cur_trb == cur_td->last_trb)
+                       break;
+       }
+}
+
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+               unsigned int ep_index, struct xhci_segment *deq_seg,
+               union xhci_trb *deq_ptr, u32 cycle_state);
+
+/*
+ * When we get a command completion for a Stop Endpoint Command, we need to
+ * unlink any cancelled TDs from the ring.  There are two ways to do that:
+ *
+ *  1. If the HW was in the middle of processing the TD that needs to be
+ *     cancelled, then we must move the ring's dequeue pointer past the last TRB
+ *     in the TD with a Set Dequeue Pointer Command.
+ *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
+ *     bit cleared) so that the HW will skip over them.
+ *
+ * Called with xhci->lock held; temporarily drops and re-acquires it while
+ * giving cancelled URBs back to the USB core.
+ */
+static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+               union xhci_trb *trb)
+{
+       unsigned int slot_id;
+       unsigned int ep_index;
+       struct xhci_ring *ep_ring;
+       struct list_head *entry;
+       struct xhci_td *cur_td = 0;
+       struct xhci_td *last_unlinked_td;
+
+       struct dequeue_state deq_state;
+#ifdef CONFIG_USB_HCD_STAT
+       ktime_t stop_time = ktime_get();
+#endif
+
+       memset(&deq_state, 0, sizeof(deq_state));
+       slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+       ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+       ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+       if (list_empty(&ep_ring->cancelled_td_list))
+               return;
+
+       /* Fix up the ep ring first, so HW stops executing cancelled TDs.
+        * We have the xHCI lock, so nothing can modify this list until we drop
+        * it.  We're also in the event handler, so we can't get re-interrupted
+        * if another Stop Endpoint command completes
+        */
+       list_for_each(entry, &ep_ring->cancelled_td_list) {
+               cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
+               xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
+                               cur_td->first_trb,
+                               (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+               /*
+                * If we stopped on the TD we need to cancel, then we have to
+                * move the xHC endpoint ring dequeue pointer past this TD.
+                */
+               if (cur_td == ep_ring->stopped_td)
+                       find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+                                       &deq_state);
+               else
+                       td_to_noop(xhci, ep_ring, cur_td);
+               /*
+                * The event handler won't see a completion for this TD anymore,
+                * so remove it from the endpoint ring's TD list.  Keep it in
+                * the cancelled TD list for URB completion later.
+                */
+               list_del(&cur_td->td_list);
+               ep_ring->cancels_pending--;
+       }
+       /* cur_td now points at the last TD unlinked above */
+       last_unlinked_td = cur_td;
+
+       /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
+       if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
+               xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+                               "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+                               deq_state.new_deq_seg,
+                               (unsigned long long)deq_state.new_deq_seg->dma,
+                               deq_state.new_deq_ptr,
+                               (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+                               deq_state.new_cycle_state);
+               queue_set_tr_deq(xhci, slot_id, ep_index,
+                               deq_state.new_deq_seg,
+                               deq_state.new_deq_ptr,
+                               (u32) deq_state.new_cycle_state);
+               /* Stop the TD queueing code from ringing the doorbell until
+                * this command completes.  The HC won't set the dequeue pointer
+                * if the ring is running, and ringing the doorbell starts the
+                * ring running.
+                */
+               ep_ring->state |= SET_DEQ_PENDING;
+               xhci_ring_cmd_db(xhci);
+       } else {
+               /* Otherwise just ring the doorbell to restart the ring */
+               ring_ep_doorbell(xhci, slot_id, ep_index);
+       }
+
+       /*
+        * Drop the lock and complete the URBs in the cancelled TD list.
+        * New TDs to be cancelled might be added to the end of the list before
+        * we can complete all the URBs for the TDs we already unlinked.
+        * So stop when we've completed the URB for the last TD we unlinked.
+        */
+       do {
+               cur_td = list_entry(ep_ring->cancelled_td_list.next,
+                               struct xhci_td, cancelled_td_list);
+               list_del(&cur_td->cancelled_td_list);
+
+               /* Clean up the cancelled URB */
+#ifdef CONFIG_USB_HCD_STAT
+               hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
+                               ktime_sub(stop_time, cur_td->start_time));
+#endif
+               cur_td->urb->hcpriv = NULL;
+               usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
+
+               xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
+               spin_unlock(&xhci->lock);
+               /* Doesn't matter what we pass for status, since the core will
+                * just overwrite it (because the URB has been unlinked).
+                */
+               usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
+               kfree(cur_td);
+
+               spin_lock(&xhci->lock);
+       } while (cur_td != last_unlinked_td);
+
+       /* Return to the event handler with xhci->lock re-acquired */
+}
+
+/*
+ * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
+ * we need to clear the set deq pending flag in the endpoint ring state, so that
+ * the TD queueing code can ring the doorbell again.  We also need to ring the
+ * endpoint doorbell to restart the ring, but only if there aren't more
+ * cancellations pending.
+ */
+static void handle_set_deq_completion(struct xhci_hcd *xhci,
+               struct xhci_event_cmd *event,
+               union xhci_trb *trb)
+{
+       unsigned int slot_id;
+       unsigned int ep_index;
+       struct xhci_ring *ep_ring;
+       struct xhci_virt_device *dev;
+
+       slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+       ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+       dev = xhci->devs[slot_id];
+       ep_ring = dev->ep_rings[ep_index];
+
+       if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
+               unsigned int ep_state;
+               unsigned int slot_state;
+
+               /* Log why the command failed; see the note below on recovery */
+               switch (GET_COMP_CODE(event->status)) {
+               case COMP_TRB_ERR:
+                       xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
+                                       "of stream ID configuration\n");
+                       break;
+               case COMP_CTX_STATE:
+                       xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
+                                       "to incorrect slot or ep state.\n");
+                       ep_state = dev->out_ctx->ep[ep_index].ep_info;
+                       ep_state &= EP_STATE_MASK;
+                       slot_state = dev->out_ctx->slot.dev_state;
+                       slot_state = GET_SLOT_STATE(slot_state);
+                       xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+                                       slot_state, ep_state);
+                       break;
+               case COMP_EBADSLT:
+                       xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
+                                       "slot %u was not enabled.\n", slot_id);
+                       break;
+               default:
+                       xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
+                                       "completion code of %u.\n",
+                                       GET_COMP_CODE(event->status));
+                       break;
+               }
+               /* OK what do we do now?  The endpoint state is hosed, and we
+                * should never get to this point if the synchronization between
+                * queueing, and endpoint state are correct.  This might happen
+                * if the device gets disconnected after we've finished
+                * cancelling URBs, which might not be an error...
+                */
+       } else {
+               xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
+                               "deq[1] = 0x%x.\n",
+                               dev->out_ctx->ep[ep_index].deq[0],
+                               dev->out_ctx->ep[ep_index].deq[1]);
+       }
+
+       /* Allow queueing to ring the doorbell again, and restart the ring */
+       ep_ring->state &= ~SET_DEQ_PENDING;
+       ring_ep_doorbell(xhci, slot_id, ep_index);
+}
+
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+               struct xhci_event_cmd *event)
+{
+       /* Slot ID the completed command operated on, taken from the event */
+       int slot_id = TRB_TO_SLOT_ID(event->flags);
+       u64 cmd_dma;
+       dma_addr_t cmd_dequeue_dma;
+
+       /* Reassemble the 64-bit DMA pointer to the completed command TRB
+        * from the event's two 32-bit halves.
+        */
+       cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+       cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+                       xhci->cmd_ring->dequeue);
+       /* Is the command ring deq ptr out of sync with the deq seg ptr? */
+       if (cmd_dequeue_dma == 0) {
+               xhci->error_bitmask |= 1 << 4;
+               return;
+       }
+       /* Does the DMA address match our internal dequeue pointer address? */
+       if (cmd_dma != (u64) cmd_dequeue_dma) {
+               xhci->error_bitmask |= 1 << 5;
+               return;
+       }
+       /* Dispatch on the type of the command TRB at our dequeue pointer */
+       switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
+       case TRB_TYPE(TRB_ENABLE_SLOT):
+               /* Publish the slot ID (or 0 on failure) before waking the
+                * waiter blocked on xhci->addr_dev.
+                */
+               if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
+                       xhci->slot_id = slot_id;
+               else
+                       xhci->slot_id = 0;
+               complete(&xhci->addr_dev);
+               break;
+       case TRB_TYPE(TRB_DISABLE_SLOT):
+               if (xhci->devs[slot_id])
+                       xhci_free_virt_device(xhci, slot_id);
+               break;
+       case TRB_TYPE(TRB_CONFIG_EP):
+               /* Store the status first so the waiter sees it on wakeup */
+               xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+               complete(&xhci->devs[slot_id]->cmd_completion);
+               break;
+       case TRB_TYPE(TRB_ADDR_DEV):
+               xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
+               complete(&xhci->addr_dev);
+               break;
+       case TRB_TYPE(TRB_STOP_RING):
+               handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
+               break;
+       case TRB_TYPE(TRB_SET_DEQ):
+               handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+               break;
+       case TRB_TYPE(TRB_CMD_NOOP):
+               /* No-ops are only counted (used for ring debugging) */
+               ++xhci->noops_handled;
+               break;
+       default:
+               /* Skip over unknown commands on the event ring */
+               xhci->error_bitmask |= 1 << 6;
+               break;
+       }
+       /* Advance our copy of the command ring dequeue pointer */
+       inc_deq(xhci, xhci->cmd_ring, false);
+}
+
+static void handle_port_status(struct xhci_hcd *xhci,
+               union xhci_trb *event)
+{
+       u32 port;
+
+       /* A port status change event should always report success */
+       if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
+               xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
+               xhci->error_bitmask |= 1 << 8;
+       }
+       /* FIXME: core doesn't care about all port link state changes yet */
+       port = GET_PORT_ID(event->generic.field[0]);
+       xhci_dbg(xhci, "Port Status Change Event for port %d\n", port);
+
+       /* Advance the event ring dequeue pointer before releasing the lock */
+       inc_deq(xhci, xhci->event_ring, true);
+       xhci_set_hc_event_deq(xhci);
+
+       /* The roothub polling code must run without xhci->lock held */
+       spin_unlock(&xhci->lock);
+       usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
+       spin_lock(&xhci->lock);
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment.  If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment.  Otherwise it
+ * returns 0.
+ */
+static struct xhci_segment *trb_in_td(
+               struct xhci_segment *start_seg,
+               union xhci_trb  *start_trb,
+               union xhci_trb  *end_trb,
+               dma_addr_t      suspect_dma)
+{
+       dma_addr_t start_dma;
+       dma_addr_t end_seg_dma;
+       dma_addr_t end_trb_dma;
+       struct xhci_segment *cur_seg;
+
+       start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
+       cur_seg = start_seg;
+
+       do {
+               /* We may get an event for a Link TRB in the middle of a TD.
+                * Compute the DMA address of the last TRB of the *current*
+                * segment (using start_seg here would give a bogus address
+                * once the walk moves past the first segment).
+                */
+               end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+                               &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+               /* If the end TRB isn't in this segment, this is set to 0 */
+               end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
+
+               if (end_trb_dma > 0) {
+                       /* The end TRB is in this segment, so suspect should be here */
+                       if (start_dma <= end_trb_dma) {
+                               if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+                                       return cur_seg;
+                       } else {
+                               /* Case for one segment with
+                                * a TD wrapped around to the top
+                                */
+                               if ((suspect_dma >= start_dma &&
+                                                       suspect_dma <= end_seg_dma) ||
+                                               (suspect_dma >= cur_seg->dma &&
+                                                suspect_dma <= end_trb_dma))
+                                       return cur_seg;
+                       }
+                       return 0;
+               } else {
+                       /* Might still be somewhere in this segment */
+                       if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+                               return cur_seg;
+               }
+               cur_seg = cur_seg->next;
+               start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+       } while (cur_seg != start_seg);
+
+       /* Walked every segment in the ring without finding suspect_dma.
+        * Terminating here (instead of looping forever) keeps the promise
+        * in the comment above: return 0 when the TRB isn't in this TD.
+        */
+       return 0;
+}
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
+ * At this point, the host controller is probably hosed and should be reset.
+ */
+static int handle_tx_event(struct xhci_hcd *xhci,
+               struct xhci_transfer_event *event)
+{
+       struct xhci_virt_device *xdev;
+       struct xhci_ring *ep_ring;
+       int ep_index;
+       struct xhci_td *td = 0;
+       dma_addr_t event_dma;
+       struct xhci_segment *event_seg;
+       union xhci_trb *event_trb;
+       struct urb *urb = 0;
+       int status = -EINPROGRESS;
+
+       xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+       if (!xdev) {
+               xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+               return -ENODEV;
+       }
+
+       /* Endpoint ID is 1 based, our index is zero based */
+       ep_index = TRB_TO_EP_ID(event->flags) - 1;
+       ep_ring = xdev->ep_rings[ep_index];
+       if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+               xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
+               return -ENODEV;
+       }
+
+       event_dma = event->buffer[0];
+       if (event->buffer[1] != 0)
+               xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
+
+       /* This TRB should be in the TD at the head of this ring's TD list */
+       if (list_empty(&ep_ring->td_list)) {
+               xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+                               TRB_TO_SLOT_ID(event->flags), ep_index);
+               xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+                               (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+               xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+               urb = NULL;
+               goto cleanup;
+       }
+       td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
+       /* Is this a TRB in the currently executing TD? */
+       event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
+                       td->last_trb, event_dma);
+       if (!event_seg) {
+               /* HC is busted, give up! */
+               xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
+               return -ESHUTDOWN;
+       }
+       event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
+       xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+                       (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
+       xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
+                       (unsigned int) event->buffer[0]);
+       /* Fix the debug label: this line prints buffer[1], not buffer[0] */
+       xhci_dbg(xhci, "Offset 0x04 (buffer[1]) = 0x%x\n",
+                       (unsigned int) event->buffer[1]);
+       xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
+                       (unsigned int) event->transfer_len);
+       xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
+                       (unsigned int) event->flags);
+
+       /* Look for common error cases */
+       switch (GET_COMP_CODE(event->transfer_len)) {
+       /* Skip codes that require special handling depending on
+        * transfer type
+        */
+       case COMP_SUCCESS:
+       case COMP_SHORT_TX:
+               break;
+       case COMP_STOP:
+               xhci_dbg(xhci, "Stopped on Transfer TRB\n");
+               break;
+       case COMP_STOP_INVAL:
+               xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
+               break;
+       case COMP_STALL:
+               xhci_warn(xhci, "WARN: Stalled endpoint\n");
+               status = -EPIPE;
+               break;
+       case COMP_TRB_ERR:
+               xhci_warn(xhci, "WARN: TRB error on endpoint\n");
+               status = -EILSEQ;
+               break;
+       case COMP_TX_ERR:
+               xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+               status = -EPROTO;
+               break;
+       case COMP_DB_ERR:
+               xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
+               status = -ENOSR;
+               break;
+       default:
+               xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
+               urb = NULL;
+               goto cleanup;
+       }
+       /* Now update the urb's actual_length and give back to the core */
+       /* Was this a control transfer? */
+       if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
+               xhci_debug_trb(xhci, xhci->event_ring->dequeue);
+               switch (GET_COMP_CODE(event->transfer_len)) {
+               case COMP_SUCCESS:
+                       if (event_trb == ep_ring->dequeue) {
+                               xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
+                               status = -ESHUTDOWN;
+                       } else if (event_trb != td->last_trb) {
+                               xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
+                               status = -ESHUTDOWN;
+                       } else {
+                               xhci_dbg(xhci, "Successful control transfer!\n");
+                               status = 0;
+                       }
+                       break;
+               case COMP_SHORT_TX:
+                       xhci_warn(xhci, "WARN: short transfer on control ep\n");
+                       status = -EREMOTEIO;
+                       break;
+               default:
+                       /* Others already handled above */
+                       break;
+               }
+               /*
+                * Did we transfer any data, despite the errors that might have
+                * happened?  I.e. did we get past the setup stage?
+                */
+               if (event_trb != ep_ring->dequeue) {
+                       /* The event was for the status stage */
+                       if (event_trb == td->last_trb) {
+                               td->urb->actual_length =
+                                       td->urb->transfer_buffer_length;
+                       } else {
+                       /* Maybe the event was for the data stage? */
+                               if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+                                       /* We didn't stop on a link TRB in the middle */
+                                       td->urb->actual_length =
+                                               td->urb->transfer_buffer_length -
+                                               TRB_LEN(event->transfer_len);
+                       }
+               }
+       } else {
+               switch (GET_COMP_CODE(event->transfer_len)) {
+               case COMP_SUCCESS:
+                       /* Double check that the HW transferred everything. */
+                       if (event_trb != td->last_trb) {
+                               xhci_warn(xhci, "WARN Successful completion "
+                                               "on short TX\n");
+                               if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+                                       status = -EREMOTEIO;
+                               else
+                                       status = 0;
+                       } else {
+                               xhci_dbg(xhci, "Successful bulk transfer!\n");
+                               status = 0;
+                       }
+                       break;
+               case COMP_SHORT_TX:
+                       if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+                               status = -EREMOTEIO;
+                       else
+                               status = 0;
+                       break;
+               default:
+                       /* Others already handled above */
+                       break;
+               }
+               dev_dbg(&td->urb->dev->dev,
+                               "ep %#x - asked for %d bytes, "
+                               "%d bytes untransferred\n",
+                               td->urb->ep->desc.bEndpointAddress,
+                               td->urb->transfer_buffer_length,
+                               TRB_LEN(event->transfer_len));
+               /* Fast path - was this the last TRB in the TD for this URB? */
+               if (event_trb == td->last_trb) {
+                       if (TRB_LEN(event->transfer_len) != 0) {
+                               /* urb->actual_length is unsigned, so a "< 0"
+                                * check after the subtraction can never fire.
+                                * Validate the untransferred length against the
+                                * request before subtracting instead, so a
+                                * bogus HC value can't underflow.
+                                */
+                               if (TRB_LEN(event->transfer_len) >
+                                               td->urb->transfer_buffer_length) {
+                                       xhci_warn(xhci, "HC gave bad length "
+                                                       "of %d bytes left\n",
+                                                       TRB_LEN(event->transfer_len));
+                                       td->urb->actual_length = 0;
+                               } else {
+                                       td->urb->actual_length =
+                                               td->urb->transfer_buffer_length -
+                                               TRB_LEN(event->transfer_len);
+                               }
+                               if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+                                       status = -EREMOTEIO;
+                               else
+                                       status = 0;
+                       } else {
+                               td->urb->actual_length = td->urb->transfer_buffer_length;
+                               /* Ignore a short packet completion if the
+                                * untransferred length was zero.
+                                */
+                               status = 0;
+                       }
+               } else {
+                       /* Slow path - walk the list, starting from the dequeue
+                        * pointer, to get the actual length transferred.
+                        */
+                       union xhci_trb *cur_trb;
+                       struct xhci_segment *cur_seg;
+
+                       td->urb->actual_length = 0;
+                       for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
+                                       cur_trb != event_trb;
+                                       next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
+                               if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
+                                               TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
+                                       td->urb->actual_length +=
+                                               TRB_LEN(cur_trb->generic.field[2]);
+                       }
+                       /* If the ring didn't stop on a Link or No-op TRB, add
+                        * in the actual bytes transferred from the Normal TRB
+                        */
+                       if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+                               td->urb->actual_length +=
+                                       TRB_LEN(cur_trb->generic.field[2]) -
+                                       TRB_LEN(event->transfer_len);
+               }
+       }
+       /* The Endpoint Stop Command completion will take care of
+        * any stopped TDs.  A stopped TD may be restarted, so don't update the
+        * ring dequeue pointer or take this TD off any lists yet.
+        */
+       if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
+                       GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+               ep_ring->stopped_td = td;
+               ep_ring->stopped_trb = event_trb;
+       } else {
+               /* Update ring dequeue pointer */
+               while (ep_ring->dequeue != td->last_trb)
+                       inc_deq(xhci, ep_ring, false);
+               inc_deq(xhci, ep_ring, false);
+
+               /* Clean up the endpoint's TD list */
+               urb = td->urb;
+               list_del(&td->td_list);
+               /* Was this TD slated to be cancelled but completed anyway? */
+               if (!list_empty(&td->cancelled_td_list)) {
+                       list_del(&td->cancelled_td_list);
+                       ep_ring->cancels_pending--;
+               }
+               kfree(td);
+               urb->hcpriv = NULL;
+       }
+cleanup:
+       inc_deq(xhci, xhci->event_ring, true);
+       xhci_set_hc_event_deq(xhci);
+
+       /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
+       if (urb) {
+               usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+               /* Give the URB back to the core without xhci->lock held */
+               spin_unlock(&xhci->lock);
+               usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+               spin_lock(&xhci->lock);
+       }
+       return 0;
+}
+
+/*
+ * This function handles all OS-owned events on the event ring.  It may drop
+ * xhci->lock between event processing (e.g. to pass up port status changes).
+ */
+void xhci_handle_event(struct xhci_hcd *xhci)
+{
+       union xhci_trb *event;
+       int update_ptrs;
+       int ret;
+
+       /* Process OS-owned event TRBs until the HC owns the next one.
+        * This loop replaces the previous tail-recursive self-call, which
+        * could grow the kernel stack without bound when the event ring
+        * held many pending events.
+        */
+       while (1) {
+               if (!xhci->event_ring || !xhci->event_ring->dequeue) {
+                       xhci->error_bitmask |= 1 << 1;
+                       return;
+               }
+
+               event = xhci->event_ring->dequeue;
+               /* Does the HC or OS own the TRB? */
+               if ((event->event_cmd.flags & TRB_CYCLE) !=
+                               xhci->event_ring->cycle_state) {
+                       xhci->error_bitmask |= 1 << 2;
+                       return;
+               }
+
+               update_ptrs = 1;
+               /* FIXME: Handle more event types. */
+               switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
+               case TRB_TYPE(TRB_COMPLETION):
+                       handle_cmd_completion(xhci, &event->event_cmd);
+                       break;
+               case TRB_TYPE(TRB_PORT_STATUS):
+                       /* handle_port_status() updates the dequeue pointers */
+                       handle_port_status(xhci, event);
+                       update_ptrs = 0;
+                       break;
+               case TRB_TYPE(TRB_TRANSFER):
+                       /* handle_tx_event() updates the dequeue pointers on
+                        * success; on error we advance them here instead.
+                        */
+                       ret = handle_tx_event(xhci, &event->trans_event);
+                       if (ret < 0)
+                               xhci->error_bitmask |= 1 << 9;
+                       else
+                               update_ptrs = 0;
+                       break;
+               default:
+                       xhci->error_bitmask |= 1 << 3;
+               }
+
+               if (update_ptrs) {
+                       /* Update SW and HC event ring dequeue pointer */
+                       inc_deq(xhci, xhci->event_ring, true);
+                       xhci_set_hc_event_deq(xhci);
+               }
+               /* Loop again to check for more items on the event ring */
+       }
+}
+
+/****          Endpoint Ring Operations        ****/
+
+/*
+ * Generic function for queueing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ */
+static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+               bool consumer,
+               u32 field1, u32 field2, u32 field3, u32 field4)
+{
+       struct xhci_generic_trb *enq = &ring->enqueue->generic;
+
+       /* Fill in the four dwords of the TRB at the ring's enqueue
+        * pointer, then advance the enqueue pointer past it.
+        */
+       enq->field[0] = field1;
+       enq->field[1] = field2;
+       enq->field[2] = field3;
+       enq->field[3] = field4;
+       inc_enq(xhci, ring, consumer);
+}
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
+ * FIXME allocate segments if the ring is full.
+ */
+static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+               u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
+{
+       /* Make sure the endpoint has been added to xHC schedule */
+       xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
+       if (ep_state == EP_STATE_DISABLED) {
+               /*
+                * USB core changed config/interfaces without notifying us,
+                * or hardware is reporting the wrong state.
+                */
+               xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
+               return -ENOENT;
+       } else if (ep_state == EP_STATE_HALTED || ep_state == EP_STATE_ERROR) {
+               xhci_warn(xhci, "WARN waiting for halt or error on ep "
+                               "to be cleared\n");
+               /* FIXME event handling code for error needs to clear it */
+               /* XXX not sure if this should be -ENOENT or not */
+               return -EINVAL;
+       } else if (ep_state != EP_STATE_STOPPED &&
+                       ep_state != EP_STATE_RUNNING) {
+               xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
+               /*
+                * FIXME issue Configure Endpoint command to try to get the HC
+                * back into a known state.
+                */
+               return -EINVAL;
+       }
+
+       /* FIXME allocate more room when the ring is full */
+       if (!room_on_ring(xhci, ep_ring, num_trbs)) {
+               xhci_err(xhci, "ERROR no room on ep ring\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static int prepare_transfer(struct xhci_hcd *xhci,
+               struct xhci_virt_device *xdev,
+               unsigned int ep_index,
+               unsigned int num_trbs,
+               struct urb *urb,
+               struct xhci_td **td,
+               gfp_t mem_flags)
+{
+       struct xhci_ring *ep_ring = xdev->ep_rings[ep_index];
+       struct xhci_td *new_td;
+       int ret;
+
+       /* Refuse to queue if the endpoint state is bad or the ring lacks
+        * room for num_trbs more TRBs.
+        */
+       ret = prepare_ring(xhci, ep_ring,
+                       xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+                       num_trbs, mem_flags);
+       if (ret)
+               return ret;
+
+       new_td = kzalloc(sizeof(struct xhci_td), mem_flags);
+       if (!new_td)
+               return -ENOMEM;
+       *td = new_td;
+       INIT_LIST_HEAD(&new_td->td_list);
+       INIT_LIST_HEAD(&new_td->cancelled_td_list);
+
+       ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+       if (unlikely(ret)) {
+               kfree(new_td);
+               return ret;
+       }
+
+       /* Cross-link the URB and TD, and record where the TD starts */
+       new_td->urb = urb;
+       urb->hcpriv = new_td;
+       /* Add this TD to the tail of the endpoint ring's TD list */
+       list_add_tail(&new_td->td_list, &ep_ring->td_list);
+       new_td->start_seg = ep_ring->enq_seg;
+       new_td->first_trb = ep_ring->enqueue;
+
+       return 0;
+}
+
+static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+{
+       int num_sgs, num_trbs, running_total, temp, i;
+       struct scatterlist *sg;
+
+       sg = NULL;
+       num_sgs = urb->num_sgs;
+       /* temp counts down the bytes of the transfer the driver asked for;
+        * it may be less than the total sg-list capacity.
+        */
+       temp = urb->transfer_buffer_length;
+
+       xhci_dbg(xhci, "count sg list trbs: \n");
+       num_trbs = 0;
+       for_each_sg(urb->sg->sg, sg, num_sgs, i) {
+               unsigned int previous_total_trbs = num_trbs;
+               unsigned int len = sg_dma_len(sg);
+
+               /* Scatter gather list entries may cross 64KB boundaries */
+               /* running_total is the byte count up to the first 64KB
+                * boundary after this entry's start address.
+                */
+               running_total = TRB_MAX_BUFF_SIZE -
+                       (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+               /* One TRB covers the chunk before that first boundary */
+               if (running_total != 0)
+                       num_trbs++;
+
+               /* How many more 64KB chunks to transfer, how many more TRBs? */
+               while (running_total < sg_dma_len(sg)) {
+                       num_trbs++;
+                       running_total += TRB_MAX_BUFF_SIZE;
+               }
+               xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
+                               i, (unsigned long long)sg_dma_address(sg),
+                               len, len, num_trbs - previous_total_trbs);
+
+               /* Stop once the requested transfer length is consumed, even
+                * if more sg entries remain.
+                */
+               len = min_t(int, len, temp);
+               temp -= len;
+               if (temp == 0)
+                       break;
+       }
+       xhci_dbg(xhci, "\n");
+       if (!in_interrupt())
+               dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+                               urb->ep->desc.bEndpointAddress,
+                               urb->transfer_buffer_length,
+                               num_trbs);
+       return num_trbs;
+}
+
+static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+{
+       int expected = urb->transfer_buffer_length;
+
+       /* Sanity checks: after queueing, the TRB budget should be exactly
+        * used up and the queued byte count should match the request.
+        */
+       if (num_trbs)
+               dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+                               "TRBs, %d left\n", __func__,
+                               urb->ep->desc.bEndpointAddress, num_trbs);
+       if (running_total != expected)
+               dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+                               "queued %#x (%d), asked for %#x (%d)\n",
+                               __func__,
+                               urb->ep->desc.bEndpointAddress,
+                               running_total, running_total,
+                               expected, expected);
+}
+
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+               unsigned int ep_index, int start_cycle,
+               struct xhci_generic_trb *start_trb, struct xhci_td *td)
+{
+       /*
+        * Pass all the TRBs to the hardware at once and make sure this write
+        * isn't reordered.
+        */
+       /* The barrier must come before the cycle-bit flip: only once the
+        * first TRB's cycle bit matches the ring's state does the HC see
+        * the whole TD, so all later TRBs must be visible first.
+        */
+       wmb();
+       start_trb->field[3] |= start_cycle;
+       /* Tell the HC there is new work on this slot/endpoint */
+       ring_ep_doorbell(xhci, slot_id, ep_index);
+}
+
+static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+               struct urb *urb, int slot_id, unsigned int ep_index)
+{
+       struct xhci_ring *ep_ring;
+       unsigned int num_trbs;
+       struct xhci_td *td;
+       struct scatterlist *sg;
+       int num_sgs;
+       int trb_buff_len, this_sg_len, running_total;
+       bool first_trb;
+       u64 addr;
+
+       struct xhci_generic_trb *start_trb;
+       int start_cycle;
+
+       ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+       num_trbs = count_sg_trbs_needed(xhci, urb);
+       num_sgs = urb->num_sgs;
+
+       /* trb_buff_len doubles as prepare_transfer()'s return code here;
+        * it is overwritten with a real buffer length below.
+        */
+       trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+                       ep_index, num_trbs, urb, &td, mem_flags);
+       if (trb_buff_len < 0)
+               return trb_buff_len;
+       /*
+        * Don't give the first TRB to the hardware (by toggling the cycle bit)
+        * until we've finished creating all the other TRBs.  The ring's cycle
+        * state may change as we enqueue the other TRBs, so save it too.
+        */
+       start_trb = &ep_ring->enqueue->generic;
+       start_cycle = ep_ring->cycle_state;
+
+       running_total = 0;
+       /*
+        * How much data is in the first TRB?
+        *
+        * There are three forces at work for TRB buffer pointers and lengths:
+        * 1. We don't want to walk off the end of this sg-list entry buffer.
+        * 2. The transfer length that the driver requested may be smaller than
+        *    the amount of memory allocated for this scatter-gather list.
+        * 3. TRBs buffers can't cross 64KB boundaries.
+        */
+       sg = urb->sg->sg;
+       addr = (u64) sg_dma_address(sg);
+       this_sg_len = sg_dma_len(sg);
+       trb_buff_len = TRB_MAX_BUFF_SIZE -
+               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+       trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+       if (trb_buff_len > urb->transfer_buffer_length)
+               trb_buff_len = urb->transfer_buffer_length;
+       xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
+                       trb_buff_len);
+
+       first_trb = true;
+       /* Queue the first TRB, even if it's zero-length */
+       do {
+               u32 field = 0;
+
+               /* Don't change the cycle bit of the first TRB until later */
+               if (first_trb)
+                       first_trb = false;
+               else
+                       field |= ep_ring->cycle_state;
+
+               /* Chain all the TRBs together; clear the chain bit in the last
+                * TRB to indicate it's the last TRB in the chain.
+                */
+               if (num_trbs > 1) {
+                       field |= TRB_CHAIN;
+               } else {
+                       /* FIXME - add check for ZERO_PACKET flag before this */
+                       td->last_trb = ep_ring->enqueue;
+                       field |= TRB_IOC;
+               }
+               xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
+                               "64KB boundary at %#x, end dma = %#x\n",
+                               (unsigned int) addr, trb_buff_len, trb_buff_len,
+                               (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+                               (unsigned int) addr + trb_buff_len);
+               /* Should never happen if count_sg_trbs_needed() and the math
+                * below agree; flag it loudly if the TRB straddles 64KB.
+                */
+               if (TRB_MAX_BUFF_SIZE -
+                               (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+                       xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
+                       xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
+                                       (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
+                                       (unsigned int) addr + trb_buff_len);
+               }
+               queue_trb(xhci, ep_ring, false,
+                               (u32) addr,
+                               (u32) ((u64) addr >> 32),
+                               TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+                               /* We always want to know if the TRB was short,
+                                * or we won't get an event when it completes.
+                                * (Unless we use event data TRBs, which are a
+                                * waste of space and HC resources.)
+                                */
+                               field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+               --num_trbs;
+               running_total += trb_buff_len;
+
+               /* Calculate length for next transfer --
+                * Are we done queueing all the TRBs for this sg entry?
+                */
+               this_sg_len -= trb_buff_len;
+               if (this_sg_len == 0) {
+                       --num_sgs;
+                       if (num_sgs == 0)
+                               break;
+                       sg = sg_next(sg);
+                       addr = (u64) sg_dma_address(sg);
+                       this_sg_len = sg_dma_len(sg);
+               } else {
+                       addr += trb_buff_len;
+               }
+
+               /* Next TRB: up to the next 64KB boundary, capped by the sg
+                * entry and by the bytes remaining in the request.
+                */
+               trb_buff_len = TRB_MAX_BUFF_SIZE -
+                       (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+               trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
+               if (running_total + trb_buff_len > urb->transfer_buffer_length)
+                       trb_buff_len =
+                               urb->transfer_buffer_length - running_total;
+       } while (running_total < urb->transfer_buffer_length);
+
+       check_trb_math(urb, num_trbs, running_total);
+       /* Flip the first TRB's cycle bit and ring the doorbell last */
+       giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+       return 0;
+}
+
+/*
+ * Queue a bulk transfer as a chain of Normal TRBs.
+ *
+ * This is very similar to what ehci-q.c qtd_fill() does: the buffer is
+ * split on 64KB boundaries (a single TRB must not cross one), each chunk
+ * gets one Normal TRB, and all TRBs are chained with TRB_CHAIN except the
+ * last, which gets TRB_IOC so the HC raises a transfer event on completion.
+ * NOTE(review): presumably the caller holds xhci->lock, as documented for
+ * xhci_queue_ctrl_tx - confirm against callers.
+ */
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+               struct urb *urb, int slot_id, unsigned int ep_index)
+{
+       struct xhci_ring *ep_ring;
+       struct xhci_td *td;
+       int num_trbs;
+       struct xhci_generic_trb *start_trb;
+       bool first_trb;
+       int start_cycle;
+       u32 field;
+
+       int running_total, trb_buff_len, ret;
+       u64 addr;
+
+       /* Scatter-gather URBs take the TRB-per-sg-entry path instead */
+       if (urb->sg)
+               return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
+
+       ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+       num_trbs = 0;
+       /* How much data is (potentially) left before the 64KB boundary? */
+       running_total = TRB_MAX_BUFF_SIZE -
+               (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+
+       /* If there's some data on this 64KB chunk, or we have to send a
+        * zero-length transfer, we need at least one TRB
+        */
+       if (running_total != 0 || urb->transfer_buffer_length == 0)
+               num_trbs++;
+       /* How many more 64KB chunks to transfer, how many more TRBs? */
+       while (running_total < urb->transfer_buffer_length) {
+               num_trbs++;
+               running_total += TRB_MAX_BUFF_SIZE;
+       }
+       /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+
+       if (!in_interrupt())
+               dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+                               urb->ep->desc.bEndpointAddress,
+                               urb->transfer_buffer_length,
+                               urb->transfer_buffer_length,
+                               (unsigned long long)urb->transfer_dma,
+                               num_trbs);
+
+       /* Check ring room and set up the TD bookkeeping for this URB */
+       ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+                       num_trbs, urb, &td, mem_flags);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Don't give the first TRB to the hardware (by toggling the cycle bit)
+        * until we've finished creating all the other TRBs.  The ring's cycle
+        * state may change as we enqueue the other TRBs, so save it too.
+        */
+       start_trb = &ep_ring->enqueue->generic;
+       start_cycle = ep_ring->cycle_state;
+
+       running_total = 0;
+       /* How much data is in the first TRB? */
+       addr = (u64) urb->transfer_dma;
+       trb_buff_len = TRB_MAX_BUFF_SIZE -
+               (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+       if (urb->transfer_buffer_length < trb_buff_len)
+               trb_buff_len = urb->transfer_buffer_length;
+
+       first_trb = true;
+
+       /* Queue the first TRB, even if it's zero-length */
+       do {
+               field = 0;
+
+               /* Don't change the cycle bit of the first TRB until later */
+               if (first_trb)
+                       first_trb = false;
+               else
+                       field |= ep_ring->cycle_state;
+
+               /* Chain all the TRBs together; clear the chain bit in the last
+                * TRB to indicate it's the last TRB in the chain.
+                */
+               if (num_trbs > 1) {
+                       field |= TRB_CHAIN;
+               } else {
+                       /* FIXME - add check for ZERO_PACKET flag before this */
+                       td->last_trb = ep_ring->enqueue;
+                       field |= TRB_IOC;
+               }
+               queue_trb(xhci, ep_ring, false,
+                               (u32) addr,
+                               (u32) ((u64) addr >> 32),
+                               TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+                               /* We always want to know if the TRB was short,
+                                * or we won't get an event when it completes.
+                                * (Unless we use event data TRBs, which are a
+                                * waste of space and HC resources.)
+                                */
+                               field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
+               --num_trbs;
+               running_total += trb_buff_len;
+
+               /* Calculate length for next transfer */
+               addr += trb_buff_len;
+               trb_buff_len = urb->transfer_buffer_length - running_total;
+               if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+                       trb_buff_len = TRB_MAX_BUFF_SIZE;
+       } while (running_total < urb->transfer_buffer_length);
+
+       /* Sanity-check that the TRB math above consumed the whole buffer */
+       check_trb_math(urb, num_trbs, running_total);
+       /* Now hand the first TRB to the HC and ring the doorbell */
+       giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+       return 0;
+}
+
+/*
+ * Queue a control transfer: one Setup Stage TRB, an optional Data Stage
+ * TRB, and a Status Stage TRB (sections 6.4.1.2.1 - 6.4.1.2.3).
+ * Caller must have locked xhci->lock.
+ */
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+               struct urb *urb, int slot_id, unsigned int ep_index)
+{
+       struct xhci_ring *ep_ring;
+       int num_trbs;
+       int ret;
+       struct usb_ctrlrequest *setup;
+       struct xhci_generic_trb *start_trb;
+       int start_cycle;
+       u32 field;
+       struct xhci_td *td;
+
+       ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
+
+       /*
+        * Need to copy setup packet into setup TRB, so we can't use the setup
+        * DMA address.
+        */
+       if (!urb->setup_packet)
+               return -EINVAL;
+
+       if (!in_interrupt())
+               xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
+                               slot_id, ep_index);
+       /* 1 TRB for setup, 1 for status */
+       num_trbs = 2;
+       /*
+        * Don't need to check if we need additional event data and normal TRBs,
+        * since data in control transfers will never get bigger than 16MB
+        * XXX: can we get a buffer that crosses 64KB boundaries?
+        */
+       if (urb->transfer_buffer_length > 0)
+               num_trbs++;
+       /* Check ring room and set up the TD bookkeeping for this URB */
+       ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+                       urb, &td, mem_flags);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Don't give the first TRB to the hardware (by toggling the cycle bit)
+        * until we've finished creating all the other TRBs.  The ring's cycle
+        * state may change as we enqueue the other TRBs, so save it too.
+        */
+       start_trb = &ep_ring->enqueue->generic;
+       start_cycle = ep_ring->cycle_state;
+
+       /* Queue setup TRB - see section 6.4.1.2.1 */
+       /* FIXME better way to translate setup_packet into two u32 fields? */
+       setup = (struct usb_ctrlrequest *) urb->setup_packet;
+       queue_trb(xhci, ep_ring, false,
+                       /* FIXME endianness is probably going to bite my ass here. */
+                       setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
+                       setup->wIndex | setup->wLength << 16,
+                       /* A setup packet is always 8 bytes */
+                       TRB_LEN(8) | TRB_INTR_TARGET(0),
+                       /* Immediate data in pointer */
+                       TRB_IDT | TRB_TYPE(TRB_SETUP));
+
+       /* If there's data, queue data TRBs */
+       field = 0;
+       if (urb->transfer_buffer_length > 0) {
+               if (setup->bRequestType & USB_DIR_IN)
+                       field |= TRB_DIR_IN;
+               queue_trb(xhci, ep_ring, false,
+                               lower_32_bits(urb->transfer_dma),
+                               upper_32_bits(urb->transfer_dma),
+                               TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+                               /* Event on short tx */
+                               field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
+       }
+
+       /* Save the DMA address of the last TRB in the TD */
+       td->last_trb = ep_ring->enqueue;
+
+       /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
+       /* If the device sent data, the status stage is an OUT transfer */
+       if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
+               field = 0;
+       else
+               field = TRB_DIR_IN;
+       queue_trb(xhci, ep_ring, false,
+                       0,
+                       0,
+                       TRB_INTR_TARGET(0),
+                       /* Event on completion */
+                       field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
+
+       /* Now hand the first TRB to the HC and ring the doorbell */
+       giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
+       return 0;
+}
+
+/****          Command Ring Operations         ****/
+
+/* Generic function for queueing a command TRB on the command ring.
+ * Fails with -ENOMEM if the command ring has no free TRB.
+ */
+static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+{
+       struct xhci_ring *cmd_ring = xhci->cmd_ring;
+
+       if (!room_on_ring(xhci, cmd_ring, 1)) {
+               if (!in_interrupt())
+                       xhci_err(xhci, "ERR: No room for command on command ring\n");
+               return -ENOMEM;
+       }
+       queue_trb(xhci, cmd_ring, false, field1, field2, field3,
+                       field4 | cmd_ring->cycle_state);
+       return 0;
+}
+
+/* Queue a no-op command on the command ring */
+static int queue_cmd_noop(struct xhci_hcd *xhci)
+{
+       u32 type_field = TRB_TYPE(TRB_CMD_NOOP);
+
+       return queue_command(xhci, 0, 0, 0, type_field);
+}
+
+/*
+ * Place a no-op command on the command ring to test the command and
+ * event ring.
+ *
+ * Returns NULL if the no-op could not be queued.  On success, returns
+ * the function xhci_ring_cmd_db itself (as a pointer) rather than
+ * calling it - NOTE(review): presumably so the caller can ring the
+ * command doorbell later, e.g. after dropping locks; confirm against
+ * the callers of this function.
+ */
+void *xhci_setup_one_noop(struct xhci_hcd *xhci)
+{
+       if (queue_cmd_noop(xhci) < 0)
+               return NULL;
+       /* Count outstanding no-ops so the event handler can match them up */
+       xhci->noops_submitted++;
+       return xhci_ring_cmd_db;
+}
+
+/* Queue a slot enable or disable request on the command ring */
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+{
+       u32 control = TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id);
+
+       return queue_command(xhci, 0, 0, 0, control);
+}
+
+/* Queue an address device command TRB */
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+               u32 slot_id)
+{
+       u32 control = TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id);
+
+       return queue_command(xhci, in_ctx_ptr, 0, 0, control);
+}
+
+/* Queue a configure endpoint command TRB */
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+               u32 slot_id)
+{
+       u32 control = TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id);
+
+       return queue_command(xhci, in_ctx_ptr, 0, 0, control);
+}
+
+/* Queue a Stop Endpoint command for the given slot/endpoint pair */
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+               unsigned int ep_index)
+{
+       return queue_command(xhci, 0, 0, 0,
+                       SLOT_ID_FOR_TRB(slot_id) | EP_ID_FOR_TRB(ep_index) |
+                       TRB_TYPE(TRB_STOP_RING));
+}
+
+/* Set Transfer Ring Dequeue Pointer command.
+ * This should not be used for endpoints that have streams enabled.
+ *
+ * Returns 0 on success (or when the dequeue pointer could not be
+ * translated, after warning), or the error from queue_command().
+ */
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+               unsigned int ep_index, struct xhci_segment *deq_seg,
+               union xhci_trb *deq_ptr, u32 cycle_state)
+{
+       dma_addr_t addr;
+       u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+       u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+       u32 type = TRB_TYPE(TRB_SET_DEQ);
+
+       addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
+       if (addr == 0) {
+               /* The TRB wasn't found in the given segment; don't queue a
+                * Set TR Deq Ptr command with a bogus (zero) address.
+                * (The original code was missing these braces, so the second
+                * warning always fired and the command was queued anyway.)
+                */
+               xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+               xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+                               deq_seg, deq_ptr);
+               return 0;
+       }
+       return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+                       trb_slot_id | trb_ep_index | type);
+}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
new file mode 100644 (file)
index 0000000..8936eeb
--- /dev/null
@@ -0,0 +1,1157 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2008 Intel Corp.
+ *
+ * Author: Sarah Sharp
+ * Some code borrowed from the Linux EHCI driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_XHCI_HCD_H
+#define __LINUX_XHCI_HCD_H
+
+#include <linux/usb.h>
+#include <linux/timer.h>
+
+#include "../core/hcd.h"
+/* Code sharing between pci-quirks and xhci hcd */
+#include       "xhci-ext-caps.h"
+
+/* xHCI PCI Configuration Registers */
+#define XHCI_SBRN_OFFSET       (0x60)
+
+/* Max number of USB devices for any host controller - limit in section 6.1 */
+#define MAX_HC_SLOTS           256
+/* Section 5.3.3 - MaxPorts */
+#define MAX_HC_PORTS           127
+
+/*
+ * xHCI register interface.
+ * This corresponds to the eXtensible Host Controller Interface (xHCI)
+ * Revision 0.95 specification
+ *
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers.  Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+
+/**
+ * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
+ * @hc_capbase:                length of the capabilities register and HC version number
+ * @hcs_params1:       HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2:       HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3:       HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params:                HCCPARAMS - Capability Parameters
+ * @db_off:            DBOFF - Doorbell array offset
+ * @run_regs_off:      RTSOFF - Runtime register space offset
+ *
+ * Bitmask accessors for the fields within these registers are defined
+ * below (HC_LENGTH(), HCS_MAX_SLOTS(), HCC_64BIT_ADDR(), etc.).
+ */
+struct xhci_cap_regs {
+       u32     hc_capbase;
+       u32     hcs_params1;
+       u32     hcs_params2;
+       u32     hcs_params3;
+       u32     hcc_params;
+       u32     db_off;
+       u32     run_regs_off;
+       /* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p)           XHCI_HC_LENGTH(p)
+/* bits 31:16  */
+#define HC_VERSION(p)          (((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p)       (((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK         0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p)       (((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p)       (((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p)             (((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p)                (((p) >> 4) & 0xf)
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p)      (((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p)      (((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p)      ((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p)   ((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p)  ((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p)             ((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p)       ((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p)     ((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p)             ((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p)             ((p) & (1 << 7))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15.
+ * (Fixed: the macro body used 'p' but took no parameter, so any use
+ * would fail to compile or silently capture an unrelated 'p'.)
+ */
+#define HCC_MAX_PSA(p)         (1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p)                XHCI_HCC_EXT_CAPS(p)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define        DBOFF_MASK      (~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define        RTSOFF_MASK     (~0x1f)
+
+
+/* Number of registers per port */
+#define        NUM_PORT_REGS   4
+
+/**
+ * struct xhci_op_regs - xHCI Host Controller Operational Registers.
+ * @command:           USBCMD - xHC command register
+ * @status:            USBSTS - xHC status register
+ * @page_size:         This indicates the page size that the host controller
+ *                     supports.  If bit n is set, the HC supports a page size
+ *                     of 2^(n+12), up to a 128MB page size.
+ *                     4K is the minimum page size.
+ * @cmd_ring:          CRP - 64-bit Command Ring Pointer, stored as two
+ *                     dwords (write the low dword first - see the note on
+ *                     64-bit address pointers at the top of this file).
+ * @dcbaa_ptr:         DCBAAP - 64-bit Device Context Base Address Array
+ *                     Pointer, also stored as two dwords.
+ * @config_reg:                CONFIG - Configure Register
+ * @port_status_base:  PORTSCn - base address for Port Status and Control
+ *                     Each port has a Port Status and Control register,
+ *                     followed by a Port Power Management Status and Control
+ *                     register, a Port Link Info register, and a reserved
+ *                     register.
+ * @port_power_base:   PORTPMSCn - base address for
+ *                     Port Power Management Status and Control
+ * @port_link_base:    PORTLIn - base address for Port Link Info (current
+ *                     Link PM state and control) for USB 2.1 and USB 3.0
+ *                     devices.
+ */
+struct xhci_op_regs {
+       u32     command;
+       u32     status;
+       u32     page_size;
+       u32     reserved1;
+       u32     reserved2;
+       u32     dev_notification;
+       u32     cmd_ring[2];
+       /* rsvd: offset 0x20-2F */
+       u32     reserved3[4];
+       u32     dcbaa_ptr[2];
+       u32     config_reg;
+       /* rsvd: offset 0x3C-3FF */
+       u32     reserved4[241];
+       /* port 1 registers, which serve as a base address for other ports */
+       u32     port_status_base;
+       u32     port_power_base;
+       u32     port_link_base;
+       u32     reserved5;
+       /* registers for ports 2-255 */
+       u32     reserved6[NUM_PORT_REGS*254];
+};
+
+/* USBCMD - USB command - command bitmasks */
+/* start/stop HC execution - do not write unless HC is halted*/
+#define CMD_RUN                XHCI_CMD_RUN
+/* Reset HC - resets internal HC state machine and all registers (except
+ * PCI config regs).  HC does NOT drive a USB reset on the downstream ports.
+ * The xHCI driver must reinitialize the xHC after setting this bit.
+ */
+#define CMD_RESET      (1 << 1)
+/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
+#define CMD_EIE                XHCI_CMD_EIE
+/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
+#define CMD_HSEIE      XHCI_CMD_HSEIE
+/* bits 4:6 are reserved (and should be preserved on writes). */
+/* light reset (port status stays unchanged) - reset completed when this is 0 */
+#define CMD_LRESET     (1 << 7)
+/* FIXME: ignoring host controller save/restore state for now. */
+#define CMD_CSS                (1 << 8)
+#define CMD_CRS                (1 << 9)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define CMD_EWE                XHCI_CMD_EWE
+/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
+ * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
+ * '0' means the xHC can power it off if all ports are in the disconnect,
+ * disabled, or powered-off state.
+ */
+#define CMD_PM_INDEX   (1 << 11)
+/* bits 12:31 are reserved (and should be preserved on writes). */
+
+/* USBSTS - USB status - status bitmasks */
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT       XHCI_STS_HALT
+/* serious error, e.g. PCI parity error.  The HC will clear the run/stop bit. */
+#define STS_FATAL      (1 << 2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set*/
+#define STS_EINT       (1 << 3)
+/* port change detect */
+#define STS_PORT       (1 << 4)
+/* bits 5:7 reserved and zeroed */
+/* save state status - '1' means xHC is saving state */
+#define STS_SAVE       (1 << 8)
+/* restore state status - '1' means xHC is restoring state */
+#define STS_RESTORE    (1 << 9)
+/* true: save or restore error */
+#define STS_SRE                (1 << 10)
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define STS_CNR                XHCI_STS_CNR
+/* true: internal Host Controller Error - SW needs to reset and reinitialize */
+#define STS_HCE                (1 << 12)
+/* bits 13:31 reserved and should be preserved */
+
+/*
+ * DNCTRL - Device Notification Control Register - dev_notification bitmasks
+ * Generate a device notification event when the HC sees a transaction with a
+ * notification type that matches a bit set in this bit field.
+ */
+#define        DEV_NOTE_MASK           (0xffff)
+#define ENABLE_DEV_NOTE(x)     (1 << x)
+/* Most of the device notification types should only be used for debug.
+ * SW does need to pay attention to function wake notifications.
+ */
+#define        DEV_NOTE_FWAKE          ENABLE_DEV_NOTE(1)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
+/* bit 0 is the command ring cycle state */
+/* stop ring operation after completion of the currently executing command */
+#define CMD_RING_PAUSE         (1 << 1)
+/* stop ring immediately - abort the currently executing command */
+#define CMD_RING_ABORT         (1 << 2)
+/* true: command ring is running */
+#define CMD_RING_RUNNING       (1 << 3)
+/* bits 4:5 reserved and should be preserved */
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_ADDR_MASK     (0xffffffc0)
+
+/* CONFIG - Configure Register - config_reg bitmasks */
+/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
+#define MAX_DEVS(p)    ((p) & 0xff)
+/* bits 8:31 - reserved and should be preserved */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT   (1 << 0)
+/* true: port enabled */
+#define PORT_PE                (1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC                (1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET     (1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER     (1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't be initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK         (0xf << 10)
+#define        XDEV_FS                 (0x1 << 10)
+#define        XDEV_LS                 (0x2 << 10)
+#define        XDEV_HS                 (0x3 << 10)
+#define        XDEV_SS                 (0x4 << 10)
+#define DEV_UNDEFSPEED(p)      (((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p)       (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p)                (((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p)       (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p)      (((p) & DEV_SPEED_MASK) == XDEV_SS)
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define        SLOT_SPEED_FS           (XDEV_FS << 10)
+#define        SLOT_SPEED_LS           (XDEV_LS << 10)
+#define        SLOT_SPEED_HS           (XDEV_HS << 10)
+#define        SLOT_SPEED_SS           (XDEV_SS << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF   (0 << 14)
+#define PORT_LED_AMBER (1 << 14)
+#define PORT_LED_GREEN (2 << 14)
+#define PORT_LED_MASK  (3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE       (1 << 16)
+/* true: connect status change */
+#define PORT_CSC       (1 << 17)
+/* true: port enable change */
+#define PORT_PEC       (1 << 18)
+/* true: warm reset for a USB 3.0 device is done.  A "hot" reset puts the port
+ * into an enabled state, and the device into the default state.  A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC       (1 << 19)
+/* true: over-current change */
+#define PORT_OCC       (1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC                (1 << 21)
+/* port link status change - set on some port link state transitions:
+ *  Transition                         Reason
+ *  ------------------------------------------------------------------------------
+ *  - U3 to Resume                     Wakeup signaling from a device
+ *  - Resume to Recovery to U0         USB 3.0 device resume
+ *  - Resume to U0                     USB 2.0 device resume
+ *  - U3 to Recovery to U0             Software resume of USB 3.0 device complete
+ *  - U3 to U0                         Software resume of USB 2.0 device complete
+ *  - U2 to U0                         L1 resume of USB 2.1 device complete
+ *  - U0 to U0 (???)                   L1 entry rejection by USB 2.1 device
+ *  - U0 to disabled                   L1 entry error with USB 2.1 device
+ *  - Any state to inactive            Error on USB 3.0 port
+ */
+#define PORT_PLC       (1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC       (1 << 23)
+/* bit 24 reserved */
+/* wake on connect (enable) */
+#define PORT_WKCONN_E  (1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E  (1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E    (1 << 27)
+/* bits 28:29 reserved */
+/* true: device is removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE        (1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR                (1 << 31)
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us.  0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p)     ((p) & 0xff)
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p)     (((p) & 0xff) << 8)
+/* Bits 24:31 for port testing */
+
+
+/**
+ * struct xhci_intr_reg - Interrupt Register Set
+ * @irq_pending:       IMAN - Interrupt Management Register.  Used to enable
+ *                     interrupts and check for pending interrupts.
+ * @irq_control:       IMOD - Interrupt Moderation Register.
+ *                     Used to throttle interrupts.
+ * @erst_size:         Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base:         ERST base address - a 64-bit register stored as two
+ *                     dwords (low dword first, per the note at the top of
+ *                     this file).
+ * @erst_dequeue:      Event ring dequeue pointer, also a 64-bit register
+ *                     stored as two dwords.
+ *
+ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it.  The event ring is comprised of
+ * multiple segments of the same size.  The HC places events on the ring and
+ * "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The HCD (Linux) processes those events and
+ * updates the dequeue pointer.
+ */
+struct xhci_intr_reg {
+       u32     irq_pending;
+       u32     irq_control;
+       u32     erst_size;
+       u32     rsvd;
+       u32     erst_base[2];
+       u32     erst_dequeue[2];
+};
+
+/* irq_pending bitmasks */
+#define        ER_IRQ_PENDING(p)       ((p) & 0x1)
+/* bits 2:31 need to be preserved */
+/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
+#define        ER_IRQ_CLEAR(p)         ((p) & 0xfffffffe)
+#define        ER_IRQ_ENABLE(p)        ((ER_IRQ_CLEAR(p)) | 0x2)
+#define        ER_IRQ_DISABLE(p)       ((ER_IRQ_CLEAR(p)) & ~(0x2))
+
+/* irq_control bitmasks */
+/* Minimum interval between interrupts (in 250ns intervals).  The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define ER_IRQ_INTERVAL_MASK   (0xffff)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define ER_IRQ_COUNTER_MASK    (0xffff << 16)
+
+/* erst_size bitmasks */
+/* Preserve bits 16:31 of erst_size */
+#define        ERST_SIZE_MASK          (0xffff << 16)
+
+/* erst_dequeue bitmasks */
+/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies.  This is an optional HW hint.
+ */
+#define ERST_DESI_MASK         (0x7)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
+ * a work queue (or delayed service routine)?
+ */
+#define ERST_EHB               (1 << 3)
+#define ERST_PTR_MASK          (0xf)
+
+/**
+ * struct xhci_run_regs - xHCI Host Controller Runtime Registers.
+ * @microframe_index:
+ *             MFINDEX - current microframe number
+ * @ir_set:    array of Interrupt Register Sets, one per interrupter
+ *             (i.e. per MSI-X vector - see struct xhci_intr_reg above).
+ *
+ * Section 5.5 Host Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct xhci_run_regs {
+       u32                     microframe_index;
+       u32                     rsvd[7];
+       struct xhci_intr_reg    ir_set[128];
+};
+
+/**
+ * struct xhci_doorbell_array - Doorbell Array registers.
+ * @doorbell:  one 32-bit doorbell register per entry.
+ *             NOTE(review): entry 0 appears to be the host controller
+ *             doorbell (see DB_TARGET_HOST below) with one doorbell per
+ *             device slot after it - confirm against section 5.6.
+ *
+ * Section 5.6
+ */
+struct xhci_doorbell_array {
+       u32     doorbell[256];
+};
+
+#define        DB_TARGET_MASK          0xFFFFFF00
+#define        DB_STREAM_ID_MASK       0x0000FFFF
+#define        DB_TARGET_HOST          0x0
+#define        DB_STREAM_ID_HOST       0x0
+#define        DB_MASK                 (0xff << 8)
+
+/* Endpoint Target - bits 0:7 */
+#define EPI_TO_DB(p)           (((p) + 1) & 0xff)
+
+
+/**
+ * struct xhci_slot_ctx
+ * @dev_info:  Route string, device speed, hub info, and last valid endpoint
+ * @dev_info2: Max exit latency for device number, root hub port number
+ * @tt_info:   tt_info is used to construct split transaction tokens
+ * @dev_state: slot state and device address
+ *
+ * Slot Context - section 6.2.1.1.  This assumes the HC uses 32-byte context
+ * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the slot context for HC internal use.
+ */
+struct xhci_slot_ctx {
+       u32     dev_info;
+       u32     dev_info2;
+       u32     tt_info;
+       u32     dev_state;
+       /* offset 0x10 to 0x1f reserved for HC internal use */
+       u32     reserved[4];
+};
+
+/* dev_info bitmasks */
+/* Route String - 0:19 */
+#define ROUTE_STRING_MASK      (0xfffff)
+/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
+#define DEV_SPEED      (0xf << 20)
+/* bit 24 reserved */
+/* Is this LS/FS device connected through a HS hub? - bit 25 */
+#define DEV_MTT                (0x1 << 25)
+/* Set if the device is a hub - bit 26 */
+#define DEV_HUB                (0x1 << 26)
+/* Index of the last valid endpoint context in this device context - 27:31 */
+#define LAST_CTX_MASK  (0x1f << 27)
+#define LAST_CTX(p)    ((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p)  (((p) >> 27) - 1)
+#define SLOT_FLAG      (1 << 0)
+#define EP0_FLAG       (1 << 1)
+
+/* dev_info2 bitmasks */
+/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
+#define MAX_EXIT       (0xffff)
+/* Root hub port number that is needed to access the USB device */
+#define ROOT_HUB_PORT(p)       (((p) & 0xff) << 16)
+
+/* tt_info bitmasks */
+/*
+ * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
+ * The Slot ID of the hub that isolates the high speed signaling from
+ * this low or full-speed device.  '0' if attached to root hub port.
+ */
+#define TT_SLOT                (0xff)
+/*
+ * The number of the downstream facing port of the high-speed hub
+ * '0' if the device is not low or full speed.
+ */
+#define TT_PORT                (0xff << 8)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the HC */
+#define DEV_ADDR_MASK  (0xff)
+/* bits 8:26 reserved */
+/* Slot state */
+#define SLOT_STATE     (0x1f << 27)
+#define GET_SLOT_STATE(p)      (((p) & (0x1f << 27)) >> 27)
+
+
+/**
+ * struct xhci_ep_ctx
+ * @ep_info:   endpoint state, streams, mult, and interval information.
+ * @ep_info2:  information on endpoint type, max packet size, max burst size,
+ *             error count, and whether the HC will force an event for all
+ *             transactions.
+ * @deq:       64-bit ring dequeue pointer address.  If the endpoint only
+ *             defines one stream, this points to the endpoint transfer ring.
+ *             Otherwise, it points to a stream context array, which has a
+ *             ring pointer for each flow.
+ * @tx_info:
+ *             Average TRB lengths for the endpoint ring and
+ *             max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - section 6.2.1.2.  This assumes the HC uses 32-byte context
+ * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
+ * reserved at the end of the endpoint context for HC internal use.
+ */
+struct xhci_ep_ctx {
+       u32     ep_info;
+       u32     ep_info2;
+       u32     deq[2];
+       u32     tx_info;
+       /* offset 0x14 - 0x1f reserved for HC internal use */
+       u32     reserved[3];
+};
+
+/* ep_info bitmasks */
+/*
+ * Endpoint State - bits 0:2
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition - ok to manipulate endpoint ring
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK          (0xf)
+#define EP_STATE_DISABLED      0
+#define EP_STATE_RUNNING       1
+#define EP_STATE_HALTED                2
+#define EP_STATE_STOPPED       3
+#define EP_STATE_ERROR         4
+/* Mult - Max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p)             ((p & 0x3) << 8)
+/* bits 10:14 are Max Primary Streams */
+/* bit 15 is Linear Stream Array */
+/* Interval - period between requests to an endpoint - 125u increments. */
+#define EP_INTERVAL(p)         ((p & 0xff) << 16)
+
+/* ep_info2 bitmasks */
+/*
+ * Force Event - generate transfer events for all TRBs for this endpoint
+ * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
+ */
+#define        FORCE_EVENT     (0x1)
+#define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define EP_TYPE(p)     ((p) << 3)
+#define ISOC_OUT_EP    1
+#define BULK_OUT_EP    2
+#define INT_OUT_EP     3
+#define CTRL_EP                4
+#define ISOC_IN_EP     5
+#define BULK_IN_EP     6
+#define INT_IN_EP      7
+/* bit 6 reserved */
+/* bit 7 is Host Initiate Disable - for disabling stream selection */
+#define MAX_BURST(p)   (((p)&0xff) << 8)
+#define MAX_PACKET(p)  (((p)&0xffff) << 16)
+
+
+/**
+ * struct xhci_device_control
+ * Input/Output context; see section 6.2.5.
+ *
+ * @drop_flags:        set the bit of the endpoint context you want to disable
+ * @add_flags:         set the bit of the endpoint context you want to enable
+ */
+struct xhci_device_control {
+       u32     drop_flags;
+       u32     add_flags;
+       u32     rsvd[6];
+       struct xhci_slot_ctx    slot;
+       struct xhci_ep_ctx      ep[31];
+};
+
+/* drop context bitmasks */
+#define        DROP_EP(x)      (0x1 << x)
+/* add context bitmasks */
+#define        ADD_EP(x)       (0x1 << x)
+
+
+struct xhci_virt_device {
+       /*
+        * Commands to the hardware are passed an "input context" that
+        * tells the hardware what to change in its data structures.
+        * The hardware will return changes in an "output context" that
+        * software must allocate for the hardware.  We need to keep
+        * track of input and output contexts separately because
+        * these commands might fail and we don't trust the hardware.
+        */
+       struct xhci_device_control      *out_ctx;
+       dma_addr_t                      out_ctx_dma;
+       /* Used for addressing devices and configuration changes */
+       struct xhci_device_control      *in_ctx;
+       dma_addr_t                      in_ctx_dma;
+       /* FIXME when stream support is added */
+       struct xhci_ring                *ep_rings[31];
+       /* Temporary storage in case the configure endpoint command fails and we
+        * have to restore the device state to the previous state
+        */
+       struct xhci_ring                *new_ep_rings[31];
+       struct completion               cmd_completion;
+       /* Status of the last command issued for this device */
+       u32                             cmd_status;
+};
+
+
+/**
+ * struct xhci_device_context_array
+ * @dev_context_ptrs   array of 64-bit DMA addresses for device contexts
+ */
+struct xhci_device_context_array {
+       /* 64-bit device addresses; we only write 32-bit addresses */
+       u32                     dev_context_ptrs[2*MAX_HC_SLOTS];
+       /* private xHCD pointers */
+       dma_addr_t      dma;
+};
+/* TODO: write function to set the 64-bit device DMA address */
+/*
+ * TODO: change this to be dynamically sized at HC mem init time since the HC
+ * might not be able to handle the maximum number of devices possible.
+ */
+
+
+struct xhci_stream_ctx {
+       /* 64-bit stream ring address, cycle state, and stream type */
+       u32     stream_ring[2];
+       /* offset 0x14 - 0x1f reserved for HC internal use */
+       u32     reserved[2];
+};
+
+
+struct xhci_transfer_event {
+       /* 64-bit buffer address, or immediate data */
+       u32     buffer[2];
+       u32     transfer_len;
+       /* This field is interpreted differently based on the type of TRB */
+       u32     flags;
+};
+
+/** Transfer Event bit fields **/
+#define        TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
+
+/* Completion Code - only applicable for some types of TRBs */
+#define        COMP_CODE_MASK          (0xff << 24)
+#define GET_COMP_CODE(p)       (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_SUCCESS   1
+/* Data Buffer Error */
+#define COMP_DB_ERR    2
+/* Babble Detected Error */
+#define COMP_BABBLE    3
+/* USB Transaction Error */
+#define COMP_TX_ERR    4
+/* TRB Error - some TRB field is invalid */
+#define COMP_TRB_ERR   5
+/* Stall Error - USB device is stalled */
+#define COMP_STALL     6
+/* Resource Error - HC doesn't have memory for that device configuration */
+#define COMP_ENOMEM    7
+/* Bandwidth Error - not enough room in schedule for this dev config */
+#define COMP_BW_ERR    8
+/* No Slots Available Error - HC ran out of device slots */
+#define COMP_ENOSLOTS  9
+/* Invalid Stream Type Error */
+#define COMP_STREAM_ERR        10
+/* Slot Not Enabled Error - doorbell rung for disabled device slot */
+#define COMP_EBADSLT   11
+/* Endpoint Not Enabled Error */
+#define COMP_EBADEP    12
+/* Short Packet */
+#define COMP_SHORT_TX  13
+/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
+#define COMP_UNDERRUN  14
+/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
+#define COMP_OVERRUN   15
+/* Virtual Function Event Ring Full Error */
+#define COMP_VF_FULL   16
+/* Parameter Error - Context parameter is invalid */
+#define COMP_EINVAL    17
+/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
+#define COMP_BW_OVER   18
+/* Context State Error - illegal context state transition requested */
+#define COMP_CTX_STATE 19
+/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
+#define COMP_PING_ERR  20
+/* Event Ring is full */
+#define COMP_ER_FULL   21
+/* Missed Service Error - HC couldn't service an isoc ep within interval */
+#define COMP_MISSED_INT        23
+/* Successfully stopped command ring */
+#define COMP_CMD_STOP  24
+/* Successfully aborted current command and stopped command ring */
+#define COMP_CMD_ABORT 25
+/* Stopped - transfer was terminated by a stop endpoint command */
+#define COMP_STOP      26
+/* Same as COMP_STOP, but the transferred length in the event is invalid */
+#define COMP_STOP_INVAL        27
+/* Control Abort Error - Debug Capability - control pipe aborted */
+#define COMP_DBG_ABORT 28
+/* TRB type 29 and 30 reserved */
+/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
+#define COMP_BUFF_OVER 31
+/* Event Lost Error - xHC has an "internal event overrun condition" */
+#define COMP_ISSUES    32
+/* Undefined Error - reported when other error codes don't apply */
+#define COMP_UNKNOWN   33
+/* Invalid Stream ID Error */
+#define COMP_STRID_ERR 34
+/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+/* FIXME - check for this */
+#define COMP_2ND_BW_ERR        35
+/* Split Transaction Error */
+#define        COMP_SPLIT_ERR  36
+
+struct xhci_link_trb {
+       /* 64-bit segment pointer*/
+       u32 segment_ptr[2];
+       u32 intr_target;
+       u32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE    (0x1<<1)
+
+/* Command completion event TRB */
+struct xhci_event_cmd {
+       /* Pointer to command TRB, or the value passed by the event data trb */
+       u32 cmd_trb[2];
+       u32 status;
+       u32 flags;
+};
+
+/* flags bitmasks */
+/* bits 16:23 are the virtual function ID */
+/* bits 24:31 are the slot ID */
+#define TRB_TO_SLOT_ID(p)      (((p) & (0xff<<24)) >> 24)
+#define SLOT_ID_FOR_TRB(p)     (((p) & 0xff) << 24)
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
+#define TRB_TO_EP_INDEX(p)             ((((p) & (0x1f << 16)) >> 16) - 1)
+#define        EP_ID_FOR_TRB(p)                ((((p) + 1) & 0x1f) << 16)
+
+
+/* Port Status Change Event TRB fields */
+/* Port ID - bits 31:24 */
+#define GET_PORT_ID(p)         (((p) & (0xff << 24)) >> 24)
+
+/* Normal TRB fields */
+/* transfer_len bitmasks - bits 0:16 */
+#define        TRB_LEN(p)              ((p) & 0x1ffff)
+/* TD size - number of bytes remaining in the TD (including this TRB):
+ * bits 17 - 21.  Shift the number of bytes by 10. */
+#define TD_REMAINDER(p)                ((((p) >> 10) & 0x1f) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at */
+#define TRB_INTR_TARGET(p)     (((p) & 0x3ff) << 22)
+#define GET_INTR_TARGET(p)     (((p) >> 22) & 0x3ff)
+
+/* Cycle bit - indicates TRB ownership by HC or HCD */
+#define TRB_CYCLE              (1<<0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT                        (1<<1)
+/* Interrupt on short packet */
+#define TRB_ISP                        (1<<2)
+/* Set PCIe no snoop attribute */
+#define TRB_NO_SNOOP           (1<<3)
+/* Chain multiple TRBs into a TD */
+#define TRB_CHAIN              (1<<4)
+/* Interrupt on completion */
+#define TRB_IOC                        (1<<5)
+/* The buffer pointer contains immediate data */
+#define TRB_IDT                        (1<<6)
+
+
+/* Control transfer TRB specific fields */
+#define TRB_DIR_IN             (1<<16)
+
+struct xhci_generic_trb {
+       u32 field[4];
+};
+
+union xhci_trb {
+       struct xhci_link_trb            link;
+       struct xhci_transfer_event      trans_event;
+       struct xhci_event_cmd           event_cmd;
+       struct xhci_generic_trb         generic;
+};
+
+/* TRB bit mask */
+#define        TRB_TYPE_BITMASK        (0xfc00)
+#define TRB_TYPE(p)            ((p) << 10)
+/* TRB type IDs */
+/* bulk, interrupt, isoc scatter/gather, and control data stage */
+#define TRB_NORMAL             1
+/* setup stage for control transfers */
+#define TRB_SETUP              2
+/* data stage for control transfers */
+#define TRB_DATA               3
+/* status stage for control transfers */
+#define TRB_STATUS             4
+/* isoc transfers */
+#define TRB_ISOC               5
+/* TRB for linking ring segments */
+#define TRB_LINK               6
+#define TRB_EVENT_DATA         7
+/* Transfer Ring No-op (not for the command ring) */
+#define TRB_TR_NOOP            8
+/* Command TRBs */
+/* Enable Slot Command */
+#define TRB_ENABLE_SLOT                9
+/* Disable Slot Command */
+#define TRB_DISABLE_SLOT       10
+/* Address Device Command */
+#define TRB_ADDR_DEV           11
+/* Configure Endpoint Command */
+#define TRB_CONFIG_EP          12
+/* Evaluate Context Command */
+#define TRB_EVAL_CONTEXT       13
+/* Reset Transfer Ring Command */
+#define TRB_RESET_RING         14
+/* Stop Transfer Ring Command */
+#define TRB_STOP_RING          15
+/* Set Transfer Ring Dequeue Pointer Command */
+#define TRB_SET_DEQ            16
+/* Reset Device Command */
+#define TRB_RESET_DEV          17
+/* Force Event Command (opt) */
+#define TRB_FORCE_EVENT                18
+/* Negotiate Bandwidth Command (opt) */
+#define TRB_NEG_BANDWIDTH      19
+/* Set Latency Tolerance Value Command (opt) */
+#define TRB_SET_LT             20
+/* Get port bandwidth Command */
+#define TRB_GET_BW             21
+/* Force Header Command - generate a transaction or link management packet */
+#define TRB_FORCE_HEADER       22
+/* No-op Command - not for transfer rings */
+#define TRB_CMD_NOOP           23
+/* TRB IDs 24-31 reserved */
+/* Event TRBS */
+/* Transfer Event */
+#define TRB_TRANSFER           32
+/* Command Completion Event */
+#define TRB_COMPLETION         33
+/* Port Status Change Event */
+#define TRB_PORT_STATUS                34
+/* Bandwidth Request Event (opt) */
+#define TRB_BANDWIDTH_EVENT    35
+/* Doorbell Event (opt) */
+#define TRB_DOORBELL           36
+/* Host Controller Event */
+#define TRB_HC_EVENT           37
+/* Device Notification Event - device sent function wake notification */
+#define TRB_DEV_NOTE           38
+/* MFINDEX Wrap Event - microframe counter wrapped */
+#define TRB_MFINDEX_WRAP       39
+/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4,
+ * since the command ring is 64-byte aligned.
+ * It must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT       64
+#define SEGMENT_SIZE           (TRBS_PER_SEGMENT*16)
+/* TRB buffer pointers can't cross 64KB boundaries */
+#define TRB_MAX_BUFF_SHIFT             16
+#define TRB_MAX_BUFF_SIZE      (1 << TRB_MAX_BUFF_SHIFT)
+
+struct xhci_segment {
+       union xhci_trb          *trbs;
+       /* private to HCD */
+       struct xhci_segment     *next;
+       dma_addr_t              dma;
+};
+
+struct xhci_td {
+       struct list_head        td_list;
+       struct list_head        cancelled_td_list;
+       struct urb              *urb;
+       struct xhci_segment     *start_seg;
+       union xhci_trb          *first_trb;
+       union xhci_trb          *last_trb;
+};
+
+struct xhci_ring {
+       struct xhci_segment     *first_seg;
+       union  xhci_trb         *enqueue;
+       struct xhci_segment     *enq_seg;
+       unsigned int            enq_updates;
+       union  xhci_trb         *dequeue;
+       struct xhci_segment     *deq_seg;
+       unsigned int            deq_updates;
+       struct list_head        td_list;
+       /* ----  Related to URB cancellation ---- */
+       struct list_head        cancelled_td_list;
+       unsigned int            cancels_pending;
+       unsigned int            state;
+#define SET_DEQ_PENDING                (1 << 0)
+       /* The TRB that was last reported in a stopped endpoint ring */
+       union xhci_trb          *stopped_trb;
+       struct xhci_td          *stopped_td;
+       /*
+        * Write the cycle state into the TRB cycle field to give ownership of
+        * the TRB to the host controller (if we are the producer), or to check
+        * if we own the TRB (if we are the consumer).  See section 4.9.1.
+        */
+       u32                     cycle_state;
+};
+
+struct xhci_erst_entry {
+       /* 64-bit event ring segment address */
+       u32     seg_addr[2];
+       u32     seg_size;
+       /* Set to zero */
+       u32     rsvd;
+};
+
+struct xhci_erst {
+       struct xhci_erst_entry  *entries;
+       unsigned int            num_entries;
+       /* xhci->event_ring keeps track of segment dma addresses */
+       dma_addr_t              erst_dma_addr;
+       /* Num entries the ERST can contain */
+       unsigned int            erst_size;
+};
+
+/*
+ * Each segment table entry is 4*32bits long.  1K seems like an ok size:
+ * (1K bytes * 8 bits/byte) / (4*32 bits) = 64 segment entries in the table,
+ * meaning 64 ring segments.
+ * Initial number of event ring segments allocated */
+#define        ERST_NUM_SEGS   1
+/* Initial allocated size of the ERST, in number of entries */
+#define        ERST_SIZE       64
+/* Initial number of event segment rings allocated */
+#define        ERST_ENTRIES    1
+/* Poll every 60 seconds */
+#define        POLL_TIMEOUT    60
+/* XXX: Make these module parameters */
+
+
+/* There is one xhci_hcd structure per controller */
+struct xhci_hcd {
+       /* glue to PCI and HCD framework */
+       struct xhci_cap_regs __iomem *cap_regs;
+       struct xhci_op_regs __iomem *op_regs;
+       struct xhci_run_regs __iomem *run_regs;
+       struct xhci_doorbell_array __iomem *dba;
+       /* Our HCD's current interrupter register set */
+       struct  xhci_intr_reg __iomem *ir_set;
+
+       /* Cached register copies of read-only HC data */
+       __u32           hcs_params1;
+       __u32           hcs_params2;
+       __u32           hcs_params3;
+       __u32           hcc_params;
+
+       spinlock_t      lock;
+
+       /* packed release number */
+       u8              sbrn;
+       u16             hci_version;
+       u8              max_slots;
+       u8              max_interrupters;
+       u8              max_ports;
+       u8              isoc_threshold;
+       int             event_ring_max;
+       int             addr_64;
+       /* 4KB min, 128MB max */
+       int             page_size;
+       /* Valid values are 12 to 20, inclusive */
+       int             page_shift;
+       /* only one MSI vector for now, but might need more later */
+       int             msix_count;
+       struct msix_entry       *msix_entries;
+       /* data structures */
+       struct xhci_device_context_array *dcbaa;
+       struct xhci_ring        *cmd_ring;
+       struct xhci_ring        *event_ring;
+       struct xhci_erst        erst;
+       /* slot enabling and address device helpers */
+       struct completion       addr_dev;
+       int slot_id;
+       /* Internal mirror of the HW's dcbaa */
+       struct xhci_virt_device *devs[MAX_HC_SLOTS];
+
+       /* DMA pools */
+       struct dma_pool *device_pool;
+       struct dma_pool *segment_pool;
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+       /* Poll the rings - for debugging */
+       struct timer_list       event_ring_timer;
+       int                     zombie;
+#endif
+       /* Statistics */
+       int                     noops_submitted;
+       int                     noops_handled;
+       int                     error_bitmask;
+};
+
+/* For testing purposes */
+#define NUM_TEST_NOOPS 0
+
+/* convert between an HCD pointer and the corresponding xhci_hcd */
+static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
+{
+       return (struct xhci_hcd *) (hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
+{
+       return container_of((void *) xhci, struct usb_hcd, hcd_priv);
+}
+
+#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
+#define XHCI_DEBUG     1
+#else
+#define XHCI_DEBUG     0
+#endif
+
+#define xhci_dbg(xhci, fmt, args...) \
+       do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_info(xhci, fmt, args...) \
+       do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+#define xhci_err(xhci, fmt, args...) \
+       dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn(xhci, fmt, args...) \
+       dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+
+/* TODO: copied from ehci.h - can be refactored? */
+/* xHCI spec says all registers are little endian */
+static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
+               __u32 __iomem *regs)
+{
+       return readl(regs);
+}
+static inline void xhci_writel(struct xhci_hcd *xhci,
+               const unsigned int val, __u32 __iomem *regs)
+{
+       if (!in_interrupt())
+               xhci_dbg(xhci,
+                        "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
+                        regs, val);
+       writel(val, regs);
+}
+
+/* xHCI debugging */
+void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_registers(struct xhci_hcd *xhci);
+void xhci_dbg_regs(struct xhci_hcd *xhci);
+void xhci_print_run_regs(struct xhci_hcd *xhci);
+void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
+void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
+void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
+void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
+void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
+
+/* xHCI memory management */
+void xhci_mem_cleanup(struct xhci_hcd *xhci);
+int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
+int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
+int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
+unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
+unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
+void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
+int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+               struct usb_device *udev, struct usb_host_endpoint *ep,
+               gfp_t mem_flags);
+void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+
+#ifdef CONFIG_PCI
+/* xHCI PCI glue */
+int xhci_register_pci(void);
+void xhci_unregister_pci(void);
+#endif
+
+/* xHCI host controller glue */
+int xhci_halt(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci);
+int xhci_init(struct usb_hcd *hcd);
+int xhci_run(struct usb_hcd *hcd);
+void xhci_stop(struct usb_hcd *hcd);
+void xhci_shutdown(struct usb_hcd *hcd);
+int xhci_get_frame(struct usb_hcd *hcd);
+irqreturn_t xhci_irq(struct usb_hcd *hcd);
+int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
+int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
+int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+
+/* xHCI ring, segment, TRB, and TD functions */
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+void xhci_ring_cmd_db(struct xhci_hcd *xhci);
+void *xhci_setup_one_noop(struct xhci_hcd *xhci);
+void xhci_handle_event(struct xhci_hcd *xhci);
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+               u32 slot_id);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+               unsigned int ep_index);
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+               int slot_id, unsigned int ep_index);
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+               int slot_id, unsigned int ep_index);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+               u32 slot_id);
+
+/* xHCI roothub code */
+int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
+               char *buf, u16 wLength);
+int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+
+#endif /* __LINUX_XHCI_HCD_H */
index a4ef77ef917d9bc6be7f2b29bff6a58ba6d105e1..3c5fe5cee05abab286dcc2f00d6cc1f70b031e06 100644 (file)
@@ -726,12 +726,18 @@ static const struct file_operations iowarrior_fops = {
        .poll = iowarrior_poll,
 };
 
+static char *iowarrior_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 /*
  * usb class driver info in order to get a minor number from the usb core,
  * and to have the device registered with devfs and the driver core
  */
 static struct usb_class_driver iowarrior_class = {
        .name = "iowarrior%d",
+       .nodename = iowarrior_nodename,
        .fops = &iowarrior_fops,
        .minor_base = IOWARRIOR_MINOR_BASE,
 };
index ab0f3226158b0d6ed735c041931a6b80ae869afb..c1e2433f640d133da219152b7f8ad7389ac51471 100644 (file)
@@ -266,12 +266,18 @@ static const struct file_operations tower_fops = {
        .llseek =       tower_llseek,
 };
 
+static char *legousbtower_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
+}
+
 /*
  * usb class driver info in order to get a minor number from the usb core,
  * and to have the device registered with the driver core
  */
 static struct usb_class_driver tower_class = {
        .name =         "legousbtower%d",
+       .nodename =     legousbtower_nodename,
        .fops =         &tower_fops,
        .minor_base =   LEGO_USB_TOWER_MINOR_BASE,
 };
index 7603cbe0865d1268811c717dcaadb0eb7224ce09..30ea7ca6846e2a9cd3c36ecf182b9b934ca2a338 100644 (file)
@@ -1,7 +1,7 @@
 
 config USB_SISUSBVGA
        tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
-       depends on USB && USB_EHCI_HCD
+       depends on USB && (USB_MUSB_HDRC || USB_EHCI_HCD)
         ---help---
          Say Y here if you intend to attach a USB2VGA dongle based on a
          Net2280 and a SiS315 chip.
index 5f1a19d1497d108a41a23a7d2d73898bc20cf289..a9f06d76960ffa936858adaf321d5c53d39605dd 100644 (file)
@@ -1072,23 +1072,34 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
         */
        msleep (jiffies % (2 * INTERRUPT_RATE));
        if (async) {
-retry:
-               retval = usb_unlink_urb (urb);
-               if (retval == -EBUSY || retval == -EIDRM) {
-                       /* we can't unlink urbs while they're completing.
-                        * or if they've completed, and we haven't resubmitted.
-                        * "normal" drivers would prevent resubmission, but
-                        * since we're testing unlink paths, we can't.
-                        */
-                       ERROR(dev,  "unlink retry\n");
-                       goto retry;
+               while (!completion_done(&completion)) {
+                       retval = usb_unlink_urb(urb);
+
+                       switch (retval) {
+                       case -EBUSY:
+                       case -EIDRM:
+                               /* we can't unlink urbs while they're completing
+                                * or if they've completed, and we haven't
+                                * resubmitted. "normal" drivers would prevent
+                                * resubmission, but since we're testing unlink
+                                * paths, we can't.
+                                */
+                               ERROR(dev, "unlink retry\n");
+                               continue;
+                       case 0:
+                       case -EINPROGRESS:
+                               break;
+
+                       default:
+                               dev_err(&dev->intf->dev,
+                                       "unlink fail %d\n", retval);
+                               return retval;
+                       }
+
+                       break;
                }
        } else
                usb_kill_urb (urb);
-       if (!(retval == 0 || retval == -EINPROGRESS)) {
-               dev_err(&dev->intf->dev, "unlink fail %d\n", retval);
-               return retval;
-       }
 
        wait_for_completion (&completion);
        retval = urb->status;
index 1f715436d6d3dec14a96639aa6abde814c9818b4..a7eb4c99342c91d0fabdb8a6d9beab5e09b093b9 100644 (file)
@@ -733,7 +733,7 @@ int __init mon_text_init(void)
 {
        struct dentry *mondir;
 
-       mondir = debugfs_create_dir("usbmon", NULL);
+       mondir = debugfs_create_dir("usbmon", usb_debug_root);
        if (IS_ERR(mondir)) {
                printk(KERN_NOTICE TAG ": debugfs is not available\n");
                return -ENODEV;
index b66e8544d8b90761a80a11a5a2c524284dd0711b..70073b157f0ab7e875ab1fdad3bedaa4e97e5c8c 100644 (file)
@@ -10,6 +10,7 @@ comment "Enable Host or Gadget support to see Inventra options"
 config USB_MUSB_HDRC
        depends on (USB || USB_GADGET) && HAVE_CLK
        depends on !SUPERH
+       select NOP_USB_XCEIV if ARCH_DAVINCI
        select TWL4030_USB if MACH_OMAP_3430SDP
        select USB_OTG_UTILS
        tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
@@ -55,6 +56,7 @@ comment "Blackfin high speed USB Support"
 config USB_TUSB6010
        boolean "TUSB 6010 support"
        depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+       select NOP_USB_XCEIV
        default y
        help
          The TUSB 6010 chip, from Texas Instruments, connects a discrete
index 786134852092cc0b324584577db51e5f8d054939..f2f66ebc73626a28265fe1f0dd0a6887fb10038b 100644 (file)
@@ -143,7 +143,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
        u16 val;
 
        spin_lock_irqsave(&musb->lock, flags);
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_A_IDLE:
        case OTG_STATE_A_WAIT_BCON:
                /* Start a new session */
@@ -154,7 +154,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
                val = musb_readw(musb->mregs, MUSB_DEVCTL);
                if (!(val & MUSB_DEVCTL_BDEVICE)) {
                        gpio_set_value(musb->config->gpio_vrsel, 1);
-                       musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+                       musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
                } else {
                        gpio_set_value(musb->config->gpio_vrsel, 0);
 
@@ -247,6 +247,11 @@ int __init musb_platform_init(struct musb *musb)
        }
        gpio_direction_output(musb->config->gpio_vrsel, 0);
 
+       usb_nop_xceiv_register();
+       musb->xceiv = otg_get_transceiver();
+       if (!musb->xceiv)
+               return -ENODEV;
+
        if (ANOMALY_05000346) {
                bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
                SSYNC();
@@ -291,7 +296,7 @@ int __init musb_platform_init(struct musb *musb)
                        musb_conn_timer_handler, (unsigned long) musb);
        }
        if (is_peripheral_enabled(musb))
-               musb->xceiv.set_power = bfin_set_power;
+               musb->xceiv->set_power = bfin_set_power;
 
        musb->isr = blackfin_interrupt;
 
index 1976e9b41800b27f6b47925bed29a04872b62aa0..c3577bbbae6c76722b5d9e7699f6045c8baf6cbc 100644 (file)
@@ -6,6 +6,7 @@
  * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
  */
 
+#include <linux/platform_device.h>
 #include <linux/usb.h>
 
 #include "musb_core.h"
@@ -1145,17 +1146,27 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
        return completed;
 }
 
-void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+irqreturn_t cppi_interrupt(int irq, void *dev_id)
 {
-       void __iomem            *tibase;
-       int                     i, index;
+       struct musb             *musb = dev_id;
        struct cppi             *cppi;
+       void __iomem            *tibase;
        struct musb_hw_ep       *hw_ep = NULL;
+       u32                     rx, tx;
+       int                     i, index;
 
        cppi = container_of(musb->dma_controller, struct cppi, controller);
 
        tibase = musb->ctrl_base;
 
+       tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+       rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+       if (!tx && !rx)
+               return IRQ_NONE;
+
+       DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
+
        /* process TX channels */
        for (index = 0; tx; tx = tx >> 1, index++) {
                struct cppi_channel             *tx_ch;
@@ -1273,6 +1284,8 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
 
        /* write to CPPI EOI register to re-enable interrupts */
        musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+
+       return IRQ_HANDLED;
 }
 
 /* Instantiate a software object representing a DMA controller. */
@@ -1280,6 +1293,9 @@ struct dma_controller *__init
 dma_controller_create(struct musb *musb, void __iomem *mregs)
 {
        struct cppi             *controller;
+       struct device           *dev = musb->controller;
+       struct platform_device  *pdev = to_platform_device(dev);
+       int                     irq = platform_get_irq(pdev, 1);
 
        controller = kzalloc(sizeof *controller, GFP_KERNEL);
        if (!controller)
@@ -1310,6 +1326,15 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
                return NULL;
        }
 
+       if (irq > 0) {
+               if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
+                       dev_err(dev, "request_irq %d failed!\n", irq);
+                       dma_controller_destroy(&controller->controller);
+                       return NULL;
+               }
+               controller->irq = irq;
+       }
+
        return &controller->controller;
 }
 
@@ -1322,6 +1347,9 @@ void dma_controller_destroy(struct dma_controller *c)
 
        cppi = container_of(c, struct cppi, controller);
 
+       if (cppi->irq)
+               free_irq(cppi->irq, cppi->musb);
+
        /* assert:  caller stopped the controller first */
        dma_pool_destroy(cppi->pool);
 
index 729b4071787b3425a44e8f112c0704a4af9c5819..8a39de3e6e471c9c8f8c7b72f5b515218cb816c4 100644 (file)
@@ -119,6 +119,8 @@ struct cppi {
        void __iomem                    *mregs;         /* Mentor regs */
        void __iomem                    *tibase;        /* TI/CPPI regs */
 
+       int                             irq;
+
        struct cppi_channel             tx[4];
        struct cppi_channel             rx[4];
 
@@ -127,7 +129,7 @@ struct cppi {
        struct list_head                tx_complete;
 };
 
-/* irq handling hook */
-extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+/* CPPI IRQ handler */
+extern irqreturn_t cppi_interrupt(int, void *);
 
 #endif                         /* end of ifndef _CPPI_DMA_H_ */
index 10d11ab113ab3c38b50b87172ed2a910c746e242..180d7daa4099d50231f81f92c8ce537ab72dcbcc 100644 (file)
@@ -215,7 +215,7 @@ static void otg_timer(unsigned long _musb)
        DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
 
        spin_lock_irqsave(&musb->lock, flags);
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_A_WAIT_VFALL:
                /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
                 * seems to mis-handle session "start" otherwise (or in our
@@ -226,7 +226,7 @@ static void otg_timer(unsigned long _musb)
                        mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
                        break;
                }
-               musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+               musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
                musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
                        MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
                break;
@@ -251,7 +251,7 @@ static void otg_timer(unsigned long _musb)
                if (devctl & MUSB_DEVCTL_BDEVICE)
                        mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
                else
-                       musb->xceiv.state = OTG_STATE_A_IDLE;
+                       musb->xceiv->state = OTG_STATE_A_IDLE;
                break;
        default:
                break;
@@ -265,6 +265,7 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
        irqreturn_t     retval = IRQ_NONE;
        struct musb     *musb = __hci;
        void __iomem    *tibase = musb->ctrl_base;
+       struct cppi     *cppi;
        u32             tmp;
 
        spin_lock_irqsave(&musb->lock, flags);
@@ -281,16 +282,9 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
        /* CPPI interrupts share the same IRQ line, but have their own
         * mask, state, "vector", and EOI registers.
         */
-       if (is_cppi_enabled()) {
-               u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
-               u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
-
-               if (cppi_tx || cppi_rx) {
-                       DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
-                       cppi_completion(musb, cppi_rx, cppi_tx);
-                       retval = IRQ_HANDLED;
-               }
-       }
+       cppi = container_of(musb->dma_controller, struct cppi, controller);
+       if (is_cppi_enabled() && musb->dma_controller && !cppi->irq)
+               retval = cppi_interrupt(irq, __hci);
 
        /* ack and handle non-CPPI interrupts */
        tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
@@ -331,21 +325,21 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
                         * to stop registering in devctl.
                         */
                        musb->int_usb &= ~MUSB_INTR_VBUSERROR;
-                       musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+                       musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
                        mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
                        WARNING("VBUS error workaround (delay coming)\n");
                } else if (is_host_enabled(musb) && drvvbus) {
                        musb->is_active = 1;
                        MUSB_HST_MODE(musb);
-                       musb->xceiv.default_a = 1;
-                       musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+                       musb->xceiv->default_a = 1;
+                       musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
                        portstate(musb->port1_status |= USB_PORT_STAT_POWER);
                        del_timer(&otg_workaround);
                } else {
                        musb->is_active = 0;
                        MUSB_DEV_MODE(musb);
-                       musb->xceiv.default_a = 0;
-                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                       musb->xceiv->default_a = 0;
+                       musb->xceiv->state = OTG_STATE_B_IDLE;
                        portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
                }
 
@@ -367,17 +361,12 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
 
        /* poll for ID change */
        if (is_otg_enabled(musb)
-                       && musb->xceiv.state == OTG_STATE_B_IDLE)
+                       && musb->xceiv->state == OTG_STATE_B_IDLE)
                mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
 
        spin_unlock_irqrestore(&musb->lock, flags);
 
-       /* REVISIT we sometimes get unhandled IRQs
-        * (e.g. ep0).  not clear why...
-        */
-       if (retval != IRQ_HANDLED)
-               DBG(5, "unhandled? %08x\n", tmp);
-       return IRQ_HANDLED;
+       return retval;
 }
 
 int musb_platform_set_mode(struct musb *musb, u8 mode)
@@ -391,6 +380,11 @@ int __init musb_platform_init(struct musb *musb)
        void __iomem    *tibase = musb->ctrl_base;
        u32             revision;
 
+       usb_nop_xceiv_register();
+       musb->xceiv = otg_get_transceiver();
+       if (!musb->xceiv)
+               return -ENODEV;
+
        musb->mregs += DAVINCI_BASE_OFFSET;
 
        clk_enable(musb->clock);
@@ -398,7 +392,7 @@ int __init musb_platform_init(struct musb *musb)
        /* returns zero if e.g. not clocked */
        revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
        if (revision == 0)
-               return -ENODEV;
+               goto fail;
 
        if (is_host_enabled(musb))
                setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
@@ -432,6 +426,10 @@ int __init musb_platform_init(struct musb *musb)
 
        musb->isr = davinci_interrupt;
        return 0;
+
+fail:
+       usb_nop_xceiv_unregister();
+       return -ENODEV;
 }
 
 int musb_platform_exit(struct musb *musb)
@@ -442,7 +440,7 @@ int musb_platform_exit(struct musb *musb)
        davinci_source_power(musb, 0 /*off*/, 1);
 
        /* delay, to avoid problems with module reload */
-       if (is_host_enabled(musb) && musb->xceiv.default_a) {
+       if (is_host_enabled(musb) && musb->xceiv->default_a) {
                int     maxdelay = 30;
                u8      devctl, warn = 0;
 
@@ -471,5 +469,7 @@ int musb_platform_exit(struct musb *musb)
 
        clk_disable(musb->clock);
 
+       usb_nop_xceiv_unregister();
+
        return 0;
 }
index 4000cf6d1e819a3cacd857b4316dd6efeeebe752..554a414f65d1104cbbbeeaed2885f2a209faf42d 100644 (file)
 #include "davinci.h"
 #endif
 
+#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
 
 
 unsigned musb_debug;
@@ -267,7 +268,7 @@ void musb_load_testpacket(struct musb *musb)
 
 const char *otg_state_string(struct musb *musb)
 {
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_A_IDLE:          return "a_idle";
        case OTG_STATE_A_WAIT_VRISE:    return "a_wait_vrise";
        case OTG_STATE_A_WAIT_BCON:     return "a_wait_bcon";
@@ -287,12 +288,6 @@ const char *otg_state_string(struct musb *musb)
 
 #ifdef CONFIG_USB_MUSB_OTG
 
-/*
- * See also USB_OTG_1-3.pdf 6.6.5 Timers
- * REVISIT: Are the other timers done in the hardware?
- */
-#define TB_ASE0_BRST           100     /* Min 3.125 ms */
-
 /*
  * Handles OTG hnp timeouts, such as b_ase0_brst
  */
@@ -302,16 +297,18 @@ void musb_otg_timer_func(unsigned long data)
        unsigned long   flags;
 
        spin_lock_irqsave(&musb->lock, flags);
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_B_WAIT_ACON:
                DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
                musb_g_disconnect(musb);
-               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+               musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                musb->is_active = 0;
                break;
+       case OTG_STATE_A_SUSPEND:
        case OTG_STATE_A_WAIT_BCON:
-               DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
-               musb_hnp_stop(musb);
+               DBG(1, "HNP: %s timeout\n", otg_state_string(musb));
+               musb_set_vbus(musb, 0);
+               musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
                break;
        default:
                DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
@@ -320,10 +317,8 @@ void musb_otg_timer_func(unsigned long data)
        spin_unlock_irqrestore(&musb->lock, flags);
 }
 
-static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
-
 /*
- * Stops the B-device HNP state. Caller must take care of locking.
+ * Stops the HNP transition. Caller must take care of locking.
  */
 void musb_hnp_stop(struct musb *musb)
 {
@@ -331,20 +326,17 @@ void musb_hnp_stop(struct musb *musb)
        void __iomem    *mbase = musb->mregs;
        u8      reg;
 
-       switch (musb->xceiv.state) {
+       DBG(1, "HNP: stop from %s\n", otg_state_string(musb));
+
+       switch (musb->xceiv->state) {
        case OTG_STATE_A_PERIPHERAL:
-       case OTG_STATE_A_WAIT_VFALL:
-       case OTG_STATE_A_WAIT_BCON:
-               DBG(1, "HNP: Switching back to A-host\n");
                musb_g_disconnect(musb);
-               musb->xceiv.state = OTG_STATE_A_IDLE;
-               MUSB_HST_MODE(musb);
-               musb->is_active = 0;
+               DBG(1, "HNP: back to %s\n", otg_state_string(musb));
                break;
        case OTG_STATE_B_HOST:
                DBG(1, "HNP: Disabling HR\n");
                hcd->self.is_b_host = 0;
-               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+               musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                MUSB_DEV_MODE(musb);
                reg = musb_readb(mbase, MUSB_POWER);
                reg |= MUSB_POWER_SUSPENDM;
@@ -402,7 +394,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 
                if (devctl & MUSB_DEVCTL_HM) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
-                       switch (musb->xceiv.state) {
+                       switch (musb->xceiv->state) {
                        case OTG_STATE_A_SUSPEND:
                                /* remote wakeup?  later, GetPortStatus
                                 * will stop RESUME signaling
@@ -425,12 +417,12 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                                musb->rh_timer = jiffies
                                                + msecs_to_jiffies(20);
 
-                               musb->xceiv.state = OTG_STATE_A_HOST;
+                               musb->xceiv->state = OTG_STATE_A_HOST;
                                musb->is_active = 1;
                                usb_hcd_resume_root_hub(musb_to_hcd(musb));
                                break;
                        case OTG_STATE_B_WAIT_ACON:
-                               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+                               musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                                musb->is_active = 1;
                                MUSB_DEV_MODE(musb);
                                break;
@@ -441,11 +433,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                        }
 #endif
                } else {
-                       switch (musb->xceiv.state) {
+                       switch (musb->xceiv->state) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
                        case OTG_STATE_A_SUSPEND:
                                /* possibly DISCONNECT is upcoming */
-                               musb->xceiv.state = OTG_STATE_A_HOST;
+                               musb->xceiv->state = OTG_STATE_A_HOST;
                                usb_hcd_resume_root_hub(musb_to_hcd(musb));
                                break;
 #endif
@@ -490,7 +482,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                 */
                musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
                musb->ep0_stage = MUSB_EP0_START;
-               musb->xceiv.state = OTG_STATE_A_IDLE;
+               musb->xceiv->state = OTG_STATE_A_IDLE;
                MUSB_HST_MODE(musb);
                musb_set_vbus(musb, 1);
 
@@ -516,7 +508,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
                 * make trouble here, keeping VBUS < 4.4V ?
                 */
-               switch (musb->xceiv.state) {
+               switch (musb->xceiv->state) {
                case OTG_STATE_A_HOST:
                        /* recovery is dicey once we've gotten past the
                         * initial stages of enumeration, but if VBUS
@@ -594,37 +586,40 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                if (devctl & MUSB_DEVCTL_LSDEV)
                        musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
 
-               if (hcd->status_urb)
-                       usb_hcd_poll_rh_status(hcd);
-               else
-                       usb_hcd_resume_root_hub(hcd);
-
-               MUSB_HST_MODE(musb);
-
                /* indicate new connection to OTG machine */
-               switch (musb->xceiv.state) {
+               switch (musb->xceiv->state) {
                case OTG_STATE_B_PERIPHERAL:
                        if (int_usb & MUSB_INTR_SUSPEND) {
                                DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
-                               musb->xceiv.state = OTG_STATE_B_HOST;
-                               hcd->self.is_b_host = 1;
                                int_usb &= ~MUSB_INTR_SUSPEND;
+                               goto b_host;
                        } else
                                DBG(1, "CONNECT as b_peripheral???\n");
                        break;
                case OTG_STATE_B_WAIT_ACON:
-                       DBG(1, "HNP: Waiting to switch to b_host state\n");
-                       musb->xceiv.state = OTG_STATE_B_HOST;
+                       DBG(1, "HNP: CONNECT, now b_host\n");
+b_host:
+                       musb->xceiv->state = OTG_STATE_B_HOST;
                        hcd->self.is_b_host = 1;
+                       musb->ignore_disconnect = 0;
+                       del_timer(&musb->otg_timer);
                        break;
                default:
                        if ((devctl & MUSB_DEVCTL_VBUS)
                                        == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
-                               musb->xceiv.state = OTG_STATE_A_HOST;
+                               musb->xceiv->state = OTG_STATE_A_HOST;
                                hcd->self.is_b_host = 0;
                        }
                        break;
                }
+
+               /* poke the root hub */
+               MUSB_HST_MODE(musb);
+               if (hcd->status_urb)
+                       usb_hcd_poll_rh_status(hcd);
+               else
+                       usb_hcd_resume_root_hub(hcd);
+
                DBG(1, "CONNECT (%s) devctl %02x\n",
                                otg_state_string(musb), devctl);
        }
@@ -650,7 +645,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                        }
                } else if (is_peripheral_capable()) {
                        DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
-                       switch (musb->xceiv.state) {
+                       switch (musb->xceiv->state) {
 #ifdef CONFIG_USB_OTG
                        case OTG_STATE_A_SUSPEND:
                                /* We need to ignore disconnect on suspend
@@ -661,24 +656,27 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                                musb_g_reset(musb);
                                /* FALLTHROUGH */
                        case OTG_STATE_A_WAIT_BCON:     /* OPT TD.4.7-900ms */
-                               DBG(1, "HNP: Setting timer as %s\n",
-                                               otg_state_string(musb));
-                               musb_otg_timer.data = (unsigned long)musb;
-                               mod_timer(&musb_otg_timer, jiffies
-                                       + msecs_to_jiffies(100));
+                               /* never use invalid T(a_wait_bcon) */
+                               DBG(1, "HNP: in %s, %d msec timeout\n",
+                                               otg_state_string(musb),
+                                               TA_WAIT_BCON(musb));
+                               mod_timer(&musb->otg_timer, jiffies
+                                       + msecs_to_jiffies(TA_WAIT_BCON(musb)));
                                break;
                        case OTG_STATE_A_PERIPHERAL:
-                               musb_hnp_stop(musb);
+                               musb->ignore_disconnect = 0;
+                               del_timer(&musb->otg_timer);
+                               musb_g_reset(musb);
                                break;
                        case OTG_STATE_B_WAIT_ACON:
                                DBG(1, "HNP: RESET (%s), to b_peripheral\n",
                                        otg_state_string(musb));
-                               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+                               musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                                musb_g_reset(musb);
                                break;
 #endif
                        case OTG_STATE_B_IDLE:
-                               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+                               musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                                /* FALLTHROUGH */
                        case OTG_STATE_B_PERIPHERAL:
                                musb_g_reset(musb);
@@ -763,7 +761,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
                                MUSB_MODE(musb), devctl);
                handled = IRQ_HANDLED;
 
-               switch (musb->xceiv.state) {
+               switch (musb->xceiv->state) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
                case OTG_STATE_A_HOST:
                case OTG_STATE_A_SUSPEND:
@@ -776,7 +774,16 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
 #endif /* HOST */
 #ifdef CONFIG_USB_MUSB_OTG
                case OTG_STATE_B_HOST:
-                       musb_hnp_stop(musb);
+                       /* REVISIT this behaves for "real disconnect"
+                        * cases; make sure the other transitions
+                        * from B_HOST act right too.  The B_HOST code
+                        * in hnp_stop() is currently not used...
+                        */
+                       musb_root_disconnect(musb);
+                       musb_to_hcd(musb)->self.is_b_host = 0;
+                       musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+                       MUSB_DEV_MODE(musb);
+                       musb_g_disconnect(musb);
                        break;
                case OTG_STATE_A_PERIPHERAL:
                        musb_hnp_stop(musb);
@@ -805,26 +812,35 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
                                otg_state_string(musb), devctl, power);
                handled = IRQ_HANDLED;
 
-               switch (musb->xceiv.state) {
+               switch (musb->xceiv->state) {
 #ifdef CONFIG_USB_MUSB_OTG
                case OTG_STATE_A_PERIPHERAL:
-                       /*
-                        * We cannot stop HNP here, devctl BDEVICE might be
-                        * still set.
+                       /* We also come here if the cable is removed, since
+                        * this silicon doesn't report ID-no-longer-grounded.
+                        *
+                        * We depend on T(a_wait_bcon) to shut us down, and
+                        * hope users don't do anything dicey during this
+                        * undesired detour through A_WAIT_BCON.
                         */
+                       musb_hnp_stop(musb);
+                       usb_hcd_resume_root_hub(musb_to_hcd(musb));
+                       musb_root_disconnect(musb);
+                       musb_platform_try_idle(musb, jiffies
+                                       + msecs_to_jiffies(musb->a_wait_bcon
+                                               ? : OTG_TIME_A_WAIT_BCON));
                        break;
 #endif
                case OTG_STATE_B_PERIPHERAL:
                        musb_g_suspend(musb);
                        musb->is_active = is_otg_enabled(musb)
-                                       && musb->xceiv.gadget->b_hnp_enable;
+                                       && musb->xceiv->gadget->b_hnp_enable;
                        if (musb->is_active) {
 #ifdef CONFIG_USB_MUSB_OTG
-                               musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+                               musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
                                DBG(1, "HNP: Setting timer for b_ase0_brst\n");
-                               musb_otg_timer.data = (unsigned long)musb;
-                               mod_timer(&musb_otg_timer, jiffies
-                                       + msecs_to_jiffies(TB_ASE0_BRST));
+                               mod_timer(&musb->otg_timer, jiffies
+                                       + msecs_to_jiffies(
+                                                       OTG_TIME_B_ASE0_BRST));
 #endif
                        }
                        break;
@@ -834,9 +850,9 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
                                        + msecs_to_jiffies(musb->a_wait_bcon));
                        break;
                case OTG_STATE_A_HOST:
-                       musb->xceiv.state = OTG_STATE_A_SUSPEND;
+                       musb->xceiv->state = OTG_STATE_A_SUSPEND;
                        musb->is_active = is_otg_enabled(musb)
-                                       && musb->xceiv.host->b_hnp_enable;
+                                       && musb->xceiv->host->b_hnp_enable;
                        break;
                case OTG_STATE_B_HOST:
                        /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
@@ -1068,14 +1084,13 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
 { .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
 { .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
 { .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
-{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 512, },
-{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 512, },
-{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 512, },
-{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 512, },
-{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 512, },
-{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 512, },
-{ .hw_ep_num = 13, .style = FIFO_TX,   .maxpacket = 512, },
-{ .hw_ep_num = 13, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
+{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
+{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
+{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
+{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
+{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
 };
@@ -1335,11 +1350,11 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
        }
        if (reg & MUSB_CONFIGDATA_HBRXE) {
                strcat(aInfo, ", HB-ISO Rx");
-               strcat(aInfo, " (X)");          /* no driver support */
+               musb->hb_iso_rx = true;
        }
        if (reg & MUSB_CONFIGDATA_HBTXE) {
                strcat(aInfo, ", HB-ISO Tx");
-               strcat(aInfo, " (X)");          /* no driver support */
+               musb->hb_iso_tx = true;
        }
        if (reg & MUSB_CONFIGDATA_SOFTCONE)
                strcat(aInfo, ", SoftConn");
@@ -1481,13 +1496,7 @@ static irqreturn_t generic_interrupt(int irq, void *__hci)
 
        spin_unlock_irqrestore(&musb->lock, flags);
 
-       /* REVISIT we sometimes get spurious IRQs on g_ep0
-        * not clear why...
-        */
-       if (retval != IRQ_HANDLED)
-               DBG(5, "spurious?\n");
-
-       return IRQ_HANDLED;
+       return retval;
 }
 
 #else
@@ -1687,8 +1696,9 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
        }
 
        spin_lock_irqsave(&musb->lock, flags);
-       musb->a_wait_bcon = val;
-       if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+       /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
+       musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
+       if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON)
                musb->is_active = 0;
        musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
        spin_unlock_irqrestore(&musb->lock, flags);
@@ -1706,10 +1716,13 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
 
        spin_lock_irqsave(&musb->lock, flags);
        val = musb->a_wait_bcon;
+       /* FIXME get_vbus_status() is normally #defined as false...
+        * and is effectively TUSB-specific.
+        */
        vbus = musb_platform_get_vbus_status(musb);
        spin_unlock_irqrestore(&musb->lock, flags);
 
-       return sprintf(buf, "Vbus %s, timeout %lu\n",
+       return sprintf(buf, "Vbus %s, timeout %lu msec\n",
                        vbus ? "on" : "off", val);
 }
 static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
@@ -1749,8 +1762,8 @@ static void musb_irq_work(struct work_struct *data)
        struct musb *musb = container_of(data, struct musb, irq_work);
        static int old_state;
 
-       if (musb->xceiv.state != old_state) {
-               old_state = musb->xceiv.state;
+       if (musb->xceiv->state != old_state) {
+               old_state = musb->xceiv->state;
                sysfs_notify(&musb->controller->kobj, NULL, "mode");
        }
 }
@@ -1782,6 +1795,7 @@ allocate_instance(struct device *dev,
        hcd->uses_new_polling = 1;
 
        musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+       musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
 #else
        musb = kzalloc(sizeof *musb, GFP_KERNEL);
        if (!musb)
@@ -1847,7 +1861,7 @@ static void musb_free(struct musb *musb)
        }
 
 #ifdef CONFIG_USB_MUSB_OTG
-       put_device(musb->xceiv.dev);
+       put_device(musb->xceiv->dev);
 #endif
 
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
@@ -1928,10 +1942,18 @@ bad_config:
                }
        }
 
-       /* assume vbus is off */
-
-       /* platform adjusts musb->mregs and musb->isr if needed,
-        * and activates clocks
+       /* The musb_platform_init() call:
+        *   - adjusts musb->mregs and musb->isr if needed,
+        *   - may initialize an integrated tranceiver
+        *   - initializes musb->xceiv, usually by otg_get_transceiver()
+        *   - activates clocks.
+        *   - stops powering VBUS
+        *   - assigns musb->board_set_vbus if host mode is enabled
+        *
+        * There are various transciever configurations.  Blackfin,
+        * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
+        * external/discrete ones in various flavors (twl4030 family,
+        * isp1504, non-OTG, etc) mostly hooking up through ULPI.
         */
        musb->isr = generic_interrupt;
        status = musb_platform_init(musb);
@@ -1968,6 +1990,10 @@ bad_config:
        if (status < 0)
                goto fail2;
 
+#ifdef CONFIG_USB_OTG
+       setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
+#endif
+
        /* Init IRQ workqueue before request_irq */
        INIT_WORK(&musb->irq_work, musb_irq_work);
 
@@ -1999,17 +2025,17 @@ bad_config:
                                ? "DMA" : "PIO",
                        musb->nIrq);
 
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-       /* host side needs more setup, except for no-host modes */
-       if (musb->board_mode != MUSB_PERIPHERAL) {
+       /* host side needs more setup */
+       if (is_host_enabled(musb)) {
                struct usb_hcd  *hcd = musb_to_hcd(musb);
 
-               if (musb->board_mode == MUSB_OTG)
+               otg_set_host(musb->xceiv, &hcd->self);
+
+               if (is_otg_enabled(musb))
                        hcd->self.otg_port = 1;
-               musb->xceiv.host = &hcd->self;
+               musb->xceiv->host = &hcd->self;
                hcd->power_budget = 2 * (plat->power ? : 250);
        }
-#endif                         /* CONFIG_USB_MUSB_HDRC_HCD */
 
        /* For the host-only role, we can activate right away.
         * (We expect the ID pin to be forcibly grounded!!)
@@ -2017,8 +2043,8 @@ bad_config:
         */
        if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
                MUSB_HST_MODE(musb);
-               musb->xceiv.default_a = 1;
-               musb->xceiv.state = OTG_STATE_A_IDLE;
+               musb->xceiv->default_a = 1;
+               musb->xceiv->state = OTG_STATE_A_IDLE;
 
                status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
                if (status)
@@ -2033,8 +2059,8 @@ bad_config:
 
        } else /* peripheral is enabled */ {
                MUSB_DEV_MODE(musb);
-               musb->xceiv.default_a = 0;
-               musb->xceiv.state = OTG_STATE_B_IDLE;
+               musb->xceiv->default_a = 0;
+               musb->xceiv->state = OTG_STATE_B_IDLE;
 
                status = musb_gadget_setup(musb);
                if (status)
index efb39b5e55b5e62493dc64ea8d05ab15d10074b8..f3772ca3b2cf799569389870e60a159732f5ca70 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp_lock.h>
 #include <linux/errno.h>
+#include <linux/timer.h>
 #include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/usb/ch9.h>
@@ -171,7 +172,8 @@ enum musb_h_ep0_state {
 
 /* peripheral side ep0 states */
 enum musb_g_ep0_state {
-       MUSB_EP0_STAGE_SETUP,           /* idle, waiting for setup */
+       MUSB_EP0_STAGE_IDLE,            /* idle, waiting for SETUP */
+       MUSB_EP0_STAGE_SETUP,           /* received SETUP */
        MUSB_EP0_STAGE_TX,              /* IN data */
        MUSB_EP0_STAGE_RX,              /* OUT data */
        MUSB_EP0_STAGE_STATUSIN,        /* (after OUT data) */
@@ -179,10 +181,15 @@ enum musb_g_ep0_state {
        MUSB_EP0_STAGE_ACKWAIT,         /* after zlp, before statusin */
 } __attribute__ ((packed));
 
-/* OTG protocol constants */
+/*
+ * OTG protocol constants.  See USB OTG 1.3 spec,
+ * sections 5.5 "Device Timings" and 6.6.5 "Timers".
+ */
 #define OTG_TIME_A_WAIT_VRISE  100             /* msec (max) */
-#define OTG_TIME_A_WAIT_BCON   0               /* 0=infinite; min 1000 msec */
-#define OTG_TIME_A_IDLE_BDIS   200             /* msec (min) */
+#define OTG_TIME_A_WAIT_BCON   1100            /* min 1 second */
+#define OTG_TIME_A_AIDL_BDIS   200             /* min 200 msec */
+#define OTG_TIME_B_ASE0_BRST   100             /* min 3.125 ms */
+
 
 /*************************** REGISTER ACCESS ********************************/
 
@@ -331,6 +338,8 @@ struct musb {
        struct list_head        control;        /* of musb_qh */
        struct list_head        in_bulk;        /* of musb_qh */
        struct list_head        out_bulk;       /* of musb_qh */
+
+       struct timer_list       otg_timer;
 #endif
 
        /* called with IRQs blocked; ON/nonzero implies starting a session,
@@ -355,7 +364,7 @@ struct musb {
        u16                     int_rx;
        u16                     int_tx;
 
-       struct otg_transceiver  xceiv;
+       struct otg_transceiver  *xceiv;
 
        int nIrq;
        unsigned                irq_wake:1;
@@ -386,6 +395,9 @@ struct musb {
        unsigned is_multipoint:1;
        unsigned ignore_disconnect:1;   /* during bus resets */
 
+       unsigned                hb_iso_rx:1;    /* high bandwidth iso rx? */
+       unsigned                hb_iso_tx:1;    /* high bandwidth iso tx? */
+
 #ifdef C_MP_TX
        unsigned bulk_split:1;
 #define        can_bulk_split(musb,type) \
index f79440cdfe7ee2e346ae59cd2e164618c7d2a3f7..8b3c4e2ed7b865aab0636bf18c6d5b7d0138da0f 100644 (file)
@@ -310,7 +310,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
                        /* setup DMA, then program endpoint CSR */
                        request_size = min(request->length,
                                                musb_ep->dma->max_len);
-                       if (request_size <= musb_ep->packet_sz)
+                       if (request_size < musb_ep->packet_sz)
                                musb_ep->dma->desired_mode = 0;
                        else
                                musb_ep->dma->desired_mode = 1;
@@ -349,7 +349,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #elif defined(CONFIG_USB_TI_CPPI_DMA)
                /* program endpoint CSR first, then setup DMA */
                csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
-               csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+               csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
+                      MUSB_TXCSR_MODE;
                musb_writew(epio, MUSB_TXCSR,
                        (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
                                | csr);
@@ -1405,7 +1406,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
 
        spin_lock_irqsave(&musb->lock, flags);
 
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_B_PERIPHERAL:
                /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
                 * that's part of the standard usb 1.1 state machine, and
@@ -1507,9 +1508,9 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
 {
        struct musb     *musb = gadget_to_musb(gadget);
 
-       if (!musb->xceiv.set_power)
+       if (!musb->xceiv->set_power)
                return -EOPNOTSUPP;
-       return otg_set_power(&musb->xceiv, mA);
+       return otg_set_power(musb->xceiv, mA);
 }
 
 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
@@ -1732,11 +1733,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
 
                spin_lock_irqsave(&musb->lock, flags);
 
-               /* REVISIT always use otg_set_peripheral(), handling
-                * issues including the root hub one below ...
-                */
-               musb->xceiv.gadget = &musb->g;
-               musb->xceiv.state = OTG_STATE_B_IDLE;
+               otg_set_peripheral(musb->xceiv, &musb->g);
                musb->is_active = 1;
 
                /* FIXME this ignores the softconnect flag.  Drivers are
@@ -1748,6 +1745,8 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
                if (!is_otg_enabled(musb))
                        musb_start(musb);
 
+               otg_set_peripheral(musb->xceiv, &musb->g);
+
                spin_unlock_irqrestore(&musb->lock, flags);
 
                if (is_otg_enabled(musb)) {
@@ -1761,8 +1760,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
                        if (retval < 0) {
                                DBG(1, "add_hcd failed, %d\n", retval);
                                spin_lock_irqsave(&musb->lock, flags);
-                               musb->xceiv.gadget = NULL;
-                               musb->xceiv.state = OTG_STATE_UNDEFINED;
+                               otg_set_peripheral(musb->xceiv, NULL);
                                musb->gadget_driver = NULL;
                                musb->g.dev.driver = NULL;
                                spin_unlock_irqrestore(&musb->lock, flags);
@@ -1845,8 +1843,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 
                (void) musb_gadget_vbus_draw(&musb->g, 0);
 
-               musb->xceiv.state = OTG_STATE_UNDEFINED;
+               musb->xceiv->state = OTG_STATE_UNDEFINED;
                stop_activity(musb, driver);
+               otg_set_peripheral(musb->xceiv, NULL);
 
                DBG(3, "unregistering driver %s\n", driver->function);
                spin_unlock_irqrestore(&musb->lock, flags);
@@ -1882,7 +1881,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver);
 void musb_g_resume(struct musb *musb)
 {
        musb->is_suspended = 0;
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_B_IDLE:
                break;
        case OTG_STATE_B_WAIT_ACON:
@@ -1908,10 +1907,10 @@ void musb_g_suspend(struct musb *musb)
        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
        DBG(3, "devctl %02x\n", devctl);
 
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_B_IDLE:
                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
-                       musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+                       musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                break;
        case OTG_STATE_B_PERIPHERAL:
                musb->is_suspended = 1;
@@ -1957,22 +1956,24 @@ void musb_g_disconnect(struct musb *musb)
                spin_lock(&musb->lock);
        }
 
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        default:
 #ifdef CONFIG_USB_MUSB_OTG
                DBG(2, "Unhandled disconnect %s, setting a_idle\n",
                        otg_state_string(musb));
-               musb->xceiv.state = OTG_STATE_A_IDLE;
+               musb->xceiv->state = OTG_STATE_A_IDLE;
+               MUSB_HST_MODE(musb);
                break;
        case OTG_STATE_A_PERIPHERAL:
-               musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+               musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
+               MUSB_HST_MODE(musb);
                break;
        case OTG_STATE_B_WAIT_ACON:
        case OTG_STATE_B_HOST:
 #endif
        case OTG_STATE_B_PERIPHERAL:
        case OTG_STATE_B_IDLE:
-               musb->xceiv.state = OTG_STATE_B_IDLE;
+               musb->xceiv->state = OTG_STATE_B_IDLE;
                break;
        case OTG_STATE_B_SRP_INIT:
                break;
@@ -2028,10 +2029,10 @@ __acquires(musb->lock)
         * or else after HNP, as A-Device
         */
        if (devctl & MUSB_DEVCTL_BDEVICE) {
-               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+               musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
                musb->g.is_a_peripheral = 0;
        } else if (is_otg_enabled(musb)) {
-               musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+               musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
                musb->g.is_a_peripheral = 1;
        } else
                WARN_ON(1);
index 3f5e30ddfa275abc7a4523eeb45001164df9a0e0..40ed50ecedff6ed84a72d4342616a6f07e36f362 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright 2005 Mentor Graphics Corporation
  * Copyright (C) 2005-2006 by Texas Instruments
  * Copyright (C) 2006-2007 Nokia Corporation
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -58,7 +59,8 @@
 static char *decode_ep0stage(u8 stage)
 {
        switch (stage) {
-       case MUSB_EP0_STAGE_SETUP:      return "idle";
+       case MUSB_EP0_STAGE_IDLE:       return "idle";
+       case MUSB_EP0_STAGE_SETUP:      return "setup";
        case MUSB_EP0_STAGE_TX:         return "in";
        case MUSB_EP0_STAGE_RX:         return "out";
        case MUSB_EP0_STAGE_ACKWAIT:    return "wait";
@@ -628,7 +630,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
                musb_writew(regs, MUSB_CSR0,
                                csr & ~MUSB_CSR0_P_SENTSTALL);
                retval = IRQ_HANDLED;
-               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               musb->ep0_state = MUSB_EP0_STAGE_IDLE;
                csr = musb_readw(regs, MUSB_CSR0);
        }
 
@@ -636,7 +638,18 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
        if (csr & MUSB_CSR0_P_SETUPEND) {
                musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
                retval = IRQ_HANDLED;
-               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               /* Transition into the early status phase */
+               switch (musb->ep0_state) {
+               case MUSB_EP0_STAGE_TX:
+                       musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+                       break;
+               case MUSB_EP0_STAGE_RX:
+                       musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+                       break;
+               default:
+                       ERR("SetupEnd came in a wrong ep0stage %s",
+                           decode_ep0stage(musb->ep0_state));
+               }
                csr = musb_readw(regs, MUSB_CSR0);
                /* NOTE:  request may need completion */
        }
@@ -697,11 +710,31 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
                        if (req)
                                musb_g_ep0_giveback(musb, req);
                }
+
+               /*
+                * In case when several interrupts can get coalesced,
+                * check to see if we've already received a SETUP packet...
+                */
+               if (csr & MUSB_CSR0_RXPKTRDY)
+                       goto setup;
+
+               retval = IRQ_HANDLED;
+               musb->ep0_state = MUSB_EP0_STAGE_IDLE;
+               break;
+
+       case MUSB_EP0_STAGE_IDLE:
+               /*
+                * This state is typically (but not always) indiscernible
+                * from the status states since the corresponding interrupts
+                * tend to happen within too little period of time (with only
+                * a zero-length packet in between) and so get coalesced...
+                */
                retval = IRQ_HANDLED;
                musb->ep0_state = MUSB_EP0_STAGE_SETUP;
                /* FALLTHROUGH */
 
        case MUSB_EP0_STAGE_SETUP:
+setup:
                if (csr & MUSB_CSR0_RXPKTRDY) {
                        struct usb_ctrlrequest  setup;
                        int                     handled = 0;
@@ -783,7 +816,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
 stall:
                                DBG(3, "stall (%d)\n", handled);
                                musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
-                               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+                               musb->ep0_state = MUSB_EP0_STAGE_IDLE;
 finish:
                                musb_writew(regs, MUSB_CSR0,
                                                musb->ackpend);
@@ -803,7 +836,7 @@ finish:
                /* "can't happen" */
                WARN_ON(1);
                musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
-               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               musb->ep0_state = MUSB_EP0_STAGE_IDLE;
                break;
        }
 
@@ -959,7 +992,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
 
                csr |= MUSB_CSR0_P_SENDSTALL;
                musb_writew(regs, MUSB_CSR0, csr);
-               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               musb->ep0_state = MUSB_EP0_STAGE_IDLE;
                musb->ackpend = 0;
                break;
        default:
index db1b57415ec7ad2a443d35cce7a8202de2f4bfef..94a2a350a4141521e9e4d93426700ed3a31aaf6b 100644 (file)
@@ -181,6 +181,19 @@ static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
        musb_writew(ep->regs, MUSB_TXCSR, txcsr);
 }
 
+static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
+{
+       if (is_in != 0 || ep->is_shared_fifo)
+               ep->in_qh  = qh;
+       if (is_in == 0 || ep->is_shared_fifo)
+               ep->out_qh = qh;
+}
+
+static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
+{
+       return is_in ? ep->in_qh : ep->out_qh;
+}
+
 /*
  * Start the URB at the front of an endpoint's queue
  * end must be claimed from the caller.
@@ -210,7 +223,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
        case USB_ENDPOINT_XFER_CONTROL:
                /* control transfers always start with SETUP */
                is_in = 0;
-               hw_ep->out_qh = qh;
                musb->ep0_stage = MUSB_EP0_START;
                buf = urb->setup_packet;
                len = 8;
@@ -239,10 +251,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
                        epnum, buf + offset, len);
 
        /* Configure endpoint */
-       if (is_in || hw_ep->is_shared_fifo)
-               hw_ep->in_qh = qh;
-       else
-               hw_ep->out_qh = qh;
+       musb_ep_set_qh(hw_ep, is_in, qh);
        musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
 
        /* transmit may have more work: start it when it is time */
@@ -286,9 +295,8 @@ start:
        }
 }
 
-/* caller owns controller lock, irqs are blocked */
-static void
-__musb_giveback(struct musb *musb, struct urb *urb, int status)
+/* Context: caller owns controller lock, IRQs are blocked */
+static void musb_giveback(struct musb *musb, struct urb *urb, int status)
 __releases(musb->lock)
 __acquires(musb->lock)
 {
@@ -321,60 +329,57 @@ __acquires(musb->lock)
        spin_lock(&musb->lock);
 }
 
-/* for bulk/interrupt endpoints only */
-static inline void
-musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+/* For bulk/interrupt endpoints only */
+static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
+                                   struct urb *urb)
 {
-       struct usb_device       *udev = urb->dev;
+       void __iomem            *epio = qh->hw_ep->regs;
        u16                     csr;
-       void __iomem            *epio = ep->regs;
-       struct musb_qh          *qh;
 
-       /* FIXME:  the current Mentor DMA code seems to have
+       /*
+        * FIXME: the current Mentor DMA code seems to have
         * problems getting toggle correct.
         */
 
-       if (is_in || ep->is_shared_fifo)
-               qh = ep->in_qh;
+       if (is_in)
+               csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
        else
-               qh = ep->out_qh;
+               csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
 
-       if (!is_in) {
-               csr = musb_readw(epio, MUSB_TXCSR);
-               usb_settoggle(udev, qh->epnum, 1,
-                       (csr & MUSB_TXCSR_H_DATATOGGLE)
-                               ? 1 : 0);
-       } else {
-               csr = musb_readw(epio, MUSB_RXCSR);
-               usb_settoggle(udev, qh->epnum, 0,
-                       (csr & MUSB_RXCSR_H_DATATOGGLE)
-                               ? 1 : 0);
-       }
+       usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
 }
 
-/* caller owns controller lock, irqs are blocked */
-static struct musb_qh *
-musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+/*
+ * Advance this hardware endpoint's queue, completing the specified URB and
+ * advancing to either the next URB queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, IRQs are blocked
+ */
+static void musb_advance_schedule(struct musb *musb, struct urb *urb,
+                                 struct musb_hw_ep *hw_ep, int is_in)
 {
+       struct musb_qh          *qh = musb_ep_get_qh(hw_ep, is_in);
        struct musb_hw_ep       *ep = qh->hw_ep;
-       struct musb             *musb = ep->musb;
-       int                     is_in = usb_pipein(urb->pipe);
        int                     ready = qh->is_ready;
+       int                     status;
+
+       status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
 
        /* save toggle eagerly, for paranoia */
        switch (qh->type) {
        case USB_ENDPOINT_XFER_BULK:
        case USB_ENDPOINT_XFER_INT:
-               musb_save_toggle(ep, is_in, urb);
+               musb_save_toggle(qh, is_in, urb);
                break;
        case USB_ENDPOINT_XFER_ISOC:
-               if (status == 0 && urb->error_count)
+               if (urb->error_count)
                        status = -EXDEV;
                break;
        }
 
        qh->is_ready = 0;
-       __musb_giveback(musb, urb, status);
+       musb_giveback(musb, urb, status);
        qh->is_ready = ready;
 
        /* reclaim resources (and bandwidth) ASAP; deschedule it, and
@@ -388,11 +393,8 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
                else
                        ep->tx_reinit = 1;
 
-               /* clobber old pointers to this qh */
-               if (is_in || ep->is_shared_fifo)
-                       ep->in_qh = NULL;
-               else
-                       ep->out_qh = NULL;
+               /* Clobber old pointers to this qh */
+               musb_ep_set_qh(ep, is_in, NULL);
                qh->hep->hcpriv = NULL;
 
                switch (qh->type) {
@@ -421,36 +423,10 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
                        break;
                }
        }
-       return qh;
-}
-
-/*
- * Advance this hardware endpoint's queue, completing the specified urb and
- * advancing to either the next urb queued to that qh, or else invalidating
- * that qh and advancing to the next qh scheduled after the current one.
- *
- * Context: caller owns controller lock, irqs are blocked
- */
-static void
-musb_advance_schedule(struct musb *musb, struct urb *urb,
-               struct musb_hw_ep *hw_ep, int is_in)
-{
-       struct musb_qh  *qh;
-
-       if (is_in || hw_ep->is_shared_fifo)
-               qh = hw_ep->in_qh;
-       else
-               qh = hw_ep->out_qh;
-
-       if (urb->status == -EINPROGRESS)
-               qh = musb_giveback(qh, urb, 0);
-       else
-               qh = musb_giveback(qh, urb, urb->status);
 
        if (qh != NULL && qh->is_ready) {
                DBG(4, "... next ep%d %cX urb %p\n",
-                               hw_ep->epnum, is_in ? 'R' : 'T',
-                               next_urb(qh));
+                   hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                musb_start_urb(musb, is_in, qh);
        }
 }
@@ -629,7 +605,8 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
        musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
        musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
        /* NOTE: bulk combining rewrites high bits of maxpacket */
-       musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+       musb_writew(ep->regs, MUSB_RXMAXP,
+                       qh->maxpacket | ((qh->hb_mult - 1) << 11));
 
        ep->rx_reinit = 0;
 }
@@ -651,9 +628,10 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
        csr = musb_readw(epio, MUSB_TXCSR);
        if (length > pkt_size) {
                mode = 1;
-               csr |= MUSB_TXCSR_AUTOSET
-                       | MUSB_TXCSR_DMAMODE
-                       | MUSB_TXCSR_DMAENAB;
+               csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
+               /* autoset shouldn't be set in high bandwidth */
+               if (qh->hb_mult == 1)
+                       csr |= MUSB_TXCSR_AUTOSET;
        } else {
                mode = 0;
                csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
@@ -703,15 +681,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
        void __iomem            *mbase = musb->mregs;
        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
        void __iomem            *epio = hw_ep->regs;
-       struct musb_qh          *qh;
-       u16                     packet_sz;
-
-       if (!is_out || hw_ep->is_shared_fifo)
-               qh = hw_ep->in_qh;
-       else
-               qh = hw_ep->out_qh;
-
-       packet_sz = qh->maxpacket;
+       struct musb_qh          *qh = musb_ep_get_qh(hw_ep, !is_out);
+       u16                     packet_sz = qh->maxpacket;
 
        DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
                                "h_addr%02x h_port%02x bytes %d\n",
@@ -1129,17 +1100,14 @@ void musb_host_tx(struct musb *musb, u8 epnum)
        u16                     tx_csr;
        size_t                  length = 0;
        size_t                  offset = 0;
-       struct urb              *urb;
        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
        void __iomem            *epio = hw_ep->regs;
-       struct musb_qh          *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
-                                                           : hw_ep->out_qh;
+       struct musb_qh          *qh = hw_ep->out_qh;
+       struct urb              *urb = next_urb(qh);
        u32                     status = 0;
        void __iomem            *mbase = musb->mregs;
        struct dma_channel      *dma;
 
-       urb = next_urb(qh);
-
        musb_ep_select(mbase, epnum);
        tx_csr = musb_readw(epio, MUSB_TXCSR);
 
@@ -1427,7 +1395,7 @@ static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
                        urb->actual_length += dma->actual_len;
                        dma->actual_len = 0L;
                }
-               musb_save_toggle(ep, 1, urb);
+               musb_save_toggle(cur_qh, 1, urb);
 
                /* move cur_qh to end of queue */
                list_move_tail(&cur_qh->ring, &musb->in_bulk);
@@ -1531,6 +1499,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                        /* packet error reported later */
                        iso_err = true;
                }
+       } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
+               DBG(3, "end %d high bandwidth incomplete ISO packet RX\n",
+                               epnum);
+               status = -EPROTO;
        }
 
        /* faults abort the transfer */
@@ -1738,7 +1710,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                                val &= ~MUSB_RXCSR_H_AUTOREQ;
                        else
                                val |= MUSB_RXCSR_H_AUTOREQ;
-                       val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+                       val |= MUSB_RXCSR_DMAENAB;
+
+                       /* autoclear shouldn't be set in high bandwidth */
+                       if (qh->hb_mult == 1)
+                               val |= MUSB_RXCSR_AUTOCLEAR;
 
                        musb_writew(epio, MUSB_RXCSR,
                                MUSB_RXCSR_H_WZC_BITS | val);
@@ -1817,19 +1793,17 @@ static int musb_schedule(
                        epnum++, hw_ep++) {
                int     diff;
 
-               if (is_in || hw_ep->is_shared_fifo) {
-                       if (hw_ep->in_qh  != NULL)
-                               continue;
-               } else  if (hw_ep->out_qh != NULL)
+               if (musb_ep_get_qh(hw_ep, is_in) != NULL)
                        continue;
 
                if (hw_ep == musb->bulk_ep)
                        continue;
 
                if (is_in)
-                       diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+                       diff = hw_ep->max_packet_sz_rx;
                else
-                       diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+                       diff = hw_ep->max_packet_sz_tx;
+               diff -= (qh->maxpacket * qh->hb_mult);
 
                if (diff >= 0 && best_diff > diff) {
                        best_diff = diff;
@@ -1932,15 +1906,27 @@ static int musb_urb_enqueue(
        qh->is_ready = 1;
 
        qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+       qh->type = usb_endpoint_type(epd);
 
-       /* no high bandwidth support yet */
-       if (qh->maxpacket & ~0x7ff) {
-               ret = -EMSGSIZE;
-               goto done;
+       /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
+        * Some musb cores don't support high bandwidth ISO transfers; and
+        * we don't (yet!) support high bandwidth interrupt transfers.
+        */
+       qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+       if (qh->hb_mult > 1) {
+               int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
+
+               if (ok)
+                       ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
+                               || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
+               if (!ok) {
+                       ret = -EMSGSIZE;
+                       goto done;
+               }
+               qh->maxpacket &= 0x7ff;
        }
 
        qh->epnum = usb_endpoint_num(epd);
-       qh->type = usb_endpoint_type(epd);
 
        /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
        qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
@@ -2052,14 +2038,15 @@ done:
  * called with controller locked, irqs blocked
  * that hardware queue advances to the next transfer, unless prevented
  */
-static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
 {
        struct musb_hw_ep       *ep = qh->hw_ep;
        void __iomem            *epio = ep->regs;
        unsigned                hw_end = ep->epnum;
        void __iomem            *regs = ep->musb->mregs;
-       u16                     csr;
+       int                     is_in = usb_pipein(urb->pipe);
        int                     status = 0;
+       u16                     csr;
 
        musb_ep_select(regs, hw_end);
 
@@ -2112,14 +2099,14 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
        struct musb             *musb = hcd_to_musb(hcd);
        struct musb_qh          *qh;
-       struct list_head        *sched;
        unsigned long           flags;
+       int                     is_in  = usb_pipein(urb->pipe);
        int                     ret;
 
        DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
                        usb_pipedevice(urb->pipe),
                        usb_pipeendpoint(urb->pipe),
-                       usb_pipein(urb->pipe) ? "in" : "out");
+                       is_in ? "in" : "out");
 
        spin_lock_irqsave(&musb->lock, flags);
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -2130,47 +2117,25 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
        if (!qh)
                goto done;
 
-       /* Any URB not actively programmed into endpoint hardware can be
+       /*
+        * Any URB not actively programmed into endpoint hardware can be
         * immediately given back; that's any URB not at the head of an
         * endpoint queue, unless someday we get real DMA queues.  And even
         * if it's at the head, it might not be known to the hardware...
         *
-        * Otherwise abort current transfer, pending dma, etc.; urb->status
+        * Otherwise abort current transfer, pending DMA, etc.; urb->status
         * has already been updated.  This is a synchronous abort; it'd be
         * OK to hold off until after some IRQ, though.
+        *
+        * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
         */
-       if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
-               ret = -EINPROGRESS;
-       else {
-               switch (qh->type) {
-               case USB_ENDPOINT_XFER_CONTROL:
-                       sched = &musb->control;
-                       break;
-               case USB_ENDPOINT_XFER_BULK:
-                       if (qh->mux == 1) {
-                               if (usb_pipein(urb->pipe))
-                                       sched = &musb->in_bulk;
-                               else
-                                       sched = &musb->out_bulk;
-                               break;
-                       }
-               default:
-                       /* REVISIT when we get a schedule tree, periodic
-                        * transfers won't always be at the head of a
-                        * singleton queue...
-                        */
-                       sched = NULL;
-                       break;
-               }
-       }
-
-       /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
-       if (ret < 0 || (sched && qh != first_qh(sched))) {
+       if (!qh->is_ready
+                       || urb->urb_list.prev != &qh->hep->urb_list
+                       || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
                int     ready = qh->is_ready;
 
-               ret = 0;
                qh->is_ready = 0;
-               __musb_giveback(musb, urb, 0);
+               musb_giveback(musb, urb, 0);
                qh->is_ready = ready;
 
                /* If nothing else (usually musb_giveback) is using it
@@ -2182,7 +2147,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                        kfree(qh);
                }
        } else
-               ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+               ret = musb_cleanup_urb(urb, qh);
 done:
        spin_unlock_irqrestore(&musb->lock, flags);
        return ret;
@@ -2192,13 +2157,11 @@ done:
 static void
 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 {
-       u8                      epnum = hep->desc.bEndpointAddress;
+       u8                      is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
        unsigned long           flags;
        struct musb             *musb = hcd_to_musb(hcd);
-       u8                      is_in = epnum & USB_DIR_IN;
        struct musb_qh          *qh;
        struct urb              *urb;
-       struct list_head        *sched;
 
        spin_lock_irqsave(&musb->lock, flags);
 
@@ -2206,31 +2169,11 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
        if (qh == NULL)
                goto exit;
 
-       switch (qh->type) {
-       case USB_ENDPOINT_XFER_CONTROL:
-               sched = &musb->control;
-               break;
-       case USB_ENDPOINT_XFER_BULK:
-               if (qh->mux == 1) {
-                       if (is_in)
-                               sched = &musb->in_bulk;
-                       else
-                               sched = &musb->out_bulk;
-                       break;
-               }
-       default:
-               /* REVISIT when we get a schedule tree, periodic transfers
-                * won't always be at the head of a singleton queue...
-                */
-               sched = NULL;
-               break;
-       }
-
-       /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+       /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
 
-       /* kick first urb off the hardware, if needed */
+       /* Kick the first URB off the hardware, if needed */
        qh->is_ready = 0;
-       if (!sched || qh == first_qh(sched)) {
+       if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
                urb = next_urb(qh);
 
                /* make software (then hardware) stop ASAP */
@@ -2238,7 +2181,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
                        urb->status = -ESHUTDOWN;
 
                /* cleanup */
-               musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+               musb_cleanup_urb(urb, qh);
 
                /* Then nuke all the others ... and advance the
                 * queue on hw_ep (e.g. bulk ring) when we're done.
@@ -2254,7 +2197,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
                 * will activate any of these as it advances.
                 */
                while (!list_empty(&hep->urb_list))
-                       __musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+                       musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
 
                hep->hcpriv = NULL;
                list_del(&qh->ring);
@@ -2293,7 +2236,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
 {
        struct musb     *musb = hcd_to_musb(hcd);
 
-       if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+       if (musb->xceiv->state == OTG_STATE_A_SUSPEND)
                return 0;
 
        if (is_host_active(musb) && musb->is_active) {
index 0b7fbcd21963ebcc23435d168a061bed741ff612..14b00776638dab987c233d72b9b66da462214e90 100644 (file)
@@ -67,6 +67,7 @@ struct musb_qh {
        u8                      is_ready;       /* safe to modify hw_ep */
        u8                      type;           /* XFERTYPE_* */
        u8                      epnum;
+       u8                      hb_mult;        /* high bandwidth pkts per uf */
        u16                     maxpacket;
        u16                     frame;          /* for periodic schedule */
        unsigned                iso_idx;        /* in urb->iso_frame_desc[] */
index bf677acc83db03e781bb786642b1e5121ee1f627..bfe5fe4ebfeeb87b49e6f762aa670d0aed4da8aa 100644 (file)
@@ -78,18 +78,22 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend)
                DBG(3, "Root port suspended, power %02x\n", power);
 
                musb->port1_status |= USB_PORT_STAT_SUSPEND;
-               switch (musb->xceiv.state) {
+               switch (musb->xceiv->state) {
                case OTG_STATE_A_HOST:
-                       musb->xceiv.state = OTG_STATE_A_SUSPEND;
+                       musb->xceiv->state = OTG_STATE_A_SUSPEND;
                        musb->is_active = is_otg_enabled(musb)
-                                       && musb->xceiv.host->b_hnp_enable;
+                                       && musb->xceiv->host->b_hnp_enable;
+                       if (musb->is_active)
+                               mod_timer(&musb->otg_timer, jiffies
+                                       + msecs_to_jiffies(
+                                               OTG_TIME_A_AIDL_BDIS));
                        musb_platform_try_idle(musb, 0);
                        break;
 #ifdef CONFIG_USB_MUSB_OTG
                case OTG_STATE_B_HOST:
-                       musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+                       musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
                        musb->is_active = is_otg_enabled(musb)
-                                       && musb->xceiv.host->b_hnp_enable;
+                                       && musb->xceiv->host->b_hnp_enable;
                        musb_platform_try_idle(musb, 0);
                        break;
 #endif
@@ -116,7 +120,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
        void __iomem    *mbase = musb->mregs;
 
 #ifdef CONFIG_USB_MUSB_OTG
-       if (musb->xceiv.state == OTG_STATE_B_IDLE) {
+       if (musb->xceiv->state == OTG_STATE_B_IDLE) {
                DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
                musb->port1_status &= ~USB_PORT_STAT_RESET;
                return;
@@ -186,14 +190,23 @@ void musb_root_disconnect(struct musb *musb)
        usb_hcd_poll_rh_status(musb_to_hcd(musb));
        musb->is_active = 0;
 
-       switch (musb->xceiv.state) {
-       case OTG_STATE_A_HOST:
+       switch (musb->xceiv->state) {
        case OTG_STATE_A_SUSPEND:
-               musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+#ifdef CONFIG_USB_MUSB_OTG
+               if (is_otg_enabled(musb)
+                               && musb->xceiv->host->b_hnp_enable) {
+                       musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
+                       musb->g.is_a_peripheral = 1;
+                       break;
+               }
+#endif
+               /* FALLTHROUGH */
+       case OTG_STATE_A_HOST:
+               musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
                musb->is_active = 0;
                break;
        case OTG_STATE_A_WAIT_VFALL:
-               musb->xceiv.state = OTG_STATE_B_IDLE;
+               musb->xceiv->state = OTG_STATE_B_IDLE;
                break;
        default:
                DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
@@ -332,7 +345,7 @@ int musb_hub_control(
                        musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
                        usb_hcd_poll_rh_status(musb_to_hcd(musb));
                        /* NOTE: it might really be A_WAIT_BCON ... */
-                       musb->xceiv.state = OTG_STATE_A_HOST;
+                       musb->xceiv->state = OTG_STATE_A_HOST;
                }
 
                put_unaligned(cpu_to_le32(musb->port1_status
index 60924ce084934f7f9a619a5ff12f6d1fd8afc3d9..34875201ee041220c715a9df268840b026009696 100644 (file)
@@ -44,7 +44,6 @@
 #define        get_cpu_rev()   2
 #endif
 
-#define MUSB_TIMEOUT_A_WAIT_BCON       1100
 
 static struct timer_list musb_idle_timer;
 
@@ -61,17 +60,17 @@ static void musb_do_idle(unsigned long _musb)
 
        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_A_WAIT_BCON:
                devctl &= ~MUSB_DEVCTL_SESSION;
                musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
 
                devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
                if (devctl & MUSB_DEVCTL_BDEVICE) {
-                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                       musb->xceiv->state = OTG_STATE_B_IDLE;
                        MUSB_DEV_MODE(musb);
                } else {
-                       musb->xceiv.state = OTG_STATE_A_IDLE;
+                       musb->xceiv->state = OTG_STATE_A_IDLE;
                        MUSB_HST_MODE(musb);
                }
                break;
@@ -89,7 +88,7 @@ static void musb_do_idle(unsigned long _musb)
                        musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
                        usb_hcd_poll_rh_status(musb_to_hcd(musb));
                        /* NOTE: it might really be A_WAIT_BCON ... */
-                       musb->xceiv.state = OTG_STATE_A_HOST;
+                       musb->xceiv->state = OTG_STATE_A_HOST;
                }
                break;
 #endif
@@ -97,9 +96,9 @@ static void musb_do_idle(unsigned long _musb)
        case OTG_STATE_A_HOST:
                devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
                if (devctl &  MUSB_DEVCTL_BDEVICE)
-                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                       musb->xceiv->state = OTG_STATE_B_IDLE;
                else
-                       musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+                       musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
 #endif
        default:
                break;
@@ -118,7 +117,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
 
        /* Never idle if active, or when VBUS timeout is not set as host */
        if (musb->is_active || ((musb->a_wait_bcon == 0)
-                       && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+                       && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
                DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
                del_timer(&musb_idle_timer);
                last_timer = jiffies;
@@ -163,8 +162,8 @@ static void omap_set_vbus(struct musb *musb, int is_on)
 
        if (is_on) {
                musb->is_active = 1;
-               musb->xceiv.default_a = 1;
-               musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+               musb->xceiv->default_a = 1;
+               musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
                devctl |= MUSB_DEVCTL_SESSION;
 
                MUSB_HST_MODE(musb);
@@ -175,8 +174,8 @@ static void omap_set_vbus(struct musb *musb, int is_on)
                 * jumping right to B_IDLE...
                 */
 
-               musb->xceiv.default_a = 0;
-               musb->xceiv.state = OTG_STATE_B_IDLE;
+               musb->xceiv->default_a = 0;
+               musb->xceiv->state = OTG_STATE_B_IDLE;
                devctl &= ~MUSB_DEVCTL_SESSION;
 
                MUSB_DEV_MODE(musb);
@@ -188,10 +187,6 @@ static void omap_set_vbus(struct musb *musb, int is_on)
                otg_state_string(musb),
                musb_readb(musb->mregs, MUSB_DEVCTL));
 }
-static int omap_set_power(struct otg_transceiver *x, unsigned mA)
-{
-       return 0;
-}
 
 static int musb_platform_resume(struct musb *musb);
 
@@ -202,24 +197,6 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
        devctl |= MUSB_DEVCTL_SESSION;
        musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
 
-       switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-       case MUSB_HOST:
-               otg_set_host(&musb->xceiv, musb->xceiv.host);
-               break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-       case MUSB_PERIPHERAL:
-               otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
-               break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
-       case MUSB_OTG:
-               break;
-#endif
-       default:
-               return -EINVAL;
-       }
        return 0;
 }
 
@@ -231,6 +208,16 @@ int __init musb_platform_init(struct musb *musb)
        omap_cfg_reg(AE5_2430_USB0HS_STP);
 #endif
 
+       /* We require some kind of external transceiver, hooked
+        * up through ULPI.  TWL4030-family PMICs include one,
+        * which needs a driver; drivers aren't always needed.
+        */
+       musb->xceiv = otg_get_transceiver();
+       if (!musb->xceiv) {
+               pr_err("HS USB OTG: no transceiver configured\n");
+               return -ENODEV;
+       }
+
        musb_platform_resume(musb);
 
        l = omap_readl(OTG_SYSCONFIG);
@@ -240,7 +227,12 @@ int __init musb_platform_init(struct musb *musb)
        l &= ~AUTOIDLE;         /* disable auto idle */
        l &= ~NOIDLE;           /* remove possible noidle */
        l |= SMARTIDLE;         /* enable smart idle */
-       l |= AUTOIDLE;          /* enable auto idle */
+       /*
+        * MUSB AUTOIDLE doesn't work on OMAP3430.
+        * Workaround by Richard Woodruff/TI
+        */
+       if (!cpu_is_omap3430())
+               l |= AUTOIDLE;          /* enable auto idle */
        omap_writel(l, OTG_SYSCONFIG);
 
        l = omap_readl(OTG_INTERFSEL);
@@ -257,9 +249,6 @@ int __init musb_platform_init(struct musb *musb)
 
        if (is_host_enabled(musb))
                musb->board_set_vbus = omap_set_vbus;
-       if (is_peripheral_enabled(musb))
-               musb->xceiv.set_power = omap_set_power;
-       musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
 
        setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
 
@@ -282,8 +271,7 @@ int musb_platform_suspend(struct musb *musb)
        l |= ENABLEWAKEUP;      /* enable wakeup */
        omap_writel(l, OTG_SYSCONFIG);
 
-       if (musb->xceiv.set_suspend)
-               musb->xceiv.set_suspend(&musb->xceiv, 1);
+       otg_set_suspend(musb->xceiv, 1);
 
        if (musb->set_clock)
                musb->set_clock(musb->clock, 0);
@@ -300,8 +288,7 @@ static int musb_platform_resume(struct musb *musb)
        if (!musb->clock)
                return 0;
 
-       if (musb->xceiv.set_suspend)
-               musb->xceiv.set_suspend(&musb->xceiv, 0);
+       otg_set_suspend(musb->xceiv, 0);
 
        if (musb->set_clock)
                musb->set_clock(musb->clock, 1);
index 4ac1477d356901b62b10758b9d5e073ff940d30e..88b587c703e96cfea7ce2149190cdecb1003ba42 100644 (file)
@@ -259,6 +259,8 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
                tusb_fifo_read_unaligned(fifo, buf, len);
 }
 
+static struct musb *the_musb;
+
 #ifdef CONFIG_USB_GADGET_MUSB_HDRC
 
 /* This is used by gadget drivers, and OTG transceiver logic, allowing
@@ -269,7 +271,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
  */
 static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
 {
-       struct musb     *musb = container_of(x, struct musb, xceiv);
+       struct musb     *musb = the_musb;
        void __iomem    *tbase = musb->ctrl_base;
        u32             reg;
 
@@ -419,7 +421,7 @@ static void musb_do_idle(unsigned long _musb)
 
        spin_lock_irqsave(&musb->lock, flags);
 
-       switch (musb->xceiv.state) {
+       switch (musb->xceiv->state) {
        case OTG_STATE_A_WAIT_BCON:
                if ((musb->a_wait_bcon != 0)
                        && (musb->idle_timeout == 0
@@ -483,7 +485,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
 
        /* Never idle if active, or when VBUS timeout is not set as host */
        if (musb->is_active || ((musb->a_wait_bcon == 0)
-                       && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+                       && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
                DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
                del_timer(&musb_idle_timer);
                last_timer = jiffies;
@@ -532,8 +534,8 @@ static void tusb_source_power(struct musb *musb, int is_on)
                if (musb->set_clock)
                        musb->set_clock(musb->clock, 1);
                timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
-               musb->xceiv.default_a = 1;
-               musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+               musb->xceiv->default_a = 1;
+               musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
                devctl |= MUSB_DEVCTL_SESSION;
 
                conf |= TUSB_DEV_CONF_USB_HOST_MODE;
@@ -546,24 +548,24 @@ static void tusb_source_power(struct musb *musb, int is_on)
                /* If ID pin is grounded, we want to be a_idle */
                otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
                if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
-                       switch (musb->xceiv.state) {
+                       switch (musb->xceiv->state) {
                        case OTG_STATE_A_WAIT_VRISE:
                        case OTG_STATE_A_WAIT_BCON:
-                               musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+                               musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
                                break;
                        case OTG_STATE_A_WAIT_VFALL:
-                               musb->xceiv.state = OTG_STATE_A_IDLE;
+                               musb->xceiv->state = OTG_STATE_A_IDLE;
                                break;
                        default:
-                               musb->xceiv.state = OTG_STATE_A_IDLE;
+                               musb->xceiv->state = OTG_STATE_A_IDLE;
                        }
                        musb->is_active = 0;
-                       musb->xceiv.default_a = 1;
+                       musb->xceiv->default_a = 1;
                        MUSB_HST_MODE(musb);
                } else {
                        musb->is_active = 0;
-                       musb->xceiv.default_a = 0;
-                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                       musb->xceiv->default_a = 0;
+                       musb->xceiv->state = OTG_STATE_B_IDLE;
                        MUSB_DEV_MODE(musb);
                }
 
@@ -674,7 +676,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
                else
                        default_a = is_host_enabled(musb);
                DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
-               musb->xceiv.default_a = default_a;
+               musb->xceiv->default_a = default_a;
                tusb_source_power(musb, default_a);
 
                /* Don't allow idling immediately */
@@ -686,7 +688,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
        if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
 
                /* B-dev state machine:  no vbus ~= disconnect */
-               if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
+               if ((is_otg_enabled(musb) && !musb->xceiv->default_a)
                                || !is_host_enabled(musb)) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
                        /* ? musb_root_disconnect(musb); */
@@ -701,9 +703,9 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
 
                        if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
                                DBG(1, "Forcing disconnect (no interrupt)\n");
-                               if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+                               if (musb->xceiv->state != OTG_STATE_B_IDLE) {
                                        /* INTR_DISCONNECT can hide... */
-                                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                                       musb->xceiv->state = OTG_STATE_B_IDLE;
                                        musb->int_usb |= MUSB_INTR_DISCONNECT;
                                }
                                musb->is_active = 0;
@@ -717,7 +719,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
                        DBG(2, "vbus change, %s, otg %03x\n",
                                otg_state_string(musb), otg_stat);
 
-                       switch (musb->xceiv.state) {
+                       switch (musb->xceiv->state) {
                        case OTG_STATE_A_IDLE:
                                DBG(2, "Got SRP, turning on VBUS\n");
                                musb_set_vbus(musb, 1);
@@ -765,7 +767,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
 
                DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
 
-               switch (musb->xceiv.state) {
+               switch (musb->xceiv->state) {
                case OTG_STATE_A_WAIT_VRISE:
                        /* VBUS has probably been valid for a while now,
                         * but may well have bounced out of range a bit
@@ -777,7 +779,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
                                        DBG(2, "devctl %02x\n", devctl);
                                        break;
                                }
-                               musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+                               musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
                                musb->is_active = 0;
                                idle_timeout = jiffies
                                        + msecs_to_jiffies(musb->a_wait_bcon);
@@ -1093,9 +1095,14 @@ int __init musb_platform_init(struct musb *musb)
 {
        struct platform_device  *pdev;
        struct resource         *mem;
-       void __iomem            *sync;
+       void __iomem            *sync = NULL;
        int                     ret;
 
+       usb_nop_xceiv_register();
+       musb->xceiv = otg_get_transceiver();
+       if (!musb->xceiv)
+               return -ENODEV;
+
        pdev = to_platform_device(musb->controller);
 
        /* dma address for async dma */
@@ -1106,14 +1113,16 @@ int __init musb_platform_init(struct musb *musb)
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!mem) {
                pr_debug("no sync dma resource?\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto done;
        }
        musb->sync = mem->start;
 
        sync = ioremap(mem->start, mem->end - mem->start + 1);
        if (!sync) {
                pr_debug("ioremap for sync failed\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto done;
        }
        musb->sync_va = sync;
 
@@ -1126,28 +1135,37 @@ int __init musb_platform_init(struct musb *musb)
        if (ret) {
                printk(KERN_ERR "Could not start tusb6010 (%d)\n",
                                ret);
-               return -ENODEV;
+               goto done;
        }
        musb->isr = tusb_interrupt;
 
        if (is_host_enabled(musb))
                musb->board_set_vbus = tusb_source_power;
-       if (is_peripheral_enabled(musb))
-               musb->xceiv.set_power = tusb_draw_power;
+       if (is_peripheral_enabled(musb)) {
+               musb->xceiv->set_power = tusb_draw_power;
+               the_musb = musb;
+       }
 
        setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
 
+done:
+       if (ret < 0) {
+               if (sync)
+                       iounmap(sync);
+               usb_nop_xceiv_unregister();
+       }
        return ret;
 }
 
 int musb_platform_exit(struct musb *musb)
 {
        del_timer_sync(&musb_idle_timer);
+       the_musb = NULL;
 
        if (musb->board_set_power)
                musb->board_set_power(0);
 
        iounmap(musb->sync_va);
-
+       usb_nop_xceiv_unregister();
        return 0;
 }
index aa884d072f0b4c7202e5b421e52b0ca37a5163de..69feeec1628ce28bd8744ef5c6048d0b7a12746c 100644 (file)
@@ -59,4 +59,18 @@ config NOP_USB_XCEIV
         built-in with usb ip or which are autonomous and doesn't require any
         phy programming such as ISP1x04 etc.
 
+config USB_LANGWELL_OTG
+       tristate "Intel Langwell USB OTG dual-role support"
+       depends on USB && MRST
+       select USB_OTG
+       select USB_OTG_UTILS
+       help
+         Say Y here if you want to build Intel Langwell USB OTG
+         transceiver driver into the kernel. This driver implements role
+         switch between the EHCI host driver and the Langwell USB OTG
+         client driver.
+
+         To compile this driver as a module, choose M here: the
+         module will be called langwell_otg.
+
 endif # USB || OTG
index 208167856529f26225807cd7edea4fb29a3640e9..6d1abdd3c0ac9cc1a2cd688dd9fda768ee2562a1 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_OTG_UTILS)     += otg.o
 obj-$(CONFIG_USB_GPIO_VBUS)    += gpio_vbus.o
 obj-$(CONFIG_ISP1301_OMAP)     += isp1301_omap.o
 obj-$(CONFIG_TWL4030_USB)      += twl4030-usb.o
+obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
 obj-$(CONFIG_NOP_USB_XCEIV)    += nop-usb-xceiv.o
 
 ccflags-$(CONFIG_USB_DEBUG)    += -DDEBUG
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
new file mode 100644 (file)
index 0000000..6f628d0
--- /dev/null
@@ -0,0 +1,1915 @@
+/*
+ * Intel Langwell USB OTG transceiver driver
+ * Copyright (C) 2008 - 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+/* This driver helps to switch Langwell OTG controller function between host
+ * and peripheral. It works with EHCI driver and Langwell client controller
+ * driver together.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/notifier.h>
+#include <asm/ipc_defs.h>
+#include <linux/delay.h>
+#include "../core/hcd.h"
+
+#include <linux/usb/langwell_otg.h>
+
+#define        DRIVER_DESC             "Intel Langwell USB OTG transceiver driver"
+#define        DRIVER_VERSION          "3.0.0.32L.0002"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static const char driver_name[] = "langwell_otg";
+
+static int langwell_otg_probe(struct pci_dev *pdev,
+                       const struct pci_device_id *id);
+static void langwell_otg_remove(struct pci_dev *pdev);
+static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
+static int langwell_otg_resume(struct pci_dev *pdev);
+
+static int langwell_otg_set_host(struct otg_transceiver *otg,
+                               struct usb_bus *host);
+static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
+                               struct usb_gadget *gadget);
+static int langwell_otg_start_srp(struct otg_transceiver *otg);
+
+static const struct pci_device_id pci_ids[] = {{
+       .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+       .class_mask =   ~0,
+       .vendor =       0x8086,
+       .device =       0x0811,
+       .subvendor =    PCI_ANY_ID,
+       .subdevice =    PCI_ANY_ID,
+}, { /* end: all zeroes */ }
+};
+
+static struct pci_driver otg_pci_driver = {
+       .name =         (char *) driver_name,
+       .id_table =     pci_ids,
+
+       .probe =        langwell_otg_probe,
+       .remove =       langwell_otg_remove,
+
+       .suspend =      langwell_otg_suspend,
+       .resume =       langwell_otg_resume,
+};
+
+static const char *state_string(enum usb_otg_state state)
+{
+       switch (state) {
+       case OTG_STATE_A_IDLE:
+               return "a_idle";
+       case OTG_STATE_A_WAIT_VRISE:
+               return "a_wait_vrise";
+       case OTG_STATE_A_WAIT_BCON:
+               return "a_wait_bcon";
+       case OTG_STATE_A_HOST:
+               return "a_host";
+       case OTG_STATE_A_SUSPEND:
+               return "a_suspend";
+       case OTG_STATE_A_PERIPHERAL:
+               return "a_peripheral";
+       case OTG_STATE_A_WAIT_VFALL:
+               return "a_wait_vfall";
+       case OTG_STATE_A_VBUS_ERR:
+               return "a_vbus_err";
+       case OTG_STATE_B_IDLE:
+               return "b_idle";
+       case OTG_STATE_B_SRP_INIT:
+               return "b_srp_init";
+       case OTG_STATE_B_PERIPHERAL:
+               return "b_peripheral";
+       case OTG_STATE_B_WAIT_ACON:
+               return "b_wait_acon";
+       case OTG_STATE_B_HOST:
+               return "b_host";
+       default:
+               return "UNDEFINED";
+       }
+}
+
+/* HSM timers */
+static inline struct langwell_otg_timer *otg_timer_initializer
+(void (*function)(unsigned long), unsigned long expires, unsigned long data)
+{
+       struct langwell_otg_timer *timer;
+       timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
+       timer->function = function;
+       timer->expires = expires;
+       timer->data = data;
+       return timer;
+}
+
+static struct langwell_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr,
+       *a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr, *b_srp_res_tmr,
+       *b_bus_suspend_tmr;
+
+static struct list_head active_timers;
+
+static struct langwell_otg *the_transceiver;
+
+/* host/client notify transceiver when event affects HNP state */
+void langwell_update_transceiver()
+{
+       otg_dbg("transceiver driver is notified\n");
+       queue_work(the_transceiver->qwork, &the_transceiver->work);
+}
+EXPORT_SYMBOL(langwell_update_transceiver);
+
+static int langwell_otg_set_host(struct otg_transceiver *otg,
+                                       struct usb_bus *host)
+{
+       otg->host = host;
+
+       return 0;
+}
+
+static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
+                                       struct usb_gadget *gadget)
+{
+       otg->gadget = gadget;
+
+       return 0;
+}
+
+static int langwell_otg_set_power(struct otg_transceiver *otg,
+                               unsigned mA)
+{
+       return 0;
+}
+
+/* A-device drives vbus, controlled through PMIC CHRGCNTL register */
+static void langwell_otg_drv_vbus(int on)
+{
+       struct ipc_pmic_reg_data        pmic_data = {0};
+       struct ipc_pmic_reg_data        battery_data;
+
+       /* Check if battery is attached or not */
+       battery_data.pmic_reg_data[0].register_address = 0xd2;
+       battery_data.ioc = 0;
+       battery_data.num_entries = 1;
+       if (ipc_pmic_register_read(&battery_data)) {
+               otg_dbg("Failed to read PMIC register 0xd2.\n");
+               return;
+       }
+
+       if ((battery_data.pmic_reg_data[0].value & 0x20) == 0) {
+               otg_dbg("no battery attached\n");
+               return;
+       }
+
+       /* Workaround for battery attachment issue */
+       if (battery_data.pmic_reg_data[0].value == 0x34) {
+               otg_dbg("battery \n");
+               return;
+       }
+
+       otg_dbg("battery attached\n");
+
+       pmic_data.ioc = 0;
+       pmic_data.pmic_reg_data[0].register_address = 0xD4;
+       pmic_data.num_entries = 1;
+       if (on)
+               pmic_data.pmic_reg_data[0].value = 0x20;
+       else
+               pmic_data.pmic_reg_data[0].value = 0xc0;
+
+       if (ipc_pmic_register_write(&pmic_data, TRUE))
+               otg_dbg("Failed to write PMIC.\n");
+
+}
+
+/* charge vbus or discharge vbus through a resistor to ground */
+static void langwell_otg_chrg_vbus(int on)
+{
+
+       u32     val;
+
+       val = readl(the_transceiver->regs + CI_OTGSC);
+
+       if (on)
+               writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
+                               the_transceiver->regs + CI_OTGSC);
+       else
+               writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
+                               the_transceiver->regs + CI_OTGSC);
+
+}
+
+/* Start SRP */
+static int langwell_otg_start_srp(struct otg_transceiver *otg)
+{
+       u32     val;
+
+       otg_dbg("Start SRP ->\n");
+
+       val = readl(the_transceiver->regs + CI_OTGSC);
+
+       writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
+               the_transceiver->regs + CI_OTGSC);
+
+       /* Check if the data pulse has finished or not */
+       msleep(8);
+       val = readl(the_transceiver->regs + CI_OTGSC);
+       if (val & (OTGSC_HADP | OTGSC_DP))
+               otg_dbg("DataLine SRP Error\n");
+
+       /* FIXME: VBus SRP */
+
+       return 0;
+}
+
+
+/* stop SOF via bus_suspend */
+static void langwell_otg_loc_sof(int on)
+{
+       struct usb_hcd  *hcd;
+       int             err;
+
+       otg_dbg("loc_sof -> %d\n", on);
+
+       hcd = bus_to_hcd(the_transceiver->otg.host);
+       if (on)
+               err = hcd->driver->bus_resume(hcd);
+       else
+               err = hcd->driver->bus_suspend(hcd);
+
+       if (err)
+               otg_dbg("Failed to resume/suspend bus - %d\n", err);
+}
+
+static void langwell_otg_phy_low_power(int on)
+{
+       u32     val;
+
+       otg_dbg("phy low power mode-> %d\n", on);
+
+       val = readl(the_transceiver->regs + CI_HOSTPC1);
+       if (on)
+               writel(val | HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
+       else
+               writel(val & ~HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
+}
+
+/* Enable/Disable OTG interrupt */
+static void langwell_otg_intr(int on)
+{
+       u32 val;
+
+       otg_dbg("interrupt -> %d\n", on);
+
+       val = readl(the_transceiver->regs + CI_OTGSC);
+       if (on) {
+               val = val | (OTGSC_INTEN_MASK | OTGSC_IDPU);
+               writel(val, the_transceiver->regs + CI_OTGSC);
+       } else {
+               val = val & ~(OTGSC_INTEN_MASK | OTGSC_IDPU);
+               writel(val, the_transceiver->regs + CI_OTGSC);
+       }
+}
+
+/* set HAAR: Hardware Assist Auto-Reset */
+static void langwell_otg_HAAR(int on)
+{
+       u32     val;
+
+       otg_dbg("HAAR -> %d\n", on);
+
+       val = readl(the_transceiver->regs + CI_OTGSC);
+       if (on)
+               writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
+                               the_transceiver->regs + CI_OTGSC);
+       else
+               writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
+                               the_transceiver->regs + CI_OTGSC);
+}
+
+/* set HABA: Hardware Assist B-Disconnect to A-Connect */
+static void langwell_otg_HABA(int on)
+{
+       u32     val;
+
+       otg_dbg("HABA -> %d\n", on);
+
+       val = readl(the_transceiver->regs + CI_OTGSC);
+       if (on)
+               writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
+                               the_transceiver->regs + CI_OTGSC);
+       else
+               writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
+                               the_transceiver->regs + CI_OTGSC);
+}
+
+static int langwell_otg_check_se0_srp(int on)
+{
+       u32 val;
+
+       int delay_time = TB_SE0_SRP * 10; /* step is 100us */
+
+       otg_dbg("check_se0_srp -> \n");
+
+       do {
+               udelay(100);
+               if (!delay_time--)
+                       break;
+               val = readl(the_transceiver->regs + CI_PORTSC1);
+               val &= PORTSC_LS;
+       } while (!val);
+
+       otg_dbg("check_se0_srp <- \n");
+       return val;
+}
+
+/* The timeout callback function to set time out bit */
+static void set_tmout(unsigned long indicator)
+{
+       *(int *)indicator = 1;
+}
+
+void langwell_otg_nsf_msg(unsigned long indicator)
+{
+       switch (indicator) {
+       case 2:
+       case 4:
+       case 6:
+       case 7:
+               printk(KERN_ERR "OTG:NSF-%lu - deivce not responding\n",
+                               indicator);
+               break;
+       case 3:
+               printk(KERN_ERR "OTG:NSF-%lu - deivce not supported\n",
+                               indicator);
+               break;
+       default:
+               printk(KERN_ERR "Do not have this kind of NSF\n");
+               break;
+       }
+}
+
+/* Initialize timers */
+static void langwell_otg_init_timers(struct otg_hsm *hsm)
+{
+       /* HSM used timers */
+       a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
+                               (unsigned long)&hsm->a_wait_vrise_tmout);
+       a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON,
+                               (unsigned long)&hsm->a_wait_bcon_tmout);
+       a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
+                               (unsigned long)&hsm->a_aidl_bdis_tmout);
+       b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST,
+                               (unsigned long)&hsm->b_ase0_brst_tmout);
+       b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
+                               (unsigned long)&hsm->b_se0_srp);
+       b_srp_res_tmr = otg_timer_initializer(&set_tmout, TB_SRP_RES,
+                               (unsigned long)&hsm->b_srp_res_tmout);
+       b_bus_suspend_tmr = otg_timer_initializer(&set_tmout, TB_BUS_SUSPEND,
+                               (unsigned long)&hsm->b_bus_suspend_tmout);
+}
+
+/* Free timers */
+static void langwell_otg_free_timers(void)
+{
+       kfree(a_wait_vrise_tmr);
+       kfree(a_wait_bcon_tmr);
+       kfree(a_aidl_bdis_tmr);
+       kfree(b_ase0_brst_tmr);
+       kfree(b_se0_srp_tmr);
+       kfree(b_srp_res_tmr);
+       kfree(b_bus_suspend_tmr);
+}
+
+/* Add timer to timer list */
+static void langwell_otg_add_timer(void *gtimer)
+{
+       struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
+       struct langwell_otg_timer *tmp_timer;
+       u32     val32;
+
+       /* Check if the timer is already in the active list,
+        * if so update timer count
+        */
+       list_for_each_entry(tmp_timer, &active_timers, list)
+               if (tmp_timer == timer) {
+                       timer->count = timer->expires;
+                       return;
+               }
+       timer->count = timer->expires;
+
+       if (list_empty(&active_timers)) {
+               val32 = readl(the_transceiver->regs + CI_OTGSC);
+               writel(val32 | OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
+       }
+
+       list_add_tail(&timer->list, &active_timers);
+}
+
+/* Remove timer from the timer list; clear timeout status */
+static void langwell_otg_del_timer(void *gtimer)
+{
+       struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
+       struct langwell_otg_timer *tmp_timer, *del_tmp;
+       u32 val32;
+
+       list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
+               if (tmp_timer == timer)
+                       list_del(&timer->list);
+
+       if (list_empty(&active_timers)) {
+               val32 = readl(the_transceiver->regs + CI_OTGSC);
+               writel(val32 & ~OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
+       }
+}
+
+/* Reduce timer count by 1, and find timeout conditions.*/
+static int langwell_otg_tick_timer(u32 *int_sts)
+{
+       struct langwell_otg_timer *tmp_timer, *del_tmp;
+       int expired = 0;
+
+       list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
+               tmp_timer->count--;
+               /* check if timer expires */
+               if (!tmp_timer->count) {
+                       list_del(&tmp_timer->list);
+                       tmp_timer->function(tmp_timer->data);
+                       expired = 1;
+               }
+       }
+
+       if (list_empty(&active_timers)) {
+               otg_dbg("tick timer: disable 1ms int\n");
+               *int_sts = *int_sts & ~OTGSC_1MSE;
+       }
+       return expired;
+}
+
+static void reset_otg(void)
+{
+       u32     val;
+       int     delay_time = 1000;
+
+       otg_dbg("reseting OTG controller ...\n");
+       val = readl(the_transceiver->regs + CI_USBCMD);
+       writel(val | USBCMD_RST, the_transceiver->regs + CI_USBCMD);
+       do {
+               udelay(100);
+               if (!delay_time--)
+                       otg_dbg("reset timeout\n");
+               val = readl(the_transceiver->regs + CI_USBCMD);
+               val &= USBCMD_RST;
+       } while (val != 0);
+       otg_dbg("reset done.\n");
+}
+
+static void set_host_mode(void)
+{
+       u32     val;
+
+       reset_otg();
+       val = readl(the_transceiver->regs + CI_USBMODE);
+       val = (val & (~USBMODE_CM)) | USBMODE_HOST;
+       writel(val, the_transceiver->regs + CI_USBMODE);
+}
+
+static void set_client_mode(void)
+{
+       u32     val;
+
+       reset_otg();
+       val = readl(the_transceiver->regs + CI_USBMODE);
+       val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
+       writel(val, the_transceiver->regs + CI_USBMODE);
+}
+
+static void init_hsm(void)
+{
+       struct langwell_otg     *langwell = the_transceiver;
+       u32                     val32;
+
+       /* read OTGSC after reset */
+       val32 = readl(langwell->regs + CI_OTGSC);
+       otg_dbg("%s: OTGSC init value = 0x%x\n", __func__, val32);
+
+       /* set init state */
+       if (val32 & OTGSC_ID) {
+               langwell->hsm.id = 1;
+               langwell->otg.default_a = 0;
+               set_client_mode();
+               langwell->otg.state = OTG_STATE_B_IDLE;
+               langwell_otg_drv_vbus(0);
+       } else {
+               langwell->hsm.id = 0;
+               langwell->otg.default_a = 1;
+               set_host_mode();
+               langwell->otg.state = OTG_STATE_A_IDLE;
+       }
+
+       /* set session indicator */
+       if (val32 & OTGSC_BSE)
+               langwell->hsm.b_sess_end = 1;
+       if (val32 & OTGSC_BSV)
+               langwell->hsm.b_sess_vld = 1;
+       if (val32 & OTGSC_ASV)
+               langwell->hsm.a_sess_vld = 1;
+       if (val32 & OTGSC_AVV)
+               langwell->hsm.a_vbus_vld = 1;
+
+       /* by default, power the bus */
+       langwell->hsm.a_bus_req = 1;
+       langwell->hsm.a_bus_drop = 0;
+       /* by default, don't request the bus as a B-device */
+       langwell->hsm.b_bus_req = 0;
+       /* no system error */
+       langwell->hsm.a_clr_err = 0;
+}
+
+static irqreturn_t otg_dummy_irq(int irq, void *_dev)
+{
+       void __iomem    *reg_base = _dev;
+       u32     val;
+       u32     int_mask = 0;
+
+       val = readl(reg_base + CI_USBMODE);
+       if ((val & USBMODE_CM) != USBMODE_DEVICE)
+               return IRQ_NONE;
+
+       val = readl(reg_base + CI_USBSTS);
+       int_mask = val & INTR_DUMMY_MASK;
+
+       if (int_mask == 0)
+               return IRQ_NONE;
+
+       /* clear hsm.b_conn here since host driver can't detect it
+       *  otg_dummy_irq called means B-disconnect happened.
+       */
+       if (the_transceiver->hsm.b_conn) {
+               the_transceiver->hsm.b_conn = 0;
+               if (spin_trylock(&the_transceiver->wq_lock)) {
+                       queue_work(the_transceiver->qwork,
+                               &the_transceiver->work);
+                       spin_unlock(&the_transceiver->wq_lock);
+               }
+       }
+       /* Clear interrupts */
+       writel(int_mask, reg_base + CI_USBSTS);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t otg_irq(int irq, void *_dev)
+{
+       struct  langwell_otg *langwell = _dev;
+       u32     int_sts, int_en;
+       u32     int_mask = 0;
+       int     flag = 0;
+
+       int_sts = readl(langwell->regs + CI_OTGSC);
+       int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
+       int_mask = int_sts & int_en;
+       if (int_mask == 0)
+               return IRQ_NONE;
+
+       if (int_mask & OTGSC_IDIS) {
+               otg_dbg("%s: id change int\n", __func__);
+               langwell->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
+               flag = 1;
+       }
+       if (int_mask & OTGSC_DPIS) {
+               otg_dbg("%s: data pulse int\n", __func__);
+               langwell->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
+               flag = 1;
+       }
+       if (int_mask & OTGSC_BSEIS) {
+               otg_dbg("%s: b session end int\n", __func__);
+               langwell->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
+               flag = 1;
+       }
+       if (int_mask & OTGSC_BSVIS) {
+               otg_dbg("%s: b session valid int\n", __func__);
+               langwell->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
+               flag = 1;
+       }
+       if (int_mask & OTGSC_ASVIS) {
+               otg_dbg("%s: a session valid int\n", __func__);
+               langwell->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
+               flag = 1;
+       }
+       if (int_mask & OTGSC_AVVIS) {
+               otg_dbg("%s: a vbus valid int\n", __func__);
+               langwell->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
+               flag = 1;
+       }
+
+       if (int_mask & OTGSC_1MSS) {
+               /* need to schedule otg_work if any timer is expired */
+               if (langwell_otg_tick_timer(&int_sts))
+                       flag = 1;
+       }
+
+       writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
+                       langwell->regs + CI_OTGSC);
+       if (flag)
+               queue_work(langwell->qwork, &langwell->work);
+
+       return IRQ_HANDLED;
+}
+
+static void langwell_otg_work(struct work_struct *work)
+{
+       struct langwell_otg *langwell = container_of(work,
+                                       struct langwell_otg, work);
+       int     retval;
+
+       otg_dbg("%s: old state = %s\n", __func__,
+                       state_string(langwell->otg.state));
+
+       switch (langwell->otg.state) {
+       case OTG_STATE_UNDEFINED:
+       case OTG_STATE_B_IDLE:
+               if (!langwell->hsm.id) {
+                       langwell_otg_del_timer(b_srp_res_tmr);
+                       langwell->otg.default_a = 1;
+                       langwell->hsm.a_srp_det = 0;
+
+                       langwell_otg_chrg_vbus(0);
+                       langwell_otg_drv_vbus(0);
+
+                       set_host_mode();
+                       langwell->otg.state = OTG_STATE_A_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (langwell->hsm.b_srp_res_tmout) {
+                       langwell->hsm.b_srp_res_tmout = 0;
+                       langwell->hsm.b_bus_req = 0;
+                       langwell_otg_nsf_msg(6);
+               } else if (langwell->hsm.b_sess_vld) {
+                       langwell_otg_del_timer(b_srp_res_tmr);
+                       langwell->hsm.b_sess_end = 0;
+                       langwell->hsm.a_bus_suspend = 0;
+
+                       langwell_otg_chrg_vbus(0);
+                       if (langwell->client_ops) {
+                               langwell->client_ops->resume(langwell->pdev);
+                               langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+                       } else
+                               otg_dbg("client driver not loaded.\n");
+
+               } else if (langwell->hsm.b_bus_req &&
+                               (langwell->hsm.b_sess_end)) {
+                       /* workaround for b_se0_srp detection */
+                       retval = langwell_otg_check_se0_srp(0);
+                       if (retval) {
+                               langwell->hsm.b_bus_req = 0;
+                               otg_dbg("LS is not SE0, try again later\n");
+                       } else {
+                               /* Start SRP */
+                               langwell_otg_start_srp(&langwell->otg);
+                               langwell_otg_add_timer(b_srp_res_tmr);
+                       }
+               }
+               break;
+       case OTG_STATE_B_SRP_INIT:
+               if (!langwell->hsm.id) {
+                       langwell->otg.default_a = 1;
+                       langwell->hsm.a_srp_det = 0;
+
+                       langwell_otg_drv_vbus(0);
+                       langwell_otg_chrg_vbus(0);
+
+                       langwell->otg.state = OTG_STATE_A_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (langwell->hsm.b_sess_vld) {
+                       langwell_otg_chrg_vbus(0);
+                       if (langwell->client_ops) {
+                               langwell->client_ops->resume(langwell->pdev);
+                               langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+                       } else
+                               otg_dbg("client driver not loaded.\n");
+               }
+               break;
+       case OTG_STATE_B_PERIPHERAL:
+               if (!langwell->hsm.id) {
+                       langwell->otg.default_a = 1;
+                       langwell->hsm.a_srp_det = 0;
+
+                       langwell_otg_drv_vbus(0);
+                       langwell_otg_chrg_vbus(0);
+                       set_host_mode();
+
+                       if (langwell->client_ops) {
+                               langwell->client_ops->suspend(langwell->pdev,
+                                       PMSG_FREEZE);
+                       } else
+                               otg_dbg("client driver has been removed.\n");
+
+                       langwell->otg.state = OTG_STATE_A_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (!langwell->hsm.b_sess_vld) {
+                       langwell->hsm.b_hnp_enable = 0;
+
+                       if (langwell->client_ops) {
+                               langwell->client_ops->suspend(langwell->pdev,
+                                       PMSG_FREEZE);
+                       } else
+                               otg_dbg("client driver has been removed.\n");
+
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+               } else if (langwell->hsm.b_bus_req && langwell->hsm.b_hnp_enable
+                       && langwell->hsm.a_bus_suspend) {
+
+                       if (langwell->client_ops) {
+                               langwell->client_ops->suspend(langwell->pdev,
+                                       PMSG_FREEZE);
+                       } else
+                               otg_dbg("client driver has been removed.\n");
+
+                       langwell_otg_HAAR(1);
+                       langwell->hsm.a_conn = 0;
+
+                       if (langwell->host_ops) {
+                               langwell->host_ops->probe(langwell->pdev,
+                                       langwell->host_ops->id_table);
+                               langwell->otg.state = OTG_STATE_B_WAIT_ACON;
+                       } else
+                               otg_dbg("host driver not loaded.\n");
+
+                       langwell->hsm.a_bus_resume = 0;
+                       langwell->hsm.b_ase0_brst_tmout = 0;
+                       langwell_otg_add_timer(b_ase0_brst_tmr);
+               }
+               break;
+
+       case OTG_STATE_B_WAIT_ACON:
+               if (!langwell->hsm.id) {
+                       langwell_otg_del_timer(b_ase0_brst_tmr);
+                       langwell->otg.default_a = 1;
+                       langwell->hsm.a_srp_det = 0;
+
+                       langwell_otg_drv_vbus(0);
+                       langwell_otg_chrg_vbus(0);
+                       set_host_mode();
+
+                       langwell_otg_HAAR(0);
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell->otg.state = OTG_STATE_A_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (!langwell->hsm.b_sess_vld) {
+                       langwell_otg_del_timer(b_ase0_brst_tmr);
+                       langwell->hsm.b_hnp_enable = 0;
+                       langwell->hsm.b_bus_req = 0;
+                       langwell_otg_chrg_vbus(0);
+                       langwell_otg_HAAR(0);
+
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+               } else if (langwell->hsm.a_conn) {
+                       langwell_otg_del_timer(b_ase0_brst_tmr);
+                       langwell_otg_HAAR(0);
+                       langwell->otg.state = OTG_STATE_B_HOST;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (langwell->hsm.a_bus_resume ||
+                               langwell->hsm.b_ase0_brst_tmout) {
+                       langwell_otg_del_timer(b_ase0_brst_tmr);
+                       langwell_otg_HAAR(0);
+                       langwell_otg_nsf_msg(7);
+
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+
+                       langwell->hsm.a_bus_suspend = 0;
+                       langwell->hsm.b_bus_req = 0;
+
+                       if (langwell->client_ops)
+                               langwell->client_ops->resume(langwell->pdev);
+                       else
+                               otg_dbg("client driver not loaded.\n");
+
+                       langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+               }
+               break;
+
+       case OTG_STATE_B_HOST:
+               if (!langwell->hsm.id) {
+                       langwell->otg.default_a = 1;
+                       langwell->hsm.a_srp_det = 0;
+
+                       langwell_otg_drv_vbus(0);
+                       langwell_otg_chrg_vbus(0);
+                       set_host_mode();
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell->otg.state = OTG_STATE_A_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (!langwell->hsm.b_sess_vld) {
+                       langwell->hsm.b_hnp_enable = 0;
+                       langwell->hsm.b_bus_req = 0;
+                       langwell_otg_chrg_vbus(0);
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+               } else if ((!langwell->hsm.b_bus_req) ||
+                               (!langwell->hsm.a_conn)) {
+                       langwell->hsm.b_bus_req = 0;
+                       langwell_otg_loc_sof(0);
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+
+                       langwell->hsm.a_bus_suspend = 0;
+
+                       if (langwell->client_ops)
+                               langwell->client_ops->resume(langwell->pdev);
+                       else
+                               otg_dbg("client driver not loaded.\n");
+
+                       langwell->otg.state = OTG_STATE_B_PERIPHERAL;
+               }
+               break;
+
+       case OTG_STATE_A_IDLE:
+               langwell->otg.default_a = 1;
+               if (langwell->hsm.id) {
+                       langwell->otg.default_a = 0;
+                       langwell->hsm.b_bus_req = 0;
+                       langwell_otg_drv_vbus(0);
+                       langwell_otg_chrg_vbus(0);
+
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (langwell->hsm.a_sess_vld) {
+                       langwell_otg_drv_vbus(1);
+                       langwell->hsm.a_srp_det = 1;
+                       langwell->hsm.a_wait_vrise_tmout = 0;
+                       langwell_otg_add_timer(a_wait_vrise_tmr);
+                       langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (!langwell->hsm.a_bus_drop &&
+                       (langwell->hsm.a_srp_det || langwell->hsm.a_bus_req)) {
+                       langwell_otg_drv_vbus(1);
+                       langwell->hsm.a_wait_vrise_tmout = 0;
+                       langwell_otg_add_timer(a_wait_vrise_tmr);
+                       langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+                       queue_work(langwell->qwork, &langwell->work);
+               }
+               break;
+       case OTG_STATE_A_WAIT_VRISE:
+               if (langwell->hsm.id) {
+                       langwell_otg_del_timer(a_wait_vrise_tmr);
+                       langwell->hsm.b_bus_req = 0;
+                       langwell->otg.default_a = 0;
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+               } else if (langwell->hsm.a_vbus_vld) {
+                       langwell_otg_del_timer(a_wait_vrise_tmr);
+                       if (langwell->host_ops)
+                               langwell->host_ops->probe(langwell->pdev,
+                                               langwell->host_ops->id_table);
+                       else
+                               otg_dbg("host driver not loaded.\n");
+                       langwell->hsm.b_conn = 0;
+                       langwell->hsm.a_set_b_hnp_en = 0;
+                       langwell->hsm.a_wait_bcon_tmout = 0;
+                       langwell_otg_add_timer(a_wait_bcon_tmr);
+                       langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+               } else if (langwell->hsm.a_wait_vrise_tmout) {
+                       if (langwell->hsm.a_vbus_vld) {
+                               if (langwell->host_ops)
+                                       langwell->host_ops->probe(
+                                               langwell->pdev,
+                                               langwell->host_ops->id_table);
+                               else
+                                       otg_dbg("host driver not loaded.\n");
+                               langwell->hsm.b_conn = 0;
+                               langwell->hsm.a_set_b_hnp_en = 0;
+                               langwell->hsm.a_wait_bcon_tmout = 0;
+                               langwell_otg_add_timer(a_wait_bcon_tmr);
+                               langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+                       } else {
+                               langwell_otg_drv_vbus(0);
+                               langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+                       }
+               }
+               break;
+       case OTG_STATE_A_WAIT_BCON:
+               if (langwell->hsm.id) {
+                       langwell_otg_del_timer(a_wait_bcon_tmr);
+
+                       langwell->otg.default_a = 0;
+                       langwell->hsm.b_bus_req = 0;
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (!langwell->hsm.a_vbus_vld) {
+                       langwell_otg_del_timer(a_wait_bcon_tmr);
+
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+               } else if (langwell->hsm.a_bus_drop ||
+                               (langwell->hsm.a_wait_bcon_tmout &&
+                               !langwell->hsm.a_bus_req)) {
+                       langwell_otg_del_timer(a_wait_bcon_tmr);
+
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+               } else if (langwell->hsm.b_conn) {
+                       langwell_otg_del_timer(a_wait_bcon_tmr);
+
+                       langwell->hsm.a_suspend_req = 0;
+                       langwell->otg.state = OTG_STATE_A_HOST;
+                       if (!langwell->hsm.a_bus_req &&
+                               langwell->hsm.a_set_b_hnp_en) {
+                               /* It is not safe enough to do a fast
+                                * transistion from A_WAIT_BCON to
+                                * A_SUSPEND */
+                               msleep(10000);
+                               if (langwell->hsm.a_bus_req)
+                                       break;
+
+                               if (request_irq(langwell->pdev->irq,
+                                       otg_dummy_irq, IRQF_SHARED,
+                                       driver_name, langwell->regs) != 0) {
+                                       otg_dbg("request interrupt %d fail\n",
+                                       langwell->pdev->irq);
+                               }
+
+                               langwell_otg_HABA(1);
+                               langwell->hsm.b_bus_resume = 0;
+                               langwell->hsm.a_aidl_bdis_tmout = 0;
+                               langwell_otg_add_timer(a_aidl_bdis_tmr);
+
+                               langwell_otg_loc_sof(0);
+                               langwell->otg.state = OTG_STATE_A_SUSPEND;
+                       } else if (!langwell->hsm.a_bus_req &&
+                               !langwell->hsm.a_set_b_hnp_en) {
+                               struct pci_dev *pdev = langwell->pdev;
+                               if (langwell->host_ops)
+                                       langwell->host_ops->remove(pdev);
+                               else
+                                       otg_dbg("host driver removed.\n");
+                               langwell_otg_drv_vbus(0);
+                               langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+                       }
+               }
+               break;
+       case OTG_STATE_A_HOST:
+               if (langwell->hsm.id) {
+                       langwell->otg.default_a = 0;
+                       langwell->hsm.b_bus_req = 0;
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (langwell->hsm.a_bus_drop ||
+               (!langwell->hsm.a_set_b_hnp_en && !langwell->hsm.a_bus_req)) {
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+               } else if (!langwell->hsm.a_vbus_vld) {
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+               } else if (langwell->hsm.a_set_b_hnp_en
+                               && !langwell->hsm.a_bus_req) {
+                       /* Set HABA to enable hardware assistance to signal
+                        *  A-connect after receiver B-disconnect. Hardware
+                        *  will then set client mode and enable URE, SLE and
+                        *  PCE after the assistance. otg_dummy_irq is used to
+                        *  clean these ints when client driver is not resumed.
+                        */
+                       if (request_irq(langwell->pdev->irq,
+                               otg_dummy_irq, IRQF_SHARED, driver_name,
+                               langwell->regs) != 0) {
+                               otg_dbg("request interrupt %d failed\n",
+                                               langwell->pdev->irq);
+                       }
+
+                       /* set HABA */
+                       langwell_otg_HABA(1);
+                       langwell->hsm.b_bus_resume = 0;
+                       langwell->hsm.a_aidl_bdis_tmout = 0;
+                       langwell_otg_add_timer(a_aidl_bdis_tmr);
+                       langwell_otg_loc_sof(0);
+                       langwell->otg.state = OTG_STATE_A_SUSPEND;
+               } else if (!langwell->hsm.b_conn || !langwell->hsm.a_bus_req) {
+                       langwell->hsm.a_wait_bcon_tmout = 0;
+                       langwell->hsm.a_set_b_hnp_en = 0;
+                       langwell_otg_add_timer(a_wait_bcon_tmr);
+                       langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+               }
+               break;
+       case OTG_STATE_A_SUSPEND:
+               if (langwell->hsm.id) {
+                       langwell_otg_del_timer(a_aidl_bdis_tmr);
+                       langwell_otg_HABA(0);
+                       free_irq(langwell->pdev->irq, langwell->regs);
+                       langwell->otg.default_a = 0;
+                       langwell->hsm.b_bus_req = 0;
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (langwell->hsm.a_bus_req ||
+                               langwell->hsm.b_bus_resume) {
+                       langwell_otg_del_timer(a_aidl_bdis_tmr);
+                       langwell_otg_HABA(0);
+                       free_irq(langwell->pdev->irq, langwell->regs);
+                       langwell->hsm.a_suspend_req = 0;
+                       langwell_otg_loc_sof(1);
+                       langwell->otg.state = OTG_STATE_A_HOST;
+               } else if (langwell->hsm.a_aidl_bdis_tmout ||
+                               langwell->hsm.a_bus_drop) {
+                       langwell_otg_del_timer(a_aidl_bdis_tmr);
+                       langwell_otg_HABA(0);
+                       free_irq(langwell->pdev->irq, langwell->regs);
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+               } else if (!langwell->hsm.b_conn &&
+                               langwell->hsm.a_set_b_hnp_en) {
+                       langwell_otg_del_timer(a_aidl_bdis_tmr);
+                       langwell_otg_HABA(0);
+                       free_irq(langwell->pdev->irq, langwell->regs);
+
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+
+                       langwell->hsm.b_bus_suspend = 0;
+                       langwell->hsm.b_bus_suspend_vld = 0;
+                       langwell->hsm.b_bus_suspend_tmout = 0;
+
+                       /* msleep(200); */
+                       if (langwell->client_ops)
+                               langwell->client_ops->resume(langwell->pdev);
+                       else
+                               otg_dbg("client driver not loaded.\n");
+
+                       langwell_otg_add_timer(b_bus_suspend_tmr);
+                       langwell->otg.state = OTG_STATE_A_PERIPHERAL;
+                       break;
+               } else if (!langwell->hsm.a_vbus_vld) {
+                       langwell_otg_del_timer(a_aidl_bdis_tmr);
+                       langwell_otg_HABA(0);
+                       free_irq(langwell->pdev->irq, langwell->regs);
+                       if (langwell->host_ops)
+                               langwell->host_ops->remove(langwell->pdev);
+                       else
+                               otg_dbg("host driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+               }
+               break;
+       case OTG_STATE_A_PERIPHERAL:
+               if (langwell->hsm.id) {
+                       langwell_otg_del_timer(b_bus_suspend_tmr);
+                       langwell->otg.default_a = 0;
+                       langwell->hsm.b_bus_req = 0;
+                       if (langwell->client_ops)
+                               langwell->client_ops->suspend(langwell->pdev,
+                                       PMSG_FREEZE);
+                       else
+                               otg_dbg("client driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (!langwell->hsm.a_vbus_vld) {
+                       langwell_otg_del_timer(b_bus_suspend_tmr);
+                       if (langwell->client_ops)
+                               langwell->client_ops->suspend(langwell->pdev,
+                                       PMSG_FREEZE);
+                       else
+                               otg_dbg("client driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_A_VBUS_ERR;
+               } else if (langwell->hsm.a_bus_drop) {
+                       langwell_otg_del_timer(b_bus_suspend_tmr);
+                       if (langwell->client_ops)
+                               langwell->client_ops->suspend(langwell->pdev,
+                                       PMSG_FREEZE);
+                       else
+                               otg_dbg("client driver has been removed.\n");
+                       langwell_otg_drv_vbus(0);
+                       langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+               } else if (langwell->hsm.b_bus_suspend) {
+                       langwell_otg_del_timer(b_bus_suspend_tmr);
+                       if (langwell->client_ops)
+                               langwell->client_ops->suspend(langwell->pdev,
+                                       PMSG_FREEZE);
+                       else
+                               otg_dbg("client driver has been removed.\n");
+
+                       if (langwell->host_ops)
+                               langwell->host_ops->probe(langwell->pdev,
+                                               langwell->host_ops->id_table);
+                       else
+                               otg_dbg("host driver not loaded.\n");
+                       langwell->hsm.a_set_b_hnp_en = 0;
+                       langwell->hsm.a_wait_bcon_tmout = 0;
+                       langwell_otg_add_timer(a_wait_bcon_tmr);
+                       langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+               } else if (langwell->hsm.b_bus_suspend_tmout) {
+                       u32     val;
+                       val = readl(langwell->regs + CI_PORTSC1);
+                       if (!(val & PORTSC_SUSP))
+                               break;
+                       if (langwell->client_ops)
+                               langwell->client_ops->suspend(langwell->pdev,
+                                               PMSG_FREEZE);
+                       else
+                               otg_dbg("client driver has been removed.\n");
+                       if (langwell->host_ops)
+                               langwell->host_ops->probe(langwell->pdev,
+                                               langwell->host_ops->id_table);
+                       else
+                               otg_dbg("host driver not loaded.\n");
+                       langwell->hsm.a_set_b_hnp_en = 0;
+                       langwell->hsm.a_wait_bcon_tmout = 0;
+                       langwell_otg_add_timer(a_wait_bcon_tmr);
+                       langwell->otg.state = OTG_STATE_A_WAIT_BCON;
+               }
+               break;
+       case OTG_STATE_A_VBUS_ERR:
+               if (langwell->hsm.id) {
+                       langwell->otg.default_a = 0;
+                       langwell->hsm.a_clr_err = 0;
+                       langwell->hsm.a_srp_det = 0;
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (langwell->hsm.a_clr_err) {
+                       langwell->hsm.a_clr_err = 0;
+                       langwell->hsm.a_srp_det = 0;
+                       reset_otg();
+                       init_hsm();
+                       if (langwell->otg.state == OTG_STATE_A_IDLE)
+                               queue_work(langwell->qwork, &langwell->work);
+               }
+               break;
+       case OTG_STATE_A_WAIT_VFALL:
+               if (langwell->hsm.id) {
+                       langwell->otg.default_a = 0;
+                       langwell->otg.state = OTG_STATE_B_IDLE;
+                       queue_work(langwell->qwork, &langwell->work);
+               } else if (langwell->hsm.a_bus_req) {
+                       langwell_otg_drv_vbus(1);
+                       langwell->hsm.a_wait_vrise_tmout = 0;
+                       langwell_otg_add_timer(a_wait_vrise_tmr);
+                       langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
+               } else if (!langwell->hsm.a_sess_vld) {
+                       langwell->hsm.a_srp_det = 0;
+                       langwell_otg_drv_vbus(0);
+                       set_host_mode();
+                       langwell->otg.state = OTG_STATE_A_IDLE;
+               }
+               break;
+       default:
+               ;
+       }
+
+       otg_dbg("%s: new state = %s\n", __func__,
+                       state_string(langwell->otg.state));
+}
+
+/*
+ * show_registers - sysfs read: dump the controller's core register block.
+ *
+ * Formats USBCMD/USBSTS/USBINTR/ASYNCLISTADDR/PORTSC1/HOSTPC1/OTGSC/USBMODE
+ * (read from the ioremapped BAR 0 window) into @buf, one per line.
+ * Returns the number of bytes written.  Read-only debug attribute.
+ *
+ * Fix: the "static ssize_t" storage-class line was indented one tab,
+ * which violates kernel coding style; pulled back to column 0.
+ */
+static ssize_t
+show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+       struct langwell_otg *langwell;
+       char *next;
+       unsigned size;
+       unsigned t;
+
+       langwell = the_transceiver;
+       next = buf;
+       size = PAGE_SIZE;
+
+       t = scnprintf(next, size,
+               "\n"
+               "USBCMD = 0x%08x \n"
+               "USBSTS = 0x%08x \n"
+               "USBINTR = 0x%08x \n"
+               "ASYNCLISTADDR = 0x%08x \n"
+               "PORTSC1 = 0x%08x \n"
+               "HOSTPC1 = 0x%08x \n"
+               "OTGSC = 0x%08x \n"
+               "USBMODE = 0x%08x \n",
+               readl(langwell->regs + 0x30),   /* USBCMD */
+               readl(langwell->regs + 0x34),   /* USBSTS */
+               readl(langwell->regs + 0x38),   /* USBINTR */
+               readl(langwell->regs + 0x48),   /* ASYNCLISTADDR */
+               readl(langwell->regs + 0x74),   /* PORTSC1 */
+               readl(langwell->regs + 0xb4),   /* HOSTPC1 */
+               readl(langwell->regs + 0xf4),   /* OTGSC */
+               readl(langwell->regs + 0xf8)    /* USBMODE */
+               );
+       size -= t;
+       next += t;
+
+       return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
+
+/*
+ * show_hsm - sysfs read: dump the OTG host state machine (HSM) variables.
+ *
+ * Prints the current OTG state followed by every HSM input/output flag
+ * (connection, session, SRP/HNP and timeout bits), one per line, into
+ * @buf.  The format-string lines and the argument list below are
+ * position-paired one-to-one; keep them in lockstep when editing.
+ * Read-only debug attribute; returns the number of bytes written.
+ */
+static ssize_t
+show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+       struct langwell_otg *langwell;
+       char *next;
+       unsigned size;
+       unsigned t;
+
+       langwell = the_transceiver;
+       next = buf;
+       size = PAGE_SIZE;
+
+       t = scnprintf(next, size,
+               "\n"
+               "current state = %s\n"
+               "a_bus_resume = \t%d\n"
+               "a_bus_suspend = \t%d\n"
+               "a_conn = \t%d\n"
+               "a_sess_vld = \t%d\n"
+               "a_srp_det = \t%d\n"
+               "a_vbus_vld = \t%d\n"
+               "b_bus_resume = \t%d\n"
+               "b_bus_suspend = \t%d\n"
+               "b_conn = \t%d\n"
+               "b_se0_srp = \t%d\n"
+               "b_sess_end = \t%d\n"
+               "b_sess_vld = \t%d\n"
+               "id = \t%d\n"
+               "a_set_b_hnp_en = \t%d\n"
+               "b_srp_done = \t%d\n"
+               "b_hnp_enable = \t%d\n"
+               "a_wait_vrise_tmout = \t%d\n"
+               "a_wait_bcon_tmout = \t%d\n"
+               "a_aidl_bdis_tmout = \t%d\n"
+               "b_ase0_brst_tmout = \t%d\n"
+               "a_bus_drop = \t%d\n"
+               "a_bus_req = \t%d\n"
+               "a_clr_err = \t%d\n"
+               "a_suspend_req = \t%d\n"
+               "b_bus_req = \t%d\n"
+               "b_bus_suspend_tmout = \t%d\n"
+               "b_bus_suspend_vld = \t%d\n",
+               state_string(langwell->otg.state),
+               langwell->hsm.a_bus_resume,
+               langwell->hsm.a_bus_suspend,
+               langwell->hsm.a_conn,
+               langwell->hsm.a_sess_vld,
+               langwell->hsm.a_srp_det,
+               langwell->hsm.a_vbus_vld,
+               langwell->hsm.b_bus_resume,
+               langwell->hsm.b_bus_suspend,
+               langwell->hsm.b_conn,
+               langwell->hsm.b_se0_srp,
+               langwell->hsm.b_sess_end,
+               langwell->hsm.b_sess_vld,
+               langwell->hsm.id,
+               langwell->hsm.a_set_b_hnp_en,
+               langwell->hsm.b_srp_done,
+               langwell->hsm.b_hnp_enable,
+               langwell->hsm.a_wait_vrise_tmout,
+               langwell->hsm.a_wait_bcon_tmout,
+               langwell->hsm.a_aidl_bdis_tmout,
+               langwell->hsm.b_ase0_brst_tmout,
+               langwell->hsm.a_bus_drop,
+               langwell->hsm.a_bus_req,
+               langwell->hsm.a_clr_err,
+               langwell->hsm.a_suspend_req,
+               langwell->hsm.b_bus_req,
+               langwell->hsm.b_bus_suspend_tmout,
+               langwell->hsm.b_bus_suspend_vld
+               );
+       size -= t;
+       next += t;
+
+       return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
+
+/* sysfs read: report the state machine's a_bus_req flag as "0"/"1". */
+static ssize_t
+get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct langwell_otg *langwell = the_transceiver;
+
+       /* scnprintf returns the byte count, which is exactly what the
+        * original next/size bookkeeping computed. */
+       return scnprintf(buf, PAGE_SIZE, "%d", langwell->hsm.a_bus_req);
+}
+
+/*
+ * set_a_bus_req - sysfs write: set/clear the a_bus_req input ('0' or '1').
+ *
+ * Only meaningful while we are the A-device; setting it is refused while
+ * a_bus_drop is active.  On any accepted write the state machine worker
+ * is kicked.  Returns @count on success.
+ *
+ * Fix: replaced bare "return -1" (an implicit -EPERM for every failure)
+ * with explicit error codes: -EPERM for a role/policy violation and
+ * -EINVAL for an over-long write.
+ */
+static ssize_t
+set_a_bus_req(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct langwell_otg *langwell = the_transceiver;
+
+       /* a_bus_req is an A-device (host side) input only */
+       if (!langwell->otg.default_a)
+               return -EPERM;
+       /* expect one flag character, optionally followed by '\n' */
+       if (count > 2)
+               return -EINVAL;
+
+       if (buf[0] == '0') {
+               langwell->hsm.a_bus_req = 0;
+               otg_dbg("a_bus_req = 0\n");
+       } else if (buf[0] == '1') {
+               /* If a_bus_drop is TRUE, a_bus_req can't be set */
+               if (langwell->hsm.a_bus_drop)
+                       return -EPERM;
+               langwell->hsm.a_bus_req = 1;
+               otg_dbg("a_bus_req = 1\n");
+       }
+       /* kick the state machine; skip if someone else is queueing now */
+       if (spin_trylock(&langwell->wq_lock)) {
+               queue_work(langwell->qwork, &langwell->work);
+               spin_unlock(&langwell->wq_lock);
+       }
+       return count;
+}
+/* NOTE(review): S_IWUGO makes this world-writable; consider S_IWUSR. */
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
+
+/* sysfs read: report the state machine's a_bus_drop flag as "0"/"1". */
+static ssize_t
+get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct langwell_otg *langwell = the_transceiver;
+
+       /* scnprintf returns the byte count, matching the original
+        * PAGE_SIZE - size computation. */
+       return scnprintf(buf, PAGE_SIZE, "%d", langwell->hsm.a_bus_drop);
+}
+
+/*
+ * set_a_bus_drop - sysfs write: set/clear the a_bus_drop input ('0'/'1').
+ *
+ * A-device only.  Setting a_bus_drop also forces a_bus_req to 0 (the two
+ * are mutually exclusive in the OTG state machine).  Kicks the worker on
+ * any accepted write; returns @count on success.
+ *
+ * Fix: replaced bare "return -1" (implicit -EPERM for every failure)
+ * with explicit -EPERM (wrong role) / -EINVAL (over-long write).
+ */
+static ssize_t
+set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct langwell_otg *langwell = the_transceiver;
+
+       /* a_bus_drop is an A-device (host side) input only */
+       if (!langwell->otg.default_a)
+               return -EPERM;
+       /* expect one flag character, optionally followed by '\n' */
+       if (count > 2)
+               return -EINVAL;
+
+       if (buf[0] == '0') {
+               langwell->hsm.a_bus_drop = 0;
+               otg_dbg("a_bus_drop = 0\n");
+       } else if (buf[0] == '1') {
+               langwell->hsm.a_bus_drop = 1;
+               langwell->hsm.a_bus_req = 0;
+               otg_dbg("a_bus_drop = 1, then a_bus_req = 0\n");
+       }
+       /* kick the state machine; skip if someone else is queueing now */
+       if (spin_trylock(&langwell->wq_lock)) {
+               queue_work(langwell->qwork, &langwell->work);
+               spin_unlock(&langwell->wq_lock);
+       }
+       return count;
+}
+/* NOTE(review): S_IWUGO makes this world-writable; consider S_IWUSR. */
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
+       get_a_bus_drop, set_a_bus_drop);
+
+/* sysfs read: report the state machine's b_bus_req flag as "0"/"1". */
+static ssize_t
+get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct langwell_otg *langwell = the_transceiver;
+
+       /* scnprintf returns the byte count, matching the original
+        * PAGE_SIZE - size computation. */
+       return scnprintf(buf, PAGE_SIZE, "%d", langwell->hsm.b_bus_req);
+}
+
+/*
+ * set_b_bus_req - sysfs write: set/clear the b_bus_req input ('0'/'1').
+ *
+ * B-device only (refused while we are the default A-device).  Kicks the
+ * state machine worker on any accepted write; returns @count on success.
+ *
+ * Fix: replaced bare "return -1" (implicit -EPERM for every failure)
+ * with explicit -EPERM (wrong role) / -EINVAL (over-long write).
+ */
+static ssize_t
+set_b_bus_req(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct langwell_otg *langwell = the_transceiver;
+
+       /* b_bus_req is a B-device (peripheral side) input only */
+       if (langwell->otg.default_a)
+               return -EPERM;
+
+       /* expect one flag character, optionally followed by '\n' */
+       if (count > 2)
+               return -EINVAL;
+
+       if (buf[0] == '0') {
+               langwell->hsm.b_bus_req = 0;
+               otg_dbg("b_bus_req = 0\n");
+       } else if (buf[0] == '1') {
+               langwell->hsm.b_bus_req = 1;
+               otg_dbg("b_bus_req = 1\n");
+       }
+       /* kick the state machine; skip if someone else is queueing now */
+       if (spin_trylock(&langwell->wq_lock)) {
+               queue_work(langwell->qwork, &langwell->work);
+               spin_unlock(&langwell->wq_lock);
+       }
+       return count;
+}
+/* NOTE(review): S_IWUGO makes this world-writable; consider S_IWUSR. */
+static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
+
+/*
+ * set_a_clr_err - sysfs write: raise the a_clr_err input (write '1').
+ *
+ * A-device only; used to clear the A_VBUS_ERR condition in the state
+ * machine.  Write-only attribute.  Kicks the worker; returns @count.
+ *
+ * Fix: replaced bare "return -1" (implicit -EPERM for every failure)
+ * with explicit -EPERM (wrong role) / -EINVAL (over-long write).
+ */
+static ssize_t
+set_a_clr_err(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t count)
+{
+       struct langwell_otg *langwell = the_transceiver;
+
+       /* a_clr_err is an A-device (host side) input only */
+       if (!langwell->otg.default_a)
+               return -EPERM;
+       /* expect one flag character, optionally followed by '\n' */
+       if (count > 2)
+               return -EINVAL;
+
+       if (buf[0] == '1') {
+               langwell->hsm.a_clr_err = 1;
+               otg_dbg("a_clr_err = 1\n");
+       }
+       /* kick the state machine; skip if someone else is queueing now */
+       if (spin_trylock(&langwell->wq_lock)) {
+               queue_work(langwell->qwork, &langwell->work);
+               spin_unlock(&langwell->wq_lock);
+       }
+       return count;
+}
+/* NOTE(review): S_IWUGO makes this world-writable; consider S_IWUSR. */
+static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
+
+/* Writable OTG state-machine inputs, exposed together as a sysfs group. */
+static struct attribute *inputs_attrs[] = {
+       &dev_attr_a_bus_req.attr,
+       &dev_attr_a_bus_drop.attr,
+       &dev_attr_b_bus_req.attr,
+       &dev_attr_a_clr_err.attr,
+       NULL,
+};
+
+/* Group appears as <device>/inputs/{a_bus_req,a_bus_drop,b_bus_req,a_clr_err} */
+static struct attribute_group debug_dev_attr_group = {
+       .name = "inputs",
+       .attrs = inputs_attrs,
+};
+
+/**
+ * langwell_register_host - bind the host controller driver to the OTG core
+ * @host_driver: PCI driver implementing the host controller
+ *
+ * Stores the driver's ops for the state machine to probe/remove as the
+ * host role comes and goes, and kicks the state-machine worker.
+ * Always succeeds and returns 0.
+ *
+ * Fix: dropped the useless local "ret" that could only ever be 0.
+ */
+int langwell_register_host(struct pci_driver *host_driver)
+{
+       the_transceiver->host_ops = host_driver;
+       queue_work(the_transceiver->qwork, &the_transceiver->work);
+       otg_dbg("host controller driver is registered\n");
+
+       return 0;
+}
+EXPORT_SYMBOL(langwell_register_host);
+
+/**
+ * langwell_unregister_host - detach the host controller driver
+ * @host_driver: driver being unregistered (unused; the stored ops are used)
+ *
+ * Removes the bound host controller (if any), clears the ops pointer,
+ * and forces a_bus_drop so the state machine drops VBUS and leaves the
+ * host role, then kicks the worker.
+ */
+void langwell_unregister_host(struct pci_driver *host_driver)
+{
+       if (the_transceiver->host_ops)
+               the_transceiver->host_ops->remove(the_transceiver->pdev);
+       the_transceiver->host_ops = NULL;
+       the_transceiver->hsm.a_bus_drop = 1;
+       queue_work(the_transceiver->qwork, &the_transceiver->work);
+       otg_dbg("host controller driver is unregistered\n");
+}
+EXPORT_SYMBOL(langwell_unregister_host);
+
+/**
+ * langwell_register_peripheral - bind the client (device-mode) driver
+ * @client_driver: PCI driver implementing the peripheral controller
+ *
+ * Immediately probes @client_driver against the OTG PCI device; on
+ * success stores its ops and kicks the state-machine worker.
+ * Returns the probe result (0 on success).
+ *
+ * NOTE(review): a NULL @client_driver leaves ret == 0, so client_ops is
+ * set to NULL and the worker is still queued — confirm this "register
+ * nothing" path is intended.
+ */
+int langwell_register_peripheral(struct pci_driver *client_driver)
+{
+       int     ret = 0;
+
+       if (client_driver)
+               ret = client_driver->probe(the_transceiver->pdev,
+                               client_driver->id_table);
+       if (!ret) {
+               the_transceiver->client_ops = client_driver;
+               queue_work(the_transceiver->qwork, &the_transceiver->work);
+               otg_dbg("client controller driver is registered\n");
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(langwell_register_peripheral);
+
+/**
+ * langwell_unregister_peripheral - detach the client (device-mode) driver
+ * @client_driver: driver being unregistered (unused; the stored ops are used)
+ *
+ * Removes the bound client controller (if any), clears the ops pointer,
+ * drops b_bus_req so the state machine leaves the peripheral role, and
+ * kicks the worker.
+ */
+void langwell_unregister_peripheral(struct pci_driver *client_driver)
+{
+       if (the_transceiver->client_ops)
+               the_transceiver->client_ops->remove(the_transceiver->pdev);
+       the_transceiver->client_ops = NULL;
+       the_transceiver->hsm.b_bus_req = 0;
+       queue_work(the_transceiver->qwork, &the_transceiver->work);
+       otg_dbg("client controller driver is unregistered\n");
+}
+EXPORT_SYMBOL(langwell_unregister_peripheral);
+
+/*
+ * langwell_otg_probe - PCI probe for the Langwell USB OTG controller.
+ *
+ * Maps BAR 0, creates the state-machine workqueue, registers the OTG
+ * transceiver, resets the controller, installs the IRQ handler, enables
+ * the OTGSC interrupt sources and creates the debug sysfs attributes.
+ * Returns 0 on success or a negative errno.  Any failure after the
+ * transceiver struct has been allocated goes through the "err" label,
+ * where langwell_otg_remove() tears down whatever was set up so far.
+ */
+static int langwell_otg_probe(struct pci_dev *pdev,
+               const struct pci_device_id *id)
+{
+       unsigned long           resource, len;
+       void __iomem            *base = NULL;
+       int                     retval;
+       u32                     val32;
+       struct langwell_otg     *langwell;
+       char                    qname[] = "langwell_otg_queue";
+
+       retval = 0;
+       otg_dbg("\notg controller is detected.\n");
+       if (pci_enable_device(pdev) < 0) {
+               retval = -ENODEV;
+               goto done;
+       }
+
+       langwell = kzalloc(sizeof *langwell, GFP_KERNEL);
+       if (langwell == NULL) {
+               retval = -ENOMEM;
+               goto done;
+       }
+       /* Publish early: the err path calls langwell_otg_remove(), which
+        * needs the_transceiver to find the partially set up state. */
+       the_transceiver = langwell;
+
+       /* control register: BAR 0 */
+       resource = pci_resource_start(pdev, 0);
+       len = pci_resource_len(pdev, 0);
+       if (!request_mem_region(resource, len, driver_name)) {
+               retval = -EBUSY;
+               goto err;
+       }
+       /* remember that the region is held, so teardown releases it */
+       langwell->region = 1;
+
+       base = ioremap_nocache(resource, len);
+       if (base == NULL) {
+               retval = -EFAULT;
+               goto err;
+       }
+       langwell->regs = base;
+
+       if (!pdev->irq) {
+               otg_dbg("No IRQ.\n");
+               retval = -ENODEV;
+               goto err;
+       }
+
+       /* private workqueue that runs the OTG state machine */
+       langwell->qwork = create_workqueue(qname);
+       if (!langwell->qwork) {
+               otg_dbg("cannot create workqueue %s\n", qname);
+               retval = -ENOMEM;
+               goto err;
+       }
+       INIT_WORK(&langwell->work, langwell_otg_work);
+
+       /* OTG common part */
+       langwell->pdev = pdev;
+       langwell->otg.dev = &pdev->dev;
+       langwell->otg.label = driver_name;
+       langwell->otg.set_host = langwell_otg_set_host;
+       langwell->otg.set_peripheral = langwell_otg_set_peripheral;
+       langwell->otg.set_power = langwell_otg_set_power;
+       langwell->otg.start_srp = langwell_otg_start_srp;
+       langwell->otg.state = OTG_STATE_UNDEFINED;
+       if (otg_set_transceiver(&langwell->otg)) {
+               otg_dbg("can't set transceiver\n");
+               retval = -EBUSY;
+               goto err;
+       }
+
+       /* bring the controller to a known state before enabling the IRQ */
+       reset_otg();
+       init_hsm();
+
+       spin_lock_init(&langwell->lock);
+       spin_lock_init(&langwell->wq_lock);
+       INIT_LIST_HEAD(&active_timers);
+       langwell_otg_init_timers(&langwell->hsm);
+
+       if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
+                               driver_name, langwell) != 0) {
+               otg_dbg("request interrupt %d failed\n", pdev->irq);
+               retval = -EBUSY;
+               goto err;
+       }
+
+       /* enable OTGSC int */
+       val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
+               OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
+       writel(val32, langwell->regs + CI_OTGSC);
+
+       retval = device_create_file(&pdev->dev, &dev_attr_registers);
+       if (retval < 0) {
+               otg_dbg("Can't register sysfs attribute: %d\n", retval);
+               goto err;
+       }
+
+       retval = device_create_file(&pdev->dev, &dev_attr_hsm);
+       if (retval < 0) {
+               /* NOTE(review): message presumably meant "Can't register
+                * hsm sysfs attribute" — confirm and fix the string. */
+               otg_dbg("Can't hsm sysfs attribute: %d\n", retval);
+               goto err;
+       }
+
+       retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
+       if (retval < 0) {
+               otg_dbg("Can't register sysfs attr group: %d\n", retval);
+               goto err;
+       }
+
+       /* init_hsm() may have already decided we are the A-device; if so,
+        * start the state machine right away */
+       if (langwell->otg.state == OTG_STATE_A_IDLE)
+               queue_work(langwell->qwork, &langwell->work);
+
+       return 0;
+
+err:
+       if (the_transceiver)
+               langwell_otg_remove(pdev);
+done:
+       return retval;
+}
+
+static void langwell_otg_remove(struct pci_dev *pdev)
+{
+       struct langwell_otg *langwell;
+
+       langwell = the_transceiver;
+
+       if (langwell->qwork) {
+               flush_workqueue(langwell->qwork);
+               destroy_workqueue(langwell->qwork);
+       }
+       langwell_otg_free_timers();
+
+       /* disable OTGSC interrupt as OTGSC doesn't change in reset */
+       writel(0, langwell->regs + CI_OTGSC);
+
+       if (pdev->irq)
+               free_irq(pdev->irq, langwell);
+       if (langwell->regs)
+               iounmap(langwell->regs);
+       if (langwell->region)
+               release_mem_region(pci_resource_start(pdev, 0),
+                               pci_resource_len(pdev, 0));
+
+       otg_set_transceiver(NULL);
+       pci_disable_device(pdev);
+       sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
+       device_remove_file(&pdev->dev, &dev_attr_hsm);
+       device_remove_file(&pdev->dev, &dev_attr_registers);
+       kfree(langwell);
+       langwell = NULL;
+}
+
+static void transceiver_suspend(struct pci_dev *pdev)
+{
+       pci_save_state(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+       langwell_otg_phy_low_power(1);
+}
+
+static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
+{
+       int     ret = 0;
+       struct langwell_otg *langwell;
+
+       langwell = the_transceiver;
+
+       /* Disable OTG interrupts */
+       langwell_otg_intr(0);
+
+       if (pdev->irq)
+               free_irq(pdev->irq, langwell);
+
+       /* Prevent more otg_work */
+       flush_workqueue(langwell->qwork);
+       spin_lock(&langwell->wq_lock);
+
+       /* start actions */
+       switch (langwell->otg.state) {
+       case OTG_STATE_A_IDLE:
+       case OTG_STATE_B_IDLE:
+       case OTG_STATE_A_WAIT_VFALL:
+       case OTG_STATE_A_VBUS_ERR:
+               transceiver_suspend(pdev);
+               break;
+       case OTG_STATE_A_WAIT_VRISE:
+               langwell_otg_del_timer(a_wait_vrise_tmr);
+               langwell->hsm.a_srp_det = 0;
+               langwell_otg_drv_vbus(0);
+               langwell->otg.state = OTG_STATE_A_IDLE;
+               transceiver_suspend(pdev);
+               break;
+       case OTG_STATE_A_WAIT_BCON:
+               langwell_otg_del_timer(a_wait_bcon_tmr);
+               if (langwell->host_ops)
+                       ret = langwell->host_ops->suspend(pdev, message);
+               langwell_otg_drv_vbus(0);
+               break;
+       case OTG_STATE_A_HOST:
+               if (langwell->host_ops)
+                       ret = langwell->host_ops->suspend(pdev, message);
+               langwell_otg_drv_vbus(0);
+               langwell_otg_phy_low_power(1);
+               break;
+       case OTG_STATE_A_SUSPEND:
+               langwell_otg_del_timer(a_aidl_bdis_tmr);
+               langwell_otg_HABA(0);
+               if (langwell->host_ops)
+                       langwell->host_ops->remove(pdev);
+               else
+                       otg_dbg("host driver has been removed.\n");
+               langwell_otg_drv_vbus(0);
+               transceiver_suspend(pdev);
+               langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+               break;
+       case OTG_STATE_A_PERIPHERAL:
+               if (langwell->client_ops)
+                       ret = langwell->client_ops->suspend(pdev, message);
+               else
+                       otg_dbg("client driver has been removed.\n");
+               langwell_otg_drv_vbus(0);
+               transceiver_suspend(pdev);
+               langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
+               break;
+       case OTG_STATE_B_HOST:
+               if (langwell->host_ops)
+                       langwell->host_ops->remove(pdev);
+               else
+                       otg_dbg("host driver has been removed.\n");
+               langwell->hsm.b_bus_req = 0;
+               transceiver_suspend(pdev);
+               langwell->otg.state = OTG_STATE_B_IDLE;
+               break;
+       case OTG_STATE_B_PERIPHERAL:
+               if (langwell->client_ops)
+                       ret = langwell->client_ops->suspend(pdev, message);
+               else
+                       otg_dbg("client driver has been removed.\n");
+               break;
+       case OTG_STATE_B_WAIT_ACON:
+               langwell_otg_del_timer(b_ase0_brst_tmr);
+               langwell_otg_HAAR(0);
+               if (langwell->host_ops)
+                       langwell->host_ops->remove(pdev);
+               else
+                       otg_dbg("host driver has been removed.\n");
+               langwell->hsm.b_bus_req = 0;
+               langwell->otg.state = OTG_STATE_B_IDLE;
+               transceiver_suspend(pdev);
+               break;
+       default:
+               otg_dbg("error state before suspend\n ");
+               break;
+       }
+       spin_unlock(&langwell->wq_lock);
+
+       return ret;
+}
+
+static void transceiver_resume(struct pci_dev *pdev)
+{
+       pci_restore_state(pdev);
+       pci_set_power_state(pdev, PCI_D0);
+       langwell_otg_phy_low_power(0);
+}
+
+static int langwell_otg_resume(struct pci_dev *pdev)
+{
+       int     ret = 0;
+       struct langwell_otg *langwell;
+
+       langwell = the_transceiver;
+
+       spin_lock(&langwell->wq_lock);
+
+       switch (langwell->otg.state) {
+       case OTG_STATE_A_IDLE:
+       case OTG_STATE_B_IDLE:
+       case OTG_STATE_A_WAIT_VFALL:
+       case OTG_STATE_A_VBUS_ERR:
+               transceiver_resume(pdev);
+               break;
+       case OTG_STATE_A_WAIT_BCON:
+               langwell_otg_add_timer(a_wait_bcon_tmr);
+               langwell_otg_drv_vbus(1);
+               if (langwell->host_ops)
+                       ret = langwell->host_ops->resume(pdev);
+               break;
+       case OTG_STATE_A_HOST:
+               langwell_otg_drv_vbus(1);
+               langwell_otg_phy_low_power(0);
+               if (langwell->host_ops)
+                       ret = langwell->host_ops->resume(pdev);
+               break;
+       case OTG_STATE_B_PERIPHERAL:
+               if (langwell->client_ops)
+                       ret = langwell->client_ops->resume(pdev);
+               else
+                       otg_dbg("client driver not loaded.\n");
+               break;
+       default:
+               otg_dbg("error state before suspend\n ");
+               break;
+       }
+
+       if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
+                               driver_name, the_transceiver) != 0) {
+               otg_dbg("request interrupt %d failed\n", pdev->irq);
+               ret = -EBUSY;
+       }
+
+       /* enable OTG interrupts */
+       langwell_otg_intr(1);
+
+       spin_unlock(&langwell->wq_lock);
+
+       queue_work(langwell->qwork, &langwell->work);
+
+
+       return ret;
+}
+
+static int __init langwell_otg_init(void)
+{
+       return pci_register_driver(&otg_pci_driver);
+}
+module_init(langwell_otg_init);
+
+static void __exit langwell_otg_cleanup(void)
+{
+       pci_unregister_driver(&otg_pci_driver);
+}
+module_exit(langwell_otg_cleanup);
index c567168f89af54c63a1b1f386758088c99ec3de6..9ed5ea568679c3715eae4a03700d1ff8bd05e409 100644 (file)
@@ -22,8 +22,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  * Current status:
- *     this is to add "nop" transceiver for all those phy which is
- *     autonomous such as isp1504 etc.
+ *     This provides a "nop" transceiver for PHYs which are
+ *     autonomous such as isp1504, isp1707, etc.
  */
 
 #include <linux/module.h>
@@ -36,30 +36,25 @@ struct nop_usb_xceiv {
        struct device           *dev;
 };
 
-static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32);
-
-static struct platform_device nop_xceiv_device = {
-       .name           = "nop_usb_xceiv",
-       .id             = -1,
-       .dev = {
-               .dma_mask               = &nop_xceiv_dmamask,
-               .coherent_dma_mask      = DMA_BIT_MASK(32),
-               .platform_data          = NULL,
-       },
-};
+static struct platform_device *pd;
 
 void usb_nop_xceiv_register(void)
 {
-       if (platform_device_register(&nop_xceiv_device) < 0) {
+       if (pd)
+               return;
+       pd = platform_device_register_simple("nop_usb_xceiv", -1, NULL, 0);
+       if (!pd) {
                printk(KERN_ERR "Unable to register usb nop transceiver\n");
                return;
        }
 }
+EXPORT_SYMBOL(usb_nop_xceiv_register);
 
 void usb_nop_xceiv_unregister(void)
 {
-       platform_device_unregister(&nop_xceiv_device);
+       platform_device_unregister(pd);
 }
+EXPORT_SYMBOL(usb_nop_xceiv_unregister);
 
 static inline struct nop_usb_xceiv *xceiv_to_nop(struct otg_transceiver *x)
 {
index d9478d0e1c8bd6bb6d45c778d220a6ee96c8139a..9e3e7a5c258bc32d78678e5b05bb877dcac46be6 100644 (file)
 
 /* In module TWL4030_MODULE_PM_MASTER */
 #define PROTECT_KEY                    0x0E
+#define STS_HW_CONDITIONS              0x0F
 
 /* In module TWL4030_MODULE_PM_RECEIVER */
 #define VUSB_DEDICATED1                        0x7D
@@ -351,15 +352,26 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
        int     status;
        int     linkstat = USB_LINK_UNKNOWN;
 
-       /* STS_HW_CONDITIONS */
-       status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER, 0x0f);
+       /*
+        * For ID/VBUS sensing, see manual section 15.4.8 ...
+        * except when using only battery backup power, two
+        * comparators produce VBUS_PRES and ID_PRES signals,
+        * which don't match docs elsewhere.  But ... BIT(7)
+        * and BIT(2) of STS_HW_CONDITIONS, respectively, do
+        * seem to match up.  If either is true the USB_PRES
+        * signal is active, the OTG module is activated, and
+        * its interrupt may be raised (may wake the system).
+        */
+       status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER,
+                       STS_HW_CONDITIONS);
        if (status < 0)
                dev_err(twl->dev, "USB link status err %d\n", status);
-       else if (status & BIT(7))
-               linkstat = USB_LINK_VBUS;
-       else if (status & BIT(2))
-               linkstat = USB_LINK_ID;
-       else
+       else if (status & (BIT(7) | BIT(2))) {
+               if (status & BIT(2))
+                       linkstat = USB_LINK_ID;
+               else
+                       linkstat = USB_LINK_VBUS;
+       } else
                linkstat = USB_LINK_NONE;
 
        dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
@@ -641,7 +653,7 @@ static int twl4030_set_host(struct otg_transceiver *x, struct usb_bus *host)
        return 0;
 }
 
-static int __init twl4030_usb_probe(struct platform_device *pdev)
+static int __devinit twl4030_usb_probe(struct platform_device *pdev)
 {
        struct twl4030_usb_data *pdata = pdev->dev.platform_data;
        struct twl4030_usb      *twl;
index 6d106e74265e9d839e7a4e5123bb4ce11c97a538..2cbfab3716e59c3764cf333d5bdb933add1e69a9 100644 (file)
@@ -364,7 +364,7 @@ static int aircable_attach(struct usb_serial *serial)
        return 0;
 }
 
-static void aircable_shutdown(struct usb_serial *serial)
+static void aircable_release(struct usb_serial *serial)
 {
 
        struct usb_serial_port *port = serial->port[0];
@@ -375,7 +375,6 @@ static void aircable_shutdown(struct usb_serial *serial)
        if (priv) {
                serial_buf_free(priv->tx_buf);
                serial_buf_free(priv->rx_buf);
-               usb_set_serial_port_data(port, NULL);
                kfree(priv);
        }
 }
@@ -601,7 +600,7 @@ static struct usb_serial_driver aircable_device = {
        .num_ports =            1,
        .attach =               aircable_attach,
        .probe =                aircable_probe,
-       .shutdown =             aircable_shutdown,
+       .release =              aircable_release,
        .write =                aircable_write,
        .write_room =           aircable_write_room,
        .write_bulk_callback =  aircable_write_bulk_callback,
index 2bfd6dd85b5ad23624cef600a2e8e5f76de5fa18..7033b031b4439da94bb5a9c1015e7070a6721a45 100644 (file)
@@ -90,7 +90,7 @@ static int debug;
 
 /* function prototypes for a Belkin USB Serial Adapter F5U103 */
 static int  belkin_sa_startup(struct usb_serial *serial);
-static void belkin_sa_shutdown(struct usb_serial *serial);
+static void belkin_sa_release(struct usb_serial *serial);
 static int  belkin_sa_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp);
 static void belkin_sa_close(struct usb_serial_port *port);
@@ -142,7 +142,7 @@ static struct usb_serial_driver belkin_device = {
        .tiocmget =             belkin_sa_tiocmget,
        .tiocmset =             belkin_sa_tiocmset,
        .attach =               belkin_sa_startup,
-       .shutdown =             belkin_sa_shutdown,
+       .release =              belkin_sa_release,
 };
 
 
@@ -197,14 +197,13 @@ static int belkin_sa_startup(struct usb_serial *serial)
 }
 
 
-static void belkin_sa_shutdown(struct usb_serial *serial)
+static void belkin_sa_release(struct usb_serial *serial)
 {
        struct belkin_sa_private *priv;
        int i;
 
        dbg("%s", __func__);
 
-       /* stop reads and writes on all ports */
        for (i = 0; i < serial->num_ports; ++i) {
                /* My special items, the standard routines free my urbs */
                priv = usb_get_serial_port_data(serial->port[i]);
index 83bbb5bca2efcc05af2aba7023f7a846f8fafa0a..ba555c528cc6cc8c5b88356140cd0a4e288d1ed1 100644 (file)
@@ -59,23 +59,22 @@ static int usb_serial_device_probe(struct device *dev)
                retval = -ENODEV;
                goto exit;
        }
+       if (port->dev_state != PORT_REGISTERING)
+               goto exit;
 
        driver = port->serial->type;
        if (driver->port_probe) {
-               if (!try_module_get(driver->driver.owner)) {
-                       dev_err(dev, "module get failed, exiting\n");
-                       retval = -EIO;
-                       goto exit;
-               }
                retval = driver->port_probe(port);
-               module_put(driver->driver.owner);
                if (retval)
                        goto exit;
        }
 
        retval = device_create_file(dev, &dev_attr_port_number);
-       if (retval)
+       if (retval) {
+               if (driver->port_remove)
+                       retval = driver->port_remove(port);
                goto exit;
+       }
 
        minor = port->number;
        tty_register_device(usb_serial_tty_driver, minor, dev);
@@ -98,19 +97,15 @@ static int usb_serial_device_remove(struct device *dev)
        if (!port)
                return -ENODEV;
 
+       if (port->dev_state != PORT_UNREGISTERING)
+               return retval;
+
        device_remove_file(&port->dev, &dev_attr_port_number);
 
        driver = port->serial->type;
-       if (driver->port_remove) {
-               if (!try_module_get(driver->driver.owner)) {
-                       dev_err(dev, "module get failed, exiting\n");
-                       retval = -EIO;
-                       goto exit;
-               }
+       if (driver->port_remove)
                retval = driver->port_remove(port);
-               module_put(driver->driver.owner);
-       }
-exit:
+
        minor = port->number;
        tty_unregister_device(usb_serial_tty_driver, minor);
        dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
index 16a154d3b2feeaaacf0e0c949405d992a66165be..2b9eeda62bfe7beb12a4b3abb8aec6b3954e9f9c 100644 (file)
@@ -50,7 +50,7 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
                unsigned int, unsigned int);
 static void cp210x_break_ctl(struct tty_struct *, int);
 static int cp210x_startup(struct usb_serial *);
-static void cp210x_shutdown(struct usb_serial *);
+static void cp210x_disconnect(struct usb_serial *);
 
 static int debug;
 
@@ -137,7 +137,7 @@ static struct usb_serial_driver cp210x_device = {
        .tiocmget               = cp210x_tiocmget,
        .tiocmset               = cp210x_tiocmset,
        .attach                 = cp210x_startup,
-       .shutdown               = cp210x_shutdown,
+       .disconnect             = cp210x_disconnect,
 };
 
 /* Config request types */
@@ -792,7 +792,7 @@ static int cp210x_startup(struct usb_serial *serial)
        return 0;
 }
 
-static void cp210x_shutdown(struct usb_serial *serial)
+static void cp210x_disconnect(struct usb_serial *serial)
 {
        int i;
 
index 933ba913e66c5fc21950a3ccc2e5b8229f8c9d63..336523fd736671a4e8e98b1217070fc81360b7b5 100644 (file)
@@ -58,7 +58,8 @@ static int debug;
 
 /* Function prototypes */
 static int cyberjack_startup(struct usb_serial *serial);
-static void cyberjack_shutdown(struct usb_serial *serial);
+static void cyberjack_disconnect(struct usb_serial *serial);
+static void cyberjack_release(struct usb_serial *serial);
 static int  cyberjack_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp);
 static void cyberjack_close(struct usb_serial_port *port);
@@ -94,7 +95,8 @@ static struct usb_serial_driver cyberjack_device = {
        .id_table =             id_table,
        .num_ports =            1,
        .attach =               cyberjack_startup,
-       .shutdown =             cyberjack_shutdown,
+       .disconnect =           cyberjack_disconnect,
+       .release =              cyberjack_release,
        .open =                 cyberjack_open,
        .close =                cyberjack_close,
        .write =                cyberjack_write,
@@ -148,17 +150,25 @@ static int cyberjack_startup(struct usb_serial *serial)
        return 0;
 }
 
-static void cyberjack_shutdown(struct usb_serial *serial)
+static void cyberjack_disconnect(struct usb_serial *serial)
 {
        int i;
 
        dbg("%s", __func__);
 
-       for (i = 0; i < serial->num_ports; ++i) {
+       for (i = 0; i < serial->num_ports; ++i)
                usb_kill_urb(serial->port[i]->interrupt_in_urb);
+}
+
+static void cyberjack_release(struct usb_serial *serial)
+{
+       int i;
+
+       dbg("%s", __func__);
+
+       for (i = 0; i < serial->num_ports; ++i) {
                /* My special items, the standard routines free my urbs */
                kfree(usb_get_serial_port_data(serial->port[i]));
-               usb_set_serial_port_data(serial->port[i], NULL);
        }
 }
 
index 669f93848539560182b589131cc18788487477dc..9734085fd2fee19f99051320db97ae22d867dc1e 100644 (file)
@@ -171,7 +171,7 @@ struct cypress_buf {
 static int  cypress_earthmate_startup(struct usb_serial *serial);
 static int  cypress_hidcom_startup(struct usb_serial *serial);
 static int  cypress_ca42v2_startup(struct usb_serial *serial);
-static void cypress_shutdown(struct usb_serial *serial);
+static void cypress_release(struct usb_serial *serial);
 static int  cypress_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp);
 static void cypress_close(struct usb_serial_port *port);
@@ -215,7 +215,7 @@ static struct usb_serial_driver cypress_earthmate_device = {
        .id_table =                     id_table_earthmate,
        .num_ports =                    1,
        .attach =                       cypress_earthmate_startup,
-       .shutdown =                     cypress_shutdown,
+       .release =                      cypress_release,
        .open =                         cypress_open,
        .close =                        cypress_close,
        .dtr_rts =                      cypress_dtr_rts,
@@ -242,7 +242,7 @@ static struct usb_serial_driver cypress_hidcom_device = {
        .id_table =                     id_table_cyphidcomrs232,
        .num_ports =                    1,
        .attach =                       cypress_hidcom_startup,
-       .shutdown =                     cypress_shutdown,
+       .release =                      cypress_release,
        .open =                         cypress_open,
        .close =                        cypress_close,
        .dtr_rts =                      cypress_dtr_rts,
@@ -269,7 +269,7 @@ static struct usb_serial_driver cypress_ca42v2_device = {
        .id_table =                     id_table_nokiaca42v2,
        .num_ports =                    1,
        .attach =                       cypress_ca42v2_startup,
-       .shutdown =                     cypress_shutdown,
+       .release =                      cypress_release,
        .open =                         cypress_open,
        .close =                        cypress_close,
        .dtr_rts =                      cypress_dtr_rts,
@@ -616,7 +616,7 @@ static int cypress_ca42v2_startup(struct usb_serial *serial)
 } /* cypress_ca42v2_startup */
 
 
-static void cypress_shutdown(struct usb_serial *serial)
+static void cypress_release(struct usb_serial *serial)
 {
        struct cypress_private *priv;
 
@@ -629,7 +629,6 @@ static void cypress_shutdown(struct usb_serial *serial)
        if (priv) {
                cypress_buf_free(priv->buf);
                kfree(priv);
-               usb_set_serial_port_data(serial->port[0], NULL);
        }
 }
 
index 30f5140eff03a3bbeef50e90f28c942223c99e0e..f4808091c47ca873e3e39f353ea415837d232203 100644 (file)
@@ -460,7 +460,8 @@ static int digi_carrier_raised(struct usb_serial_port *port);
 static void digi_dtr_rts(struct usb_serial_port *port, int on);
 static int digi_startup_device(struct usb_serial *serial);
 static int digi_startup(struct usb_serial *serial);
-static void digi_shutdown(struct usb_serial *serial);
+static void digi_disconnect(struct usb_serial *serial);
+static void digi_release(struct usb_serial *serial);
 static void digi_read_bulk_callback(struct urb *urb);
 static int digi_read_inb_callback(struct urb *urb);
 static int digi_read_oob_callback(struct urb *urb);
@@ -524,7 +525,8 @@ static struct usb_serial_driver digi_acceleport_2_device = {
        .tiocmget =                     digi_tiocmget,
        .tiocmset =                     digi_tiocmset,
        .attach =                       digi_startup,
-       .shutdown =                     digi_shutdown,
+       .disconnect =                   digi_disconnect,
+       .release =                      digi_release,
 };
 
 static struct usb_serial_driver digi_acceleport_4_device = {
@@ -550,7 +552,8 @@ static struct usb_serial_driver digi_acceleport_4_device = {
        .tiocmget =                     digi_tiocmget,
        .tiocmset =                     digi_tiocmset,
        .attach =                       digi_startup,
-       .shutdown =                     digi_shutdown,
+       .disconnect =                   digi_disconnect,
+       .release =                      digi_release,
 };
 
 
@@ -1556,16 +1559,23 @@ static int digi_startup(struct usb_serial *serial)
 }
 
 
-static void digi_shutdown(struct usb_serial *serial)
+static void digi_disconnect(struct usb_serial *serial)
 {
        int i;
-       dbg("digi_shutdown: TOP, in_interrupt()=%ld", in_interrupt());
+       dbg("digi_disconnect: TOP, in_interrupt()=%ld", in_interrupt());
 
        /* stop reads and writes on all ports */
        for (i = 0; i < serial->type->num_ports + 1; i++) {
                usb_kill_urb(serial->port[i]->read_urb);
                usb_kill_urb(serial->port[i]->write_urb);
        }
+}
+
+
+static void digi_release(struct usb_serial *serial)
+{
+       int i;
+       dbg("digi_release: TOP, in_interrupt()=%ld", in_interrupt());
 
        /* free the private data structures for all ports */
        /* number of regular ports + 1 for the out-of-band port */
index 2b141ccb0cd958ca56d51c476075dc621e0a5a4a..80cb3471adbe0fe64cae662184aef149180a942b 100644 (file)
@@ -90,7 +90,6 @@ static int  empeg_chars_in_buffer(struct tty_struct *tty);
 static void empeg_throttle(struct tty_struct *tty);
 static void empeg_unthrottle(struct tty_struct *tty);
 static int  empeg_startup(struct usb_serial *serial);
-static void empeg_shutdown(struct usb_serial *serial);
 static void empeg_set_termios(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios);
 static void empeg_write_bulk_callback(struct urb *urb);
@@ -124,7 +123,6 @@ static struct usb_serial_driver empeg_device = {
        .throttle =             empeg_throttle,
        .unthrottle =           empeg_unthrottle,
        .attach =               empeg_startup,
-       .shutdown =             empeg_shutdown,
        .set_termios =          empeg_set_termios,
        .write =                empeg_write,
        .write_room =           empeg_write_room,
@@ -427,12 +425,6 @@ static int  empeg_startup(struct usb_serial *serial)
 }
 
 
-static void empeg_shutdown(struct usb_serial *serial)
-{
-       dbg("%s", __func__);
-}
-
-
 static void empeg_set_termios(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios)
 {
index 683304d60615a334554c3b6a0328de5d96879df5..3dc3768ca71ca0afb4a2a8f19541b3b345787b72 100644 (file)
@@ -47,7 +47,7 @@
 /*
  * Version Information
  */
-#define DRIVER_VERSION "v1.4.3"
+#define DRIVER_VERSION "v1.5.0"
 #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
 #define DRIVER_DESC "USB FTDI Serial Converters Driver"
 
@@ -82,7 +82,8 @@ struct ftdi_private {
        int rx_processed;
        unsigned long rx_bytes;
 
-       __u16 interface;        /* FT2232C port interface (0 for FT232/245) */
+       __u16 interface;        /* FT2232C, FT2232H or FT4232H port interface
+                                  (0 for FT232/245) */
 
        speed_t force_baud;     /* if non-zero, force the baud rate to
                                   this value */
@@ -94,6 +95,7 @@ struct ftdi_private {
        unsigned long tx_bytes;
        unsigned long tx_outstanding_bytes;
        unsigned long tx_outstanding_urbs;
+       unsigned short max_packet_size;
 };
 
 /* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -164,6 +166,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -673,6 +676,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
        { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
@@ -693,12 +697,13 @@ static const char *ftdi_chip_name[] = {
        [FT232BM] = "FT232BM",
        [FT2232C] = "FT2232C",
        [FT232RL] = "FT232RL",
+       [FT2232H] = "FT2232H",
+       [FT4232H] = "FT4232H"
 };
 
 
 /* Constants for read urb and write urb */
 #define BUFSZ 512
-#define PKTSZ 64
 
 /* rx_flags */
 #define THROTTLED              0x01
@@ -715,7 +720,6 @@ static const char *ftdi_chip_name[] = {
 /* function prototypes for a FTDI serial converter */
 static int  ftdi_sio_probe(struct usb_serial *serial,
                                        const struct usb_device_id *id);
-static void ftdi_shutdown(struct usb_serial *serial);
 static int  ftdi_sio_port_probe(struct usb_serial_port *port);
 static int  ftdi_sio_port_remove(struct usb_serial_port *port);
 static int  ftdi_open(struct tty_struct *tty,
@@ -744,6 +748,8 @@ static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
 static unsigned short int ftdi_232am_baud_to_divisor(int baud);
 static __u32 ftdi_232bm_baud_base_to_divisor(int baud, int base);
 static __u32 ftdi_232bm_baud_to_divisor(int baud);
+static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base);
+static __u32 ftdi_2232h_baud_to_divisor(int baud);
 
 static struct usb_serial_driver ftdi_sio_device = {
        .driver = {
@@ -772,7 +778,6 @@ static struct usb_serial_driver ftdi_sio_device = {
        .ioctl =                ftdi_ioctl,
        .set_termios =          ftdi_set_termios,
        .break_ctl =            ftdi_break_ctl,
-       .shutdown =             ftdi_shutdown,
 };
 
 
@@ -838,6 +843,36 @@ static __u32 ftdi_232bm_baud_to_divisor(int baud)
         return ftdi_232bm_baud_base_to_divisor(baud, 48000000);
 }
 
+static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base)
+{
+       static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
+       __u32 divisor;
+       int divisor3;
+
+       /* hi-speed baud rate is 10-bit sampling instead of 16-bit */
+       divisor3 = (base / 10 / baud) * 8;
+
+       divisor = divisor3 >> 3;
+       divisor |= (__u32)divfrac[divisor3 & 0x7] << 14;
+       /* Deal with special cases for highest baud rates. */
+       if (divisor == 1)
+               divisor = 0;
+       else if (divisor == 0x4001)
+               divisor = 1;
+       /*
+        * Set this bit to turn off a divide by 2.5 on baud rate generator
+        * This enables baud rates up to 12Mbaud but cannot reach below 1200
+        * baud with this bit set
+        */
+       divisor |= 0x00020000;
+       return divisor;
+}
+
+static __u32 ftdi_2232h_baud_to_divisor(int baud)
+{
+        return ftdi_2232h_baud_base_to_divisor(baud, 120000000);
+}
+
 #define set_mctrl(port, set)           update_mctrl((port), (set), 0)
 #define clear_mctrl(port, clear)       update_mctrl((port), 0, (clear))
 
@@ -996,6 +1031,19 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
                        baud = 9600;
                }
                break;
+       case FT2232H: /* FT2232H chip */
+       case FT4232H: /* FT4232H chip */
+               if ((baud <= 12000000) & (baud >= 1200)) {
+                       div_value = ftdi_2232h_baud_to_divisor(baud);
+               } else if (baud < 1200) {
+                       div_value = ftdi_232bm_baud_to_divisor(baud);
+               } else {
+                       dbg("%s - Baud rate too high!", __func__);
+                       div_value = ftdi_232bm_baud_to_divisor(9600);
+                       div_okay = 0;
+                       baud = 9600;
+               }
+               break;
        } /* priv->chip_type */
 
        if (div_okay) {
@@ -1196,14 +1244,29 @@ static void ftdi_determine_type(struct usb_serial_port *port)
        if (interfaces > 1) {
                int inter;
 
-               /* Multiple interfaces.  Assume FT2232C. */
-               priv->chip_type = FT2232C;
+               /* Multiple interfaces. */
+               if (version == 0x0800) {
+                       priv->chip_type = FT4232H;
+                       /* Hi-speed - baud clock runs at 120MHz */
+                       priv->baud_base = 120000000 / 2;
+               } else if (version == 0x0700) {
+                       priv->chip_type = FT2232H;
+                       /* Hi-speed - baud clock runs at 120MHz */
+                       priv->baud_base = 120000000 / 2;
+               } else
+                       priv->chip_type = FT2232C;
+
                /* Determine interface code. */
                inter = serial->interface->altsetting->desc.bInterfaceNumber;
-               if (inter == 0)
-                       priv->interface = PIT_SIOA;
-               else
-                       priv->interface = PIT_SIOB;
+               if (inter == 0) {
+                       priv->interface = INTERFACE_A;
+               } else  if (inter == 1) {
+                       priv->interface = INTERFACE_B;
+               } else  if (inter == 2) {
+                       priv->interface = INTERFACE_C;
+               } else  if (inter == 3) {
+                       priv->interface = INTERFACE_D;
+               }
                /* BM-type devices have a bug where bcdDevice gets set
                 * to 0x200 when iSerialNumber is 0.  */
                if (version < 0x500) {
@@ -1231,6 +1294,45 @@ static void ftdi_determine_type(struct usb_serial_port *port)
 }
 
 
+/* Determine the maximum packet size for the device.  This depends on the chip
+ * type and the USB host capabilities.  The value should be obtained from the
+ * device descriptor as the chip will use the appropriate values for the host. */
+static void ftdi_set_max_packet_size(struct usb_serial_port *port)
+{
+       struct ftdi_private *priv = usb_get_serial_port_data(port);
+       struct usb_serial *serial = port->serial;
+       struct usb_device *udev = serial->dev;
+
+       struct usb_interface *interface = serial->interface;
+       struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
+
+       unsigned num_endpoints;
+       int i = 0;
+
+       num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
+       dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
+
+       /* NOTE: some customers have programmed FT232R/FT245R devices
+        * with an endpoint size of 0 - not good.  In this case, we
+        * want to override the endpoint descriptor setting and use a
+        * value of 64 for wMaxPacketSize */
+       for (i = 0; i < num_endpoints; i++) {
+               dev_info(&udev->dev, "Endpoint %d MaxPacketSize %d\n", i+1,
+                       interface->cur_altsetting->endpoint[i].desc.wMaxPacketSize);
+               ep_desc = &interface->cur_altsetting->endpoint[i].desc;
+               if (ep_desc->wMaxPacketSize == 0) {
+                       ep_desc->wMaxPacketSize = cpu_to_le16(0x40);
+                       dev_info(&udev->dev, "Overriding wMaxPacketSize on endpoint %d\n", i);
+               }
+       }
+
+       /* set max packet size based on descriptor */
+       priv->max_packet_size = ep_desc->wMaxPacketSize;
+
+       dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
+}
+
+
 /*
  * ***************************************************************************
  * Sysfs Attribute
@@ -1314,7 +1416,9 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
                if ((!retval) &&
                    (priv->chip_type == FT232BM ||
                     priv->chip_type == FT2232C ||
-                    priv->chip_type == FT232RL)) {
+                    priv->chip_type == FT232RL ||
+                    priv->chip_type == FT2232H ||
+                    priv->chip_type == FT4232H)) {
                        retval = device_create_file(&port->dev,
                                                    &dev_attr_latency_timer);
                }
@@ -1333,7 +1437,9 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
                device_remove_file(&port->dev, &dev_attr_event_char);
                if (priv->chip_type == FT232BM ||
                    priv->chip_type == FT2232C ||
-                   priv->chip_type == FT232RL) {
+                   priv->chip_type == FT232RL ||
+                   priv->chip_type == FT2232H ||
+                   priv->chip_type == FT4232H) {
                        device_remove_file(&port->dev, &dev_attr_latency_timer);
                }
        }
@@ -1416,6 +1522,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
        usb_set_serial_port_data(port, priv);
 
        ftdi_determine_type(port);
+       ftdi_set_max_packet_size(port);
        read_latency_timer(port);
        create_sysfs_attrs(port);
        return 0;
@@ -1485,18 +1592,6 @@ static int ftdi_mtxorb_hack_setup(struct usb_serial *serial)
        return 0;
 }
 
-/* ftdi_shutdown is called from usbserial:usb_serial_disconnect
- *   it is called when the usb device is disconnected
- *
- *   usbserial:usb_serial_disconnect
- *      calls __serial_close for each open of the port
- *      shutdown is called then (ie ftdi_shutdown)
- */
-static void ftdi_shutdown(struct usb_serial *serial)
-{
-       dbg("%s", __func__);
-}
-
 static void ftdi_sio_priv_release(struct kref *k)
 {
        struct ftdi_private *priv = container_of(k, struct ftdi_private, kref);
@@ -1671,8 +1766,8 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
        if (data_offset > 0) {
                /* Original sio needs control bytes too... */
                transfer_size += (data_offset *
-                               ((count + (PKTSZ - 1 - data_offset)) /
-                                (PKTSZ - data_offset)));
+                               ((count + (priv->max_packet_size - 1 - data_offset)) /
+                                (priv->max_packet_size - data_offset)));
        }
 
        buffer = kmalloc(transfer_size, GFP_ATOMIC);
@@ -1694,7 +1789,7 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
        if (data_offset > 0) {
                /* Original sio requires control byte at start of
                   each packet. */
-               int user_pktsz = PKTSZ - data_offset;
+               int user_pktsz = priv->max_packet_size - data_offset;
                int todo = count;
                unsigned char *first_byte = buffer;
                const unsigned char *current_position = buf;
@@ -1775,11 +1870,6 @@ static void ftdi_write_bulk_callback(struct urb *urb)
 
        dbg("%s - port %d", __func__, port->number);
 
-       if (status) {
-               dbg("nonzero write bulk status received: %d", status);
-               return;
-       }
-
        priv = usb_get_serial_port_data(port);
        if (!priv) {
                dbg("%s - bad port private data pointer - exiting", __func__);
@@ -1790,13 +1880,18 @@ static void ftdi_write_bulk_callback(struct urb *urb)
        data_offset = priv->write_offset;
        if (data_offset > 0) {
                /* Subtract the control bytes */
-               countback -= (data_offset * DIV_ROUND_UP(countback, PKTSZ));
+               countback -= (data_offset * DIV_ROUND_UP(countback, priv->max_packet_size));
        }
        spin_lock_irqsave(&priv->tx_lock, flags);
        --priv->tx_outstanding_urbs;
        priv->tx_outstanding_bytes -= countback;
        spin_unlock_irqrestore(&priv->tx_lock, flags);
 
+       if (status) {
+               dbg("nonzero write bulk status received: %d", status);
+               return;
+       }
+
        usb_serial_port_softint(port);
 } /* ftdi_write_bulk_callback */
 
@@ -1892,7 +1987,7 @@ static void ftdi_read_bulk_callback(struct urb *urb)
 
        /* count data bytes, but not status bytes */
        countread = urb->actual_length;
-       countread -= 2 * DIV_ROUND_UP(countread, PKTSZ);
+       countread -= 2 * DIV_ROUND_UP(countread, priv->max_packet_size);
        spin_lock_irqsave(&priv->rx_lock, flags);
        priv->rx_bytes += countread;
        spin_unlock_irqrestore(&priv->rx_lock, flags);
@@ -1965,7 +2060,7 @@ static void ftdi_process_read(struct work_struct *work)
 
        need_flip = 0;
        for (packet_offset = priv->rx_processed;
-               packet_offset < urb->actual_length; packet_offset += PKTSZ) {
+               packet_offset < urb->actual_length; packet_offset += priv->max_packet_size) {
                int length;
 
                /* Compare new line status to the old one, signal if different/
@@ -1980,7 +2075,7 @@ static void ftdi_process_read(struct work_struct *work)
                        priv->prev_status = new_status;
                }
 
-               length = min_t(u32, PKTSZ, urb->actual_length-packet_offset)-2;
+               length = min_t(u32, priv->max_packet_size, urb->actual_length-packet_offset)-2;
                if (length < 0) {
                        dev_err(&port->dev, "%s - bad packet length: %d\n",
                                __func__, length+2);
@@ -2011,6 +2106,7 @@ static void ftdi_process_read(struct work_struct *work)
                if (data[packet_offset+1] & FTDI_RS_BI) {
                        error_flag = TTY_BREAK;
                        dbg("BREAK received");
+                       usb_serial_handle_break(port);
                }
                if (data[packet_offset+1] & FTDI_RS_PE) {
                        error_flag = TTY_PARITY;
@@ -2025,8 +2121,11 @@ static void ftdi_process_read(struct work_struct *work)
                                /* Note that the error flag is duplicated for
                                   every character received since we don't know
                                   which character it applied to */
-                               tty_insert_flip_char(tty,
-                                       data[packet_offset + i], error_flag);
+                               if (!usb_serial_handle_sysrq_char(port,
+                                               data[packet_offset + i]))
+                                       tty_insert_flip_char(tty,
+                                               data[packet_offset + i],
+                                               error_flag);
                        }
                        need_flip = 1;
                }
@@ -2332,6 +2431,8 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
        case FT232BM:
        case FT2232C:
        case FT232RL:
+       case FT2232H:
+       case FT4232H:
                /* the 8U232AM returns a two byte value (the sio is a 1 byte
                   value) - in the same format as the data returned from the in
                   point */
index 12330fa1c095b47fd875a758a3ae9eea56d7d914..f1d440a728a376770617ad7776a52eba590cc282 100644 (file)
@@ -10,7 +10,7 @@
  * The device is based on the FTDI FT8U100AX chip. It has a DB25 on one side,
  * USB on the other.
  *
- * Thanx to FTDI (http://www.ftdi.co.uk) for so kindly providing details
+ * Thanx to FTDI (http://www.ftdichip.com) for so kindly providing details
  * of the protocol required to talk to the device and ongoing assistence
  * during development.
  *
 #define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
 #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
 #define FTDI_232RL_PID  0xFBFA  /* Product ID for FT232RL */
+#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
 #define FTDI_RELAIS_PID        0xFA10  /* Relais device from Rudolf Gugler */
 #define FTDI_NF_RIC_VID        0x0DCD  /* Vendor Id */
 #define FTDI_NF_RIC_PID        0x0001  /* Product Id */
 #define FTDI_USBX_707_PID 0xF857       /* ADSTech IR Blaster USBX-707 */
 
+/* Larsen and Brusgaard AltiTrack/USBtrack  */
+#define LARSENBRUSGAARD_VID            0x0FD8
+#define LB_ALTITRACK_PID               0x0001
 
 /* www.canusb.com Lawicel CANUSB device */
 #define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
 #define FTDI_SIO_SET_LATENCY_TIMER     9 /* Set the latency timer */
 #define FTDI_SIO_GET_LATENCY_TIMER     10 /* Get the latency timer */
 
+/* Interface indices for FT2232, FT2232H and FT4232H devices */
+#define INTERFACE_A            1
+#define INTERFACE_B            2
+#define INTERFACE_C            3
+#define INTERFACE_D            4
 
 /*
  * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
@@ -1036,6 +1045,8 @@ typedef enum {
        FT232BM = 3,
        FT2232C = 4,
        FT232RL = 5,
+       FT2232H = 6,
+       FT4232H = 7
 } ftdi_chip_type_t;
 
 typedef enum {
index ee25a3fe3b09c317b0ecbb86f8dde60301bbc9b9..8839f1c70b7fc186ebe5c2b4f169467765086428 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Garmin GPS driver
  *
- * Copyright (C) 2006,2007 Hermann Kneissel herkne@users.sourceforge.net
+ * Copyright (C) 2006-2009 Hermann Kneissel herkne@users.sourceforge.net
  *
  * The latest version of the driver can be found at
  * http://sourceforge.net/projects/garmin-gps/
@@ -51,7 +51,7 @@ static int debug;
  */
 
 #define VERSION_MAJOR  0
-#define VERSION_MINOR  31
+#define VERSION_MINOR  33
 
 #define _STR(s) #s
 #define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
@@ -129,7 +129,6 @@ struct garmin_data {
        __u8   state;
        __u16  flags;
        __u8   mode;
-       __u8   ignorePkts;
        __u8   count;
        __u8   pkt_id;
        __u32  serial_num;
@@ -141,8 +140,6 @@ struct garmin_data {
        __u8   inbuffer [GPS_IN_BUFSIZ];  /* tty -> usb */
        __u8   outbuffer[GPS_OUT_BUFSIZ]; /* usb -> tty */
        __u8   privpkt[4*6];
-       atomic_t req_count;
-       atomic_t resp_count;
        spinlock_t lock;
        struct list_head pktlist;
 };
@@ -170,6 +167,8 @@ struct garmin_data {
 #define FLAGS_BULK_IN_ACTIVE      0x0020
 #define FLAGS_BULK_IN_RESTART     0x0010
 #define FLAGS_THROTTLED           0x0008
+#define APP_REQ_SEEN              0x0004
+#define APP_RESP_SEEN             0x0002
 #define CLEAR_HALT_REQUIRED       0x0001
 
 #define FLAGS_QUEUING             0x0100
@@ -184,20 +183,16 @@ struct garmin_data {
 
 
 /* function prototypes */
-static void gsp_next_packet(struct garmin_data *garmin_data_p);
-static int  garmin_write_bulk(struct usb_serial_port *port,
+static int gsp_next_packet(struct garmin_data *garmin_data_p);
+static int garmin_write_bulk(struct usb_serial_port *port,
                             const unsigned char *buf, int count,
                             int dismiss_ack);
 
 /* some special packets to be send or received */
 static unsigned char const GARMIN_START_SESSION_REQ[]
        = { 0, 0, 0, 0,  5, 0, 0, 0, 0, 0, 0, 0 };
-static unsigned char const GARMIN_START_SESSION_REQ2[]
-       = { 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
 static unsigned char const GARMIN_START_SESSION_REPLY[]
        = { 0, 0, 0, 0,  6, 0, 0, 0, 4, 0, 0, 0 };
-static unsigned char const GARMIN_SESSION_ACTIVE_REPLY[]
-       = { 0, 0, 0, 0, 17, 0, 0, 0, 4, 0, 0, 0, 0, 16, 0, 0 };
 static unsigned char const GARMIN_BULK_IN_AVAIL_REPLY[]
        = { 0, 0, 0, 0,  2, 0, 0, 0, 0, 0, 0, 0 };
 static unsigned char const GARMIN_APP_LAYER_REPLY[]
@@ -233,13 +228,6 @@ static struct usb_driver garmin_driver = {
 };
 
 
-static inline int noResponseFromAppLayer(struct garmin_data *garmin_data_p)
-{
-       return atomic_read(&garmin_data_p->req_count) ==
-                               atomic_read(&garmin_data_p->resp_count);
-}
-
-
 static inline int getLayerId(const __u8 *usbPacket)
 {
        return __le32_to_cpup((__le32 *)(usbPacket));
@@ -325,8 +313,11 @@ static int pkt_add(struct garmin_data *garmin_data_p,
                state = garmin_data_p->state;
                spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
+               dbg("%s - added: pkt: %d - %d bytes",
+                       __func__, pkt->seq, data_length);
+
                /* in serial mode, if someone is waiting for data from
-                  the device, iconvert and send the next packet to tty. */
+                  the device, convert and send the next packet to tty. */
                if (result && (state == STATE_GSP_WAIT_DATA))
                        gsp_next_packet(garmin_data_p);
        }
@@ -411,7 +402,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
 /*
  * called for a complete packet received from tty layer
  *
- * the complete packet (pkzid ... cksum) is in garmin_data_p->inbuf starting
+ * the complete packet (pktid ... cksum) is in garmin_data_p->inbuf starting
  * at GSP_INITIAL_OFFSET.
  *
  * count - number of bytes in the input buffer including space reserved for
@@ -501,7 +492,6 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
        unsigned long flags;
        int offs = 0;
        int ack_or_nak_seen = 0;
-       int i = 0;
        __u8 *dest;
        int size;
        /* dleSeen: set if last byte read was a DLE */
@@ -519,8 +509,8 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
        skip = garmin_data_p->flags & FLAGS_GSP_SKIP;
        spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
-       dbg("%s - dle=%d skip=%d size=%d count=%d",
-               __func__, dleSeen, skip, size, count);
+       /* dbg("%s - dle=%d skip=%d size=%d count=%d",
+               __func__, dleSeen, skip, size, count); */
 
        if (size == 0)
                size = GSP_INITIAL_OFFSET;
@@ -568,7 +558,6 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
                } else if (!skip) {
 
                        if (dleSeen) {
-                               dbg("non-masked DLE at %d - restarting", i);
                                size = GSP_INITIAL_OFFSET;
                                dleSeen = 0;
                        }
@@ -599,19 +588,19 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
        else
                garmin_data_p->flags &= ~FLAGS_GSP_DLESEEN;
 
-       if (ack_or_nak_seen)
-               garmin_data_p->state = STATE_GSP_WAIT_DATA;
-
        spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
-       if (ack_or_nak_seen)
-               gsp_next_packet(garmin_data_p);
+       if (ack_or_nak_seen) {
+               if (gsp_next_packet(garmin_data_p) > 0)
+                       garmin_data_p->state = STATE_ACTIVE;
+               else
+                       garmin_data_p->state = STATE_GSP_WAIT_DATA;
+       }
        return count;
 }
 
 
 
-
 /*
  * Sends a usb packet to the tty
  *
@@ -733,29 +722,28 @@ static int gsp_send(struct garmin_data *garmin_data_p,
 }
 
 
-
-
-
 /*
  * Process the next pending data packet - if there is one
  */
-static void gsp_next_packet(struct garmin_data *garmin_data_p)
+static int gsp_next_packet(struct garmin_data *garmin_data_p)
 {
+       int result = 0;
        struct garmin_packet *pkt = NULL;
 
        while ((pkt = pkt_pop(garmin_data_p)) != NULL) {
                dbg("%s - next pkt: %d", __func__, pkt->seq);
-               if (gsp_send(garmin_data_p, pkt->data, pkt->size) > 0) {
+               result = gsp_send(garmin_data_p, pkt->data, pkt->size);
+               if (result > 0) {
                        kfree(pkt);
-                       return;
+                       return result;
                }
                kfree(pkt);
        }
+       return result;
 }
 
 
 
-
 /******************************************************************************
  * garmin native mode
  ******************************************************************************/
@@ -888,14 +876,6 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
        unsigned long flags;
        int status = 0;
 
-       struct usb_serial_port *port = garmin_data_p->port;
-
-       if (port != NULL && atomic_read(&garmin_data_p->resp_count)) {
-               /* send a terminate command */
-               status = garmin_write_bulk(port, GARMIN_STOP_TRANSFER_REQ,
-                                       sizeof(GARMIN_STOP_TRANSFER_REQ), 1);
-       }
-
        /* flush all queued data */
        pkt_clear(garmin_data_p);
 
@@ -908,16 +888,12 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
 }
 
 
-
-
-
-
 static int garmin_init_session(struct usb_serial_port *port)
 {
-       unsigned long flags;
        struct usb_serial *serial = port->serial;
        struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
        int status = 0;
+       int i = 0;
 
        if (status == 0) {
                usb_kill_urb(port->interrupt_in_urb);
@@ -931,30 +907,25 @@ static int garmin_init_session(struct usb_serial_port *port)
                                                        __func__, status);
        }
 
+       /*
+        * using the initialization method from gpsbabel. See comments in
+        * gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
+        */
        if (status == 0) {
                dbg("%s - starting session ...", __func__);
                garmin_data_p->state = STATE_ACTIVE;
-               status = garmin_write_bulk(port, GARMIN_START_SESSION_REQ,
-                                       sizeof(GARMIN_START_SESSION_REQ), 0);
-
-               if (status >= 0) {
-
-                       spin_lock_irqsave(&garmin_data_p->lock, flags);
-                       garmin_data_p->ignorePkts++;
-                       spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
-                       /* not needed, but the win32 driver does it too ... */
+               for (i = 0; i < 3; i++) {
                        status = garmin_write_bulk(port,
-                                       GARMIN_START_SESSION_REQ2,
-                                       sizeof(GARMIN_START_SESSION_REQ2), 0);
-                       if (status >= 0) {
-                               status = 0;
-                               spin_lock_irqsave(&garmin_data_p->lock, flags);
-                               garmin_data_p->ignorePkts++;
-                               spin_unlock_irqrestore(&garmin_data_p->lock,
-                                                                       flags);
-                       }
+                                       GARMIN_START_SESSION_REQ,
+                                       sizeof(GARMIN_START_SESSION_REQ), 0);
+
+                       if (status < 0)
+                               break;
                }
+
+               if (status > 0)
+                       status = 0;
        }
 
        return status;
@@ -962,8 +933,6 @@ static int garmin_init_session(struct usb_serial_port *port)
 
 
 
-
-
 static int garmin_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp)
 {
@@ -977,8 +946,6 @@ static int garmin_open(struct tty_struct *tty,
        garmin_data_p->mode  = initial_mode;
        garmin_data_p->count = 0;
        garmin_data_p->flags = 0;
-       atomic_set(&garmin_data_p->req_count, 0);
-       atomic_set(&garmin_data_p->resp_count, 0);
        spin_unlock_irqrestore(&garmin_data_p->lock, flags);
 
        /* shutdown any bulk reads that might be going on */
@@ -1006,6 +973,7 @@ static void garmin_close(struct usb_serial_port *port)
                return;
 
        mutex_lock(&port->serial->disc_mutex);
+
        if (!port->serial->disconnected)
                garmin_clear(garmin_data_p);
 
@@ -1013,25 +981,17 @@ static void garmin_close(struct usb_serial_port *port)
        usb_kill_urb(port->read_urb);
        usb_kill_urb(port->write_urb);
 
-       if (!port->serial->disconnected) {
-               if (noResponseFromAppLayer(garmin_data_p) ||
-                   ((garmin_data_p->flags & CLEAR_HALT_REQUIRED) != 0)) {
-                       process_resetdev_request(port);
-                       garmin_data_p->state = STATE_RESET;
-               } else {
-                       garmin_data_p->state = STATE_DISCONNECTED;
-               }
-       } else {
+       /* keep reset state so we know that we must start a new session */
+       if (garmin_data_p->state != STATE_RESET)
                garmin_data_p->state = STATE_DISCONNECTED;
-       }
+
        mutex_unlock(&port->serial->disc_mutex);
 }
 
+
 static void garmin_write_bulk_callback(struct urb *urb)
 {
-       unsigned long flags;
        struct usb_serial_port *port = urb->context;
-       int status = urb->status;
 
        if (port) {
                struct garmin_data *garmin_data_p =
@@ -1039,20 +999,13 @@ static void garmin_write_bulk_callback(struct urb *urb)
 
                dbg("%s - port %d", __func__, port->number);
 
-               if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)
-                   && (garmin_data_p->mode == MODE_GARMIN_SERIAL))  {
-                       gsp_send_ack(garmin_data_p,
-                                       ((__u8 *)urb->transfer_buffer)[4]);
-               }
+               if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)) {
 
-               if (status) {
-                       dbg("%s - nonzero write bulk status received: %d",
-                           __func__, status);
-                       spin_lock_irqsave(&garmin_data_p->lock, flags);
-                       garmin_data_p->flags |= CLEAR_HALT_REQUIRED;
-                       spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+                       if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
+                               gsp_send_ack(garmin_data_p,
+                                       ((__u8 *)urb->transfer_buffer)[4]);
+                       }
                }
-
                usb_serial_port_softint(port);
        }
 
@@ -1108,7 +1061,11 @@ static int garmin_write_bulk(struct usb_serial_port *port,
        urb->transfer_flags |= URB_ZERO_PACKET;
 
        if (GARMIN_LAYERID_APPL == getLayerId(buffer)) {
-               atomic_inc(&garmin_data_p->req_count);
+
+               spin_lock_irqsave(&garmin_data_p->lock, flags);
+               garmin_data_p->flags |= APP_REQ_SEEN;
+               spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+
                if (garmin_data_p->mode == MODE_GARMIN_SERIAL)  {
                        pkt_clear(garmin_data_p);
                        garmin_data_p->state = STATE_GSP_WAIT_DATA;
@@ -1140,6 +1097,9 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
 
        usb_serial_debug_data(debug, &port->dev, __func__, count, buf);
 
+       if (garmin_data_p->state == STATE_RESET)
+               return -EIO;
+
        /* check for our private packets */
        if (count >= GARMIN_PKTHDR_LENGTH) {
                len = PRIVPKTSIZ;
@@ -1184,7 +1144,7 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
                                break;
 
                        case PRIV_PKTID_RESET_REQ:
-                               atomic_inc(&garmin_data_p->req_count);
+                               process_resetdev_request(port);
                                break;
 
                        case PRIV_PKTID_SET_DEF_MODE:
@@ -1200,8 +1160,6 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
                }
        }
 
-       garmin_data_p->ignorePkts = 0;
-
        if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
                return gsp_receive(garmin_data_p, buf, count);
        } else {        /* MODE_NATIVE */
@@ -1224,31 +1182,33 @@ static int garmin_write_room(struct tty_struct *tty)
 static void garmin_read_process(struct garmin_data *garmin_data_p,
                                 unsigned char *data, unsigned data_length)
 {
+       unsigned long flags;
+
        if (garmin_data_p->flags & FLAGS_DROP_DATA) {
                /* abort-transfer cmd is actice */
                dbg("%s - pkt dropped", __func__);
        } else if (garmin_data_p->state != STATE_DISCONNECTED &&
                garmin_data_p->state != STATE_RESET) {
 
-               /* remember any appl.layer packets, so we know
-                  if a reset is required or not when closing
-                  the device */
-               if (0 == memcmp(data, GARMIN_APP_LAYER_REPLY,
-                               sizeof(GARMIN_APP_LAYER_REPLY))) {
-                       atomic_inc(&garmin_data_p->resp_count);
-               }
-
                /* if throttling is active or postprecessing is required
                   put the received data in the input queue, otherwise
                   send it directly to the tty port */
                if (garmin_data_p->flags & FLAGS_QUEUING) {
                        pkt_add(garmin_data_p, data, data_length);
-               } else if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
-                       if (getLayerId(data) == GARMIN_LAYERID_APPL)
+               } else if (getLayerId(data) == GARMIN_LAYERID_APPL) {
+
+                       spin_lock_irqsave(&garmin_data_p->lock, flags);
+                       garmin_data_p->flags |= APP_RESP_SEEN;
+                       spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+
+                       if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
                                pkt_add(garmin_data_p, data, data_length);
-               } else {
-                       send_to_tty(garmin_data_p->port, data, data_length);
+                       } else {
+                               send_to_tty(garmin_data_p->port, data,
+                                               data_length);
+                       }
                }
+               /* ignore system layer packets ... */
        }
 }
 
@@ -1363,8 +1323,6 @@ static void garmin_read_int_callback(struct urb *urb)
                        } else {
                                spin_lock_irqsave(&garmin_data_p->lock, flags);
                                garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE;
-                               /* do not send this packet to the user */
-                               garmin_data_p->ignorePkts = 1;
                                spin_unlock_irqrestore(&garmin_data_p->lock,
                                                                        flags);
                        }
@@ -1391,17 +1349,7 @@ static void garmin_read_int_callback(struct urb *urb)
                        __func__, garmin_data_p->serial_num);
        }
 
-       if (garmin_data_p->ignorePkts) {
-               /* this reply belongs to a request generated by the driver,
-                  ignore it. */
-               dbg("%s - pkt ignored (%d)",
-                       __func__, garmin_data_p->ignorePkts);
-               spin_lock_irqsave(&garmin_data_p->lock, flags);
-               garmin_data_p->ignorePkts--;
-               spin_unlock_irqrestore(&garmin_data_p->lock, flags);
-       } else {
-               garmin_read_process(garmin_data_p, data, urb->actual_length);
-       }
+       garmin_read_process(garmin_data_p, data, urb->actual_length);
 
        port->interrupt_in_urb->dev = port->serial->dev;
        retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1527,7 +1475,7 @@ static int garmin_attach(struct usb_serial *serial)
 }
 
 
-static void garmin_shutdown(struct usb_serial *serial)
+static void garmin_disconnect(struct usb_serial *serial)
 {
        struct usb_serial_port *port = serial->port[0];
        struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
@@ -1536,8 +1484,17 @@ static void garmin_shutdown(struct usb_serial *serial)
 
        usb_kill_urb(port->interrupt_in_urb);
        del_timer_sync(&garmin_data_p->timer);
+}
+
+
+static void garmin_release(struct usb_serial *serial)
+{
+       struct usb_serial_port *port = serial->port[0];
+       struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+
+       dbg("%s", __func__);
+
        kfree(garmin_data_p);
-       usb_set_serial_port_data(port, NULL);
 }
 
 
@@ -1556,7 +1513,8 @@ static struct usb_serial_driver garmin_device = {
        .throttle            = garmin_throttle,
        .unthrottle          = garmin_unthrottle,
        .attach              = garmin_attach,
-       .shutdown            = garmin_shutdown,
+       .disconnect          = garmin_disconnect,
+       .release             = garmin_release,
        .write               = garmin_write,
        .write_room          = garmin_write_room,
        .write_bulk_callback = garmin_write_bulk_callback,
index be82ea956720184d545a0c43bce6f684c032fb7e..932d6241b787d2c399bae70f70bec85e4c979d71 100644 (file)
@@ -63,7 +63,8 @@ struct usb_serial_driver usb_serial_generic_device = {
        .id_table =             generic_device_ids,
        .usb_driver =           &generic_driver,
        .num_ports =            1,
-       .shutdown =             usb_serial_generic_shutdown,
+       .disconnect =           usb_serial_generic_disconnect,
+       .release =              usb_serial_generic_release,
        .throttle =             usb_serial_generic_throttle,
        .unthrottle =           usb_serial_generic_unthrottle,
        .resume =               usb_serial_generic_resume,
@@ -190,6 +191,88 @@ void usb_serial_generic_close(struct usb_serial_port *port)
        generic_cleanup(port);
 }
 
+static int usb_serial_multi_urb_write(struct tty_struct *tty,
+       struct usb_serial_port *port, const unsigned char *buf, int count)
+{
+       unsigned long flags;
+       struct urb *urb;
+       unsigned char *buffer;
+       int status;
+       int towrite;
+       int bwrite = 0;
+
+       dbg("%s - port %d", __func__, port->number);
+
+       if (count == 0)
+               dbg("%s - write request of 0 bytes", __func__);
+
+       while (count > 0) {
+               towrite = (count > port->bulk_out_size) ?
+                       port->bulk_out_size : count;
+               spin_lock_irqsave(&port->lock, flags);
+               if (port->urbs_in_flight >
+                   port->serial->type->max_in_flight_urbs) {
+                       spin_unlock_irqrestore(&port->lock, flags);
+                       dbg("%s - write limit hit\n", __func__);
+                       return bwrite;
+               }
+               port->tx_bytes_flight += towrite;
+               port->urbs_in_flight++;
+               spin_unlock_irqrestore(&port->lock, flags);
+
+               buffer = kmalloc(towrite, GFP_ATOMIC);
+               if (!buffer) {
+                       dev_err(&port->dev,
+                       "%s ran out of kernel memory for urb ...\n", __func__);
+                       goto error_no_buffer;
+               }
+
+               urb = usb_alloc_urb(0, GFP_ATOMIC);
+               if (!urb) {
+                       dev_err(&port->dev, "%s - no more free urbs\n",
+                               __func__);
+                       goto error_no_urb;
+               }
+
+               /* Copy data */
+               memcpy(buffer, buf + bwrite, towrite);
+               usb_serial_debug_data(debug, &port->dev, __func__,
+                                     towrite, buffer);
+               /* fill the buffer and send it */
+               usb_fill_bulk_urb(urb, port->serial->dev,
+                       usb_sndbulkpipe(port->serial->dev,
+                                       port->bulk_out_endpointAddress),
+                       buffer, towrite,
+                       usb_serial_generic_write_bulk_callback, port);
+
+               status = usb_submit_urb(urb, GFP_ATOMIC);
+               if (status) {
+                       dev_err(&port->dev,
+                               "%s - failed submitting write urb, error %d\n",
+                               __func__, status);
+                       goto error;
+               }
+
+               /* This urb is the responsibility of the host driver now */
+               usb_free_urb(urb);
+               dbg("%s write: %d", __func__, towrite);
+               count -= towrite;
+               bwrite += towrite;
+       }
+       return bwrite;
+
+error:
+       usb_free_urb(urb);
+error_no_urb:
+       kfree(buffer);
+error_no_buffer:
+       spin_lock_irqsave(&port->lock, flags);
+       port->urbs_in_flight--;
+       port->tx_bytes_flight -= towrite;
+       spin_unlock_irqrestore(&port->lock, flags);
+       return bwrite;
+}
+
 int usb_serial_generic_write(struct tty_struct *tty,
        struct usb_serial_port *port, const unsigned char *buf, int count)
 {
@@ -207,6 +290,11 @@ int usb_serial_generic_write(struct tty_struct *tty,
        /* only do something if we have a bulk out endpoint */
        if (serial->num_bulk_out) {
                unsigned long flags;
+
+               if (serial->type->max_in_flight_urbs)
+                       return usb_serial_multi_urb_write(tty, port,
+                                                         buf, count);
+
                spin_lock_irqsave(&port->lock, flags);
                if (port->write_urb_busy) {
                        spin_unlock_irqrestore(&port->lock, flags);
@@ -252,20 +340,26 @@ int usb_serial_generic_write(struct tty_struct *tty,
        /* no bulk out, so return 0 bytes written */
        return 0;
 }
+EXPORT_SYMBOL_GPL(usb_serial_generic_write);
 
 int usb_serial_generic_write_room(struct tty_struct *tty)
 {
        struct usb_serial_port *port = tty->driver_data;
        struct usb_serial *serial = port->serial;
+       unsigned long flags;
        int room = 0;
 
        dbg("%s - port %d", __func__, port->number);
-
-       /* FIXME: Locking */
-       if (serial->num_bulk_out) {
-               if (!(port->write_urb_busy))
-                       room = port->bulk_out_size;
+       spin_lock_irqsave(&port->lock, flags);
+       if (serial->type->max_in_flight_urbs) {
+               if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
+                       room = port->bulk_out_size *
+                               (serial->type->max_in_flight_urbs -
+                                port->urbs_in_flight);
+       } else if (serial->num_bulk_out && !(port->write_urb_busy)) {
+               room = port->bulk_out_size;
        }
+       spin_unlock_irqrestore(&port->lock, flags);
 
        dbg("%s - returns %d", __func__, room);
        return room;
@@ -276,11 +370,16 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
        struct usb_serial_port *port = tty->driver_data;
        struct usb_serial *serial = port->serial;
        int chars = 0;
+       unsigned long flags;
 
        dbg("%s - port %d", __func__, port->number);
 
-       /* FIXME: Locking */
-       if (serial->num_bulk_out) {
+       if (serial->type->max_in_flight_urbs) {
+               spin_lock_irqsave(&port->lock, flags);
+               chars = port->tx_bytes_flight;
+               spin_unlock_irqrestore(&port->lock, flags);
+       } else if (serial->num_bulk_out) {
+               /* FIXME: Locking */
                if (port->write_urb_busy)
                        chars = port->write_urb->transfer_buffer_length;
        }
@@ -290,7 +389,8 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
 }
 
 
-static void resubmit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
+void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
+                       gfp_t mem_flags)
 {
        struct urb *urb = port->read_urb;
        struct usb_serial *serial = port->serial;
@@ -311,25 +411,28 @@ static void resubmit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
                        "%s - failed resubmitting read urb, error %d\n",
                                                        __func__, result);
 }
+EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb);
 
 /* Push data to tty layer and resubmit the bulk read URB */
 static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
 {
        struct urb *urb = port->read_urb;
        struct tty_struct *tty = tty_port_tty_get(&port->port);
-       int room;
+       char *ch = (char *)urb->transfer_buffer;
+       int i;
+
+       if (!tty)
+               goto done;
 
        /* Push data to tty */
-       if (tty && urb->actual_length) {
-               room = tty_buffer_request_room(tty, urb->actual_length);
-               if (room) {
-                       tty_insert_flip_string(tty, urb->transfer_buffer, room);
-                       tty_flip_buffer_push(tty);
-               }
+       for (i = 0; i < urb->actual_length; i++, ch++) {
+               if (!usb_serial_handle_sysrq_char(port, *ch))
+                       tty_insert_flip_char(tty, *ch, TTY_NORMAL);
        }
+       tty_flip_buffer_push(tty);
        tty_kref_put(tty);
-
-       resubmit_read_urb(port, GFP_ATOMIC);
+done:
+       usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
 }
 
 void usb_serial_generic_read_bulk_callback(struct urb *urb)
@@ -363,12 +466,24 @@ EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
 
 void usb_serial_generic_write_bulk_callback(struct urb *urb)
 {
+       unsigned long flags;
        struct usb_serial_port *port = urb->context;
        int status = urb->status;
 
        dbg("%s - port %d", __func__, port->number);
 
-       port->write_urb_busy = 0;
+       if (port->serial->type->max_in_flight_urbs) {
+               spin_lock_irqsave(&port->lock, flags);
+               --port->urbs_in_flight;
+               port->tx_bytes_flight -= urb->transfer_buffer_length;
+               if (port->urbs_in_flight < 0)
+                       port->urbs_in_flight = 0;
+               spin_unlock_irqrestore(&port->lock, flags);
+       } else {
+               /* Handle the case for single urb mode */
+               port->write_urb_busy = 0;
+       }
+
        if (status) {
                dbg("%s - nonzero write bulk status received: %d",
                    __func__, status);
@@ -408,11 +523,36 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
 
        if (was_throttled) {
                /* Resume reading from device */
-               resubmit_read_urb(port, GFP_KERNEL);
+               usb_serial_generic_resubmit_read_urb(port, GFP_KERNEL);
+       }
+}
+
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
+{
+       if (port->sysrq && port->console) {
+               if (ch && time_before(jiffies, port->sysrq)) {
+                       handle_sysrq(ch, tty_port_tty_get(&port->port));
+                       port->sysrq = 0;
+                       return 1;
+               }
+               port->sysrq = 0;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char);
+
+int usb_serial_handle_break(struct usb_serial_port *port)
+{
+       if (!port->sysrq) {
+               port->sysrq = jiffies + HZ*5;
+               return 1;
        }
+       port->sysrq = 0;
+       return 0;
 }
+EXPORT_SYMBOL_GPL(usb_serial_handle_break);
 
-void usb_serial_generic_shutdown(struct usb_serial *serial)
+void usb_serial_generic_disconnect(struct usb_serial *serial)
 {
        int i;
 
@@ -423,3 +563,7 @@ void usb_serial_generic_shutdown(struct usb_serial *serial)
                generic_cleanup(serial->port[i]);
 }
 
+void usb_serial_generic_release(struct usb_serial *serial)
+{
+       dbg("%s", __func__);
+}
index 53ef5996e33de377b6290c84e4183f2669ef646e..0191693625d6541dbe5125df9b488be237828d77 100644 (file)
@@ -224,7 +224,8 @@ static int  edge_tiocmget(struct tty_struct *tty, struct file *file);
 static int  edge_tiocmset(struct tty_struct *tty, struct file *file,
                                        unsigned int set, unsigned int clear);
 static int  edge_startup(struct usb_serial *serial);
-static void edge_shutdown(struct usb_serial *serial);
+static void edge_disconnect(struct usb_serial *serial);
+static void edge_release(struct usb_serial *serial);
 
 #include "io_tables.h" /* all of the devices that this driver supports */
 
@@ -3193,21 +3194,16 @@ static int edge_startup(struct usb_serial *serial)
 
 
 /****************************************************************************
- * edge_shutdown
+ * edge_disconnect
  *     This function is called whenever the device is removed from the usb bus.
  ****************************************************************************/
-static void edge_shutdown(struct usb_serial *serial)
+static void edge_disconnect(struct usb_serial *serial)
 {
        struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
-       int i;
 
        dbg("%s", __func__);
 
        /* stop reads and writes on all ports */
-       for (i = 0; i < serial->num_ports; ++i) {
-               kfree(usb_get_serial_port_data(serial->port[i]));
-               usb_set_serial_port_data(serial->port[i],  NULL);
-       }
        /* free up our endpoint stuff */
        if (edge_serial->is_epic) {
                usb_kill_urb(edge_serial->interrupt_read_urb);
@@ -3218,9 +3214,24 @@ static void edge_shutdown(struct usb_serial *serial)
                usb_free_urb(edge_serial->read_urb);
                kfree(edge_serial->bulk_in_buffer);
        }
+}
+
+
+/****************************************************************************
+ * edge_release
+ *     This function is called when the device structure is deallocated.
+ ****************************************************************************/
+static void edge_release(struct usb_serial *serial)
+{
+       struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+       int i;
+
+       dbg("%s", __func__);
+
+       for (i = 0; i < serial->num_ports; ++i)
+               kfree(usb_get_serial_port_data(serial->port[i]));
 
        kfree(edge_serial);
-       usb_set_serial_data(serial, NULL);
 }
 
 
index 7eb9d67b81b632a20ead2479933c4e86cead3dfe..9241d314751324f65cb430ece19b6e6db373c72c 100644 (file)
@@ -117,7 +117,8 @@ static struct usb_serial_driver edgeport_2port_device = {
        .throttle               = edge_throttle,
        .unthrottle             = edge_unthrottle,
        .attach                 = edge_startup,
-       .shutdown               = edge_shutdown,
+       .disconnect             = edge_disconnect,
+       .release                = edge_release,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
@@ -145,7 +146,8 @@ static struct usb_serial_driver edgeport_4port_device = {
        .throttle               = edge_throttle,
        .unthrottle             = edge_unthrottle,
        .attach                 = edge_startup,
-       .shutdown               = edge_shutdown,
+       .disconnect             = edge_disconnect,
+       .release                = edge_release,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
@@ -173,7 +175,8 @@ static struct usb_serial_driver edgeport_8port_device = {
        .throttle               = edge_throttle,
        .unthrottle             = edge_unthrottle,
        .attach                 = edge_startup,
-       .shutdown               = edge_shutdown,
+       .disconnect             = edge_disconnect,
+       .release                = edge_release,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
@@ -200,7 +203,8 @@ static struct usb_serial_driver epic_device = {
        .throttle               = edge_throttle,
        .unthrottle             = edge_unthrottle,
        .attach                 = edge_startup,
-       .shutdown               = edge_shutdown,
+       .disconnect             = edge_disconnect,
+       .release                = edge_release,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
index db964db42d3c0d0c258d56395ed4f14c7fb89c61..e8bc42f92e79c599f1c35d7b21cb5b2a980aca9f 100644 (file)
@@ -2663,7 +2663,7 @@ cleanup:
        return -ENOMEM;
 }
 
-static void edge_shutdown(struct usb_serial *serial)
+static void edge_disconnect(struct usb_serial *serial)
 {
        int i;
        struct edgeport_port *edge_port;
@@ -2673,12 +2673,22 @@ static void edge_shutdown(struct usb_serial *serial)
        for (i = 0; i < serial->num_ports; ++i) {
                edge_port = usb_get_serial_port_data(serial->port[i]);
                edge_remove_sysfs_attrs(edge_port->port);
+       }
+}
+
+static void edge_release(struct usb_serial *serial)
+{
+       int i;
+       struct edgeport_port *edge_port;
+
+       dbg("%s", __func__);
+
+       for (i = 0; i < serial->num_ports; ++i) {
+               edge_port = usb_get_serial_port_data(serial->port[i]);
                edge_buf_free(edge_port->ep_out_buf);
                kfree(edge_port);
-               usb_set_serial_port_data(serial->port[i], NULL);
        }
        kfree(usb_get_serial_data(serial));
-       usb_set_serial_data(serial, NULL);
 }
 
 
@@ -2915,7 +2925,8 @@ static struct usb_serial_driver edgeport_1port_device = {
        .throttle               = edge_throttle,
        .unthrottle             = edge_unthrottle,
        .attach                 = edge_startup,
-       .shutdown               = edge_shutdown,
+       .disconnect             = edge_disconnect,
+       .release                = edge_release,
        .port_probe             = edge_create_sysfs_attrs,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
@@ -2944,7 +2955,8 @@ static struct usb_serial_driver edgeport_2port_device = {
        .throttle               = edge_throttle,
        .unthrottle             = edge_unthrottle,
        .attach                 = edge_startup,
-       .shutdown               = edge_shutdown,
+       .disconnect             = edge_disconnect,
+       .release                = edge_release,
        .port_probe             = edge_create_sysfs_attrs,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
index c610a99fa47741c51d2b9312150f9ffc05868e23..2545d45ce16f94159f32db95d43d6b628c461bd0 100644 (file)
@@ -79,7 +79,6 @@ static int  ipaq_open(struct tty_struct *tty,
 static void ipaq_close(struct usb_serial_port *port);
 static int  ipaq_calc_num_ports(struct usb_serial *serial);
 static int  ipaq_startup(struct usb_serial *serial);
-static void ipaq_shutdown(struct usb_serial *serial);
 static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port,
                        const unsigned char *buf, int count);
 static int ipaq_write_bulk(struct usb_serial_port *port,
@@ -576,7 +575,6 @@ static struct usb_serial_driver ipaq_device = {
        .close =                ipaq_close,
        .attach =               ipaq_startup,
        .calc_num_ports =       ipaq_calc_num_ports,
-       .shutdown =             ipaq_shutdown,
        .write =                ipaq_write,
        .write_room =           ipaq_write_room,
        .chars_in_buffer =      ipaq_chars_in_buffer,
@@ -990,11 +988,6 @@ static int ipaq_startup(struct usb_serial *serial)
        return usb_reset_configuration(serial->dev);
 }
 
-static void ipaq_shutdown(struct usb_serial *serial)
-{
-       dbg("%s", __func__);
-}
-
 static int __init ipaq_init(void)
 {
        int retval;
index 76a3cc327bb9c2d0dbfdfa0a0e0df55015e15036..96873a7a32b082ecd370e4329852a65e6b3d356b 100644 (file)
@@ -121,8 +121,8 @@ static int iuu_startup(struct usb_serial *serial)
        return 0;
 }
 
-/* Shutdown function */
-static void iuu_shutdown(struct usb_serial *serial)
+/* Release function */
+static void iuu_release(struct usb_serial *serial)
 {
        struct usb_serial_port *port = serial->port[0];
        struct iuu_private *priv = usb_get_serial_port_data(port);
@@ -1202,7 +1202,7 @@ static struct usb_serial_driver iuu_device = {
        .tiocmset = iuu_tiocmset,
        .set_termios = iuu_set_termios,
        .attach = iuu_startup,
-       .shutdown = iuu_shutdown,
+       .release = iuu_release,
 };
 
 static int __init iuu_init(void)
index f1195a98f316df62f0d465b144b32fc3f6f9da66..2594b8743d3fb340ed8a6e54dd3de4ca2e998cab 100644 (file)
@@ -2689,7 +2689,7 @@ static int keyspan_startup(struct usb_serial *serial)
        return 0;
 }
 
-static void keyspan_shutdown(struct usb_serial *serial)
+static void keyspan_disconnect(struct usb_serial *serial)
 {
        int                             i, j;
        struct usb_serial_port          *port;
@@ -2729,6 +2729,17 @@ static void keyspan_shutdown(struct usb_serial *serial)
                        usb_free_urb(p_priv->out_urbs[j]);
                }
        }
+}
+
+static void keyspan_release(struct usb_serial *serial)
+{
+       int                             i;
+       struct usb_serial_port          *port;
+       struct keyspan_serial_private   *s_priv;
+
+       dbg("%s", __func__);
+
+       s_priv = usb_get_serial_data(serial);
 
        /*  dbg("Freeing serial->private."); */
        kfree(s_priv);
index 0d4569b60768a736a1e180db3621aa9053f22e72..3107ed15af641babe6ecce65ad7c78d2a06aaacc 100644 (file)
@@ -41,7 +41,8 @@ static int  keyspan_open              (struct tty_struct *tty,
 static void keyspan_close              (struct usb_serial_port *port);
 static void keyspan_dtr_rts            (struct usb_serial_port *port, int on);
 static int  keyspan_startup            (struct usb_serial *serial);
-static void keyspan_shutdown           (struct usb_serial *serial);
+static void keyspan_disconnect         (struct usb_serial *serial);
+static void keyspan_release            (struct usb_serial *serial);
 static int  keyspan_write_room         (struct tty_struct *tty);
 
 static int  keyspan_write              (struct tty_struct *tty,
@@ -569,7 +570,8 @@ static struct usb_serial_driver keyspan_1port_device = {
        .tiocmget               = keyspan_tiocmget,
        .tiocmset               = keyspan_tiocmset,
        .attach                 = keyspan_startup,
-       .shutdown               = keyspan_shutdown,
+       .disconnect             = keyspan_disconnect,
+       .release                = keyspan_release,
 };
 
 static struct usb_serial_driver keyspan_2port_device = {
@@ -590,7 +592,8 @@ static struct usb_serial_driver keyspan_2port_device = {
        .tiocmget               = keyspan_tiocmget,
        .tiocmset               = keyspan_tiocmset,
        .attach                 = keyspan_startup,
-       .shutdown               = keyspan_shutdown,
+       .disconnect             = keyspan_disconnect,
+       .release                = keyspan_release,
 };
 
 static struct usb_serial_driver keyspan_4port_device = {
@@ -611,7 +614,8 @@ static struct usb_serial_driver keyspan_4port_device = {
        .tiocmget               = keyspan_tiocmget,
        .tiocmset               = keyspan_tiocmset,
        .attach                 = keyspan_startup,
-       .shutdown               = keyspan_shutdown,
+       .disconnect             = keyspan_disconnect,
+       .release                = keyspan_release,
 };
 
 #endif
index ab769dbea1b3e6f66d4df15ce6166caca8a17a50..d0b12e40c2b105e2b41cc44f71eba3ea04a1cc29 100644 (file)
@@ -809,7 +809,7 @@ static int keyspan_pda_startup(struct usb_serial *serial)
        return 0;
 }
 
-static void keyspan_pda_shutdown(struct usb_serial *serial)
+static void keyspan_pda_release(struct usb_serial *serial)
 {
        dbg("%s", __func__);
 
@@ -869,7 +869,7 @@ static struct usb_serial_driver keyspan_pda_device = {
        .tiocmget =             keyspan_pda_tiocmget,
        .tiocmset =             keyspan_pda_tiocmset,
        .attach =               keyspan_pda_startup,
-       .shutdown =             keyspan_pda_shutdown,
+       .release =              keyspan_pda_release,
 };
 
 
index fa817c66b3e8c827bb6ce0837591f2406dd73f70..0f44bb8e8d4f64a0400b969da4d8aa80760a318c 100644 (file)
@@ -73,7 +73,8 @@ static int debug;
  * Function prototypes
  */
 static int  klsi_105_startup(struct usb_serial *serial);
-static void klsi_105_shutdown(struct usb_serial *serial);
+static void klsi_105_disconnect(struct usb_serial *serial);
+static void klsi_105_release(struct usb_serial *serial);
 static int  klsi_105_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp);
 static void klsi_105_close(struct usb_serial_port *port);
@@ -131,7 +132,8 @@ static struct usb_serial_driver kl5kusb105d_device = {
        .tiocmget =          klsi_105_tiocmget,
        .tiocmset =          klsi_105_tiocmset,
        .attach =            klsi_105_startup,
-       .shutdown =          klsi_105_shutdown,
+       .disconnect =        klsi_105_disconnect,
+       .release =           klsi_105_release,
        .throttle =          klsi_105_throttle,
        .unthrottle =        klsi_105_unthrottle,
 };
@@ -315,7 +317,7 @@ err_cleanup:
 } /* klsi_105_startup */
 
 
-static void klsi_105_shutdown(struct usb_serial *serial)
+static void klsi_105_disconnect(struct usb_serial *serial)
 {
        int i;
 
@@ -325,33 +327,36 @@ static void klsi_105_shutdown(struct usb_serial *serial)
        for (i = 0; i < serial->num_ports; ++i) {
                struct klsi_105_private *priv =
                                usb_get_serial_port_data(serial->port[i]);
-               unsigned long flags;
 
                if (priv) {
                        /* kill our write urb pool */
                        int j;
                        struct urb **write_urbs = priv->write_urb_pool;
-                       spin_lock_irqsave(&priv->lock, flags);
 
                        for (j = 0; j < NUM_URBS; j++) {
                                if (write_urbs[j]) {
-                                       /* FIXME - uncomment the following
-                                        * usb_kill_urb call when the host
-                                        * controllers get fixed to set
-                                        * urb->dev = NULL after the urb is
-                                        * finished.  Otherwise this call
-                                        * oopses. */
-                                       /* usb_kill_urb(write_urbs[j]); */
-                                       kfree(write_urbs[j]->transfer_buffer);
+                                       usb_kill_urb(write_urbs[j]);
                                        usb_free_urb(write_urbs[j]);
                                }
                        }
-                       spin_unlock_irqrestore(&priv->lock, flags);
-                       kfree(priv);
-                       usb_set_serial_port_data(serial->port[i], NULL);
                }
        }
-} /* klsi_105_shutdown */
+} /* klsi_105_disconnect */
+
+
+static void klsi_105_release(struct usb_serial *serial)
+{
+       int i;
+
+       dbg("%s", __func__);
+
+       for (i = 0; i < serial->num_ports; ++i) {
+               struct klsi_105_private *priv =
+                               usb_get_serial_port_data(serial->port[i]);
+
+               kfree(priv);
+       }
+} /* klsi_105_release */
 
 static int  klsi_105_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp)
index 6b570498287f3f5d62d337e6a347945101b638c3..6db0e561f6805c25a032797ffd1e25bf57c5fa18 100644 (file)
@@ -69,7 +69,7 @@ static int debug;
 
 /* Function prototypes */
 static int  kobil_startup(struct usb_serial *serial);
-static void kobil_shutdown(struct usb_serial *serial);
+static void kobil_release(struct usb_serial *serial);
 static int  kobil_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp);
 static void kobil_close(struct usb_serial_port *port);
@@ -117,7 +117,7 @@ static struct usb_serial_driver kobil_device = {
        .id_table =             id_table,
        .num_ports =            1,
        .attach =               kobil_startup,
-       .shutdown =             kobil_shutdown,
+       .release =              kobil_release,
        .ioctl =                kobil_ioctl,
        .set_termios =          kobil_set_termios,
        .tiocmget =             kobil_tiocmget,
@@ -201,17 +201,13 @@ static int kobil_startup(struct usb_serial *serial)
 }
 
 
-static void kobil_shutdown(struct usb_serial *serial)
+static void kobil_release(struct usb_serial *serial)
 {
        int i;
        dbg("%s - port %d", __func__, serial->port[0]->number);
 
-       for (i = 0; i < serial->num_ports; ++i) {
-               while (serial->port[i]->port.count > 0)
-                       kobil_close(serial->port[i]);
+       for (i = 0; i < serial->num_ports; ++i)
                kfree(usb_get_serial_port_data(serial->port[i]));
-               usb_set_serial_port_data(serial->port[i], NULL);
-       }
 }
 
 
index 873795548fc0a976a5bd129e3316933c9eda069b..d8825e159aa5cfe9361b152489d79fe953f22889 100644 (file)
@@ -92,7 +92,7 @@ static int debug;
  * Function prototypes
  */
 static int  mct_u232_startup(struct usb_serial *serial);
-static void mct_u232_shutdown(struct usb_serial *serial);
+static void mct_u232_release(struct usb_serial *serial);
 static int  mct_u232_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp);
 static void mct_u232_close(struct usb_serial_port *port);
@@ -149,7 +149,7 @@ static struct usb_serial_driver mct_u232_device = {
        .tiocmget =          mct_u232_tiocmget,
        .tiocmset =          mct_u232_tiocmset,
        .attach =            mct_u232_startup,
-       .shutdown =          mct_u232_shutdown,
+       .release =           mct_u232_release,
 };
 
 
@@ -407,7 +407,7 @@ static int mct_u232_startup(struct usb_serial *serial)
 } /* mct_u232_startup */
 
 
-static void mct_u232_shutdown(struct usb_serial *serial)
+static void mct_u232_release(struct usb_serial *serial)
 {
        struct mct_u232_private *priv;
        int i;
@@ -417,12 +417,9 @@ static void mct_u232_shutdown(struct usb_serial *serial)
        for (i = 0; i < serial->num_ports; ++i) {
                /* My special items, the standard routines free my urbs */
                priv = usb_get_serial_port_data(serial->port[i]);
-               if (priv) {
-                       usb_set_serial_port_data(serial->port[i], NULL);
-                       kfree(priv);
-               }
+               kfree(priv);
        }
-} /* mct_u232_shutdown */
+} /* mct_u232_release */
 
 static int  mct_u232_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp)
index 9e1a013ee7f679177e7b588aabbb2b6534d4cda3..bfc5ce000ef92e8d664407dfd62dbf0ba4592832 100644 (file)
@@ -1521,19 +1521,16 @@ static int mos7720_startup(struct usb_serial *serial)
        return 0;
 }
 
-static void mos7720_shutdown(struct usb_serial *serial)
+static void mos7720_release(struct usb_serial *serial)
 {
        int i;
 
        /* free private structure allocated for serial port */
-       for (i = 0; i < serial->num_ports; ++i) {
+       for (i = 0; i < serial->num_ports; ++i)
                kfree(usb_get_serial_port_data(serial->port[i]));
-               usb_set_serial_port_data(serial->port[i], NULL);
-       }
 
        /* free private structure allocated for serial device */
        kfree(usb_get_serial_data(serial));
-       usb_set_serial_data(serial, NULL);
 }
 
 static struct usb_driver usb_driver = {
@@ -1558,7 +1555,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
        .throttle               = mos7720_throttle,
        .unthrottle             = mos7720_unthrottle,
        .attach                 = mos7720_startup,
-       .shutdown               = mos7720_shutdown,
+       .release                = mos7720_release,
        .ioctl                  = mos7720_ioctl,
        .set_termios            = mos7720_set_termios,
        .write                  = mos7720_write,
index 10b78a37214f6c337b1ac1bb3cb93219a9bbd3ba..c40f95c1951cf0cf17e8fedc4e8e9f4782324c67 100644 (file)
@@ -238,7 +238,7 @@ static int mos7840_set_reg_sync(struct usb_serial_port *port, __u16 reg,
 {
        struct usb_device *dev = port->serial->dev;
        val = val & 0x00ff;
-       dbg("mos7840_set_reg_sync offset is %x, value %x\n", reg, val);
+       dbg("mos7840_set_reg_sync offset is %x, value %x", reg, val);
 
        return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
                               MCS_WR_RTYPE, val, reg, NULL, 0,
@@ -260,7 +260,7 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
        ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
                              MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
                              MOS_WDR_TIMEOUT);
-       dbg("mos7840_get_reg_sync offset is %x, return val %x\n", reg, *val);
+       dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
        *val = (*val) & 0x00ff;
        return ret;
 }
@@ -282,18 +282,18 @@ static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg,
        if (port->serial->num_ports == 4) {
                val |= (((__u16) port->number -
                                (__u16) (port->serial->minor)) + 1) << 8;
-               dbg("mos7840_set_uart_reg application number is %x\n", val);
+               dbg("mos7840_set_uart_reg application number is %x", val);
        } else {
                if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
                        val |= (((__u16) port->number -
                              (__u16) (port->serial->minor)) + 1) << 8;
-                       dbg("mos7840_set_uart_reg application number is %x\n",
+                       dbg("mos7840_set_uart_reg application number is %x",
                            val);
                } else {
                        val |=
                            (((__u16) port->number -
                              (__u16) (port->serial->minor)) + 2) << 8;
-                       dbg("mos7840_set_uart_reg application number is %x\n",
+                       dbg("mos7840_set_uart_reg application number is %x",
                            val);
                }
        }
@@ -315,24 +315,24 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
        int ret = 0;
        __u16 Wval;
 
-       /* dbg("application number is %4x \n",
+       /* dbg("application number is %4x",
            (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
        /* Wval  is same as application number */
        if (port->serial->num_ports == 4) {
                Wval =
                    (((__u16) port->number - (__u16) (port->serial->minor)) +
                     1) << 8;
-               dbg("mos7840_get_uart_reg application number is %x\n", Wval);
+               dbg("mos7840_get_uart_reg application number is %x", Wval);
        } else {
                if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
                        Wval = (((__u16) port->number -
                              (__u16) (port->serial->minor)) + 1) << 8;
-                       dbg("mos7840_get_uart_reg application number is %x\n",
+                       dbg("mos7840_get_uart_reg application number is %x",
                            Wval);
                } else {
                        Wval = (((__u16) port->number -
                              (__u16) (port->serial->minor)) + 2) << 8;
-                       dbg("mos7840_get_uart_reg application number is %x\n",
+                       dbg("mos7840_get_uart_reg application number is %x",
                            Wval);
                }
        }
@@ -346,11 +346,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
 static void mos7840_dump_serial_port(struct moschip_port *mos7840_port)
 {
 
-       dbg("***************************************\n");
-       dbg("SpRegOffset is %2x\n", mos7840_port->SpRegOffset);
-       dbg("ControlRegOffset is %2x \n", mos7840_port->ControlRegOffset);
-       dbg("DCRRegOffset is %2x \n", mos7840_port->DcrRegOffset);
-       dbg("***************************************\n");
+       dbg("***************************************");
+       dbg("SpRegOffset is %2x", mos7840_port->SpRegOffset);
+       dbg("ControlRegOffset is %2x", mos7840_port->ControlRegOffset);
+       dbg("DCRRegOffset is %2x", mos7840_port->DcrRegOffset);
+       dbg("***************************************");
 
 }
 
@@ -474,12 +474,12 @@ static void mos7840_control_callback(struct urb *urb)
                goto exit;
        }
 
-       dbg("%s urb buffer size is %d\n", __func__, urb->actual_length);
-       dbg("%s mos7840_port->MsrLsr is %d port %d\n", __func__,
+       dbg("%s urb buffer size is %d", __func__, urb->actual_length);
+       dbg("%s mos7840_port->MsrLsr is %d port %d", __func__,
            mos7840_port->MsrLsr, mos7840_port->port_num);
        data = urb->transfer_buffer;
        regval = (__u8) data[0];
-       dbg("%s data is %x\n", __func__, regval);
+       dbg("%s data is %x", __func__, regval);
        if (mos7840_port->MsrLsr == 0)
                mos7840_handle_new_msr(mos7840_port, regval);
        else if (mos7840_port->MsrLsr == 1)
@@ -538,7 +538,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
        __u16 wval, wreg = 0;
        int status = urb->status;
 
-       dbg("%s", " : Entering\n");
+       dbg("%s", " : Entering");
 
        switch (status) {
        case 0:
@@ -570,7 +570,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
         * Byte 5 FIFO status for both */
 
        if (length && length > 5) {
-               dbg("%s \n", "Wrong data !!!");
+               dbg("%s", "Wrong data !!!");
                return;
        }
 
@@ -587,17 +587,17 @@ static void mos7840_interrupt_callback(struct urb *urb)
                      (__u16) (serial->minor)) + 1) << 8;
                if (mos7840_port->open) {
                        if (sp[i] & 0x01) {
-                               dbg("SP%d No Interrupt !!!\n", i);
+                               dbg("SP%d No Interrupt !!!", i);
                        } else {
                                switch (sp[i] & 0x0f) {
                                case SERIAL_IIR_RLS:
                                        dbg("Serial Port %d: Receiver status error or ", i);
-                                       dbg("address bit detected in 9-bit mode\n");
+                                       dbg("address bit detected in 9-bit mode");
                                        mos7840_port->MsrLsr = 1;
                                        wreg = LINE_STATUS_REGISTER;
                                        break;
                                case SERIAL_IIR_MS:
-                                       dbg("Serial Port %d: Modem status change\n", i);
+                                       dbg("Serial Port %d: Modem status change", i);
                                        mos7840_port->MsrLsr = 0;
                                        wreg = MODEM_STATUS_REGISTER;
                                        break;
@@ -689,7 +689,7 @@ static void mos7840_bulk_in_callback(struct urb *urb)
 
        mos7840_port = urb->context;
        if (!mos7840_port) {
-               dbg("%s", "NULL mos7840_port pointer \n");
+               dbg("%s", "NULL mos7840_port pointer");
                mos7840_port->read_urb_busy = false;
                return;
        }
@@ -702,41 +702,41 @@ static void mos7840_bulk_in_callback(struct urb *urb)
 
        port = (struct usb_serial_port *)mos7840_port->port;
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Port Paranoia failed \n");
+               dbg("%s", "Port Paranoia failed");
                mos7840_port->read_urb_busy = false;
                return;
        }
 
        serial = mos7840_get_usb_serial(port, __func__);
        if (!serial) {
-               dbg("%s\n", "Bad serial pointer ");
+               dbg("%s", "Bad serial pointer");
                mos7840_port->read_urb_busy = false;
                return;
        }
 
-       dbg("%s\n", "Entering... \n");
+       dbg("%s", "Entering... ");
 
        data = urb->transfer_buffer;
 
-       dbg("%s", "Entering ........... \n");
+       dbg("%s", "Entering ...........");
 
        if (urb->actual_length) {
                tty = tty_port_tty_get(&mos7840_port->port->port);
                if (tty) {
                        tty_buffer_request_room(tty, urb->actual_length);
                        tty_insert_flip_string(tty, data, urb->actual_length);
-                       dbg(" %s \n", data);
+                       dbg(" %s ", data);
                        tty_flip_buffer_push(tty);
                        tty_kref_put(tty);
                }
                mos7840_port->icount.rx += urb->actual_length;
                smp_wmb();
-               dbg("mos7840_port->icount.rx is %d:\n",
+               dbg("mos7840_port->icount.rx is %d:",
                    mos7840_port->icount.rx);
        }
 
        if (!mos7840_port->read_urb) {
-               dbg("%s", "URB KILLED !!!\n");
+               dbg("%s", "URB KILLED !!!");
                mos7840_port->read_urb_busy = false;
                return;
        }
@@ -777,16 +777,16 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
        spin_unlock(&mos7840_port->pool_lock);
 
        if (status) {
-               dbg("nonzero write bulk status received:%d\n", status);
+               dbg("nonzero write bulk status received:%d", status);
                return;
        }
 
        if (mos7840_port_paranoia_check(mos7840_port->port, __func__)) {
-               dbg("%s", "Port Paranoia failed \n");
+               dbg("%s", "Port Paranoia failed");
                return;
        }
 
-       dbg("%s \n", "Entering .........");
+       dbg("%s", "Entering .........");
 
        tty = tty_port_tty_get(&mos7840_port->port->port);
        if (tty && mos7840_port->open)
@@ -830,15 +830,17 @@ static int mos7840_open(struct tty_struct *tty,
        struct moschip_port *mos7840_port;
        struct moschip_port *port0;
 
+       dbg ("%s enter", __func__);
+
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Port Paranoia failed \n");
+               dbg("%s", "Port Paranoia failed");
                return -ENODEV;
        }
 
        serial = port->serial;
 
        if (mos7840_serial_paranoia_check(serial, __func__)) {
-               dbg("%s", "Serial Paranoia failed \n");
+               dbg("%s", "Serial Paranoia failed");
                return -ENODEV;
        }
 
@@ -891,20 +893,20 @@ static int mos7840_open(struct tty_struct *tty,
        Data = 0x0;
        status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
        if (status < 0) {
-               dbg("Reading Spreg failed\n");
+               dbg("Reading Spreg failed");
                return -1;
        }
        Data |= 0x80;
        status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
        if (status < 0) {
-               dbg("writing Spreg failed\n");
+               dbg("writing Spreg failed");
                return -1;
        }
 
        Data &= ~0x80;
        status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
        if (status < 0) {
-               dbg("writing Spreg failed\n");
+               dbg("writing Spreg failed");
                return -1;
        }
        /* End of block to be checked */
@@ -913,7 +915,7 @@ static int mos7840_open(struct tty_struct *tty,
        status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
                                                                        &Data);
        if (status < 0) {
-               dbg("Reading Controlreg failed\n");
+               dbg("Reading Controlreg failed");
                return -1;
        }
        Data |= 0x08;           /* Driver done bit */
@@ -921,7 +923,7 @@ static int mos7840_open(struct tty_struct *tty,
        status = mos7840_set_reg_sync(port,
                                mos7840_port->ControlRegOffset, Data);
        if (status < 0) {
-               dbg("writing Controlreg failed\n");
+               dbg("writing Controlreg failed");
                return -1;
        }
        /* do register settings here */
@@ -932,21 +934,21 @@ static int mos7840_open(struct tty_struct *tty,
        Data = 0x00;
        status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
        if (status < 0) {
-               dbg("disableing interrupts failed\n");
+               dbg("disabling interrupts failed");
                return -1;
        }
        /* Set FIFO_CONTROL_REGISTER to the default value */
        Data = 0x00;
        status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
        if (status < 0) {
-               dbg("Writing FIFO_CONTROL_REGISTER  failed\n");
+               dbg("Writing FIFO_CONTROL_REGISTER  failed");
                return -1;
        }
 
        Data = 0xcf;
        status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
        if (status < 0) {
-               dbg("Writing FIFO_CONTROL_REGISTER  failed\n");
+               dbg("Writing FIFO_CONTROL_REGISTER  failed");
                return -1;
        }
 
@@ -1043,12 +1045,12 @@ static int mos7840_open(struct tty_struct *tty,
         * (can't set it up in mos7840_startup as the  *
         * structures were not set up at that time.)   */
 
-       dbg("port number is %d \n", port->number);
-       dbg("serial number is %d \n", port->serial->minor);
-       dbg("Bulkin endpoint is %d \n", port->bulk_in_endpointAddress);
-       dbg("BulkOut endpoint is %d \n", port->bulk_out_endpointAddress);
-       dbg("Interrupt endpoint is %d \n", port->interrupt_in_endpointAddress);
-       dbg("port's number in the device is %d\n", mos7840_port->port_num);
+       dbg("port number is %d", port->number);
+       dbg("serial number is %d", port->serial->minor);
+       dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress);
+       dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress);
+       dbg("Interrupt endpoint is %d", port->interrupt_in_endpointAddress);
+       dbg("port's number in the device is %d", mos7840_port->port_num);
        mos7840_port->read_urb = port->read_urb;
 
        /* set up our bulk in urb */
@@ -1061,7 +1063,7 @@ static int mos7840_open(struct tty_struct *tty,
                          mos7840_port->read_urb->transfer_buffer_length,
                          mos7840_bulk_in_callback, mos7840_port);
 
-       dbg("mos7840_open: bulkin endpoint is %d\n",
+       dbg("mos7840_open: bulkin endpoint is %d",
            port->bulk_in_endpointAddress);
        mos7840_port->read_urb_busy = true;
        response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
@@ -1087,9 +1089,11 @@ static int mos7840_open(struct tty_struct *tty,
        mos7840_port->icount.tx = 0;
        mos7840_port->icount.rx = 0;
 
-       dbg("\n\nusb_serial serial:%p       mos7840_port:%p\n      usb_serial_port port:%p\n\n",
+       dbg("usb_serial serial:%p       mos7840_port:%p\n      usb_serial_port port:%p",
                                serial, mos7840_port, port);
 
+       dbg ("%s leave", __func__);
+
        return 0;
 
 }
@@ -1112,16 +1116,16 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
        unsigned long flags;
        struct moschip_port *mos7840_port;
 
-       dbg("%s \n", " mos7840_chars_in_buffer:entering ...........");
+       dbg("%s", " mos7840_chars_in_buffer:entering ...........");
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
+               dbg("%s", "Invalid port");
                return 0;
        }
 
        mos7840_port = mos7840_get_port_private(port);
        if (mos7840_port == NULL) {
-               dbg("%s \n", "mos7840_break:leaving ...........");
+               dbg("%s", "mos7840_break:leaving ...........");
                return 0;
        }
 
@@ -1148,16 +1152,16 @@ static void mos7840_close(struct usb_serial_port *port)
        int j;
        __u16 Data;
 
-       dbg("%s\n", "mos7840_close:entering...");
+       dbg("%s", "mos7840_close:entering...");
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Port Paranoia failed \n");
+               dbg("%s", "Port Paranoia failed");
                return;
        }
 
        serial = mos7840_get_usb_serial(port, __func__);
        if (!serial) {
-               dbg("%s", "Serial Paranoia failed \n");
+               dbg("%s", "Serial Paranoia failed");
                return;
        }
 
@@ -1185,27 +1189,27 @@ static void mos7840_close(struct usb_serial_port *port)
         * and interrupt read if they exists                  */
        if (serial->dev) {
                if (mos7840_port->write_urb) {
-                       dbg("%s", "Shutdown bulk write\n");
+                       dbg("%s", "Shutdown bulk write");
                        usb_kill_urb(mos7840_port->write_urb);
                }
                if (mos7840_port->read_urb) {
-                       dbg("%s", "Shutdown bulk read\n");
+                       dbg("%s", "Shutdown bulk read");
                        usb_kill_urb(mos7840_port->read_urb);
                        mos7840_port->read_urb_busy = false;
                }
                if ((&mos7840_port->control_urb)) {
-                       dbg("%s", "Shutdown control read\n");
+                       dbg("%s", "Shutdown control read");
                        /*/      usb_kill_urb (mos7840_port->control_urb); */
                }
        }
 /*      if(mos7840_port->ctrl_buf != NULL) */
 /*              kfree(mos7840_port->ctrl_buf); */
        port0->open_ports--;
-       dbg("mos7840_num_open_ports in close%d:in port%d\n",
+       dbg("mos7840_num_open_ports in close%d:in port%d",
            port0->open_ports, port->number);
        if (port0->open_ports == 0) {
                if (serial->port[0]->interrupt_in_urb) {
-                       dbg("%s", "Shutdown interrupt_in_urb\n");
+                       dbg("%s", "Shutdown interrupt_in_urb");
                        usb_kill_urb(serial->port[0]->interrupt_in_urb);
                }
        }
@@ -1225,7 +1229,7 @@ static void mos7840_close(struct usb_serial_port *port)
 
        mos7840_port->open = 0;
 
-       dbg("%s \n", "Leaving ............");
+       dbg("%s", "Leaving ............");
 }
 
 /************************************************************************
@@ -1280,17 +1284,17 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
        struct usb_serial *serial;
        struct moschip_port *mos7840_port;
 
-       dbg("%s \n", "Entering ...........");
-       dbg("mos7840_break: Start\n");
+       dbg("%s", "Entering ...........");
+       dbg("mos7840_break: Start");
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Port Paranoia failed \n");
+               dbg("%s", "Port Paranoia failed");
                return;
        }
 
        serial = mos7840_get_usb_serial(port, __func__);
        if (!serial) {
-               dbg("%s", "Serial Paranoia failed \n");
+               dbg("%s", "Serial Paranoia failed");
                return;
        }
 
@@ -1310,7 +1314,7 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
 
        /* FIXME: no locking on shadowLCR anywhere in driver */
        mos7840_port->shadowLCR = data;
-       dbg("mcs7840_break mos7840_port->shadowLCR is %x\n",
+       dbg("mcs7840_break mos7840_port->shadowLCR is %x",
            mos7840_port->shadowLCR);
        mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
                             mos7840_port->shadowLCR);
@@ -1334,17 +1338,17 @@ static int mos7840_write_room(struct tty_struct *tty)
        unsigned long flags;
        struct moschip_port *mos7840_port;
 
-       dbg("%s \n", " mos7840_write_room:entering ...........");
+       dbg("%s", " mos7840_write_room:entering ...........");
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
-               dbg("%s \n", " mos7840_write_room:leaving ...........");
+               dbg("%s", "Invalid port");
+               dbg("%s", " mos7840_write_room:leaving ...........");
                return -1;
        }
 
        mos7840_port = mos7840_get_port_private(port);
        if (mos7840_port == NULL) {
-               dbg("%s \n", "mos7840_break:leaving ...........");
+               dbg("%s", "mos7840_break:leaving ...........");
                return -1;
        }
 
@@ -1384,16 +1388,16 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
        /* __u16 Data; */
        const unsigned char *current_position = data;
        unsigned char *data1;
-       dbg("%s \n", "entering ...........");
-       /* dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+       dbg("%s", "entering ...........");
+       /* dbg("mos7840_write: mos7840_port->shadowLCR is %x",
                                        mos7840_port->shadowLCR); */
 
 #ifdef NOTMOS7840
        Data = 0x00;
        status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
        mos7840_port->shadowLCR = Data;
-       dbg("mos7840_write: LINE_CONTROL_REGISTER is %x\n", Data);
-       dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+       dbg("mos7840_write: LINE_CONTROL_REGISTER is %x", Data);
+       dbg("mos7840_write: mos7840_port->shadowLCR is %x",
            mos7840_port->shadowLCR);
 
        /* Data = 0x03; */
@@ -1407,32 +1411,32 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
        /* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */
        Data = 0x00;
        status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data);
-       dbg("mos7840_write:DLL value is %x\n", Data);
+       dbg("mos7840_write:DLL value is %x", Data);
 
        Data = 0x0;
        status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data);
-       dbg("mos7840_write:DLM value is %x\n", Data);
+       dbg("mos7840_write:DLM value is %x", Data);
 
        Data = Data & ~SERIAL_LCR_DLAB;
-       dbg("mos7840_write: mos7840_port->shadowLCR is %x\n",
+       dbg("mos7840_write: mos7840_port->shadowLCR is %x",
            mos7840_port->shadowLCR);
        status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
 #endif
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Port Paranoia failed \n");
+               dbg("%s", "Port Paranoia failed");
                return -1;
        }
 
        serial = port->serial;
        if (mos7840_serial_paranoia_check(serial, __func__)) {
-               dbg("%s", "Serial Paranoia failed \n");
+               dbg("%s", "Serial Paranoia failed");
                return -1;
        }
 
        mos7840_port = mos7840_get_port_private(port);
        if (mos7840_port == NULL) {
-               dbg("%s", "mos7840_port is NULL\n");
+               dbg("%s", "mos7840_port is NULL");
                return -1;
        }
 
@@ -1444,7 +1448,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
                if (!mos7840_port->busy[i]) {
                        mos7840_port->busy[i] = 1;
                        urb = mos7840_port->write_urb_pool[i];
-                       dbg("\nURB:%d", i);
+                       dbg("URB:%d", i);
                        break;
                }
        }
@@ -1479,7 +1483,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
                          mos7840_bulk_out_data_callback, mos7840_port);
 
        data1 = urb->transfer_buffer;
-       dbg("\nbulkout endpoint is %d", port->bulk_out_endpointAddress);
+       dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress);
 
        /* send it down the pipe */
        status = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1494,7 +1498,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
        bytes_sent = transfer_size;
        mos7840_port->icount.tx += transfer_size;
        smp_wmb();
-       dbg("mos7840_port->icount.tx is %d:\n", mos7840_port->icount.tx);
+       dbg("mos7840_port->icount.tx is %d:", mos7840_port->icount.tx);
 exit:
        return bytes_sent;
 
@@ -1513,11 +1517,11 @@ static void mos7840_throttle(struct tty_struct *tty)
        int status;
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
+               dbg("%s", "Invalid port");
                return;
        }
 
-       dbg("- port %d\n", port->number);
+       dbg("- port %d", port->number);
 
        mos7840_port = mos7840_get_port_private(port);
 
@@ -1525,11 +1529,11 @@ static void mos7840_throttle(struct tty_struct *tty)
                return;
 
        if (!mos7840_port->open) {
-               dbg("%s\n", "port not opened");
+               dbg("%s", "port not opened");
                return;
        }
 
-       dbg("%s", "Entering .......... \n");
+       dbg("%s", "Entering ..........");
 
        /* if we are implementing XON/XOFF, send the stop character */
        if (I_IXOFF(tty)) {
@@ -1563,7 +1567,7 @@ static void mos7840_unthrottle(struct tty_struct *tty)
        struct moschip_port *mos7840_port = mos7840_get_port_private(port);
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
+               dbg("%s", "Invalid port");
                return;
        }
 
@@ -1575,7 +1579,7 @@ static void mos7840_unthrottle(struct tty_struct *tty)
                return;
        }
 
-       dbg("%s", "Entering .......... \n");
+       dbg("%s", "Entering ..........");
 
        /* if we are implementing XON/XOFF, send the start character */
        if (I_IXOFF(tty)) {
@@ -1660,7 +1664,7 @@ static int mos7840_tiocmset(struct tty_struct *tty, struct file *file,
 
        status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
        if (status < 0) {
-               dbg("setting MODEM_CONTROL_REGISTER Failed\n");
+               dbg("setting MODEM_CONTROL_REGISTER Failed");
                return status;
        }
 
@@ -1729,11 +1733,11 @@ static int mos7840_calc_baud_rate_divisor(int baudRate, int *divisor,
                        custom++;
                *divisor = custom;
 
-               dbg(" Baud %d = %d\n", baudrate, custom);
+               dbg(" Baud %d = %d", baudrate, custom);
                return 0;
        }
 
-       dbg("%s\n", " Baud calculation Failed...");
+       dbg("%s", " Baud calculation Failed...");
        return -1;
 #endif
 }
@@ -1759,16 +1763,16 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
 
        port = (struct usb_serial_port *)mos7840_port->port;
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
+               dbg("%s", "Invalid port");
                return -1;
        }
 
        if (mos7840_serial_paranoia_check(port->serial, __func__)) {
-               dbg("%s", "Invalid Serial \n");
+               dbg("%s", "Invalid Serial");
                return -1;
        }
 
-       dbg("%s", "Entering .......... \n");
+       dbg("%s", "Entering ..........");
 
        number = mos7840_port->port->number - mos7840_port->port->serial->minor;
 
@@ -1784,7 +1788,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
                status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
                                                                        Data);
                if (status < 0) {
-                       dbg("Writing spreg failed in set_serial_baud\n");
+                       dbg("Writing spreg failed in set_serial_baud");
                        return -1;
                }
 #endif
@@ -1797,7 +1801,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
                status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
                                                                        Data);
                if (status < 0) {
-                       dbg("Writing spreg failed in set_serial_baud\n");
+                       dbg("Writing spreg failed in set_serial_baud");
                        return -1;
                }
 #endif
@@ -1812,14 +1816,14 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
                status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset,
                                                                 &Data);
                if (status < 0) {
-                       dbg("reading spreg failed in set_serial_baud\n");
+                       dbg("reading spreg failed in set_serial_baud");
                        return -1;
                }
                Data = (Data & 0x8f) | clk_sel_val;
                status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset,
                                                                Data);
                if (status < 0) {
-                       dbg("Writing spreg failed in set_serial_baud\n");
+                       dbg("Writing spreg failed in set_serial_baud");
                        return -1;
                }
                /* Calculate the Divisor */
@@ -1835,11 +1839,11 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
 
                /* Write the divisor */
                Data = (unsigned char)(divisor & 0xff);
-               dbg("set_serial_baud Value to write DLL is %x\n", Data);
+               dbg("set_serial_baud Value to write DLL is %x", Data);
                mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
 
                Data = (unsigned char)((divisor & 0xff00) >> 8);
-               dbg("set_serial_baud Value to write DLM is %x\n", Data);
+               dbg("set_serial_baud Value to write DLM is %x", Data);
                mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
 
                /* Disable access to divisor latch */
@@ -1877,12 +1881,12 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
        port = (struct usb_serial_port *)mos7840_port->port;
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
+               dbg("%s", "Invalid port");
                return;
        }
 
        if (mos7840_serial_paranoia_check(port->serial, __func__)) {
-               dbg("%s", "Invalid Serial \n");
+               dbg("%s", "Invalid Serial");
                return;
        }
 
@@ -1895,7 +1899,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
                return;
        }
 
-       dbg("%s", "Entering .......... \n");
+       dbg("%s", "Entering ..........");
 
        lData = LCR_BITS_8;
        lStop = LCR_STOP_1;
@@ -1955,7 +1959,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
            ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
        mos7840_port->shadowLCR |= (lData | lParity | lStop);
 
-       dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x\n",
+       dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x",
            mos7840_port->shadowLCR);
        /* Disable Interrupts */
        Data = 0x00;
@@ -1997,7 +2001,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
 
        if (!baud) {
                /* pick a default, any default... */
-               dbg("%s\n", "Picked default baud...");
+               dbg("%s", "Picked default baud...");
                baud = 9600;
        }
 
@@ -2020,7 +2024,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
        }
        wake_up(&mos7840_port->delta_msr_wait);
        mos7840_port->delta_msr_cond = 1;
-       dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x\n",
+       dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x",
            mos7840_port->shadowLCR);
 
        return;
@@ -2040,16 +2044,16 @@ static void mos7840_set_termios(struct tty_struct *tty,
        unsigned int cflag;
        struct usb_serial *serial;
        struct moschip_port *mos7840_port;
-       dbg("mos7840_set_termios: START\n");
+       dbg("mos7840_set_termios: START");
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
+               dbg("%s", "Invalid port");
                return;
        }
 
        serial = port->serial;
 
        if (mos7840_serial_paranoia_check(serial, __func__)) {
-               dbg("%s", "Invalid Serial \n");
+               dbg("%s", "Invalid Serial");
                return;
        }
 
@@ -2063,7 +2067,7 @@ static void mos7840_set_termios(struct tty_struct *tty,
                return;
        }
 
-       dbg("%s\n", "setting termios - ");
+       dbg("%s", "setting termios - ");
 
        cflag = tty->termios->c_cflag;
 
@@ -2078,7 +2082,7 @@ static void mos7840_set_termios(struct tty_struct *tty,
        mos7840_change_port_settings(tty, mos7840_port, old_termios);
 
        if (!mos7840_port->read_urb) {
-               dbg("%s", "URB KILLED !!!!!\n");
+               dbg("%s", "URB KILLED !!!!!");
                return;
        }
 
@@ -2144,7 +2148,7 @@ static int mos7840_set_modem_info(struct moschip_port *mos7840_port,
 
        port = (struct usb_serial_port *)mos7840_port->port;
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
+               dbg("%s", "Invalid port");
                return -1;
        }
 
@@ -2189,7 +2193,7 @@ static int mos7840_set_modem_info(struct moschip_port *mos7840_port,
        status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
        unlock_kernel();
        if (status < 0) {
-               dbg("setting MODEM_CONTROL_REGISTER Failed\n");
+               dbg("setting MODEM_CONTROL_REGISTER Failed");
                return -1;
        }
 
@@ -2274,7 +2278,7 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
        int mosret = 0;
 
        if (mos7840_port_paranoia_check(port, __func__)) {
-               dbg("%s", "Invalid port \n");
+               dbg("%s", "Invalid port");
                return -1;
        }
 
@@ -2374,9 +2378,8 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
 {
        int mos7840_num_ports = 0;
 
-       dbg("numberofendpoints: %d \n",
-           (int)serial->interface->cur_altsetting->desc.bNumEndpoints);
-       dbg("numberofendpoints: %d \n",
+       dbg("numberofendpoints: cur %d, alt %d",
+           (int)serial->interface->cur_altsetting->desc.bNumEndpoints,
            (int)serial->interface->altsetting->desc.bNumEndpoints);
        if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) {
                mos7840_num_ports = serial->num_ports = 2;
@@ -2385,7 +2388,7 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
                serial->num_bulk_out = 4;
                mos7840_num_ports = serial->num_ports = 4;
        }
-
+       dbg ("mos7840_num_ports = %d", mos7840_num_ports);
        return mos7840_num_ports;
 }
 
@@ -2400,22 +2403,24 @@ static int mos7840_startup(struct usb_serial *serial)
        int i, status;
 
        __u16 Data;
-       dbg("%s \n", " mos7840_startup :entering..........");
+       dbg("%s", "mos7840_startup :Entering..........");
 
        if (!serial) {
-               dbg("%s\n", "Invalid Handler");
+               dbg("%s", "Invalid Handler");
                return -1;
        }
 
        dev = serial->dev;
 
-       dbg("%s\n", "Entering...");
+       dbg("%s", "Entering...");
+       dbg ("mos7840_startup: serial = %p", serial);
 
        /* we set up the pointers to the endpoints in the mos7840_open *
         * function, as the structures aren't created yet.             */
 
        /* set up port private structures */
        for (i = 0; i < serial->num_ports; ++i) {
+               dbg ("mos7840_startup: configuring port %d............", i);
                mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
                if (mos7840_port == NULL) {
                        dev_err(&dev->dev, "%s - Out of memory\n", __func__);
@@ -2473,10 +2478,10 @@ static int mos7840_startup(struct usb_serial *serial)
                status = mos7840_get_reg_sync(serial->port[i],
                                 mos7840_port->ControlRegOffset, &Data);
                if (status < 0) {
-                       dbg("Reading ControlReg failed status-0x%x\n", status);
+                       dbg("Reading ControlReg failed status-0x%x", status);
                        break;
                } else
-                       dbg("ControlReg Reading success val is %x, status%d\n",
+                       dbg("ControlReg Reading success val is %x, status%d",
                            Data, status);
                Data |= 0x08;   /* setting driver done bit */
                Data |= 0x04;   /* sp1_bit to have cts change reflect in
@@ -2486,10 +2491,10 @@ static int mos7840_startup(struct usb_serial *serial)
                status = mos7840_set_reg_sync(serial->port[i],
                                         mos7840_port->ControlRegOffset, Data);
                if (status < 0) {
-                       dbg("Writing ControlReg failed(rx_disable) status-0x%x\n", status);
+                       dbg("Writing ControlReg failed(rx_disable) status-0x%x", status);
                        break;
                } else
-                       dbg("ControlReg Writing success(rx_disable) status%d\n",
+                       dbg("ControlReg Writing success(rx_disable) status%d",
                            status);
 
                /* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2
@@ -2498,48 +2503,48 @@ static int mos7840_startup(struct usb_serial *serial)
                status = mos7840_set_reg_sync(serial->port[i],
                         (__u16) (mos7840_port->DcrRegOffset + 0), Data);
                if (status < 0) {
-                       dbg("Writing DCR0 failed status-0x%x\n", status);
+                       dbg("Writing DCR0 failed status-0x%x", status);
                        break;
                } else
-                       dbg("DCR0 Writing success status%d\n", status);
+                       dbg("DCR0 Writing success status%d", status);
 
                Data = 0x05;
                status = mos7840_set_reg_sync(serial->port[i],
                         (__u16) (mos7840_port->DcrRegOffset + 1), Data);
                if (status < 0) {
-                       dbg("Writing DCR1 failed status-0x%x\n", status);
+                       dbg("Writing DCR1 failed status-0x%x", status);
                        break;
                } else
-                       dbg("DCR1 Writing success status%d\n", status);
+                       dbg("DCR1 Writing success status%d", status);
 
                Data = 0x24;
                status = mos7840_set_reg_sync(serial->port[i],
                         (__u16) (mos7840_port->DcrRegOffset + 2), Data);
                if (status < 0) {
-                       dbg("Writing DCR2 failed status-0x%x\n", status);
+                       dbg("Writing DCR2 failed status-0x%x", status);
                        break;
                } else
-                       dbg("DCR2 Writing success status%d\n", status);
+                       dbg("DCR2 Writing success status%d", status);
 
                /* write values in clkstart0x0 and clkmulti 0x20 */
                Data = 0x0;
                status = mos7840_set_reg_sync(serial->port[i],
                                         CLK_START_VALUE_REGISTER, Data);
                if (status < 0) {
-                       dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
+                       dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x", status);
                        break;
                } else
-                       dbg("CLK_START_VALUE_REGISTER Writing success status%d\n", status);
+                       dbg("CLK_START_VALUE_REGISTER Writing success status%d", status);
 
                Data = 0x20;
                status = mos7840_set_reg_sync(serial->port[i],
                                        CLK_MULTI_REGISTER, Data);
                if (status < 0) {
-                       dbg("Writing CLK_MULTI_REGISTER failed status-0x%x\n",
+                       dbg("Writing CLK_MULTI_REGISTER failed status-0x%x",
                            status);
                        goto error;
                } else
-                       dbg("CLK_MULTI_REGISTER Writing success status%d\n",
+                       dbg("CLK_MULTI_REGISTER Writing success status%d",
                            status);
 
                /* write value 0x0 to scratchpad register */
@@ -2547,11 +2552,11 @@ static int mos7840_startup(struct usb_serial *serial)
                status = mos7840_set_uart_reg(serial->port[i],
                                                SCRATCH_PAD_REGISTER, Data);
                if (status < 0) {
-                       dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x\n",
+                       dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x",
                            status);
                        break;
                } else
-                       dbg("SCRATCH_PAD_REGISTER Writing success status%d\n",
+                       dbg("SCRATCH_PAD_REGISTER Writing success status%d",
                            status);
 
                /* Zero Length flag register */
@@ -2562,30 +2567,30 @@ static int mos7840_startup(struct usb_serial *serial)
                        status = mos7840_set_reg_sync(serial->port[i],
                                      (__u16) (ZLP_REG1 +
                                      ((__u16)mos7840_port->port_num)), Data);
-                       dbg("ZLIP offset%x\n",
+                       dbg("ZLIP offset %x",
                            (__u16) (ZLP_REG1 +
                                        ((__u16) mos7840_port->port_num)));
                        if (status < 0) {
-                               dbg("Writing ZLP_REG%d failed status-0x%x\n",
+                               dbg("Writing ZLP_REG%d failed status-0x%x",
                                    i + 2, status);
                                break;
                        } else
-                               dbg("ZLP_REG%d Writing success status%d\n",
+                               dbg("ZLP_REG%d Writing success status%d",
                                    i + 2, status);
                } else {
                        Data = 0xff;
                        status = mos7840_set_reg_sync(serial->port[i],
                              (__u16) (ZLP_REG1 +
                              ((__u16)mos7840_port->port_num) - 0x1), Data);
-                       dbg("ZLIP offset%x\n",
+                       dbg("ZLIP offset %x",
                            (__u16) (ZLP_REG1 +
                                     ((__u16) mos7840_port->port_num) - 0x1));
                        if (status < 0) {
-                               dbg("Writing ZLP_REG%d failed status-0x%x\n",
+                               dbg("Writing ZLP_REG%d failed status-0x%x",
                                    i + 1, status);
                                break;
                        } else
-                               dbg("ZLP_REG%d Writing success status%d\n",
+                               dbg("ZLP_REG%d Writing success status%d",
                                    i + 1, status);
 
                }
@@ -2599,15 +2604,16 @@ static int mos7840_startup(struct usb_serial *serial)
                        goto error;
                }
        }
+       dbg ("mos7840_startup: all ports configured...........");
 
        /* Zero Length flag enable */
        Data = 0x0f;
        status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
        if (status < 0) {
-               dbg("Writing ZLP_REG5 failed status-0x%x\n", status);
+               dbg("Writing ZLP_REG5 failed status-0x%x", status);
                goto error;
        } else
-               dbg("ZLP_REG5 Writing success status%d\n", status);
+               dbg("ZLP_REG5 Writing success status%d", status);
 
        /* setting configuration feature to one */
        usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
@@ -2627,19 +2633,19 @@ error:
 }
 
 /****************************************************************************
- * mos7840_shutdown
+ * mos7840_disconnect
  *     This function is called whenever the device is removed from the usb bus.
  ****************************************************************************/
 
-static void mos7840_shutdown(struct usb_serial *serial)
+static void mos7840_disconnect(struct usb_serial *serial)
 {
        int i;
        unsigned long flags;
        struct moschip_port *mos7840_port;
-       dbg("%s \n", " shutdown :entering..........");
+       dbg("%s", " disconnect :entering..........");
 
        if (!serial) {
-               dbg("%s", "Invalid Handler \n");
+               dbg("%s", "Invalid Handler");
                return;
        }
 
@@ -2656,14 +2662,45 @@ static void mos7840_shutdown(struct usb_serial *serial)
                        mos7840_port->zombie = 1;
                        spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
                        usb_kill_urb(mos7840_port->control_urb);
+               }
+       }
+
+       dbg("%s", "Thank u :: ");
+
+}
+
+/****************************************************************************
+ * mos7840_release
+ *     This function is called when the usb_serial structure is freed.
+ ****************************************************************************/
+
+static void mos7840_release(struct usb_serial *serial)
+{
+       int i;
+       struct moschip_port *mos7840_port;
+       dbg("%s", " release :entering..........");
+
+       if (!serial) {
+               dbg("%s", "Invalid Handler");
+               return;
+       }
+
+       /* check for the ports to be closed,close the ports and disconnect */
+
+       /* free private structure allocated for serial port  *
+        * stop reads and writes on all ports                */
+
+       for (i = 0; i < serial->num_ports; ++i) {
+               mos7840_port = mos7840_get_port_private(serial->port[i]);
+               dbg("mos7840_port %d = %p", i, mos7840_port);
+               if (mos7840_port) {
                        kfree(mos7840_port->ctrl_buf);
                        kfree(mos7840_port->dr);
                        kfree(mos7840_port);
                }
-               mos7840_set_port_private(serial->port[i], NULL);
        }
 
-       dbg("%s\n", "Thank u :: ");
+       dbg("%s", "Thank u :: ");
 
 }
 
@@ -2701,7 +2738,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
        .tiocmget = mos7840_tiocmget,
        .tiocmset = mos7840_tiocmset,
        .attach = mos7840_startup,
-       .shutdown = mos7840_shutdown,
+       .disconnect = mos7840_disconnect,
+       .release = mos7840_release,
        .read_bulk_callback = mos7840_bulk_in_callback,
        .read_int_callback = mos7840_interrupt_callback,
 };
@@ -2714,7 +2752,7 @@ static int __init moschip7840_init(void)
 {
        int retval;
 
-       dbg("%s \n", " mos7840_init :entering..........");
+       dbg("%s", " mos7840_init :entering..........");
 
        /* Register with the usb serial */
        retval = usb_serial_register(&moschip7840_4port_device);
@@ -2722,14 +2760,14 @@ static int __init moschip7840_init(void)
        if (retval)
                goto failed_port_device_register;
 
-       dbg("%s\n", "Entring...");
+       dbg("%s", "Entering...");
        printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
               DRIVER_DESC "\n");
 
        /* Register with the usb */
        retval = usb_register(&io_driver);
        if (retval == 0) {
-               dbg("%s\n", "Leaving...");
+               dbg("%s", "Leaving...");
                return 0;
        }
        usb_serial_deregister(&moschip7840_4port_device);
@@ -2744,13 +2782,13 @@ failed_port_device_register:
 static void __exit moschip7840_exit(void)
 {
 
-       dbg("%s \n", " mos7840_exit :entering..........");
+       dbg("%s", " mos7840_exit :entering..........");
 
        usb_deregister(&io_driver);
 
        usb_serial_deregister(&moschip7840_4port_device);
 
-       dbg("%s\n", "Entring...");
+       dbg("%s", "Entering...");
 }
 
 module_init(moschip7840_init);
index 1104617334f50d9ff4e9afbbe3d3b21934f92cc3..56857ddbd70be66aa33931376fd20e0607869883 100644 (file)
@@ -72,7 +72,8 @@ static void omninet_write_bulk_callback(struct urb *urb);
 static int  omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
                                const unsigned char *buf, int count);
 static int  omninet_write_room(struct tty_struct *tty);
-static void omninet_shutdown(struct usb_serial *serial);
+static void omninet_disconnect(struct usb_serial *serial);
+static void omninet_release(struct usb_serial *serial);
 static int omninet_attach(struct usb_serial *serial);
 
 static struct usb_device_id id_table[] = {
@@ -108,7 +109,8 @@ static struct usb_serial_driver zyxel_omninet_device = {
        .write_room =           omninet_write_room,
        .read_bulk_callback =   omninet_read_bulk_callback,
        .write_bulk_callback =  omninet_write_bulk_callback,
-       .shutdown =             omninet_shutdown,
+       .disconnect =           omninet_disconnect,
+       .release =              omninet_release,
 };
 
 
@@ -345,13 +347,22 @@ static void omninet_write_bulk_callback(struct urb *urb)
 }
 
 
-static void omninet_shutdown(struct usb_serial *serial)
+static void omninet_disconnect(struct usb_serial *serial)
 {
        struct usb_serial_port *wport = serial->port[1];
-       struct usb_serial_port *port = serial->port[0];
+
        dbg("%s", __func__);
 
        usb_kill_urb(wport->write_urb);
+}
+
+
+static void omninet_release(struct usb_serial *serial)
+{
+       struct usb_serial_port *port = serial->port[0];
+
+       dbg("%s", __func__);
+
        kfree(usb_get_serial_port_data(port));
 }
 
index c20480aa975558c9fdac755fe0323991e6c1b4f6..336bba79ad32d634dd6679ec075a54574ea45526 100644 (file)
@@ -463,7 +463,7 @@ error:
        return retval;
 }
 
-static void opticon_shutdown(struct usb_serial *serial)
+static void opticon_disconnect(struct usb_serial *serial)
 {
        struct opticon_private *priv = usb_get_serial_data(serial);
 
@@ -471,9 +471,16 @@ static void opticon_shutdown(struct usb_serial *serial)
 
        usb_kill_urb(priv->bulk_read_urb);
        usb_free_urb(priv->bulk_read_urb);
+}
+
+static void opticon_release(struct usb_serial *serial)
+{
+       struct opticon_private *priv = usb_get_serial_data(serial);
+
+       dbg("%s", __func__);
+
        kfree(priv->bulk_in_buffer);
        kfree(priv);
-       usb_set_serial_data(serial, NULL);
 }
 
 static int opticon_suspend(struct usb_interface *intf, pm_message_t message)
@@ -524,7 +531,8 @@ static struct usb_serial_driver opticon_device = {
        .close =                opticon_close,
        .write =                opticon_write,
        .write_room =           opticon_write_room,
-       .shutdown =             opticon_shutdown,
+       .disconnect =           opticon_disconnect,
+       .release =              opticon_release,
        .throttle =             opticon_throttle,
        .unthrottle =           opticon_unthrottle,
        .ioctl =                opticon_ioctl,
index a16d69fadba1cd9fa7088b9baa3a598b6402c2d9..575816e6ba371fb4ef5fbeeba3668e559b86bfe2 100644 (file)
 #include <linux/usb/serial.h>
 
 /* Function prototypes */
+static int  option_probe(struct usb_serial *serial,
+                       const struct usb_device_id *id);
 static int  option_open(struct tty_struct *tty, struct usb_serial_port *port,
                                                        struct file *filp);
 static void option_close(struct usb_serial_port *port);
 static void option_dtr_rts(struct usb_serial_port *port, int on);
 
 static int  option_startup(struct usb_serial *serial);
-static void option_shutdown(struct usb_serial *serial);
+static void option_disconnect(struct usb_serial *serial);
+static void option_release(struct usb_serial *serial);
 static int  option_write_room(struct tty_struct *tty);
 
 static void option_instat_callback(struct urb *urb);
@@ -202,9 +205,9 @@ static int  option_resume(struct usb_serial *serial);
 #define NOVATELWIRELESS_PRODUCT_MC727          0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D         0x4400
 #define NOVATELWIRELESS_PRODUCT_U727           0x5010
+#define NOVATELWIRELESS_PRODUCT_MC760          0x6000
 
 /* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000
 #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
 #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
@@ -305,6 +308,10 @@ static int  option_resume(struct usb_serial *serial);
 #define DLINK_PRODUCT_DWM_652                  0x3e04
 
 
+/* TOSHIBA PRODUCTS */
+#define TOSHIBA_VENDOR_ID                      0x0930
+#define TOSHIBA_PRODUCT_HSDPA_MINICARD         0x1302
+
 static struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -422,7 +429,7 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
@@ -523,6 +530,7 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
        { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
+       { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -550,6 +558,7 @@ static struct usb_serial_driver option_1port_device = {
        .usb_driver        = &option_driver,
        .id_table          = option_ids,
        .num_ports         = 1,
+       .probe             = option_probe,
        .open              = option_open,
        .close             = option_close,
        .dtr_rts           = option_dtr_rts,
@@ -560,7 +569,8 @@ static struct usb_serial_driver option_1port_device = {
        .tiocmget          = option_tiocmget,
        .tiocmset          = option_tiocmset,
        .attach            = option_startup,
-       .shutdown          = option_shutdown,
+       .disconnect        = option_disconnect,
+       .release           = option_release,
        .read_int_callback = option_instat_callback,
        .suspend           = option_suspend,
        .resume            = option_resume,
@@ -626,6 +636,18 @@ static void __exit option_exit(void)
 module_init(option_init);
 module_exit(option_exit);
 
+static int option_probe(struct usb_serial *serial,
+                       const struct usb_device_id *id)
+{
+       /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
+       if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
+               serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
+               serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
+               return -ENODEV;
+
+       return 0;
+}
+
 static void option_set_termios(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios)
 {
@@ -1129,7 +1151,14 @@ static void stop_read_write_urbs(struct usb_serial *serial)
        }
 }
 
-static void option_shutdown(struct usb_serial *serial)
+static void option_disconnect(struct usb_serial *serial)
+{
+       dbg("%s", __func__);
+
+       stop_read_write_urbs(serial);
+}
+
+static void option_release(struct usb_serial *serial)
 {
        int i, j;
        struct usb_serial_port *port;
@@ -1137,8 +1166,6 @@ static void option_shutdown(struct usb_serial *serial)
 
        dbg("%s", __func__);
 
-       stop_read_write_urbs(serial);
-
        /* Now free them */
        for (i = 0; i < serial->num_ports; ++i) {
                port = serial->port[i];
index 7de54781fe614e96d32ee69327822f02416c3801..3cece27325e71d90c7042aabfd812d70b2d158d4 100644 (file)
@@ -159,7 +159,7 @@ static int oti6858_tiocmget(struct tty_struct *tty, struct file *file);
 static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
                                unsigned int set, unsigned int clear);
 static int oti6858_startup(struct usb_serial *serial);
-static void oti6858_shutdown(struct usb_serial *serial);
+static void oti6858_release(struct usb_serial *serial);
 
 /* functions operating on buffers */
 static struct oti6858_buf *oti6858_buf_alloc(unsigned int size);
@@ -194,7 +194,7 @@ static struct usb_serial_driver oti6858_device = {
        .write_room =           oti6858_write_room,
        .chars_in_buffer =      oti6858_chars_in_buffer,
        .attach =               oti6858_startup,
-       .shutdown =             oti6858_shutdown,
+       .release =              oti6858_release,
 };
 
 struct oti6858_private {
@@ -782,7 +782,7 @@ static int oti6858_ioctl(struct tty_struct *tty, struct file *file,
 }
 
 
-static void oti6858_shutdown(struct usb_serial *serial)
+static void oti6858_release(struct usb_serial *serial)
 {
        struct oti6858_private *priv;
        int i;
@@ -794,7 +794,6 @@ static void oti6858_shutdown(struct usb_serial *serial)
                if (priv) {
                        oti6858_buf_free(priv->buf);
                        kfree(priv);
-                       usb_set_serial_port_data(serial->port[i], NULL);
                }
        }
 }
index e02dc3d643c7ae1279f467b21584e2899a82dfb7..ec6c132a25b56f0a63ffa3b0b3a702594d1097b3 100644 (file)
@@ -878,7 +878,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
                dbg("%s - error sending break = %d", __func__, result);
 }
 
-static void pl2303_shutdown(struct usb_serial *serial)
+static void pl2303_release(struct usb_serial *serial)
 {
        int i;
        struct pl2303_private *priv;
@@ -890,7 +890,6 @@ static void pl2303_shutdown(struct usb_serial *serial)
                if (priv) {
                        pl2303_buf_free(priv->buf);
                        kfree(priv);
-                       usb_set_serial_port_data(serial->port[i], NULL);
                }
        }
 }
@@ -927,6 +926,8 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
        spin_lock_irqsave(&priv->lock, flags);
        priv->line_status = data[status_idx];
        spin_unlock_irqrestore(&priv->lock, flags);
+       if (priv->line_status & UART_BREAK_ERROR)
+               usb_serial_handle_break(port);
        wake_up_interruptible(&priv->delta_msr_wait);
 }
 
@@ -1037,7 +1038,8 @@ static void pl2303_read_bulk_callback(struct urb *urb)
                if (line_status & UART_OVERRUN_ERROR)
                        tty_insert_flip_char(tty, 0, TTY_OVERRUN);
                for (i = 0; i < urb->actual_length; ++i)
-                       tty_insert_flip_char(tty, data[i], tty_flag);
+                       if (!usb_serial_handle_sysrq_char(port, data[i]))
+                               tty_insert_flip_char(tty, data[i], tty_flag);
                tty_flip_buffer_push(tty);
        }
        tty_kref_put(tty);
@@ -1120,7 +1122,7 @@ static struct usb_serial_driver pl2303_device = {
        .write_room =           pl2303_write_room,
        .chars_in_buffer =      pl2303_chars_in_buffer,
        .attach =               pl2303_startup,
-       .shutdown =             pl2303_shutdown,
+       .release =              pl2303_release,
 };
 
 static int __init pl2303_init(void)
index 17ac34f4d66823deb49f02a571a09f28de07fe0a..032f7aeb40a42b17b71b1f4c2ae7b3d1ff221640 100644 (file)
@@ -1,7 +1,10 @@
 /*
   USB Driver for Sierra Wireless
 
-  Copyright (C) 2006, 2007, 2008  Kevin Lloyd <klloyd@sierrawireless.com>
+  Copyright (C) 2006, 2007, 2008  Kevin Lloyd <klloyd@sierrawireless.com>,
+
+  Copyright (C) 2008, 2009  Elina Pasheva, Matthew Safar, Rory Filer
+                       <linux@sierrawireless.com>
 
   IMPORTANT DISCLAIMER: This driver is not commercially supported by
   Sierra Wireless. Use at your own risk.
@@ -14,8 +17,8 @@
   Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
 */
 
-#define DRIVER_VERSION "v.1.3.3"
-#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
+#define DRIVER_VERSION "v.1.3.7"
+#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
 #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
 
 #include <linux/kernel.h>
 #define SWIMS_USB_REQUEST_SetPower     0x00
 #define SWIMS_USB_REQUEST_SetNmea      0x07
 
-#define N_IN_URB       4
-#define N_OUT_URB      4
+#define N_IN_URB       8
+#define N_OUT_URB      64
 #define IN_BUFLEN      4096
 
+#define MAX_TRANSFER           (PAGE_SIZE - 512)
+/* MAX_TRANSFER is chosen so that the VM is not stressed by
+   allocations > PAGE_SIZE and the number of packets in a page
+   is an integer 512 is the largest possible packet on EHCI */
+
 static int debug;
 static int nmea;
 
@@ -46,7 +54,7 @@ struct sierra_iface_info {
 static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
 {
        int result;
-       dev_dbg(&udev->dev, "%s", __func__);
+       dev_dbg(&udev->dev, "%s\n", __func__);
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                        SWIMS_USB_REQUEST_SetPower,     /* __u8 request      */
                        USB_TYPE_VENDOR,                /* __u8 request type */
@@ -61,7 +69,7 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
 static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
 {
        int result;
-       dev_dbg(&udev->dev, "%s", __func__);
+       dev_dbg(&udev->dev, "%s\n", __func__);
        result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                        SWIMS_USB_REQUEST_SetNmea,      /* __u8 request      */
                        USB_TYPE_VENDOR,                /* __u8 request type */
@@ -75,18 +83,22 @@ static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
 
 static int sierra_calc_num_ports(struct usb_serial *serial)
 {
-       int result;
-       int *num_ports = usb_get_serial_data(serial);
-       dev_dbg(&serial->dev->dev, "%s", __func__);
+       int num_ports = 0;
+       u8 ifnum, numendpoints;
 
-       result = *num_ports;
+       dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
-       if (result) {
-               kfree(num_ports);
-               usb_set_serial_data(serial, NULL);
-       }
+       ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+       numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
 
-       return result;
+       /* Dummy interface present on some SKUs should be ignored */
+       if (ifnum == 0x99)
+               num_ports = 0;
+       else if (numendpoints <= 3)
+               num_ports = 1;
+       else
+               num_ports = (numendpoints-1)/2;
+       return num_ports;
 }
 
 static int is_blacklisted(const u8 ifnum,
@@ -111,7 +123,7 @@ static int sierra_calc_interface(struct usb_serial *serial)
        int interface;
        struct usb_interface *p_interface;
        struct usb_host_interface *p_host_interface;
-       dev_dbg(&serial->dev->dev, "%s", __func__);
+       dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
        /* Get the interface structure pointer from the serial struct */
        p_interface = serial->interface;
@@ -132,23 +144,12 @@ static int sierra_probe(struct usb_serial *serial,
 {
        int result = 0;
        struct usb_device *udev;
-       int *num_ports;
        u8 ifnum;
-       u8 numendpoints;
 
-       dev_dbg(&serial->dev->dev, "%s", __func__);
-
-       num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
-       if (!num_ports)
-               return -ENOMEM;
-
-       ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
-       numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
        udev = serial->dev;
+       dev_dbg(&udev->dev, "%s\n", __func__);
 
-       /* Figure out the interface number from the serial structure */
        ifnum = sierra_calc_interface(serial);
-
        /*
         * If this interface supports more than 1 alternate
         * select the 2nd one
@@ -160,20 +161,6 @@ static int sierra_probe(struct usb_serial *serial,
                usb_set_interface(udev, ifnum, 1);
        }
 
-       /* Dummy interface present on some SKUs should be ignored */
-       if (ifnum == 0x99)
-               *num_ports = 0;
-       else if (numendpoints <= 3)
-               *num_ports = 1;
-       else
-               *num_ports = (numendpoints-1)/2;
-
-       /*
-        * save off our num_ports info so that we can use it in the
-        * calc_num_ports callback
-        */
-       usb_set_serial_data(serial, (void *)num_ports);
-
        /* ifnum could have changed - by calling usb_set_interface */
        ifnum = sierra_calc_interface(serial);
 
@@ -289,7 +276,7 @@ static int sierra_send_setup(struct usb_serial_port *port)
        __u16 interface = 0;
        int val = 0;
 
-       dev_dbg(&port->dev, "%s", __func__);
+       dev_dbg(&port->dev, "%s\n", __func__);
 
        portdata = usb_get_serial_port_data(port);
 
@@ -332,7 +319,7 @@ static int sierra_send_setup(struct usb_serial_port *port)
 static void sierra_set_termios(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios)
 {
-       dev_dbg(&port->dev, "%s", __func__);
+       dev_dbg(&port->dev, "%s\n", __func__);
        tty_termios_copy_hw(tty->termios, old_termios);
        sierra_send_setup(port);
 }
@@ -343,7 +330,7 @@ static int sierra_tiocmget(struct tty_struct *tty, struct file *file)
        unsigned int value;
        struct sierra_port_private *portdata;
 
-       dev_dbg(&port->dev, "%s", __func__);
+       dev_dbg(&port->dev, "%s\n", __func__);
        portdata = usb_get_serial_port_data(port);
 
        value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
@@ -394,14 +381,14 @@ static void sierra_outdat_callback(struct urb *urb)
        int status = urb->status;
        unsigned long flags;
 
-       dev_dbg(&port->dev, "%s - port %d", __func__, port->number);
+       dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
 
        /* free up the transfer buffer, as usb_free_urb() does not do this */
        kfree(urb->transfer_buffer);
 
        if (status)
                dev_dbg(&port->dev, "%s - nonzero write bulk status "
-                   "received: %d", __func__, status);
+                   "received: %d\n", __func__, status);
 
        spin_lock_irqsave(&portdata->lock, flags);
        --portdata->outstanding_urbs;
@@ -419,50 +406,61 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
        unsigned long flags;
        unsigned char *buffer;
        struct urb *urb;
-       int status;
+       size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER);
+       int retval = 0;
+
+       /* verify that we actually have some data to write */
+       if (count == 0)
+               return 0;
 
        portdata = usb_get_serial_port_data(port);
 
-       dev_dbg(&port->dev, "%s: write (%d chars)", __func__, count);
+       dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize);
 
        spin_lock_irqsave(&portdata->lock, flags);
+       dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
+               portdata->outstanding_urbs);
        if (portdata->outstanding_urbs > N_OUT_URB) {
                spin_unlock_irqrestore(&portdata->lock, flags);
                dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
                return 0;
        }
        portdata->outstanding_urbs++;
+       dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__,
+               portdata->outstanding_urbs);
        spin_unlock_irqrestore(&portdata->lock, flags);
 
-       buffer = kmalloc(count, GFP_ATOMIC);
+       buffer = kmalloc(writesize, GFP_ATOMIC);
        if (!buffer) {
                dev_err(&port->dev, "out of memory\n");
-               count = -ENOMEM;
+               retval = -ENOMEM;
                goto error_no_buffer;
        }
 
        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
                dev_err(&port->dev, "no more free urbs\n");
-               count = -ENOMEM;
+               retval = -ENOMEM;
                goto error_no_urb;
        }
 
-       memcpy(buffer, buf, count);
+       memcpy(buffer, buf, writesize);
 
-       usb_serial_debug_data(debug, &port->dev, __func__, count, buffer);
+       usb_serial_debug_data(debug, &port->dev, __func__, writesize, buffer);
 
        usb_fill_bulk_urb(urb, serial->dev,
                          usb_sndbulkpipe(serial->dev,
                                          port->bulk_out_endpointAddress),
-                         buffer, count, sierra_outdat_callback, port);
+                         buffer, writesize, sierra_outdat_callback, port);
+
+       /* Handle the need to send a zero length packet */
+       urb->transfer_flags |= URB_ZERO_PACKET;
 
        /* send it down the pipe */
-       status = usb_submit_urb(urb, GFP_ATOMIC);
-       if (status) {
+       retval = usb_submit_urb(urb, GFP_ATOMIC);
+       if (retval) {
                dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
-                       "with status = %d\n", __func__, status);
-               count = status;
+                       "with status = %d\n", __func__, retval);
                goto error;
        }
 
@@ -470,7 +468,7 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
         * really free it when it is finished with it */
        usb_free_urb(urb);
 
-       return count;
+       return writesize;
 error:
        usb_free_urb(urb);
 error_no_urb:
@@ -478,8 +476,10 @@ error_no_urb:
 error_no_buffer:
        spin_lock_irqsave(&portdata->lock, flags);
        --portdata->outstanding_urbs;
+       dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__,
+               portdata->outstanding_urbs);
        spin_unlock_irqrestore(&portdata->lock, flags);
-       return count;
+       return retval;
 }
 
 static void sierra_indat_callback(struct urb *urb)
@@ -491,33 +491,39 @@ static void sierra_indat_callback(struct urb *urb)
        unsigned char *data = urb->transfer_buffer;
        int status = urb->status;
 
-       dbg("%s: %p", __func__, urb);
-
        endpoint = usb_pipeendpoint(urb->pipe);
-       port =  urb->context;
+       port = urb->context;
+
+       dev_dbg(&port->dev, "%s: %p\n", __func__, urb);
 
        if (status) {
                dev_dbg(&port->dev, "%s: nonzero status: %d on"
-                   " endpoint %02x.", __func__, status, endpoint);
+                       " endpoint %02x\n", __func__, status, endpoint);
        } else {
                if (urb->actual_length) {
                        tty = tty_port_tty_get(&port->port);
+
                        tty_buffer_request_room(tty, urb->actual_length);
                        tty_insert_flip_string(tty, data, urb->actual_length);
                        tty_flip_buffer_push(tty);
+
                        tty_kref_put(tty);
-               } else
+                       usb_serial_debug_data(debug, &port->dev, __func__,
+                               urb->actual_length, data);
+               } else {
                        dev_dbg(&port->dev, "%s: empty read urb"
-                               " received", __func__);
-
-               /* Resubmit urb so we continue receiving */
-               if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
-                       err = usb_submit_urb(urb, GFP_ATOMIC);
-                       if (err)
-                               dev_err(&port->dev, "resubmit read urb failed."
-                                       "(%d)\n", err);
+                               " received\n", __func__);
                }
        }
+
+       /* Resubmit urb so we continue receiving */
+       if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
+               err = usb_submit_urb(urb, GFP_ATOMIC);
+               if (err)
+                       dev_err(&port->dev, "resubmit read urb failed."
+                               "(%d)\n", err);
+       }
+
        return;
 }
 
@@ -529,8 +535,7 @@ static void sierra_instat_callback(struct urb *urb)
        struct sierra_port_private *portdata = usb_get_serial_port_data(port);
        struct usb_serial *serial = port->serial;
 
-       dev_dbg(&port->dev, "%s", __func__);
-       dev_dbg(&port->dev, "%s: urb %p port %p has data %p", __func__,
+       dev_dbg(&port->dev, "%s: urb %p port %p has data %p\n", __func__,
                urb, port, portdata);
 
        if (status == 0) {
@@ -550,7 +555,7 @@ static void sierra_instat_callback(struct urb *urb)
                                        sizeof(struct usb_ctrlrequest));
                        struct tty_struct *tty;
 
-                       dev_dbg(&port->dev, "%s: signal x%x", __func__,
+                       dev_dbg(&port->dev, "%s: signal x%x\n", __func__,
                                signals);
 
                        old_dcd_state = portdata->dcd_state;
@@ -565,20 +570,20 @@ static void sierra_instat_callback(struct urb *urb)
                                tty_hangup(tty);
                        tty_kref_put(tty);
                } else {
-                       dev_dbg(&port->dev, "%s: type %x req %x",
+                       dev_dbg(&port->dev, "%s: type %x req %x\n",
                                __func__, req_pkt->bRequestType,
                                req_pkt->bRequest);
                }
        } else
-               dev_dbg(&port->dev, "%s: error %d", __func__, status);
+               dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
 
        /* Resubmit urb so we continue receiving IRQ data */
-       if (status != -ESHUTDOWN) {
+       if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) {
                urb->dev = serial->dev;
                err = usb_submit_urb(urb, GFP_ATOMIC);
                if (err)
-                       dev_dbg(&port->dev, "%s: resubmit intr urb "
-                               "failed. (%d)", __func__, err);
+                       dev_err(&port->dev, "%s: resubmit intr urb "
+                               "failed. (%d)\n", __func__, err);
        }
 }
 
@@ -588,7 +593,7 @@ static int sierra_write_room(struct tty_struct *tty)
        struct sierra_port_private *portdata = usb_get_serial_port_data(port);
        unsigned long flags;
 
-       dev_dbg(&port->dev, "%s - port %d", __func__, port->number);
+       dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
 
        /* try to give a good number back based on if we have any free urbs at
         * this point in time */
@@ -729,7 +734,7 @@ static int sierra_open(struct tty_struct *tty,
 
        portdata = usb_get_serial_port_data(port);
 
-       dev_dbg(&port->dev, "%s", __func__);
+       dev_dbg(&port->dev, "%s\n", __func__);
 
        /* Set some sane defaults */
        portdata->rts_state = 1;
@@ -782,7 +787,7 @@ static int sierra_startup(struct usb_serial *serial)
        struct sierra_port_private *portdata;
        int i;
 
-       dev_dbg(&serial->dev->dev, "%s", __func__);
+       dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
        /* Set Device mode to D0 */
        sierra_set_power_state(serial->dev, 0x0000);
@@ -797,7 +802,7 @@ static int sierra_startup(struct usb_serial *serial)
                portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
                if (!portdata) {
                        dev_dbg(&port->dev, "%s: kmalloc for "
-                               "sierra_port_private (%d) failed!.",
+                               "sierra_port_private (%d) failed!.\n",
                                __func__, i);
                        return -ENOMEM;
                }
@@ -809,13 +814,13 @@ static int sierra_startup(struct usb_serial *serial)
        return 0;
 }
 
-static void sierra_shutdown(struct usb_serial *serial)
+static void sierra_disconnect(struct usb_serial *serial)
 {
        int i;
        struct usb_serial_port *port;
        struct sierra_port_private *portdata;
 
-       dev_dbg(&serial->dev->dev, "%s", __func__);
+       dev_dbg(&serial->dev->dev, "%s\n", __func__);
 
        for (i = 0; i < serial->num_ports; ++i) {
                port = serial->port[i];
@@ -848,7 +853,7 @@ static struct usb_serial_driver sierra_device = {
        .tiocmget          = sierra_tiocmget,
        .tiocmset          = sierra_tiocmset,
        .attach            = sierra_startup,
-       .shutdown          = sierra_shutdown,
+       .disconnect        = sierra_disconnect,
        .read_int_callback = sierra_instat_callback,
 };
 
index 8f7ed8f13996185857021d7d2f730c08f60b2403..3c249d8e8b8eb356fef7aaee52ef2e0157994a01 100644 (file)
@@ -356,7 +356,7 @@ cleanup:
 }
 
 /* call when the device plug out. free all the memory alloced by probe */
-static void spcp8x5_shutdown(struct usb_serial *serial)
+static void spcp8x5_release(struct usb_serial *serial)
 {
        int i;
        struct spcp8x5_private *priv;
@@ -366,7 +366,6 @@ static void spcp8x5_shutdown(struct usb_serial *serial)
                if (priv) {
                        free_ringbuf(priv->buf);
                        kfree(priv);
-                       usb_set_serial_port_data(serial->port[i] , NULL);
                }
        }
 }
@@ -1020,7 +1019,7 @@ static struct usb_serial_driver spcp8x5_device = {
        .write_bulk_callback    = spcp8x5_write_bulk_callback,
        .chars_in_buffer        = spcp8x5_chars_in_buffer,
        .attach                 = spcp8x5_startup,
-       .shutdown               = spcp8x5_shutdown,
+       .release                = spcp8x5_release,
 };
 
 static int __init spcp8x5_init(void)
index 8b07ebc6baeb4e40ba036e07cacee5414b508628..6157fac9366b708c1a390c87fe46c21ab4dda91d 100644 (file)
@@ -267,7 +267,7 @@ error:
        return retval;
 }
 
-static void symbol_shutdown(struct usb_serial *serial)
+static void symbol_disconnect(struct usb_serial *serial)
 {
        struct symbol_private *priv = usb_get_serial_data(serial);
 
@@ -275,9 +275,16 @@ static void symbol_shutdown(struct usb_serial *serial)
 
        usb_kill_urb(priv->int_urb);
        usb_free_urb(priv->int_urb);
+}
+
+static void symbol_release(struct usb_serial *serial)
+{
+       struct symbol_private *priv = usb_get_serial_data(serial);
+
+       dbg("%s", __func__);
+
        kfree(priv->int_buffer);
        kfree(priv);
-       usb_set_serial_data(serial, NULL);
 }
 
 static struct usb_driver symbol_driver = {
@@ -299,7 +306,8 @@ static struct usb_serial_driver symbol_device = {
        .attach =               symbol_startup,
        .open =                 symbol_open,
        .close =                symbol_close,
-       .shutdown =             symbol_shutdown,
+       .disconnect =           symbol_disconnect,
+       .release =              symbol_release,
        .throttle =             symbol_throttle,
        .unthrottle =           symbol_unthrottle,
 };
index 42cb04c403beee3900e6fa33451913d0e0a3b29e..991d8232e3765c39de562ec8e09181d0e8d4c547 100644 (file)
@@ -97,7 +97,7 @@ struct ti_device {
 /* Function Declarations */
 
 static int ti_startup(struct usb_serial *serial);
-static void ti_shutdown(struct usb_serial *serial);
+static void ti_release(struct usb_serial *serial);
 static int ti_open(struct tty_struct *tty, struct usb_serial_port *port,
                struct file *file);
 static void ti_close(struct usb_serial_port *port);
@@ -230,7 +230,7 @@ static struct usb_serial_driver ti_1port_device = {
        .id_table               = ti_id_table_3410,
        .num_ports              = 1,
        .attach                 = ti_startup,
-       .shutdown               = ti_shutdown,
+       .release                = ti_release,
        .open                   = ti_open,
        .close                  = ti_close,
        .write                  = ti_write,
@@ -258,7 +258,7 @@ static struct usb_serial_driver ti_2port_device = {
        .id_table               = ti_id_table_5052,
        .num_ports              = 2,
        .attach                 = ti_startup,
-       .shutdown               = ti_shutdown,
+       .release                = ti_release,
        .open                   = ti_open,
        .close                  = ti_close,
        .write                  = ti_write,
@@ -473,7 +473,7 @@ free_tdev:
 }
 
 
-static void ti_shutdown(struct usb_serial *serial)
+static void ti_release(struct usb_serial *serial)
 {
        int i;
        struct ti_device *tdev = usb_get_serial_data(serial);
@@ -486,12 +486,10 @@ static void ti_shutdown(struct usb_serial *serial)
                if (tport) {
                        ti_buf_free(tport->tp_write_buf);
                        kfree(tport);
-                       usb_set_serial_port_data(serial->port[i], NULL);
                }
        }
 
        kfree(tdev);
-       usb_set_serial_data(serial, NULL);
 }
 
 
index 1967a7edc10c51fab8263aafc43a425a2b28e686..d595aa5586a733003532e6f0a7a95e051e3ce659 100644 (file)
@@ -141,6 +141,14 @@ static void destroy_serial(struct kref *kref)
        if (serial->minor != SERIAL_TTY_NO_MINOR)
                return_serial(serial);
 
+       serial->type->release(serial);
+
+       for (i = 0; i < serial->num_ports; ++i) {
+               port = serial->port[i];
+               if (port)
+                       put_device(&port->dev);
+       }
+
        /* If this is a "fake" port, we have to clean it up here, as it will
         * not get cleaned up in port_release() as it was never registered with
         * the driver core */
@@ -148,9 +156,8 @@ static void destroy_serial(struct kref *kref)
                for (i = serial->num_ports;
                                        i < serial->num_port_pointers; ++i) {
                        port = serial->port[i];
-                       if (!port)
-                               continue;
-                       port_free(port);
+                       if (port)
+                               port_free(port);
                }
        }
 
@@ -1046,10 +1053,15 @@ int usb_serial_probe(struct usb_interface *interface,
 
                dev_set_name(&port->dev, "ttyUSB%d", port->number);
                dbg ("%s - registering %s", __func__, dev_name(&port->dev));
+               port->dev_state = PORT_REGISTERING;
                retval = device_register(&port->dev);
-               if (retval)
+               if (retval) {
                        dev_err(&port->dev, "Error registering port device, "
                                "continuing\n");
+                       port->dev_state = PORT_UNREGISTERED;
+               } else {
+                       port->dev_state = PORT_REGISTERED;
+               }
        }
 
        usb_serial_console_init(debug, minor);
@@ -1113,10 +1125,6 @@ void usb_serial_disconnect(struct usb_interface *interface)
        serial->disconnected = 1;
        mutex_unlock(&serial->disc_mutex);
 
-       /* Unfortunately, many of the sub-drivers expect the port structures
-        * to exist when their shutdown method is called, so we have to go
-        * through this awkward two-step unregistration procedure.
-        */
        for (i = 0; i < serial->num_ports; ++i) {
                port = serial->port[i];
                if (port) {
@@ -1130,17 +1138,25 @@ void usb_serial_disconnect(struct usb_interface *interface)
                        }
                        kill_traffic(port);
                        cancel_work_sync(&port->work);
-                       device_del(&port->dev);
-               }
-       }
-       serial->type->shutdown(serial);
-       for (i = 0; i < serial->num_ports; ++i) {
-               port = serial->port[i];
-               if (port) {
-                       put_device(&port->dev);
-                       serial->port[i] = NULL;
+                       if (port->dev_state == PORT_REGISTERED) {
+
+                               /* Make sure the port is bound so that the
+                                * driver's port_remove method is called.
+                                */
+                               if (!port->dev.driver) {
+                                       int rc;
+
+                                       port->dev.driver =
+                                                       &serial->type->driver;
+                                       rc = device_bind_driver(&port->dev);
+                               }
+                               port->dev_state = PORT_UNREGISTERING;
+                               device_del(&port->dev);
+                               port->dev_state = PORT_UNREGISTERED;
+                       }
                }
        }
+       serial->type->disconnect(serial);
 
        /* let the last holder of this object
         * cause it to be cleaned up */
@@ -1318,7 +1334,8 @@ static void fixup_generic(struct usb_serial_driver *device)
        set_to_generic_if_null(device, chars_in_buffer);
        set_to_generic_if_null(device, read_bulk_callback);
        set_to_generic_if_null(device, write_bulk_callback);
-       set_to_generic_if_null(device, shutdown);
+       set_to_generic_if_null(device, disconnect);
+       set_to_generic_if_null(device, release);
 }
 
 int usb_serial_register(struct usb_serial_driver *driver)
index 6c9cbb59552a72626181b7ba6b5e9ee3811a1938..614800972dc3bb20f67431fd65c3b0ed9bf9146f 100644 (file)
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
 
+#define URB_DEBUG_MAX_IN_FLIGHT_URBS   4000
 #define USB_DEBUG_MAX_PACKET_SIZE      8
+#define USB_DEBUG_BRK_SIZE             8
+static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
+       0x00,
+       0xff,
+       0x01,
+       0xfe,
+       0x00,
+       0xfe,
+       0x01,
+       0xff,
+};
 
 static struct usb_device_id id_table [] = {
        { USB_DEVICE(0x0525, 0x127a) },
@@ -38,6 +50,32 @@ static int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port,
        return usb_serial_generic_open(tty, port, filp);
 }
 
+/* This HW really does not support a serial break, so one will be
+ * emulated when ever the break state is set to true.
+ */
+static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
+{
+       struct usb_serial_port *port = tty->driver_data;
+       if (!break_state)
+               return;
+       usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
+}
+
+static void usb_debug_read_bulk_callback(struct urb *urb)
+{
+       struct usb_serial_port *port = urb->context;
+
+       if (urb->actual_length == USB_DEBUG_BRK_SIZE &&
+           memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
+                  USB_DEBUG_BRK_SIZE) == 0) {
+               usb_serial_handle_break(port);
+               usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
+               return;
+       }
+
+       usb_serial_generic_read_bulk_callback(urb);
+}
+
 static struct usb_serial_driver debug_device = {
        .driver = {
                .owner =        THIS_MODULE,
@@ -46,6 +84,9 @@ static struct usb_serial_driver debug_device = {
        .id_table =             id_table,
        .num_ports =            1,
        .open =                 usb_debug_open,
+       .max_in_flight_urbs =   URB_DEBUG_MAX_IN_FLIGHT_URBS,
+       .break_ctl =            usb_debug_break_ctl,
+       .read_bulk_callback =   usb_debug_read_bulk_callback,
 };
 
 static int __init debug_init(void)
index b15f1c0e1d4acba4adcbadf66ac98a9870e8c994..f5d0f64dcc52ee2c0a38e7d86b47a76181e197dc 100644 (file)
@@ -47,7 +47,7 @@ static void visor_unthrottle(struct tty_struct *tty);
 static int  visor_probe(struct usb_serial *serial,
                                        const struct usb_device_id *id);
 static int  visor_calc_num_ports(struct usb_serial *serial);
-static void visor_shutdown(struct usb_serial *serial);
+static void visor_release(struct usb_serial *serial);
 static void visor_write_bulk_callback(struct urb *urb);
 static void visor_read_bulk_callback(struct urb *urb);
 static void visor_read_int_callback(struct urb *urb);
@@ -202,7 +202,7 @@ static struct usb_serial_driver handspring_device = {
        .attach =               treo_attach,
        .probe =                visor_probe,
        .calc_num_ports =       visor_calc_num_ports,
-       .shutdown =             visor_shutdown,
+       .release =              visor_release,
        .write =                visor_write,
        .write_room =           visor_write_room,
        .write_bulk_callback =  visor_write_bulk_callback,
@@ -227,7 +227,7 @@ static struct usb_serial_driver clie_5_device = {
        .attach =               clie_5_attach,
        .probe =                visor_probe,
        .calc_num_ports =       visor_calc_num_ports,
-       .shutdown =             visor_shutdown,
+       .release =              visor_release,
        .write =                visor_write,
        .write_room =           visor_write_room,
        .write_bulk_callback =  visor_write_bulk_callback,
@@ -918,7 +918,7 @@ static int clie_5_attach(struct usb_serial *serial)
        return generic_startup(serial);
 }
 
-static void visor_shutdown(struct usb_serial *serial)
+static void visor_release(struct usb_serial *serial)
 {
        struct visor_private *priv;
        int i;
@@ -927,10 +927,7 @@ static void visor_shutdown(struct usb_serial *serial)
 
        for (i = 0; i < serial->num_ports; i++) {
                priv = usb_get_serial_port_data(serial->port[i]);
-               if (priv) {
-                       usb_set_serial_port_data(serial->port[i], NULL);
-                       kfree(priv);
-               }
+               kfree(priv);
        }
 }
 
index 7c7295d09f344cf7b4b167346e140e75d6394207..8d126dd7a02e4ebf423b54bf4064599721fb98b2 100644 (file)
@@ -144,7 +144,7 @@ static int  whiteheat_firmware_attach(struct usb_serial *serial);
 
 /* function prototypes for the Connect Tech WhiteHEAT serial converter */
 static int  whiteheat_attach(struct usb_serial *serial);
-static void whiteheat_shutdown(struct usb_serial *serial);
+static void whiteheat_release(struct usb_serial *serial);
 static int  whiteheat_open(struct tty_struct *tty,
                        struct usb_serial_port *port, struct file *filp);
 static void whiteheat_close(struct usb_serial_port *port);
@@ -189,7 +189,7 @@ static struct usb_serial_driver whiteheat_device = {
        .id_table =             id_table_std,
        .num_ports =            4,
        .attach =               whiteheat_attach,
-       .shutdown =             whiteheat_shutdown,
+       .release =              whiteheat_release,
        .open =                 whiteheat_open,
        .close =                whiteheat_close,
        .write =                whiteheat_write,
@@ -617,7 +617,7 @@ no_command_buffer:
 }
 
 
-static void whiteheat_shutdown(struct usb_serial *serial)
+static void whiteheat_release(struct usb_serial *serial)
 {
        struct usb_serial_port *command_port;
        struct usb_serial_port *port;
index 2dd9bd4bff56edceec8962d79ba17f72efad8854..ec17c96371afc7a71297ef8e8cbfd17299a0c5e3 100644 (file)
@@ -52,7 +52,7 @@ int usb_stor_euscsi_init(struct us_data *us)
        us->iobuf[0] = 0x1;
        result = usb_stor_control_msg(us, us->send_ctrl_pipe,
                        0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR,
-                       0x01, 0x0, us->iobuf, 0x1, 5*HZ);
+                       0x01, 0x0, us->iobuf, 0x1, 5000);
        US_DEBUGP("-- result is %d\n", result);
 
        return 0;
@@ -80,14 +80,16 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
 
        res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
                        US_BULK_CB_WRAP_LEN, &partial);
-       if(res)
-               return res;
+       if (res)
+               return -EIO;
 
        US_DEBUGP("Getting status packet...\n");
        res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
                        US_BULK_CS_WRAP_LEN, &partial);
+       if (res)
+               return -EIO;
 
-       return (res ? -1 : 0);
+       return 0;
 }
 
 /* This places the HUAWEI E220 devices in multi-port mode */
@@ -99,6 +101,6 @@ int usb_stor_huawei_e220_init(struct us_data *us)
                                      USB_REQ_SET_FEATURE,
                                      USB_TYPE_STANDARD | USB_RECIP_DEVICE,
                                      0x01, 0x0, NULL, 0x0, 1000);
-       US_DEBUGP("usb_control_msg performing result is %d\n", result);
-       return (result ? 0 : -1);
+       US_DEBUGP("Huawei mode set result is %d\n", result);
+       return (result ? 0 : -ENODEV);
 }
index 353f922939a4cd8a6e78c04aae37b328b4ce6176..d41cc0a970f79a147660da239bdc014612396354 100644 (file)
@@ -37,7 +37,7 @@ MODULE_PARM_DESC(option_zero_cd, "ZeroCD mode (1=Force Modem (default),"
 
 #define RESPONSE_LEN 1024
 
-static int option_rezero(struct us_data *us, int ep_in, int ep_out)
+static int option_rezero(struct us_data *us)
 {
        const unsigned char rezero_msg[] = {
          0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
@@ -54,10 +54,10 @@ static int option_rezero(struct us_data *us, int ep_in, int ep_out)
        if (buffer == NULL)
                return USB_STOR_TRANSPORT_ERROR;
 
-       memcpy(buffer, rezero_msg, sizeof (rezero_msg));
+       memcpy(buffer, rezero_msg, sizeof(rezero_msg));
        result = usb_stor_bulk_transfer_buf(us,
-                       usb_sndbulkpipe(us->pusb_dev, ep_out),
-                       buffer, sizeof (rezero_msg), NULL);
+                       us->send_bulk_pipe,
+                       buffer, sizeof(rezero_msg), NULL);
        if (result != USB_STOR_XFER_GOOD) {
                result = USB_STOR_XFER_ERROR;
                goto out;
@@ -66,9 +66,15 @@ static int option_rezero(struct us_data *us, int ep_in, int ep_out)
        /* Some of the devices need to be asked for a response, but we don't
         * care what that response is.
         */
-       result = usb_stor_bulk_transfer_buf(us,
-                       usb_sndbulkpipe(us->pusb_dev, ep_out),
+       usb_stor_bulk_transfer_buf(us,
+                       us->recv_bulk_pipe,
                        buffer, RESPONSE_LEN, NULL);
+
+       /* Read the CSW */
+       usb_stor_bulk_transfer_buf(us,
+                       us->recv_bulk_pipe,
+                       buffer, 13, NULL);
+
        result = USB_STOR_XFER_GOOD;
 
 out:
@@ -76,63 +82,75 @@ out:
        return result;
 }
 
-int option_ms_init(struct us_data *us)
+static int option_inquiry(struct us_data *us)
 {
-       struct usb_device *udev;
-       struct usb_interface *intf;
-       struct usb_host_interface *iface_desc;
-       struct usb_endpoint_descriptor *endpoint = NULL;
-       u8 ep_in = 0, ep_out = 0;
-       int ep_in_size = 0, ep_out_size = 0;
-       int i, result;
-
-       udev = us->pusb_dev;
-       intf = us->pusb_intf;
-
-       /* Ensure it's really a ZeroCD device; devices that are already
-        * in modem mode return 0xFF for class, subclass, and protocol.
-        */
-       if (udev->descriptor.bDeviceClass != 0 ||
-           udev->descriptor.bDeviceSubClass != 0 ||
-           udev->descriptor.bDeviceProtocol != 0)
-               return USB_STOR_TRANSPORT_GOOD;
+       const unsigned char inquiry_msg[] = {
+         0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
+         0x24, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x12,
+         0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
+         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+       };
+       char *buffer;
+       int result;
 
-       US_DEBUGP("Option MS: option_ms_init called\n");
+       US_DEBUGP("Option MS: %s", "device inquiry for vendor name\n");
 
-       /* Find the right mass storage interface */
-       iface_desc = intf->cur_altsetting;
-       if (iface_desc->desc.bInterfaceClass != 0x8 ||
-           iface_desc->desc.bInterfaceSubClass != 0x6 ||
-           iface_desc->desc.bInterfaceProtocol != 0x50) {
-               US_DEBUGP("Option MS: mass storage interface not found, no action "
-                         "required\n");
-               return USB_STOR_TRANSPORT_GOOD;
-       }
+       buffer = kzalloc(0x24, GFP_KERNEL);
+       if (buffer == NULL)
+               return USB_STOR_TRANSPORT_ERROR;
 
-       /* Find the mass storage bulk endpoints */
-       for (i = 0; i < iface_desc->desc.bNumEndpoints && (!ep_in_size || !ep_out_size); ++i) {
-               endpoint = &iface_desc->endpoint[i].desc;
-
-               if (usb_endpoint_is_bulk_in(endpoint)) {
-                       ep_in = usb_endpoint_num(endpoint);
-                       ep_in_size = le16_to_cpu(endpoint->wMaxPacketSize);
-               } else if (usb_endpoint_is_bulk_out(endpoint)) {
-                       ep_out = usb_endpoint_num(endpoint);
-                       ep_out_size = le16_to_cpu(endpoint->wMaxPacketSize);
-               }
+       memcpy(buffer, inquiry_msg, sizeof(inquiry_msg));
+       result = usb_stor_bulk_transfer_buf(us,
+                       us->send_bulk_pipe,
+                       buffer, sizeof(inquiry_msg), NULL);
+       if (result != USB_STOR_XFER_GOOD) {
+               result = USB_STOR_XFER_ERROR;
+               goto out;
        }
 
-       /* Can't find the mass storage endpoints */
-       if (!ep_in_size || !ep_out_size) {
-               US_DEBUGP("Option MS: mass storage endpoints not found, no action "
-                         "required\n");
-               return USB_STOR_TRANSPORT_GOOD;
+       result = usb_stor_bulk_transfer_buf(us,
+                       us->recv_bulk_pipe,
+                       buffer, 0x24, NULL);
+       if (result != USB_STOR_XFER_GOOD) {
+               result = USB_STOR_XFER_ERROR;
+               goto out;
        }
 
+       result = memcmp(buffer+8, "Option", 6);
+
+       /* Read the CSW */
+       usb_stor_bulk_transfer_buf(us,
+                       us->recv_bulk_pipe,
+                       buffer, 13, NULL);
+
+out:
+       kfree(buffer);
+       return result;
+}
+
+
+int option_ms_init(struct us_data *us)
+{
+       int result;
+
+       US_DEBUGP("Option MS: option_ms_init called\n");
+
+       /* Additional test for vendor information via INQUIRY,
+        * because some vendor/product IDs are ambiguous
+        */
+       result = option_inquiry(us);
+       if (result != 0) {
+               US_DEBUGP("Option MS: vendor is not Option or not determinable,"
+                         " no action taken\n");
+               return 0;
+       } else
+               US_DEBUGP("Option MS: this is a genuine Option device,"
+                         " proceeding\n");
+
        /* Force Modem mode */
        if (option_zero_cd == ZCD_FORCE_MODEM) {
                US_DEBUGP("Option MS: %s", "Forcing Modem Mode\n");
-               result = option_rezero(us, ep_in, ep_out);
+               result = option_rezero(us);
                if (result != USB_STOR_XFER_GOOD)
                        US_DEBUGP("Option MS: Failed to switch to modem mode.\n");
                return -EIO;
@@ -142,6 +160,6 @@ int option_ms_init(struct us_data *us)
                          " requests it\n");
        }
 
-       return USB_STOR_TRANSPORT_GOOD;
+       return 0;
 }
 
index 4359a2cb42df396d8e7996761814ff40755846dd..4395c4100ec2096313933d029e8b25191eefe6c9 100644 (file)
@@ -202,6 +202,6 @@ int sierra_ms_init(struct us_data *us)
 complete:
        result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
 
-       return USB_STOR_TRANSPORT_GOOD;
+       return 0;
 }
 
index 4b8b69045fe6073f26193bed04b1aef818a14fb6..1b9c5dd0fb27b00904c1cb9357f0402c9e12a323 100644 (file)
@@ -1385,7 +1385,7 @@ UNUSUAL_DEV(  0x10d6, 0x2200, 0x0100, 0x0100,
 UNUSUAL_DEV(  0x1186, 0x3e04, 0x0000, 0x0000,
            "D-Link",
            "USB Mass Storage",
-           US_SC_DEVICE, US_PR_DEVICE, option_ms_init, 0),
+           US_SC_DEVICE, US_PR_DEVICE, option_ms_init, US_FL_IGNORE_DEVICE),
 
 /* Reported by Kevin Lloyd <linux@sierrawireless.com>
  * Entry is needed for the initializer function override,
index 2b5a691064b719fcfa94fdb4bcac82bf0fb26205..932ffdbf86d98b542d3efe05087903926584a86d 100644 (file)
@@ -2104,6 +2104,7 @@ config FB_MB862XX_LIME
        bool "Lime GDC"
        depends on FB_MB862XX
        depends on OF && !FB_MB862XX_PCI_GDC
+       depends on PPC
        select FB_FOREIGN_ENDIAN
        select FB_LITTLE_ENDIAN
        ---help---
index 6995fe1e86d4aae1ad5abd6535e65572f1a59316..0bcc59eb37fa3787b39164a938199f2e1e0ea8ea 100644 (file)
@@ -859,43 +859,6 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
        return 0;
 }
 
-/*
- * Note that we are entered with the kernel locked.
- */
-static int
-acornfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
-       unsigned long off, start;
-       u32 len;
-
-       off = vma->vm_pgoff << PAGE_SHIFT;
-
-       start = info->fix.smem_start;
-       len = PAGE_ALIGN(start & ~PAGE_MASK) + info->fix.smem_len;
-       start &= PAGE_MASK;
-       if ((vma->vm_end - vma->vm_start + off) > len)
-               return -EINVAL;
-       off += start;
-       vma->vm_pgoff = off >> PAGE_SHIFT;
-
-       /* This is an IO map - tell maydump to skip this VMA */
-       vma->vm_flags |= VM_IO;
-
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-       /*
-        * Don't alter the page protection flags; we want to keep the area
-        * cached for better performance.  This does mean that we may miss
-        * some updates to the screen occasionally, but process switches
-        * should cause the caches and buffers to be flushed often enough.
-        */
-       if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-                               vma->vm_end - vma->vm_start,
-                               vma->vm_page_prot))
-               return -EAGAIN;
-       return 0;
-}
-
 static struct fb_ops acornfb_ops = {
        .owner          = THIS_MODULE,
        .fb_check_var   = acornfb_check_var,
@@ -905,7 +868,6 @@ static struct fb_ops acornfb_ops = {
        .fb_fillrect    = cfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
        .fb_imageblit   = cfb_imageblit,
-       .fb_mmap        = acornfb_mmap,
 };
 
 /*
index 2fb63f6ea2f167fcdc1d5148e896fff00f415127..5afd64482f5584fd7c03f0d6267875da6eb3f16f 100644 (file)
@@ -345,7 +345,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
        dev_dbg(dev, "  bpp:        %u\n", var->bits_per_pixel);
        dev_dbg(dev, "  clk:        %lu KHz\n", clk_value_khz);
 
-       if ((PICOS2KHZ(var->pixclock) * var->bits_per_pixel / 8) > clk_value_khz) {
+       if (PICOS2KHZ(var->pixclock) > clk_value_khz) {
                dev_err(dev, "%lu KHz pixel clock is too fast\n", PICOS2KHZ(var->pixclock));
                return -EINVAL;
        }
index 97a1f095f327115524bdb9fcd55da6162d8497d2..515cf1978d19ef21fc5062471e1357826e4da8c6 100644 (file)
@@ -213,7 +213,6 @@ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo)
                         PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb     |
                         PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb       |
                         PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb          |
-                        PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb          |
                         PIXCLKS_CNTL__R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
                 OUTPLL(pllPIXCLKS_CNTL, tmp);
 
@@ -395,7 +394,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
                        PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb      |
                        PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb        |
                        PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb           |
-                       PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb);
+                       PIXCLKS_CNTL__R300_P2G2CLK_DAC_ALWAYS_ONb);
                OUTPLL(pllPIXCLKS_CNTL, tmp);
 
                tmp = INPLL(pllMCLK_MISC);
index 37e60b1d2ed953ff963820e8f386985d973b5fa7..e49ae5edcc008fcc6a698ad07388de8199ea4278 100644 (file)
@@ -323,7 +323,6 @@ static int bfin_bf54x_fb_release(struct fb_info *info, int user)
                bfin_write_EPPI0_CONTROL(0);
                SSYNC();
                disable_dma(CH_EPPI0);
-               memset(fbi->fb_buffer, 0, info->fix.smem_len);
        }
 
        spin_unlock(&fbi->lock);
@@ -530,7 +529,7 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int __init bfin_bf54x_probe(struct platform_device *pdev)
+static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
 {
        struct bfin_bf54xfb_info *info;
        struct fb_info *fbinfo;
@@ -626,14 +625,12 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
                goto out3;
        }
 
-       memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
-
        fbinfo->screen_base = (void *)info->fb_buffer;
        fbinfo->fix.smem_start = (int)info->fb_buffer;
 
        fbinfo->fbops = &bfin_bf54x_fb_ops;
 
-       fbinfo->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+       fbinfo->pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
        if (!fbinfo->pseudo_palette) {
                printk(KERN_ERR DRIVER_NAME
                       "Fail to allocate pseudo_palette\n");
@@ -642,8 +639,6 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
                goto out4;
        }
 
-       memset(fbinfo->pseudo_palette, 0, sizeof(u32) * 16);
-
        if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0)
            < 0) {
                printk(KERN_ERR DRIVER_NAME
@@ -712,7 +707,7 @@ out1:
        return ret;
 }
 
-static int bfin_bf54x_remove(struct platform_device *pdev)
+static int __devexit bfin_bf54x_remove(struct platform_device *pdev)
 {
 
        struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -781,7 +776,7 @@ static int bfin_bf54x_resume(struct platform_device *pdev)
 
 static struct platform_driver bfin_bf54x_driver = {
        .probe = bfin_bf54x_probe,
-       .remove = bfin_bf54x_remove,
+       .remove = __devexit_p(bfin_bf54x_remove),
        .suspend = bfin_bf54x_suspend,
        .resume = bfin_bf54x_resume,
        .driver = {
@@ -790,7 +785,7 @@ static struct platform_driver bfin_bf54x_driver = {
                   },
 };
 
-static int __devinit bfin_bf54x_driver_init(void)
+static int __init bfin_bf54x_driver_init(void)
 {
        return platform_driver_register(&bfin_bf54x_driver);
 }
index 90cfddabf1f7618548275f99ec292f5f6573b8e6..5cc36cfbf07be275ce2b8ea92104c41e4bda1996 100644 (file)
@@ -242,7 +242,6 @@ static int bfin_t350mcqb_fb_release(struct fb_info *info, int user)
                SSYNC();
                disable_dma(CH_PPI);
                bfin_t350mcqb_stop_timers();
-               memset(fbi->fb_buffer, 0, info->fix.smem_len);
        }
 
        spin_unlock(&fbi->lock);
@@ -527,8 +526,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
                goto out3;
        }
 
-       memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
-
        fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
        fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
 
@@ -602,7 +599,7 @@ out1:
        return ret;
 }
 
-static int bfin_t350mcqb_remove(struct platform_device *pdev)
+static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
 {
 
        struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -637,9 +634,6 @@ static int bfin_t350mcqb_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
 {
-       struct fb_info *fbinfo = platform_get_drvdata(pdev);
-       struct bfin_t350mcqbfb_info *info = fbinfo->par;
-
        bfin_t350mcqb_disable_ppi();
        disable_dma(CH_PPI);
        bfin_write_PPI_STATUS(0xFFFF);
@@ -649,9 +643,6 @@ static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t stat
 
 static int bfin_t350mcqb_resume(struct platform_device *pdev)
 {
-       struct fb_info *fbinfo = platform_get_drvdata(pdev);
-       struct bfin_t350mcqbfb_info *info = fbinfo->par;
-
        enable_dma(CH_PPI);
        bfin_t350mcqb_enable_ppi();
 
@@ -664,7 +655,7 @@ static int bfin_t350mcqb_resume(struct platform_device *pdev)
 
 static struct platform_driver bfin_t350mcqb_driver = {
        .probe = bfin_t350mcqb_probe,
-       .remove = bfin_t350mcqb_remove,
+       .remove = __devexit_p(bfin_t350mcqb_remove),
        .suspend = bfin_t350mcqb_suspend,
        .resume = bfin_t350mcqb_resume,
        .driver = {
@@ -673,7 +664,7 @@ static struct platform_driver bfin_t350mcqb_driver = {
                   },
 };
 
-static int __devinit bfin_t350mcqb_driver_init(void)
+static int __init bfin_t350mcqb_driver_init(void)
 {
        return platform_driver_register(&bfin_t350mcqb_driver);
 }
index c7ff3c1a266a3611c2081e71e56bbb29264eaa0b..0c02f8ec4bf3dafe14965025a3a1121e19ab4dfd 100644 (file)
@@ -562,7 +562,7 @@ static int __devinit alloc_carmine_fb(void __iomem *regs, void __iomem *smem_bas
        if (ret < 0)
                goto err_free_fb;
 
-       if (fb_mode > ARRAY_SIZE(carmine_modedb))
+       if (fb_mode >= ARRAY_SIZE(carmine_modedb))
                fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
 
        par->cur_mode = par->new_mode = ~0;
index 777389c40988812c29f5ad31c7c04cc700f6f690..57b9d276497ed07e485311f38e95c6ef86af870e 100644 (file)
@@ -414,7 +414,6 @@ chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
        }
 
        pci_set_drvdata(dp, p);
-       p->device = &dp->dev;
 
        init_chips(p, addr);
 
index 8dea2bc927054783658183f662d9644e2eb27efc..eb12182b20598861937b46496b8716efcae1e366 100644 (file)
@@ -280,6 +280,9 @@ static int __init efifb_probe(struct platform_device *dev)
        info->pseudo_palette = info->par;
        info->par = NULL;
 
+       info->aperture_base = efifb_fix.smem_start;
+       info->aperture_size = size_total;
+
        info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
        if (!info->screen_base) {
                printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
@@ -337,7 +340,7 @@ static int __init efifb_probe(struct platform_device *dev)
        info->fbops = &efifb_ops;
        info->var = efifb_defined;
        info->fix = efifb_fix;
-       info->flags = FBINFO_FLAG_DEFAULT;
+       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE;
 
        if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) {
                printk(KERN_ERR "efifb: cannot allocate colormap\n");
index d412a1ddc12fabb50df8562af40b5df25351d2bd..f8a09bf8d0cdecb0963116a4fb7a7d3793ed798d 100644 (file)
@@ -1462,6 +1462,16 @@ static int fb_check_foreignness(struct fb_info *fi)
        return 0;
 }
 
+static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw)
+{
+       /* is the generic aperture base the same as the HW one */
+       if (gen->aperture_base == hw->aperture_base)
+               return true;
+       /* is the generic aperture base inside the hw base->hw base+size */
+       if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size)
+               return true;
+       return false;
+}
 /**
  *     register_framebuffer - registers a frame buffer device
  *     @fb_info: frame buffer info structure
@@ -1485,6 +1495,23 @@ register_framebuffer(struct fb_info *fb_info)
        if (fb_check_foreignness(fb_info))
                return -ENOSYS;
 
+       /* check all firmware fbs and kick off if the base addr overlaps */
+       for (i = 0 ; i < FB_MAX; i++) {
+               if (!registered_fb[i])
+                       continue;
+
+               if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) {
+                       if (fb_do_apertures_overlap(registered_fb[i], fb_info)) {
+                               printk(KERN_ERR "fb: conflicting fb hw usage "
+                                      "%s vs %s - removing generic driver\n",
+                                      fb_info->fix.id,
+                                      registered_fb[i]->fix.id);
+                               unregister_framebuffer(registered_fb[i]);
+                               break;
+                       }
+               }
+       }
+
        num_registered_fb++;
        for (i = 0 ; i < FB_MAX; i++)
                if (!registered_fb[i])
@@ -1586,6 +1613,10 @@ unregister_framebuffer(struct fb_info *fb_info)
        device_destroy(fb_class, MKDEV(FB_MAJOR, i));
        event.info = fb_info;
        fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
+
+       /* this may free fb info */
+       if (fb_info->fbops->fb_destroy)
+               fb_info->fbops->fb_destroy(fb_info);
 done:
        return ret;
 }
index 3a81060137a22bd29176d885a2cec1facf3c599b..15d200109446aa577f77f0d12d9f2cd66d04f6f2 100644 (file)
@@ -395,17 +395,16 @@ int __init igafb_init(void)
        /* We leak a reference here but as it cannot be unloaded this is
           fine. If you write unload code remember to free it in unload */
        
-       size = sizeof(struct fb_info) + sizeof(struct iga_par) + sizeof(u32)*16;
+       size = sizeof(struct iga_par) + sizeof(u32)*16;
 
-        info = kzalloc(size, GFP_ATOMIC);
+       info = framebuffer_alloc(size, &pdev->dev);
         if (!info) {
                 printk("igafb_init: can't alloc fb_info\n");
                 pci_dev_put(pdev);
                 return -ENOMEM;
         }
 
-       par = (struct iga_par *) (info + 1);
-       
+       par = info->par;
 
        if ((addr = pdev->resource[0].start) == 0) {
                 printk("igafb_init: no memory start\n");
@@ -526,7 +525,6 @@ int __init igafb_init(void)
        info->var = default_var;
        info->fix = igafb_fix;
        info->pseudo_palette = (void *)(par + 1);
-       info->device = &pdev->dev;
 
        if (!iga_init(info, par)) {
                iounmap((void *)par->io_base);
index ace14fe02fc4f49348024b1d1bc964cccffecbdc..0cafd642fbc0f57b5eaee3fe85a60895c30e1d8d 100644 (file)
@@ -1365,6 +1365,11 @@ static int intelfb_set_par(struct fb_info *info)
        DBG_MSG("intelfb_set_par (%dx%d-%d)\n", info->var.xres,
                info->var.yres, info->var.bits_per_pixel);
 
+       /*
+        * Disable VCO prior to timing register change.
+        */
+       OUTREG(DPLL_A, INREG(DPLL_A) & ~DPLL_VCO_ENABLE);
+
        intelfb_blank(FB_BLANK_POWERDOWN, info);
 
        if (ACCEL(dinfo, info))
index b91251d1fe41c9e254ebfb51320545754d45c305..3b437813584cec4f3d73ba5364353ac4a5cbb29b 100644 (file)
@@ -37,22 +37,24 @@ extra-y += $(call logo-cfiles,_clut224,ppm)
 # Gray 256
 extra-y += $(call logo-cfiles,_gray256,pgm)
 
+pnmtologo := scripts/pnmtologo
+
 # Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
 quiet_cmd_logo = LOGO    $@
-       cmd_logo = scripts/pnmtologo \
+       cmd_logo = $(pnmtologo) \
                        -t $(patsubst $*_%,%,$(notdir $(basename $<))) \
                        -n $(notdir $(basename $<)) -o $@ $<
 
-$(obj)/%_mono.c: $(src)/%_mono.pbm FORCE
+$(obj)/%_mono.c: $(src)/%_mono.pbm $(pnmtologo) FORCE
        $(call if_changed,logo)
 
-$(obj)/%_vga16.c: $(src)/%_vga16.ppm FORCE
+$(obj)/%_vga16.c: $(src)/%_vga16.ppm $(pnmtologo) FORCE
        $(call if_changed,logo)
 
-$(obj)/%_clut224.c: $(src)/%_clut224.ppm FORCE
+$(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
        $(call if_changed,logo)
 
-$(obj)/%_gray256.c: $(src)/%_gray256.pgm FORCE
+$(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
        $(call if_changed,logo)
 
 # Files generated that shall be removed upon make clean
index 2e85a2b52d05feada5f9c7a056d8c2575f5c9ba1..ea7a8ccc830c8659f23b13dc98db761b89a6c0da 100644 (file)
 #include <asm/bootinfo.h>
 #endif
 
-extern const struct linux_logo logo_linux_mono;
-extern const struct linux_logo logo_linux_vga16;
-extern const struct linux_logo logo_linux_clut224;
-extern const struct linux_logo logo_blackfin_vga16;
-extern const struct linux_logo logo_blackfin_clut224;
-extern const struct linux_logo logo_dec_clut224;
-extern const struct linux_logo logo_mac_clut224;
-extern const struct linux_logo logo_parisc_clut224;
-extern const struct linux_logo logo_sgi_clut224;
-extern const struct linux_logo logo_sun_clut224;
-extern const struct linux_logo logo_superh_mono;
-extern const struct linux_logo logo_superh_vga16;
-extern const struct linux_logo logo_superh_clut224;
-extern const struct linux_logo logo_m32r_clut224;
-
 static int nologo;
 module_param(nologo, bool, 0);
 MODULE_PARM_DESC(nologo, "Disables startup logo");
index fb64234a3825a2a9d1ee7f24e52e7cd75b969713..a28e3cfbbf707964099cbf6d04a97eb6a6db2860 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
-#if defined(CONFIG_PPC_OF)
+#if defined(CONFIG_OF)
 #include <linux/of_platform.h>
 #endif
 #include "mb862xxfb.h"
index 16186240c5f22ba47b63971847ba6e2384515d27..34e4e79951696bd5ac805cb69fea5780e84018c2 100644 (file)
@@ -264,6 +264,14 @@ static const struct fb_videomode modedb[] = {
        /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
        NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
        0, FB_VMODE_NONINTERLACED
+    }, {
+       /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+       NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
+       0, FB_VMODE_INTERLACED
+    }, {
+       /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+       NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
+       0, FB_VMODE_INTERLACED
     },
 };
 
index e1d9eeb1aeafe432f1e240e4bf30d895cc72c80d..4d8c54c23dd7e320289fadec313d52ec5f333392 100644 (file)
@@ -378,7 +378,6 @@ static void __init offb_init_fb(const char *name, const char *full_name,
        struct fb_fix_screeninfo *fix;
        struct fb_var_screeninfo *var;
        struct fb_info *info;
-       int size;
 
        if (!request_mem_region(res_start, res_size, "offb"))
                return;
@@ -393,15 +392,12 @@ static void __init offb_init_fb(const char *name, const char *full_name,
                return;
        }
 
-       size = sizeof(struct fb_info) + sizeof(u32) * 16;
-
-       info = kmalloc(size, GFP_ATOMIC);
+       info = framebuffer_alloc(sizeof(u32) * 16, NULL);
        
        if (info == 0) {
                release_mem_region(res_start, res_size);
                return;
        }
-       memset(info, 0, size);
 
        fix = &info->fix;
        var = &info->var;
@@ -497,7 +493,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
                iounmap(par->cmap_adr);
                par->cmap_adr = NULL;
                iounmap(info->screen_base);
-               kfree(info);
+               framebuffer_release(info);
                release_mem_region(res_start, res_size);
                return;
        }
index c6dd924976a4f105457bd15c205a2938a07eeee3..36436ee6c1a4f58076b56b9c99f9e6568e613f3a 100644 (file)
@@ -1748,7 +1748,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
        fb_dealloc_cmap(&info->cmap);
        kfree(info->pixmap.addr);
-       kfree(info);
+       framebuffer_release(info);
 }
 
 static struct pci_device_id pm2fb_id_table[] = {
index 0726aecf3b7e084c87f6abdc66ca44f4c16cb631..0deb0a8867b74af88560e7f867f36c262084f17a 100644 (file)
@@ -2,6 +2,7 @@
  *
  * (c) 2004 Simtec Electronics
  * (c) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ * (c) 2009 Kristoffer Ericson <kristoffer.ericson@gmail.com>
  *
  * Driver for Epson S1D13xxx series framebuffer chips
  *
  *  linux/drivers/video/epson1355fb.c
  *  linux/drivers/video/epson/s1d13xxxfb.c (2.4 driver by Epson)
  *
- * Note, currently only tested on S1D13806 with 16bit CRT.
- * As such, this driver might still contain some hardcoded bits relating to
- * S1D13806.
- * Making it work on other S1D13XXX chips should merely be a matter of adding
- * a few switch()s, some missing glue here and there maybe, and split header
- * files.
- *
  * TODO: - handle dual screen display (CRT and LCD at the same time).
  *      - check_var(), mode change, etc.
- *      - PM untested.
- *      - Accelerated interfaces.
- *      - Probably not SMP safe :)
+ *      - probably not SMP safe :)
+ *       - support all bitblt operations on all cards
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file COPYING in the main directory of this archive for
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
-
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/fb.h>
+#include <linux/spinlock_types.h>
+#include <linux/spinlock.h>
 
 #include <asm/io.h>
 
 #include <video/s1d13xxxfb.h>
 
-#define PFX "s1d13xxxfb: "
+#define PFX    "s1d13xxxfb: "
+#define BLIT   "s1d13xxxfb_bitblt: "
 
+/*
+ * set this to enable debugging on general functions
+ */
 #if 0
 #define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0)
 #else
 #endif
 
 /*
- * List of card production ids
+ * set this to enable debugging on 2D acceleration
+ */
+#if 0
+#define dbg_blit(fmt, args...) do { printk(KERN_INFO BLIT fmt, ## args); } while (0)
+#else
+#define dbg_blit(fmt, args...) do { } while (0)
+#endif
+
+/*
+ * we make sure only one bitblt operation is running
+ */
+static DEFINE_SPINLOCK(s1d13xxxfb_bitblt_lock);
+
+/*
+ * list of card production ids
  */
 static const int s1d13xxxfb_prod_ids[] = {
        S1D13505_PROD_ID,
@@ -69,7 +81,7 @@ static const char *s1d13xxxfb_prod_names[] = {
 };
 
 /*
- * Here we define the default struct fb_fix_screeninfo
+ * here we define the default struct fb_fix_screeninfo
  */
 static struct fb_fix_screeninfo __devinitdata s1d13xxxfb_fix = {
        .id             = S1D_FBID,
@@ -145,8 +157,10 @@ crt_enable(struct s1d13xxxfb_par *par, int enable)
        s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode);
 }
 
-/* framebuffer control routines */
 
+/*************************************************************
+ framebuffer control functions
+ *************************************************************/
 static inline void
 s1d13xxxfb_setup_pseudocolour(struct fb_info *info)
 {
@@ -242,13 +256,13 @@ s1d13xxxfb_set_par(struct fb_info *info)
 }
 
 /**
- *     s1d13xxxfb_setcolreg - sets a color register.
- *      @regno: Which register in the CLUT we are programming
- *      @red: The red value which can be up to 16 bits wide
+ *     s1d13xxxfb_setcolreg - sets a color register.
+ *     @regno: Which register in the CLUT we are programming
+ *     @red: The red value which can be up to 16 bits wide
  *     @green: The green value which can be up to 16 bits wide
  *     @blue:  The blue value which can be up to 16 bits wide.
  *     @transp: If supported the alpha value which can be up to 16 bits wide.
- *      @info: frame buffer info structure
+ *     @info: frame buffer info structure
  *
  *     Returns negative errno on error, or zero on success.
  */
@@ -351,15 +365,15 @@ s1d13xxxfb_blank(int blank_mode, struct fb_info *info)
 }
 
 /**
- *      s1d13xxxfb_pan_display - Pans the display.
- *      @var: frame buffer variable screen structure
- *      @info: frame buffer structure that represents a single frame buffer
+ *     s1d13xxxfb_pan_display - Pans the display.
+ *     @var: frame buffer variable screen structure
+ *     @info: frame buffer structure that represents a single frame buffer
  *
  *     Pan (or wrap, depending on the `vmode' field) the display using the
- *     `yoffset' field of the `var' structure (`xoffset'  not yet supported).
- *     If the values don't fit, return -EINVAL.
+ *     `yoffset' field of the `var' structure (`xoffset'  not yet supported).
+ *     If the values don't fit, return -EINVAL.
  *
- *      Returns negative errno on error, or zero on success.
+ *     Returns negative errno on error, or zero on success.
  */
 static int
 s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -390,8 +404,259 @@ s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
        return 0;
 }
 
-/* framebuffer information structures */
+/************************************************************
+ functions to handle bitblt acceleration
+ ************************************************************/
+
+/**
+ *     bltbit_wait_bitset - waits for change in register value
+ *     @info : framebuffer structure
+ *     @bit  : value expected in register
+ *     @timeout : ...
+ *
+ *     waits until value changes INTO bit
+ */
+static u8
+bltbit_wait_bitset(struct fb_info *info, u8 bit, int timeout)
+{
+       while (!(s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit)) {
+               udelay(10);
+               if (!--timeout) {
+                       dbg_blit("wait_bitset timeout\n");
+                       break;
+               }
+       }
+
+       return timeout;
+}
+
+/**
+ *     bltbit_wait_bitclear - waits for change in register value
+ *     @info : frambuffer structure
+ *     @bit  : value currently in register
+ *     @timeout : ...
+ *
+ *     waits until value changes FROM bit
+ *
+ */
+static u8
+bltbit_wait_bitclear(struct fb_info *info, u8 bit, int timeout)
+{
+       while (s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit) {
+               udelay(10);
+               if (!--timeout) {
+                       dbg_blit("wait_bitclear timeout\n");
+                       break;
+               }
+       }
+
+       return timeout;
+}
+
+/**
+ *     bltbit_fifo_status - checks the current status of the fifo
+ *     @info : framebuffer structure
+ *
+ *     returns number of free words in buffer
+ */
+static u8
+bltbit_fifo_status(struct fb_info *info)
+{
+       u8 status;
 
+       status = s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0);
+
+       /* its empty so room for 16 words */
+       if (status & BBLT_FIFO_EMPTY)
+               return 16;
+
+       /* its full so we dont want to add */
+       if (status & BBLT_FIFO_FULL)
+               return 0;
+
+       /* its atleast half full but we can add one atleast */
+       if (status & BBLT_FIFO_NOT_FULL)
+               return 1;
+
+       return 0;
+}
+
+/*
+ *     s1d13xxxfb_bitblt_copyarea - accelerated copyarea function
+ *     @info : framebuffer structure
+ *     @area : fb_copyarea structure
+ *
+ *     supports (atleast) S1D13506
+ *
+ */
+static void
+s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+       u32 dst, src;
+       u32 stride;
+       u16 reverse = 0;
+       u16 sx = area->sx, sy = area->sy;
+       u16 dx = area->dx, dy = area->dy;
+       u16 width = area->width, height = area->height;
+       u16 bpp;
+
+       spin_lock(&s1d13xxxfb_bitblt_lock);
+
+       /* bytes per xres line */
+       bpp = (info->var.bits_per_pixel >> 3);
+       stride = bpp * info->var.xres;
+
+       /* reverse, calculate the last pixel in rectangle */
+       if ((dy > sy) || ((dy == sy) && (dx >= sx))) {
+               dst = (((dy + height - 1) * stride) + (bpp * (dx + width - 1)));
+               src = (((sy + height - 1) * stride) + (bpp * (sx + width - 1)));
+               reverse = 1;
+       /* not reverse, calculate the first pixel in rectangle */
+       } else { /* (y * xres) + (bpp * x) */
+               dst = (dy * stride) + (bpp * dx);
+               src = (sy * stride) + (bpp * sx);
+       }
+
+       /* set source adress */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START0, (src & 0xff));
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START1, (src >> 8) & 0x00ff);
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START2, (src >> 16) & 0x00ff);
+
+       /* set destination adress */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dst & 0xff));
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, (dst >> 8) & 0x00ff);
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, (dst >> 16) & 0x00ff);
+
+       /* program height and width */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, (width & 0xff) - 1);
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (width >> 8));
+
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, (height & 0xff) - 1);
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (height >> 8));
+
+       /* negative direction ROP */
+       if (reverse == 1) {
+               dbg_blit("(copyarea) negative rop\n");
+               s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x03);
+       } else /* positive direction ROP */ {
+               s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x02);
+               dbg_blit("(copyarea) positive rop\n");
+       }
+
+       /* set for rectangel mode and not linear */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);
+
+       /* setup the bpp 1 = 16bpp, 0 = 8bpp*/
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (bpp >> 1));
+
+       /* set words per xres */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (stride >> 1) & 0xff);
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (stride >> 9));
+
+       dbg_blit("(copyarea) dx=%d, dy=%d\n", dx, dy);
+       dbg_blit("(copyarea) sx=%d, sy=%d\n", sx, sy);
+       dbg_blit("(copyarea) width=%d, height=%d\n", width - 1, height - 1);
+       dbg_blit("(copyarea) stride=%d\n", stride);
+       dbg_blit("(copyarea) bpp=%d=0x0%d, mem_offset1=%d, mem_offset2=%d\n", bpp, (bpp >> 1),
+               (stride >> 1) & 0xff, stride >> 9);
+
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CC_EXP, 0x0c);
+
+       /* initialize the engine */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);
+
+       /* wait to complete */
+       bltbit_wait_bitclear(info, 0x80, 8000);
+
+       spin_unlock(&s1d13xxxfb_bitblt_lock);
+}
+
+/**
+ *
+ *     s1d13xxxfb_bitblt_solidfill - accelerated solidfill function
+ *     @info : framebuffer structure
+ *     @rect : fb_fillrect structure
+ *
+ *     supports (atleast 13506)
+ *
+ **/
+static void
+s1d13xxxfb_bitblt_solidfill(struct fb_info *info, const struct fb_fillrect *rect)
+{
+       u32 screen_stride, dest;
+       u32 fg;
+       u16 bpp = (info->var.bits_per_pixel >> 3);
+
+       /* grab spinlock */
+       spin_lock(&s1d13xxxfb_bitblt_lock);
+
+       /* bytes per x width */
+       screen_stride = (bpp * info->var.xres);
+
+       /* bytes to starting point */
+       dest = ((rect->dy * screen_stride) + (bpp * rect->dx));
+
+       dbg_blit("(solidfill) dx=%d, dy=%d, stride=%d, dest=%d\n"
+                "(solidfill) : rect_width=%d, rect_height=%d\n",
+                               rect->dx, rect->dy, screen_stride, dest,
+                               rect->width - 1, rect->height - 1);
+
+       dbg_blit("(solidfill) : xres=%d, yres=%d, bpp=%d\n",
+                               info->var.xres, info->var.yres,
+                               info->var.bits_per_pixel);
+       dbg_blit("(solidfill) : rop=%d\n", rect->rop);
+
+       /* We split the destination into the three registers */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dest & 0x00ff));
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, ((dest >> 8) & 0x00ff));
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, ((dest >> 16) & 0x00ff));
+
+       /* give information regarding rectangel width */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, ((rect->width) & 0x00ff) - 1);
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (rect->width >> 8));
+
+       /* give information regarding rectangel height */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, ((rect->height) & 0x00ff) - 1);
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (rect->height >> 8));
+
+       if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+               info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+               fg = ((u32 *)info->pseudo_palette)[rect->color];
+               dbg_blit("(solidfill) truecolor/directcolor\n");
+               dbg_blit("(solidfill) pseudo_palette[%d] = %d\n", rect->color, fg);
+       } else {
+               fg = rect->color;
+               dbg_blit("(solidfill) color = %d\n", rect->color);
+       }
+
+       /* set foreground color */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC0, (fg & 0xff));
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC1, (fg >> 8) & 0xff);
+
+       /* set rectangual region of memory (rectangle and not linear) */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);
+
+       /* set operation mode SOLID_FILL */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, BBLT_SOLID_FILL);
+
+       /* set bits per pixel (1 = 16bpp, 0 = 8bpp) */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (info->var.bits_per_pixel >> 4));
+
+       /* set the memory offset for the bblt in word sizes */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (screen_stride >> 1) & 0x00ff);
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (screen_stride >> 9));
+
+       /* and away we go.... */
+       s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);
+
+       /* wait until its done */
+       bltbit_wait_bitclear(info, 0x80, 8000);
+
+       /* let others play */
+       spin_unlock(&s1d13xxxfb_bitblt_lock);
+}
+
+/* framebuffer information structures */
 static struct fb_ops s1d13xxxfb_fbops = {
        .owner          = THIS_MODULE,
        .fb_set_par     = s1d13xxxfb_set_par,
@@ -400,7 +665,7 @@ static struct fb_ops s1d13xxxfb_fbops = {
 
        .fb_pan_display = s1d13xxxfb_pan_display,
 
-       /* to be replaced by any acceleration we can */
+       /* gets replaced at chip detection time */
        .fb_fillrect    = cfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
        .fb_imageblit   = cfb_imageblit,
@@ -412,9 +677,9 @@ static int s1d13xxxfb_width_tab[2][4] __devinitdata = {
 };
 
 /**
- *      s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to
+ *     s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to
  *     hardware setup.
- *      @info: frame buffer structure
+ *     @info: frame buffer structure
  *
  *     We setup the framebuffer structures according to the current
  *     hardware setup. On some machines, the BIOS will have filled
@@ -569,7 +834,6 @@ s1d13xxxfb_probe(struct platform_device *pdev)
        if (pdata && pdata->platform_init_video)
                pdata->platform_init_video();
 
-
        if (pdev->num_resources != 2) {
                dev_err(&pdev->dev, "invalid num_resources: %i\n",
                       pdev->num_resources);
@@ -655,16 +919,27 @@ s1d13xxxfb_probe(struct platform_device *pdev)
 
        info->fix = s1d13xxxfb_fix;
        info->fix.mmio_start = pdev->resource[1].start;
-       info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start +1;
+       info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
        info->fix.smem_start = pdev->resource[0].start;
-       info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start +1;
+       info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;
 
        printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
               default_par->regs, info->fix.smem_len / 1024, info->screen_base);
 
        info->par = default_par;
-       info->fbops = &s1d13xxxfb_fbops;
        info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+       info->fbops = &s1d13xxxfb_fbops;
+
+       switch(prod_id) {
+       case S1D13506_PROD_ID:  /* activate acceleration */
+               s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
+               s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
+               info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
+                       FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
+               break;
+       default:
+               break;
+       }
 
        /* perform "manual" chip initialization, if needed */
        if (pdata && pdata->initregs)
index d3a568e6b169c4d1a0a122880dfb4a0b3a5bb9ac..43680e545427f190b432252acb6bfe518219f089 100644 (file)
@@ -358,9 +358,16 @@ static int s3c_fb_set_par(struct fb_info *info)
        writel(data, regs + VIDOSD_B(win_no));
 
        data = var->xres * var->yres;
+
+       u32 osdc_data = 0;
+
+       osdc_data = VIDISD14C_ALPHA1_R(0xf) |
+               VIDISD14C_ALPHA1_G(0xf) |
+               VIDISD14C_ALPHA1_B(0xf);
+
        if (s3c_fb_has_osd_d(win_no)) {
                writel(data, regs + VIDOSD_D(win_no));
-               writel(0, regs + VIDOSD_C(win_no));
+               writel(osdc_data, regs + VIDOSD_C(win_no));
        } else
                writel(data, regs + VIDOSD_C(win_no));
 
@@ -409,8 +416,12 @@ static int s3c_fb_set_par(struct fb_info *info)
                                data |= WINCON1_BPPMODE_19BPP_A1666;
                        else
                                data |= WINCON1_BPPMODE_18BPP_666;
-               } else if (var->transp.length != 0)
-                       data |= WINCON1_BPPMODE_25BPP_A1888;
+               } else if (var->transp.length == 1)
+                       data |= WINCON1_BPPMODE_25BPP_A1888
+                               | WINCON1_BLD_PIX;
+               else if (var->transp.length == 4)
+                       data |= WINCON1_BPPMODE_28BPP_A4888
+                               | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
                else
                        data |= WINCON0_BPPMODE_24BPP_888;
 
@@ -418,6 +429,20 @@ static int s3c_fb_set_par(struct fb_info *info)
                break;
        }
 
+       /* It has no color key control register for window0 */
+       if (win_no > 0) {
+               u32 keycon0_data = 0, keycon1_data = 0;
+
+               keycon0_data = ~(WxKEYCON0_KEYBL_EN |
+                               WxKEYCON0_KEYEN_F |
+                               WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
+
+               keycon1_data = WxKEYCON1_COLVAL(0xffffff);
+
+               writel(keycon0_data, regs + WxKEYCONy(win_no-1, 0));
+               writel(keycon1_data, regs + WxKEYCONy(win_no-1, 1));
+       }
+
        writel(data, regs + WINCON(win_no));
        writel(0x0, regs + WINxMAP(win_no));
 
@@ -700,9 +725,12 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
  */
 static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
 {
-       fb_dealloc_cmap(&win->fbinfo->cmap);
-       unregister_framebuffer(win->fbinfo);
-       s3c_fb_free_memory(sfb, win);
+       if (win->fbinfo) {
+               unregister_framebuffer(win->fbinfo);
+               fb_dealloc_cmap(&win->fbinfo->cmap);
+               s3c_fb_free_memory(sfb, win);
+               framebuffer_release(win->fbinfo);
+       }
 }
 
 /**
@@ -753,7 +781,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        ret = s3c_fb_alloc_memory(sfb, win);
        if (ret) {
                dev_err(sfb->dev, "failed to allocate display memory\n");
-               goto err_framebuffer;
+               return ret;
        }
 
        /* setup the r/b/g positions for the window's palette */
@@ -776,7 +804,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        ret = s3c_fb_check_var(&fbinfo->var, fbinfo);
        if (ret < 0) {
                dev_err(sfb->dev, "check_var failed on initial video params\n");
-               goto err_alloc_mem;
+               return ret;
        }
 
        /* create initial colour map */
@@ -796,20 +824,13 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
        ret = register_framebuffer(fbinfo);
        if (ret < 0) {
                dev_err(sfb->dev, "failed to register framebuffer\n");
-               goto err_alloc_mem;
+               return ret;
        }
 
        *res = win;
        dev_info(sfb->dev, "window %d: fb %s\n", win_no, fbinfo->fix.id);
 
        return 0;
-
-err_alloc_mem:
-       s3c_fb_free_memory(sfb, win);
-
-err_framebuffer:
-       unregister_framebuffer(fbinfo);
-       return ret;
 }
 
 /**
index b0b4513ba53784014c6bf0c6c60fc576d7f054f7..7da0027e2409f498ac88a175c78e1fb9f0f4b30e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/cpufreq.h>
 
 #include <asm/io.h>
 #include <asm/div64.h>
@@ -89,7 +90,7 @@ static void s3c2410fb_set_lcdaddr(struct fb_info *info)
 static unsigned int s3c2410fb_calc_pixclk(struct s3c2410fb_info *fbi,
                                          unsigned long pixclk)
 {
-       unsigned long clk = clk_get_rate(fbi->clk);
+       unsigned long clk = fbi->clk_rate;
        unsigned long long div;
 
        /* pixclk is in picoseconds, our clock is in Hz
@@ -758,6 +759,57 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_CPU_FREQ
+
+static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
+                                       unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freqs = data;
+       struct s3c2410fb_info *info;
+       struct fb_info *fbinfo;
+       long delta_f;
+
+       info = container_of(nb, struct s3c2410fb_info, freq_transition);
+       fbinfo = platform_get_drvdata(to_platform_device(info->dev));
+
+       /* work out change, <0 for speed-up */
+       delta_f = info->clk_rate - clk_get_rate(info->clk);
+
+       if ((val == CPUFREQ_POSTCHANGE && delta_f > 0) ||
+           (val == CPUFREQ_PRECHANGE && delta_f < 0)) {
+               info->clk_rate = clk_get_rate(info->clk);
+               s3c2410fb_activate_var(fbinfo);
+       }
+
+       return 0;
+}
+
+static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
+{
+       info->freq_transition.notifier_call = s3c2410fb_cpufreq_transition;
+
+       return cpufreq_register_notifier(&info->freq_transition,
+                                        CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
+{
+       cpufreq_unregister_notifier(&info->freq_transition,
+                                   CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
+{
+       return 0;
+}
+
+static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
+{
+}
+#endif
+
+
 static char driver_name[] = "s3c2410fb";
 
 static int __init s3c24xxfb_probe(struct platform_device *pdev,
@@ -875,6 +927,8 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
 
        msleep(1);
 
+       info->clk_rate = clk_get_rate(info->clk);
+
        /* find maximum required memory size for display */
        for (i = 0; i < mach_info->num_displays; i++) {
                unsigned long smem_len = mach_info->displays[i].xres;
@@ -904,11 +958,17 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
 
        s3c2410fb_check_var(&fbinfo->var, fbinfo);
 
+       ret = s3c2410fb_cpufreq_register(info);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register cpufreq\n");
+               goto free_video_memory;
+       }
+
        ret = register_framebuffer(fbinfo);
        if (ret < 0) {
                printk(KERN_ERR "Failed to register framebuffer device: %d\n",
                        ret);
-               goto free_video_memory;
+               goto free_cpufreq;
        }
 
        /* create device files */
@@ -922,6 +982,8 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
 
        return 0;
 
+ free_cpufreq:
+       s3c2410fb_cpufreq_deregister(info);
 free_video_memory:
        s3c2410fb_unmap_video_memory(fbinfo);
 release_clock:
@@ -961,6 +1023,7 @@ static int s3c2410fb_remove(struct platform_device *pdev)
        int irq;
 
        unregister_framebuffer(fbinfo);
+       s3c2410fb_cpufreq_deregister(info);
 
        s3c2410fb_lcd_enable(info, 0);
        msleep(1);
index 9a6ba3e9d1b823bf7a9dc7db874cc37e7c26645f..47a17bd2301169d9368d041b83d2b2b9d80835bf 100644 (file)
@@ -29,8 +29,13 @@ struct s3c2410fb_info {
        enum s3c_drv_type       drv_type;
        struct s3c2410fb_hw     regs;
 
+       unsigned long           clk_rate;
        unsigned int            palette_ready;
 
+#ifdef CONFIG_CPU_FREQ
+       struct notifier_block   freq_transition;
+#endif
+
        /* keep these registers in case we need to re-write palette */
        u32                     palette_buffer[256];
        u32                     pseudo_pal[16];
index 7e17ee95a97aa41b4a990ac395453ad19e10d4da..7072d19080d584dbaf2f754dd31c177251932de4 100644 (file)
@@ -5928,7 +5928,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                if(pci_enable_device(pdev)) {
                        if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
                        pci_set_drvdata(pdev, NULL);
-                       kfree(sis_fb_info);
+                       framebuffer_release(sis_fb_info);
                        return -EIO;
                }
        }
@@ -6134,7 +6134,7 @@ error_3:  vfree(ivideo->bios_abase);
                pci_set_drvdata(pdev, NULL);
                if(!ivideo->sisvga_enabled)
                        pci_disable_device(pdev);
-               kfree(sis_fb_info);
+               framebuffer_release(sis_fb_info);
                return ret;
        }
 
index eabaad765aebcdf1e975c8d68108e098a330175d..eec9dcb7f59976f2a9fe337322fb82724a4ed5fe 100644 (file)
@@ -1380,7 +1380,7 @@ stifb_cleanup(void)
                                if (info->screen_base)
                                        iounmap(info->screen_base);
                        fb_dealloc_cmap(&info->cmap);
-                       kfree(info); 
+                       framebuffer_release(info);
                }
                sti->info = NULL;
        }
index 643afbfe8277bfe9bf03bf35a80c5d4a83e98841..45b883598bf02739083c19ba32106fe527dda081 100644 (file)
@@ -116,17 +116,16 @@ struct tcx_par {
        u32                     flags;
 #define TCX_FLAG_BLANKED       0x00000001
 
-       unsigned long           physbase;
        unsigned long           which_io;
-       unsigned long           fbsize;
 
        struct sbus_mmap_map    mmap_map[TCX_MMAP_ENTRIES];
        int                     lowdepth;
 };
 
 /* Reset control plane so that WID is 8-bit plane. */
-static void __tcx_set_control_plane(struct tcx_par *par)
+static void __tcx_set_control_plane(struct fb_info *info)
 {
+       struct tcx_par *par = info->par;
        u32 __iomem *p, *pend;
 
        if (par->lowdepth)
@@ -135,7 +134,7 @@ static void __tcx_set_control_plane(struct tcx_par *par)
        p = par->cplane;
        if (p == NULL)
                return;
-       for (pend = p + par->fbsize; p < pend; p++) {
+       for (pend = p + info->fix.smem_len; p < pend; p++) {
                u32 tmp = sbus_readl(p);
 
                tmp &= 0xffffff;
@@ -149,7 +148,7 @@ static void tcx_reset(struct fb_info *info)
        unsigned long flags;
 
        spin_lock_irqsave(&par->lock, flags);
-       __tcx_set_control_plane(par);
+       __tcx_set_control_plane(info);
        spin_unlock_irqrestore(&par->lock, flags);
 }
 
@@ -304,7 +303,7 @@ static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma)
        struct tcx_par *par = (struct tcx_par *)info->par;
 
        return sbusfb_mmap_helper(par->mmap_map,
-                                 par->physbase, par->fbsize,
+                                 info->fix.smem_start, info->fix.smem_len,
                                  par->which_io, vma);
 }
 
@@ -316,7 +315,7 @@ static int tcx_ioctl(struct fb_info *info, unsigned int cmd,
        return sbusfb_ioctl_helper(cmd, arg, info,
                                   FBTYPE_TCXCOLOR,
                                   (par->lowdepth ? 8 : 24),
-                                  par->fbsize);
+                                  info->fix.smem_len);
 }
 
 /*
@@ -358,10 +357,10 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
                           par->bt, sizeof(struct bt_regs));
        if (par->cplane)
                of_iounmap(&op->resource[4],
-                          par->cplane, par->fbsize * sizeof(u32));
+                          par->cplane, info->fix.smem_len * sizeof(u32));
        if (info->screen_base)
                of_iounmap(&op->resource[0],
-                          info->screen_base, par->fbsize);
+                          info->screen_base, info->fix.smem_len);
 }
 
 static int __devinit tcx_probe(struct of_device *op,
@@ -391,7 +390,7 @@ static int __devinit tcx_probe(struct of_device *op,
 
        linebytes = of_getintprop_default(dp, "linebytes",
                                          info->var.xres);
-       par->fbsize = PAGE_ALIGN(linebytes * info->var.yres);
+       info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
 
        par->tec = of_ioremap(&op->resource[7], 0,
                                  sizeof(struct tcx_tec), "tcx tec");
@@ -400,7 +399,7 @@ static int __devinit tcx_probe(struct of_device *op,
        par->bt = of_ioremap(&op->resource[8], 0,
                                 sizeof(struct bt_regs), "tcx dac");
        info->screen_base = of_ioremap(&op->resource[0], 0,
-                                          par->fbsize, "tcx ram");
+                                          info->fix.smem_len, "tcx ram");
        if (!par->tec || !par->thc ||
            !par->bt || !info->screen_base)
                goto out_unmap_regs;
@@ -408,7 +407,7 @@ static int __devinit tcx_probe(struct of_device *op,
        memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map));
        if (!par->lowdepth) {
                par->cplane = of_ioremap(&op->resource[4], 0,
-                                            par->fbsize * sizeof(u32),
+                                            info->fix.smem_len * sizeof(u32),
                                             "tcx cplane");
                if (!par->cplane)
                        goto out_unmap_regs;
@@ -419,7 +418,7 @@ static int __devinit tcx_probe(struct of_device *op,
                par->mmap_map[6].size = SBUS_MMAP_EMPTY;
        }
 
-       par->physbase = op->resource[0].start;
+       info->fix.smem_start = op->resource[0].start;
        par->which_io = op->resource[0].flags & IORESOURCE_BITS;
 
        for (i = 0; i < TCX_MMAP_ENTRIES; i++) {
@@ -473,7 +472,7 @@ static int __devinit tcx_probe(struct of_device *op,
        printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n",
               dp->full_name,
               par->which_io,
-              par->physbase,
+              info->fix.smem_start,
               par->lowdepth ? "8-bit only" : "24-bit depth");
 
        return 0;
index d6856f43d241df13d14ab43113c9472387ecbdef..bd37ee1f6a251bce027920a4eab805b0bbcfc3de 100644 (file)
@@ -174,8 +174,17 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
        return err;
 }
 
+static void vesafb_destroy(struct fb_info *info)
+{
+       if (info->screen_base)
+               iounmap(info->screen_base);
+       release_mem_region(info->aperture_base, info->aperture_size);
+       framebuffer_release(info);
+}
+
 static struct fb_ops vesafb_ops = {
        .owner          = THIS_MODULE,
+       .fb_destroy     = vesafb_destroy,
        .fb_setcolreg   = vesafb_setcolreg,
        .fb_pan_display = vesafb_pan_display,
        .fb_fillrect    = cfb_fillrect,
@@ -286,6 +295,10 @@ static int __init vesafb_probe(struct platform_device *dev)
        info->pseudo_palette = info->par;
        info->par = NULL;
 
+       /* set vesafb aperture size for generic probing */
+       info->aperture_base = screen_info.lfb_base;
+       info->aperture_size = size_total;
+
        info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
        if (!info->screen_base) {
                printk(KERN_ERR
@@ -437,7 +450,7 @@ static int __init vesafb_probe(struct platform_device *dev)
        info->fbops = &vesafb_ops;
        info->var = vesafb_defined;
        info->fix = vesafb_fix;
-       info->flags = FBINFO_FLAG_DEFAULT |
+       info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
                (ypan ? FBINFO_HWACCEL_YPAN : 0);
 
        if (!ypan)
index 2493f05e9f6176588c0f0fc3471097f62c8ae68d..15502d5e3641890f1f6646cc661b5e26d4755753 100644 (file)
@@ -384,7 +384,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
                fb_size = XENFB_DEFAULT_FB_LEN;
        }
 
-       dev->dev.driver_data = info;
+       dev_set_drvdata(&dev->dev, info);
        info->xbdev = dev;
        info->irq = -1;
        info->x1 = info->y1 = INT_MAX;
@@ -503,7 +503,7 @@ xenfb_make_preferred_console(void)
 
 static int xenfb_resume(struct xenbus_device *dev)
 {
-       struct xenfb_info *info = dev->dev.driver_data;
+       struct xenfb_info *info = dev_get_drvdata(&dev->dev);
 
        xenfb_disconnect_backend(info);
        xenfb_init_shared_page(info, info->fb_info);
@@ -512,7 +512,7 @@ static int xenfb_resume(struct xenbus_device *dev)
 
 static int xenfb_remove(struct xenbus_device *dev)
 {
-       struct xenfb_info *info = dev->dev.driver_data;
+       struct xenfb_info *info = dev_get_drvdata(&dev->dev);
 
        xenfb_disconnect_backend(info);
        if (info->fb_info) {
@@ -621,7 +621,7 @@ static void xenfb_disconnect_backend(struct xenfb_info *info)
 static void xenfb_backend_changed(struct xenbus_device *dev,
                                  enum xenbus_state backend_state)
 {
-       struct xenfb_info *info = dev->dev.driver_data;
+       struct xenfb_info *info = dev_get_drvdata(&dev->dev);
        int val;
 
        switch (backend_state) {
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
new file mode 100644 (file)
index 0000000..f654221
--- /dev/null
@@ -0,0 +1,20 @@
+menu "TI VLYNQ"
+
+config VLYNQ
+       bool "TI VLYNQ bus support"
+       depends on AR7 && EXPERIMENTAL
+       help
+         Support for Texas Instruments(R) VLYNQ bus.
+         The VLYNQ bus is a high-speed, serial and packetized
+         data bus which allows external peripherals of a SoC
+         to appear into the system's main memory.
+
+         If unsure, say N
+
+config VLYNQ_DEBUG
+       bool "VLYNQ bus debug"
+       depends on VLYNQ && KERNEL_DEBUG
+       help
+         Turn on VLYNQ bus debugging.
+
+endmenu
diff --git a/drivers/vlynq/Makefile b/drivers/vlynq/Makefile
new file mode 100644 (file)
index 0000000..b3f6114
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for kernel vlynq drivers
+#
+
+obj-$(CONFIG_VLYNQ) += vlynq.o
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
new file mode 100644 (file)
index 0000000..7335433
--- /dev/null
@@ -0,0 +1,814 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ * Parts of the VLYNQ specification can be found here:
+ * http://www.ti.com/litv/pdf/sprue36a
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/vlynq.h>
+
+#define VLYNQ_CTRL_PM_ENABLE           0x80000000
+#define VLYNQ_CTRL_CLOCK_INT           0x00008000
+#define VLYNQ_CTRL_CLOCK_DIV(x)                (((x) & 7) << 16)
+#define VLYNQ_CTRL_INT_LOCAL           0x00004000
+#define VLYNQ_CTRL_INT_ENABLE          0x00002000
+#define VLYNQ_CTRL_INT_VECTOR(x)       (((x) & 0x1f) << 8)
+#define VLYNQ_CTRL_INT2CFG             0x00000080
+#define VLYNQ_CTRL_RESET               0x00000001
+
+#define VLYNQ_CTRL_CLOCK_MASK          (0x7 << 16)
+
+#define VLYNQ_INT_OFFSET               0x00000014
+#define VLYNQ_REMOTE_OFFSET            0x00000080
+
+#define VLYNQ_STATUS_LINK              0x00000001
+#define VLYNQ_STATUS_LERROR            0x00000080
+#define VLYNQ_STATUS_RERROR            0x00000100
+
+#define VINT_ENABLE                    0x00000100
+#define VINT_TYPE_EDGE                 0x00000080
+#define VINT_LEVEL_LOW                 0x00000040
+#define VINT_VECTOR(x)                 ((x) & 0x1f)
+#define VINT_OFFSET(irq)               (8 * ((irq) % 4))
+
+#define VLYNQ_AUTONEGO_V2              0x00010000
+
+struct vlynq_regs {
+       u32 revision;
+       u32 control;
+       u32 status;
+       u32 int_prio;
+       u32 int_status;
+       u32 int_pending;
+       u32 int_ptr;
+       u32 tx_offset;
+       struct vlynq_mapping rx_mapping[4];
+       u32 chip;
+       u32 autonego;
+       u32 unused[6];
+       u32 int_device[8];
+};
+
+#ifdef CONFIG_VLYNQ_DEBUG
+static void vlynq_dump_regs(struct vlynq_device *dev)
+{
+       int i;
+
+       printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n",
+                       dev->local, dev->remote);
+       for (i = 0; i < 32; i++) {
+               printk(KERN_DEBUG "VLYNQ: local %d: %08x\n",
+                       i + 1, ((u32 *)dev->local)[i]);
+               printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n",
+                       i + 1, ((u32 *)dev->remote)[i]);
+       }
+}
+
+static void vlynq_dump_mem(u32 *base, int count)
+{
+       int i;
+
+       for (i = 0; i < (count + 3) / 4; i++) {
+               if (i % 4 == 0)
+                       printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4);
+               printk(KERN_DEBUG " 0x%08x", *(base + i));
+       }
+       printk(KERN_DEBUG "\n");
+}
+#endif
+
+/* Check the VLYNQ link status with a given device */
+static int vlynq_linked(struct vlynq_device *dev)
+{
+       int i;
+
+       for (i = 0; i < 100; i++)
+               if (readl(&dev->local->status) & VLYNQ_STATUS_LINK)
+                       return 1;
+               else
+                       cpu_relax();
+
+       return 0;
+}
+
+static void vlynq_reset(struct vlynq_device *dev)
+{
+       writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET,
+                       &dev->local->control);
+
+       /* Wait for the devices to finish resetting */
+       msleep(5);
+
+       /* Remove reset bit */
+       writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET,
+                       &dev->local->control);
+
+       /* Give some time for the devices to settle */
+       msleep(5);
+}
+
+static void vlynq_irq_unmask(unsigned int irq)
+{
+       u32 val;
+       struct vlynq_device *dev = get_irq_chip_data(irq);
+       int virq;
+
+       BUG_ON(!dev);
+       virq = irq - dev->irq_start;
+       val = readl(&dev->remote->int_device[virq >> 2]);
+       val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq);
+       writel(val, &dev->remote->int_device[virq >> 2]);
+}
+
+static void vlynq_irq_mask(unsigned int irq)
+{
+       u32 val;
+       struct vlynq_device *dev = get_irq_chip_data(irq);
+       int virq;
+
+       BUG_ON(!dev);
+       virq = irq - dev->irq_start;
+       val = readl(&dev->remote->int_device[virq >> 2]);
+       val &= ~(VINT_ENABLE << VINT_OFFSET(virq));
+       writel(val, &dev->remote->int_device[virq >> 2]);
+}
+
+static int vlynq_irq_type(unsigned int irq, unsigned int flow_type)
+{
+       u32 val;
+       struct vlynq_device *dev = get_irq_chip_data(irq);
+       int virq;
+
+       BUG_ON(!dev);
+       virq = irq - dev->irq_start;
+       val = readl(&dev->remote->int_device[virq >> 2]);
+       switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+       case IRQ_TYPE_EDGE_RISING:
+       case IRQ_TYPE_EDGE_FALLING:
+       case IRQ_TYPE_EDGE_BOTH:
+               val |= VINT_TYPE_EDGE << VINT_OFFSET(virq);
+               val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
+               val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
+               val |= VINT_LEVEL_LOW << VINT_OFFSET(virq);
+               break;
+       default:
+               return -EINVAL;
+       }
+       writel(val, &dev->remote->int_device[virq >> 2]);
+       return 0;
+}
+
+static void vlynq_local_ack(unsigned int irq)
+{
+       struct vlynq_device *dev = get_irq_chip_data(irq);
+
+       u32 status = readl(&dev->local->status);
+
+       pr_debug("%s: local status: 0x%08x\n",
+                      dev_name(&dev->dev), status);
+       writel(status, &dev->local->status);
+}
+
+static void vlynq_remote_ack(unsigned int irq)
+{
+       struct vlynq_device *dev = get_irq_chip_data(irq);
+
+       u32 status = readl(&dev->remote->status);
+
+       pr_debug("%s: remote status: 0x%08x\n",
+                      dev_name(&dev->dev), status);
+       writel(status, &dev->remote->status);
+}
+
+static irqreturn_t vlynq_irq(int irq, void *dev_id)
+{
+       struct vlynq_device *dev = dev_id;
+       u32 status;
+       int virq = 0;
+
+       status = readl(&dev->local->int_status);
+       writel(status, &dev->local->int_status);
+
+       if (unlikely(!status))
+               spurious_interrupt();
+
+       while (status) {
+               if (status & 1)
+                       do_IRQ(dev->irq_start + virq);
+               status >>= 1;
+               virq++;
+       }
+
+       return IRQ_HANDLED;
+}
+
+static struct irq_chip vlynq_irq_chip = {
+       .name = "vlynq",
+       .unmask = vlynq_irq_unmask,
+       .mask = vlynq_irq_mask,
+       .set_type = vlynq_irq_type,
+};
+
+static struct irq_chip vlynq_local_chip = {
+       .name = "vlynq local error",
+       .unmask = vlynq_irq_unmask,
+       .mask = vlynq_irq_mask,
+       .ack = vlynq_local_ack,
+};
+
+static struct irq_chip vlynq_remote_chip = {
+       .name = "vlynq remote error",
+       .unmask = vlynq_irq_unmask,
+       .mask = vlynq_irq_mask,
+       .ack = vlynq_remote_ack,
+};
+
+static int vlynq_setup_irq(struct vlynq_device *dev)
+{
+       u32 val;
+       int i, virq;
+
+       if (dev->local_irq == dev->remote_irq) {
+               printk(KERN_ERR
+                      "%s: local vlynq irq should be different from remote\n",
+                      dev_name(&dev->dev));
+               return -EINVAL;
+       }
+
+       /* Clear local and remote error bits */
+       writel(readl(&dev->local->status), &dev->local->status);
+       writel(readl(&dev->remote->status), &dev->remote->status);
+
+       /* Now setup interrupts */
+       val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq);
+       val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL |
+               VLYNQ_CTRL_INT2CFG;
+       val |= readl(&dev->local->control);
+       writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr);
+       writel(val, &dev->local->control);
+
+       val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq);
+       val |= VLYNQ_CTRL_INT_ENABLE;
+       val |= readl(&dev->remote->control);
+       writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr);
+       writel(val, &dev->remote->int_ptr);
+       writel(val, &dev->remote->control);
+
+       for (i = dev->irq_start; i <= dev->irq_end; i++) {
+               virq = i - dev->irq_start;
+               if (virq == dev->local_irq) {
+                       set_irq_chip_and_handler(i, &vlynq_local_chip,
+                                                handle_level_irq);
+                       set_irq_chip_data(i, dev);
+               } else if (virq == dev->remote_irq) {
+                       set_irq_chip_and_handler(i, &vlynq_remote_chip,
+                                                handle_level_irq);
+                       set_irq_chip_data(i, dev);
+               } else {
+                       set_irq_chip_and_handler(i, &vlynq_irq_chip,
+                                                handle_simple_irq);
+                       set_irq_chip_data(i, dev);
+                       writel(0, &dev->remote->int_device[virq >> 2]);
+               }
+       }
+
+       if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) {
+               printk(KERN_ERR "%s: request_irq failed\n",
+                                       dev_name(&dev->dev));
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
+static void vlynq_device_release(struct device *dev)
+{
+       struct vlynq_device *vdev = to_vlynq_device(dev);
+       kfree(vdev);
+}
+
+static int vlynq_device_match(struct device *dev,
+                             struct device_driver *drv)
+{
+       struct vlynq_device *vdev = to_vlynq_device(dev);
+       struct vlynq_driver *vdrv = to_vlynq_driver(drv);
+       struct vlynq_device_id *ids = vdrv->id_table;
+
+       while (ids->id) {
+               if (ids->id == vdev->dev_id) {
+                       vdev->divisor = ids->divisor;
+                       vlynq_set_drvdata(vdev, ids);
+                       printk(KERN_INFO "Driver found for VLYNQ "
+                               "device: %08x\n", vdev->dev_id);
+                       return 1;
+               }
+               printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver"
+                       " for VLYNQ device: %08x\n", ids->id, vdev->dev_id);
+               ids++;
+       }
+       return 0;
+}
+
+static int vlynq_device_probe(struct device *dev)
+{
+       struct vlynq_device *vdev = to_vlynq_device(dev);
+       struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
+       struct vlynq_device_id *id = vlynq_get_drvdata(vdev);
+       int result = -ENODEV;
+
+       if (drv->probe)
+               result = drv->probe(vdev, id);
+       if (result)
+               put_device(dev);
+       return result;
+}
+
+static int vlynq_device_remove(struct device *dev)
+{
+       struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
+
+       if (drv->remove)
+               drv->remove(to_vlynq_device(dev));
+
+       return 0;
+}
+
+int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner)
+{
+       driver->driver.name = driver->name;
+       driver->driver.bus = &vlynq_bus_type;
+       return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL(__vlynq_register_driver);
+
+void vlynq_unregister_driver(struct vlynq_driver *driver)
+{
+       driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(vlynq_unregister_driver);
+
+/*
+ * A VLYNQ remote device can clock the VLYNQ bus master
+ * using a dedicated clock line. In that case, both the
+ * remote device and the bus master should have the same
+ * serial clock dividers configured. Iterate through the
+ * 8 possible dividers until we actually link with the
+ * device.
+ */
+static int __vlynq_try_remote(struct vlynq_device *dev)
+{
+       int i;
+
+       vlynq_reset(dev);
+       for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ?
+                       i <= vlynq_rdiv8 : i >= vlynq_rdiv2;
+               dev->dev_id ? i++ : i--) {
+
+               if (!vlynq_linked(dev))
+                       break;
+
+               writel((readl(&dev->remote->control) &
+                               ~VLYNQ_CTRL_CLOCK_MASK) |
+                               VLYNQ_CTRL_CLOCK_INT |
+                               VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
+                               &dev->remote->control);
+               writel((readl(&dev->local->control)
+                               & ~(VLYNQ_CTRL_CLOCK_INT |
+                               VLYNQ_CTRL_CLOCK_MASK)) |
+                               VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
+                               &dev->local->control);
+
+               if (vlynq_linked(dev)) {
+                       printk(KERN_DEBUG
+                               "%s: using remote clock divisor %d\n",
+                               dev_name(&dev->dev), i - vlynq_rdiv1 + 1);
+                       dev->divisor = i;
+                       return 0;
+               } else {
+                       vlynq_reset(dev);
+               }
+       }
+
+       return -ENODEV;
+}
+
+/*
+ * A VLYNQ remote device can be clocked by the VLYNQ bus
+ * master using a dedicated clock line. In that case, only
+ * the bus master configures the serial clock divider.
+ * Iterate through the 8 possible dividers until we
+ * actually get a link with the device.
+ */
+static int __vlynq_try_local(struct vlynq_device *dev)
+{
+       int i;
+
+       vlynq_reset(dev);
+
+       for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ?
+                       i <= vlynq_ldiv8 : i >= vlynq_ldiv2;
+               dev->dev_id ? i++ : i--) {
+
+               writel((readl(&dev->local->control) &
+                               ~VLYNQ_CTRL_CLOCK_MASK) |
+                               VLYNQ_CTRL_CLOCK_INT |
+                               VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1),
+                               &dev->local->control);
+
+               if (vlynq_linked(dev)) {
+                       printk(KERN_DEBUG
+                               "%s: using local clock divisor %d\n",
+                               dev_name(&dev->dev), i - vlynq_ldiv1 + 1);
+                       dev->divisor = i;
+                       return 0;
+               } else {
+                       vlynq_reset(dev);
+               }
+       }
+
+       return -ENODEV;
+}
+
+/*
+ * When using external clocking method, serial clock
+ * is supplied by an external oscillator, therefore we
+ * should mask the local clock bit in the clock control
+ * register for both the bus master and the remote device.
+ */
+static int __vlynq_try_external(struct vlynq_device *dev)
+{
+       vlynq_reset(dev);
+       if (!vlynq_linked(dev))
+               return -ENODEV;
+
+       writel((readl(&dev->remote->control) &
+                       ~VLYNQ_CTRL_CLOCK_INT),
+                       &dev->remote->control);
+
+       writel((readl(&dev->local->control) &
+                       ~VLYNQ_CTRL_CLOCK_INT),
+                       &dev->local->control);
+
+       if (vlynq_linked(dev)) {
+               printk(KERN_DEBUG "%s: using external clock\n",
+                       dev_name(&dev->dev));
+               dev->divisor = vlynq_div_external;
+               return 0;
+       }
+
+       return -ENODEV;
+}
+
+static int __vlynq_enable_device(struct vlynq_device *dev)
+{
+       int result;
+       struct plat_vlynq_ops *ops = dev->dev.platform_data;
+
+       result = ops->on(dev);
+       if (result)
+               return result;
+
+       switch (dev->divisor) {
+       case vlynq_div_external:
+       case vlynq_div_auto:
+               /* When the device is brought from reset it should have clock
+                * generation negotiated by hardware.
+                * Check which device is generating clocks and perform setup
+                * accordingly */
+               if (vlynq_linked(dev) && readl(&dev->remote->control) &
+                  VLYNQ_CTRL_CLOCK_INT) {
+                       if (!__vlynq_try_remote(dev) ||
+                               !__vlynq_try_local(dev)  ||
+                               !__vlynq_try_external(dev))
+                               return 0;
+               } else {
+                       if (!__vlynq_try_external(dev) ||
+                               !__vlynq_try_local(dev)    ||
+                               !__vlynq_try_remote(dev))
+                               return 0;
+               }
+               break;
+       case vlynq_ldiv1:
+       case vlynq_ldiv2:
+       case vlynq_ldiv3:
+       case vlynq_ldiv4:
+       case vlynq_ldiv5:
+       case vlynq_ldiv6:
+       case vlynq_ldiv7:
+       case vlynq_ldiv8:
+               writel(VLYNQ_CTRL_CLOCK_INT |
+                       VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
+                       vlynq_ldiv1), &dev->local->control);
+               writel(0, &dev->remote->control);
+               if (vlynq_linked(dev)) {
+                       printk(KERN_DEBUG
+                               "%s: using local clock divisor %d\n",
+                               dev_name(&dev->dev),
+                               dev->divisor - vlynq_ldiv1 + 1);
+                       return 0;
+               }
+               break;
+       case vlynq_rdiv1:
+       case vlynq_rdiv2:
+       case vlynq_rdiv3:
+       case vlynq_rdiv4:
+       case vlynq_rdiv5:
+       case vlynq_rdiv6:
+       case vlynq_rdiv7:
+       case vlynq_rdiv8:
+               writel(0, &dev->local->control);
+               writel(VLYNQ_CTRL_CLOCK_INT |
+                       VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
+                       vlynq_rdiv1), &dev->remote->control);
+               if (vlynq_linked(dev)) {
+                       printk(KERN_DEBUG
+                               "%s: using remote clock divisor %d\n",
+                               dev_name(&dev->dev),
+                               dev->divisor - vlynq_rdiv1 + 1);
+                       return 0;
+               }
+               break;
+       }
+
+       ops->off(dev);
+       return -ENODEV;
+}
+
+int vlynq_enable_device(struct vlynq_device *dev)
+{
+       struct plat_vlynq_ops *ops = dev->dev.platform_data;
+       int result = -ENODEV;
+
+       result = __vlynq_enable_device(dev);
+       if (result)
+               return result;
+
+       result = vlynq_setup_irq(dev);
+       if (result)
+               ops->off(dev);
+
+       dev->enabled = !result;
+       return result;
+}
+EXPORT_SYMBOL(vlynq_enable_device);
+
+
+void vlynq_disable_device(struct vlynq_device *dev)
+{
+       struct plat_vlynq_ops *ops = dev->dev.platform_data;
+
+       dev->enabled = 0;
+       free_irq(dev->irq, dev);
+       ops->off(dev);
+}
+EXPORT_SYMBOL(vlynq_disable_device);
+
+int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
+                           struct vlynq_mapping *mapping)
+{
+       int i;
+
+       if (!dev->enabled)
+               return -ENXIO;
+
+       writel(tx_offset, &dev->local->tx_offset);
+       for (i = 0; i < 4; i++) {
+               writel(mapping[i].offset, &dev->local->rx_mapping[i].offset);
+               writel(mapping[i].size, &dev->local->rx_mapping[i].size);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(vlynq_set_local_mapping);
+
+int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
+                            struct vlynq_mapping *mapping)
+{
+       int i;
+
+       if (!dev->enabled)
+               return -ENXIO;
+
+       writel(tx_offset, &dev->remote->tx_offset);
+       for (i = 0; i < 4; i++) {
+               writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset);
+               writel(mapping[i].size, &dev->remote->rx_mapping[i].size);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(vlynq_set_remote_mapping);
+
+int vlynq_set_local_irq(struct vlynq_device *dev, int virq)
+{
+       int irq = dev->irq_start + virq;
+       if (dev->enabled)
+               return -EBUSY;
+
+       if ((irq < dev->irq_start) || (irq > dev->irq_end))
+               return -EINVAL;
+
+       if (virq == dev->remote_irq)
+               return -EINVAL;
+
+       dev->local_irq = virq;
+
+       return 0;
+}
+EXPORT_SYMBOL(vlynq_set_local_irq);
+
+int vlynq_set_remote_irq(struct vlynq_device *dev, int virq)
+{
+       int irq = dev->irq_start + virq;
+       if (dev->enabled)
+               return -EBUSY;
+
+       if ((irq < dev->irq_start) || (irq > dev->irq_end))
+               return -EINVAL;
+
+       if (virq == dev->local_irq)
+               return -EINVAL;
+
+       dev->remote_irq = virq;
+
+       return 0;
+}
+EXPORT_SYMBOL(vlynq_set_remote_irq);
+
+static int vlynq_probe(struct platform_device *pdev)
+{
+       struct vlynq_device *dev;
+       struct resource *regs_res, *mem_res, *irq_res;
+       int len, result;
+
+       regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+       if (!regs_res)
+               return -ENODEV;
+
+       mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+       if (!mem_res)
+               return -ENODEV;
+
+       irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq");
+       if (!irq_res)
+               return -ENODEV;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev) {
+               printk(KERN_ERR
+                      "vlynq: failed to allocate device structure\n");
+               return -ENOMEM;
+       }
+
+       dev->id = pdev->id;
+       dev->dev.bus = &vlynq_bus_type;
+       dev->dev.parent = &pdev->dev;
+       dev_set_name(&dev->dev, "vlynq%d", dev->id);
+       dev->dev.platform_data = pdev->dev.platform_data;
+       dev->dev.release = vlynq_device_release;
+
+       dev->regs_start = regs_res->start;
+       dev->regs_end = regs_res->end;
+       dev->mem_start = mem_res->start;
+       dev->mem_end = mem_res->end;
+
+       len = regs_res->end - regs_res->start;
+       if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) {
+               printk(KERN_ERR "%s: Can't request vlynq registers\n",
+                      dev_name(&dev->dev));
+               result = -ENXIO;
+               goto fail_request;
+       }
+
+       dev->local = ioremap(regs_res->start, len);
+       if (!dev->local) {
+               printk(KERN_ERR "%s: Can't remap vlynq registers\n",
+                      dev_name(&dev->dev));
+               result = -ENXIO;
+               goto fail_remap;
+       }
+
+       dev->remote = (struct vlynq_regs *)((void *)dev->local +
+                                           VLYNQ_REMOTE_OFFSET);
+
+       dev->irq = platform_get_irq_byname(pdev, "irq");
+       dev->irq_start = irq_res->start;
+       dev->irq_end = irq_res->end;
+       dev->local_irq = dev->irq_end - dev->irq_start;
+       dev->remote_irq = dev->local_irq - 1;
+
+       result = device_register(&dev->dev);
+       if (result) goto fail_register;
+       platform_set_drvdata(pdev, dev);
+
+       printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n",
+              dev_name(&dev->dev), (void *)dev->regs_start, dev->irq,
+              (void *)dev->mem_start);
+
+       dev->dev_id = 0;
+       dev->divisor = vlynq_div_auto;
+       result = __vlynq_enable_device(dev);
+       if (result == 0) {
+               dev->dev_id = readl(&dev->remote->chip);
+               ((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev);
+       }
+       if (dev->dev_id)
+               printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id);
+
+       return 0;
+
+fail_register:
+       iounmap(dev->local);
+fail_remap:
+       release_mem_region(regs_res->start, len);
+fail_request:
+       kfree(dev);
+       return result;
+}
+
+static int __devexit vlynq_remove(struct platform_device *pdev)
+{
+       struct vlynq_device *dev = platform_get_drvdata(pdev);
+
+       iounmap(dev->local);
+       release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start);
+
+       platform_set_drvdata(pdev, NULL);
+
+       device_unregister(&dev->dev);
+       return 0;
+}
+
+static struct platform_driver vlynq_platform_driver = {
+       .driver.name = "vlynq",
+       .probe = vlynq_probe,
+       .remove = __devexit_p(vlynq_remove),
+};
+
+struct bus_type vlynq_bus_type = {
+       .name = "vlynq",
+       .match = vlynq_device_match,
+       .probe = vlynq_device_probe,
+       .remove = vlynq_device_remove,
+};
+EXPORT_SYMBOL(vlynq_bus_type);
+
+static int __init vlynq_init(void)
+{
+       int res = 0;
+
+       res = bus_register(&vlynq_bus_type);
+       if (res)
+               goto fail_bus;
+
+       res = platform_driver_register(&vlynq_platform_driver);
+       if (res)
+               goto fail_platform;
+
+       return 0;
+
+fail_platform:
+       bus_unregister(&vlynq_bus_type);
+fail_bus:
+       return res;
+}
+
+static void __exit vlynq_exit(void)
+{
+       platform_driver_unregister(&vlynq_platform_driver);
+       bus_unregister(&vlynq_bus_type);
+}
+
+module_init(vlynq_init);
+module_exit(vlynq_exit);
index 525da2e8f73be1da26f67be2c1630b6f20518ac0..4044f163035f6d77734c404ca3a5ee8ab6bb55e8 100644 (file)
@@ -39,6 +39,13 @@ config FS_POSIX_ACL
        bool
        default n
 
+source "fs/xfs/Kconfig"
+source "fs/gfs2/Kconfig"
+source "fs/ocfs2/Kconfig"
+source "fs/btrfs/Kconfig"
+
+endif # BLOCK
+
 config FILE_LOCKING
        bool "Enable POSIX file locking API" if EMBEDDED
        default y
@@ -47,13 +54,6 @@ config FILE_LOCKING
           for filesystems like NFS and for the flock() system
           call. Disabling this option saves about 11k.
 
-source "fs/xfs/Kconfig"
-source "fs/gfs2/Kconfig"
-source "fs/ocfs2/Kconfig"
-source "fs/btrfs/Kconfig"
-
-endif # BLOCK
-
 source "fs/notify/Kconfig"
 
 source "fs/quota/Kconfig"
index 9367b6297d84c819c1c42abef581ed39c0093b78..89cd2deeb4aff144d1ee4355e6e57b4c9ec3aaeb 100644 (file)
@@ -513,7 +513,7 @@ befs_utf2nls(struct super_block *sb, const char *in,
 {
        struct nls_table *nls = BEFS_SB(sb)->nls;
        int i, o;
-       wchar_t uni;
+       unicode_t uni;
        int unilen, utflen;
        char *result;
        /* The utf8->nls conversion won't make the final nls string bigger
@@ -539,16 +539,16 @@ befs_utf2nls(struct super_block *sb, const char *in,
        for (i = o = 0; i < in_len; i += utflen, o += unilen) {
 
                /* convert from UTF-8 to Unicode */
-               utflen = utf8_mbtowc(&uni, &in[i], in_len - i);
-               if (utflen < 0) {
+               utflen = utf8_to_utf32(&in[i], in_len - i, &uni);
+               if (utflen < 0)
                        goto conv_err;
-               }
 
                /* convert from Unicode to nls */
+               if (uni > MAX_WCHAR_T)
+                       goto conv_err;
                unilen = nls->uni2char(uni, &result[o], in_len - o);
-               if (unilen < 0) {
+               if (unilen < 0)
                        goto conv_err;
-               }
        }
        result[o] = '\0';
        *out_len = o;
@@ -619,15 +619,13 @@ befs_nls2utf(struct super_block *sb, const char *in,
 
                /* convert from nls to unicode */
                unilen = nls->char2uni(&in[i], in_len - i, &uni);
-               if (unilen < 0) {
+               if (unilen < 0)
                        goto conv_err;
-               }
 
                /* convert from unicode to UTF-8 */
-               utflen = utf8_wctomb(&result[o], uni, 3);
-               if (utflen <= 0) {
+               utflen = utf32_to_utf8(uni, &result[o], 3);
+               if (utflen <= 0)
                        goto conv_err;
-               }
        }
 
        result[o] = '\0';
index 33a90120f6ad11673bb869f5526db633d9cfac24..4d74fc72c195daf72e9d9b8ec9a56e62a51f0227 100644 (file)
@@ -67,6 +67,8 @@ static int debugfs_u8_get(void *data, u64 *val)
        return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u8_ro, debugfs_u8_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n");
 
 /**
  * debugfs_create_u8 - create a debugfs file that is used to read and write an unsigned 8-bit value
@@ -95,6 +97,13 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
 struct dentry *debugfs_create_u8(const char *name, mode_t mode,
                                 struct dentry *parent, u8 *value)
 {
+       /* if there are no write bits set, make read only */
+       if (!(mode & S_IWUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_u8_ro);
+       /* if there are no read bits set, make write only */
+       if (!(mode & S_IRUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_u8_wo);
+
        return debugfs_create_file(name, mode, parent, value, &fops_u8);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u8);
@@ -110,6 +119,8 @@ static int debugfs_u16_get(void *data, u64 *val)
        return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u16_ro, debugfs_u16_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n");
 
 /**
  * debugfs_create_u16 - create a debugfs file that is used to read and write an unsigned 16-bit value
@@ -138,6 +149,13 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
 struct dentry *debugfs_create_u16(const char *name, mode_t mode,
                                  struct dentry *parent, u16 *value)
 {
+       /* if there are no write bits set, make read only */
+       if (!(mode & S_IWUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_u16_ro);
+       /* if there are no read bits set, make write only */
+       if (!(mode & S_IRUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_u16_wo);
+
        return debugfs_create_file(name, mode, parent, value, &fops_u16);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u16);
@@ -153,6 +171,8 @@ static int debugfs_u32_get(void *data, u64 *val)
        return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u32_ro, debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n");
 
 /**
  * debugfs_create_u32 - create a debugfs file that is used to read and write an unsigned 32-bit value
@@ -181,6 +201,13 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
 struct dentry *debugfs_create_u32(const char *name, mode_t mode,
                                 struct dentry *parent, u32 *value)
 {
+       /* if there are no write bits set, make read only */
+       if (!(mode & S_IWUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_u32_ro);
+       /* if there are no read bits set, make write only */
+       if (!(mode & S_IRUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_u32_wo);
+
        return debugfs_create_file(name, mode, parent, value, &fops_u32);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u32);
@@ -197,6 +224,8 @@ static int debugfs_u64_get(void *data, u64 *val)
        return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_ro, debugfs_u64_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
 
 /**
  * debugfs_create_u64 - create a debugfs file that is used to read and write an unsigned 64-bit value
@@ -225,15 +254,28 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n");
 struct dentry *debugfs_create_u64(const char *name, mode_t mode,
                                 struct dentry *parent, u64 *value)
 {
+       /* if there are no write bits set, make read only */
+       if (!(mode & S_IWUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_u64_ro);
+       /* if there are no read bits set, make write only */
+       if (!(mode & S_IRUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
+
        return debugfs_create_file(name, mode, parent, value, &fops_u64);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u64);
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_x8, debugfs_u8_get, debugfs_u8_set, "0x%02llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x8_ro, debugfs_u8_get, NULL, "0x%02llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x8_wo, NULL, debugfs_u8_set, "0x%02llx\n");
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_x16, debugfs_u16_get, debugfs_u16_set, "0x%04llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x16_ro, debugfs_u16_get, NULL, "0x%04llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x16_wo, NULL, debugfs_u16_set, "0x%04llx\n");
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n");
 
 /*
  * debugfs_create_x{8,16,32} - create a debugfs file that is used to read and write an unsigned {8,16,32}-bit value
@@ -256,6 +298,13 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n"
 struct dentry *debugfs_create_x8(const char *name, mode_t mode,
                                 struct dentry *parent, u8 *value)
 {
+       /* if there are no write bits set, make read only */
+       if (!(mode & S_IWUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_x8_ro);
+       /* if there are no read bits set, make write only */
+       if (!(mode & S_IRUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_x8_wo);
+
        return debugfs_create_file(name, mode, parent, value, &fops_x8);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_x8);
@@ -273,6 +322,13 @@ EXPORT_SYMBOL_GPL(debugfs_create_x8);
 struct dentry *debugfs_create_x16(const char *name, mode_t mode,
                                 struct dentry *parent, u16 *value)
 {
+       /* if there are no write bits set, make read only */
+       if (!(mode & S_IWUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_x16_ro);
+       /* if there are no read bits set, make write only */
+       if (!(mode & S_IRUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_x16_wo);
+
        return debugfs_create_file(name, mode, parent, value, &fops_x16);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_x16);
@@ -290,6 +346,13 @@ EXPORT_SYMBOL_GPL(debugfs_create_x16);
 struct dentry *debugfs_create_x32(const char *name, mode_t mode,
                                 struct dentry *parent, u32 *value)
 {
+       /* if there are no write bits set, make read only */
+       if (!(mode & S_IWUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_x32_ro);
+       /* if there are no read bits set, make write only */
+       if (!(mode & S_IRUGO))
+               return debugfs_create_file(name, mode, parent, value, &fops_x32_wo);
+
        return debugfs_create_file(name, mode, parent, value, &fops_x32);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_x32);
@@ -419,7 +482,7 @@ static const struct file_operations fops_blob = {
 };
 
 /**
- * debugfs_create_blob - create a debugfs file that is used to read and write a binary blob
+ * debugfs_create_blob - create a debugfs file that is used to read a binary blob
  * @name: a pointer to a string containing the name of the file to create.
  * @mode: the permission that the file should have
  * @parent: a pointer to the parent dentry for this file.  This should be a
index 0662ba6de85a46040ac83b87d66562146a009811..d22438ef7674870ad4987b9c2eb11555057e4dc8 100644 (file)
@@ -403,6 +403,7 @@ void debugfs_remove_recursive(struct dentry *dentry)
                }
                child = list_entry(parent->d_subdirs.next, struct dentry,
                                d_u.d_child);
+ next_sibling:
 
                /*
                 * If "child" isn't empty, walk down the tree and
@@ -416,6 +417,16 @@ void debugfs_remove_recursive(struct dentry *dentry)
                }
                __debugfs_remove(child, parent);
                if (parent->d_subdirs.next == &child->d_u.d_child) {
+                       /*
+                        * Try the next sibling.
+                        */
+                       if (child->d_u.d_child.next != &parent->d_subdirs) {
+                               child = list_entry(child->d_u.d_child.next,
+                                                  struct dentry,
+                                                  d_u.d_child);
+                               goto next_sibling;
+                       }
+
                        /*
                         * Avoid infinite loop if we fail to remove
                         * one dentry.
index b6a719a909f8eee1829764ea770e879c1c9c73fe..a2edb79134472170e95d5fe835e7ad6a783e00db 100644 (file)
@@ -24,7 +24,7 @@ static void drop_pagecache_sb(struct super_block *sb)
                        continue;
                __iget(inode);
                spin_unlock(&inode_lock);
-               __invalidate_mapping_pages(inode->i_mapping, 0, -1, true);
+               invalidate_mapping_pages(inode->i_mapping, 0, -1);
                iput(toput_inode);
                toput_inode = inode;
                spin_lock(&inode_lock);
index 3b8e71b412fde05e2ed9318c3528fd682e49ba65..38ff75a0fe22d3b8764e9be001dd57857c5abe66 100644 (file)
 #include <asm/uaccess.h>
 #include "fat.h"
 
+/*
+ * Maximum buffer size of short name.
+ * [(MSDOS_NAME + '.') * max one char + nul]
+ * For msdos style, ['.' (hidden) + MSDOS_NAME + '.' + nul]
+ */
+#define FAT_MAX_SHORT_SIZE     ((MSDOS_NAME + 1) * NLS_MAX_CHARSET_SIZE + 1)
+/*
+ * Maximum buffer size of unicode chars from slots.
+ * [(max longname slots * 13 (size in a slot) + nul) * sizeof(wchar_t)]
+ */
+#define FAT_MAX_UNI_CHARS      ((MSDOS_SLOTS - 1) * 13 + 1)
+#define FAT_MAX_UNI_SIZE       (FAT_MAX_UNI_CHARS * sizeof(wchar_t))
+
 static inline loff_t fat_make_i_pos(struct super_block *sb,
                                    struct buffer_head *bh,
                                    struct msdos_dir_entry *de)
@@ -171,7 +184,8 @@ static inline int fat_uni_to_x8(struct msdos_sb_info *sbi, const wchar_t *uni,
                                unsigned char *buf, int size)
 {
        if (sbi->options.utf8)
-               return utf8_wcstombs(buf, uni, size);
+               return utf16s_to_utf8s(uni, FAT_MAX_UNI_CHARS,
+                               UTF16_HOST_ENDIAN, buf, size);
        else
                return uni16_to_x8(buf, uni, size, sbi->options.unicode_xlate,
                                   sbi->nls_io);
@@ -324,19 +338,6 @@ parse_long:
        return 0;
 }
 
-/*
- * Maximum buffer size of short name.
- * [(MSDOS_NAME + '.') * max one char + nul]
- * For msdos style, ['.' (hidden) + MSDOS_NAME + '.' + nul]
- */
-#define FAT_MAX_SHORT_SIZE     ((MSDOS_NAME + 1) * NLS_MAX_CHARSET_SIZE + 1)
-/*
- * Maximum buffer size of unicode chars from slots.
- * [(max longname slots * 13 (size in a slot) + nul) * sizeof(wchar_t)]
- */
-#define FAT_MAX_UNI_CHARS      ((MSDOS_SLOTS - 1) * 13 + 1)
-#define FAT_MAX_UNI_SIZE       (FAT_MAX_UNI_CHARS * sizeof(wchar_t))
-
 /*
  * Return values: negative -> error, 0 -> not found, positive -> found,
  * value is the total amount of slots, including the shortname entry.
index 8d6fdcfd41df6bdc889ab6b85a6064acd7740296..73471b7ecc8c2bd90591f43391afaaeacf6c39af 100644 (file)
@@ -502,11 +502,11 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
        if (utf8) {
                int name_len = strlen(name);
 
-               *outlen = utf8_mbstowcs((wchar_t *)outname, name, PATH_MAX);
+               *outlen = utf8s_to_utf16s(name, PATH_MAX, (wchar_t *) outname);
 
                /*
                 * We stripped '.'s before and set len appropriately,
-                * but utf8_mbstowcs doesn't care about len
+                * but utf8s_to_utf16s doesn't care about len
                 */
                *outlen -= (name_len - len);
 
index 1ad703150dee08d9bec6c481f480b99ae7b149d5..a040b764f8e38b6a5067642713d4f9c6e9dc92ba 100644 (file)
@@ -198,15 +198,19 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
 }
 
 static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
-                     uid_t uid, uid_t euid, int force)
+                     int force)
 {
        write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                put_pid(filp->f_owner.pid);
                filp->f_owner.pid = get_pid(pid);
                filp->f_owner.pid_type = type;
-               filp->f_owner.uid = uid;
-               filp->f_owner.euid = euid;
+
+               if (pid) {
+                       const struct cred *cred = current_cred();
+                       filp->f_owner.uid = cred->uid;
+                       filp->f_owner.euid = cred->euid;
+               }
        }
        write_unlock_irq(&filp->f_owner.lock);
 }
@@ -214,14 +218,13 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
 int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
                int force)
 {
-       const struct cred *cred = current_cred();
        int err;
-       
+
        err = security_file_set_fowner(filp);
        if (err)
                return err;
 
-       f_modown(filp, pid, type, cred->uid, cred->euid, force);
+       f_modown(filp, pid, type, force);
        return 0;
 }
 EXPORT_SYMBOL(__f_setown);
@@ -247,7 +250,7 @@ EXPORT_SYMBOL(f_setown);
 
 void f_delown(struct file *filp)
 {
-       f_modown(filp, NULL, PIDTYPE_PID, 0, 0, 1);
+       f_modown(filp, NULL, PIDTYPE_PID, 1);
 }
 
 pid_t f_getown(struct file *filp)
@@ -425,14 +428,20 @@ static inline int sigio_perm(struct task_struct *p,
 }
 
 static void send_sigio_to_task(struct task_struct *p,
-                              struct fown_struct *fown, 
+                              struct fown_struct *fown,
                               int fd,
                               int reason)
 {
-       if (!sigio_perm(p, fown, fown->signum))
+       /*
+        * F_SETSIG can change ->signum lockless in parallel, make
+        * sure we read it once and use the same value throughout.
+        */
+       int signum = ACCESS_ONCE(fown->signum);
+
+       if (!sigio_perm(p, fown, signum))
                return;
 
-       switch (fown->signum) {
+       switch (signum) {
                siginfo_t si;
                default:
                        /* Queue a rt signal with the appropriate fd as its
@@ -441,7 +450,7 @@ static void send_sigio_to_task(struct task_struct *p,
                           delivered even if we can't queue.  Failure to
                           queue in this case _should_ be reported; we fall
                           back to SIGIO in that case. --sct */
-                       si.si_signo = fown->signum;
+                       si.si_signo = signum;
                        si.si_errno = 0;
                        si.si_code  = reason;
                        /* Make sure we are called with one of the POLL_*
@@ -453,7 +462,7 @@ static void send_sigio_to_task(struct task_struct *p,
                        else
                                si.si_band = band_table[reason - POLL_IN];
                        si.si_fd    = fd;
-                       if (!group_send_sig_info(fown->signum, &si, p))
+                       if (!group_send_sig_info(signum, &si, p))
                                break;
                /* fall-through: fall back on the old plain SIGIO signal */
                case 0:
index 40308e98c6a44f9763354b375ba4c51bb569607a..caf049146ca27a537a7dddc4a38ba02f085a6b35 100644 (file)
@@ -321,7 +321,7 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
 
        spin_lock(&inode_lock);
        inode->i_state &= ~I_SYNC;
-       if (!(inode->i_state & I_FREEING)) {
+       if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
                if (!(inode->i_state & I_DIRTY) &&
                    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /*
@@ -492,7 +492,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
                        break;
                }
 
-               if (inode->i_state & I_NEW) {
+               if (inode->i_state & (I_NEW | I_WILL_FREE)) {
                        requeue_io(inode);
                        continue;
                }
@@ -523,7 +523,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
                if (current_is_pdflush() && !writeback_acquire(bdi))
                        break;
 
-               BUG_ON(inode->i_state & I_FREEING);
+               BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                __writeback_single_inode(inode, wbc);
index 92c14b850e9cadeaf2179ca012130cfa6b744204..a048de81c09318ae5ccd092f5675649a50a7025b 100644 (file)
@@ -37,37 +37,6 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
        return (op - ascii);
 }
 
-/* Convert big endian wide character string to utf8 */
-static int
-wcsntombs_be(__u8 *s, const __u8 *pwcs, int inlen, int maxlen)
-{
-       const __u8 *ip;
-       __u8 *op;
-       int size;
-       __u16 c;
-
-       op = s;
-       ip = pwcs;
-       while ((*ip || ip[1]) && (maxlen > 0) && (inlen > 0)) {
-               c = (*ip << 8) | ip[1];
-               if (c > 0x7f) {
-                       size = utf8_wctomb(op, c, maxlen);
-                       if (size == -1) {
-                               /* Ignore character and move on */
-                               maxlen--;
-                       } else {
-                               op += size;
-                               maxlen -= size;
-                       }
-               } else {
-                       *op++ = (__u8) c;
-               }
-               ip += 2;
-               inlen--;
-       }
-       return (op - s);
-}
-
 int
 get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
 {
@@ -79,8 +48,9 @@ get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, st
        nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
 
        if (utf8) {
-               len = wcsntombs_be(outname, de->name,
-                               de->name_len[0] >> 1, PAGE_SIZE);
+               len = utf16s_to_utf8s((const wchar_t *) de->name,
+                               de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
+                               outname, PAGE_SIZE);
        } else {
                len = uni16_to_x8(outname, (__be16 *) de->name,
                                de->name_len[0] >> 1, nls);
index bbbd5f202e3740d7e735449eb8e6e9626d56e4f1..41d6045dbeb08b5db952e9945a054bf722470451 100644 (file)
@@ -391,6 +391,7 @@ int extHint(struct inode *ip, s64 offset, xad_t * xp)
                }
                XADaddress(xp, xaddr);
                XADlength(xp, xlen);
+               XADoffset(xp, prev);
                /*
                 * only preserve the abnr flag within the xad flags
                 * of the returned hint.
index 97645f112114e0c2f1aa4304a1f85046ec66ebb6..0ec6237a5970f162e0ed0d72b33fa9f1e15a7b86 100644 (file)
@@ -1113,11 +1113,13 @@ ncp__io2vol(struct ncp_server *server, unsigned char *vname, unsigned int *vlen,
 
                if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
                        int k;
+                       unicode_t u;
 
-                       k = utf8_mbtowc(&ec, iname, iname_end - iname);
-                       if (k < 0)
+                       k = utf8_to_utf32(iname, iname_end - iname, &u);
+                       if (k < 0 || u > MAX_WCHAR_T)
                                return -EINVAL;
                        iname += k;
+                       ec = u;
                } else {
                        if (*iname == NCP_ESC) {
                                int k;
@@ -1214,7 +1216,7 @@ ncp__vol2io(struct ncp_server *server, unsigned char *iname, unsigned int *ilen,
                if (NCP_IS_FLAG(server, NCP_FLAG_UTF8)) {
                        int k;
 
-                       k = utf8_wctomb(iname, ec, iname_end - iname);
+                       k = utf32_to_utf8(ec, iname, iname_end - iname);
                        if (k < 0) {
                                err = -ENAMETOOLONG;
                                goto quit;
index a2ab2529b5ca4ee1c1ac2b8e3889e4651bb8f1ac..ceda50aad73cc5a3bc93baeaaa7771ebd7a0f00f 100644 (file)
@@ -31,7 +31,7 @@ static inline void nfs_inc_server_stats(const struct nfs_server *server,
        cpu = get_cpu();
        iostats = per_cpu_ptr(server->io_stats, cpu);
        iostats->events[stat]++;
-       put_cpu_no_resched();
+       put_cpu();
 }
 
 static inline void nfs_inc_stats(const struct inode *inode,
@@ -50,7 +50,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
        cpu = get_cpu();
        iostats = per_cpu_ptr(server->io_stats, cpu);
        iostats->bytes[stat] += addend;
-       put_cpu_no_resched();
+       put_cpu();
 }
 
 static inline void nfs_add_stats(const struct inode *inode,
@@ -71,7 +71,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
        cpu = get_cpu();
        iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
        iostats->fscache[stat] += addend;
-       put_cpu_no_resched();
+       put_cpu();
 }
 #endif
 
index 9b0efdad89100bd60e5a15954b67ca83495cad56..477d37d83b316367e1ac04fb31ba98e375a37b1a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/errno.h>
 #include <linux/kmod.h>
 #include <linux/spinlock.h>
+#include <asm/byteorder.h>
 
 static struct nls_table default_table;
 static struct nls_table *tables = &default_table;
@@ -43,10 +44,17 @@ static const struct utf8_table utf8_table[] =
     {0,                                                       /* end of table    */}
 };
 
-int
-utf8_mbtowc(wchar_t *p, const __u8 *s, int n)
+#define UNICODE_MAX    0x0010ffff
+#define PLANE_SIZE     0x00010000
+
+#define SURROGATE_MASK 0xfffff800
+#define SURROGATE_PAIR 0x0000d800
+#define SURROGATE_LOW  0x00000400
+#define SURROGATE_BITS 0x000003ff
+
+int utf8_to_utf32(const u8 *s, int len, unicode_t *pu)
 {
-       long l;
+       unsigned long l;
        int c0, c, nc;
        const struct utf8_table *t;
   
@@ -57,12 +65,13 @@ utf8_mbtowc(wchar_t *p, const __u8 *s, int n)
                nc++;
                if ((c0 & t->cmask) == t->cval) {
                        l &= t->lmask;
-                       if (l < t->lval)
+                       if (l < t->lval || l > UNICODE_MAX ||
+                                       (l & SURROGATE_MASK) == SURROGATE_PAIR)
                                return -1;
-                       *p = l;
+                       *pu = (unicode_t) l;
                        return nc;
                }
-               if (n <= nc)
+               if (len <= nc)
                        return -1;
                s++;
                c = (*s ^ 0x80) & 0xFF;
@@ -72,90 +81,133 @@ utf8_mbtowc(wchar_t *p, const __u8 *s, int n)
        }
        return -1;
 }
+EXPORT_SYMBOL(utf8_to_utf32);
 
-int
-utf8_mbstowcs(wchar_t *pwcs, const __u8 *s, int n)
+int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
 {
-       __u16 *op;
-       const __u8 *ip;
-       int size;
-
-       op = pwcs;
-       ip = s;
-       while (*ip && n > 0) {
-               if (*ip & 0x80) {
-                       size = utf8_mbtowc(op, ip, n);
-                       if (size == -1) {
-                               /* Ignore character and move on */
-                               ip++;
-                               n--;
-                       } else {
-                               op++;
-                               ip += size;
-                               n -= size;
-                       }
-               } else {
-                       *op++ = *ip++;
-                       n--;
-               }
-       }
-       return (op - pwcs);
-}
-
-int
-utf8_wctomb(__u8 *s, wchar_t wc, int maxlen)
-{
-       long l;
+       unsigned long l;
        int c, nc;
        const struct utf8_table *t;
-  
+
        if (!s)
                return 0;
-  
-       l = wc;
+
+       l = u;
+       if (l > UNICODE_MAX || (l & SURROGATE_MASK) == SURROGATE_PAIR)
+               return -1;
+
        nc = 0;
        for (t = utf8_table; t->cmask && maxlen; t++, maxlen--) {
                nc++;
                if (l <= t->lmask) {
                        c = t->shift;
-                       *s = t->cval | (l >> c);
+                       *s = (u8) (t->cval | (l >> c));
                        while (c > 0) {
                                c -= 6;
                                s++;
-                               *s = 0x80 | ((l >> c) & 0x3F);
+                               *s = (u8) (0x80 | ((l >> c) & 0x3F));
                        }
                        return nc;
                }
        }
        return -1;
 }
+EXPORT_SYMBOL(utf32_to_utf8);
 
-int
-utf8_wcstombs(__u8 *s, const wchar_t *pwcs, int maxlen)
+int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
 {
-       const __u16 *ip;
-       __u8 *op;
+       u16 *op;
        int size;
+       unicode_t u;
+
+       op = pwcs;
+       while (*s && len > 0) {
+               if (*s & 0x80) {
+                       size = utf8_to_utf32(s, len, &u);
+                       if (size < 0) {
+                               /* Ignore character and move on */
+                               size = 1;
+                       } else if (u >= PLANE_SIZE) {
+                               u -= PLANE_SIZE;
+                               *op++ = (wchar_t) (SURROGATE_PAIR |
+                                               ((u >> 10) & SURROGATE_BITS));
+                               *op++ = (wchar_t) (SURROGATE_PAIR |
+                                               SURROGATE_LOW |
+                                               (u & SURROGATE_BITS));
+                       } else {
+                               *op++ = (wchar_t) u;
+                       }
+                       s += size;
+                       len -= size;
+               } else {
+                       *op++ = *s++;
+                       len--;
+               }
+       }
+       return op - pwcs;
+}
+EXPORT_SYMBOL(utf8s_to_utf16s);
+
+static inline unsigned long get_utf16(unsigned c, enum utf16_endian endian)
+{
+       switch (endian) {
+       default:
+               return c;
+       case UTF16_LITTLE_ENDIAN:
+               return __le16_to_cpu(c);
+       case UTF16_BIG_ENDIAN:
+               return __be16_to_cpu(c);
+       }
+}
+
+int utf16s_to_utf8s(const wchar_t *pwcs, int len, enum utf16_endian endian,
+               u8 *s, int maxlen)
+{
+       u8 *op;
+       int size;
+       unsigned long u, v;
 
        op = s;
-       ip = pwcs;
-       while (*ip && maxlen > 0) {
-               if (*ip > 0x7f) {
-                       size = utf8_wctomb(op, *ip, maxlen);
+       while (len > 0 && maxlen > 0) {
+               u = get_utf16(*pwcs, endian);
+               if (!u)
+                       break;
+               pwcs++;
+               len--;
+               if (u > 0x7f) {
+                       if ((u & SURROGATE_MASK) == SURROGATE_PAIR) {
+                               if (u & SURROGATE_LOW) {
+                                       /* Ignore character and move on */
+                                       continue;
+                               }
+                               if (len <= 0)
+                                       break;
+                               v = get_utf16(*pwcs, endian);
+                               if ((v & SURROGATE_MASK) != SURROGATE_PAIR ||
+                                               !(v & SURROGATE_LOW)) {
+                                       /* Ignore character and move on */
+                                       continue;
+                               }
+                               u = PLANE_SIZE + ((u & SURROGATE_BITS) << 10)
+                                               + (v & SURROGATE_BITS);
+                               pwcs++;
+                               len--;
+                       }
+                       size = utf32_to_utf8(u, op, maxlen);
                        if (size == -1) {
                                /* Ignore character and move on */
-                               maxlen--;
                        } else {
                                op += size;
                                maxlen -= size;
                        }
                } else {
-                       *op++ = (__u8) *ip;
+                       *op++ = (u8) u;
+                       maxlen--;
                }
-               ip++;
        }
-       return (op - s);
+       return op - s;
 }
+EXPORT_SYMBOL(utf16s_to_utf8s);
 
 int register_nls(struct nls_table * nls)
 {
@@ -467,9 +519,5 @@ EXPORT_SYMBOL(unregister_nls);
 EXPORT_SYMBOL(unload_nls);
 EXPORT_SYMBOL(load_nls);
 EXPORT_SYMBOL(load_nls_default);
-EXPORT_SYMBOL(utf8_mbtowc);
-EXPORT_SYMBOL(utf8_mbstowcs);
-EXPORT_SYMBOL(utf8_wctomb);
-EXPORT_SYMBOL(utf8_wcstombs);
 
 MODULE_LICENSE("Dual BSD/GPL");
index aa2c42fdd977d8ed481e9e303382e608d1f6a9d8..0d60a44acacd42b7eee10349a672a9ec9a74240a 100644 (file)
@@ -15,7 +15,11 @@ static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
 {
        int n;
 
-       if ( (n = utf8_wctomb(out, uni, boundlen)) == -1) {
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       n = utf32_to_utf8(uni, out, boundlen);
+       if (n < 0) {
                *out = '?';
                return -EINVAL;
        }
@@ -25,11 +29,14 @@ static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
 static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
 {
        int n;
+       unicode_t u;
 
-       if ( (n = utf8_mbtowc(uni, rawstring, boundlen)) == -1) {
+       n = utf8_to_utf32(rawstring, boundlen, &u);
+       if (n < 0 || u > MAX_WCHAR_T) {
                *uni = 0x003f;  /* ? */
-               n = -EINVAL;
+               return -EINVAL;
        }
+       *uni = (wchar_t) u;
        return n;
 }
 
index 82c5085559c6796332f97bfe3064861c5cc46aad..9938034762cca7867007dc841af663b0f097c574 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
 #include <linux/slab.h>
+#include <linux/log2.h>
 
 #include "aops.h"
 #include "attrib.h"
@@ -1570,7 +1571,7 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
        ntfs_debug("Index collation rule is 0x%x.",
                        le32_to_cpu(ir->collation_rule));
        ni->itype.index.block_size = le32_to_cpu(ir->index_block_size);
-       if (ni->itype.index.block_size & (ni->itype.index.block_size - 1)) {
+       if (!is_power_of_2(ni->itype.index.block_size)) {
                ntfs_error(vi->i_sb, "Index block size (%u) is not a power of "
                                "two.", ni->itype.index.block_size);
                goto unm_err_out;
index d7932e95b1fdfe09ad3e46c78799aa1304d87dc5..89b02985c054e7d05fdf0b147ecf970b7d498e22 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/highmem.h>
 #include <linux/buffer_head.h>
 #include <linux/bitops.h>
+#include <linux/log2.h>
 
 #include "attrib.h"
 #include "aops.h"
@@ -65,7 +66,7 @@ static bool ntfs_check_restart_page_header(struct inode *vi,
                        logfile_log_page_size < NTFS_BLOCK_SIZE ||
                        logfile_system_page_size &
                        (logfile_system_page_size - 1) ||
-                       logfile_log_page_size & (logfile_log_page_size - 1)) {
+                       !is_power_of_2(logfile_log_page_size)) {
                ntfs_error(vi->i_sb, "$LogFile uses unsupported page size.");
                return false;
        }
index 1539e630c47d524b1df251236e638dbb1e8d279b..3ce5ae9e3d2dabd36dce105ecb4545a4723c1202 100644 (file)
@@ -1006,7 +1006,12 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf,
 
        if (!task)
                return -ESRCH;
-       oom_adjust = task->oomkilladj;
+       task_lock(task);
+       if (task->mm)
+               oom_adjust = task->mm->oom_adj;
+       else
+               oom_adjust = OOM_DISABLE;
+       task_unlock(task);
        put_task_struct(task);
 
        len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
@@ -1035,11 +1040,19 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
        task = get_proc_task(file->f_path.dentry->d_inode);
        if (!task)
                return -ESRCH;
-       if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
+       task_lock(task);
+       if (!task->mm) {
+               task_unlock(task);
+               put_task_struct(task);
+               return -EINVAL;
+       }
+       if (oom_adjust < task->mm->oom_adj && !capable(CAP_SYS_RESOURCE)) {
+               task_unlock(task);
                put_task_struct(task);
                return -EACCES;
        }
-       task->oomkilladj = oom_adjust;
+       task->mm->oom_adj = oom_adjust;
+       task_unlock(task);
        put_task_struct(task);
        if (end - buffer == 0)
                return -EIO;
index c6b0302af4c40268e9511e34d16f3385f817e1d6..d5c410d47faef162d3edf1876f8582baec0936e9 100644 (file)
@@ -64,10 +64,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                "Inactive(anon): %8lu kB\n"
                "Active(file):   %8lu kB\n"
                "Inactive(file): %8lu kB\n"
-#ifdef CONFIG_UNEVICTABLE_LRU
                "Unevictable:    %8lu kB\n"
                "Mlocked:        %8lu kB\n"
-#endif
 #ifdef CONFIG_HIGHMEM
                "HighTotal:      %8lu kB\n"
                "HighFree:       %8lu kB\n"
@@ -109,10 +107,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(pages[LRU_INACTIVE_ANON]),
                K(pages[LRU_ACTIVE_FILE]),
                K(pages[LRU_INACTIVE_FILE]),
-#ifdef CONFIG_UNEVICTABLE_LRU
                K(pages[LRU_UNEVICTABLE]),
                K(global_page_state(NR_MLOCK)),
-#endif
 #ifdef CONFIG_HIGHMEM
                K(i.totalhigh),
                K(i.freehigh),
index e9983837d08d4d310701fa774057f1b66848ffb3..2707c6c7a20f0dc8bac60ad129103570b4cb10d0 100644 (file)
@@ -6,11 +6,13 @@
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/hugetlb.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
 #define KPMSIZE sizeof(u64)
 #define KPMMASK (KPMSIZE - 1)
+
 /* /proc/kpagecount - an array exposing page counts
  *
  * Each entry is a u64 representing the corresponding
@@ -32,20 +34,22 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               ppage = NULL;
                if (pfn_valid(pfn))
                        ppage = pfn_to_page(pfn);
-               pfn++;
+               else
+                       ppage = NULL;
                if (!ppage)
                        pcount = 0;
                else
                        pcount = page_mapcount(ppage);
 
-               if (put_user(pcount, out++)) {
+               if (put_user(pcount, out)) {
                        ret = -EFAULT;
                        break;
                }
 
+               pfn++;
+               out++;
                count -= KPMSIZE;
        }
 
@@ -68,19 +72,122 @@ static const struct file_operations proc_kpagecount_operations = {
 
 /* These macros are used to decouple internal flags from exported ones */
 
-#define KPF_LOCKED     0
-#define KPF_ERROR      1
-#define KPF_REFERENCED 2
-#define KPF_UPTODATE   3
-#define KPF_DIRTY      4
-#define KPF_LRU        5
-#define KPF_ACTIVE     6
-#define KPF_SLAB       7
-#define KPF_WRITEBACK  8
-#define KPF_RECLAIM    9
-#define KPF_BUDDY     10
+#define KPF_LOCKED             0
+#define KPF_ERROR              1
+#define KPF_REFERENCED         2
+#define KPF_UPTODATE           3
+#define KPF_DIRTY              4
+#define KPF_LRU                        5
+#define KPF_ACTIVE             6
+#define KPF_SLAB               7
+#define KPF_WRITEBACK          8
+#define KPF_RECLAIM            9
+#define KPF_BUDDY              10
+
+/* 11-20: new additions in 2.6.31 */
+#define KPF_MMAP               11
+#define KPF_ANON               12
+#define KPF_SWAPCACHE          13
+#define KPF_SWAPBACKED         14
+#define KPF_COMPOUND_HEAD      15
+#define KPF_COMPOUND_TAIL      16
+#define KPF_HUGE               17
+#define KPF_UNEVICTABLE                18
+#define KPF_NOPAGE             20
+
+/* kernel hacking assistances
+ * WARNING: subject to change, never rely on them!
+ */
+#define KPF_RESERVED           32
+#define KPF_MLOCKED            33
+#define KPF_MAPPEDTODISK       34
+#define KPF_PRIVATE            35
+#define KPF_PRIVATE_2          36
+#define KPF_OWNER_PRIVATE      37
+#define KPF_ARCH               38
+#define KPF_UNCACHED           39
+
+static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
+{
+       return ((kflags >> kbit) & 1) << ubit;
+}
 
-#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
+static u64 get_uflags(struct page *page)
+{
+       u64 k;
+       u64 u;
+
+       /*
+        * pseudo flag: KPF_NOPAGE
+        * it differentiates a memory hole from a page with no flags
+        */
+       if (!page)
+               return 1 << KPF_NOPAGE;
+
+       k = page->flags;
+       u = 0;
+
+       /*
+        * pseudo flags for the well known (anonymous) memory mapped pages
+        *
+        * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
+        * simple test in page_mapped() is not enough.
+        */
+       if (!PageSlab(page) && page_mapped(page))
+               u |= 1 << KPF_MMAP;
+       if (PageAnon(page))
+               u |= 1 << KPF_ANON;
+
+       /*
+        * compound pages: export both head/tail info
+        * they together define a compound page's start/end pos and order
+        */
+       if (PageHead(page))
+               u |= 1 << KPF_COMPOUND_HEAD;
+       if (PageTail(page))
+               u |= 1 << KPF_COMPOUND_TAIL;
+       if (PageHuge(page))
+               u |= 1 << KPF_HUGE;
+
+       u |= kpf_copy_bit(k, KPF_LOCKED,        PG_locked);
+
+       /*
+        * Caveats on high order pages:
+        * PG_buddy will only be set on the head page; SLUB/SLQB do the same
+        * for PG_slab; SLOB won't set PG_slab at all on compound pages.
+        */
+       u |= kpf_copy_bit(k, KPF_SLAB,          PG_slab);
+       u |= kpf_copy_bit(k, KPF_BUDDY,         PG_buddy);
+
+       u |= kpf_copy_bit(k, KPF_ERROR,         PG_error);
+       u |= kpf_copy_bit(k, KPF_DIRTY,         PG_dirty);
+       u |= kpf_copy_bit(k, KPF_UPTODATE,      PG_uptodate);
+       u |= kpf_copy_bit(k, KPF_WRITEBACK,     PG_writeback);
+
+       u |= kpf_copy_bit(k, KPF_LRU,           PG_lru);
+       u |= kpf_copy_bit(k, KPF_REFERENCED,    PG_referenced);
+       u |= kpf_copy_bit(k, KPF_ACTIVE,        PG_active);
+       u |= kpf_copy_bit(k, KPF_RECLAIM,       PG_reclaim);
+
+       u |= kpf_copy_bit(k, KPF_SWAPCACHE,     PG_swapcache);
+       u |= kpf_copy_bit(k, KPF_SWAPBACKED,    PG_swapbacked);
+
+       u |= kpf_copy_bit(k, KPF_UNEVICTABLE,   PG_unevictable);
+       u |= kpf_copy_bit(k, KPF_MLOCKED,       PG_mlocked);
+
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+       u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
+#endif
+
+       u |= kpf_copy_bit(k, KPF_RESERVED,      PG_reserved);
+       u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,  PG_mappedtodisk);
+       u |= kpf_copy_bit(k, KPF_PRIVATE,       PG_private);
+       u |= kpf_copy_bit(k, KPF_PRIVATE_2,     PG_private_2);
+       u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
+       u |= kpf_copy_bit(k, KPF_ARCH,          PG_arch_1);
+
+       return u;
+};
 
 static ssize_t kpageflags_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
@@ -90,7 +197,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
        unsigned long src = *ppos;
        unsigned long pfn;
        ssize_t ret = 0;
-       u64 kflags, uflags;
 
        pfn = src / KPMSIZE;
        count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
@@ -98,32 +204,18 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               ppage = NULL;
                if (pfn_valid(pfn))
                        ppage = pfn_to_page(pfn);
-               pfn++;
-               if (!ppage)
-                       kflags = 0;
                else
-                       kflags = ppage->flags;
-
-               uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
-                       kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
-                       kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
-                       kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
-                       kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
-                       kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
-                       kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
-                       kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
-                       kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
-                       kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
-                       kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);
-
-               if (put_user(uflags, out++)) {
+                       ppage = NULL;
+
+               if (put_user(get_uflags(ppage), out)) {
                        ret = -EFAULT;
                        break;
                }
 
+               pfn++;
+               out++;
                count -= KPMSIZE;
        }
 
index 0fe0e1469df31f386845dd443fdfe89a01543d92..d870237e42c74f018b2824304345aa44a90b8607 100644 (file)
@@ -168,7 +168,7 @@ static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
        return table->entry++;
 }
 
-static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
        struct poll_wqueues *pwq = wait->private;
        DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
@@ -194,6 +194,16 @@ static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
        return default_wake_function(&dummy_wait, mode, sync, key);
 }
 
+static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+       struct poll_table_entry *entry;
+
+       entry = container_of(wait, struct poll_table_entry, wait);
+       if (key && !((unsigned long)key & entry->key))
+               return 0;
+       return __pollwake(wait, mode, sync, key);
+}
+
 /* Add a new entry */
 static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                                poll_table *p)
@@ -205,6 +215,7 @@ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
        get_file(filp);
        entry->filp = filp;
        entry->wait_address = wait_address;
+       entry->key = p->key;
        init_waitqueue_func_entry(&entry->wait, pollwake);
        entry->wait.private = pwq;
        add_wait_queue(wait_address, &entry->wait);
@@ -362,6 +373,18 @@ get_max:
 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
 #define POLLEX_SET (POLLPRI)
 
+static inline void wait_key_set(poll_table *wait, unsigned long in,
+                               unsigned long out, unsigned long bit)
+{
+       if (wait) {
+               wait->key = POLLEX_SET;
+               if (in & bit)
+                       wait->key |= POLLIN_SET;
+               if (out & bit)
+                       wait->key |= POLLOUT_SET;
+       }
+}
+
 int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 {
        ktime_t expire, *to = NULL;
@@ -418,20 +441,25 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
                                if (file) {
                                        f_op = file->f_op;
                                        mask = DEFAULT_POLLMASK;
-                                       if (f_op && f_op->poll)
-                                               mask = (*f_op->poll)(file, retval ? NULL : wait);
+                                       if (f_op && f_op->poll) {
+                                               wait_key_set(wait, in, out, bit);
+                                               mask = (*f_op->poll)(file, wait);
+                                       }
                                        fput_light(file, fput_needed);
                                        if ((mask & POLLIN_SET) && (in & bit)) {
                                                res_in |= bit;
                                                retval++;
+                                               wait = NULL;
                                        }
                                        if ((mask & POLLOUT_SET) && (out & bit)) {
                                                res_out |= bit;
                                                retval++;
+                                               wait = NULL;
                                        }
                                        if ((mask & POLLEX_SET) && (ex & bit)) {
                                                res_ex |= bit;
                                                retval++;
+                                               wait = NULL;
                                        }
                                }
                        }
@@ -685,8 +713,12 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
                mask = POLLNVAL;
                if (file != NULL) {
                        mask = DEFAULT_POLLMASK;
-                       if (file->f_op && file->f_op->poll)
+                       if (file->f_op && file->f_op->poll) {
+                               if (pwait)
+                                       pwait->key = pollfd->events |
+                                                       POLLERR | POLLHUP;
                                mask = file->f_op->poll(file, pwait);
+                       }
                        /* Mask out unneeded events. */
                        mask &= pollfd->events | POLLERR | POLLHUP;
                        fput_light(file, fput_needed);
index a3ba217fbe74f4313a1dca88a8cc863e14766341..1d897ad808e0b1d234c302bc745a3c858848ce6b 100644 (file)
@@ -192,8 +192,11 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        int error = -ENOMEM;
        unsigned long page = get_zeroed_page(GFP_KERNEL);
-       if (page)
+       if (page) {
                error = sysfs_getlink(dentry, (char *) page); 
+               if (error < 0)
+                       free_page((unsigned long)page);
+       }
        nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
        return NULL;
 }
index 58c33055c304f65a1c6f576d1d1e8b0c8533afad..54e8b3d956b7fe9ba20082844b50237d27050681 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ASM_GENERIC_KMAP_TYPES_H
 #define _ASM_GENERIC_KMAP_TYPES_H
 
-#ifdef CONFIG_DEBUG_HIGHMEM
+#ifdef __WITH_KM_FENCE
 # define D(n) __KM_FENCE_##n ,
 #else
 # define D(n)
index 54398d2c6d8df6cab9c898d91ee775acfabab108..d276b5510c83498bbd4815367ef05cf398beee3b 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef _LINUX_BUG_H
 #define _LINUX_BUG_H
 
-#include <linux/module.h>
 #include <asm/bug.h>
 
 enum bug_trap_type {
@@ -24,10 +23,6 @@ const struct bug_entry *find_bug(unsigned long bugaddr);
 
 enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
 
-int  module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
-                        struct module *);
-void module_bug_cleanup(struct module *);
-
 /* These are defined by the architecture */
 int is_valid_bugaddr(unsigned long addr);
 
@@ -38,13 +33,6 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
 {
        return BUG_TRAP_TYPE_BUG;
 }
-static inline int  module_bug_finalize(const Elf_Ehdr *hdr,
-                                       const Elf_Shdr *sechdrs,
-                                       struct module *mod)
-{
-       return 0;
-}
-static inline void module_bug_cleanup(struct module *mod) {}
 
 #endif /* CONFIG_GENERIC_BUG */
 #endif /* _LINUX_BUG_H */
index 7b5a2388ba67412dda1740cf869950648668fd31..2a5cd867c365c3fd4e7f776ceb7e6d42d4e255f9 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/kmemcheck.h>
 
 #define C2PORT_NAME_LEN                        32
 
 /* Main struct */
 struct c2port_ops;
 struct c2port_device {
+       kmemcheck_bitfield_begin(flags);
        unsigned int access:1;
        unsigned int flash_access:1;
+       kmemcheck_bitfield_end(flags);
 
        int id;
        char name[C2PORT_NAME_LEN];
index 05ea1dd7d681d072a5ae1c9c7c2da56d75bbc6e6..a5740fc4d04b9415478f4180dcbdad289f5eb281 100644 (file)
@@ -18,7 +18,6 @@
 
 extern int number_of_cpusets;  /* How many cpusets are defined in system? */
 
-extern int cpuset_init_early(void);
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
@@ -27,7 +26,6 @@ extern void cpuset_cpus_allowed_locked(struct task_struct *p,
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
-void cpuset_update_task_memory_state(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
 extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
@@ -92,9 +90,13 @@ extern void rebuild_sched_domains(void);
 
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
+static inline void set_mems_allowed(nodemask_t nodemask)
+{
+       current->mems_allowed = nodemask;
+}
+
 #else /* !CONFIG_CPUSETS */
 
-static inline int cpuset_init_early(void) { return 0; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
@@ -116,7 +118,6 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 
 #define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
 static inline void cpuset_init_current_mems_allowed(void) {}
-static inline void cpuset_update_task_memory_state(void) {}
 
 static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 {
@@ -188,6 +189,10 @@ static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
 {
 }
 
+static inline void set_mems_allowed(nodemask_t nodemask)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
index a4a7b10aaa48a8b086142876272466a888069ce4..ed4e39f2c4230da03f93651f5d323f805cdb6c94 100644 (file)
@@ -114,6 +114,8 @@ extern int bus_unregister_notifier(struct bus_type *bus,
 #define BUS_NOTIFY_BOUND_DRIVER                0x00000003 /* driver bound to device */
 #define BUS_NOTIFY_UNBIND_DRIVER       0x00000004 /* driver about to be
                                                      unbound */
+#define BUS_NOTIFY_UNBOUND_DRIVER      0x00000005 /* driver is unbound
+                                                     from the device */
 
 extern struct kset *bus_get_kset(struct bus_type *bus);
 extern struct klist *bus_get_device_klist(struct bus_type *bus);
@@ -192,6 +194,7 @@ struct class {
        struct kobject                  *dev_kobj;
 
        int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
+       char *(*nodename)(struct device *dev);
 
        void (*class_release)(struct class *class);
        void (*dev_release)(struct device *dev);
@@ -287,6 +290,7 @@ struct device_type {
        const char *name;
        struct attribute_group **groups;
        int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+       char *(*nodename)(struct device *dev);
        void (*release)(struct device *dev);
 
        struct dev_pm_ops *pm;
@@ -486,6 +490,7 @@ extern struct device *device_find_child(struct device *dev, void *data,
 extern int device_rename(struct device *dev, char *new_name);
 extern int device_move(struct device *dev, struct device *new_parent,
                       enum dpm_order dpm_order);
+extern const char *device_get_nodename(struct device *dev, const char **tmp);
 
 /*
  * Root device objects for grouping under /sys/devices
index e61c0be2a45977fb61e48b0b0eb89f016b7c978d..6925249a5ac656a7cda3c104d04914816e303ff4 100644 (file)
@@ -78,12 +78,12 @@ static inline void eisa_driver_unregister (struct eisa_driver *edrv) { }
 /* Mimics pci.h... */
 static inline void *eisa_get_drvdata (struct eisa_device *edev)
 {
-        return edev->dev.driver_data;
+        return dev_get_drvdata(&edev->dev);
 }
 
 static inline void eisa_set_drvdata (struct eisa_device *edev, void *data)
 {
-        edev->dev.driver_data = data;
+        dev_set_drvdata(&edev->dev, data);
 }
 
 /* The EISA root device. There's rumours about machines with multiple
index 330c4b1bfcaa58b32907cd9f050c9cc67b097cb0..dd68358996b716da2b1be8e1ab9069b282ab9902 100644 (file)
@@ -677,6 +677,9 @@ struct fb_ops {
        /* get capability given var */
        void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
                            struct fb_var_screeninfo *var);
+
+       /* teardown any resources to do with this framebuffer */
+       void (*fb_destroy)(struct fb_info *info);
 };
 
 #ifdef CONFIG_FB_TILEBLITTING
@@ -786,6 +789,8 @@ struct fb_tile_ops {
 #define FBINFO_MISC_USEREVENT          0x10000 /* event request
                                                  from userspace */
 #define FBINFO_MISC_TILEBLITTING       0x20000 /* use tile blitting */
+#define FBINFO_MISC_FIRMWARE           0x40000 /* a replaceable firmware
+                                                 inited framebuffer */
 
 /* A driver may set this flag to indicate that it does want a set_par to be
  * called every time when fbcon_switch is executed. The advantage is that with
@@ -854,7 +859,12 @@ struct fb_info {
        u32 state;                      /* Hardware state i.e suspend */
        void *fbcon_par;                /* fbcon use-only private area */
        /* From here on everything is device dependent */
-       void *par;      
+       void *par;
+       /* we need the PCI or similiar aperture base/size not
+          smem_start/size as smem_start may just be an object
+          allocated inside the aperture so may not actually overlap */
+       resource_size_t aperture_base;
+       resource_size_t aperture_size;
 };
 
 #ifdef MODULE
@@ -893,7 +903,7 @@ struct fb_info {
 #define fb_writeq sbus_writeq
 #define fb_memset sbus_memset_io
 
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
 
 #define fb_readb __raw_readb
 #define fb_readw __raw_readw
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
new file mode 100644 (file)
index 0000000..e584b72
--- /dev/null
@@ -0,0 +1,358 @@
+#ifndef _LINUX_FIREWIRE_H
+#define _LINUX_FIREWIRE_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
+#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
+
+static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
+{
+       u32    *dst = _dst;
+       __be32 *src = _src;
+       int i;
+
+       for (i = 0; i < size / 4; i++)
+               dst[i] = be32_to_cpu(src[i]);
+}
+
+static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
+{
+       fw_memcpy_from_be32(_dst, _src, size);
+}
+#define CSR_REGISTER_BASE              0xfffff0000000ULL
+
+/* register offsets are relative to CSR_REGISTER_BASE */
+#define CSR_STATE_CLEAR                        0x0
+#define CSR_STATE_SET                  0x4
+#define CSR_NODE_IDS                   0x8
+#define CSR_RESET_START                        0xc
+#define CSR_SPLIT_TIMEOUT_HI           0x18
+#define CSR_SPLIT_TIMEOUT_LO           0x1c
+#define CSR_CYCLE_TIME                 0x200
+#define CSR_BUS_TIME                   0x204
+#define CSR_BUSY_TIMEOUT               0x210
+#define CSR_BUS_MANAGER_ID             0x21c
+#define CSR_BANDWIDTH_AVAILABLE                0x220
+#define CSR_CHANNELS_AVAILABLE         0x224
+#define CSR_CHANNELS_AVAILABLE_HI      0x224
+#define CSR_CHANNELS_AVAILABLE_LO      0x228
+#define CSR_BROADCAST_CHANNEL          0x234
+#define CSR_CONFIG_ROM                 0x400
+#define CSR_CONFIG_ROM_END             0x800
+#define CSR_FCP_COMMAND                        0xB00
+#define CSR_FCP_RESPONSE               0xD00
+#define CSR_FCP_END                    0xF00
+#define CSR_TOPOLOGY_MAP               0x1000
+#define CSR_TOPOLOGY_MAP_END           0x1400
+#define CSR_SPEED_MAP                  0x2000
+#define CSR_SPEED_MAP_END              0x3000
+
+#define CSR_OFFSET             0x40
+#define CSR_LEAF               0x80
+#define CSR_DIRECTORY          0xc0
+
+#define CSR_DESCRIPTOR         0x01
+#define CSR_VENDOR             0x03
+#define CSR_HARDWARE_VERSION   0x04
+#define CSR_NODE_CAPABILITIES  0x0c
+#define CSR_UNIT               0x11
+#define CSR_SPECIFIER_ID       0x12
+#define CSR_VERSION            0x13
+#define CSR_DEPENDENT_INFO     0x14
+#define CSR_MODEL              0x17
+#define CSR_INSTANCE           0x18
+#define CSR_DIRECTORY_ID       0x20
+
+struct fw_csr_iterator {
+       u32 *p;
+       u32 *end;
+};
+
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
+int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
+
+extern struct bus_type fw_bus_type;
+
+struct fw_card_driver;
+struct fw_node;
+
+struct fw_card {
+       const struct fw_card_driver *driver;
+       struct device *device;
+       struct kref kref;
+       struct completion done;
+
+       int node_id;
+       int generation;
+       int current_tlabel;
+       u64 tlabel_mask;
+       struct list_head transaction_list;
+       struct timer_list flush_timer;
+       unsigned long reset_jiffies;
+
+       unsigned long long guid;
+       unsigned max_receive;
+       int link_speed;
+       int config_rom_generation;
+
+       spinlock_t lock; /* Take this lock when handling the lists in
+                         * this struct. */
+       struct fw_node *local_node;
+       struct fw_node *root_node;
+       struct fw_node *irm_node;
+       u8 color; /* must be u8 to match the definition in struct fw_node */
+       int gap_count;
+       bool beta_repeaters_present;
+
+       int index;
+
+       struct list_head link;
+
+       /* Work struct for BM duties. */
+       struct delayed_work work;
+       int bm_retries;
+       int bm_generation;
+
+       bool broadcast_channel_allocated;
+       u32 broadcast_channel;
+       u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+};
+
+static inline struct fw_card *fw_card_get(struct fw_card *card)
+{
+       kref_get(&card->kref);
+
+       return card;
+}
+
+void fw_card_release(struct kref *kref);
+
+static inline void fw_card_put(struct fw_card *card)
+{
+       kref_put(&card->kref, fw_card_release);
+}
+
+struct fw_attribute_group {
+       struct attribute_group *groups[2];
+       struct attribute_group group;
+       struct attribute *attrs[12];
+};
+
+enum fw_device_state {
+       FW_DEVICE_INITIALIZING,
+       FW_DEVICE_RUNNING,
+       FW_DEVICE_GONE,
+       FW_DEVICE_SHUTDOWN,
+};
+
+/*
+ * Note, fw_device.generation always has to be read before fw_device.node_id.
+ * Use SMP memory barriers to ensure this.  Otherwise requests will be sent
+ * to an outdated node_id if the generation was updated in the meantime due
+ * to a bus reset.
+ *
+ * Likewise, fw-core will take care to update .node_id before .generation so
+ * that whenever fw_device.generation is current WRT the actual bus generation,
+ * fw_device.node_id is guaranteed to be current too.
+ *
+ * The same applies to fw_device.card->node_id vs. fw_device.generation.
+ *
+ * fw_device.config_rom and fw_device.config_rom_length may be accessed during
+ * the lifetime of any fw_unit belonging to the fw_device, before device_del()
+ * was called on the last fw_unit.  Alternatively, they may be accessed while
+ * holding fw_device_rwsem.
+ */
+struct fw_device {
+       atomic_t state;
+       struct fw_node *node;
+       int node_id;
+       int generation;
+       unsigned max_speed;
+       struct fw_card *card;
+       struct device device;
+
+       struct mutex client_list_mutex;
+       struct list_head client_list;
+
+       u32 *config_rom;
+       size_t config_rom_length;
+       int config_rom_retries;
+       unsigned is_local:1;
+       unsigned max_rec:4;
+       unsigned cmc:1;
+       unsigned irmc:1;
+       unsigned bc_implemented:2;
+
+       struct delayed_work work;
+       struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_device *fw_device(struct device *dev)
+{
+       return container_of(dev, struct fw_device, device);
+}
+
+static inline int fw_device_is_shutdown(struct fw_device *device)
+{
+       return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
+}
+
+static inline struct fw_device *fw_device_get(struct fw_device *device)
+{
+       get_device(&device->device);
+
+       return device;
+}
+
+static inline void fw_device_put(struct fw_device *device)
+{
+       put_device(&device->device);
+}
+
+int fw_device_enable_phys_dma(struct fw_device *device);
+
+/*
+ * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
+ */
+struct fw_unit {
+       struct device device;
+       u32 *directory;
+       struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_unit *fw_unit(struct device *dev)
+{
+       return container_of(dev, struct fw_unit, device);
+}
+
+static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
+{
+       get_device(&unit->device);
+
+       return unit;
+}
+
+static inline void fw_unit_put(struct fw_unit *unit)
+{
+       put_device(&unit->device);
+}
+
+static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
+{
+       return fw_device(unit->device.parent);
+}
+
+struct ieee1394_device_id;
+
+struct fw_driver {
+       struct device_driver driver;
+       /* Called when the parent device sits through a bus reset. */
+       void (*update)(struct fw_unit *unit);
+       const struct ieee1394_device_id *id_table;
+};
+
+struct fw_packet;
+struct fw_request;
+
+typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
+                                    struct fw_card *card, int status);
+typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
+                                         void *data, size_t length,
+                                         void *callback_data);
+/*
+ * Important note:  The callback must guarantee that either fw_send_response()
+ * or kfree() is called on the @request.
+ */
+typedef void (*fw_address_callback_t)(struct fw_card *card,
+                                     struct fw_request *request,
+                                     int tcode, int destination, int source,
+                                     int generation, int speed,
+                                     unsigned long long offset,
+                                     void *data, size_t length,
+                                     void *callback_data);
+
+struct fw_packet {
+       int speed;
+       int generation;
+       u32 header[4];
+       size_t header_length;
+       void *payload;
+       size_t payload_length;
+       dma_addr_t payload_bus;
+       u32 timestamp;
+
+       /*
+        * This callback is called when the packet transmission has
+        * completed; for successful transmission, the status code is
+        * the ack received from the destination, otherwise it's a
+        * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
+        * The callback can be called from tasklet context and thus
+        * must never block.
+        */
+       fw_packet_callback_t callback;
+       int ack;
+       struct list_head link;
+       void *driver_data;
+};
+
+struct fw_transaction {
+       int node_id; /* The generation is implied; it is always the current. */
+       int tlabel;
+       int timestamp;
+       struct list_head link;
+
+       struct fw_packet packet;
+
+       /*
+        * The data passed to the callback is valid only during the
+        * callback.
+        */
+       fw_transaction_callback_t callback;
+       void *callback_data;
+};
+
+struct fw_address_handler {
+       u64 offset;
+       size_t length;
+       fw_address_callback_t address_callback;
+       void *callback_data;
+       struct list_head link;
+};
+
+struct fw_address_region {
+       u64 start;
+       u64 end;
+};
+
+extern const struct fw_address_region fw_high_memory_region;
+
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+                               const struct fw_address_region *region);
+void fw_core_remove_address_handler(struct fw_address_handler *handler);
+void fw_send_response(struct fw_card *card,
+                     struct fw_request *request, int rcode);
+void fw_send_request(struct fw_card *card, struct fw_transaction *t,
+                    int tcode, int destination_id, int generation, int speed,
+                    unsigned long long offset, void *payload, size_t length,
+                    fw_transaction_callback_t callback, void *callback_data);
+int fw_cancel_transaction(struct fw_card *card,
+                         struct fw_transaction *transaction);
+int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+                      int generation, int speed, unsigned long long offset,
+                      void *payload, size_t length);
+
+#endif /* _LINUX_FIREWIRE_H */
index cca686b39123dca178f26512e5fcbf0db114a41b..875451f1373a3ce38bfbfd81699eddd4c26ccecf 100644 (file)
  */
 #ifdef CONFIG_FIRMWARE_MEMMAP
 
-int firmware_map_add(resource_size_t start, resource_size_t end,
-                    const char *type);
-int firmware_map_add_early(resource_size_t start, resource_size_t end,
-                          const char *type);
+int firmware_map_add(u64 start, u64 end, const char *type);
+int firmware_map_add_early(u64 start, u64 end, const char *type);
 
 #else /* CONFIG_FIRMWARE_MEMMAP */
 
-static inline int firmware_map_add(resource_size_t start, resource_size_t end,
-                                  const char *type)
+static inline int firmware_map_add(u64 start, u64 end, const char *type)
 {
        return 0;
 }
 
-static inline int firmware_map_add_early(resource_size_t start,
-                                        resource_size_t end, const char *type)
+static inline int firmware_map_add_early(u64 start, u64 end, const char *type)
 {
        return 0;
 }
index c8ecf5b2a207371d5662234ec4eb9be9d3420c14..d31544628436cb717b88079f7f99bf6e9b775b68 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/types.h>
 #include <linux/compiler.h>
 
-#define FIRMWARE_NAME_MAX 30 
 #define FW_ACTION_NOHOTPLUG 0
 #define FW_ACTION_HOTPLUG 1
 
index ede84fa7da5d07dacaeafdaf104107a06a4aa7fa..74a57938c8801dafadeea3e0519f4849f01afd91 100644 (file)
@@ -879,7 +879,7 @@ struct file_ra_state {
                                           there are only # of pages ahead */
 
        unsigned int ra_pages;          /* Maximum readahead window */
-       int mmap_miss;                  /* Cache miss stat for mmap accesses */
+       unsigned int mmap_miss;         /* Cache miss stat for mmap accesses */
        loff_t prev_pos;                /* Cache last read() position */
 };
 
@@ -1919,8 +1919,9 @@ extern void __init vfs_caches_init(unsigned long);
 
 extern struct kmem_cache *names_cachep;
 
-#define __getname()    kmem_cache_alloc(names_cachep, GFP_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#define __getname_gfp(gfp)     kmem_cache_alloc(names_cachep, (gfp))
+#define __getname()            __getname_gfp(GFP_KERNEL)
+#define __putname(name)                kmem_cache_free(names_cachep, (void *)(name))
 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name)   __putname(name)
 #else
@@ -2036,9 +2037,6 @@ extern int __invalidate_device(struct block_device *);
 extern int invalidate_partition(struct gendisk *, int);
 #endif
 extern int invalidate_inodes(struct super_block *);
-unsigned long __invalidate_mapping_pages(struct address_space *mapping,
-                                       pgoff_t start, pgoff_t end,
-                                       bool be_atomic);
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                        pgoff_t start, pgoff_t end);
 
index 7cbd38d363a2f051af1abbd30d7bbf90c066151e..45fc320a53c6f002fbb466d2b38315254b19ff94 100644 (file)
@@ -142,7 +142,7 @@ struct gendisk {
                                          * disks that can't be partitioned. */
 
        char disk_name[DISK_NAME_LEN];  /* name of major driver */
-
+       char *(*nodename)(struct gendisk *gd);
        /* Array of pointers to partitions indexed by partno.
         * Protected with matching bdev lock but stat and other
         * non-critical accesses use RCU.  Always access through
index 3760e7c5de0264aaec173486daca51035e80b252..cfdb35d71bcab52831d4de557312122d72a11810 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/stddef.h>
 #include <linux/linkage.h>
 #include <linux/topology.h>
+#include <linux/mmdebug.h>
 
 struct vm_area_struct;
 
@@ -20,7 +21,8 @@ struct vm_area_struct;
 #define __GFP_DMA      ((__force gfp_t)0x01u)
 #define __GFP_HIGHMEM  ((__force gfp_t)0x02u)
 #define __GFP_DMA32    ((__force gfp_t)0x04u)
-
+#define __GFP_MOVABLE  ((__force gfp_t)0x08u)  /* Page is movable */
+#define GFP_ZONEMASK   (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 /*
  * Action modifiers - doesn't change the zoning
  *
@@ -50,9 +52,20 @@ struct vm_area_struct;
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-#define __GFP_MOVABLE  ((__force gfp_t)0x100000u)  /* Page is movable */
 
-#define __GFP_BITS_SHIFT 21    /* Room for 21 __GFP_FOO bits */
+#ifdef CONFIG_KMEMCHECK
+#define __GFP_NOTRACK  ((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK  ((__force gfp_t)0)
+#endif
+
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 22    /* Room for 22 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -115,24 +128,105 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-static inline enum zone_type gfp_zone(gfp_t flags)
-{
+#ifdef CONFIG_HIGHMEM
+#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
+#else
+#define OPT_ZONE_HIGHMEM ZONE_NORMAL
+#endif
+
 #ifdef CONFIG_ZONE_DMA
-       if (flags & __GFP_DMA)
-               return ZONE_DMA;
+#define OPT_ZONE_DMA ZONE_DMA
+#else
+#define OPT_ZONE_DMA ZONE_NORMAL
 #endif
+
 #ifdef CONFIG_ZONE_DMA32
-       if (flags & __GFP_DMA32)
-               return ZONE_DMA32;
+#define OPT_ZONE_DMA32 ZONE_DMA32
+#else
+#define OPT_ZONE_DMA32 ZONE_NORMAL
 #endif
-       if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
-                       (__GFP_HIGHMEM | __GFP_MOVABLE))
-               return ZONE_MOVABLE;
-#ifdef CONFIG_HIGHMEM
-       if (flags & __GFP_HIGHMEM)
-               return ZONE_HIGHMEM;
+
+/*
+ * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
+ * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT long
+ * and there are 16 of them to cover all possible combinations of
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM
+ *
+ * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
+ * But GFP_MOVABLE is not only a zone specifier but also an allocation
+ * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
+ * Only 1 bit of the lowest 3 bits (DMA, DMA32, HIGHMEM) can be set to "1".
+ *
+ *       bit       result
+ *       =================
+ *       0x0    => NORMAL
+ *       0x1    => DMA or NORMAL
+ *       0x2    => HIGHMEM or NORMAL
+ *       0x3    => BAD (DMA+HIGHMEM)
+ *       0x4    => DMA32 or DMA or NORMAL
+ *       0x5    => BAD (DMA+DMA32)
+ *       0x6    => BAD (HIGHMEM+DMA32)
+ *       0x7    => BAD (HIGHMEM+DMA32+DMA)
+ *       0x8    => NORMAL (MOVABLE+0)
+ *       0x9    => DMA or NORMAL (MOVABLE+DMA)
+ *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
+ *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
+ *       0xc    => DMA32 (MOVABLE+HIGHMEM+DMA32)
+ *       0xd    => BAD (MOVABLE+DMA32+DMA)
+ *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
+ *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
+ *
+ * ZONES_SHIFT must be <= 2 on 32 bit platforms.
+ */
+
+#if 16 * ZONES_SHIFT > BITS_PER_LONG
+#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
+#endif
+
+#define GFP_ZONE_TABLE ( \
+       (ZONE_NORMAL << 0 * ZONES_SHIFT)                                \
+       | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)                     \
+       | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)             \
+       | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)                 \
+       | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)                  \
+       | (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)   \
+       | (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
+       | (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
+)
+
+/*
+ * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32
+ * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
+ * entry starting with bit 0. Bit is set if the combination is not
+ * allowed.
+ */
+#define GFP_ZONE_BAD ( \
+       1 << (__GFP_DMA | __GFP_HIGHMEM)                                \
+       | 1 << (__GFP_DMA | __GFP_DMA32)                                \
+       | 1 << (__GFP_DMA32 | __GFP_HIGHMEM)                            \
+       | 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)                \
+       | 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)              \
+       | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)                \
+       | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)            \
+       | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
+)
+
+static inline enum zone_type gfp_zone(gfp_t flags)
+{
+       enum zone_type z;
+       int bit = flags & GFP_ZONEMASK;
+
+       z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
+                                        ((1 << ZONES_SHIFT) - 1);
+
+       if (__builtin_constant_p(bit))
+               BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+       else {
+#ifdef CONFIG_DEBUG_VM
+               BUG_ON((GFP_ZONE_BAD >> bit) & 1);
 #endif
-       return ZONE_NORMAL;
+       }
+       return z;
 }
 
 /*
@@ -172,30 +266,19 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 #endif
 
 struct page *
-__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                       struct zonelist *zonelist, nodemask_t *nodemask);
 
 static inline struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order,
                struct zonelist *zonelist)
 {
-       return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
+       return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
 }
 
-static inline struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-               struct zonelist *zonelist, nodemask_t *nodemask)
-{
-       return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
-
-
 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
 {
-       if (unlikely(order >= MAX_ORDER))
-               return NULL;
-
        /* Unknown node is current node */
        if (nid < 0)
                nid = numa_node_id();
@@ -203,15 +286,20 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
+static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+                                               unsigned int order)
+{
+       VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+
+       return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
 #ifdef CONFIG_NUMA
 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
 
 static inline struct page *
 alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
-       if (unlikely(order >= MAX_ORDER))
-               return NULL;
-
        return alloc_pages_current(gfp_mask, order);
 }
 extern struct page *alloc_page_vma(gfp_t gfp_mask,
@@ -248,4 +336,16 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(void);
 void drain_local_pages(void *dummy);
 
+extern bool oom_killer_disabled;
+
+static inline void oom_killer_disable(void)
+{
+       oom_killer_disabled = true;
+}
+
+static inline void oom_killer_enable(void)
+{
+       oom_killer_disabled = false;
+}
+
 #endif /* __LINUX_GFP_H */
index 1fcb7126a01f1aa3e7a4c63abd46c4311946c843..211ff4497269e8228951524c8bf2b01a7d1dbeb3 100644 (file)
@@ -55,7 +55,9 @@ static inline void *kmap(struct page *page)
        return page_address(page);
 }
 
-#define kunmap(page) do { (void) (page); } while (0)
+static inline void kunmap(struct page *page)
+{
+}
 
 static inline void *kmap_atomic(struct page *page, enum km_type idx)
 {
index 03be7f29ca0153e8c1da31650b14ce3d95f59d80..a05a5ef33391d0e2ef816666f2bb08ca10fba491 100644 (file)
@@ -11,6 +11,8 @@
 
 struct ctl_table;
 
+int PageHuge(struct page *page);
+
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
        return vma->vm_flags & VM_HUGETLB;
@@ -61,6 +63,11 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
+static inline int PageHuge(struct page *page)
+{
+       return 0;
+}
+
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
        return 0;
index b2189803f19afb4ff111fed17bd56b0c514e3f43..8c2c9989626db737941654fcf8a57c60607f5053 100644 (file)
@@ -29,7 +29,7 @@
  * sign followed by value, e.g.:
  *
  * static int init_variable __initdata = 0;
- * static char linux_logo[] __initdata = { 0x32, 0x36, ... };
+ * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
  *
  * Don't forget to initialize data not at file scope, i.e. within a function,
  * as gcc otherwise puts the data into the bss section and not into the init
index 28b1f30601b555d6f12532a9223428244dad7e87..5368fbdc78018c573c9f20a1dab76adbc8cb0f24 100644 (file)
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
-#define INIT_MM(name) \
-{                                                              \
-       .mm_rb          = RB_ROOT,                              \
-       .pgd            = swapper_pg_dir,                       \
-       .mm_users       = ATOMIC_INIT(2),                       \
-       .mm_count       = ATOMIC_INIT(1),                       \
-       .mmap_sem       = __RWSEM_INITIALIZER(name.mmap_sem),   \
-       .page_table_lock =  __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
-       .mmlist         = LIST_HEAD_INIT(name.mmlist),          \
-       .cpu_vm_mask    = CPU_MASK_ALL,                         \
-}
-
 #define INIT_SIGNALS(sig) {                                            \
        .count          = ATOMIC_INIT(1),                               \
        .wait_chldexit  = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
index c41e812e9d5ef17fcc894c0e316f5c9da95e486f..2721f07e93548150a195123bb13e02e1bfa4664b 100644 (file)
@@ -472,6 +472,20 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
                __tasklet_hi_schedule(t);
 }
 
+extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
+
+/*
+ * This version avoids touching any other tasklets. Needed for kmemcheck
+ * in order not to take any page faults while enqueueing this tasklet;
+ * consider VERY carefully whether you really need this or
+ * tasklet_hi_schedule()...
+ */
+static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+               __tasklet_hi_schedule_first(t);
+}
+
 
 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 {
index 1b2e1747df1a02f8cc4ef308fc8a90c2aeb00a83..c5a71c38a95f5521b17aef7ca178e43e2ca11b0e 100644 (file)
@@ -408,7 +408,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
  *
  * Use tracing_on/tracing_off when you want to quickly turn on or off
  * tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space debugfs/tracing/tracing_on
+ * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
  * file, which gives a means for the kernel and userspace to interact.
  * Place a tracing_off() in the kernel where you want tracing to end.
  * From user space, examine the trace, and then echo 1 > tracing_on
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
new file mode 100644 (file)
index 0000000..47b39b7
--- /dev/null
@@ -0,0 +1,153 @@
+#ifndef LINUX_KMEMCHECK_H
+#define LINUX_KMEMCHECK_H
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KMEMCHECK
+extern int kmemcheck_enabled;
+
+/* The slab-related functions. */
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+                         size_t size);
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
+
+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+                              gfp_t gfpflags);
+
+void kmemcheck_show_pages(struct page *p, unsigned int n);
+void kmemcheck_hide_pages(struct page *p, unsigned int n);
+
+bool kmemcheck_page_is_tracked(struct page *p);
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n);
+void kmemcheck_mark_uninitialized(void *address, unsigned int n);
+void kmemcheck_mark_initialized(void *address, unsigned int n);
+void kmemcheck_mark_freed(void *address, unsigned int n);
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
+
+int kmemcheck_show_addr(unsigned long address);
+int kmemcheck_hide_addr(unsigned long address);
+
+#else
+#define kmemcheck_enabled 0
+
+static inline void
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+}
+
+static inline void
+kmemcheck_free_shadow(struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+                    size_t size)
+{
+}
+
+static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
+                                      size_t size)
+{
+}
+
+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+       unsigned int order, gfp_t gfpflags)
+{
+}
+
+static inline bool kmemcheck_page_is_tracked(struct page *p)
+{
+       return false;
+}
+
+static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+                                                   unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+                                                     unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+                                                   unsigned int n)
+{
+}
+
+#endif /* CONFIG_KMEMCHECK */
+
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ *     struct a {
+ *             int x:8, y:8;
+ *     };
+ *
+ * then this should be rewritten as
+ *
+ *     struct a {
+ *             kmemcheck_bitfield_begin(flags);
+ *             int x:8, y:8;
+ *             kmemcheck_bitfield_end(flags);
+ *     };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ *     kmemcheck_annotate_bitfield(a, flags);
+ *
+ * Note: We provide the same definitions for both kmemcheck and non-
+ * kmemcheck kernels. This makes it harder to introduce accidental errors. It
+ * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
+ */
+#define kmemcheck_bitfield_begin(name) \
+       int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name)   \
+       int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name)                         \
+       do if (ptr) {                                                   \
+               int _n = (long) &((ptr)->name##_end)                    \
+                       - (long) &((ptr)->name##_begin);                \
+               BUILD_BUG_ON(_n < 0);                                   \
+                                                                       \
+               kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+       } while (0)
+
+#define kmemcheck_annotate_variable(var)                               \
+       do {                                                            \
+               kmemcheck_mark_initialized(&(var), sizeof(var));        \
+       } while (0)                                                     \
+
+#endif /* LINUX_KMEMCHECK_H */
index 08a92969c76e09f99ad546ec4f8a84aa8b6ed917..ca5bd91d12e169cd80eb6a546fe73c35df8ead91 100644 (file)
@@ -32,6 +32,22 @@ struct linux_logo {
        const unsigned char *data;
 };
 
+extern const struct linux_logo logo_linux_mono;
+extern const struct linux_logo logo_linux_vga16;
+extern const struct linux_logo logo_linux_clut224;
+extern const struct linux_logo logo_blackfin_vga16;
+extern const struct linux_logo logo_blackfin_clut224;
+extern const struct linux_logo logo_dec_clut224;
+extern const struct linux_logo logo_mac_clut224;
+extern const struct linux_logo logo_parisc_clut224;
+extern const struct linux_logo logo_sgi_clut224;
+extern const struct linux_logo logo_sun_clut224;
+extern const struct linux_logo logo_superh_mono;
+extern const struct linux_logo logo_superh_vga16;
+extern const struct linux_logo logo_superh_clut224;
+extern const struct linux_logo logo_m32r_clut224;
+extern const struct linux_logo logo_spe_clut224;
+
 extern const struct linux_logo *fb_find_logo(int depth);
 #ifdef CONFIG_FB_LOGO_EXTRA
 extern void fb_append_extra_logo(const struct linux_logo *logo,
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
new file mode 100644 (file)
index 0000000..ad651f4
--- /dev/null
@@ -0,0 +1,39 @@
+#ifndef __LIS3LV02D_H_
+#define __LIS3LV02D_H_
+
+struct lis3lv02d_platform_data {
+       /* please note: the 'click' feature is only supported for
+        * LIS[32]02DL variants of the chip and will be ignored for
+        * others */
+#define LIS3_CLICK_SINGLE_X    (1 << 0)
+#define LIS3_CLICK_DOUBLE_X    (1 << 1)
+#define LIS3_CLICK_SINGLE_Y    (1 << 2)
+#define LIS3_CLICK_DOUBLE_Y    (1 << 3)
+#define LIS3_CLICK_SINGLE_Z    (1 << 4)
+#define LIS3_CLICK_DOUBLE_Z    (1 << 5)
+       unsigned char click_flags;
+       unsigned char click_thresh_x;
+       unsigned char click_thresh_y;
+       unsigned char click_thresh_z;
+       unsigned char click_time_limit;
+       unsigned char click_latency;
+       unsigned char click_window;
+
+#define LIS3_IRQ1_DISABLE      (0 << 0)
+#define LIS3_IRQ1_FF_WU_1      (1 << 0)
+#define LIS3_IRQ1_FF_WU_2      (2 << 0)
+#define LIS3_IRQ1_FF_WU_12     (3 << 0)
+#define LIS3_IRQ1_DATA_READY   (4 << 0)
+#define LIS3_IRQ1_CLICK                (7 << 0)
+#define LIS3_IRQ2_DISABLE      (0 << 3)
+#define LIS3_IRQ2_FF_WU_1      (1 << 3)
+#define LIS3_IRQ2_FF_WU_2      (2 << 3)
+#define LIS3_IRQ2_FF_WU_12     (3 << 3)
+#define LIS3_IRQ2_DATA_READY   (4 << 3)
+#define LIS3_IRQ2_CLICK                (7 << 3)
+#define LIS3_IRQ_OPEN_DRAIN    (1 << 6)
+#define LIS3_IRQ_ACTIVE_HIGH   (1 << 7)
+       unsigned char irq_cfg;
+};
+
+#endif /* __LIS3LV02D_H_ */
index 058ec15dd060341391277f6f82f479b151a494eb..6a8ca98c9a962ee67fbcf3da94fe0222f6885c2c 100644 (file)
 #define UNIX98_PTY_MAJOR_COUNT 8
 #define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT)
 
+#define DRBD_MAJOR             147
 #define RTF_MAJOR              150
 #define RAW_MAJOR              162
 
index 25b9ca93d2327e942da46f8d9545a7a514c6ed13..45add35dda1b5da25a1ad7d9ff88c94687cb3cad 100644 (file)
@@ -94,6 +94,7 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
                                                        int priority);
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
                                       struct zone *zone,
                                       enum lru_list lru);
@@ -239,6 +240,12 @@ mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
        return 1;
 }
 
+static inline int
+mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+       return 1;
+}
+
 static inline unsigned long
 mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
                         enum lru_list lru)
index beb6ec99cfefb768b4ff43488c1a6137715eda15..052117744629bfd05e300528f99e5675574dda3e 100644 (file)
@@ -41,6 +41,7 @@ struct miscdevice  {
        struct list_head list;
        struct device *parent;
        struct device *this_device;
+       const char *devnode;
 };
 
 extern int misc_register(struct miscdevice * misc);
index ad613ed66ab07b60e0f859eba21be6c6c4a1202e..d88d6fc530ade07b88272c033804404c064c2440 100644 (file)
@@ -7,7 +7,6 @@
 
 #include <linux/gfp.h>
 #include <linux/list.h>
-#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
@@ -725,7 +724,7 @@ static inline int shmem_lock(struct file *file, int lock,
        return 0;
 }
 #endif
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
 
 int shmem_zero_setup(struct vm_area_struct *);
 
@@ -793,6 +792,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
                unsigned int flags, unsigned long *prot, resource_size_t *phys);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -824,8 +825,11 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
-               int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                       unsigned long start, int len, int write, int force,
+                       struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                       struct page **pages);
 
 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -849,19 +853,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
                          struct vm_area_struct **pprev, unsigned long start,
                          unsigned long end, unsigned long newflags);
 
-/*
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm (force=0 and doesn't return any vmas).
- *
- * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
- * can be made about locking. get_user_pages_fast is to be implemented in a
- * way that is advantageous (vs get_user_pages()) when the user memory area is
- * already faulted in and present in ptes. However if the pages have to be
- * faulted in, it may turn out to be slightly slower).
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-                       struct page **pages);
-
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
@@ -1061,7 +1052,8 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn);
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
                                unsigned long, enum memmap_context);
-extern void setup_per_zone_pages_min(void);
+extern void setup_per_zone_wmarks(void);
+extern void calculate_zone_inactive_ratio(struct zone *zone);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(void);
@@ -1178,8 +1170,6 @@ void task_dirty_inc(struct task_struct *tsk);
 #define VM_MAX_READAHEAD       128     /* kbytes */
 #define VM_MIN_READAHEAD       16      /* kbytes (includes current page) */
 
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-                       pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read);
 
@@ -1197,6 +1187,9 @@ void page_cache_async_readahead(struct address_space *mapping,
                                unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+                       struct address_space *mapping,
+                       struct file *filp);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
index 0e80e26ecf21220104d8d2abbeb9cca6a1215e6e..7acc8439d9b305caad3d8354ee7ad43e0a3de465 100644 (file)
@@ -98,6 +98,14 @@ struct page {
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
        unsigned long debug_flags;      /* Use atomic bitops on this */
 #endif
+
+#ifdef CONFIG_KMEMCHECK
+       /*
+        * kmemcheck wants to track the status of each byte in a page; this
+        * is a pointer to such a status block. NULL if not tracked.
+        */
+       void *shadow;
+#endif
 };
 
 /*
@@ -232,6 +240,8 @@ struct mm_struct {
 
        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
+       s8 oom_adj;     /* OOM kill score adjustment (bit shift) */
+
        cpumask_t cpu_vm_mask;
 
        /* Architecture-specific MM context */
index a47c879e1304cc19397543403aa068bd02433f29..889598537370b6f1de72e672efd55b6f3db1b377 100644 (file)
@@ -50,9 +50,6 @@ extern int page_group_by_mobility_disabled;
 
 static inline int get_pageblock_migratetype(struct page *page)
 {
-       if (unlikely(page_group_by_mobility_disabled))
-               return MIGRATE_UNMOVABLE;
-
        return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
 }
 
@@ -86,13 +83,8 @@ enum zone_stat_item {
        NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
-#ifdef CONFIG_UNEVICTABLE_LRU
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
-#else
-       NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
-       NR_MLOCK = NR_ACTIVE_FILE,
-#endif
        NR_ANON_PAGES,  /* Mapped anonymous pages */
        NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
                           only modified from process context */
@@ -135,11 +127,7 @@ enum lru_list {
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-#ifdef CONFIG_UNEVICTABLE_LRU
        LRU_UNEVICTABLE,
-#else
-       LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
-#endif
        NR_LRU_LISTS
 };
 
@@ -159,13 +147,20 @@ static inline int is_active_lru(enum lru_list l)
 
 static inline int is_unevictable_lru(enum lru_list l)
 {
-#ifdef CONFIG_UNEVICTABLE_LRU
        return (l == LRU_UNEVICTABLE);
-#else
-       return 0;
-#endif
 }
 
+enum zone_watermarks {
+       WMARK_MIN,
+       WMARK_LOW,
+       WMARK_HIGH,
+       NR_WMARK
+};
+
+#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
+#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
+#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+
 struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
@@ -278,7 +273,10 @@ struct zone_reclaim_stat {
 
 struct zone {
        /* Fields commonly accessed by the page allocator */
-       unsigned long           pages_min, pages_low, pages_high;
+
+       /* zone watermarks, access with *_wmark_pages(zone) macros */
+       unsigned long watermark[NR_WMARK];
+
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
@@ -323,9 +321,9 @@ struct zone {
 
        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t              lru_lock;       
-       struct {
+       struct zone_lru {
                struct list_head list;
-               unsigned long nr_scan;
+               unsigned long nr_saved_scan;    /* accumulated for batching */
        } lru[NR_LRU_LISTS];
 
        struct zone_reclaim_stat reclaim_stat;
index a7bc6e7b43a7c429b9e4993933a900057034cf77..505f20dcc1c7bcc4c19aab80e7d1353c5970b4c4 100644 (file)
@@ -697,4 +697,21 @@ static inline void module_remove_modinfo_attrs(struct module *mod)
 
 #define __MODULE_STRING(x) __stringify(x)
 
+
+#ifdef CONFIG_GENERIC_BUG
+int  module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+                        struct module *);
+void module_bug_cleanup(struct module *);
+
+#else  /* !CONFIG_GENERIC_BUG */
+
+static inline int  module_bug_finalize(const Elf_Ehdr *hdr,
+                                       const Elf_Shdr *sechdrs,
+                                       struct module *mod)
+{
+       return 0;
+}
+static inline void module_bug_cleanup(struct module *mod) {}
+#endif /* CONFIG_GENERIC_BUG */
+
 #endif /* _LINUX_MODULE_H */
index 52b1a76c1b431520773888b0861d5a61163d408f..d47beef08dfdd59374c9d4b41309d99ba9682fd0 100644 (file)
@@ -3,8 +3,23 @@
 
 #include <linux/init.h>
 
-/* unicode character */
-typedef __u16 wchar_t;
+/* Unicode has changed over the years.  Unicode code points no longer
+ * fit into 16 bits; as of Unicode 5 valid code points range from 0
+ * to 0x10ffff (17 planes, where each plane holds 65536 code points).
+ *
+ * The original decision to represent Unicode characters as 16-bit
+ * wchar_t values is now outdated.  But plane 0 still includes the
+ * most commonly used characters, so we will retain it.  The newer
+ * 32-bit unicode_t type can be used when it is necessary to
+ * represent the full Unicode character set.
+ */
+
+/* Plane-0 Unicode character */
+typedef u16 wchar_t;
+#define MAX_WCHAR_T    0xffff
+
+/* Arbitrary Unicode character */
+typedef u32 unicode_t;
 
 struct nls_table {
        const char *charset;
@@ -21,6 +36,13 @@ struct nls_table {
 /* this value hold the maximum octet of charset */
 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
 
+/* Byte order for UTF-16 strings */
+enum utf16_endian {
+       UTF16_HOST_ENDIAN,
+       UTF16_LITTLE_ENDIAN,
+       UTF16_BIG_ENDIAN
+};
+
 /* nls.c */
 extern int register_nls(struct nls_table *);
 extern int unregister_nls(struct nls_table *);
@@ -28,10 +50,11 @@ extern struct nls_table *load_nls(char *);
 extern void unload_nls(struct nls_table *);
 extern struct nls_table *load_nls_default(void);
 
-extern int utf8_mbtowc(wchar_t *, const __u8 *, int);
-extern int utf8_mbstowcs(wchar_t *, const __u8 *, int);
-extern int utf8_wctomb(__u8 *, wchar_t, int);
-extern int utf8_wcstombs(__u8 *, const wchar_t *, int);
+extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
+extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
+extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs);
+extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
+               enum utf16_endian endian, u8 *s, int maxlen);
 
 static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c)
 {
index 848025cd708783795c96a35c08a6176d0fe84a9f..829b94b156f28ec0ed7c0d8dbc2dfeab4ea6825b 100644 (file)
@@ -408,6 +408,19 @@ static inline int num_node_state(enum node_states state)
 #define next_online_node(nid)  next_node((nid), node_states[N_ONLINE])
 
 extern int nr_node_ids;
+extern int nr_online_nodes;
+
+static inline void node_set_online(int nid)
+{
+       node_set_state(nid, N_ONLINE);
+       nr_online_nodes = num_node_state(N_ONLINE);
+}
+
+static inline void node_set_offline(int nid)
+{
+       node_clear_state(nid, N_ONLINE);
+       nr_online_nodes = num_node_state(N_ONLINE);
+}
 #else
 
 static inline int node_state(int node, enum node_states state)
@@ -434,7 +447,10 @@ static inline int num_node_state(enum node_states state)
 #define first_online_node      0
 #define next_online_node(nid)  (MAX_NUMNODES)
 #define nr_node_ids            1
+#define nr_online_nodes                1
 
+#define node_set_online(node)     node_set_state((node), N_ONLINE)
+#define node_set_offline(node)    node_clear_state((node), N_ONLINE)
 #endif
 
 #define node_online_map        node_states[N_ONLINE]
@@ -454,9 +470,6 @@ static inline int num_node_state(enum node_states state)
 #define node_online(node)      node_state((node), N_ONLINE)
 #define node_possible(node)    node_state((node), N_POSSIBLE)
 
-#define node_set_online(node)     node_set_state((node), N_ONLINE)
-#define node_set_offline(node)    node_clear_state((node), N_ONLINE)
-
 #define for_each_node(node)       for_each_node_state(node, N_POSSIBLE)
 #define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
 
index 62214c7d2d939e734352bf888e058001e44ccc30..d6792f88a1769c0ece3dbf7084fb6c8072a150b4 100644 (file)
@@ -95,9 +95,7 @@ enum pageflags {
        PG_reclaim,             /* To be reclaimed asap */
        PG_buddy,               /* Page is free, on buddy lists */
        PG_swapbacked,          /* Page is backed by RAM/swap */
-#ifdef CONFIG_UNEVICTABLE_LRU
        PG_unevictable,         /* Page is "unevictable"  */
-#endif
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
        PG_mlocked,             /* Page is vma mlocked */
 #endif
@@ -248,14 +246,8 @@ PAGEFLAG_FALSE(SwapCache)
        SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
        TESTCLEARFLAG(Unevictable, unevictable)
-#else
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
-       SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
-       __CLEARPAGEFLAG_NOOP(Unevictable)
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define MLOCK_PAGES 1
@@ -382,12 +374,6 @@ static inline void __ClearPageTail(struct page *page)
 
 #endif /* !PAGEFLAGS_EXTENDED */
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-#define __PG_UNEVICTABLE       (1 << PG_unevictable)
-#else
-#define __PG_UNEVICTABLE       0
-#endif
-
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 #define __PG_MLOCKED           (1 << PG_mlocked)
 #else
@@ -403,7 +389,7 @@ static inline void __ClearPageTail(struct page *page)
         1 << PG_private | 1 << PG_private_2 | \
         1 << PG_buddy   | 1 << PG_writeback | 1 << PG_reserved | \
         1 << PG_slab    | 1 << PG_swapcache | 1 << PG_active | \
-        __PG_UNEVICTABLE | __PG_MLOCKED)
+        1 << PG_unevictable | __PG_MLOCKED)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
index 34da5230faab4617cfe9bba4c43ac83cc083ab12..aec3252afcf5f4f48df2bedc9f3747f62a241bc5 100644 (file)
@@ -22,9 +22,7 @@ enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
-#ifdef CONFIG_UNEVICTABLE_LRU
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
-#endif
 };
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -37,8 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
        }
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
-
 static inline void mapping_set_unevictable(struct address_space *mapping)
 {
        set_bit(AS_UNEVICTABLE, &mapping->flags);
@@ -55,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
 }
-#else
-static inline void mapping_set_unevictable(struct address_space *mapping) { }
-static inline void mapping_clear_unevictable(struct address_space *mapping) { }
-static inline int mapping_unevictable(struct address_space *mapping)
-{
-       return 0;
-}
-#endif
 
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
index 72698d89e767c76909fb0b4cdb65f292c93e0b76..8e366bb0705fc5e4c386d5dea5b2a71f0bbeb58e 100644 (file)
@@ -124,6 +124,14 @@ typedef int __bitwise pci_power_t;
 #define PCI_UNKNOWN    ((pci_power_t __force) 5)
 #define PCI_POWER_ERROR        ((pci_power_t __force) -1)
 
+/* Remember to update this when the list above changes! */
+extern const char *pci_power_names[];
+
+static inline const char *pci_power_name(pci_power_t state)
+{
+       return pci_power_names[1 + (int) state];
+}
+
 #define PCI_PM_D2_DELAY        200
 #define PCI_PM_D3_WAIT 10
 #define PCI_PM_BUS_WAIT        50
index aa01d38c9971a745feec8bb70b0fa93392dcc08e..a3b0003657955da3651542059572387fda381308 100644 (file)
 #define PCI_CLASS_SERIAL_USB_UHCI      0x0c0300
 #define PCI_CLASS_SERIAL_USB_OHCI      0x0c0310
 #define PCI_CLASS_SERIAL_USB_EHCI      0x0c0320
+#define PCI_CLASS_SERIAL_USB_XHCI      0x0c0330
 #define PCI_CLASS_SERIAL_FIBER         0x0c04
 #define PCI_CLASS_SERIAL_SMBUS         0x0c05
 
index b67bb5d7b2211208da085524a1e2cdf2fdb41c00..8dc5123b63057f3e16ca400847477282a993e892 100644 (file)
@@ -36,8 +36,8 @@ extern struct device platform_bus;
 
 extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int);
 extern int platform_get_irq(struct platform_device *, unsigned int);
-extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, char *);
-extern int platform_get_irq_byname(struct platform_device *, char *);
+extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *);
+extern int platform_get_irq_byname(struct platform_device *, const char *);
 extern int platform_add_devices(struct platform_device **, int);
 
 extern struct platform_device *platform_device_register_simple(const char *, int id,
index 8c24ef8d99769520e737f2735bbf9dc8f1931db7..fa287f25138dc2a243e27825f92d670b210a4b6b 100644 (file)
@@ -32,6 +32,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
 
 typedef struct poll_table_struct {
        poll_queue_proc qproc;
+       unsigned long key;
 } poll_table;
 
 static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -43,10 +44,12 @@ static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_addres
 static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
 {
        pt->qproc = qproc;
+       pt->key   = ~0UL; /* all events enabled */
 }
 
 struct poll_table_entry {
        struct file *filp;
+       unsigned long key;
        wait_queue_t wait;
        wait_queue_head_t *wait_address;
 };
index 355f6e80db0d3e8afd4de9d4bcf447ec4e637ea3..c5da7491809655fde7650c0ba65753988b41c4ab 100644 (file)
@@ -167,6 +167,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
                        unsigned long first_index, unsigned int max_items);
 unsigned long radix_tree_next_hole(struct radix_tree_root *root,
                                unsigned long index, unsigned long max_scan);
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+                               unsigned long index, unsigned long max_scan);
 int radix_tree_preload(gfp_t gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
index 8670f1575fe19abe7793b700aa70d525080c83bf..29f8599e6bea0db5ef5e963511925138a06b2ad1 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_RING_BUFFER_H
 #define _LINUX_RING_BUFFER_H
 
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>
 
@@ -11,7 +12,10 @@ struct ring_buffer_iter;
  * Don't refer to this struct directly, use functions below.
  */
 struct ring_buffer_event {
+       kmemcheck_bitfield_begin(bitfield);
        u32             type_len:5, time_delta:27;
+       kmemcheck_bitfield_end(bitfield);
+
        u32             array[];
 };
 
index b35bc0e19cd9a69ee1bedc7e2e98253b40a486be..216d024f830d2f272d4b38a768819c00bc7239d1 100644 (file)
@@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
+int page_referenced(struct page *, int is_locked,
+                       struct mem_cgroup *cnt, unsigned long *vm_flags);
 int try_to_unmap(struct page *, int ignore_refs);
 
 /*
@@ -105,18 +106,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
  */
 int page_mkclean(struct page *);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * called in munlock()/munmap() path to check for other vmas holding
  * the page mlocked.
  */
 int try_to_munlock(struct page *);
-#else
-static inline int try_to_munlock(struct page *page)
-{
-       return 0;       /* a.k.a. SWAP_SUCCESS */
-}
-#endif
 
 #else  /* !CONFIG_MMU */
 
@@ -124,7 +118,7 @@ static inline int try_to_munlock(struct page *page)
 #define anon_vma_prepare(vma)  (0)
 #define anon_vma_link(vma)     do {} while (0)
 
-#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
+#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
index c900aa530070d7c08ad4a0851e684ac6931c98b9..02042e7f21965c3e7b4bbf43f8b5e996076f6f4f 100644 (file)
@@ -674,7 +674,7 @@ struct user_struct {
        struct task_group *tg;
 #ifdef CONFIG_SYSFS
        struct kobject kobj;
-       struct work_struct work;
+       struct delayed_work work;
 #endif
 #endif
 
@@ -1178,7 +1178,6 @@ struct task_struct {
         * a short time
         */
        unsigned char fpu_counter;
-       s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
 #ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
 #endif
@@ -1318,7 +1317,8 @@ struct task_struct {
 /* Thread group tracking */
        u32 parent_exec_id;
        u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
+/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
+ * mempolicy */
        spinlock_t alloc_lock;
 
 #ifdef CONFIG_GENERIC_HARDIRQS
@@ -1386,8 +1386,7 @@ struct task_struct {
        cputime_t acct_timexpd; /* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
-       nodemask_t mems_allowed;
-       int cpuset_mems_generation;
+       nodemask_t mems_allowed;        /* Protected by alloc_lock */
        int cpuset_mem_spread_rotor;
 #endif
 #ifdef CONFIG_CGROUPS
@@ -1410,7 +1409,7 @@ struct task_struct {
        struct list_head perf_counter_list;
 #endif
 #ifdef CONFIG_NUMA
-       struct mempolicy *mempolicy;
+       struct mempolicy *mempolicy;    /* Protected by alloc_lock */
        short il_next;
 #endif
        atomic_t fs_excl;       /* holding fs exclusive resources */
index fa51293f270811832b97e8f660f979ae14e0bc44..63ef24bc01d0eb728ab2ff3d7544b6c8380c8fcf 100644 (file)
@@ -15,6 +15,7 @@
 #define _LINUX_SKBUFF_H
 
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/cache.h>
@@ -343,6 +344,7 @@ struct sk_buff {
                };
        };
        __u32                   priority;
+       kmemcheck_bitfield_begin(flags1);
        __u8                    local_df:1,
                                cloned:1,
                                ip_summed:2,
@@ -353,6 +355,7 @@ struct sk_buff {
                                ipvs_property:1,
                                peeked:1,
                                nf_trace:1;
+       kmemcheck_bitfield_end(flags1);
        __be16                  protocol;
 
        void                    (*destructor)(struct sk_buff *skb);
@@ -372,12 +375,16 @@ struct sk_buff {
        __u16                   tc_verd;        /* traffic control verdict */
 #endif
 #endif
+
+       kmemcheck_bitfield_begin(flags2);
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    ndisc_nodetype:2;
 #endif
 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
        __u8                    do_not_encrypt:1;
 #endif
+       kmemcheck_bitfield_end(flags2);
+
        /* 0/13/14 bit hole */
 
 #ifdef CONFIG_NET_DMA
index 219b8fb4651dd1b15d4c2fe9296c01fef9e2b1e2..2da8372519f5e96a32027a9be8b0efc66f35b907 100644 (file)
 
 #define SLAB_NOLEAKTRACE       0x00800000UL    /* Avoid kmemleak tracing */
 
+/* Don't track use of uninitialized memory */
+#ifdef CONFIG_KMEMCHECK
+# define SLAB_NOTRACK          0x01000000UL
+#else
+# define SLAB_NOTRACK          0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT   0x00020000UL            /* Objects are reclaimable */
 #define SLAB_TEMPORARY         SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
index 713f841ecaa914e74aead8e4d8ff5d74cb5040d3..850d057500dece3f52497b2bed3aab51dd677852 100644 (file)
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>
 
+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+       struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+       unsigned int batchcount;
+       unsigned int limit;
+       unsigned int shared;
+
+       unsigned int buffer_size;
+       u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+       unsigned int flags;             /* constant flags */
+       unsigned int num;               /* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+       /* order of pgs per slab (2^n) */
+       unsigned int gfporder;
+
+       /* force GFP flags, e.g. GFP_DMA */
+       gfp_t gfpflags;
+
+       size_t colour;                  /* cache colouring range */
+       unsigned int colour_off;        /* colour offset */
+       struct kmem_cache *slabp_cache;
+       unsigned int slab_size;
+       unsigned int dflags;            /* dynamic flags */
+
+       /* constructor func */
+       void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+       const char *name;
+       struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+       unsigned long num_active;
+       unsigned long num_allocations;
+       unsigned long high_mark;
+       unsigned long grown;
+       unsigned long reaped;
+       unsigned long errors;
+       unsigned long max_freeable;
+       unsigned long node_allocs;
+       unsigned long node_frees;
+       unsigned long node_overflow;
+       atomic_t allochit;
+       atomic_t allocmiss;
+       atomic_t freehit;
+       atomic_t freemiss;
+
+       /*
+        * If debugging is enabled, then the allocator can add additional
+        * fields and/or padding to every object. buffer_size contains the total
+        * object size including these internal fields, the following two
+        * variables contain the offset to the user object and its size.
+        */
+       int obj_offset;
+       int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+       /*
+        * We put nodelists[] at the end of kmem_cache, because we want to size
+        * this array to nr_node_ids slots instead of MAX_NUMNODES
+        * (see kmem_cache_init())
+        * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+        * is statically defined, so we reserve the max number of nodes.
+        */
+       struct kmem_list3 *nodelists[MAX_NUMNODES];
+       /*
+        * Do not add fields after nodelists[]
+        */
+};
+
 /* Size description struct for general caches. */
 struct cache_sizes {
        size_t                  cs_size;
index a69db820eed6b33bfe71e6f946536209dd935372..9e3d8af09207c9ec5ecf58dbb6c0034200947158 100644 (file)
@@ -177,7 +177,6 @@ static inline void init_call_single_data(void)
 
 #define get_cpu()              ({ preempt_disable(); smp_processor_id(); })
 #define put_cpu()              preempt_enable()
-#define put_cpu_no_resched()   preempt_enable_no_resched()
 
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
index 1a8cecc4f38cd2b91a90f267a54fd39bb4de196b..51efbef38fb0e204cfddb61b56619d52cefab623 100644 (file)
@@ -4,6 +4,8 @@
 struct task_struct;
 
 #ifdef CONFIG_STACKTRACE
+struct task_struct;
+
 struct stack_trace {
        unsigned int nr_entries, max_entries;
        unsigned long *entries;
@@ -11,6 +13,7 @@ struct stack_trace {
 };
 
 extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
                                struct stack_trace *trace);
 
index d476aad3ff577faf4fe7883dc7b33a3aea99b38a..0cedf31af0b0ed4cd230c2eb9cc2cf1a8c5e95af 100644 (file)
@@ -129,9 +129,10 @@ enum {
 
 #define SWAP_CLUSTER_MAX 32
 
-#define SWAP_MAP_MAX   0x7fff
-#define SWAP_MAP_BAD   0x8000
-
+#define SWAP_MAP_MAX   0x7ffe
+#define SWAP_MAP_BAD   0x7fff
+#define SWAP_HAS_CACHE  0x8000         /* There is a swap cache of entry. */
+#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE)
 /*
  * The in-memory structure used to track swap areas.
  */
@@ -235,7 +236,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
 extern void scan_mapping_unevictable_pages(struct address_space *);
 
@@ -244,24 +244,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
 extern int scan_unevictable_register_node(struct node *node);
 extern void scan_unevictable_unregister_node(struct node *node);
-#else
-static inline int page_evictable(struct page *page,
-                                               struct vm_area_struct *vma)
-{
-       return 1;
-}
-
-static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-}
-
-static inline int scan_unevictable_register_node(struct node *node)
-{
-       return 0;
-}
-
-static inline void scan_unevictable_unregister_node(struct node *node) { }
-#endif
 
 extern int kswapd_run(int nid);
 
@@ -274,7 +256,7 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
 
 #ifdef CONFIG_SWAP
 /* linux/mm/page_io.c */
-extern int swap_readpage(struct file *, struct page *);
+extern int swap_readpage(struct page *);
 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
 extern void end_swap_bio_read(struct bio *bio, int err);
 
@@ -300,9 +282,11 @@ extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
 extern swp_entry_t get_swap_page_of_type(int);
-extern int swap_duplicate(swp_entry_t);
+extern void swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern void swap_free(swp_entry_t);
+extern void swapcache_free(swp_entry_t, struct page *page);
 extern int free_swap_and_cache(swp_entry_t);
 extern int swap_type_of(dev_t, sector_t, struct block_device **);
 extern unsigned int count_swap_pages(int, int);
@@ -370,12 +354,20 @@ static inline void show_swap_cache_info(void)
 }
 
 #define free_swap_and_cache(swp)       is_migration_entry(swp)
-#define swap_duplicate(swp)            is_migration_entry(swp)
+#define swapcache_prepare(swp)         is_migration_entry(swp)
+
+static inline void swap_duplicate(swp_entry_t swp)
+{
+}
 
 static inline void swap_free(swp_entry_t swp)
 {
 }
 
+static inline void swapcache_free(swp_entry_t swp, struct page *page)
+{
+}
+
 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
 {
index 418d90f5effe72ff9921a6ea066bb526a7337127..fa4242cdade86f9b956c949b5253e965e7aa74c9 100644 (file)
@@ -434,6 +434,7 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
 asmlinkage long sys_fcntl64(unsigned int fd,
                                unsigned int cmd, unsigned long arg);
 #endif
+asmlinkage long sys_pipe(int __user *fildes);
 asmlinkage long sys_pipe2(int __user *fildes, int flags);
 asmlinkage long sys_dup(unsigned int fildes);
 asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
@@ -751,8 +752,6 @@ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
 asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
                          struct timespec __user *, const sigset_t __user *,
                          size_t);
-asmlinkage long sys_pipe2(int __user *, int);
-asmlinkage long sys_pipe(int __user *);
 
 int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
 
index 9910e3bd5b316918466359ca20ae3a88914892bd..e6967d10d9e5ea39f77efecca64c59beb9d009d5 100644 (file)
@@ -280,6 +280,9 @@ extern int do_adjtimex(struct timex *);
 
 int read_current_timer(unsigned long *timer_val);
 
+/* The clock frequency of the i8253/i8254 PIT */
+#define PIT_TICK_RATE 1193182ul
+
 #endif /* KERNEL */
 
 #endif /* LINUX_TIMEX_H */
index 14df7e635d439e07d438e9890f0c8f718fa2aa15..b9dc4ca0246f1b802983e7a672d0093601f6d587 100644 (file)
@@ -198,7 +198,7 @@ static inline void tracepoint_synchronize_unregister(void)
  *     * This is how the trace record is structured and will
  *     * be saved into the ring buffer. These are the fields
  *     * that will be exposed to user-space in
- *     * /debug/tracing/events/<*>/format.
+ *     * /sys/kernel/debug/tracing/events/<*>/format.
  *     *
  *     * The declared 'local variable' is called '__entry'
  *     *
@@ -258,7 +258,7 @@ static inline void tracepoint_synchronize_unregister(void)
  * tracepoint callback (this is used by programmatic plugins and
  * can also by used by generic instrumentation like SystemTap), and
  * it is also used to expose a structured trace record in
- * /debug/tracing/events/.
+ * /sys/kernel/debug/tracing/events/.
  */
 
 #define TRACE_EVENT(name, proto, args, struct, assign, print)  \
index 3aa2cd1f8d082fd699a5fcfb672e2dd555ea7dcc..84929e9140341583cbc8fd7050c4e5e5a12b8d29 100644 (file)
@@ -36,6 +36,7 @@ struct wusb_dev;
  *  - configs have one (often) or more interfaces;
  *  - interfaces have one (usually) or more settings;
  *  - each interface setting has zero or (usually) more endpoints.
+ *  - a SuperSpeed endpoint has a companion descriptor
  *
  * And there might be other descriptors mixed in with those.
  *
@@ -44,6 +45,19 @@ struct wusb_dev;
 
 struct ep_device;
 
+/* For SS devices */
+/**
+ * struct usb_host_ss_ep_comp - Valid for SuperSpeed devices only
+ * @desc: endpoint companion descriptor, wMaxPacketSize in native byteorder
+ * @extra: descriptors following this endpoint companion descriptor
+ * @extralen: how many bytes of "extra" are valid
+ */
+struct usb_host_ss_ep_comp {
+       struct usb_ss_ep_comp_descriptor        desc;
+       unsigned char                           *extra;   /* Extra descriptors */
+       int                                     extralen;
+};
+
 /**
  * struct usb_host_endpoint - host-side endpoint descriptor and queue
  * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
@@ -51,6 +65,7 @@ struct ep_device;
  * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
  *     with one or more transfer descriptors (TDs) per urb
  * @ep_dev: ep_device for sysfs info
+ * @ss_ep_comp: companion descriptor information for this endpoint
  * @extra: descriptors following this endpoint in the configuration
  * @extralen: how many bytes of "extra" are valid
  * @enabled: URBs may be submitted to this endpoint
@@ -63,6 +78,7 @@ struct usb_host_endpoint {
        struct list_head                urb_list;
        void                            *hcpriv;
        struct ep_device                *ep_dev;        /* For sysfs info */
+       struct usb_host_ss_ep_comp      *ss_ep_comp;    /* For SS devices */
 
        unsigned char *extra;   /* Extra descriptors */
        int extralen;
@@ -336,7 +352,6 @@ struct usb_bus {
 #ifdef CONFIG_USB_DEVICEFS
        struct dentry *usbfs_dentry;    /* usbfs dentry entry for the bus */
 #endif
-       struct device *dev;             /* device for this bus */
 
 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
        struct mon_bus *mon_bus;        /* non-null when associated */
@@ -363,6 +378,7 @@ struct usb_tt;
  * struct usb_device - kernel's representation of a USB device
  * @devnum: device number; address on a USB bus
  * @devpath: device ID string for use in messages (e.g., /port/...)
+ * @route: tree topology hex string for use with xHCI
  * @state: device state: configured, not attached, etc.
  * @speed: device speed: high/full/low (or error)
  * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
@@ -420,6 +436,7 @@ struct usb_tt;
  * @skip_sys_resume: skip the next system resume
  * @wusb_dev: if this is a Wireless USB device, link to the WUSB
  *     specific data for the device.
+ * @slot_id: Slot ID assigned by xHCI
  *
  * Notes:
  * Usbcore drivers should not set usbdev->state directly.  Instead use
@@ -428,6 +445,7 @@ struct usb_tt;
 struct usb_device {
        int             devnum;
        char            devpath [16];
+       u32             route;
        enum usb_device_state   state;
        enum usb_device_speed   speed;
 
@@ -503,6 +521,7 @@ struct usb_device {
        unsigned skip_sys_resume:1;
 #endif
        struct wusb_dev *wusb_dev;
+       int slot_id;
 };
 #define        to_usb_device(d) container_of(d, struct usb_device, dev)
 
@@ -869,6 +888,8 @@ struct usb_driver {
  * struct usb_device_driver - identifies USB device driver to usbcore
  * @name: The driver name should be unique among USB drivers,
  *     and should normally be the same as the module name.
+ * @nodename: Callback to provide a naming hint for a possible
+ *     device node to create.
  * @probe: Called to see if the driver is willing to manage a particular
  *     device.  If it is, probe returns zero and uses dev_set_drvdata()
  *     to associate driver-specific data with the device.  If unwilling
@@ -912,6 +933,7 @@ extern struct bus_type usb_bus_type;
  */
 struct usb_class_driver {
        char *name;
+       char *(*nodename)(struct device *dev);
        const struct file_operations *fops;
        int minor_base;
 };
@@ -1041,7 +1063,9 @@ typedef void (*usb_complete_t)(struct urb *);
  * @setup_dma: For control transfers with URB_NO_SETUP_DMA_MAP set, the
  *     device driver has provided this DMA address for the setup packet.
  *     The host controller driver should use this in preference to
- *     setup_packet.
+ *     setup_packet, but the HCD may chose to ignore the address if it must
+ *     copy the setup packet into internal structures.  Therefore, setup_packet
+ *     must always point to a valid buffer.
  * @start_frame: Returns the initial frame for isochronous transfers.
  * @number_of_packets: Lists the number of ISO transfer buffers.
  * @interval: Specifies the polling interval for interrupt or isochronous
@@ -1177,6 +1201,8 @@ struct urb {
        unsigned int transfer_flags;    /* (in) URB_SHORT_NOT_OK | ...*/
        void *transfer_buffer;          /* (in) associated data buffer */
        dma_addr_t transfer_dma;        /* (in) dma addr for transfer_buffer */
+       struct usb_sg_request *sg;      /* (in) scatter gather buffer list */
+       int num_sgs;                    /* (in) number of entries in the sg list */
        u32 transfer_buffer_length;     /* (in) data buffer length */
        u32 actual_length;              /* (return) actual transfer length */
        unsigned char *setup_packet;    /* (in) setup packet (control only) */
@@ -1422,8 +1448,8 @@ struct usb_sg_request {
        int                     status;
        size_t                  bytes;
 
-       /*
-        * members below are private: to usbcore,
+       /* private:
+        * members below are private to usbcore,
         * and are not provided for driver access!
         */
        spinlock_t              lock;
@@ -1558,6 +1584,9 @@ extern void usb_unregister_notify(struct notifier_block *nb);
 #define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
        format "\n" , ## arg)
 
+/* debugfs stuff */
+extern struct dentry *usb_debug_root;
+
 #endif  /* __KERNEL__ */
 
 #endif
index 8cb025fef6346a63270cd55a8de69b3b93a49006..b5744bc218ab576b7671602a643decc6bf508542 100644 (file)
 #define USB_SUBCLASS_AUDIOCONTROL      0x01
 #define USB_SUBCLASS_AUDIOSTREAMING    0x02
 #define USB_SUBCLASS_MIDISTREAMING     0x03
+#define USB_SUBCLASS_VENDOR_SPEC       0xff
 
+/* A.5 Audio Class-Specific AC Interface Descriptor Subtypes */
+#define HEADER                         0x01
+#define INPUT_TERMINAL                 0x02
+#define OUTPUT_TERMINAL                        0x03
+#define MIXER_UNIT                     0x04
+#define SELECTOR_UNIT                  0x05
+#define FEATURE_UNIT                   0x06
+#define PROCESSING_UNIT                        0x07
+#define EXTENSION_UNIT                 0x08
+
+#define AS_GENERAL                     0x01
+#define FORMAT_TYPE                    0x02
+#define FORMAT_SPECIFIC                        0x03
+
+#define EP_GENERAL                     0x01
+
+#define MS_GENERAL                     0x01
+#define MIDI_IN_JACK                   0x02
+#define MIDI_OUT_JACK                  0x03
+
+/* endpoint attributes */
+#define EP_ATTR_MASK                   0x0c
+#define EP_ATTR_ASYNC                  0x04
+#define EP_ATTR_ADAPTIVE               0x08
+#define EP_ATTR_SYNC                   0x0c
+
+/* cs endpoint attributes */
+#define EP_CS_ATTR_SAMPLE_RATE         0x01
+#define EP_CS_ATTR_PITCH_CONTROL       0x02
+#define EP_CS_ATTR_FILL_MAX            0x80
+
+/* Audio Class specific Request Codes */
+#define USB_AUDIO_SET_INTF             0x21
+#define USB_AUDIO_SET_ENDPOINT         0x22
+#define USB_AUDIO_GET_INTF             0xa1
+#define USB_AUDIO_GET_ENDPOINT         0xa2
+
+#define SET_   0x00
+#define GET_   0x80
+
+#define _CUR   0x1
+#define _MIN   0x2
+#define _MAX   0x3
+#define _RES   0x4
+#define _MEM   0x5
+
+#define SET_CUR                (SET_ | _CUR)
+#define GET_CUR                (GET_ | _CUR)
+#define SET_MIN                (SET_ | _MIN)
+#define GET_MIN                (GET_ | _MIN)
+#define SET_MAX                (SET_ | _MAX)
+#define GET_MAX                (GET_ | _MAX)
+#define SET_RES                (SET_ | _RES)
+#define GET_RES                (GET_ | _RES)
+#define SET_MEM                (SET_ | _MEM)
+#define GET_MEM                (GET_ | _MEM)
+
+#define GET_STAT       0xff
+
+#define USB_AC_TERMINAL_UNDEFINED      0x100
+#define USB_AC_TERMINAL_STREAMING      0x101
+#define USB_AC_TERMINAL_VENDOR_SPEC    0x1FF
+
+/* Terminal Control Selectors */
 /* 4.3.2  Class-Specific AC Interface Descriptor */
 struct usb_ac_header_descriptor {
-       __u8  bLength;                  /* 8+n */
+       __u8  bLength;                  /* 8 + n */
        __u8  bDescriptorType;          /* USB_DT_CS_INTERFACE */
        __u8  bDescriptorSubtype;       /* USB_MS_HEADER */
        __le16 bcdADC;                  /* 0x0100 */
@@ -36,7 +101,7 @@ struct usb_ac_header_descriptor {
        __u8  baInterfaceNr[];          /* [n] */
 } __attribute__ ((packed));
 
-#define USB_DT_AC_HEADER_SIZE(n)       (8+(n))
+#define USB_DT_AC_HEADER_SIZE(n)       (8 + (n))
 
 /* As above, but more useful for defining your own descriptors: */
 #define DECLARE_USB_AC_HEADER_DESCRIPTOR(n)                    \
@@ -50,4 +115,200 @@ struct usb_ac_header_descriptor_##n {                              \
        __u8  baInterfaceNr[n];                                 \
 } __attribute__ ((packed))
 
+/* 4.3.2.1 Input Terminal Descriptor */
+struct usb_input_terminal_descriptor {
+       __u8  bLength;                  /* in bytes: 12 */
+       __u8  bDescriptorType;          /* CS_INTERFACE descriptor type */
+       __u8  bDescriptorSubtype;       /* INPUT_TERMINAL descriptor subtype */
+       __u8  bTerminalID;              /* Constant uniquely identifying the Terminal */
+       __le16 wTerminalType;           /* USB Audio Terminal Types */
+       __u8  bAssocTerminal;           /* ID of the Output Terminal associated */
+       __u8  bNrChannels;              /* Number of logical output channels */
+       __le16 wChannelConfig;
+       __u8  iChannelNames;
+       __u8  iTerminal;
+} __attribute__ ((packed));
+
+#define USB_DT_AC_INPUT_TERMINAL_SIZE                  12
+
+#define USB_AC_INPUT_TERMINAL_UNDEFINED                        0x200
+#define USB_AC_INPUT_TERMINAL_MICROPHONE               0x201
+#define USB_AC_INPUT_TERMINAL_DESKTOP_MICROPHONE       0x202
+#define USB_AC_INPUT_TERMINAL_PERSONAL_MICROPHONE      0x203
+#define USB_AC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE      0x204
+#define USB_AC_INPUT_TERMINAL_MICROPHONE_ARRAY         0x205
+#define USB_AC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY    0x206
+
+/* 4.3.2.2 Output Terminal Descriptor */
+struct usb_output_terminal_descriptor {
+       __u8  bLength;                  /* in bytes: 9 */
+       __u8  bDescriptorType;          /* CS_INTERFACE descriptor type */
+       __u8  bDescriptorSubtype;       /* OUTPUT_TERMINAL descriptor subtype */
+       __u8  bTerminalID;              /* Constant uniquely identifying the Terminal */
+       __le16 wTerminalType;           /* USB Audio Terminal Types */
+       __u8  bAssocTerminal;           /* ID of the Input Terminal associated */
+       __u8  bSourceID;                /* ID of the connected Unit or Terminal */
+       __u8  iTerminal;
+} __attribute__ ((packed));
+
+#define USB_DT_AC_OUTPUT_TERMINAL_SIZE                         9
+
+#define USB_AC_OUTPUT_TERMINAL_UNDEFINED                       0x300
+#define USB_AC_OUTPUT_TERMINAL_SPEAKER                         0x301
+#define USB_AC_OUTPUT_TERMINAL_HEADPHONES                      0x302
+#define USB_AC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO      0x303
+#define USB_AC_OUTPUT_TERMINAL_DESKTOP_SPEAKER                 0x304
+#define USB_AC_OUTPUT_TERMINAL_ROOM_SPEAKER                    0x305
+#define USB_AC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER           0x306
+#define USB_AC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER                0x307
+
+/* Set bControlSize = 2 as default setting */
+#define USB_DT_AC_FEATURE_UNIT_SIZE(ch)                (7 + ((ch) + 1) * 2)
+
+/* As above, but more useful for defining your own descriptors: */
+#define DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(ch)             \
+struct usb_ac_feature_unit_descriptor_##ch {                   \
+       __u8  bLength;                                          \
+       __u8  bDescriptorType;                                  \
+       __u8  bDescriptorSubtype;                               \
+       __u8  bUnitID;                                          \
+       __u8  bSourceID;                                        \
+       __u8  bControlSize;                                     \
+       __le16 bmaControls[ch + 1];                             \
+       __u8  iFeature;                                         \
+} __attribute__ ((packed))
+
+/* 4.5.2 Class-Specific AS Interface Descriptor */
+struct usb_as_header_descriptor {
+       __u8  bLength;                  /* in bytes: 7 */
+       __u8  bDescriptorType;          /* USB_DT_CS_INTERFACE */
+       __u8  bDescriptorSubtype;       /* AS_GENERAL */
+       __u8  bTerminalLink;            /* Terminal ID of connected Terminal */
+       __u8  bDelay;                   /* Delay introduced by the data path */
+       __le16 wFormatTag;              /* The Audio Data Format */
+} __attribute__ ((packed));
+
+#define USB_DT_AS_HEADER_SIZE          7
+
+#define USB_AS_AUDIO_FORMAT_TYPE_I_UNDEFINED   0x0
+#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM         0x1
+#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM8                0x2
+#define USB_AS_AUDIO_FORMAT_TYPE_I_IEEE_FLOAT  0x3
+#define USB_AS_AUDIO_FORMAT_TYPE_I_ALAW                0x4
+#define USB_AS_AUDIO_FORMAT_TYPE_I_MULAW       0x5
+
+struct usb_as_format_type_i_continuous_descriptor {
+       __u8  bLength;                  /* in bytes: 8 + (ns * 3) */
+       __u8  bDescriptorType;          /* USB_DT_CS_INTERFACE */
+       __u8  bDescriptorSubtype;       /* FORMAT_TYPE */
+       __u8  bFormatType;              /* FORMAT_TYPE_1 */
+       __u8  bNrChannels;              /* physical channels in the stream */
+       __u8  bSubframeSize;            /* bytes occupied by one audio subframe */
+       __u8  bBitResolution;
+       __u8  bSamFreqType;
+       __u8  tLowerSamFreq[3];
+       __u8  tUpperSamFreq[3];
+} __attribute__ ((packed));
+
+#define USB_AS_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE      14
+
+struct usb_as_formate_type_i_discrete_descriptor {
+       __u8  bLength;                  /* in bytes: 8 + (ns * 3) */
+       __u8  bDescriptorType;          /* USB_DT_CS_INTERFACE */
+       __u8  bDescriptorSubtype;       /* FORMAT_TYPE */
+       __u8  bFormatType;              /* FORMAT_TYPE_1 */
+       __u8  bNrChannels;              /* physical channels in the stream */
+       __u8  bSubframeSize;            /* bytes occupied by one audio subframe */
+       __u8  bBitResolution;
+       __u8  bSamFreqType;
+       __u8  tSamFreq[][3];
+} __attribute__ ((packed));
+
+#define DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(n)          \
+struct usb_as_formate_type_i_discrete_descriptor_##n {         \
+       __u8  bLength;                                          \
+       __u8  bDescriptorType;                                  \
+       __u8  bDescriptorSubtype;                               \
+       __u8  bFormatType;                                      \
+       __u8  bNrChannels;                                      \
+       __u8  bSubframeSize;                                    \
+       __u8  bBitResolution;                                   \
+       __u8  bSamFreqType;                                     \
+       __u8  tSamFreq[n][3];                                   \
+} __attribute__ ((packed))
+
+#define USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n)     (8 + (n * 3))
+
+#define USB_AS_FORMAT_TYPE_UNDEFINED   0x0
+#define USB_AS_FORMAT_TYPE_I           0x1
+#define USB_AS_FORMAT_TYPE_II          0x2
+#define USB_AS_FORMAT_TYPE_III         0x3
+
+#define USB_AS_ENDPOINT_ASYNC          (1 << 2)
+#define USB_AS_ENDPOINT_ADAPTIVE       (2 << 2)
+#define USB_AS_ENDPOINT_SYNC           (3 << 2)
+
+struct usb_as_iso_endpoint_descriptor {
+       __u8  bLength;                  /* in bytes: 7 */
+       __u8  bDescriptorType;          /* USB_DT_CS_ENDPOINT */
+       __u8  bDescriptorSubtype;       /* EP_GENERAL */
+       __u8  bmAttributes;
+       __u8  bLockDelayUnits;
+       __le16 wLockDelay;
+};
+#define USB_AS_ISO_ENDPOINT_DESC_SIZE  7
+
+#define FU_CONTROL_UNDEFINED           0x00
+#define MUTE_CONTROL                   0x01
+#define VOLUME_CONTROL                 0x02
+#define BASS_CONTROL                   0x03
+#define MID_CONTROL                    0x04
+#define TREBLE_CONTROL                 0x05
+#define GRAPHIC_EQUALIZER_CONTROL      0x06
+#define AUTOMATIC_GAIN_CONTROL         0x07
+#define DELAY_CONTROL                  0x08
+#define BASS_BOOST_CONTROL             0x09
+#define LOUDNESS_CONTROL               0x0a
+
+#define FU_MUTE                (1 << (MUTE_CONTROL - 1))
+#define FU_VOLUME      (1 << (VOLUME_CONTROL - 1))
+#define FU_BASS                (1 << (BASS_CONTROL - 1))
+#define FU_MID         (1 << (MID_CONTROL - 1))
+#define FU_TREBLE      (1 << (TREBLE_CONTROL - 1))
+#define FU_GRAPHIC_EQ  (1 << (GRAPHIC_EQUALIZER_CONTROL - 1))
+#define FU_AUTO_GAIN   (1 << (AUTOMATIC_GAIN_CONTROL - 1))
+#define FU_DELAY       (1 << (DELAY_CONTROL - 1))
+#define FU_BASS_BOOST  (1 << (BASS_BOOST_CONTROL - 1))
+#define FU_LOUDNESS    (1 << (LOUDNESS_CONTROL - 1))
+
+struct usb_audio_control {
+       struct list_head list;
+       const char *name;
+       u8 type;
+       int data[5];
+       int (*set)(struct usb_audio_control *con, u8 cmd, int value);
+       int (*get)(struct usb_audio_control *con, u8 cmd);
+};
+
+static inline int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
+{
+       con->data[cmd] = value;
+
+       return 0;
+}
+
+static inline int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
+{
+       return con->data[cmd];
+}
+
+struct usb_audio_control_selector {
+       struct list_head list;
+       struct list_head control;
+       u8 id;
+       const char *name;
+       u8 type;
+       struct usb_descriptor_header *desc;
+};
+
 #endif /* __LINUX_USB_AUDIO_H */
index b145119a90da509b493c3b8d62dfd4e9bb53231e..93223638f702c90513f4ea308c4ed3f2c873c18c 100644 (file)
@@ -191,6 +191,8 @@ struct usb_ctrlrequest {
 #define USB_DT_WIRE_ADAPTER            0x21
 #define USB_DT_RPIPE                   0x22
 #define USB_DT_CS_RADIO_CONTROL                0x23
+/* From the USB 3.0 spec */
+#define        USB_DT_SS_ENDPOINT_COMP         0x30
 
 /* Conventional codes for class-specific descriptors.  The convention is
  * defined in the USB "Common Class" Spec (3.11).  Individual class specs
@@ -535,6 +537,20 @@ static inline int usb_endpoint_is_isoc_out(
 
 /*-------------------------------------------------------------------------*/
 
+/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
+struct usb_ss_ep_comp_descriptor {
+       __u8  bLength;
+       __u8  bDescriptorType;
+
+       __u8  bMaxBurst;
+       __u8  bmAttributes;
+       __u16 wBytesPerInterval;
+} __attribute__ ((packed));
+
+#define USB_DT_SS_EP_COMP_SIZE         6
+
+/*-------------------------------------------------------------------------*/
+
 /* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */
 struct usb_qualifier_descriptor {
        __u8  bLength;
@@ -752,6 +768,7 @@ enum usb_device_speed {
        USB_SPEED_LOW, USB_SPEED_FULL,          /* usb 1.1 */
        USB_SPEED_HIGH,                         /* usb 2.0 */
        USB_SPEED_VARIABLE,                     /* wireless (usb 2.5) */
+       USB_SPEED_SUPER,                        /* usb 3.0 */
 };
 
 enum usb_device_state {
index acd7b0f06c8aa3941ddd5746ac9dbc1ebc701782..4f6bb3d2160e9f8d84516aae44363a662418431a 100644 (file)
@@ -124,6 +124,7 @@ struct usb_function {
        void                    (*suspend)(struct usb_function *);
        void                    (*resume)(struct usb_function *);
 
+       /* private: */
        /* internals */
        struct list_head                list;
 };
@@ -219,6 +220,7 @@ struct usb_configuration {
 
        struct usb_composite_dev        *cdev;
 
+       /* private: */
        /* internals */
        struct list_head        list;
        struct list_head        functions;
@@ -321,6 +323,7 @@ struct usb_composite_dev {
 
        struct usb_configuration        *config;
 
+       /* private: */
        /* internals */
        struct usb_device_descriptor    desc;
        struct list_head                configs;
diff --git a/include/linux/usb/langwell_otg.h b/include/linux/usb/langwell_otg.h
new file mode 100644 (file)
index 0000000..e115ae6
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * Intel Langwell USB OTG transceiver driver
+ * Copyright (C) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __LANGWELL_OTG_H__
+#define __LANGWELL_OTG_H__
+
+/* notify transceiver driver about OTG events */
+extern void langwell_update_transceiver(void);
+/* HCD register bus driver */
+extern int langwell_register_host(struct pci_driver *host_driver);
+/* HCD unregister bus driver */
+extern void langwell_unregister_host(struct pci_driver *host_driver);
+/* DCD register bus driver */
+extern int langwell_register_peripheral(struct pci_driver *client_driver);
+/* DCD unregister bus driver */
+extern void langwell_unregister_peripheral(struct pci_driver *client_driver);
+/* No silent failure, output warning message */
+extern void langwell_otg_nsf_msg(unsigned long message);
+
+#define CI_USBCMD              0x30
+#      define USBCMD_RST               BIT(1)
+#      define USBCMD_RS                BIT(0)
+#define CI_USBSTS              0x34
+#      define USBSTS_SLI               BIT(8)
+#      define USBSTS_URI               BIT(6)
+#      define USBSTS_PCI               BIT(2)
+#define CI_PORTSC1             0x74
+#      define PORTSC_PP                BIT(12)
+#      define PORTSC_LS                (BIT(11) | BIT(10))
+#      define PORTSC_SUSP              BIT(7)
+#      define PORTSC_CCS               BIT(0)
+#define CI_HOSTPC1             0xb4
+#      define HOSTPC1_PHCD             BIT(22)
+#define CI_OTGSC               0xf4
+#      define OTGSC_DPIE               BIT(30)
+#      define OTGSC_1MSE               BIT(29)
+#      define OTGSC_BSEIE              BIT(28)
+#      define OTGSC_BSVIE              BIT(27)
+#      define OTGSC_ASVIE              BIT(26)
+#      define OTGSC_AVVIE              BIT(25)
+#      define OTGSC_IDIE               BIT(24)
+#      define OTGSC_DPIS               BIT(22)
+#      define OTGSC_1MSS               BIT(21)
+#      define OTGSC_BSEIS              BIT(20)
+#      define OTGSC_BSVIS              BIT(19)
+#      define OTGSC_ASVIS              BIT(18)
+#      define OTGSC_AVVIS              BIT(17)
+#      define OTGSC_IDIS               BIT(16)
+#      define OTGSC_DPS                BIT(14)
+#      define OTGSC_1MST               BIT(13)
+#      define OTGSC_BSE                BIT(12)
+#      define OTGSC_BSV                BIT(11)
+#      define OTGSC_ASV                BIT(10)
+#      define OTGSC_AVV                BIT(9)
+#      define OTGSC_ID                 BIT(8)
+#      define OTGSC_HABA               BIT(7)
+#      define OTGSC_HADP               BIT(6)
+#      define OTGSC_IDPU               BIT(5)
+#      define OTGSC_DP                 BIT(4)
+#      define OTGSC_OT                 BIT(3)
+#      define OTGSC_HAAR               BIT(2)
+#      define OTGSC_VC                 BIT(1)
+#      define OTGSC_VD                 BIT(0)
+#      define OTGSC_INTEN_MASK         (0x7f << 24)
+#      define OTGSC_INTSTS_MASK        (0x7f << 16)
+#define CI_USBMODE             0xf8
+#      define USBMODE_CM               (BIT(1) | BIT(0))
+#      define USBMODE_IDLE             0
+#      define USBMODE_DEVICE           0x2
+#      define USBMODE_HOST             0x3
+
+#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
+
+struct otg_hsm {
+       /* Input */
+       int a_bus_resume;
+       int a_bus_suspend;
+       int a_conn;
+       int a_sess_vld;
+       int a_srp_det;
+       int a_vbus_vld;
+       int b_bus_resume;
+       int b_bus_suspend;
+       int b_conn;
+       int b_se0_srp;
+       int b_sess_end;
+       int b_sess_vld;
+       int id;
+
+       /* Internal variables */
+       int a_set_b_hnp_en;
+       int b_srp_done;
+       int b_hnp_enable;
+
+       /* Timeout indicator for timers */
+       int a_wait_vrise_tmout;
+       int a_wait_bcon_tmout;
+       int a_aidl_bdis_tmout;
+       int b_ase0_brst_tmout;
+       int b_bus_suspend_tmout;
+       int b_srp_res_tmout;
+
+       /* Informative variables */
+       int a_bus_drop;
+       int a_bus_req;
+       int a_clr_err;
+       int a_suspend_req;
+       int b_bus_req;
+
+       /* Output */
+       int drv_vbus;
+       int loc_conn;
+       int loc_sof;
+
+       /* Others */
+       int b_bus_suspend_vld;
+};
+
+#define TA_WAIT_VRISE  100
+#define TA_WAIT_BCON   30000
+#define TA_AIDL_BDIS   15000
+#define TB_ASE0_BRST   5000
+#define TB_SE0_SRP     2
+#define TB_SRP_RES     100
+#define TB_BUS_SUSPEND 500
+
+struct langwell_otg_timer {
+       unsigned long expires;  /* Number of count increase to timeout */
+       unsigned long count;    /* Tick counter */
+       void (*function)(unsigned long);        /* Timeout function */
+       unsigned long data;     /* Data passed to function */
+       struct list_head list;
+};
+
+struct langwell_otg {
+       struct otg_transceiver  otg;
+       struct otg_hsm          hsm;
+       void __iomem            *regs;
+       unsigned                region;
+       struct pci_driver       *host_ops;
+       struct pci_driver       *client_ops;
+       struct pci_dev          *pdev;
+       struct work_struct      work;
+       struct workqueue_struct *qwork;
+       spinlock_t              lock;
+       spinlock_t              wq_lock;
+};
+
+static inline struct langwell_otg *otg_to_langwell(struct otg_transceiver *otg)
+{
+       return container_of(otg, struct langwell_otg, otg);
+}
+
+#ifdef DEBUG
+#define otg_dbg(fmt, args...) \
+       printk(KERN_DEBUG fmt , ## args)
+#else
+#define otg_dbg(fmt, args...) \
+       do { } while (0)
+#endif /* DEBUG */
+#endif /* __LANGWELL_OTG_H__ */
diff --git a/include/linux/usb/langwell_udc.h b/include/linux/usb/langwell_udc.h
new file mode 100644 (file)
index 0000000..c949178
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __LANGWELL_UDC_H
+#define __LANGWELL_UDC_H
+
+
+/* MACRO defines */
+#define        CAP_REG_OFFSET          0x0
+#define        OP_REG_OFFSET           0x28
+
+#define        DMA_ADDR_INVALID        (~(dma_addr_t)0)
+
+#define        DQH_ALIGNMENT           2048
+#define        DTD_ALIGNMENT           64
+#define        DMA_BOUNDARY            4096
+
+#define        EP0_MAX_PKT_SIZE        64
+#define EP_DIR_IN              1
+#define EP_DIR_OUT             0
+
+#define FLUSH_TIMEOUT          1000
+#define RESET_TIMEOUT          1000
+#define SETUPSTAT_TIMEOUT      100
+#define PRIME_TIMEOUT          100
+
+
+/* device memory space registers */
+
+/* Capability Registers, BAR0 + CAP_REG_OFFSET */
+struct langwell_cap_regs {
+       /* offset: 0x0 */
+       u8      caplength;      /* offset of Operational Register */
+       u8      _reserved3;
+       u16     hciversion;     /* H: BCD encoding of host version */
+       u32     hcsparams;      /* H: host port steering logic capability */
+       u32     hccparams;      /* H: host multiple mode control capability */
+#define        HCC_LEN BIT(17)         /* Link power management (LPM) capability */
+       u8      _reserved4[0x20-0xc];
+       /* offset: 0x20 */
+       u16     dciversion;     /* BCD encoding of device version */
+       u8      _reserved5[0x24-0x22];
+       u32     dccparams;      /* overall device controller capability */
+#define        HOSTCAP BIT(8)          /* host capable */
+#define        DEVCAP  BIT(7)          /* device capable */
+#define DEN(d) \
+       (((d)>>0)&0x1f)         /* bits 4:0, device endpoint number */
+} __attribute__ ((packed));
+
+
+/* Operational Registers, BAR0 + OP_REG_OFFSET */
+struct langwell_op_regs {
+       /* offset: 0x28 */
+       u32     extsts;
+#define        EXTS_TI1        BIT(4)  /* general purpose timer interrupt 1 */
+#define        EXTS_TI1TI0     BIT(3)  /* general purpose timer interrupt 0 */
+#define        EXTS_TI1UPI     BIT(2)  /* USB host periodic interrupt */
+#define        EXTS_TI1UAI     BIT(1)  /* USB host asynchronous interrupt */
+#define        EXTS_TI1NAKI    BIT(0)  /* NAK interrupt */
+       u32     extintr;
+#define        EXTI_TIE1       BIT(4)  /* general purpose timer interrupt enable 1 */
+#define        EXTI_TIE0       BIT(3)  /* general purpose timer interrupt enable 0 */
+#define        EXTI_UPIE       BIT(2)  /* USB host periodic interrupt enable */
+#define        EXTI_UAIE       BIT(1)  /* USB host asynchronous interrupt enable */
+#define        EXTI_NAKE       BIT(0)  /* NAK interrupt enable */
+       /* offset: 0x30 */
+       u32     usbcmd;
+#define        CMD_HIRD(u)     \
+       (((u)>>24)&0xf)         /* bits 27:24, host init resume duration */
+#define        CMD_ITC(u)      \
+       (((u)>>16)&0xff)        /* bits 23:16, interrupt threshold control */
+#define        CMD_PPE         BIT(15) /* per-port change events enable */
+#define        CMD_ATDTW       BIT(14) /* add dTD tripwire */
+#define        CMD_SUTW        BIT(13) /* setup tripwire */
+#define        CMD_ASPE        BIT(11) /* asynchronous schedule park mode enable */
+#define        CMD_FS2         BIT(10) /* frame list size */
+#define        CMD_ASP1        BIT(9)  /* asynchronous schedule park mode count */
+#define        CMD_ASP0        BIT(8)
+#define        CMD_LR          BIT(7)  /* light host/device controller reset */
+#define        CMD_IAA         BIT(6)  /* interrupt on async advance doorbell */
+#define        CMD_ASE         BIT(5)  /* asynchronous schedule enable */
+#define        CMD_PSE         BIT(4)  /* periodic schedule enable */
+#define        CMD_FS1         BIT(3)
+#define        CMD_FS0         BIT(2)
+#define        CMD_RST         BIT(1)  /* controller reset */
+#define        CMD_RUNSTOP     BIT(0)  /* run/stop */
+       u32     usbsts;
+#define        STS_PPCI(u)     \
+       (((u)>>16)&0xffff)      /* bits 31:16, port-n change detect */
+#define        STS_AS          BIT(15) /* asynchronous schedule status */
+#define        STS_PS          BIT(14) /* periodic schedule status */
+#define        STS_RCL         BIT(13) /* reclamation */
+#define        STS_HCH         BIT(12) /* HC halted */
+#define        STS_ULPII       BIT(10) /* ULPI interrupt */
+#define        STS_SLI         BIT(8)  /* DC suspend */
+#define        STS_SRI         BIT(7)  /* SOF received */
+#define        STS_URI         BIT(6)  /* USB reset received */
+#define        STS_AAI         BIT(5)  /* interrupt on async advance */
+#define        STS_SEI         BIT(4)  /* system error */
+#define        STS_FRI         BIT(3)  /* frame list rollover */
+#define        STS_PCI         BIT(2)  /* port change detect */
+#define        STS_UEI         BIT(1)  /* USB error interrupt */
+#define        STS_UI          BIT(0)  /* USB interrupt */
+       u32     usbintr;
+/* bits 31:16, per-port interrupt enable */
+#define        INTR_PPCE(u)    (((u)>>16)&0xffff)
+#define        INTR_ULPIE      BIT(10) /* ULPI enable */
+#define        INTR_SLE        BIT(8)  /* DC sleep/suspend enable */
+#define        INTR_SRE        BIT(7)  /* SOF received enable */
+#define        INTR_URE        BIT(6)  /* USB reset enable */
+#define        INTR_AAE        BIT(5)  /* interrupt on async advance enable */
+#define        INTR_SEE        BIT(4)  /* system error enable */
+#define        INTR_FRE        BIT(3)  /* frame list rollover enable */
+#define        INTR_PCE        BIT(2)  /* port change detect enable */
+#define        INTR_UEE        BIT(1)  /* USB error interrupt enable */
+#define        INTR_UE         BIT(0)  /* USB interrupt enable */
+       u32     frindex;        /* frame index */
+#define        FRINDEX_MASK    (0x3fff << 0)
+       u32     ctrldssegment;  /* not used */
+       u32     deviceaddr;
+#define USBADR_SHIFT   25
+#define        USBADR(d)       \
+       (((d)>>25)&0x7f)        /* bits 31:25, device address */
+#define USBADR_MASK    (0x7f << 25)
+#define        USBADRA         BIT(24) /* device address advance */
+       u32     endpointlistaddr;/* endpoint list top memory address */
+/* bits 31:11, endpoint list pointer */
+#define        EPBASE(d)       (((d)>>11)&0x1fffff)
+#define        ENDPOINTLISTADDR_MASK   (0x1fffff << 11)
+       u32     ttctrl;         /* H: TT operation, not used */
+       /* offset: 0x50 */
+       u32     burstsize;      /* burst size of data movement */
+#define        TXPBURST(b)     \
+       (((b)>>8)&0xff)         /* bits 15:8, TX burst length */
+#define        RXPBURST(b)     \
+       (((b)>>0)&0xff)         /* bits 7:0, RX burst length */
+       u32     txfilltuning;   /* TX tuning */
+       u32     txttfilltuning; /* H: TX TT tuning */
+       u32     ic_usb;         /* control the IC_USB FS/LS transceiver */
+       /* offset: 0x60 */
+       u32     ulpi_viewport;  /* indirect access to ULPI PHY */
+#define        ULPIWU          BIT(31) /* ULPI wakeup */
+#define        ULPIRUN         BIT(30) /* ULPI read/write run */
+#define        ULPIRW          BIT(29) /* ULPI read/write control */
+#define        ULPISS          BIT(27) /* ULPI sync state */
+#define        ULPIPORT(u)     \
+       (((u)>>24)&7)           /* bits 26:24, ULPI port number */
+#define        ULPIADDR(u)     \
+       (((u)>>16)&0xff)        /* bits 23:16, ULPI data address */
+#define        ULPIDATRD(u)    \
+       (((u)>>8)&0xff)         /* bits 15:8, ULPI data read */
+#define        ULPIDATWR(u)    \
+       (((u)>>0)&0xff)        /* bits 7:0, ULPI data write */
+       u8      _reserved6[0x70-0x64];
+       /* offset: 0x70 */
+       u32     configflag;     /* H: not used */
+       u32     portsc1;        /* port status */
+#define        DA(p)   \
+       (((p)>>25)&0x7f)        /* bits 31:25, device address */
+#define        PORTS_SSTS      (BIT(24) | BIT(23))     /* suspend status */
+#define        PORTS_WKOC      BIT(22) /* wake on over-current enable */
+#define        PORTS_WKDS      BIT(21) /* wake on disconnect enable */
+#define        PORTS_WKCN      BIT(20) /* wake on connect enable */
+#define        PORTS_PTC(p)    (((p)>>16)&0xf) /* bits 19:16, port test control */
+#define        PORTS_PIC       (BIT(15) | BIT(14))     /* port indicator control */
+#define        PORTS_PO        BIT(13) /* port owner */
+#define        PORTS_PP        BIT(12) /* port power */
+#define        PORTS_LS        (BIT(11) | BIT(10))     /* line status */
+#define        PORTS_SLP       BIT(9)  /* suspend using L1 */
+#define        PORTS_PR        BIT(8)  /* port reset */
+#define        PORTS_SUSP      BIT(7)  /* suspend */
+#define        PORTS_FPR       BIT(6)  /* force port resume */
+#define        PORTS_OCC       BIT(5)  /* over-current change */
+#define        PORTS_OCA       BIT(4)  /* over-current active */
+#define        PORTS_PEC       BIT(3)  /* port enable/disable change */
+#define        PORTS_PE        BIT(2)  /* port enable/disable */
+#define        PORTS_CSC       BIT(1)  /* connect status change */
+#define        PORTS_CCS       BIT(0)  /* current connect status */
+       u8      _reserved7[0xb4-0x78];
+       /* offset: 0xb4 */
+       u32     devlc;          /* control LPM and each USB port behavior */
+/* bits 31:29, parallel transceiver select */
+#define        LPM_PTS(d)      (((d)>>29)&7)
+#define        LPM_STS         BIT(28) /* serial transceiver select */
+#define        LPM_PTW         BIT(27) /* parallel transceiver width */
+#define        LPM_PSPD(d)     (((d)>>25)&3)   /* bits 26:25, port speed */
+#define LPM_PSPD_MASK  (BIT(26) | BIT(25))
+#define LPM_SPEED_FULL 0
+#define LPM_SPEED_LOW  1
+#define LPM_SPEED_HIGH 2
+#define        LPM_SRT         BIT(24) /* shorten reset time */
+#define        LPM_PFSC        BIT(23) /* port force full speed connect */
+#define        LPM_PHCD        BIT(22) /* PHY low power suspend clock disable */
+#define        LPM_STL         BIT(16) /* STALL reply to LPM token */
+#define        LPM_BA(d)       \
+       (((d)>>1)&0x7ff)        /* bits 11:1, BmAttributes */
+#define        LPM_NYT_ACK     BIT(0)  /* NYET/ACK reply to LPM token */
+       u8      _reserved8[0xf4-0xb8];
+       /* offset: 0xf4 */
+       u32     otgsc;          /* On-The-Go status and control */
+#define        OTGSC_DPIE      BIT(30) /* data pulse interrupt enable */
+#define        OTGSC_MSE       BIT(29) /* 1 ms timer interrupt enable */
+#define        OTGSC_BSEIE     BIT(28) /* B session end interrupt enable */
+#define        OTGSC_BSVIE     BIT(27) /* B session valid interrupt enable */
+#define        OTGSC_ASVIE     BIT(26) /* A session valid interrupt enable */
+#define        OTGSC_AVVIE     BIT(25) /* A VBUS valid interrupt enable */
+#define        OTGSC_IDIE      BIT(24) /* USB ID interrupt enable */
+#define        OTGSC_DPIS      BIT(22) /* data pulse interrupt status */
+#define        OTGSC_MSS       BIT(21) /* 1 ms timer interrupt status */
+#define        OTGSC_BSEIS     BIT(20) /* B session end interrupt status */
+#define        OTGSC_BSVIS     BIT(19) /* B session valid interrupt status */
+#define        OTGSC_ASVIS     BIT(18) /* A session valid interrupt status */
+#define        OTGSC_AVVIS     BIT(17) /* A VBUS valid interrupt status */
+#define        OTGSC_IDIS      BIT(16) /* USB ID interrupt status */
+#define        OTGSC_DPS       BIT(14) /* data bus pulsing status */
+#define        OTGSC_MST       BIT(13) /* 1 ms timer toggle */
+#define        OTGSC_BSE       BIT(12) /* B session end */
+#define        OTGSC_BSV       BIT(11) /* B session valid */
+#define        OTGSC_ASV       BIT(10) /* A session valid */
+#define        OTGSC_AVV       BIT(9)  /* A VBUS valid */
+#define        OTGSC_USBID     BIT(8)  /* USB ID */
+#define        OTGSC_HABA      BIT(7)  /* hw assist B-disconnect to A-connect */
+#define        OTGSC_HADP      BIT(6)  /* hw assist data pulse */
+#define        OTGSC_IDPU      BIT(5)  /* ID pullup */
+#define        OTGSC_DP        BIT(4)  /* data pulsing */
+#define        OTGSC_OT        BIT(3)  /* OTG termination */
+#define        OTGSC_HAAR      BIT(2)  /* hw assist auto reset */
+#define        OTGSC_VC        BIT(1)  /* VBUS charge */
+#define        OTGSC_VD        BIT(0)  /* VBUS discharge */
+       u32     usbmode;
+#define        MODE_VBPS       BIT(5)  /* R/W VBUS power select */
+#define        MODE_SDIS       BIT(4)  /* R/W stream disable mode */
+#define        MODE_SLOM       BIT(3)  /* R/W setup lockout mode */
+#define        MODE_ENSE       BIT(2)  /* endian select */
+#define        MODE_CM(u)      (((u)>>0)&3)    /* bits 1:0, controller mode */
+#define        MODE_IDLE       0
+#define        MODE_DEVICE     2
+#define        MODE_HOST       3
+       u8      _reserved9[0x100-0xfc];
+       /* offset: 0x100 */
+       u32     endptnak;
+#define        EPTN(e)         \
+       (((e)>>16)&0xffff)      /* bits 31:16, TX endpoint NAK */
+#define        EPRN(e)         \
+       (((e)>>0)&0xffff)       /* bits 15:0, RX endpoint NAK */
+       u32     endptnaken;
+#define        EPTNE(e)        \
+       (((e)>>16)&0xffff)      /* bits 31:16, TX endpoint NAK enable */
+#define        EPRNE(e)        \
+       (((e)>>0)&0xffff)       /* bits 15:0, RX endpoint NAK enable */
+       u32     endptsetupstat;
+#define        SETUPSTAT_MASK          (0xffff << 0)   /* bits 15:0 */
+#define EP0SETUPSTAT_MASK      1
+       u32     endptprime;
+/* bits 31:16, prime endpoint transmit buffer */
+#define        PETB(e)         (((e)>>16)&0xffff)
+/* bits 15:0, prime endpoint receive buffer */
+#define        PERB(e)         (((e)>>0)&0xffff)
+       /* offset: 0x110 */
+       u32     endptflush;
+/* bits 31:16, flush endpoint transmit buffer */
+#define        FETB(e)         (((e)>>16)&0xffff)
+/* bits 15:0, flush endpoint receive buffer */
+#define        FERB(e)         (((e)>>0)&0xffff)
+       u32     endptstat;
+/* bits 31:16, endpoint transmit buffer ready */
+#define        ETBR(e)         (((e)>>16)&0xffff)
+/* bits 15:0, endpoint receive buffer ready */
+#define        ERBR(e)         (((e)>>0)&0xffff)
+       u32     endptcomplete;
+/* bits 31:16, endpoint transmit complete event */
+#define        ETCE(e)         (((e)>>16)&0xffff)
+/* bits 15:0, endpoint receive complete event */
+#define        ERCE(e)         (((e)>>0)&0xffff)
+       /* offset: 0x11c */
+       u32     endptctrl[16];
+#define        EPCTRL_TXE      BIT(23) /* TX endpoint enable */
+#define        EPCTRL_TXR      BIT(22) /* TX data toggle reset */
+#define        EPCTRL_TXI      BIT(21) /* TX data toggle inhibit */
+#define        EPCTRL_TXT(e)   (((e)>>18)&3)   /* bits 19:18, TX endpoint type */
+#define        EPCTRL_TXT_SHIFT        18
+#define        EPCTRL_TXD      BIT(17) /* TX endpoint data source */
+#define        EPCTRL_TXS      BIT(16) /* TX endpoint STALL */
+#define        EPCTRL_RXE      BIT(7)  /* RX endpoint enable */
+#define        EPCTRL_RXR      BIT(6)  /* RX data toggle reset */
+#define        EPCTRL_RXI      BIT(5)  /* RX data toggle inhibit */
+#define        EPCTRL_RXT(e)   (((e)>>2)&3)    /* bits 3:2, RX endpoint type */
+#define        EPCTRL_RXT_SHIFT        2       /* bits 3:2, RX endpoint type */
+#define        EPCTRL_RXD      BIT(1)  /* RX endpoint data sink */
+#define        EPCTRL_RXS      BIT(0)  /* RX endpoint STALL */
+} __attribute__ ((packed));
+
+#endif /* __LANGWELL_UDC_H */
+
index 1aaa826396a1462555925b871f22f95ff8870299..2443c0e7a80cd5e25ca77d217a0f5abbd2c1151e 100644 (file)
@@ -80,10 +80,10 @@ struct otg_transceiver {
 
 /* for board-specific init logic */
 extern int otg_set_transceiver(struct otg_transceiver *);
-#ifdef CONFIG_NOP_USB_XCEIV
+
+/* sometimes transceivers are accessed only through e.g. ULPI */
 extern void usb_nop_xceiv_register(void);
 extern void usb_nop_xceiv_unregister(void);
-#endif
 
 
 /* for usb host and peripheral controller drivers */
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
new file mode 100644 (file)
index 0000000..e9f0384
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * R8A66597 driver platform data
+ *
+ * Copyright (C) 2009  Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#ifndef __LINUX_USB_R8A66597_H
+#define __LINUX_USB_R8A66597_H
+
+#define R8A66597_PLATDATA_XTAL_12MHZ   0x01
+#define R8A66597_PLATDATA_XTAL_24MHZ   0x02
+#define R8A66597_PLATDATA_XTAL_48MHZ   0x03
+
+struct r8a66597_platdata {
+       /* These ops can control port power instead of DVSTCTR register. */
+       void (*port_power)(int port, int power);
+
+       /* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */
+       unsigned        xtal:2;
+
+       /* set one = 3.3V, set zero = 1.5V */
+       unsigned        vif:1;
+
+       /* set one = big endian, set zero = little endian */
+       unsigned        endian:1;
+};
+#endif
+
index 8cdfed738fe4b8b28d0f7208c8553f0cf2eaee03..44801d26a37a7888d7ebd88dbb5cf0e4a1836de0 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/kref.h>
 #include <linux/mutex.h>
+#include <linux/sysrq.h>
 
 #define SERIAL_TTY_MAJOR       188     /* Nice legal number now */
 #define SERIAL_TTY_MINORS      254     /* loads of devices :) */
 /* parity check flag */
 #define RELEVANT_IFLAG(iflag)  (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
 
+enum port_dev_state {
+       PORT_UNREGISTERED,
+       PORT_REGISTERING,
+       PORT_REGISTERED,
+       PORT_UNREGISTERING,
+};
+
 /**
  * usb_serial_port: structure for the specific ports of a device.
  * @serial: pointer back to the struct usb_serial owner of this port.
@@ -91,12 +99,17 @@ struct usb_serial_port {
        int                     write_urb_busy;
        __u8                    bulk_out_endpointAddress;
 
+       int                     tx_bytes_flight;
+       int                     urbs_in_flight;
+
        wait_queue_head_t       write_wait;
        struct work_struct      work;
        char                    throttled;
        char                    throttle_req;
        char                    console;
+       unsigned long           sysrq; /* sysrq timeout */
        struct device           dev;
+       enum port_dev_state     dev_state;
 };
 #define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)
 
@@ -181,8 +194,10 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
  *     This will be called when the struct usb_serial structure is fully set
  *     set up.  Do any local initialization of the device, or any private
  *     memory structure allocation at this point in time.
- * @shutdown: pointer to the driver's shutdown function.  This will be
- *     called when the device is removed from the system.
+ * @disconnect: pointer to the driver's disconnect function.  This will be
+ *     called when the device is unplugged or unbound from the driver.
+ * @release: pointer to the driver's release function.  This will be called
+ *     when the usb_serial data structure is about to be destroyed.
  * @usb_driver: pointer to the struct usb_driver that controls this
  *     device.  This is necessary to allow dynamic ids to be added to
  *     the driver from sysfs.
@@ -207,12 +222,14 @@ struct usb_serial_driver {
        struct device_driver    driver;
        struct usb_driver       *usb_driver;
        struct usb_dynids       dynids;
+       int                     max_in_flight_urbs;
 
        int (*probe)(struct usb_serial *serial, const struct usb_device_id *id);
        int (*attach)(struct usb_serial *serial);
        int (*calc_num_ports) (struct usb_serial *serial);
 
-       void (*shutdown)(struct usb_serial *serial);
+       void (*disconnect)(struct usb_serial *serial);
+       void (*release)(struct usb_serial *serial);
 
        int (*port_probe)(struct usb_serial_port *port);
        int (*port_remove)(struct usb_serial_port *port);
@@ -294,9 +311,16 @@ extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
 extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
 extern void usb_serial_generic_throttle(struct tty_struct *tty);
 extern void usb_serial_generic_unthrottle(struct tty_struct *tty);
-extern void usb_serial_generic_shutdown(struct usb_serial *serial);
+extern void usb_serial_generic_disconnect(struct usb_serial *serial);
+extern void usb_serial_generic_release(struct usb_serial *serial);
 extern int usb_serial_generic_register(int debug);
 extern void usb_serial_generic_deregister(void);
+extern void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
+                                                gfp_t mem_flags);
+extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
+                                       unsigned int ch);
+extern int usb_serial_handle_break(struct usb_serial_port *port);
+
 
 extern int usb_serial_bus_register(struct usb_serial_driver *device);
 extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
index 11232676bfff59af017b4b3343c67dc430004c4b..3656b300de3a740e39620172e4eb0e0997b06aea 100644 (file)
@@ -22,12 +22,12 @@ struct old_utsname {
 };
 
 struct new_utsname {
-       char sysname[65];
-       char nodename[65];
-       char release[65];
-       char version[65];
-       char machine[65];
-       char domainname[65];
+       char sysname[__NEW_UTS_LEN + 1];
+       char nodename[__NEW_UTS_LEN + 1];
+       char release[__NEW_UTS_LEN + 1];
+       char version[__NEW_UTS_LEN + 1];
+       char machine[__NEW_UTS_LEN + 1];
+       char domainname[__NEW_UTS_LEN + 1];
 };
 
 #ifdef __KERNEL__
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h
new file mode 100644 (file)
index 0000000..8f6a958
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef __VLYNQ_H__
+#define __VLYNQ_H__
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define VLYNQ_NUM_IRQS 32
+
+struct vlynq_mapping {
+       u32 size;
+       u32 offset;
+};
+
+enum vlynq_divisor {
+       vlynq_div_auto = 0,
+       vlynq_ldiv1,
+       vlynq_ldiv2,
+       vlynq_ldiv3,
+       vlynq_ldiv4,
+       vlynq_ldiv5,
+       vlynq_ldiv6,
+       vlynq_ldiv7,
+       vlynq_ldiv8,
+       vlynq_rdiv1,
+       vlynq_rdiv2,
+       vlynq_rdiv3,
+       vlynq_rdiv4,
+       vlynq_rdiv5,
+       vlynq_rdiv6,
+       vlynq_rdiv7,
+       vlynq_rdiv8,
+       vlynq_div_external
+};
+
+struct vlynq_device_id {
+       u32 id;
+       enum vlynq_divisor divisor;
+       unsigned long driver_data;
+};
+
+struct vlynq_regs;
+struct vlynq_device {
+       u32 id, dev_id;
+       int local_irq;
+       int remote_irq;
+       enum vlynq_divisor divisor;
+       u32 regs_start, regs_end;
+       u32 mem_start, mem_end;
+       u32 irq_start, irq_end;
+       int irq;
+       int enabled;
+       struct vlynq_regs *local;
+       struct vlynq_regs *remote;
+       struct device dev;
+};
+
+struct vlynq_driver {
+       char *name;
+       struct vlynq_device_id *id_table;
+       int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id);
+       void (*remove)(struct vlynq_device *dev);
+       struct device_driver driver;
+};
+
+struct plat_vlynq_ops {
+       int (*on)(struct vlynq_device *dev);
+       void (*off)(struct vlynq_device *dev);
+};
+
+static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv)
+{
+       return container_of(drv, struct vlynq_driver, driver);
+}
+
+static inline struct vlynq_device *to_vlynq_device(struct device *device)
+{
+       return container_of(device, struct vlynq_device, dev);
+}
+
+extern struct bus_type vlynq_bus_type;
+
+extern int __vlynq_register_driver(struct vlynq_driver *driver,
+                                  struct module *owner);
+
+static inline int vlynq_register_driver(struct vlynq_driver *driver)
+{
+       return __vlynq_register_driver(driver, THIS_MODULE);
+}
+
+static inline void *vlynq_get_drvdata(struct vlynq_device *dev)
+{
+       return dev_get_drvdata(&dev->dev);
+}
+
+static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data)
+{
+       dev_set_drvdata(&dev->dev, data);
+}
+
+static inline u32 vlynq_mem_start(struct vlynq_device *dev)
+{
+       return dev->mem_start;
+}
+
+static inline u32 vlynq_mem_end(struct vlynq_device *dev)
+{
+       return dev->mem_end;
+}
+
+static inline u32 vlynq_mem_len(struct vlynq_device *dev)
+{
+       return dev->mem_end - dev->mem_start + 1;
+}
+
+static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq)
+{
+       int irq = dev->irq_start + virq;
+       if ((irq < dev->irq_start) || (irq > dev->irq_end))
+               return -EINVAL;
+
+       return irq;
+}
+
+static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq)
+{
+       if ((irq < dev->irq_start) || (irq > dev->irq_end))
+               return -EINVAL;
+
+       return irq - dev->irq_start;
+}
+
+extern void vlynq_unregister_driver(struct vlynq_driver *driver);
+extern int vlynq_enable_device(struct vlynq_device *dev);
+extern void vlynq_disable_device(struct vlynq_device *dev);
+extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
+                                  struct vlynq_mapping *mapping);
+extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
+                                   struct vlynq_mapping *mapping);
+extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq);
+extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq);
+
+#endif /* __VLYNQ_H__ */
index 524cd1b28ecb0ff62a9d3620f9d21d48f8c1e32e..81a97cf8f0a0845a102a1200dc316aa32a47920f 100644 (file)
@@ -36,12 +36,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                FOR_ALL_ZONES(PGSTEAL),
                FOR_ALL_ZONES(PGSCAN_KSWAPD),
                FOR_ALL_ZONES(PGSCAN_DIRECT),
+#ifdef CONFIG_NUMA
+               PGSCAN_ZONE_RECLAIM_FAILED,
+#endif
                PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
                PAGEOUTRUN, ALLOCSTALL, PGROTATED,
 #ifdef CONFIG_HUGETLB_PAGE
                HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
                UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
                UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
                UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
@@ -50,7 +52,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
                UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
                UNEVICTABLE_MLOCKFREED,
-#endif
                NR_VM_EVENT_ITEMS
 };
 
index 20a6957af870a0b12973a4b3cb937d5e0aeac011..47004f35cc7eaf6f2b3cac2779ea7b7ccd5d9c1f 100644 (file)
@@ -17,6 +17,7 @@
 #define _INET_SOCK_H
 
 
+#include <linux/kmemcheck.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/jhash.h>
@@ -66,14 +67,16 @@ struct inet_request_sock {
        __be32                  loc_addr;
        __be32                  rmt_addr;
        __be16                  rmt_port;
-       u16                     snd_wscale : 4, 
-                               rcv_wscale : 4, 
+       kmemcheck_bitfield_begin(flags);
+       u16                     snd_wscale : 4,
+                               rcv_wscale : 4,
                                tstamp_ok  : 1,
                                sack_ok    : 1,
                                wscale_ok  : 1,
                                ecn_ok     : 1,
                                acked      : 1,
                                no_srccheck: 1;
+       kmemcheck_bitfield_end(flags);
        struct ip_options       *opt;
 };
 
@@ -199,9 +202,12 @@ static inline int inet_sk_ehashfn(const struct sock *sk)
 static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
 {
        struct request_sock *req = reqsk_alloc(ops);
+       struct inet_request_sock *ireq = inet_rsk(req);
 
-       if (req != NULL)
-               inet_rsk(req)->opt = NULL;
+       if (req != NULL) {
+               kmemcheck_annotate_bitfield(ireq, flags);
+               ireq->opt = NULL;
+       }
 
        return req;
 }
index 4b8ece22b8e94417879a6504e87afe5bd0ecc371..b63b80fac567a2e103c996d8c021b10ef6e150e8 100644 (file)
@@ -16,6 +16,7 @@
 #define _INET_TIMEWAIT_SOCK_
 
 
+#include <linux/kmemcheck.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/timer.h>
@@ -127,10 +128,12 @@ struct inet_timewait_sock {
        __be32                  tw_rcv_saddr;
        __be16                  tw_dport;
        __u16                   tw_num;
+       kmemcheck_bitfield_begin(flags);
        /* And these are ours. */
        __u8                    tw_ipv6only:1,
                                tw_transparent:1;
-       /* 15 bits hole, try to pack */
+       /* 14 bits hole, try to pack */
+       kmemcheck_bitfield_end(flags);
        __u16                   tw_ipv6_offset;
        unsigned long           tw_ttd;
        struct inet_bind_bucket *tw_tb;
index 010e14a93c9256a0d92e35232e2292e746e8f86c..95bd3fd75f942d42351cd42450420334fef6f2a0 100644 (file)
@@ -218,9 +218,11 @@ struct sock {
 #define sk_hash                        __sk_common.skc_hash
 #define sk_prot                        __sk_common.skc_prot
 #define sk_net                 __sk_common.skc_net
+       kmemcheck_bitfield_begin(flags);
        unsigned char           sk_shutdown : 2,
                                sk_no_check : 2,
                                sk_userlocks : 4;
+       kmemcheck_bitfield_end(flags);
        unsigned char           sk_protocol;
        unsigned short          sk_type;
        int                     sk_rcvbuf;
index c3b2a2aa7140afed4251baabb45a1671fe16ccc7..f0736cff2ca3f15f2762634726f55a6f27af22e7 100644 (file)
 #define S1DREG_DELAYOFF                        0xFFFE
 #define S1DREG_DELAYON                 0xFFFF
 
+#define BBLT_FIFO_EMPTY                        0x00
+#define BBLT_FIFO_NOT_EMPTY            0x40
+#define BBLT_FIFO_NOT_FULL             0x30
+#define BBLT_FIFO_HALF_FULL            0x20
+#define BBLT_FIFO_FULL                 0x10
+
+#define BBLT_SOLID_FILL                        0x0c
+
+
 /* Note: all above defines should go in separate header files
    when implementing other S1D13xxx chip support. */
 
index fed6dc31b0dae854c26ac29216fc5a08824eaa53..c4b3c6d51a72bae4e3f127b68c67ebdde8684bd8 100644 (file)
@@ -616,13 +616,13 @@ config SYSFS_DEPRECATED
        bool
 
 config SYSFS_DEPRECATED_V2
-       bool "Create deprecated sysfs layout for older userspace tools"
+       bool "remove sysfs features which may confuse old userspace tools"
        depends on SYSFS
-       default y
+       default n
        select SYSFS_DEPRECATED
        help
          This option switches the layout of sysfs to the deprecated
-         version.
+         version. Do not use it on recent distributions.
 
          The current sysfs layout features a unified device tree at
          /sys/devices/, which is able to express a hierarchy between
index dd7ee5f203f3f9c476a92d9ef39257a2791b132f..093f65915501bad5db5a8f9c348b5d62ad8a484c 100644 (file)
@@ -231,7 +231,8 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
 
 void __init mount_block_root(char *name, int flags)
 {
-       char *fs_names = __getname();
+       char *fs_names = __getname_gfp(GFP_KERNEL
+               | __GFP_NOTRACK_FALSE_POSITIVE);
        char *p;
 #ifdef CONFIG_BLOCK
        char b[BDEVNAME_SIZE];
index f6204f712e7c639652d06cbefb70bba4df2ae7a1..0e7aedeaa05f4f56a6737c078331f675201505a1 100644 (file)
@@ -65,6 +65,7 @@
 #include <linux/idr.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
+#include <linux/kmemcheck.h>
 #include <linux/kmemtrace.h>
 #include <trace/boot.h>
 
@@ -546,6 +547,7 @@ static void __init mm_init(void)
        page_cgroup_init_flatmem();
        mem_init();
        kmem_cache_init();
+       pgtable_cache_init();
        vmalloc_init();
 }
 
@@ -670,7 +672,6 @@ asmlinkage void __init start_kernel(void)
                initrd_start = 0;
        }
 #endif
-       cpuset_init_early();
        page_cgroup_init();
        enable_debug_pagealloc();
        cpu_hotplug_init();
@@ -684,7 +685,6 @@ asmlinkage void __init start_kernel(void)
                late_time_init();
        calibrate_delay();
        pidmap_init();
-       pgtable_cache_init();
        anon_vma_init();
 #ifdef CONFIG_X86
        if (efi_enabled)
@@ -867,6 +867,11 @@ static noinline int init_post(void)
 static int __init kernel_init(void * unused)
 {
        lock_kernel();
+
+       /*
+        * init can allocate pages on any node
+        */
+       set_mems_allowed(node_possible_map);
        /*
         * init can run on any cpu.
         */
index 90b53f6dc226c674d9b05eee60bcc671bab0edc9..9df4501cb92158d64ac3db254f7ae9e4ca93d22c 100644 (file)
@@ -11,6 +11,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
            async.o
+obj-y += groups.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
index d5a7e17474ee354259e71878156b6ea82ac8a6a2..7e75a41bd50855caf3a33109bed11a52fa967f80 100644 (file)
@@ -97,12 +97,6 @@ struct cpuset {
 
        struct cpuset *parent;          /* my parent */
 
-       /*
-        * Copy of global cpuset_mems_generation as of the most
-        * recent time this cpuset changed its mems_allowed.
-        */
-       int mems_generation;
-
        struct fmeter fmeter;           /* memory_pressure filter */
 
        /* partition number for rebuild_sched_domains() */
@@ -176,27 +170,6 @@ static inline int is_spread_slab(const struct cpuset *cs)
        return test_bit(CS_SPREAD_SLAB, &cs->flags);
 }
 
-/*
- * Increment this integer everytime any cpuset changes its
- * mems_allowed value.  Users of cpusets can track this generation
- * number, and avoid having to lock and reload mems_allowed unless
- * the cpuset they're using changes generation.
- *
- * A single, global generation is needed because cpuset_attach_task() could
- * reattach a task to a different cpuset, which must not have its
- * generation numbers aliased with those of that tasks previous cpuset.
- *
- * Generations are needed for mems_allowed because one task cannot
- * modify another's memory placement.  So we must enable every task,
- * on every visit to __alloc_pages(), to efficiently check whether
- * its current->cpuset->mems_allowed has changed, requiring an update
- * of its current->mems_allowed.
- *
- * Since writes to cpuset_mems_generation are guarded by the cgroup lock
- * there is no need to mark it atomic.
- */
-static int cpuset_mems_generation;
-
 static struct cpuset top_cpuset = {
        .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
 };
@@ -228,8 +201,9 @@ static struct cpuset top_cpuset = {
  * If a task is only holding callback_mutex, then it has read-only
  * access to cpusets.
  *
- * The task_struct fields mems_allowed and mems_generation may only
- * be accessed in the context of that task, so require no locks.
+ * Now, the task_struct fields mems_allowed and mempolicy may be changed
+ * by other task, we use alloc_lock in the task_struct fields to protect
+ * them.
  *
  * The cpuset_common_file_read() handlers only hold callback_mutex across
  * small pieces of code, such as when reading out possibly multi-word
@@ -331,75 +305,22 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
        BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
 }
 
-/**
- * cpuset_update_task_memory_state - update task memory placement
- *
- * If the current tasks cpusets mems_allowed changed behind our
- * backs, update current->mems_allowed, mems_generation and task NUMA
- * mempolicy to the new value.
- *
- * Task mempolicy is updated by rebinding it relative to the
- * current->cpuset if a task has its memory placement changed.
- * Do not call this routine if in_interrupt().
- *
- * Call without callback_mutex or task_lock() held.  May be
- * called with or without cgroup_mutex held.  Thanks in part to
- * 'the_top_cpuset_hack', the task's cpuset pointer will never
- * be NULL.  This routine also might acquire callback_mutex during
- * call.
- *
- * Reading current->cpuset->mems_generation doesn't need task_lock
- * to guard the current->cpuset derefence, because it is guarded
- * from concurrent freeing of current->cpuset using RCU.
- *
- * The rcu_dereference() is technically probably not needed,
- * as I don't actually mind if I see a new cpuset pointer but
- * an old value of mems_generation.  However this really only
- * matters on alpha systems using cpusets heavily.  If I dropped
- * that rcu_dereference(), it would save them a memory barrier.
- * For all other arch's, rcu_dereference is a no-op anyway, and for
- * alpha systems not using cpusets, another planned optimization,
- * avoiding the rcu critical section for tasks in the root cpuset
- * which is statically allocated, so can't vanish, will make this
- * irrelevant.  Better to use RCU as intended, than to engage in
- * some cute trick to save a memory barrier that is impossible to
- * test, for alpha systems using cpusets heavily, which might not
- * even exist.
- *
- * This routine is needed to update the per-task mems_allowed data,
- * within the tasks context, when it is trying to allocate memory
- * (in various mm/mempolicy.c routines) and notices that some other
- * task has been modifying its cpuset.
+/*
+ * update task's spread flag if cpuset's page/slab spread flag is set
+ *
+ * Called with callback_mutex/cgroup_mutex held
  */
-
-void cpuset_update_task_memory_state(void)
+static void cpuset_update_task_spread_flag(struct cpuset *cs,
+                                       struct task_struct *tsk)
 {
-       int my_cpusets_mem_gen;
-       struct task_struct *tsk = current;
-       struct cpuset *cs;
-
-       rcu_read_lock();
-       my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
-       rcu_read_unlock();
-
-       if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
-               mutex_lock(&callback_mutex);
-               task_lock(tsk);
-               cs = task_cs(tsk); /* Maybe changed when task not locked */
-               guarantee_online_mems(cs, &tsk->mems_allowed);
-               tsk->cpuset_mems_generation = cs->mems_generation;
-               if (is_spread_page(cs))
-                       tsk->flags |= PF_SPREAD_PAGE;
-               else
-                       tsk->flags &= ~PF_SPREAD_PAGE;
-               if (is_spread_slab(cs))
-                       tsk->flags |= PF_SPREAD_SLAB;
-               else
-                       tsk->flags &= ~PF_SPREAD_SLAB;
-               task_unlock(tsk);
-               mutex_unlock(&callback_mutex);
-               mpol_rebind_task(tsk, &tsk->mems_allowed);
-       }
+       if (is_spread_page(cs))
+               tsk->flags |= PF_SPREAD_PAGE;
+       else
+               tsk->flags &= ~PF_SPREAD_PAGE;
+       if (is_spread_slab(cs))
+               tsk->flags |= PF_SPREAD_SLAB;
+       else
+               tsk->flags &= ~PF_SPREAD_SLAB;
 }
 
 /*
@@ -1007,14 +928,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  *    other task, the task_struct mems_allowed that we are hacking
  *    is for our current task, which must allocate new pages for that
  *    migrating memory region.
- *
- *    We call cpuset_update_task_memory_state() before hacking
- *    our tasks mems_allowed, so that we are assured of being in
- *    sync with our tasks cpuset, and in particular, callbacks to
- *    cpuset_update_task_memory_state() from nested page allocations
- *    won't see any mismatch of our cpuset and task mems_generation
- *    values, so won't overwrite our hacked tasks mems_allowed
- *    nodemask.
  */
 
 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
@@ -1022,22 +935,37 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 {
        struct task_struct *tsk = current;
 
-       cpuset_update_task_memory_state();
-
-       mutex_lock(&callback_mutex);
        tsk->mems_allowed = *to;
-       mutex_unlock(&callback_mutex);
 
        do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
-       mutex_lock(&callback_mutex);
        guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
-       mutex_unlock(&callback_mutex);
 }
 
 /*
- * Rebind task's vmas to cpuset's new mems_allowed, and migrate pages to new
- * nodes if memory_migrate flag is set. Called with cgroup_mutex held.
+ * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
+ * @tsk: the task to change
+ * @newmems: new nodes that the task will be set
+ *
+ * In order to avoid seeing no nodes if the old and new nodes are disjoint,
+ * we structure updates as setting all new allowed nodes, then clearing newly
+ * disallowed ones.
+ *
+ * Called with task's alloc_lock held
+ */
+static void cpuset_change_task_nodemask(struct task_struct *tsk,
+                                       nodemask_t *newmems)
+{
+       nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+       mpol_rebind_task(tsk, &tsk->mems_allowed);
+       mpol_rebind_task(tsk, newmems);
+       tsk->mems_allowed = *newmems;
+}
+
+/*
+ * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
+ * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
+ * memory_migrate flag is set. Called with cgroup_mutex held.
  */
 static void cpuset_change_nodemask(struct task_struct *p,
                                   struct cgroup_scanner *scan)
@@ -1046,12 +974,19 @@ static void cpuset_change_nodemask(struct task_struct *p,
        struct cpuset *cs;
        int migrate;
        const nodemask_t *oldmem = scan->data;
+       nodemask_t newmems;
+
+       cs = cgroup_cs(scan->cg);
+       guarantee_online_mems(cs, &newmems);
+
+       task_lock(p);
+       cpuset_change_task_nodemask(p, &newmems);
+       task_unlock(p);
 
        mm = get_task_mm(p);
        if (!mm)
                return;
 
-       cs = cgroup_cs(scan->cg);
        migrate = is_memory_migrate(cs);
 
        mpol_rebind_mm(mm, &cs->mems_allowed);
@@ -1104,10 +1039,10 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
 /*
  * Handle user request to change the 'mems' memory placement
  * of a cpuset.  Needs to validate the request, update the
- * cpusets mems_allowed and mems_generation, and for each
- * task in the cpuset, rebind any vma mempolicies and if
- * the cpuset is marked 'memory_migrate', migrate the tasks
- * pages to the new memory.
+ * cpusets mems_allowed, and for each task in the cpuset,
+ * update mems_allowed and rebind task's mempolicy and any vma
+ * mempolicies and if the cpuset is marked 'memory_migrate',
+ * migrate the tasks pages to the new memory.
  *
  * Call with cgroup_mutex held.  May take callback_mutex during call.
  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
@@ -1160,7 +1095,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 
        mutex_lock(&callback_mutex);
        cs->mems_allowed = trialcs->mems_allowed;
-       cs->mems_generation = cpuset_mems_generation++;
        mutex_unlock(&callback_mutex);
 
        update_tasks_nodemask(cs, &oldmem, &heap);
@@ -1192,6 +1126,46 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
        return 0;
 }
 
+/*
+ * cpuset_change_flag - make a task's spread flags the same as its cpuset's
+ * @tsk: task to be updated
+ * @scan: struct cgroup_scanner containing the cgroup of the task
+ *
+ * Called by cgroup_scan_tasks() for each task in a cgroup.
+ *
+ * We don't need to re-check for the cgroup/cpuset membership, since we're
+ * holding cgroup_lock() at this point.
+ */
+static void cpuset_change_flag(struct task_struct *tsk,
+                               struct cgroup_scanner *scan)
+{
+       cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
+}
+
+/*
+ * update_tasks_flags - update the spread flags of tasks in the cpuset.
+ * @cs: the cpuset in which each task's spread flags needs to be changed
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ *
+ * Called with cgroup_mutex held
+ *
+ * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * calling callback functions for each.
+ *
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
+ */
+static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
+{
+       struct cgroup_scanner scan;
+
+       scan.cg = cs->css.cgroup;
+       scan.test_task = NULL;
+       scan.process_task = cpuset_change_flag;
+       scan.heap = heap;
+       cgroup_scan_tasks(&scan);
+}
+
 /*
  * update_flag - read a 0 or a 1 in a file and update associated flag
  * bit:                the bit to update (see cpuset_flagbits_t)
@@ -1205,8 +1179,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
                       int turning_on)
 {
        struct cpuset *trialcs;
-       int err;
        int balance_flag_changed;
+       int spread_flag_changed;
+       struct ptr_heap heap;
+       int err;
 
        trialcs = alloc_trial_cpuset(cs);
        if (!trialcs)
@@ -1221,9 +1197,16 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
        if (err < 0)
                goto out;
 
+       err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+       if (err < 0)
+               goto out;
+
        balance_flag_changed = (is_sched_load_balance(cs) !=
                                is_sched_load_balance(trialcs));
 
+       spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
+                       || (is_spread_page(cs) != is_spread_page(trialcs)));
+
        mutex_lock(&callback_mutex);
        cs->flags = trialcs->flags;
        mutex_unlock(&callback_mutex);
@@ -1231,6 +1214,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
        if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
                async_rebuild_sched_domains();
 
+       if (spread_flag_changed)
+               update_tasks_flags(cs, &heap);
+       heap_free(&heap);
 out:
        free_trial_cpuset(trialcs);
        return err;
@@ -1372,15 +1358,20 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 
        if (cs == &top_cpuset) {
                cpumask_copy(cpus_attach, cpu_possible_mask);
+               to = node_possible_map;
        } else {
-               mutex_lock(&callback_mutex);
                guarantee_online_cpus(cs, cpus_attach);
-               mutex_unlock(&callback_mutex);
+               guarantee_online_mems(cs, &to);
        }
        err = set_cpus_allowed_ptr(tsk, cpus_attach);
        if (err)
                return;
 
+       task_lock(tsk);
+       cpuset_change_task_nodemask(tsk, &to);
+       task_unlock(tsk);
+       cpuset_update_task_spread_flag(cs, tsk);
+
        from = oldcs->mems_allowed;
        to = cs->mems_allowed;
        mm = get_task_mm(tsk);
@@ -1442,11 +1433,9 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
                break;
        case FILE_SPREAD_PAGE:
                retval = update_flag(CS_SPREAD_PAGE, cs, val);
-               cs->mems_generation = cpuset_mems_generation++;
                break;
        case FILE_SPREAD_SLAB:
                retval = update_flag(CS_SPREAD_SLAB, cs, val);
-               cs->mems_generation = cpuset_mems_generation++;
                break;
        default:
                retval = -EINVAL;
@@ -1786,8 +1775,6 @@ static struct cgroup_subsys_state *cpuset_create(
        struct cpuset *parent;
 
        if (!cont->parent) {
-               /* This is early initialization for the top cgroup */
-               top_cpuset.mems_generation = cpuset_mems_generation++;
                return &top_cpuset.css;
        }
        parent = cgroup_cs(cont->parent);
@@ -1799,7 +1786,6 @@ static struct cgroup_subsys_state *cpuset_create(
                return ERR_PTR(-ENOMEM);
        }
 
-       cpuset_update_task_memory_state();
        cs->flags = 0;
        if (is_spread_page(parent))
                set_bit(CS_SPREAD_PAGE, &cs->flags);
@@ -1808,7 +1794,6 @@ static struct cgroup_subsys_state *cpuset_create(
        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
        cpumask_clear(cs->cpus_allowed);
        nodes_clear(cs->mems_allowed);
-       cs->mems_generation = cpuset_mems_generation++;
        fmeter_init(&cs->fmeter);
        cs->relax_domain_level = -1;
 
@@ -1827,8 +1812,6 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 {
        struct cpuset *cs = cgroup_cs(cont);
 
-       cpuset_update_task_memory_state();
-
        if (is_sched_load_balance(cs))
                update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
@@ -1849,21 +1832,6 @@ struct cgroup_subsys cpuset_subsys = {
        .early_init = 1,
 };
 
-/*
- * cpuset_init_early - just enough so that the calls to
- * cpuset_update_task_memory_state() in early init code
- * are harmless.
- */
-
-int __init cpuset_init_early(void)
-{
-       alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_NOWAIT);
-
-       top_cpuset.mems_generation = cpuset_mems_generation++;
-       return 0;
-}
-
-
 /**
  * cpuset_init - initialize cpusets at system boot
  *
@@ -1874,11 +1842,13 @@ int __init cpuset_init(void)
 {
        int err = 0;
 
+       if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
+               BUG();
+
        cpumask_setall(top_cpuset.cpus_allowed);
        nodes_setall(top_cpuset.mems_allowed);
 
        fmeter_init(&top_cpuset.fmeter);
-       top_cpuset.mems_generation = cpuset_mems_generation++;
        set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
        top_cpuset.relax_domain_level = -1;
 
index 4430eb1376f257bd008dab17d3c6ea19cc9c2d3b..be022c200da67fdf1206761ba3adbb38a3208cdd 100644 (file)
@@ -178,7 +178,7 @@ void __init fork_init(unsigned long mempages)
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
-                       ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+                       ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
        /* do the arch specific task caches init */
@@ -1470,20 +1470,20 @@ void __init proc_caches_init(void)
 {
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-                       sighand_ctor);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+                       SLAB_NOTRACK, sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
        mmap_init();
 }
diff --git a/kernel/groups.c b/kernel/groups.c
new file mode 100644 (file)
index 0000000..2b45b2e
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ * Supplementary group IDs
+ */
+#include <linux/cred.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <asm/uaccess.h>
+
+/* init to 2 - one for init_task, one to ensure it is never freed */
+struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+
+struct group_info *groups_alloc(int gidsetsize)
+{
+       struct group_info *group_info;
+       int nblocks;
+       int i;
+
+       nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
+       /* Make sure we always allocate at least one indirect block pointer */
+       nblocks = nblocks ? : 1;
+       group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
+       if (!group_info)
+               return NULL;
+       group_info->ngroups = gidsetsize;
+       group_info->nblocks = nblocks;
+       atomic_set(&group_info->usage, 1);
+
+       if (gidsetsize <= NGROUPS_SMALL)
+               group_info->blocks[0] = group_info->small_block;
+       else {
+               for (i = 0; i < nblocks; i++) {
+                       gid_t *b;
+                       b = (void *)__get_free_page(GFP_USER);
+                       if (!b)
+                               goto out_undo_partial_alloc;
+                       group_info->blocks[i] = b;
+               }
+       }
+       return group_info;
+
+out_undo_partial_alloc:
+       while (--i >= 0) {
+               free_page((unsigned long)group_info->blocks[i]);
+       }
+       kfree(group_info);
+       return NULL;
+}
+
+EXPORT_SYMBOL(groups_alloc);
+
+void groups_free(struct group_info *group_info)
+{
+       if (group_info->blocks[0] != group_info->small_block) {
+               int i;
+               for (i = 0; i < group_info->nblocks; i++)
+                       free_page((unsigned long)group_info->blocks[i]);
+       }
+       kfree(group_info);
+}
+
+EXPORT_SYMBOL(groups_free);
+
+/* export the group_info to a user-space array */
+static int groups_to_user(gid_t __user *grouplist,
+                         const struct group_info *group_info)
+{
+       int i;
+       unsigned int count = group_info->ngroups;
+
+       for (i = 0; i < group_info->nblocks; i++) {
+               unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+               unsigned int len = cp_count * sizeof(*grouplist);
+
+               if (copy_to_user(grouplist, group_info->blocks[i], len))
+                       return -EFAULT;
+
+               grouplist += NGROUPS_PER_BLOCK;
+               count -= cp_count;
+       }
+       return 0;
+}
+
+/* fill a group_info from a user-space array - it must be allocated already */
+static int groups_from_user(struct group_info *group_info,
+    gid_t __user *grouplist)
+{
+       int i;
+       unsigned int count = group_info->ngroups;
+
+       for (i = 0; i < group_info->nblocks; i++) {
+               unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+               unsigned int len = cp_count * sizeof(*grouplist);
+
+               if (copy_from_user(group_info->blocks[i], grouplist, len))
+                       return -EFAULT;
+
+               grouplist += NGROUPS_PER_BLOCK;
+               count -= cp_count;
+       }
+       return 0;
+}
+
+/* a simple Shell sort */
+static void groups_sort(struct group_info *group_info)
+{
+       int base, max, stride;
+       int gidsetsize = group_info->ngroups;
+
+       for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
+               ; /* nothing */
+       stride /= 3;
+
+       while (stride) {
+               max = gidsetsize - stride;
+               for (base = 0; base < max; base++) {
+                       int left = base;
+                       int right = left + stride;
+                       gid_t tmp = GROUP_AT(group_info, right);
+
+                       while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
+                               GROUP_AT(group_info, right) =
+                                   GROUP_AT(group_info, left);
+                               right = left;
+                               left -= stride;
+                       }
+                       GROUP_AT(group_info, right) = tmp;
+               }
+               stride /= 3;
+       }
+}
+
+/* a simple bsearch */
+int groups_search(const struct group_info *group_info, gid_t grp)
+{
+       unsigned int left, right;
+
+       if (!group_info)
+               return 0;
+
+       left = 0;
+       right = group_info->ngroups;
+       while (left < right) {
+               unsigned int mid = (left+right)/2;
+               int cmp = grp - GROUP_AT(group_info, mid);
+               if (cmp > 0)
+                       left = mid + 1;
+               else if (cmp < 0)
+                       right = mid;
+               else
+                       return 1;
+       }
+       return 0;
+}
+
+/**
+ * set_groups - Change a group subscription in a set of credentials
+ * @new: The newly prepared set of credentials to alter
+ * @group_info: The group list to install
+ *
+ * Validate a group subscription and, if valid, insert it into a set
+ * of credentials.
+ */
+int set_groups(struct cred *new, struct group_info *group_info)
+{
+       int retval;
+
+       retval = security_task_setgroups(group_info);
+       if (retval)
+               return retval;
+
+       put_group_info(new->group_info);
+       groups_sort(group_info);
+       get_group_info(group_info);
+       new->group_info = group_info;
+       return 0;
+}
+
+EXPORT_SYMBOL(set_groups);
+
+/**
+ * set_current_groups - Change current's group subscription
+ * @group_info: The group list to impose
+ *
+ * Validate a group subscription and, if valid, impose it upon current's task
+ * security record.
+ */
+int set_current_groups(struct group_info *group_info)
+{
+       struct cred *new;
+       int ret;
+
+       new = prepare_creds();
+       if (!new)
+               return -ENOMEM;
+
+       ret = set_groups(new, group_info);
+       if (ret < 0) {
+               abort_creds(new);
+               return ret;
+       }
+
+       return commit_creds(new);
+}
+
+EXPORT_SYMBOL(set_current_groups);
+
+SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
+{
+       const struct cred *cred = current_cred();
+       int i;
+
+       if (gidsetsize < 0)
+               return -EINVAL;
+
+       /* no need to grab task_lock here; it cannot change */
+       i = cred->group_info->ngroups;
+       if (gidsetsize) {
+               if (i > gidsetsize) {
+                       i = -EINVAL;
+                       goto out;
+               }
+               if (groups_to_user(grouplist, cred->group_info)) {
+                       i = -EFAULT;
+                       goto out;
+               }
+       }
+out:
+       return i;
+}
+
+/*
+ *     SMP: Our groups are copy-on-write. We can set them safely
+ *     without another task interfering.
+ */
+
+SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+{
+       struct group_info *group_info;
+       int retval;
+
+       if (!capable(CAP_SETGID))
+               return -EPERM;
+       if ((unsigned)gidsetsize > NGROUPS_MAX)
+               return -EINVAL;
+
+       group_info = groups_alloc(gidsetsize);
+       if (!group_info)
+               return -ENOMEM;
+       retval = groups_from_user(group_info, grouplist);
+       if (retval) {
+               put_group_info(group_info);
+               return retval;
+       }
+
+       retval = set_current_groups(group_info);
+       put_group_info(group_info);
+
+       return retval;
+}
+
+/*
+ * Check whether we're fsgid/egid or in the supplemental group..
+ */
+int in_group_p(gid_t grp)
+{
+       const struct cred *cred = current_cred();
+       int retval = 1;
+
+       if (grp != cred->fsgid)
+               retval = groups_search(cred->group_info, grp);
+       return retval;
+}
+
+EXPORT_SYMBOL(in_group_p);
+
+int in_egroup_p(gid_t grp)
+{
+       const struct cred *cred = current_cred();
+       int retval = 1;
+
+       if (grp != cred->egid)
+               retval = groups_search(cred->group_info, grp);
+       return retval;
+}
+
+EXPORT_SYMBOL(in_egroup_p);
index bc41ad0f24f881845d96d77a00a3d3535968566b..26539e3228e50f81ca892122a23b6bd1fcf98fe6 100644 (file)
@@ -72,9 +72,9 @@ struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
 
        /*
         * round up to the next power of 2, since our 'let the indices
-        * wrap' tachnique works only in this case.
+        * wrap' technique works only in this case.
         */
-       if (size & (size - 1)) {
+       if (!is_power_of_2(size)) {
                BUG_ON(size > 0x80000000);
                size = roundup_pow_of_two(size);
        }
index 41c88fe40500399c76f0382d51a3fb05aef03211..7fa441333529588586cd5a538e7b1fbdd118fbfd 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kthread.h>
 #include <linux/completion.h>
 #include <linux/err.h>
+#include <linux/cpuset.h>
 #include <linux/unistd.h>
 #include <linux/file.h>
 #include <linux/module.h>
@@ -236,6 +237,7 @@ int kthreadd(void *unused)
        ignore_signals(tsk);
        set_user_nice(tsk, KTHREAD_NICE_LEVEL);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
+       set_mems_allowed(node_possible_map);
 
        current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
 
index ca634019497a5cf8b035872bdaf8edcfaefd8b88..da2072d7381160493d540a51976f45a966b265fb 100644 (file)
@@ -117,9 +117,12 @@ int freeze_processes(void)
        if (error)
                goto Exit;
        printk("done.");
+
+       oom_killer_disable();
  Exit:
        BUG_ON(in_atomic());
        printk("\n");
+
        return error;
 }
 
@@ -145,6 +148,8 @@ static void thaw_tasks(bool nosig_only)
 
 void thaw_processes(void)
 {
+       oom_killer_enable();
+
        printk("Restarting tasks ... ");
        thaw_tasks(true);
        thaw_tasks(false);
index 28cf26ad2d247e497411f4603496209b1f8493f0..69911b5745eb1a9fd406e7bf3ef4a653033898a2 100644 (file)
@@ -365,7 +365,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
                node = cpu_to_node(cpu);
                per_cpu(cpu_profile_flip, cpu) = 0;
                if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-                       page = alloc_pages_node(node,
+                       page = alloc_pages_exact_node(node,
                                        GFP_KERNEL | __GFP_ZERO,
                                        0);
                        if (!page)
@@ -373,7 +373,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
                        per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
                }
                if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-                       page = alloc_pages_node(node,
+                       page = alloc_pages_exact_node(node,
                                        GFP_KERNEL | __GFP_ZERO,
                                        0);
                        if (!page)
@@ -564,14 +564,14 @@ static int create_hash_tables(void)
                int node = cpu_to_node(cpu);
                struct page *page;
 
-               page = alloc_pages_node(node,
+               page = alloc_pages_exact_node(node,
                                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                0);
                if (!page)
                        goto out_cleanup;
                per_cpu(cpu_profile_hits, cpu)[1]
                                = (struct profile_hit *)page_address(page);
-               page = alloc_pages_node(node,
+               page = alloc_pages_exact_node(node,
                                GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                0);
                if (!page)
index 809a228019adeb06c8979d86f52473a0d4a168ac..d81f4952eebbcfa484c338256e3ff2e84c9d3b1e 100644 (file)
@@ -832,6 +832,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 {
        struct sigpending *pending;
        struct sigqueue *q;
+       int override_rlimit;
 
        trace_sched_signal_send(sig, t);
 
@@ -863,9 +864,13 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */
 
-       q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
-                                            (is_si_special(info) ||
-                                             info->si_code >= 0)));
+       if (sig < SIGRTMIN)
+               override_rlimit = (is_si_special(info) || info->si_code >= 0);
+       else
+               override_rlimit = 0;
+
+       q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+               override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
index 521ed2004d63fc7cf2bfbd8eff33d7d1bf58f1d3..09d7519557d35181fab35591a17516efe5aba723 100644 (file)
@@ -318,6 +318,15 @@ cant_get_ref:
 }
 EXPORT_SYMBOL(slow_work_enqueue);
 
+/*
+ * Schedule a cull of the thread pool at some time in the near future
+ */
+static void slow_work_schedule_cull(void)
+{
+       mod_timer(&slow_work_cull_timer,
+                 round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
+}
+
 /*
  * Worker thread culling algorithm
  */
@@ -335,8 +344,7 @@ static bool slow_work_cull_thread(void)
                    list_empty(&vslow_work_queue) &&
                    atomic_read(&slow_work_thread_count) >
                    slow_work_min_threads) {
-                       mod_timer(&slow_work_cull_timer,
-                                 jiffies + SLOW_WORK_CULL_TIMEOUT);
+                       slow_work_schedule_cull();
                        do_cull = true;
                }
        }
@@ -393,8 +401,7 @@ static int slow_work_thread(void *_data)
                            list_empty(&vslow_work_queue) &&
                            atomic_read(&slow_work_thread_count) >
                            slow_work_min_threads)
-                               mod_timer(&slow_work_cull_timer,
-                                         jiffies + SLOW_WORK_CULL_TIMEOUT);
+                               slow_work_schedule_cull();
                        continue;
                }
 
@@ -458,7 +465,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
                if (atomic_dec_and_test(&slow_work_thread_count))
                        BUG(); /* we're running on a slow work thread... */
                mod_timer(&slow_work_oom_timer,
-                         jiffies + SLOW_WORK_OOM_TIMEOUT);
+                         round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
        } else {
                /* ratelimit the starting of new threads */
                mod_timer(&slow_work_oom_timer, jiffies + 1);
@@ -502,8 +509,7 @@ static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
                        if (n < 0 && !slow_work_may_not_start_new_thread)
                                slow_work_enqueue(&slow_work_new_thread);
                        else if (n > 0)
-                               mod_timer(&slow_work_cull_timer,
-                                         jiffies + SLOW_WORK_CULL_TIMEOUT);
+                               slow_work_schedule_cull();
                }
                mutex_unlock(&slow_work_user_lock);
        }
@@ -529,8 +535,7 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
                                atomic_read(&slow_work_thread_count);
 
                        if (n < 0)
-                               mod_timer(&slow_work_cull_timer,
-                                         jiffies + SLOW_WORK_CULL_TIMEOUT);
+                               slow_work_schedule_cull();
                }
                mutex_unlock(&slow_work_user_lock);
        }
index 258885a543db60692a695aa682abb6c95f3ffcdf..b41fb710e114f5d233c18c37b0e0aee9344fa7d8 100644 (file)
@@ -382,6 +382,17 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
+void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+       BUG_ON(!irqs_disabled());
+
+       t->next = __get_cpu_var(tasklet_hi_vec).head;
+       __get_cpu_var(tasklet_hi_vec).head = t;
+       __raise_softirq_irqoff(HI_SOFTIRQ);
+}
+
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
 static void tasklet_action(struct softirq_action *a)
 {
        struct tasklet_struct *list;
index 438d99a38c87f343dd318c3966799ec73bb6072a..b3f1097c76fa481df3597e035d7b0c36fe2db6c2 100644 (file)
@@ -1113,289 +1113,6 @@ out:
        return err;
 }
 
-/*
- * Supplementary group IDs
- */
-
-/* init to 2 - one for init_task, one to ensure it is never freed */
-struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
-
-struct group_info *groups_alloc(int gidsetsize)
-{
-       struct group_info *group_info;
-       int nblocks;
-       int i;
-
-       nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
-       /* Make sure we always allocate at least one indirect block pointer */
-       nblocks = nblocks ? : 1;
-       group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
-       if (!group_info)
-               return NULL;
-       group_info->ngroups = gidsetsize;
-       group_info->nblocks = nblocks;
-       atomic_set(&group_info->usage, 1);
-
-       if (gidsetsize <= NGROUPS_SMALL)
-               group_info->blocks[0] = group_info->small_block;
-       else {
-               for (i = 0; i < nblocks; i++) {
-                       gid_t *b;
-                       b = (void *)__get_free_page(GFP_USER);
-                       if (!b)
-                               goto out_undo_partial_alloc;
-                       group_info->blocks[i] = b;
-               }
-       }
-       return group_info;
-
-out_undo_partial_alloc:
-       while (--i >= 0) {
-               free_page((unsigned long)group_info->blocks[i]);
-       }
-       kfree(group_info);
-       return NULL;
-}
-
-EXPORT_SYMBOL(groups_alloc);
-
-void groups_free(struct group_info *group_info)
-{
-       if (group_info->blocks[0] != group_info->small_block) {
-               int i;
-               for (i = 0; i < group_info->nblocks; i++)
-                       free_page((unsigned long)group_info->blocks[i]);
-       }
-       kfree(group_info);
-}
-
-EXPORT_SYMBOL(groups_free);
-
-/* export the group_info to a user-space array */
-static int groups_to_user(gid_t __user *grouplist,
-                         const struct group_info *group_info)
-{
-       int i;
-       unsigned int count = group_info->ngroups;
-
-       for (i = 0; i < group_info->nblocks; i++) {
-               unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
-               unsigned int len = cp_count * sizeof(*grouplist);
-
-               if (copy_to_user(grouplist, group_info->blocks[i], len))
-                       return -EFAULT;
-
-               grouplist += NGROUPS_PER_BLOCK;
-               count -= cp_count;
-       }
-       return 0;
-}
-
-/* fill a group_info from a user-space array - it must be allocated already */
-static int groups_from_user(struct group_info *group_info,
-    gid_t __user *grouplist)
-{
-       int i;
-       unsigned int count = group_info->ngroups;
-
-       for (i = 0; i < group_info->nblocks; i++) {
-               unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
-               unsigned int len = cp_count * sizeof(*grouplist);
-
-               if (copy_from_user(group_info->blocks[i], grouplist, len))
-                       return -EFAULT;
-
-               grouplist += NGROUPS_PER_BLOCK;
-               count -= cp_count;
-       }
-       return 0;
-}
-
-/* a simple Shell sort */
-static void groups_sort(struct group_info *group_info)
-{
-       int base, max, stride;
-       int gidsetsize = group_info->ngroups;
-
-       for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
-               ; /* nothing */
-       stride /= 3;
-
-       while (stride) {
-               max = gidsetsize - stride;
-               for (base = 0; base < max; base++) {
-                       int left = base;
-                       int right = left + stride;
-                       gid_t tmp = GROUP_AT(group_info, right);
-
-                       while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
-                               GROUP_AT(group_info, right) =
-                                   GROUP_AT(group_info, left);
-                               right = left;
-                               left -= stride;
-                       }
-                       GROUP_AT(group_info, right) = tmp;
-               }
-               stride /= 3;
-       }
-}
-
-/* a simple bsearch */
-int groups_search(const struct group_info *group_info, gid_t grp)
-{
-       unsigned int left, right;
-
-       if (!group_info)
-               return 0;
-
-       left = 0;
-       right = group_info->ngroups;
-       while (left < right) {
-               unsigned int mid = (left+right)/2;
-               int cmp = grp - GROUP_AT(group_info, mid);
-               if (cmp > 0)
-                       left = mid + 1;
-               else if (cmp < 0)
-                       right = mid;
-               else
-                       return 1;
-       }
-       return 0;
-}
-
-/**
- * set_groups - Change a group subscription in a set of credentials
- * @new: The newly prepared set of credentials to alter
- * @group_info: The group list to install
- *
- * Validate a group subscription and, if valid, insert it into a set
- * of credentials.
- */
-int set_groups(struct cred *new, struct group_info *group_info)
-{
-       int retval;
-
-       retval = security_task_setgroups(group_info);
-       if (retval)
-               return retval;
-
-       put_group_info(new->group_info);
-       groups_sort(group_info);
-       get_group_info(group_info);
-       new->group_info = group_info;
-       return 0;
-}
-
-EXPORT_SYMBOL(set_groups);
-
-/**
- * set_current_groups - Change current's group subscription
- * @group_info: The group list to impose
- *
- * Validate a group subscription and, if valid, impose it upon current's task
- * security record.
- */
-int set_current_groups(struct group_info *group_info)
-{
-       struct cred *new;
-       int ret;
-
-       new = prepare_creds();
-       if (!new)
-               return -ENOMEM;
-
-       ret = set_groups(new, group_info);
-       if (ret < 0) {
-               abort_creds(new);
-               return ret;
-       }
-
-       return commit_creds(new);
-}
-
-EXPORT_SYMBOL(set_current_groups);
-
-SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
-{
-       const struct cred *cred = current_cred();
-       int i;
-
-       if (gidsetsize < 0)
-               return -EINVAL;
-
-       /* no need to grab task_lock here; it cannot change */
-       i = cred->group_info->ngroups;
-       if (gidsetsize) {
-               if (i > gidsetsize) {
-                       i = -EINVAL;
-                       goto out;
-               }
-               if (groups_to_user(grouplist, cred->group_info)) {
-                       i = -EFAULT;
-                       goto out;
-               }
-       }
-out:
-       return i;
-}
-
-/*
- *     SMP: Our groups are copy-on-write. We can set them safely
- *     without another task interfering.
- */
-SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
-{
-       struct group_info *group_info;
-       int retval;
-
-       if (!capable(CAP_SETGID))
-               return -EPERM;
-       if ((unsigned)gidsetsize > NGROUPS_MAX)
-               return -EINVAL;
-
-       group_info = groups_alloc(gidsetsize);
-       if (!group_info)
-               return -ENOMEM;
-       retval = groups_from_user(group_info, grouplist);
-       if (retval) {
-               put_group_info(group_info);
-               return retval;
-       }
-
-       retval = set_current_groups(group_info);
-       put_group_info(group_info);
-
-       return retval;
-}
-
-/*
- * Check whether we're fsgid/egid or in the supplemental group..
- */
-int in_group_p(gid_t grp)
-{
-       const struct cred *cred = current_cred();
-       int retval = 1;
-
-       if (grp != cred->fsgid)
-               retval = groups_search(cred->group_info, grp);
-       return retval;
-}
-
-EXPORT_SYMBOL(in_group_p);
-
-int in_egroup_p(gid_t grp)
-{
-       const struct cred *cred = current_cred();
-       int retval = 1;
-
-       if (grp != cred->egid)
-               retval = groups_search(cred->group_info, grp);
-       return retval;
-}
-
-EXPORT_SYMBOL(in_egroup_p);
-
 DECLARE_RWSEM(uts_sem);
 
 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
index 0e51a35a44869425aa3f4e7cf9fca498787fd0fb..ab462b9968d579072de5e3421dcfa5ead354d590 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/security.h>
 #include <linux/ctype.h>
 #include <linux/utsname.h>
+#include <linux/kmemcheck.h>
 #include <linux/smp_lock.h>
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -967,6 +968,17 @@ static struct ctl_table kern_table[] = {
                .proc_handler   = &proc_dointvec,
        },
 #endif
+#ifdef CONFIG_KMEMCHECK
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "kmemcheck",
+               .data           = &kmemcheck_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
+#endif
+
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
@@ -1325,7 +1337,6 @@ static struct ctl_table vm_table[] = {
                .extra2         = &one,
        },
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "scan_unevictable_pages",
@@ -1334,7 +1345,6 @@ static struct ctl_table vm_table[] = {
                .mode           = 0644,
                .proc_handler   = &scan_unevictable_handler,
        },
-#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
index 4a13e5a01ce318c62c75c991a8694096e4821f40..61071fecc82e5a6f2ed1dad5605efe35a40e3f6e 100644 (file)
@@ -147,7 +147,7 @@ config IRQSOFF_TRACER
          disabled by default and can be runtime (re-)started
          via:
 
-             echo 0 > /debugfs/tracing/tracing_max_latency
+             echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 
          (Note that kernel size and overhead increases with this option
          enabled. This option and the preempt-off timing option can be
@@ -168,7 +168,7 @@ config PREEMPT_TRACER
          disabled by default and can be runtime (re-)started
          via:
 
-             echo 0 > /debugfs/tracing/tracing_max_latency
+             echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 
          (Note that kernel size and overhead increases with this option
          enabled. This option and the irqs-off timing option can be
@@ -261,7 +261,7 @@ config PROFILE_ANNOTATED_BRANCHES
          This tracer profiles all the the likely and unlikely macros
          in the kernel. It will display the results in:
 
-         /debugfs/tracing/profile_annotated_branch
+         /sys/kernel/debug/tracing/profile_annotated_branch
 
          Note: this will add a significant overhead, only turn this
          on if you need to profile the system's use of these macros.
@@ -274,7 +274,7 @@ config PROFILE_ALL_BRANCHES
          taken in the kernel is recorded whether it hit or miss.
          The results will be displayed in:
 
-         /debugfs/tracing/profile_branch
+         /sys/kernel/debug/tracing/profile_branch
 
          This option also enables the likely/unlikely profiler.
 
@@ -323,7 +323,7 @@ config STACK_TRACER
        select KALLSYMS
        help
          This special tracer records the maximum stack footprint of the
-         kernel and displays it in debugfs/tracing/stack_trace.
+         kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
 
          This tracer works by hooking into every function call that the
          kernel executes, and keeping a maximum stack depth value and
index 2e642b2b7253d11ddce40c657a471f007561be65..dc4dc70171ce2236236a396430f228f15b1a732d 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -1270,6 +1271,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
        if (tail < BUF_PAGE_SIZE) {
                /* Mark the rest of the page with padding */
                event = __rb_page_index(tail_page, tail);
+               kmemcheck_annotate_bitfield(event, bitfield);
                rb_event_set_padding(event);
        }
 
@@ -1327,6 +1329,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                return NULL;
 
        event = __rb_page_index(tail_page, tail);
+       kmemcheck_annotate_bitfield(event, bitfield);
        rb_update_event(event, type, length);
 
        /* The passed in type is zero for DATA */
index 8acd9b81a5d76046ee52c9d1dc9224cc43edb1c2..c1878bfb2e1ec4fe8efbf56764a341023b1307c3 100644 (file)
@@ -344,7 +344,7 @@ static raw_spinlock_t ftrace_max_lock =
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /debugfs/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2414,21 +2414,20 @@ static const struct file_operations tracing_iter_fops = {
 
 static const char readme_msg[] =
        "tracing mini-HOWTO:\n\n"
-       "# mkdir /debug\n"
-       "# mount -t debugfs nodev /debug\n\n"
-       "# cat /debug/tracing/available_tracers\n"
+       "# mount -t debugfs nodev /sys/kernel/debug\n\n"
+       "# cat /sys/kernel/debug/tracing/available_tracers\n"
        "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
-       "# cat /debug/tracing/current_tracer\n"
+       "# cat /sys/kernel/debug/tracing/current_tracer\n"
        "nop\n"
-       "# echo sched_switch > /debug/tracing/current_tracer\n"
-       "# cat /debug/tracing/current_tracer\n"
+       "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+       "# cat /sys/kernel/debug/tracing/current_tracer\n"
        "sched_switch\n"
-       "# cat /debug/tracing/trace_options\n"
+       "# cat /sys/kernel/debug/tracing/trace_options\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
-       "# echo print-parent > /debug/tracing/trace_options\n"
-       "# echo 1 > /debug/tracing/tracing_enabled\n"
-       "# cat /debug/tracing/trace > /tmp/trace.txt\n"
-       "# echo 0 > /debug/tracing/tracing_enabled\n"
+       "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
+       "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+       "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
+       "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
 ;
 
 static ssize_t
index 850e0ba41c1e60f7983f2c3e7890bee4b2bec53c..2c000e7132acec2201a872e992ca0fc791894528 100644 (file)
@@ -75,21 +75,6 @@ static void uid_hash_remove(struct user_struct *up)
        put_user_ns(up->user_ns);
 }
 
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
-{
-       struct user_struct *user;
-       struct hlist_node *h;
-
-       hlist_for_each_entry(user, h, hashent, uidhash_node) {
-               if (user->uid == uid) {
-                       atomic_inc(&user->__count);
-                       return user;
-               }
-       }
-
-       return NULL;
-}
-
 #ifdef CONFIG_USER_SCHED
 
 static void sched_destroy_user(struct user_struct *up)
@@ -119,6 +104,23 @@ static int sched_create_user(struct user_struct *up) { return 0; }
 
 #if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
 
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+       struct user_struct *user;
+       struct hlist_node *h;
+
+       hlist_for_each_entry(user, h, hashent, uidhash_node) {
+               if (user->uid == uid) {
+                       /* possibly resurrect an "almost deleted" object */
+                       if (atomic_inc_return(&user->__count) == 1)
+                               cancel_delayed_work(&user->work);
+                       return user;
+               }
+       }
+
+       return NULL;
+}
+
 static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
 static DEFINE_MUTEX(uids_mutex);
 
@@ -283,12 +285,12 @@ int __init uids_sysfs_init(void)
        return uids_user_create(&root_user);
 }
 
-/* work function to remove sysfs directory for a user and free up
+/* delayed work function to remove sysfs directory for a user and free up
  * corresponding structures.
  */
 static void cleanup_user_struct(struct work_struct *w)
 {
-       struct user_struct *up = container_of(w, struct user_struct, work);
+       struct user_struct *up = container_of(w, struct user_struct, work.work);
        unsigned long flags;
        int remove_user = 0;
 
@@ -297,15 +299,12 @@ static void cleanup_user_struct(struct work_struct *w)
         */
        uids_mutex_lock();
 
-       local_irq_save(flags);
-
-       if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+       spin_lock_irqsave(&uidhash_lock, flags);
+       if (atomic_read(&up->__count) == 0) {
                uid_hash_remove(up);
                remove_user = 1;
-               spin_unlock_irqrestore(&uidhash_lock, flags);
-       } else {
-               local_irq_restore(flags);
        }
+       spin_unlock_irqrestore(&uidhash_lock, flags);
 
        if (!remove_user)
                goto done;
@@ -331,16 +330,28 @@ done:
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-       /* restore back the count */
-       atomic_inc(&up->__count);
        spin_unlock_irqrestore(&uidhash_lock, flags);
-
-       INIT_WORK(&up->work, cleanup_user_struct);
-       schedule_work(&up->work);
+       INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
+       schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
 }
 
 #else  /* CONFIG_USER_SCHED && CONFIG_SYSFS */
 
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+       struct user_struct *user;
+       struct hlist_node *h;
+
+       hlist_for_each_entry(user, h, hashent, uidhash_node) {
+               if (user->uid == uid) {
+                       atomic_inc(&user->__count);
+                       return user;
+               }
+       }
+
+       return NULL;
+}
+
 int uids_sysfs_init(void) { return 0; }
 static inline int uids_user_create(struct user_struct *up) { return 0; }
 static inline void uids_mutex_lock(void) { }
index 116a35051be6fef457075c18b78e5c4cb17ec58e..6b0c2d8a21294e9de3a1ca3738134746c035f2d5 100644 (file)
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 
 config DEBUG_SLAB
        bool "Debug slab memory allocations"
-       depends on DEBUG_KERNEL && SLAB
+       depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
        help
          Say Y here to have the kernel do limited verification on memory
          allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
 
 config SLUB_DEBUG_ON
        bool "SLUB debugging on by default"
-       depends on SLUB && SLUB_DEBUG
+       depends on SLUB && SLUB_DEBUG && !KMEMCHECK
        default n
        help
          Boot with debugging on by default. SLUB boots by default with
@@ -996,3 +996,5 @@ config DMA_API_DEBUG
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644 (file)
index 0000000..603c81b
--- /dev/null
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+       bool
+
+menuconfig KMEMCHECK
+       bool "kmemcheck: trap use of uninitialized memory"
+       depends on DEBUG_KERNEL
+       depends on !X86_USE_3DNOW
+       depends on SLUB || SLAB
+       depends on !CC_OPTIMIZE_FOR_SIZE
+       depends on !FUNCTION_TRACER
+       select FRAME_POINTER
+       select STACKTRACE
+       default n
+       help
+         This option enables tracing of dynamically allocated kernel memory
+         to see if memory is used before it has been given an initial value.
+         Be aware that this requires half of your memory for bookkeeping and
+         will insert extra code at *every* read and write to tracked memory
+         thus slowing down the kernel code (but user code is unaffected).
+
+         The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+         or enable kmemcheck at boot-time. If the kernel is started with
+         kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+       prompt "kmemcheck: default mode at boot"
+       depends on KMEMCHECK
+       default KMEMCHECK_ONESHOT_BY_DEFAULT
+       help
+         This option controls the default behaviour of kmemcheck when the
+         kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+       bool "disabled"
+       depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+       bool "enabled"
+       depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+       bool "one-shot"
+       depends on KMEMCHECK
+       help
+         In one-shot mode, only the first error detected is reported before
+         kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+       int "kmemcheck: error queue size"
+       depends on KMEMCHECK
+       default 64
+       help
+         Select the maximum number of errors to store in the queue. Since
+         errors can occur virtually anywhere and in any context, we need a
+         temporary storage area which is guaranteed not to generate any
+         other faults. The queue will be emptied as soon as a tasklet may
+         be scheduled. If the queue is full, new error reports will be
+         lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+       int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+       depends on KMEMCHECK
+       range 2 8
+       default 5
+       help
+         Select the number of shadow bytes to save along with each entry of
+         the queue. These bytes indicate what parts of an allocation are
+         initialized, uninitialized, etc. and will be displayed when an
+         error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+       bool "kmemcheck: allow partially uninitialized memory"
+       depends on KMEMCHECK
+       default y
+       help
+         This option works around certain GCC optimizations that produce
+         32-bit reads from 16-bit variables where the upper 16 bits are
+         thrown away afterwards. This may of course also hide some real
+         bugs.
+
+config KMEMCHECK_BITOPS_OK
+       bool "kmemcheck: allow bit-field manipulation"
+       depends on KMEMCHECK
+       default n
+       help
+         This option silences warnings that would be generated for bit-field
+         accesses where not all the bits are initialized at the same time.
+         This may also hide some real bugs.
+
index a65c314555416d9f1ea262455d1da313e1959902..e73822aa6e9a68131df955c0a6025c7aa5556fc6 100644 (file)
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
-#ifdef CONFIG_SMP
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
                return 0;
-#endif
+
        /* Otherwise do it the slow way */
        spin_lock(lock);
        if (atomic_dec_and_test(atomic))
index f6d276db2d58f97c80db2b0aececde84120893a9..eed2bdb865e76673bfe24834301de8406d9cfc88 100644 (file)
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
        int bit, end_bit;
 
 
-       write_lock(&pool->lock);
        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);
index f07c0db81d262eb8ff185391bf6577adae064e1f..39af2560f765f9815ccb724fecbe38173c986aab 100644 (file)
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
                for (j = 0; j < ngroups; j++)
                        lx += scnprintf(linebuf + lx, linebuflen - lx,
-                               "%16.16llx ", (unsigned long long)*(ptr8 + j));
+                               "%s%16.16llx", j ? " " : "",
+                               (unsigned long long)*(ptr8 + j));
                ascii_column = 17 * ngroups + 2;
                break;
        }
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
                for (j = 0; j < ngroups; j++)
                        lx += scnprintf(linebuf + lx, linebuflen - lx,
-                               "%8.8x ", *(ptr4 + j));
+                               "%s%8.8x", j ? " " : "", *(ptr4 + j));
                ascii_column = 9 * ngroups + 2;
                break;
        }
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
                for (j = 0; j < ngroups; j++)
                        lx += scnprintf(linebuf + lx, linebuflen - lx,
-                               "%4.4x ", *(ptr2 + j));
+                               "%s%4.4x", j ? " " : "", *(ptr2 + j));
                ascii_column = 5 * ngroups + 2;
                break;
        }
 
        default:
-               for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
-                    j++) {
+               for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
                        ch = ptr[j];
                        linebuf[lx++] = hex_asc_hi(ch);
                        linebuf[lx++] = hex_asc_lo(ch);
                        linebuf[lx++] = ' ';
                }
+               if (j)
+                       lx--;
+
                ascii_column = 3 * rowsize + 2;
                break;
        }
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
        while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
                linebuf[lx++] = ' ';
-       for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
+       for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
                linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
                                : '.';
 nil:
index bacf6fe4f7a0347bb958ebc85a059ab582eb93b4..b512b746d2aff464e1c18a01c53300ef7b6e68f4 100644 (file)
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
                                struct kobject *parent_kobj)
 {
        struct kset *kset;
+       int retval;
 
        kset = kzalloc(sizeof(*kset), GFP_KERNEL);
        if (!kset)
                return NULL;
-       kobject_set_name(&kset->kobj, name);
+       retval = kobject_set_name(&kset->kobj, name);
+       if (retval) {
+               kfree(kset);
+               return NULL;
+       }
        kset->uevent_ops = uevent_ops;
        kset->kobj.parent = parent_kobj;
 
index 4bb42a0344ec49f5224d74c932786f0842446bef..23abbd93cae1fd50b87b2ed5f8ffd95d940230b1 100644 (file)
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-/**
- *     radix_tree_lookup_slot    -    lookup a slot in a radix tree
- *     @root:          radix tree root
- *     @index:         index key
- *
- *     Returns:  the slot corresponding to the position @index in the
- *     radix tree @root. This is useful for update-if-exists operations.
- *
- *     This function can be called under rcu_read_lock iff the slot is not
- *     modified by radix_tree_replace_slot, otherwise it must be called
- *     exclusive from other writers. Any dereference of the slot must be done
- *     using radix_tree_deref_slot.
+/*
+ * is_slot == 1 : search for the slot.
+ * is_slot == 0 : search for the node.
  */
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+static void *radix_tree_lookup_element(struct radix_tree_root *root,
+                               unsigned long index, int is_slot)
 {
        unsigned int height, shift;
        struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
        if (!radix_tree_is_indirect_ptr(node)) {
                if (index > 0)
                        return NULL;
-               return (void **)&root->rnode;
+               return is_slot ? (void *)&root->rnode : node;
        }
        node = radix_tree_indirect_to_ptr(node);
 
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
                height--;
        } while (height > 0);
 
-       return (void **)slot;
+       return is_slot ? (void *)slot:node;
+}
+
+/**
+ *     radix_tree_lookup_slot    -    lookup a slot in a radix tree
+ *     @root:          radix tree root
+ *     @index:         index key
+ *
+ *     Returns:  the slot corresponding to the position @index in the
+ *     radix tree @root. This is useful for update-if-exists operations.
+ *
+ *     This function can be called under rcu_read_lock iff the slot is not
+ *     modified by radix_tree_replace_slot, otherwise it must be called
+ *     exclusive from other writers. Any dereference of the slot must be done
+ *     using radix_tree_deref_slot.
+ */
+void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+{
+       return (void **)radix_tree_lookup_element(root, index, 1);
 }
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-       unsigned int height, shift;
-       struct radix_tree_node *node, **slot;
-
-       node = rcu_dereference(root->rnode);
-       if (node == NULL)
-               return NULL;
-
-       if (!radix_tree_is_indirect_ptr(node)) {
-               if (index > 0)
-                       return NULL;
-               return node;
-       }
-       node = radix_tree_indirect_to_ptr(node);
-
-       height = node->height;
-       if (index > radix_tree_maxindex(height))
-               return NULL;
-
-       shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-
-       do {
-               slot = (struct radix_tree_node **)
-                       (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
-               node = rcu_dereference(*slot);
-               if (node == NULL)
-                       return NULL;
-
-               shift -= RADIX_TREE_MAP_SHIFT;
-               height--;
-       } while (height > 0);
-
-       return node;
+       return radix_tree_lookup_element(root, index, 0);
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_next_hole);
 
+/**
+ *     radix_tree_prev_hole    -    find the prev hole (not-present entry)
+ *     @root:          tree root
+ *     @index:         index key
+ *     @max_scan:      maximum range to search
+ *
+ *     Search backwards in the range [max(index-max_scan+1, 0), index]
+ *     for the first hole.
+ *
+ *     Returns: the index of the hole if found, otherwise returns an index
+ *     outside of the set specified (in which case 'index - return >= max_scan'
+ *     will be true). In rare cases of wrap-around, LONG_MAX will be returned.
+ *
+ *     radix_tree_prev_hole may be called under rcu_read_lock. However, like
+ *     radix_tree_gang_lookup, this will not atomically search a snapshot of
+ *     the tree at a single point in time. For example, if a hole is created
+ *     at index 10, then subsequently a hole is created at index 5,
+ *     radix_tree_prev_hole covering both indexes may return 5 if called under
+ *     rcu_read_lock.
+ */
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+                                  unsigned long index, unsigned long max_scan)
+{
+       unsigned long i;
+
+       for (i = 0; i < max_scan; i++) {
+               if (!radix_tree_lookup(root, index))
+                       break;
+               index--;
+               if (index == LONG_MAX)
+                       break;
+       }
+
+       return index;
+}
+EXPORT_SYMBOL(radix_tree_prev_hole);
+
 static unsigned int
 __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
        unsigned int max_items, unsigned long *next_index)
index f653659e0bc1fb161525332d775c973e2fb0ba98..e2aa3be29858cdcdd69bf4a43a67813db84079ee 100644 (file)
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
                node = node->rb_right;
                while ((left = node->rb_left) != NULL)
                        node = left;
+
+               if (rb_parent(old)) {
+                       if (rb_parent(old)->rb_left == old)
+                               rb_parent(old)->rb_left = node;
+                       else
+                               rb_parent(old)->rb_right = node;
+               } else
+                       root->rb_node = node;
+
                child = node->rb_right;
                parent = rb_parent(node);
                color = rb_color(node);
 
-               if (child)
-                       rb_set_parent(child, parent);
                if (parent == old) {
-                       parent->rb_right = child;
                        parent = node;
-               } else
+               } else {
+                       if (child)
+                               rb_set_parent(child, parent);
                        parent->rb_left = child;
 
+                       node->rb_right = old->rb_right;
+                       rb_set_parent(old->rb_right, node);
+               }
+
                node->rb_parent_color = old->rb_parent_color;
-               node->rb_right = old->rb_right;
                node->rb_left = old->rb_left;
-
-               if (rb_parent(old))
-               {
-                       if (rb_parent(old)->rb_left == old)
-                               rb_parent(old)->rb_left = node;
-                       else
-                               rb_parent(old)->rb_right = node;
-               } else
-                       root->rb_node = node;
-
                rb_set_parent(old->rb_left, node);
-               if (old->rb_right)
-                       rb_set_parent(old->rb_right, node);
+
                goto color;
        }
 
index 6f4610a9ce55fe10de35b24c42850533bce2b7aa..c948d4ca8bde0dc0d73ba1c40b984d395a73b4d8 100644 (file)
@@ -203,25 +203,13 @@ config VIRT_TO_BUS
        def_bool y
        depends on !ARCH_NO_VIRT_TO_BUS
 
-config UNEVICTABLE_LRU
-       bool "Add LRU list to track non-evictable pages"
-       default y
-       help
-         Keeps unevictable pages off of the active and inactive pageout
-         lists, so kswapd will not waste CPU time or have its balancing
-         algorithms thrown off by scanning these pages.  Selecting this
-         will use one page flag and increase the code size a little,
-         say Y unless you know what you are doing.
-
-         See Documentation/vm/unevictable-lru.txt for more information.
-
 config HAVE_MLOCK
        bool
        default y if MMU=y
 
 config HAVE_MLOCKED_PAGE_BIT
        bool
-       default y if HAVE_MLOCK=y && UNEVICTABLE_LRU=y
+       default y if HAVE_MLOCK=y
 
 config MMU_NOTIFIER
        bool
index bb01e298f260b84c82b03acdfb11582e42ecbbe6..aa99fd1f7109f03ef24085dd2ab37e5eb7c72534 100644 (file)
@@ -2,6 +2,7 @@ config DEBUG_PAGEALLOC
        bool "Debug page memory allocations"
        depends on DEBUG_KERNEL && ARCH_SUPPORTS_DEBUG_PAGEALLOC
        depends on !HIBERNATION || !PPC && !SPARC
+       depends on !KMEMCHECK
        ---help---
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
index e89acb090b4dfe7c24aefc846ad492bb157fcbc6..5e0bd64266932e54184879dff3104c372c12c152 100644 (file)
@@ -12,6 +12,7 @@ obj-y                 := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
                           page_isolation.o mm_init.o $(mmu-y)
+obj-y += init-mm.o
 
 obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
 obj-$(CONFIG_BOUNCE)   += bounce.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
 obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_SLUB) += slub.o
+obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
 obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
index 54a0f8040afa339a7e2f0cb459fd5b23c3d16acf..e43359214f6ff15020b6f05cc07aa5e6ddcb4162 100644 (file)
@@ -101,7 +101,7 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
                
                ret = force_page_cache_readahead(mapping, file,
                                start_index,
-                               max_sane_readahead(nrpages));
+                               nrpages);
                if (ret > 0)
                        ret = 0;
                break;
index 1b60f30cebfa88aa135c17173ec9ce831612fa90..22396713feb9684647aa5c90a3b18e6661b2f798 100644 (file)
@@ -521,7 +521,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
 {
        if (cpuset_do_page_mem_spread()) {
                int n = cpuset_mem_spread_node();
-               return alloc_pages_node(n, gfp, 0);
+               return alloc_pages_exact_node(n, gfp, 0);
        }
        return alloc_pages(gfp, 0);
 }
@@ -1004,9 +1004,6 @@ EXPORT_SYMBOL(grab_cache_page_nowait);
 static void shrink_readahead_size_eio(struct file *filp,
                                        struct file_ra_state *ra)
 {
-       if (!ra->ra_pages)
-               return;
-
        ra->ra_pages /= 4;
 }
 
@@ -1390,8 +1387,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
                return -EINVAL;
 
-       force_page_cache_readahead(mapping, filp, index,
-                                       max_sane_readahead(nr));
+       force_page_cache_readahead(mapping, filp, index, nr);
        return 0;
 }
 
@@ -1457,6 +1453,73 @@ static int page_cache_read(struct file *file, pgoff_t offset)
 
 #define MMAP_LOTSAMISS  (100)
 
+/*
+ * Synchronous readahead happens when we don't even find
+ * a page in the page cache at all.
+ */
+static void do_sync_mmap_readahead(struct vm_area_struct *vma,
+                                  struct file_ra_state *ra,
+                                  struct file *file,
+                                  pgoff_t offset)
+{
+       unsigned long ra_pages;
+       struct address_space *mapping = file->f_mapping;
+
+       /* If we don't want any read-ahead, don't bother */
+       if (VM_RandomReadHint(vma))
+               return;
+
+       if (VM_SequentialReadHint(vma) ||
+                       offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
+               page_cache_sync_readahead(mapping, ra, file, offset,
+                                         ra->ra_pages);
+               return;
+       }
+
+       if (ra->mmap_miss < INT_MAX)
+               ra->mmap_miss++;
+
+       /*
+        * Do we miss much more than hit in this file? If so,
+        * stop bothering with read-ahead. It will only hurt.
+        */
+       if (ra->mmap_miss > MMAP_LOTSAMISS)
+               return;
+
+       /*
+        * mmap read-around
+        */
+       ra_pages = max_sane_readahead(ra->ra_pages);
+       if (ra_pages) {
+               ra->start = max_t(long, 0, offset - ra_pages/2);
+               ra->size = ra_pages;
+               ra->async_size = 0;
+               ra_submit(ra, mapping, file);
+       }
+}
+
+/*
+ * Asynchronous readahead happens when we find the page and PG_readahead,
+ * so we want to possibly extend the readahead further..
+ */
+static void do_async_mmap_readahead(struct vm_area_struct *vma,
+                                   struct file_ra_state *ra,
+                                   struct file *file,
+                                   struct page *page,
+                                   pgoff_t offset)
+{
+       struct address_space *mapping = file->f_mapping;
+
+       /* If we don't want any read-ahead, don't bother */
+       if (VM_RandomReadHint(vma))
+               return;
+       if (ra->mmap_miss > 0)
+               ra->mmap_miss--;
+       if (PageReadahead(page))
+               page_cache_async_readahead(mapping, ra, file,
+                                          page, offset, ra->ra_pages);
+}
+
 /**
  * filemap_fault - read in file data for page fault handling
  * @vma:       vma in which the fault was taken
@@ -1476,78 +1539,44 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct address_space *mapping = file->f_mapping;
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
+       pgoff_t offset = vmf->pgoff;
        struct page *page;
        pgoff_t size;
-       int did_readaround = 0;
        int ret = 0;
 
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (vmf->pgoff >= size)
+       if (offset >= size)
                return VM_FAULT_SIGBUS;
 
-       /* If we don't want any read-ahead, don't bother */
-       if (VM_RandomReadHint(vma))
-               goto no_cached_page;
-
        /*
         * Do we have something in the page cache already?
         */
-retry_find:
-       page = find_lock_page(mapping, vmf->pgoff);
-       /*
-        * For sequential accesses, we use the generic readahead logic.
-        */
-       if (VM_SequentialReadHint(vma)) {
-               if (!page) {
-                       page_cache_sync_readahead(mapping, ra, file,
-                                                          vmf->pgoff, 1);
-                       page = find_lock_page(mapping, vmf->pgoff);
-                       if (!page)
-                               goto no_cached_page;
-               }
-               if (PageReadahead(page)) {
-                       page_cache_async_readahead(mapping, ra, file, page,
-                                                          vmf->pgoff, 1);
-               }
-       }
-
-       if (!page) {
-               unsigned long ra_pages;
-
-               ra->mmap_miss++;
-
+       page = find_get_page(mapping, offset);
+       if (likely(page)) {
                /*
-                * Do we miss much more than hit in this file? If so,
-                * stop bothering with read-ahead. It will only hurt.
+                * We found the page, so try async readahead before
+                * waiting for the lock.
                 */
-               if (ra->mmap_miss > MMAP_LOTSAMISS)
-                       goto no_cached_page;
+               do_async_mmap_readahead(vma, ra, file, page, offset);
+               lock_page(page);
 
-               /*
-                * To keep the pgmajfault counter straight, we need to
-                * check did_readaround, as this is an inner loop.
-                */
-               if (!did_readaround) {
-                       ret = VM_FAULT_MAJOR;
-                       count_vm_event(PGMAJFAULT);
-               }
-               did_readaround = 1;
-               ra_pages = max_sane_readahead(file->f_ra.ra_pages);
-               if (ra_pages) {
-                       pgoff_t start = 0;
-
-                       if (vmf->pgoff > ra_pages / 2)
-                               start = vmf->pgoff - ra_pages / 2;
-                       do_page_cache_readahead(mapping, file, start, ra_pages);
+               /* Did it get truncated? */
+               if (unlikely(page->mapping != mapping)) {
+                       unlock_page(page);
+                       put_page(page);
+                       goto no_cached_page;
                }
-               page = find_lock_page(mapping, vmf->pgoff);
+       } else {
+               /* No page in the page cache at all */
+               do_sync_mmap_readahead(vma, ra, file, offset);
+               count_vm_event(PGMAJFAULT);
+               ret = VM_FAULT_MAJOR;
+retry_find:
+               page = find_lock_page(mapping, offset);
                if (!page)
                        goto no_cached_page;
        }
 
-       if (!did_readaround)
-               ra->mmap_miss--;
-
        /*
         * We have a locked page in the page cache, now we need to check
         * that it's up-to-date. If not, it is going to be due to an error.
@@ -1555,18 +1584,18 @@ retry_find:
        if (unlikely(!PageUptodate(page)))
                goto page_not_uptodate;
 
-       /* Must recheck i_size under page lock */
+       /*
+        * Found the page and have a reference on it.
+        * We must recheck i_size under page lock.
+        */
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (unlikely(vmf->pgoff >= size)) {
+       if (unlikely(offset >= size)) {
                unlock_page(page);
                page_cache_release(page);
                return VM_FAULT_SIGBUS;
        }
 
-       /*
-        * Found the page and have a reference on it.
-        */
-       ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
        vmf->page = page;
        return ret | VM_FAULT_LOCKED;
 
@@ -1575,7 +1604,7 @@ no_cached_page:
         * We're only likely to ever get here if MADV_RANDOM is in
         * effect.
         */
-       error = page_cache_read(file, vmf->pgoff);
+       error = page_cache_read(file, offset);
 
        /*
         * The page we want has now been added to the page cache.
@@ -1595,12 +1624,6 @@ no_cached_page:
        return VM_FAULT_SIGBUS;
 
 page_not_uptodate:
-       /* IO error path */
-       if (!did_readaround) {
-               ret = VM_FAULT_MAJOR;
-               count_vm_event(PGMAJFAULT);
-       }
-
        /*
         * Umm, take care of errors if the page isn't up-to-date.
         * Try to re-read it _once_. We do this synchronously,
index e83ad2c9228c1242a582920c3a03b691f5dd56f9..a56e6f3ce97937bc3d28713f67555cf890dc9069 100644 (file)
@@ -578,41 +578,6 @@ static void free_huge_page(struct page *page)
                hugetlb_put_quota(mapping, 1);
 }
 
-/*
- * Increment or decrement surplus_huge_pages.  Keep node-specific counters
- * balanced by operating on them in a round-robin fashion.
- * Returns 1 if an adjustment was made.
- */
-static int adjust_pool_surplus(struct hstate *h, int delta)
-{
-       static int prev_nid;
-       int nid = prev_nid;
-       int ret = 0;
-
-       VM_BUG_ON(delta != -1 && delta != 1);
-       do {
-               nid = next_node(nid, node_online_map);
-               if (nid == MAX_NUMNODES)
-                       nid = first_node(node_online_map);
-
-               /* To shrink on this node, there must be a surplus page */
-               if (delta < 0 && !h->surplus_huge_pages_node[nid])
-                       continue;
-               /* Surplus cannot exceed the total number of pages */
-               if (delta > 0 && h->surplus_huge_pages_node[nid] >=
-                                               h->nr_huge_pages_node[nid])
-                       continue;
-
-               h->surplus_huge_pages += delta;
-               h->surplus_huge_pages_node[nid] += delta;
-               ret = 1;
-               break;
-       } while (nid != prev_nid);
-
-       prev_nid = nid;
-       return ret;
-}
-
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
        set_compound_page_dtor(page, free_huge_page);
@@ -623,6 +588,34 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
        put_page(page); /* free it into the hugepage allocator */
 }
 
+static void prep_compound_gigantic_page(struct page *page, unsigned long order)
+{
+       int i;
+       int nr_pages = 1 << order;
+       struct page *p = page + 1;
+
+       /* we rely on prep_new_huge_page to set the destructor */
+       set_compound_order(page, order);
+       __SetPageHead(page);
+       for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+               __SetPageTail(p);
+               p->first_page = page;
+       }
+}
+
+int PageHuge(struct page *page)
+{
+       compound_page_dtor *dtor;
+
+       if (!PageCompound(page))
+               return 0;
+
+       page = compound_head(page);
+       dtor = get_compound_page_dtor(page);
+
+       return dtor == free_huge_page;
+}
+
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
        struct page *page;
@@ -630,7 +623,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
        if (h->order >= MAX_ORDER)
                return NULL;
 
-       page = alloc_pages_node(nid,
+       page = alloc_pages_exact_node(nid,
                htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
                                                __GFP_REPEAT|__GFP_NOWARN,
                huge_page_order(h));
@@ -649,7 +642,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
  * Use a helper variable to find the next node and then
  * copy it back to hugetlb_next_nid afterwards:
  * otherwise there's a window in which a racer might
- * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+ * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
  * But we don't need to use a spin_lock here: it really
  * doesn't matter if occasionally a racer chooses the
  * same nid as we do.  Move nid forward in the mask even
@@ -875,7 +868,7 @@ static void return_unused_surplus_pages(struct hstate *h,
         * can no longer free unreserved surplus pages. This occurs when
         * the nodes with surplus pages have no free pages.
         */
-       unsigned long remaining_iterations = num_online_nodes();
+       unsigned long remaining_iterations = nr_online_nodes;
 
        /* Uncommit the reservation */
        h->resv_huge_pages -= unused_resv_pages;
@@ -904,7 +897,7 @@ static void return_unused_surplus_pages(struct hstate *h,
                        h->surplus_huge_pages--;
                        h->surplus_huge_pages_node[nid]--;
                        nr_pages--;
-                       remaining_iterations = num_online_nodes();
+                       remaining_iterations = nr_online_nodes;
                }
        }
 }
@@ -1140,6 +1133,41 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
 }
 #endif
 
+/*
+ * Increment or decrement surplus_huge_pages.  Keep node-specific counters
+ * balanced by operating on them in a round-robin fashion.
+ * Returns 1 if an adjustment was made.
+ */
+static int adjust_pool_surplus(struct hstate *h, int delta)
+{
+       static int prev_nid;
+       int nid = prev_nid;
+       int ret = 0;
+
+       VM_BUG_ON(delta != -1 && delta != 1);
+       do {
+               nid = next_node(nid, node_online_map);
+               if (nid == MAX_NUMNODES)
+                       nid = first_node(node_online_map);
+
+               /* To shrink on this node, there must be a surplus page */
+               if (delta < 0 && !h->surplus_huge_pages_node[nid])
+                       continue;
+               /* Surplus cannot exceed the total number of pages */
+               if (delta > 0 && h->surplus_huge_pages_node[nid] >=
+                                               h->nr_huge_pages_node[nid])
+                       continue;
+
+               h->surplus_huge_pages += delta;
+               h->surplus_huge_pages_node[nid] += delta;
+               ret = 1;
+               break;
+       } while (nid != prev_nid);
+
+       prev_nid = nid;
+       return ret;
+}
+
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
 {
diff --git a/mm/init-mm.c b/mm/init-mm.c
new file mode 100644 (file)
index 0000000..57aba0d
--- /dev/null
@@ -0,0 +1,20 @@
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
+
+#include <asm/atomic.h>
+#include <asm/pgtable.h>
+
+struct mm_struct init_mm = {
+       .mm_rb          = RB_ROOT,
+       .pgd            = swapper_pg_dir,
+       .mm_users       = ATOMIC_INIT(2),
+       .mm_count       = ATOMIC_INIT(1),
+       .mmap_sem       = __RWSEM_INITIALIZER(init_mm.mmap_sem),
+       .page_table_lock =  __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
+       .mmlist         = LIST_HEAD_INIT(init_mm.mmlist),
+       .cpu_vm_mask    = CPU_MASK_ALL,
+};
index 987bb03fbdd88f33f36986b6ecbb33f02a62c6e2..f290c4db528b0e38afb243e498e8891007788545 100644 (file)
@@ -16,9 +16,6 @@
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                unsigned long floor, unsigned long ceiling);
 
-extern void prep_compound_page(struct page *page, unsigned long order);
-extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
-
 static inline void set_page_count(struct page *page, int v)
 {
        atomic_set(&page->_count, v);
@@ -51,6 +48,8 @@ extern void putback_lru_page(struct page *page);
  */
 extern unsigned long highest_memmap_pfn;
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
+extern void prep_compound_page(struct page *page, unsigned long order);
+
 
 /*
  * function for dealing with page's order in buddy system.
@@ -74,7 +73,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -86,11 +84,6 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
        if (TestClearPageUnevictable(old))
                SetPageUnevictable(new);
 }
-#else
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
-}
-#endif
 
 #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
@@ -150,23 +143,6 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
        }
 }
 
-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
-       if (unlikely(TestClearPageMlocked(page))) {
-               unsigned long flags;
-
-               local_irq_save(flags);
-               __dec_zone_page_state(page, NR_MLOCK);
-               __count_vm_event(UNEVICTABLE_MLOCKFREED);
-               local_irq_restore(flags);
-       }
-}
-
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
@@ -175,7 +151,6 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
 
@@ -284,4 +259,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long start, int len, int flags,
                     struct page **pages, struct vm_area_struct **vmas);
 
+#define ZONE_RECLAIM_NOSCAN    -2
+#define ZONE_RECLAIM_FULL      -1
+#define ZONE_RECLAIM_SOME      0
+#define ZONE_RECLAIM_SUCCESS   1
 #endif
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
new file mode 100644 (file)
index 0000000..fd814fd
--- /dev/null
@@ -0,0 +1,122 @@
+#include <linux/gfp.h>
+#include <linux/mm_types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kmemcheck.h>
+
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+       struct page *shadow;
+       int pages;
+       int i;
+
+       pages = 1 << order;
+
+       /*
+        * With kmemcheck enabled, we need to allocate a memory area for the
+        * shadow bits as well.
+        */
+       shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
+       if (!shadow) {
+               if (printk_ratelimit())
+                       printk(KERN_ERR "kmemcheck: failed to allocate "
+                               "shadow bitmap\n");
+               return;
+       }
+
+       for(i = 0; i < pages; ++i)
+               page[i].shadow = page_address(&shadow[i]);
+
+       /*
+        * Mark it as non-present for the MMU so that our accesses to
+        * this memory will trigger a page fault and let us analyze
+        * the memory accesses.
+        */
+       kmemcheck_hide_pages(page, pages);
+}
+
+void kmemcheck_free_shadow(struct page *page, int order)
+{
+       struct page *shadow;
+       int pages;
+       int i;
+
+       if (!kmemcheck_page_is_tracked(page))
+               return;
+
+       pages = 1 << order;
+
+       kmemcheck_show_pages(page, pages);
+
+       shadow = virt_to_page(page[0].shadow);
+
+       for(i = 0; i < pages; ++i)
+               page[i].shadow = NULL;
+
+       __free_pages(shadow, order);
+}
+
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+                         size_t size)
+{
+       /*
+        * Has already been memset(), which initializes the shadow for us
+        * as well.
+        */
+       if (gfpflags & __GFP_ZERO)
+               return;
+
+       /* No need to initialize the shadow of a non-tracked slab. */
+       if (s->flags & SLAB_NOTRACK)
+               return;
+
+       if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
+               /*
+                * Allow notracked objects to be allocated from
+                * tracked caches. Note however that these objects
+                * will still get page faults on access, they just
+                * won't ever be flagged as uninitialized. If page
+                * faults are not acceptable, the slab cache itself
+                * should be marked NOTRACK.
+                */
+               kmemcheck_mark_initialized(object, size);
+       } else if (!s->ctor) {
+               /*
+                * New objects should be marked uninitialized before
+                * they're returned to the called.
+                */
+               kmemcheck_mark_uninitialized(object, size);
+       }
+}
+
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
+{
+       /* TODO: RCU freeing is unsupported for now; hide false positives. */
+       if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+               kmemcheck_mark_freed(object, size);
+}
+
+void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
+                              gfp_t gfpflags)
+{
+       int pages;
+
+       if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
+               return;
+
+       pages = 1 << order;
+
+       /*
+        * NOTE: We choose to track GFP_ZERO pages too; in fact, they
+        * can become uninitialized by copying uninitialized memory
+        * into them.
+        */
+
+       /* XXX: Can use zone->node for node? */
+       kmemcheck_alloc_shadow(page, order, gfpflags, -1);
+
+       if (gfpflags & __GFP_ZERO)
+               kmemcheck_mark_initialized_pages(page, pages);
+       else
+               kmemcheck_mark_uninitialized_pages(page, pages);
+}
index b9ce574827c8a2a48b972f0261decc234edf9e27..76eb4193acddb2e739b1e42a0a8371b59464f39f 100644 (file)
@@ -123,8 +123,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
-       force_page_cache_readahead(file->f_mapping,
-                       file, start, max_sane_readahead(end - start));
+       force_page_cache_readahead(file->f_mapping, file, start, end - start);
        return 0;
 }
 
@@ -239,12 +238,30 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
                break;
 
        default:
-               error = -EINVAL;
+               BUG();
                break;
        }
        return error;
 }
 
+static int
+madvise_behavior_valid(int behavior)
+{
+       switch (behavior) {
+       case MADV_DOFORK:
+       case MADV_DONTFORK:
+       case MADV_NORMAL:
+       case MADV_SEQUENTIAL:
+       case MADV_RANDOM:
+       case MADV_REMOVE:
+       case MADV_WILLNEED:
+       case MADV_DONTNEED:
+               return 1;
+
+       default:
+               return 0;
+       }
+}
 /*
  * The madvise(2) system call.
  *
@@ -290,6 +307,9 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
        int write;
        size_t len;
 
+       if (!madvise_behavior_valid(behavior))
+               return error;
+
        write = madvise_need_mmap_write(behavior);
        if (write)
                down_write(&current->mm->mmap_sem);
index 78eb8552818b6b94d4add7ec06ecdc881add1f67..70db6e0a5eece0a3a6d40f2089d91271968c3361 100644 (file)
@@ -570,6 +570,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
        return 0;
 }
 
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+       unsigned long active;
+       unsigned long inactive;
+
+       inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
+       active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
+
+       return (active > inactive);
+}
+
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
                                       struct zone *zone,
                                       enum lru_list lru)
index 4126dd16778c36595af938988c9d8ec144d0f8aa..d5d1653d60a6be51a4ba74de53fd997d8b0bc94c 100644 (file)
@@ -1360,6 +1360,56 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        return i;
 }
 
+/**
+ * get_user_pages() - pin user pages in memory
+ * @tsk:       task_struct of target task
+ * @mm:                mm_struct of target mm
+ * @start:     starting user address
+ * @len:       number of pages from start to pin
+ * @write:     whether pages will be written to by the caller
+ * @force:     whether to force write access even if user mapping is
+ *             readonly. This will result in the page being COWed even
+ *             in MAP_SHARED mappings. You do not want this.
+ * @pages:     array that receives pointers to the pages pinned.
+ *             Should be at least nr_pages long. Or NULL, if caller
+ *             only intends to ensure the pages are faulted in.
+ * @vmas:      array of pointers to vmas corresponding to each page.
+ *             Or NULL if the caller does not require them.
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If len is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno. Each page returned must be released
+ * with a put_page() call when it is finished with. vmas will only
+ * remain valid while mmap_sem is held.
+ *
+ * Must be called with mmap_sem held for read or write.
+ *
+ * get_user_pages walks a process's page tables and takes a reference to
+ * each struct page that each user address corresponds to at a given
+ * instant. That is, it takes the page that would be accessed if a user
+ * thread accesses the given user virtual address at that instant.
+ *
+ * This does not guarantee that the page exists in the user mappings when
+ * get_user_pages returns, and there may even be a completely different
+ * page there in some cases (eg. if mmapped pagecache has been invalidated
+ * and subsequently re faulted). However it does guarantee that the page
+ * won't be freed completely. And mostly callers simply care that the page
+ * contains data that was valid *at some point in time*. Typically, an IO
+ * or similar operation cannot guarantee anything stronger anyway because
+ * locks can't be held over the syscall boundary.
+ *
+ * If write=0, the page must not be written to. If the page is written to,
+ * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
+ * after the page is finished with, and before put_page is called.
+ *
+ * get_user_pages is typically used for fewer-copy IO operations, to get a
+ * handle on the memory by some means other than accesses via the user virtual
+ * addresses. The pages may be submitted for DMA to devices or accessed via
+ * their kernel linear mapping (via the kmap APIs). Care should be taken to
+ * use the correct cache flushing APIs.
+ *
+ * See also get_user_pages_fast, for performance critical applications.
+ */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int len, int write, int force,
                struct page **pages, struct vm_area_struct **vmas)
@@ -3053,22 +3103,13 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
-#ifdef CONFIG_HAVE_IOREMAP_PROT
-int follow_phys(struct vm_area_struct *vma,
-               unsigned long address, unsigned int flags,
-               unsigned long *prot, resource_size_t *phys)
+static int follow_pte(struct mm_struct *mm, unsigned long address,
+               pte_t **ptepp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
-       pte_t *ptep, pte;
-       spinlock_t *ptl;
-       resource_size_t phys_addr = 0;
-       struct mm_struct *mm = vma->vm_mm;
-       int ret = -EINVAL;
-
-       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-               goto out;
+       pte_t *ptep;
 
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
@@ -3086,22 +3127,71 @@ int follow_phys(struct vm_area_struct *vma,
        if (pmd_huge(*pmd))
                goto out;
 
-       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+       ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
        if (!ptep)
                goto out;
+       if (!pte_present(*ptep))
+               goto unlock;
+       *ptepp = ptep;
+       return 0;
+unlock:
+       pte_unmap_unlock(ptep, *ptlp);
+out:
+       return -EINVAL;
+}
 
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn)
+{
+       int ret = -EINVAL;
+       spinlock_t *ptl;
+       pte_t *ptep;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return ret;
+
+       ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+       if (ret)
+               return ret;
+       *pfn = pte_pfn(*ptep);
+       pte_unmap_unlock(ptep, ptl);
+       return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
+
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+int follow_phys(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long *prot, resource_size_t *phys)
+{
+       int ret = -EINVAL;
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
+
+       if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+               goto out;
        pte = *ptep;
-       if (!pte_present(pte))
-               goto unlock;
+
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
-       phys_addr = pte_pfn(pte);
-       phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
        *prot = pgprot_val(pte_pgprot(pte));
-       *phys = phys_addr;
-       ret = 0;
+       *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
 
+       ret = 0;
 unlock:
        pte_unmap_unlock(ptep, ptl);
 out:
index c083cf5fd6df920b8f984351f11942cfa2f9b317..e4412a676c88494ec313ebb3e974d2d71ee5c525 100644 (file)
@@ -422,7 +422,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;
 
-       setup_per_zone_pages_min();
+       setup_per_zone_wmarks();
+       calculate_zone_inactive_ratio(zone);
        if (onlined_pages) {
                kswapd_run(zone_to_nid(zone));
                node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
@@ -832,6 +833,9 @@ repeat:
        totalram_pages -= offlined_pages;
        num_physpages -= offlined_pages;
 
+       setup_per_zone_wmarks();
+       calculate_zone_inactive_ratio(zone);
+
        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();
 
index 3eb4a6fdc04377130f628daaadfc2f970503addc..e08e2c4da63a5dc45bfcfb9f56cd440d557d0772 100644 (file)
@@ -182,13 +182,54 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
        return 0;
 }
 
-/* Create a new policy */
+/*
+ * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
+ * any, for the new policy.  mpol_new() has already validated the nodes
+ * parameter with respect to the policy mode and flags.  But, we need to
+ * handle an empty nodemask with MPOL_PREFERRED here.
+ *
+ * Must be called holding task's alloc_lock to protect task's mems_allowed
+ * and mempolicy.  May also be called holding the mmap_semaphore for write.
+ */
+static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
+{
+       nodemask_t cpuset_context_nmask;
+       int ret;
+
+       /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
+       if (pol == NULL)
+               return 0;
+
+       VM_BUG_ON(!nodes);
+       if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
+               nodes = NULL;   /* explicit local allocation */
+       else {
+               if (pol->flags & MPOL_F_RELATIVE_NODES)
+                       mpol_relative_nodemask(&cpuset_context_nmask, nodes,
+                                              &cpuset_current_mems_allowed);
+               else
+                       nodes_and(cpuset_context_nmask, *nodes,
+                                 cpuset_current_mems_allowed);
+               if (mpol_store_user_nodemask(pol))
+                       pol->w.user_nodemask = *nodes;
+               else
+                       pol->w.cpuset_mems_allowed =
+                                               cpuset_current_mems_allowed;
+       }
+
+       ret = mpol_ops[pol->mode].create(pol,
+                               nodes ? &cpuset_context_nmask : NULL);
+       return ret;
+}
+
+/*
+ * This function just creates a new policy, does some check and simple
+ * initialization. You must invoke mpol_set_nodemask() to set nodes.
+ */
 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
 {
        struct mempolicy *policy;
-       nodemask_t cpuset_context_nmask;
-       int ret;
 
        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
@@ -210,7 +251,6 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
-                       nodes = NULL;   /* flag local alloc */
                }
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
@@ -221,30 +261,6 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
        policy->mode = mode;
        policy->flags = flags;
 
-       if (nodes) {
-               /*
-                * cpuset related setup doesn't apply to local allocation
-                */
-               cpuset_update_task_memory_state();
-               if (flags & MPOL_F_RELATIVE_NODES)
-                       mpol_relative_nodemask(&cpuset_context_nmask, nodes,
-                                              &cpuset_current_mems_allowed);
-               else
-                       nodes_and(cpuset_context_nmask, *nodes,
-                                 cpuset_current_mems_allowed);
-               if (mpol_store_user_nodemask(policy))
-                       policy->w.user_nodemask = *nodes;
-               else
-                       policy->w.cpuset_mems_allowed =
-                                               cpuset_mems_allowed(current);
-       }
-
-       ret = mpol_ops[mode].create(policy,
-                               nodes ? &cpuset_context_nmask : NULL);
-       if (ret < 0) {
-               kmem_cache_free(policy_cache, policy);
-               return ERR_PTR(ret);
-       }
        return policy;
 }
 
@@ -324,6 +340,8 @@ static void mpol_rebind_policy(struct mempolicy *pol,
 /*
  * Wrapper for mpol_rebind_policy() that just requires task
  * pointer, and updates task mempolicy.
+ *
+ * Called with task's alloc_lock held.
  */
 
 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
@@ -600,8 +618,9 @@ static void mpol_set_task_struct_flag(void)
 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                             nodemask_t *nodes)
 {
-       struct mempolicy *new;
+       struct mempolicy *new, *old;
        struct mm_struct *mm = current->mm;
+       int ret;
 
        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new))
@@ -615,20 +634,33 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
         */
        if (mm)
                down_write(&mm->mmap_sem);
-       mpol_put(current->mempolicy);
+       task_lock(current);
+       ret = mpol_set_nodemask(new, nodes);
+       if (ret) {
+               task_unlock(current);
+               if (mm)
+                       up_write(&mm->mmap_sem);
+               mpol_put(new);
+               return ret;
+       }
+       old = current->mempolicy;
        current->mempolicy = new;
        mpol_set_task_struct_flag();
        if (new && new->mode == MPOL_INTERLEAVE &&
            nodes_weight(new->v.nodes))
                current->il_next = first_node(new->v.nodes);
+       task_unlock(current);
        if (mm)
                up_write(&mm->mmap_sem);
 
+       mpol_put(old);
        return 0;
 }
 
 /*
  * Return nodemask for policy for get_mempolicy() query
+ *
+ * Called with task's alloc_lock held
  */
 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 {
@@ -674,7 +706,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy;
 
-       cpuset_update_task_memory_state();
        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;
@@ -683,7 +714,9 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
+               task_lock(current);
                *nmask  = cpuset_current_mems_allowed;
+               task_unlock(current);
                return 0;
        }
 
@@ -738,8 +771,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
        }
 
        err = 0;
-       if (nmask)
+       if (nmask) {
+               task_lock(current);
                get_policy_nodemask(pol, nmask);
+               task_unlock(current);
+       }
 
  out:
        mpol_cond_put(pol);
@@ -767,7 +803,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 
 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 {
-       return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
+       return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 }
 
 /*
@@ -979,6 +1015,14 @@ static long do_mbind(unsigned long start, unsigned long len,
                        return err;
        }
        down_write(&mm->mmap_sem);
+       task_lock(current);
+       err = mpol_set_nodemask(new, nmask);
+       task_unlock(current);
+       if (err) {
+               up_write(&mm->mmap_sem);
+               mpol_put(new);
+               return err;
+       }
        vma = check_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);
 
@@ -1545,8 +1589,6 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
        struct mempolicy *pol = get_vma_policy(current, vma, addr);
        struct zonelist *zl;
 
-       cpuset_update_task_memory_state();
-
        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                unsigned nid;
 
@@ -1593,8 +1635,6 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
        struct mempolicy *pol = current->mempolicy;
 
-       if ((gfp & __GFP_WAIT) && !in_interrupt())
-               cpuset_update_task_memory_state();
        if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
                pol = &default_policy;
 
@@ -1854,6 +1894,8 @@ restart:
  */
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 {
+       int ret;
+
        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
        spin_lock_init(&sp->lock);
 
@@ -1863,9 +1905,19 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 
                /* contextualize the tmpfs mount point mempolicy */
                new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
-               mpol_put(mpol); /* drop our ref on sb mpol */
-               if (IS_ERR(new))
+               if (IS_ERR(new)) {
+                       mpol_put(mpol); /* drop our ref on sb mpol */
                        return;         /* no valid nodemask intersection */
+               }
+
+               task_lock(current);
+               ret = mpol_set_nodemask(new, &mpol->w.user_nodemask);
+               task_unlock(current);
+               mpol_put(mpol); /* drop our ref on sb mpol */
+               if (ret) {
+                       mpol_put(new);
+                       return;
+               }
 
                /* Create pseudo-vma that contains just the policy */
                memset(&pvma, 0, sizeof(struct vm_area_struct));
@@ -2086,8 +2138,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
        new = mpol_new(mode, mode_flags, &nodes);
        if (IS_ERR(new))
                err = 1;
-       else if (no_context)
-               new->w.user_nodemask = nodes;   /* save for contextualization */
+       else {
+               int ret;
+
+               task_lock(current);
+               ret = mpol_set_nodemask(new, &nodes);
+               task_unlock(current);
+               if (ret)
+                       err = 1;
+               else if (no_context) {
+                       /* save for contextualization */
+                       new->w.user_nodemask = nodes;
+               }
+       }
 
 out:
        /* Restore string for error message */
index 068655d8f883a8d2a79ae3f3247ced23f0b5db76..939888f9ddab21ecabdc2f732a8ee604cb56da13 100644 (file)
@@ -802,7 +802,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
        *result = &pm->status;
 
-       return alloc_pages_node(pm->node,
+       return alloc_pages_exact_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
@@ -820,7 +820,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
        struct page_to_node *pp;
        LIST_HEAD(pagelist);
 
-       migrate_prep();
        down_read(&mm->mmap_sem);
 
        /*
@@ -907,6 +906,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
        pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
        if (!pm)
                goto out;
+
+       migrate_prep();
+
        /*
         * Store a chunk of page_to_node array in a page,
         * but keep the last one as a marker
index ac130433c7d35da275b1ad081bfddd9fbf017173..45eb650b9654ed09bebe3b4de3c203442538792d 100644 (file)
@@ -31,7 +31,6 @@ int can_do_mlock(void)
 }
 EXPORT_SYMBOL(can_do_mlock);
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -261,27 +260,6 @@ static int __mlock_posix_error_return(long retval)
        return retval;
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED.  No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  int mlock)
-{
-       if (mlock && (vma->vm_flags & VM_LOCKED))
-               return make_pages_present(start, end);
-       return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-       return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specified address range
index a7b2460e922b779252ebcc225cecaf6060b0e964..175a67a78a99e7b0368dfba11edf9888bc049674 100644 (file)
@@ -58,6 +58,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
        unsigned long points, cpu_time, run_time;
        struct mm_struct *mm;
        struct task_struct *child;
+       int oom_adj;
 
        task_lock(p);
        mm = p->mm;
@@ -65,6 +66,11 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
                task_unlock(p);
                return 0;
        }
+       oom_adj = mm->oom_adj;
+       if (oom_adj == OOM_DISABLE) {
+               task_unlock(p);
+               return 0;
+       }
 
        /*
         * The memory size of the process is the basis for the badness.
@@ -148,15 +154,15 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
                points /= 8;
 
        /*
-        * Adjust the score by oomkilladj.
+        * Adjust the score by oom_adj.
         */
-       if (p->oomkilladj) {
-               if (p->oomkilladj > 0) {
+       if (oom_adj) {
+               if (oom_adj > 0) {
                        if (!points)
                                points = 1;
-                       points <<= p->oomkilladj;
+                       points <<= oom_adj;
                } else
-                       points >>= -(p->oomkilladj);
+                       points >>= -(oom_adj);
        }
 
 #ifdef DEBUG
@@ -251,11 +257,8 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
                        *ppoints = ULONG_MAX;
                }
 
-               if (p->oomkilladj == OOM_DISABLE)
-                       continue;
-
                points = badness(p, uptime.tv_sec);
-               if (points > *ppoints || !chosen) {
+               if (points > *ppoints) {
                        chosen = p;
                        *ppoints = points;
                }
@@ -304,8 +307,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
                }
                printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d     %3d %s\n",
                       p->pid, __task_cred(p)->uid, p->tgid, mm->total_vm,
-                      get_mm_rss(mm), (int)task_cpu(p), p->oomkilladj,
-                      p->comm);
+                      get_mm_rss(mm), (int)task_cpu(p), mm->oom_adj, p->comm);
                task_unlock(p);
        } while_each_thread(g, p);
 }
@@ -323,11 +325,8 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
                return;
        }
 
-       if (!p->mm) {
-               WARN_ON(1);
-               printk(KERN_WARNING "tried to kill an mm-less task!\n");
+       if (!p->mm)
                return;
-       }
 
        if (verbose)
                printk(KERN_ERR "Killed process %d (%s)\n",
@@ -349,28 +348,13 @@ static int oom_kill_task(struct task_struct *p)
        struct mm_struct *mm;
        struct task_struct *g, *q;
 
+       task_lock(p);
        mm = p->mm;
-
-       /* WARNING: mm may not be dereferenced since we did not obtain its
-        * value from get_task_mm(p).  This is OK since all we need to do is
-        * compare mm to q->mm below.
-        *
-        * Furthermore, even if mm contains a non-NULL value, p->mm may
-        * change to NULL at any time since we do not hold task_lock(p).
-        * However, this is of no concern to us.
-        */
-
-       if (mm == NULL)
+       if (!mm || mm->oom_adj == OOM_DISABLE) {
+               task_unlock(p);
                return 1;
-
-       /*
-        * Don't kill the process if any threads are set to OOM_DISABLE
-        */
-       do_each_thread(g, q) {
-               if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
-                       return 1;
-       } while_each_thread(g, q);
-
+       }
+       task_unlock(p);
        __oom_kill_task(p, 1);
 
        /*
@@ -393,10 +377,11 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
        struct task_struct *c;
 
        if (printk_ratelimit()) {
-               printk(KERN_WARNING "%s invoked oom-killer: "
-                       "gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
-                       current->comm, gfp_mask, order, current->oomkilladj);
                task_lock(current);
+               printk(KERN_WARNING "%s invoked oom-killer: "
+                       "gfp_mask=0x%x, order=%d, oom_adj=%d\n",
+                       current->comm, gfp_mask, order,
+                       current->mm ? current->mm->oom_adj : OOM_DISABLE);
                cpuset_print_task_mems_allowed(current);
                task_unlock(current);
                dump_stack();
@@ -409,8 +394,9 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
         * its children or threads, just set TIF_MEMDIE so it can die quickly
+        * if its mm is still attached.
         */
-       if (p->flags & PF_EXITING) {
+       if (p->mm && (p->flags & PF_EXITING)) {
                __oom_kill_task(p, 0);
                return 0;
        }
index bb553c3e955da10631ca7f8229be2b1bdb791f25..7b0dcea4935bea292730ce7d1926fc0ea00df062 100644 (file)
@@ -265,18 +265,19 @@ static void bdi_writeout_fraction(struct backing_dev_info *bdi,
  * This avoids exceeding the total dirty_limit when the floating averages
  * fluctuate too quickly.
  */
-static void
-clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
+static void clip_bdi_dirty_limit(struct backing_dev_info *bdi,
+               unsigned long dirty, unsigned long *pbdi_dirty)
 {
-       long avail_dirty;
+       unsigned long avail_dirty;
 
-       avail_dirty = dirty -
-               (global_page_state(NR_FILE_DIRTY) +
+       avail_dirty = global_page_state(NR_FILE_DIRTY) +
                 global_page_state(NR_WRITEBACK) +
                 global_page_state(NR_UNSTABLE_NFS) +
-                global_page_state(NR_WRITEBACK_TEMP));
+                global_page_state(NR_WRITEBACK_TEMP);
 
-       if (avail_dirty < 0)
+       if (avail_dirty < dirty)
+               avail_dirty = dirty - avail_dirty;
+       else
                avail_dirty = 0;
 
        avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
@@ -299,10 +300,10 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  *
  *   dirty -= (dirty/8) * p_{t}
  */
-static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
 {
        long numerator, denominator;
-       long dirty = *pdirty;
+       unsigned long dirty = *pdirty;
        u64 inv = dirty >> 3;
 
        task_dirties_fraction(tsk, &numerator, &denominator);
index 17d5f539a9aa58a18accc8f97904faf02d93d04e..a5f3c278c5732f4f3c8c9258f5b95b2c86b3e490 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -161,17 +162,25 @@ static unsigned long __meminitdata dma_reserve;
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
+int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
+EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 int page_group_by_mobility_disabled __read_mostly;
 
 static void set_pageblock_migratetype(struct page *page, int migratetype)
 {
+
+       if (unlikely(page_group_by_mobility_disabled))
+               migratetype = MIGRATE_UNMOVABLE;
+
        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
 }
 
+bool oom_killer_disabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -294,23 +303,6 @@ void prep_compound_page(struct page *page, unsigned long order)
        }
 }
 
-#ifdef CONFIG_HUGETLBFS
-void prep_compound_gigantic_page(struct page *page, unsigned long order)
-{
-       int i;
-       int nr_pages = 1 << order;
-       struct page *p = page + 1;
-
-       set_compound_page_dtor(page, free_compound_page);
-       set_compound_order(page, order);
-       __SetPageHead(page);
-       for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-               __SetPageTail(p);
-               p->first_page = page;
-       }
-}
-#endif
-
 static int destroy_compound_page(struct page *page, unsigned long order)
 {
        int i;
@@ -417,7 +409,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
                return 0;
 
        if (PageBuddy(buddy) && page_order(buddy) == order) {
-               BUG_ON(page_count(buddy) != 0);
+               VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
@@ -448,22 +440,22 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  */
 
 static inline void __free_one_page(struct page *page,
-               struct zone *zone, unsigned int order)
+               struct zone *zone, unsigned int order,
+               int migratetype)
 {
        unsigned long page_idx;
-       int order_size = 1 << order;
-       int migratetype = get_pageblock_migratetype(page);
 
        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
                        return;
 
+       VM_BUG_ON(migratetype == -1);
+
        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-       VM_BUG_ON(page_idx & (order_size - 1));
+       VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct page *buddy;
@@ -487,12 +479,27 @@ static inline void __free_one_page(struct page *page,
        zone->free_area[order].nr_free++;
 }
 
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+/*
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+       __ClearPageMlocked(page);
+       __dec_zone_page_state(page, NR_MLOCK);
+       __count_vm_event(UNEVICTABLE_MLOCKFREED);
+}
+#else
+static void free_page_mlock(struct page *page) { }
+#endif
+
 static inline int free_pages_check(struct page *page)
 {
-       free_page_mlock(page);
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_count(page) != 0)  |
+               (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
                bad_page(page);
                return 1;
@@ -519,6 +526,8 @@ static void free_pages_bulk(struct zone *zone, int count,
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
        while (count--) {
                struct page *page;
 
@@ -526,17 +535,20 @@ static void free_pages_bulk(struct zone *zone, int count,
                page = list_entry(list->prev, struct page, lru);
                /* have to delete it as __free_one_page list manipulates */
                list_del(&page->lru);
-               __free_one_page(page, zone, order);
+               __free_one_page(page, zone, order, page_private(page));
        }
        spin_unlock(&zone->lock);
 }
 
-static void free_one_page(struct zone *zone, struct page *page, int order)
+static void free_one_page(struct zone *zone, struct page *page, int order,
+                               int migratetype)
 {
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
-       __free_one_page(page, zone, order);
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+       __free_one_page(page, zone, order, migratetype);
        spin_unlock(&zone->lock);
 }
 
@@ -545,6 +557,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int i;
        int bad = 0;
+       int clearMlocked = PageMlocked(page);
+
+       kmemcheck_free_shadow(page, order);
 
        for (i = 0 ; i < (1 << order) ; ++i)
                bad += free_pages_check(page + i);
@@ -560,8 +575,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        kernel_map_pages(page, 1 << order, 0);
 
        local_irq_save(flags);
+       if (unlikely(clearMlocked))
+               free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
-       free_one_page(page_zone(page), page, order);
+       free_one_page(page_zone(page), page, order,
+                                       get_pageblock_migratetype(page));
        local_irq_restore(flags);
 }
 
@@ -632,7 +650,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_count(page) != 0)  |
+               (atomic_read(&page->_count) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
                bad_page(page);
                return 1;
@@ -657,7 +675,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * Go through the free lists for the given migratetype and remove
  * the smallest available page from the freelists
  */
-static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+static inline
+struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
 {
        unsigned int current_order;
@@ -675,7 +694,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
-               __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }
@@ -766,8 +784,8 @@ static int move_freepages_block(struct zone *zone, struct page *page,
 }
 
 /* Remove an element from the buddy allocator from the fallback list */
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-                                               int start_migratetype)
+static inline struct page *
+__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 {
        struct free_area * area;
        int current_order;
@@ -815,8 +833,6 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);
-                       __mod_zone_page_state(zone, NR_FREE_PAGES,
-                                                       -(1UL << order));
 
                        if (current_order == pageblock_order)
                                set_pageblock_migratetype(page,
@@ -827,8 +843,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
                }
        }
 
-       /* Use MIGRATE_RESERVE rather than fail an allocation */
-       return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
+       return NULL;
 }
 
 /*
@@ -840,11 +855,23 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 {
        struct page *page;
 
+retry_reserve:
        page = __rmqueue_smallest(zone, order, migratetype);
 
-       if (unlikely(!page))
+       if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                page = __rmqueue_fallback(zone, order, migratetype);
 
+               /*
+                * Use MIGRATE_RESERVE rather than fail an allocation. goto
+                * is used because __rmqueue_smallest is an inline function
+                * and we want just one call site
+                */
+               if (!page) {
+                       migratetype = MIGRATE_RESERVE;
+                       goto retry_reserve;
+               }
+       }
+
        return page;
 }
 
@@ -878,6 +905,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                set_page_private(page, migratetype);
                list = &page->lru;
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
 }
@@ -993,6 +1021,9 @@ static void free_hot_cold_page(struct page *page, int cold)
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
+       int clearMlocked = PageMlocked(page);
+
+       kmemcheck_free_shadow(page, 0);
 
        if (PageAnon(page))
                page->mapping = NULL;
@@ -1007,13 +1038,16 @@ static void free_hot_cold_page(struct page *page, int cold)
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp;
+       set_page_private(page, get_pageblock_migratetype(page));
        local_irq_save(flags);
+       if (unlikely(clearMlocked))
+               free_page_mlock(page);
        __count_vm_event(PGFREE);
+
        if (cold)
                list_add_tail(&page->lru, &pcp->list);
        else
                list_add(&page->lru, &pcp->list);
-       set_page_private(page, get_pageblock_migratetype(page));
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
@@ -1047,6 +1081,16 @@ void split_page(struct page *page, unsigned int order)
 
        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+       /*
+        * Split shadow pages too, because free(page[0]) would
+        * otherwise free the whole shadow.
+        */
+       if (kmemcheck_page_is_tracked(page))
+               split_page(virt_to_page(page[0].shadow), order);
+#endif
+
        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
 }
@@ -1056,14 +1100,15 @@ void split_page(struct page *page, unsigned int order)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
-static struct page *buffered_rmqueue(struct zone *preferred_zone,
-                       struct zone *zone, int order, gfp_t gfp_flags)
+static inline
+struct page *buffered_rmqueue(struct zone *preferred_zone,
+                       struct zone *zone, int order, gfp_t gfp_flags,
+                       int migratetype)
 {
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);
        int cpu;
-       int migratetype = allocflags_to_migratetype(gfp_flags);
 
 again:
        cpu  = get_cpu();
@@ -1100,8 +1145,22 @@ again:
                list_del(&page->lru);
                pcp->count--;
        } else {
+               if (unlikely(gfp_flags & __GFP_NOFAIL)) {
+                       /*
+                        * __GFP_NOFAIL is not to be used in new code.
+                        *
+                        * All __GFP_NOFAIL callers should be fixed so that they
+                        * properly detect and handle allocation failures.
+                        *
+                        * We most definitely don't want callers attempting to
+                        * allocate greater than single-page units with
+                        * __GFP_NOFAIL.
+                        */
+                       WARN_ON_ONCE(order > 0);
+               }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
+               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
@@ -1123,10 +1182,15 @@ failed:
        return NULL;
 }
 
-#define ALLOC_NO_WATERMARKS    0x01 /* don't check watermarks at all */
-#define ALLOC_WMARK_MIN                0x02 /* use pages_min watermark */
-#define ALLOC_WMARK_LOW                0x04 /* use pages_low watermark */
-#define ALLOC_WMARK_HIGH       0x08 /* use pages_high watermark */
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN                WMARK_MIN
+#define ALLOC_WMARK_LOW                WMARK_LOW
+#define ALLOC_WMARK_HIGH       WMARK_HIGH
+#define ALLOC_NO_WATERMARKS    0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK       (ALLOC_NO_WATERMARKS-1)
+
 #define ALLOC_HARDER           0x10 /* try to alloc harder */
 #define ALLOC_HIGH             0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET           0x40 /* check for correct cpuset */
@@ -1384,23 +1448,18 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
  */
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
-               struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
+               struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
+               struct zone *preferred_zone, int migratetype)
 {
        struct zoneref *z;
        struct page *page = NULL;
        int classzone_idx;
-       struct zone *zone, *preferred_zone;
+       struct zone *zone;
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
 
-       (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
-                                                       &preferred_zone);
-       if (!preferred_zone)
-               return NULL;
-
        classzone_idx = zone_idx(preferred_zone);
-
 zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free.
@@ -1415,31 +1474,49 @@ zonelist_scan:
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                goto try_next_zone;
 
+               BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
-                       if (alloc_flags & ALLOC_WMARK_MIN)
-                               mark = zone->pages_min;
-                       else if (alloc_flags & ALLOC_WMARK_LOW)
-                               mark = zone->pages_low;
-                       else
-                               mark = zone->pages_high;
-                       if (!zone_watermark_ok(zone, order, mark,
-                                   classzone_idx, alloc_flags)) {
-                               if (!zone_reclaim_mode ||
-                                   !zone_reclaim(zone, gfp_mask, order))
+                       int ret;
+
+                       mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+                       if (zone_watermark_ok(zone, order, mark,
+                                   classzone_idx, alloc_flags))
+                               goto try_this_zone;
+
+                       if (zone_reclaim_mode == 0)
+                               goto this_zone_full;
+
+                       ret = zone_reclaim(zone, gfp_mask, order);
+                       switch (ret) {
+                       case ZONE_RECLAIM_NOSCAN:
+                               /* did not scan */
+                               goto try_next_zone;
+                       case ZONE_RECLAIM_FULL:
+                               /* scanned but unreclaimable */
+                               goto this_zone_full;
+                       default:
+                               /* did we reclaim enough */
+                               if (!zone_watermark_ok(zone, order, mark,
+                                               classzone_idx, alloc_flags))
                                        goto this_zone_full;
                        }
                }
 
-               page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
+try_this_zone:
+               page = buffered_rmqueue(preferred_zone, zone, order,
+                                               gfp_mask, migratetype);
                if (page)
                        break;
 this_zone_full:
                if (NUMA_BUILD)
                        zlc_mark_zone_full(zonelist, z);
 try_next_zone:
-               if (NUMA_BUILD && !did_zlc_setup) {
-                       /* we do zlc_setup after the first zone is tried */
+               if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+                       /*
+                        * we do zlc_setup after the first zone is tried but only
+                        * if there are multiple nodes to make it worthwhile
+                        */
                        allowednodes = zlc_setup(zonelist, alloc_flags);
                        zlc_active = 1;
                        did_zlc_setup = 1;
@@ -1454,47 +1531,217 @@ try_next_zone:
        return page;
 }
 
+static inline int
+should_alloc_retry(gfp_t gfp_mask, unsigned int order,
+                               unsigned long pages_reclaimed)
+{
+       /* Do not loop if specifically requested */
+       if (gfp_mask & __GFP_NORETRY)
+               return 0;
+
+       /*
+        * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
+        * means __GFP_NOFAIL, but that may not be true in other
+        * implementations.
+        */
+       if (order <= PAGE_ALLOC_COSTLY_ORDER)
+               return 1;
+
+       /*
+        * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
+        * specified, then we retry until we no longer reclaim any pages
+        * (above), or we've reclaimed an order of pages at least as
+        * large as the allocation's order. In both cases, if the
+        * allocation still fails, we stop retrying.
+        */
+       if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
+               return 1;
+
+       /*
+        * Don't let big-order allocations loop unless the caller
+        * explicitly requests that.
+        */
+       if (gfp_mask & __GFP_NOFAIL)
+               return 1;
+
+       return 0;
+}
+
+static inline struct page *
+__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
+{
+       struct page *page;
+
+       /* Acquire the OOM killer lock for the zones in zonelist */
+       if (!try_set_zone_oom(zonelist, gfp_mask)) {
+               schedule_timeout_uninterruptible(1);
+               return NULL;
+       }
+
+       /*
+        * Go through the zonelist yet one more time, keep very high watermark
+        * here, this is only to catch a parallel oom killing, we must fail if
+        * we're still under heavy pressure.
+        */
+       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
+               order, zonelist, high_zoneidx,
+               ALLOC_WMARK_HIGH|ALLOC_CPUSET,
+               preferred_zone, migratetype);
+       if (page)
+               goto out;
+
+       /* The OOM killer will not help higher order allocs */
+       if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
+               goto out;
+
+       /* Exhausted what can be done so it's blamo time */
+       out_of_memory(zonelist, gfp_mask, order);
+
+out:
+       clear_zonelist_oom(zonelist, gfp_mask);
+       return page;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+       int migratetype, unsigned long *did_some_progress)
+{
+       struct page *page = NULL;
+       struct reclaim_state reclaim_state;
+       struct task_struct *p = current;
+
+       cond_resched();
+
+       /* We now go into synchronous reclaim */
+       cpuset_memory_pressure_bump();
+
+       /*
+        * The task's cpuset might have expanded its set of allowable nodes
+        */
+       p->flags |= PF_MEMALLOC;
+       lockdep_set_current_reclaim_state(gfp_mask);
+       reclaim_state.reclaimed_slab = 0;
+       p->reclaim_state = &reclaim_state;
+
+       *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+
+       p->reclaim_state = NULL;
+       lockdep_clear_current_reclaim_state();
+       p->flags &= ~PF_MEMALLOC;
+
+       cond_resched();
+
+       if (order != 0)
+               drain_all_pages();
+
+       if (likely(*did_some_progress))
+               page = get_page_from_freelist(gfp_mask, nodemask, order,
+                                       zonelist, high_zoneidx,
+                                       alloc_flags, preferred_zone,
+                                       migratetype);
+       return page;
+}
+
 /*
- * This is the 'heart' of the zoned buddy allocator.
+ * This is called in the allocator slow-path if the allocation request is of
+ * sufficient urgency to ignore watermarks and take other desperate measures
  */
-struct page *
-__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
-                       struct zonelist *zonelist, nodemask_t *nodemask)
+static inline struct page *
+__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
+{
+       struct page *page;
+
+       do {
+               page = get_page_from_freelist(gfp_mask, nodemask, order,
+                       zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
+                       preferred_zone, migratetype);
+
+               if (!page && gfp_mask & __GFP_NOFAIL)
+                       congestion_wait(WRITE, HZ/50);
+       } while (!page && (gfp_mask & __GFP_NOFAIL));
+
+       return page;
+}
+
+static inline
+void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
+                                               enum zone_type high_zoneidx)
 {
-       const gfp_t wait = gfp_mask & __GFP_WAIT;
-       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zoneref *z;
        struct zone *zone;
-       struct page *page;
-       struct reclaim_state reclaim_state;
-       struct task_struct *p = current;
-       int do_retry;
-       int alloc_flags;
-       unsigned long did_some_progress;
-       unsigned long pages_reclaimed = 0;
 
-       lockdep_trace_alloc(gfp_mask);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
+               wakeup_kswapd(zone, order);
+}
 
-       might_sleep_if(wait);
+static inline int
+gfp_to_alloc_flags(gfp_t gfp_mask)
+{
+       struct task_struct *p = current;
+       int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+       const gfp_t wait = gfp_mask & __GFP_WAIT;
 
-       if (should_fail_alloc_page(gfp_mask, order))
-               return NULL;
+       /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
+       BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
 
-restart:
-       z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */
+       /*
+        * The caller may dip into page reserves a bit more if the caller
+        * cannot run direct reclaim, or if the caller has realtime scheduling
+        * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
+        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+        */
+       alloc_flags |= (gfp_mask & __GFP_HIGH);
 
-       if (unlikely(!z->zone)) {
+       if (!wait) {
+               alloc_flags |= ALLOC_HARDER;
                /*
-                * Happens if we have an empty zonelist as a result of
-                * GFP_THISNODE being used on a memoryless node
+                * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+                * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
                 */
-               return NULL;
+               alloc_flags &= ~ALLOC_CPUSET;
+       } else if (unlikely(rt_task(p)))
+               alloc_flags |= ALLOC_HARDER;
+
+       if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
+               if (!in_interrupt() &&
+                   ((p->flags & PF_MEMALLOC) ||
+                    unlikely(test_thread_flag(TIF_MEMDIE))))
+                       alloc_flags |= ALLOC_NO_WATERMARKS;
        }
 
-       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-                       zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
-       if (page)
-               goto got_pg;
+       return alloc_flags;
+}
+
+static inline struct page *
+__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
+{
+       const gfp_t wait = gfp_mask & __GFP_WAIT;
+       struct page *page = NULL;
+       int alloc_flags;
+       unsigned long pages_reclaimed = 0;
+       unsigned long did_some_progress;
+       struct task_struct *p = current;
+
+       /*
+        * In the slowpath, we sanity check order to avoid ever trying to
+        * reclaim >= MAX_ORDER areas which will never succeed. Callers may
+        * be using allocators in order of preference for an area that is
+        * too large.
+        */
+       if (WARN_ON_ONCE(order >= MAX_ORDER))
+               return NULL;
 
        /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1507,154 +1754,83 @@ restart:
        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                goto nopage;
 
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
-               wakeup_kswapd(zone, order);
+       wake_all_kswapd(order, zonelist, high_zoneidx);
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
         * reclaim. Now things get more complex, so set up alloc_flags according
         * to how we want to proceed.
-        *
-        * The caller may dip into page reserves a bit more if the caller
-        * cannot run direct reclaim, or if the caller has realtime scheduling
-        * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
         */
-       alloc_flags = ALLOC_WMARK_MIN;
-       if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
-               alloc_flags |= ALLOC_HARDER;
-       if (gfp_mask & __GFP_HIGH)
-               alloc_flags |= ALLOC_HIGH;
-       if (wait)
-               alloc_flags |= ALLOC_CPUSET;
+       alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
-       /*
-        * Go through the zonelist again. Let __GFP_HIGH and allocations
-        * coming from realtime tasks go deeper into reserves.
-        *
-        * This is the last chance, in general, before the goto nopage.
-        * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-        * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
-        */
+restart:
+       /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
-                                               high_zoneidx, alloc_flags);
+                       high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
+                       preferred_zone, migratetype);
        if (page)
                goto got_pg;
 
-       /* This allocation should allow future memory freeing. */
-
 rebalance:
-       if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
-                       && !in_interrupt()) {
-               if (!(gfp_mask & __GFP_NOMEMALLOC)) {
-nofail_alloc:
-                       /* go through the zonelist yet again, ignoring mins */
-                       page = get_page_from_freelist(gfp_mask, nodemask, order,
-                               zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
-                       if (page)
-                               goto got_pg;
-                       if (gfp_mask & __GFP_NOFAIL) {
-                               congestion_wait(WRITE, HZ/50);
-                               goto nofail_alloc;
-                       }
-               }
-               goto nopage;
+       /* Allocate without watermarks if the context allows */
+       if (alloc_flags & ALLOC_NO_WATERMARKS) {
+               page = __alloc_pages_high_priority(gfp_mask, order,
+                               zonelist, high_zoneidx, nodemask,
+                               preferred_zone, migratetype);
+               if (page)
+                       goto got_pg;
        }
 
        /* Atomic allocations - we can't balance anything */
        if (!wait)
                goto nopage;
 
-       cond_resched();
+       /* Avoid recursion of direct reclaim */
+       if (p->flags & PF_MEMALLOC)
+               goto nopage;
+
+       /* Try direct reclaim and then allocating */
+       page = __alloc_pages_direct_reclaim(gfp_mask, order,
+                                       zonelist, high_zoneidx,
+                                       nodemask,
+                                       alloc_flags, preferred_zone,
+                                       migratetype, &did_some_progress);
+       if (page)
+               goto got_pg;
 
-       /* We now go into synchronous reclaim */
-       cpuset_memory_pressure_bump();
        /*
-        * The task's cpuset might have expanded its set of allowable nodes
+        * If we failed to make any progress reclaiming, then we are
+        * running out of options and have to consider going OOM
         */
-       cpuset_update_task_memory_state();
-       p->flags |= PF_MEMALLOC;
-
-       lockdep_set_current_reclaim_state(gfp_mask);
-       reclaim_state.reclaimed_slab = 0;
-       p->reclaim_state = &reclaim_state;
-
-       did_some_progress = try_to_free_pages(zonelist, order,
-                                               gfp_mask, nodemask);
-
-       p->reclaim_state = NULL;
-       lockdep_clear_current_reclaim_state();
-       p->flags &= ~PF_MEMALLOC;
-
-       cond_resched();
+       if (!did_some_progress) {
+               if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+                       if (oom_killer_disabled)
+                               goto nopage;
+                       page = __alloc_pages_may_oom(gfp_mask, order,
+                                       zonelist, high_zoneidx,
+                                       nodemask, preferred_zone,
+                                       migratetype);
+                       if (page)
+                               goto got_pg;
 
-       if (order != 0)
-               drain_all_pages();
+                       /*
+                        * The OOM killer does not trigger for high-order
+                        * ~__GFP_NOFAIL allocations so if no progress is being
+                        * made, there are no other options and retrying is
+                        * unlikely to help.
+                        */
+                       if (order > PAGE_ALLOC_COSTLY_ORDER &&
+                                               !(gfp_mask & __GFP_NOFAIL))
+                               goto nopage;
 
-       if (likely(did_some_progress)) {
-               page = get_page_from_freelist(gfp_mask, nodemask, order,
-                                       zonelist, high_zoneidx, alloc_flags);
-               if (page)
-                       goto got_pg;
-       } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
-               if (!try_set_zone_oom(zonelist, gfp_mask)) {
-                       schedule_timeout_uninterruptible(1);
                        goto restart;
                }
-
-               /*
-                * Go through the zonelist yet one more time, keep
-                * very high watermark here, this is only to catch
-                * a parallel oom killing, we must fail if we're still
-                * under heavy pressure.
-                */
-               page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
-                       order, zonelist, high_zoneidx,
-                       ALLOC_WMARK_HIGH|ALLOC_CPUSET);
-               if (page) {
-                       clear_zonelist_oom(zonelist, gfp_mask);
-                       goto got_pg;
-               }
-
-               /* The OOM killer will not help higher order allocs so fail */
-               if (order > PAGE_ALLOC_COSTLY_ORDER) {
-                       clear_zonelist_oom(zonelist, gfp_mask);
-                       goto nopage;
-               }
-
-               out_of_memory(zonelist, gfp_mask, order);
-               clear_zonelist_oom(zonelist, gfp_mask);
-               goto restart;
        }
 
-       /*
-        * Don't let big-order allocations loop unless the caller explicitly
-        * requests that.  Wait for some write requests to complete then retry.
-        *
-        * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
-        * means __GFP_NOFAIL, but that may not be true in other
-        * implementations.
-        *
-        * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
-        * specified, then we retry until we no longer reclaim any pages
-        * (above), or we've reclaimed an order of pages at least as
-        * large as the allocation's order. In both cases, if the
-        * allocation still fails, we stop retrying.
-        */
+       /* Check if we should retry the allocation */
        pages_reclaimed += did_some_progress;
-       do_retry = 0;
-       if (!(gfp_mask & __GFP_NORETRY)) {
-               if (order <= PAGE_ALLOC_COSTLY_ORDER) {
-                       do_retry = 1;
-               } else {
-                       if (gfp_mask & __GFP_REPEAT &&
-                               pages_reclaimed < (1 << order))
-                                       do_retry = 1;
-               }
-               if (gfp_mask & __GFP_NOFAIL)
-                       do_retry = 1;
-       }
-       if (do_retry) {
+       if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
+               /* Wait for some write requests to complete then retry */
                congestion_wait(WRITE, HZ/50);
                goto rebalance;
        }
@@ -1667,10 +1843,58 @@ nopage:
                dump_stack();
                show_mem();
        }
+       return page;
 got_pg:
+       if (kmemcheck_enabled)
+               kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+       return page;
+
+}
+
+/*
+ * This is the 'heart' of the zoned buddy allocator.
+ */
+struct page *
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+                       struct zonelist *zonelist, nodemask_t *nodemask)
+{
+       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       struct zone *preferred_zone;
+       struct page *page;
+       int migratetype = allocflags_to_migratetype(gfp_mask);
+
+       lockdep_trace_alloc(gfp_mask);
+
+       might_sleep_if(gfp_mask & __GFP_WAIT);
+
+       if (should_fail_alloc_page(gfp_mask, order))
+               return NULL;
+
+       /*
+        * Check the zones suitable for the gfp_mask contain at least one
+        * valid zone. It's possible to have an empty zonelist as a result
+        * of GFP_THISNODE and a memoryless node
+        */
+       if (unlikely(!zonelist->_zonerefs->zone))
+               return NULL;
+
+       /* The preferred zone is used for statistics later */
+       first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
+       if (!preferred_zone)
+               return NULL;
+
+       /* First allocation attempt */
+       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+                       zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
+                       preferred_zone, migratetype);
+       if (unlikely(!page))
+               page = __alloc_pages_slowpath(gfp_mask, order,
+                               zonelist, high_zoneidx, nodemask,
+                               preferred_zone, migratetype);
+
        return page;
 }
-EXPORT_SYMBOL(__alloc_pages_internal);
+EXPORT_SYMBOL(__alloc_pages_nodemask);
 
 /*
  * Common helper functions.
@@ -1799,7 +2023,7 @@ static unsigned int nr_free_zone_pages(int offset)
 
        for_each_zone_zonelist(zone, z, zonelist, offset) {
                unsigned long size = zone->present_pages;
-               unsigned long high = zone->pages_high;
+               unsigned long high = high_wmark_pages(zone);
                if (size > high)
                        sum += size - high;
        }
@@ -1891,19 +2115,14 @@ void show_free_areas(void)
 
        printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
                " inactive_file:%lu"
-//TODO:  check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
                " unevictable:%lu"
-#endif
                " dirty:%lu writeback:%lu unstable:%lu\n"
                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
                global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
                global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
                global_page_state(NR_UNEVICTABLE),
-#endif
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
@@ -1927,25 +2146,21 @@ void show_free_areas(void)
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
                        " unevictable:%lukB"
-#endif
                        " present:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
                        K(zone_page_state(zone, NR_FREE_PAGES)),
-                       K(zone->pages_min),
-                       K(zone->pages_low),
-                       K(zone->pages_high),
+                       K(min_wmark_pages(zone)),
+                       K(low_wmark_pages(zone)),
+                       K(high_wmark_pages(zone)),
                        K(zone_page_state(zone, NR_ACTIVE_ANON)),
                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                        K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
                        K(zone->present_pages),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
@@ -2103,7 +2318,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 }
 
 
-#define MAX_NODE_LOAD (num_online_nodes())
+#define MAX_NODE_LOAD (nr_online_nodes)
 static int node_load[MAX_NUMNODES];
 
 /**
@@ -2312,7 +2527,7 @@ static void build_zonelists(pg_data_t *pgdat)
 
        /* NUMA-aware ordering of nodes */
        local_node = pgdat->node_id;
-       load = num_online_nodes();
+       load = nr_online_nodes;
        prev_node = local_node;
        nodes_clear(used_mask);
 
@@ -2463,7 +2678,7 @@ void build_all_zonelists(void)
 
        printk("Built %i zonelists in %s order, mobility grouping %s.  "
                "Total pages: %ld\n",
-                       num_online_nodes(),
+                       nr_online_nodes,
                        zonelist_order_name[current_zonelist_order],
                        page_group_by_mobility_disabled ? "off" : "on",
                        vm_total_pages);
@@ -2542,8 +2757,8 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 /*
  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
- * of blocks reserved is based on zone->pages_min. The memory within the
- * reserve will tend to store contiguous free pages. Setting min_free_kbytes
+ * of blocks reserved is based on min_wmark_pages(zone). The memory within
+ * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
  * higher will lead to a bigger reserve which will get freed as contiguous
  * blocks as reclaim kicks in
  */
@@ -2556,7 +2771,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        /* Get the start pfn, end pfn and the number of blocks to reserve */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
-       reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
+       reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
@@ -3488,7 +3703,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone_pcp_init(zone);
                for_each_lru(l) {
                        INIT_LIST_HEAD(&zone->lru[l].list);
-                       zone->lru[l].nr_scan = 0;
+                       zone->lru[l].nr_saved_scan = 0;
                }
                zone->reclaim_stat.recent_rotated[0] = 0;
                zone->reclaim_stat.recent_rotated[1] = 0;
@@ -4025,6 +4240,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                                                early_node_map[i].start_pfn,
                                                early_node_map[i].end_pfn);
 
+       /*
+        * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init
+        * that node_mask, clear it at first
+        */
+       nodes_clear(node_states[N_HIGH_MEMORY]);
        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
@@ -4159,8 +4379,8 @@ static void calculate_totalreserve_pages(void)
                                        max = zone->lowmem_reserve[j];
                        }
 
-                       /* we treat pages_high as reserved pages. */
-                       max += zone->pages_high;
+                       /* we treat the high watermark as reserved pages. */
+                       max += high_wmark_pages(zone);
 
                        if (max > zone->present_pages)
                                max = zone->present_pages;
@@ -4210,12 +4430,13 @@ static void setup_per_zone_lowmem_reserve(void)
 }
 
 /**
- * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
  *
- * Ensures that the pages_{min,low,high} values for each zone are set correctly
- * with respect to min_free_kbytes.
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
  */
-void setup_per_zone_pages_min(void)
+void setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
@@ -4240,7 +4461,7 @@ void setup_per_zone_pages_min(void)
                         * need highmem pages, so cap pages_min to a small
                         * value here.
                         *
-                        * The (pages_high-pages_low) and (pages_low-pages_min)
+                        * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
                         * deltas controls asynch page reclaim, and so should
                         * not be capped for highmem.
                         */
@@ -4251,17 +4472,17 @@ void setup_per_zone_pages_min(void)
                                min_pages = SWAP_CLUSTER_MAX;
                        if (min_pages > 128)
                                min_pages = 128;
-                       zone->pages_min = min_pages;
+                       zone->watermark[WMARK_MIN] = min_pages;
                } else {
                        /*
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
-                       zone->pages_min = tmp;
+                       zone->watermark[WMARK_MIN] = tmp;
                }
 
-               zone->pages_low   = zone->pages_min + (tmp >> 2);
-               zone->pages_high  = zone->pages_min + (tmp >> 1);
+               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
+               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
@@ -4271,8 +4492,6 @@ void setup_per_zone_pages_min(void)
 }
 
 /**
- * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
- *
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
  * to be referenced again before it is swapped out.
@@ -4293,21 +4512,26 @@ void setup_per_zone_pages_min(void)
  *    1TB     101        10GB
  *   10TB     320        32GB
  */
-static void setup_per_zone_inactive_ratio(void)
+void calculate_zone_inactive_ratio(struct zone *zone)
 {
-       struct zone *zone;
-
-       for_each_zone(zone) {
-               unsigned int gb, ratio;
+       unsigned int gb, ratio;
 
-               /* Zone size in gigabytes */
-               gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       /* Zone size in gigabytes */
+       gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       if (gb)
                ratio = int_sqrt(10 * gb);
-               if (!ratio)
-                       ratio = 1;
+       else
+               ratio = 1;
 
-               zone->inactive_ratio = ratio;
-       }
+       zone->inactive_ratio = ratio;
+}
+
+static void __init setup_per_zone_inactive_ratio(void)
+{
+       struct zone *zone;
+
+       for_each_zone(zone)
+               calculate_zone_inactive_ratio(zone);
 }
 
 /*
@@ -4334,7 +4558,7 @@ static void setup_per_zone_inactive_ratio(void)
  * 8192MB:     11584k
  * 16384MB:    16384k
  */
-static int __init init_per_zone_pages_min(void)
+static int __init init_per_zone_wmark_min(void)
 {
        unsigned long lowmem_kbytes;
 
@@ -4345,12 +4569,12 @@ static int __init init_per_zone_pages_min(void)
                min_free_kbytes = 128;
        if (min_free_kbytes > 65536)
                min_free_kbytes = 65536;
-       setup_per_zone_pages_min();
+       setup_per_zone_wmarks();
        setup_per_zone_lowmem_reserve();
        setup_per_zone_inactive_ratio();
        return 0;
 }
-module_init(init_per_zone_pages_min)
+module_init(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
@@ -4362,7 +4586,7 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 {
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (write)
-               setup_per_zone_pages_min();
+               setup_per_zone_wmarks();
        return 0;
 }
 
@@ -4406,7 +4630,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
  *     whenever sysctl_lowmem_reserve_ratio changes.
  *
  * The reserve ratio obviously has absolutely no relation with the
- * pages_min watermarks. The lowmem reserve ratio can only make sense
+ * minimum watermarks. The lowmem reserve ratio can only make sense
  * if in function of the boot time zone sizes.
  */
 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
@@ -4513,23 +4737,13 @@ void *__init alloc_large_system_hash(const char *tablename,
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
-                       unsigned long order = get_order(size);
-                       table = (void*) __get_free_pages(GFP_ATOMIC, order);
                        /*
                         * If bucketsize is not a power-of-two, we may free
-                        * some pages at the end of hash table.
+                        * some pages at the end of hash table which
+                        * alloc_pages_exact() automatically does
                         */
-                       if (table) {
-                               unsigned long alloc_end = (unsigned long)table +
-                                               (PAGE_SIZE << order);
-                               unsigned long used = (unsigned long)table +
-                                               PAGE_ALIGN(size);
-                               split_page(virt_to_page(table), order);
-                               while (used < alloc_end) {
-                                       free_page(used);
-                                       used += PAGE_SIZE;
-                               }
-                       }
+                       if (get_order(size) < MAX_ORDER)
+                               table = alloc_pages_exact(size, GFP_ATOMIC);
                }
        } while (!table && size > PAGE_SIZE && --log2qty);
 
index 3023c475e0415fc0c6ec537a556e6d3835a2b1d9..c6f3e5071de3bb57c737cef88ab32c3cbdac4aee 100644 (file)
@@ -120,7 +120,7 @@ out:
        return ret;
 }
 
-int swap_readpage(struct file *file, struct page *page)
+int swap_readpage(struct page *page)
 {
        struct bio *bio;
        int ret = 0;
index 133b6d525513a886247ce3adc5eff41ef4e91752..aa1aa23452355067af9d62179cd41d62c64e68fc 100644 (file)
@@ -133,15 +133,12 @@ out:
 }
 
 /*
- * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
  * the pages first, then submits them all for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
- *
- * do_page_cache_readahead() returns -1 if it encountered request queue
- * congestion.
  */
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -210,6 +207,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;
 
+       nr_to_read = max_sane_readahead(nr_to_read);
        while (nr_to_read) {
                int err;
 
@@ -230,22 +228,6 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
        return ret;
 }
 
-/*
- * This version skips the IO if the queue is read-congested, and will tell the
- * block layer to abandon the readahead if request allocation would block.
- *
- * force_page_cache_readahead() will ignore queue congestion and will block on
- * request queues.
- */
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-                       pgoff_t offset, unsigned long nr_to_read)
-{
-       if (bdi_read_congested(mapping->backing_dev_info))
-               return -1;
-
-       return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
-}
-
 /*
  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
  * sensible upper limit.
@@ -259,7 +241,7 @@ unsigned long max_sane_readahead(unsigned long nr)
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-static unsigned long ra_submit(struct file_ra_state *ra,
+unsigned long ra_submit(struct file_ra_state *ra,
                       struct address_space *mapping, struct file *filp)
 {
        int actual;
@@ -347,6 +329,59 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * it approaches max_readhead.
  */
 
+/*
+ * Count contiguously cached pages from @offset-1 to @offset-@max,
+ * this count is a conservative estimation of
+ *     - length of the sequential read sequence, or
+ *     - thrashing threshold in memory tight systems
+ */
+static pgoff_t count_history_pages(struct address_space *mapping,
+                                  struct file_ra_state *ra,
+                                  pgoff_t offset, unsigned long max)
+{
+       pgoff_t head;
+
+       rcu_read_lock();
+       head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
+       rcu_read_unlock();
+
+       return offset - 1 - head;
+}
+
+/*
+ * page cache context based read-ahead
+ */
+static int try_context_readahead(struct address_space *mapping,
+                                struct file_ra_state *ra,
+                                pgoff_t offset,
+                                unsigned long req_size,
+                                unsigned long max)
+{
+       pgoff_t size;
+
+       size = count_history_pages(mapping, ra, offset, max);
+
+       /*
+        * no history pages:
+        * it could be a random read
+        */
+       if (!size)
+               return 0;
+
+       /*
+        * starts from beginning of file:
+        * it is a strong indication of long-run stream (or whole-file-read)
+        */
+       if (size >= offset)
+               size *= 2;
+
+       ra->start = offset;
+       ra->size = get_init_ra_size(size + req_size, max);
+       ra->async_size = ra->size;
+
+       return 1;
+}
+
 /*
  * A minimal readahead algorithm for trivial sequential/random reads.
  */
@@ -356,34 +391,26 @@ ondemand_readahead(struct address_space *mapping,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
 {
-       int     max = ra->ra_pages;     /* max readahead pages */
-       pgoff_t prev_offset;
-       int     sequential;
+       unsigned long max = max_sane_readahead(ra->ra_pages);
+
+       /*
+        * start of file
+        */
+       if (!offset)
+               goto initial_readahead;
 
        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
-       if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
-                       offset == (ra->start + ra->size))) {
+       if ((offset == (ra->start + ra->size - ra->async_size) ||
+            offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }
 
-       prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
-       sequential = offset - prev_offset <= 1UL || req_size > max;
-
-       /*
-        * Standalone, small read.
-        * Read as is, and do not pollute the readahead state.
-        */
-       if (!hit_readahead_marker && !sequential) {
-               return __do_page_cache_readahead(mapping, filp,
-                                               offset, req_size, 0);
-       }
-
        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
@@ -394,7 +421,7 @@ ondemand_readahead(struct address_space *mapping,
                pgoff_t start;
 
                rcu_read_lock();
-               start = radix_tree_next_hole(&mapping->page_tree, offset,max+1);
+               start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
                rcu_read_unlock();
 
                if (!start || start - offset > max)
@@ -402,23 +429,53 @@ ondemand_readahead(struct address_space *mapping,
 
                ra->start = start;
                ra->size = start - offset;      /* old async_size */
+               ra->size += req_size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }
 
        /*
-        * It may be one of
-        *      - first read on start of file
-        *      - sequential cache miss
-        *      - oversize random read
-        * Start readahead for it.
+        * oversize read
+        */
+       if (req_size > max)
+               goto initial_readahead;
+
+       /*
+        * sequential cache miss
+        */
+       if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+               goto initial_readahead;
+
+       /*
+        * Query the page cache and look for the traces(cached history pages)
+        * that a sequential stream would leave behind.
+        */
+       if (try_context_readahead(mapping, ra, offset, req_size, max))
+               goto readit;
+
+       /*
+        * standalone, small random read
+        * Read as is, and do not pollute the readahead state.
         */
+       return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+
+initial_readahead:
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
 readit:
+       /*
+        * Will this read hit the readahead marker made by itself?
+        * If so, trigger the readahead marker hit now, and merge
+        * the resulted next readahead window into the current one.
+        */
+       if (offset == ra->start && ra->size == ra->async_size) {
+               ra->async_size = get_next_ra_size(ra, max);
+               ra->size += ra->async_size;
+       }
+
        return ra_submit(ra, mapping, filp);
 }
 
index 23122af3261177713b9e2640b9ef108017083457..c9ccc1a72dc32652827c0d6d81add911f59abbfb 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -333,7 +333,9 @@ static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
 static int page_referenced_one(struct page *page,
-       struct vm_area_struct *vma, unsigned int *mapcount)
+                              struct vm_area_struct *vma,
+                              unsigned int *mapcount,
+                              unsigned long *vm_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -381,11 +383,14 @@ out_unmap:
        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
 out:
+       if (referenced)
+               *vm_flags |= vma->vm_flags;
        return referenced;
 }
 
 static int page_referenced_anon(struct page *page,
-                               struct mem_cgroup *mem_cont)
+                               struct mem_cgroup *mem_cont,
+                               unsigned long *vm_flags)
 {
        unsigned int mapcount;
        struct anon_vma *anon_vma;
@@ -405,7 +410,8 @@ static int page_referenced_anon(struct page *page,
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
-               referenced += page_referenced_one(page, vma, &mapcount);
+               referenced += page_referenced_one(page, vma,
+                                                 &mapcount, vm_flags);
                if (!mapcount)
                        break;
        }
@@ -418,6 +424,7 @@ static int page_referenced_anon(struct page *page,
  * page_referenced_file - referenced check for object-based rmap
  * @page: the page we're checking references on.
  * @mem_cont: target memory controller
+ * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
  *
  * For an object-based mapped page, find all the places it is mapped and
  * check/clear the referenced flag.  This is done by following the page->mapping
@@ -427,7 +434,8 @@ static int page_referenced_anon(struct page *page,
  * This function is only called from page_referenced for object-based pages.
  */
 static int page_referenced_file(struct page *page,
-                               struct mem_cgroup *mem_cont)
+                               struct mem_cgroup *mem_cont,
+                               unsigned long *vm_flags)
 {
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
@@ -467,7 +475,8 @@ static int page_referenced_file(struct page *page,
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
-               referenced += page_referenced_one(page, vma, &mapcount);
+               referenced += page_referenced_one(page, vma,
+                                                 &mapcount, vm_flags);
                if (!mapcount)
                        break;
        }
@@ -481,29 +490,35 @@ static int page_referenced_file(struct page *page,
  * @page: the page to test
  * @is_locked: caller holds lock on the page
  * @mem_cont: target memory controller
+ * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
  *
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked,
-                       struct mem_cgroup *mem_cont)
+int page_referenced(struct page *page,
+                   int is_locked,
+                   struct mem_cgroup *mem_cont,
+                   unsigned long *vm_flags)
 {
        int referenced = 0;
 
        if (TestClearPageReferenced(page))
                referenced++;
 
+       *vm_flags = 0;
        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
-                       referenced += page_referenced_anon(page, mem_cont);
+                       referenced += page_referenced_anon(page, mem_cont,
+                                                               vm_flags);
                else if (is_locked)
-                       referenced += page_referenced_file(page, mem_cont);
+                       referenced += page_referenced_file(page, mem_cont,
+                                                               vm_flags);
                else if (!trylock_page(page))
                        referenced++;
                else {
                        if (page->mapping)
-                               referenced +=
-                                       page_referenced_file(page, mem_cont);
+                               referenced += page_referenced_file(page,
+                                                       mem_cont, vm_flags);
                        unlock_page(page);
                }
        }
@@ -1202,7 +1217,6 @@ int try_to_unmap(struct page *page, int migration)
        return ret;
 }
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /**
  * try_to_munlock - try to munlock a page
  * @page: the page to be munlocked
@@ -1226,4 +1240,4 @@ int try_to_munlock(struct page *page)
        else
                return try_to_unmap_file(page, 1, 0);
 }
-#endif
+
index 0132fbd45a23837d5abc3cdea527a6d3cafb53a7..e89d7ec18eda46d42abf68c3994db0e33cfded06 100644 (file)
@@ -1097,7 +1097,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        shmem_swp_unmap(entry);
 unlock:
        spin_unlock(&info->lock);
-       swap_free(swap);
+       swapcache_free(swap, NULL);
 redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
@@ -2612,7 +2612,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
  * @size: size to be set for the file
  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
  */
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
 {
        int error;
        struct file *file;
index 18e3164de09a9ce3b5c69b9fd53b6533b39e9486..f257d4dd474db5f3ee0ed42016b86dd3d5b02bc4 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
 #include       <linux/debugobjects.h>
+#include       <linux/kmemcheck.h>
 
 #include       <asm/cacheflush.h>
 #include       <asm/tlbflush.h>
                         SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
@@ -380,87 +381,6 @@ static void kmem_list3_init(struct kmem_list3 *parent)
        MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);  \
        } while (0)
 
-/*
- * struct kmem_cache
- *
- * manages a cache.
- */
-
-struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
-       struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
-       unsigned int batchcount;
-       unsigned int limit;
-       unsigned int shared;
-
-       unsigned int buffer_size;
-       u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
-
-       unsigned int flags;             /* constant flags */
-       unsigned int num;               /* # of objs per slab */
-
-/* 4) cache_grow/shrink */
-       /* order of pgs per slab (2^n) */
-       unsigned int gfporder;
-
-       /* force GFP flags, e.g. GFP_DMA */
-       gfp_t gfpflags;
-
-       size_t colour;                  /* cache colouring range */
-       unsigned int colour_off;        /* colour offset */
-       struct kmem_cache *slabp_cache;
-       unsigned int slab_size;
-       unsigned int dflags;            /* dynamic flags */
-
-       /* constructor func */
-       void (*ctor)(void *obj);
-
-/* 5) cache creation/removal */
-       const char *name;
-       struct list_head next;
-
-/* 6) statistics */
-#if STATS
-       unsigned long num_active;
-       unsigned long num_allocations;
-       unsigned long high_mark;
-       unsigned long grown;
-       unsigned long reaped;
-       unsigned long errors;
-       unsigned long max_freeable;
-       unsigned long node_allocs;
-       unsigned long node_frees;
-       unsigned long node_overflow;
-       atomic_t allochit;
-       atomic_t allocmiss;
-       atomic_t freehit;
-       atomic_t freemiss;
-#endif
-#if DEBUG
-       /*
-        * If debugging is enabled, then the allocator can add additional
-        * fields and/or padding to every object. buffer_size contains the total
-        * object size including these internal fields, the following two
-        * variables contain the offset to the user object and its size.
-        */
-       int obj_offset;
-       int obj_size;
-#endif
-       /*
-        * We put nodelists[] at the end of kmem_cache, because we want to size
-        * this array to nr_node_ids slots instead of MAX_NUMNODES
-        * (see kmem_cache_init())
-        * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
-        * is statically defined, so we reserve the max number of nodes.
-        */
-       struct kmem_list3 *nodelists[MAX_NUMNODES];
-       /*
-        * Do not add fields after nodelists[]
-        */
-};
-
 #define CFLGS_OFF_SLAB         (0x80000000UL)
 #define        OFF_SLAB(x)     ((x)->flags & CFLGS_OFF_SLAB)
 
@@ -898,7 +818,6 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
   */
 
 static int use_alien_caches __read_mostly = 1;
-static int numa_platform __read_mostly = 1;
 static int __init noaliencache_setup(char *s)
 {
        use_alien_caches = 0;
@@ -1457,10 +1376,8 @@ void __init kmem_cache_init(void)
        int order;
        int node;
 
-       if (num_possible_nodes() == 1) {
+       if (num_possible_nodes() == 1)
                use_alien_caches = 0;
-               numa_platform = 0;
-       }
 
        for (i = 0; i < NUM_INIT_LISTS; i++) {
                kmem_list3_init(&initkmem_list3[i]);
@@ -1707,7 +1624,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                flags |= __GFP_RECLAIMABLE;
 
-       page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+       page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
        if (!page)
                return NULL;
 
@@ -1720,6 +1637,16 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
                        NR_SLAB_UNRECLAIMABLE, nr_pages);
        for (i = 0; i < nr_pages; i++)
                __SetPageSlab(page + i);
+
+       if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+               kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
+
+               if (cachep->ctor)
+                       kmemcheck_mark_uninitialized_pages(page, nr_pages);
+               else
+                       kmemcheck_mark_unallocated_pages(page, nr_pages);
+       }
+
        return page_address(page);
 }
 
@@ -1732,6 +1659,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
        struct page *page = virt_to_page(addr);
        const unsigned long nr_freed = i;
 
+       kmemcheck_free_shadow(page, cachep->gfporder);
+
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                sub_zone_page_state(page_zone(page),
                                NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3261,7 +3190,7 @@ retry:
                if (local_flags & __GFP_WAIT)
                        local_irq_enable();
                kmem_flagcheck(cache, flags);
-               obj = kmem_getpages(cache, local_flags, -1);
+               obj = kmem_getpages(cache, local_flags, numa_node_id());
                if (local_flags & __GFP_WAIT)
                        local_irq_disable();
                if (obj) {
@@ -3407,6 +3336,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
        kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
                                 flags);
 
+       if (likely(ptr))
+               kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
        if (unlikely((flags & __GFP_ZERO) && ptr))
                memset(ptr, 0, obj_size(cachep));
 
@@ -3467,6 +3399,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
                                 flags);
        prefetchw(objp);
 
+       if (likely(objp))
+               kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
        if (unlikely((flags & __GFP_ZERO) && objp))
                memset(objp, 0, obj_size(cachep));
 
@@ -3583,6 +3518,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+       kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
        /*
         * Skip calling cache_free_alien() when the platform is not numa.
         * This will avoid cache misses that happen while accessing slabp (which
@@ -3590,7 +3527,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
         * variable to skip the call, which is mostly likely to be present in
         * the cache.
         */
-       if (numa_platform && cache_free_alien(cachep, objp))
+       if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
                return;
 
        if (likely(ac->avail < ac->limit)) {
index 12f261499925a66b2b190e4f193893e4087c6fb4..64f6db1943bfd501281c046a04f74023b198d132 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -46,7 +46,7 @@
  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  * logic down to the page allocator, and simply doing the node accounting
  * on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_node() with the specified node id is used
+ * provided, alloc_pages_exact_node() with the specified node id is used
  * instead. The common case (or when the node id isn't explicitly provided)
  * will default to the current node, as per numa_node_id().
  *
@@ -244,7 +244,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 
 #ifdef CONFIG_NUMA
        if (node != -1)
-               page = alloc_pages_node(node, gfp, order);
+               page = alloc_pages_exact_node(node, gfp, order);
        else
 #endif
                page = alloc_pages(gfp, order);
index 30354bfeb43d5b093669fb0ef62b213e0c3991f2..2701419b0adcab389434cb07a3f7f9af175afeca 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -18,6 +18,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/kmemleak.h>
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
 
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-               SLAB_CACHE_DMA)
+               SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 #ifndef ARCH_KMALLOC_MINALIGN
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
@@ -1071,6 +1072,8 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 {
        int order = oo_order(oo);
 
+       flags |= __GFP_NOTRACK;
+
        if (node == -1)
                return alloc_pages(flags, order);
        else
@@ -1098,6 +1101,24 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
                stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
        }
+
+       if (kmemcheck_enabled
+               && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
+       {
+               int pages = 1 << oo_order(oo);
+
+               kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+
+               /*
+                * Objects from caches that have a constructor don't get
+                * cleared when they're allocated, so we need to do it here.
+                */
+               if (s->ctor)
+                       kmemcheck_mark_uninitialized_pages(page, pages);
+               else
+                       kmemcheck_mark_unallocated_pages(page, pages);
+       }
+
        page->objects = oo_objects(oo);
        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -1171,6 +1192,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
                __ClearPageSlubDebug(page);
        }
 
+       kmemcheck_free_shadow(page, compound_order(page));
+
        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -1626,7 +1649,9 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
        if (unlikely((gfpflags & __GFP_ZERO) && object))
                memset(object, 0, objsize);
 
+       kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
        kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+
        return object;
 }
 
@@ -1759,6 +1784,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
        kmemleak_free_recursive(x, s->flags);
        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
+       kmemcheck_slab_free(s, object, c->objsize);
        debug_check_no_locks_freed(object, c->objsize);
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(object, c->objsize);
@@ -2633,7 +2659,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
        if (!s || !text || !kmem_cache_open(s, flags, text,
                        realsize, ARCH_KMALLOC_MINALIGN,
-                       SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
+                       SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED,
+                       NULL)) {
                kfree(s);
                kfree(text);
                goto unlock_out;
@@ -2727,9 +2754,10 @@ EXPORT_SYMBOL(__kmalloc);
 
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-       struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
-                                               get_order(size));
+       struct page *page;
 
+       flags |= __GFP_COMP | __GFP_NOTRACK;
+       page = alloc_pages_node(node, flags, get_order(size));
        if (page)
                return page_address(page);
        else
@@ -3737,7 +3765,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
                                                 to_cpumask(l->cpus));
                }
 
-               if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
+               if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
                                len < PAGE_SIZE - 60) {
                        len += sprintf(buf + len, " nodes=");
                        len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
@@ -4412,6 +4440,8 @@ static char *create_unique_id(struct kmem_cache *s)
                *p++ = 'a';
        if (s->flags & SLAB_DEBUG_FREE)
                *p++ = 'F';
+       if (!(s->flags & SLAB_NOTRACK))
+               *p++ = 't';
        if (p != name + 1)
                *p++ = '-';
        p += sprintf(p, "%07d", s->size);
index 1416e7e9e02db3b5da60fe3a270e98e7b84024bb..42cd38eba79f1ffb096db8683bc11dd5604fe367 100644 (file)
@@ -124,7 +124,6 @@ void __delete_from_swap_cache(struct page *page)
 /**
  * add_to_swap - allocate swap space for a page
  * @page: page we want to move to swap
- * @gfp_mask: memory allocation flags
  *
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock. 
@@ -162,11 +161,11 @@ int add_to_swap(struct page *page)
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
-                       swap_free(entry);
+                       swapcache_free(entry, NULL);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
-                       swap_free(entry);
+                       swapcache_free(entry, NULL);
                        return 0;
                }
        }
@@ -188,8 +187,7 @@ void delete_from_swap_cache(struct page *page)
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);
 
-       mem_cgroup_uncharge_swapcache(page, entry);
-       swap_free(entry);
+       swapcache_free(entry, page);
        page_cache_release(page);
 }
 
@@ -293,7 +291,10 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
-               if (!swap_duplicate(entry))
+               err = swapcache_prepare(entry);
+               if (err == -EEXIST) /* seems racy */
+                       continue;
+               if (err)           /* swp entry is obsolete ? */
                        break;
 
                /*
@@ -312,12 +313,12 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
-                       swap_readpage(NULL, new_page);
+                       swap_readpage(new_page);
                        return new_page;
                }
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
-               swap_free(entry);
+               swapcache_free(entry, NULL);
        } while (err != -ENOMEM);
 
        if (new_page)
index 312fafe0ab6ed4815ac02da3f712aca18bacbbad..28faa01cf578bd1bc05fa20771015194fd966e4d 100644 (file)
@@ -53,6 +53,59 @@ static struct swap_info_struct swap_info[MAX_SWAPFILES];
 
 static DEFINE_MUTEX(swapon_mutex);
 
+/* For reference count accounting in swap_map */
+/* enum for swap_map[] handling. internal use only */
+enum {
+       SWAP_MAP = 0,   /* ops for reference from swap users */
+       SWAP_CACHE,     /* ops for reference from swap cache */
+};
+
+static inline int swap_count(unsigned short ent)
+{
+       return ent & SWAP_COUNT_MASK;
+}
+
+static inline bool swap_has_cache(unsigned short ent)
+{
+       return !!(ent & SWAP_HAS_CACHE);
+}
+
+static inline unsigned short encode_swapmap(int count, bool has_cache)
+{
+       unsigned short ret = count;
+
+       if (has_cache)
+               return SWAP_HAS_CACHE | ret;
+       return ret;
+}
+
+/* returnes 1 if swap entry is freed */
+static int
+__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
+{
+       int type = si - swap_info;
+       swp_entry_t entry = swp_entry(type, offset);
+       struct page *page;
+       int ret = 0;
+
+       page = find_get_page(&swapper_space, entry.val);
+       if (!page)
+               return 0;
+       /*
+        * This function is called from scan_swap_map() and it's called
+        * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
+        * We have to use trylock for avoiding deadlock. This is a special
+        * case and you should use try_to_free_swap() with explicit lock_page()
+        * in usual operations.
+        */
+       if (trylock_page(page)) {
+               ret = try_to_free_swap(page);
+               unlock_page(page);
+       }
+       page_cache_release(page);
+       return ret;
+}
+
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
  * hold swap_lock while calling the unplug_fn. And swap_lock
@@ -167,7 +220,8 @@ static int wait_for_discard(void *word)
 #define SWAPFILE_CLUSTER       256
 #define LATENCY_LIMIT          256
 
-static inline unsigned long scan_swap_map(struct swap_info_struct *si)
+static inline unsigned long scan_swap_map(struct swap_info_struct *si,
+                                         int cache)
 {
        unsigned long offset;
        unsigned long scan_base;
@@ -273,6 +327,19 @@ checks:
                goto no_page;
        if (offset > si->highest_bit)
                scan_base = offset = si->lowest_bit;
+
+       /* reuse swap entry of cache-only swap if not busy. */
+       if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+               int swap_was_freed;
+               spin_unlock(&swap_lock);
+               swap_was_freed = __try_to_reclaim_swap(si, offset);
+               spin_lock(&swap_lock);
+               /* entry was freed successfully, try to use this again */
+               if (swap_was_freed)
+                       goto checks;
+               goto scan; /* check next one */
+       }
+
        if (si->swap_map[offset])
                goto scan;
 
@@ -285,7 +352,10 @@ checks:
                si->lowest_bit = si->max;
                si->highest_bit = 0;
        }
-       si->swap_map[offset] = 1;
+       if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */
+               si->swap_map[offset] = encode_swapmap(0, true);
+       else /* at suspend */
+               si->swap_map[offset] = encode_swapmap(1, false);
        si->cluster_next = offset + 1;
        si->flags -= SWP_SCANNING;
 
@@ -351,6 +421,10 @@ scan:
                        spin_lock(&swap_lock);
                        goto checks;
                }
+               if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+                       spin_lock(&swap_lock);
+                       goto checks;
+               }
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;
@@ -362,6 +436,10 @@ scan:
                        spin_lock(&swap_lock);
                        goto checks;
                }
+               if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+                       spin_lock(&swap_lock);
+                       goto checks;
+               }
                if (unlikely(--latency_ration < 0)) {
                        cond_resched();
                        latency_ration = LATENCY_LIMIT;
@@ -401,7 +479,8 @@ swp_entry_t get_swap_page(void)
                        continue;
 
                swap_list.next = next;
-               offset = scan_swap_map(si);
+               /* This is called for allocating swap entry for cache */
+               offset = scan_swap_map(si, SWAP_CACHE);
                if (offset) {
                        spin_unlock(&swap_lock);
                        return swp_entry(type, offset);
@@ -415,6 +494,7 @@ noswap:
        return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now susupend routine */
 swp_entry_t get_swap_page_of_type(int type)
 {
        struct swap_info_struct *si;
@@ -424,7 +504,8 @@ swp_entry_t get_swap_page_of_type(int type)
        si = swap_info + type;
        if (si->flags & SWP_WRITEOK) {
                nr_swap_pages--;
-               offset = scan_swap_map(si);
+               /* This is called for allocating swap entry, not cache */
+               offset = scan_swap_map(si, SWAP_MAP);
                if (offset) {
                        spin_unlock(&swap_lock);
                        return swp_entry(type, offset);
@@ -471,25 +552,38 @@ out:
        return NULL;
 }
 
-static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent)
+static int swap_entry_free(struct swap_info_struct *p,
+                          swp_entry_t ent, int cache)
 {
        unsigned long offset = swp_offset(ent);
-       int count = p->swap_map[offset];
-
-       if (count < SWAP_MAP_MAX) {
-               count--;
-               p->swap_map[offset] = count;
-               if (!count) {
-                       if (offset < p->lowest_bit)
-                               p->lowest_bit = offset;
-                       if (offset > p->highest_bit)
-                               p->highest_bit = offset;
-                       if (p->prio > swap_info[swap_list.next].prio)
-                               swap_list.next = p - swap_info;
-                       nr_swap_pages++;
-                       p->inuse_pages--;
-                       mem_cgroup_uncharge_swap(ent);
+       int count = swap_count(p->swap_map[offset]);
+       bool has_cache;
+
+       has_cache = swap_has_cache(p->swap_map[offset]);
+
+       if (cache == SWAP_MAP) { /* dropping usage count of swap */
+               if (count < SWAP_MAP_MAX) {
+                       count--;
+                       p->swap_map[offset] = encode_swapmap(count, has_cache);
                }
+       } else { /* dropping swap cache flag */
+               VM_BUG_ON(!has_cache);
+               p->swap_map[offset] = encode_swapmap(count, false);
+
+       }
+       /* return code. */
+       count = p->swap_map[offset];
+       /* free if no reference */
+       if (!count) {
+               if (offset < p->lowest_bit)
+                       p->lowest_bit = offset;
+               if (offset > p->highest_bit)
+                       p->highest_bit = offset;
+               if (p->prio > swap_info[swap_list.next].prio)
+                       swap_list.next = p - swap_info;
+               nr_swap_pages++;
+               p->inuse_pages--;
+               mem_cgroup_uncharge_swap(ent);
        }
        return count;
 }
@@ -504,9 +598,26 @@ void swap_free(swp_entry_t entry)
 
        p = swap_info_get(entry);
        if (p) {
-               swap_entry_free(p, entry);
+               swap_entry_free(p, entry, SWAP_MAP);
+               spin_unlock(&swap_lock);
+       }
+}
+
+/*
+ * Called after dropping swapcache to decrease refcnt to swap entries.
+ */
+void swapcache_free(swp_entry_t entry, struct page *page)
+{
+       struct swap_info_struct *p;
+
+       if (page)
+               mem_cgroup_uncharge_swapcache(page, entry);
+       p = swap_info_get(entry);
+       if (p) {
+               swap_entry_free(p, entry, SWAP_CACHE);
                spin_unlock(&swap_lock);
        }
+       return;
 }
 
 /*
@@ -521,8 +632,7 @@ static inline int page_swapcount(struct page *page)
        entry.val = page_private(page);
        p = swap_info_get(entry);
        if (p) {
-               /* Subtract the 1 for the swap cache itself */
-               count = p->swap_map[swp_offset(entry)] - 1;
+               count = swap_count(p->swap_map[swp_offset(entry)]);
                spin_unlock(&swap_lock);
        }
        return count;
@@ -584,7 +694,7 @@ int free_swap_and_cache(swp_entry_t entry)
 
        p = swap_info_get(entry);
        if (p) {
-               if (swap_entry_free(p, entry) == 1) {
+               if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) {
                        page = find_get_page(&swapper_space, entry.val);
                        if (page && !trylock_page(page)) {
                                page_cache_release(page);
@@ -891,7 +1001,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
                        i = 1;
                }
                count = si->swap_map[i];
-               if (count && count != SWAP_MAP_BAD)
+               if (count && swap_count(count) != SWAP_MAP_BAD)
                        break;
        }
        return i;
@@ -995,13 +1105,13 @@ static int try_to_unuse(unsigned int type)
                 */
                shmem = 0;
                swcount = *swap_map;
-               if (swcount > 1) {
+               if (swap_count(swcount)) {
                        if (start_mm == &init_mm)
                                shmem = shmem_unuse(entry, page);
                        else
                                retval = unuse_mm(start_mm, entry, page);
                }
-               if (*swap_map > 1) {
+               if (swap_count(*swap_map)) {
                        int set_start_mm = (*swap_map >= swcount);
                        struct list_head *p = &start_mm->mmlist;
                        struct mm_struct *new_start_mm = start_mm;
@@ -1011,7 +1121,7 @@ static int try_to_unuse(unsigned int type)
                        atomic_inc(&new_start_mm->mm_users);
                        atomic_inc(&prev_mm->mm_users);
                        spin_lock(&mmlist_lock);
-                       while (*swap_map > 1 && !retval && !shmem &&
+                       while (swap_count(*swap_map) && !retval && !shmem &&
                                        (p = p->next) != &start_mm->mmlist) {
                                mm = list_entry(p, struct mm_struct, mmlist);
                                if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1023,14 +1133,16 @@ static int try_to_unuse(unsigned int type)
                                cond_resched();
 
                                swcount = *swap_map;
-                               if (swcount <= 1)
+                               if (!swap_count(swcount)) /* any usage ? */
                                        ;
                                else if (mm == &init_mm) {
                                        set_start_mm = 1;
                                        shmem = shmem_unuse(entry, page);
                                } else
                                        retval = unuse_mm(mm, entry, page);
-                               if (set_start_mm && *swap_map < swcount) {
+
+                               if (set_start_mm &&
+                                   swap_count(*swap_map) < swcount) {
                                        mmput(new_start_mm);
                                        atomic_inc(&mm->mm_users);
                                        new_start_mm = mm;
@@ -1057,21 +1169,25 @@ static int try_to_unuse(unsigned int type)
                }
 
                /*
-                * How could swap count reach 0x7fff when the maximum
-                * pid is 0x7fff, and there's no way to repeat a swap
-                * page within an mm (except in shmem, where it's the
-                * shared object which takes the reference count)?
-                * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
-                *
+                * How could swap count reach 0x7ffe ?
+                * There's no way to repeat a swap page within an mm
+                * (except in shmem, where it's the shared object which takes
+                * the reference count)?
+                * We believe SWAP_MAP_MAX cannot occur.(if occur, unsigned
+                * short is too small....)
                 * If that's wrong, then we should worry more about
                 * exit_mmap() and do_munmap() cases described above:
                 * we might be resetting SWAP_MAP_MAX too early here.
                 * We know "Undead"s can happen, they're okay, so don't
                 * report them; but do report if we reset SWAP_MAP_MAX.
                 */
-               if (*swap_map == SWAP_MAP_MAX) {
+               /* We might release the lock_page() in unuse_mm(). */
+               if (!PageSwapCache(page) || page_private(page) != entry.val)
+                       goto retry;
+
+               if (swap_count(*swap_map) == SWAP_MAP_MAX) {
                        spin_lock(&swap_lock);
-                       *swap_map = 1;
+                       *swap_map = encode_swapmap(0, true);
                        spin_unlock(&swap_lock);
                        reset_overflow = 1;
                }
@@ -1089,7 +1205,8 @@ static int try_to_unuse(unsigned int type)
                 * pages would be incorrect if swap supported "shared
                 * private" pages, but they are handled by tmpfs files.
                 */
-               if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
+               if (swap_count(*swap_map) &&
+                    PageDirty(page) && PageSwapCache(page)) {
                        struct writeback_control wbc = {
                                .sync_mode = WB_SYNC_NONE,
                        };
@@ -1116,6 +1233,7 @@ static int try_to_unuse(unsigned int type)
                 * mark page dirty so shrink_page_list will preserve it.
                 */
                SetPageDirty(page);
+retry:
                unlock_page(page);
                page_cache_release(page);
 
@@ -1942,15 +2060,23 @@ void si_swapinfo(struct sysinfo *val)
  *
  * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
  * "permanent", but will be reclaimed by the next swapoff.
+ * Returns error code in following case.
+ * - success -> 0
+ * - swp_entry is invalid -> EINVAL
+ * - swp_entry is migration entry -> EINVAL
+ * - swap-cache reference is requested but there is already one. -> EEXIST
+ * - swap-cache reference is requested but the entry is not used. -> ENOENT
  */
-int swap_duplicate(swp_entry_t entry)
+static int __swap_duplicate(swp_entry_t entry, bool cache)
 {
        struct swap_info_struct * p;
        unsigned long offset, type;
-       int result = 0;
+       int result = -EINVAL;
+       int count;
+       bool has_cache;
 
        if (is_migration_entry(entry))
-               return 1;
+               return -EINVAL;
 
        type = swp_type(entry);
        if (type >= nr_swapfiles)
@@ -1959,17 +2085,40 @@ int swap_duplicate(swp_entry_t entry)
        offset = swp_offset(entry);
 
        spin_lock(&swap_lock);
-       if (offset < p->max && p->swap_map[offset]) {
-               if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
-                       p->swap_map[offset]++;
-                       result = 1;
-               } else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
+
+       if (unlikely(offset >= p->max))
+               goto unlock_out;
+
+       count = swap_count(p->swap_map[offset]);
+       has_cache = swap_has_cache(p->swap_map[offset]);
+
+       if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */
+
+               /* set SWAP_HAS_CACHE if there is no cache and entry is used */
+               if (!has_cache && count) {
+                       p->swap_map[offset] = encode_swapmap(count, true);
+                       result = 0;
+               } else if (has_cache) /* someone added cache */
+                       result = -EEXIST;
+               else if (!count) /* no users */
+                       result = -ENOENT;
+
+       } else if (count || has_cache) {
+               if (count < SWAP_MAP_MAX - 1) {
+                       p->swap_map[offset] = encode_swapmap(count + 1,
+                                                            has_cache);
+                       result = 0;
+               } else if (count <= SWAP_MAP_MAX) {
                        if (swap_overflow++ < 5)
-                               printk(KERN_WARNING "swap_dup: swap entry overflow\n");
-                       p->swap_map[offset] = SWAP_MAP_MAX;
-                       result = 1;
+                               printk(KERN_WARNING
+                                      "swap_dup: swap entry overflow\n");
+                       p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX,
+                                                             has_cache);
+                       result = 0;
                }
-       }
+       } else
+               result = -ENOENT; /* unused swap entry */
+unlock_out:
        spin_unlock(&swap_lock);
 out:
        return result;
@@ -1978,6 +2127,27 @@ bad_file:
        printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
        goto out;
 }
+/*
+ * increase reference count of swap entry by 1.
+ */
+void swap_duplicate(swp_entry_t entry)
+{
+       __swap_duplicate(entry, SWAP_MAP);
+}
+
+/*
+ * @entry: swap entry for which we allocate swap cache.
+ *
+ * Called when allocating swap cache for exising swap entry,
+ * This can return error codes. Returns 0 at success.
+ * -EBUSY means there is a swap cache.
+ * Note: return code is different from swap_duplicate().
+ */
+int swapcache_prepare(swp_entry_t entry)
+{
+       return __swap_duplicate(entry, SWAP_CACHE);
+}
+
 
 struct swap_info_struct *
 get_swap_info_struct(unsigned type)
@@ -2016,7 +2186,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
                /* Don't read in free or bad pages */
                if (!si->swap_map[toff])
                        break;
-               if (si->swap_map[toff] == SWAP_MAP_BAD)
+               if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
                        break;
        }
        /* Count contiguous allocated slots below our target */
@@ -2024,7 +2194,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
                /* Don't read in free or bad pages */
                if (!si->swap_map[toff])
                        break;
-               if (si->swap_map[toff] == SWAP_MAP_BAD)
+               if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD)
                        break;
        }
        spin_unlock(&swap_lock);
index 12e1579f916546db1933998f4f655a8c1f8e2d34..ccc3ecf7cb9839a90eddc0086be770796e5b8884 100644 (file)
@@ -267,8 +267,21 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 }
 EXPORT_SYMBOL(truncate_inode_pages);
 
-unsigned long __invalidate_mapping_pages(struct address_space *mapping,
-                               pgoff_t start, pgoff_t end, bool be_atomic)
+/**
+ * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
+ * @mapping: the address_space which holds the pages to invalidate
+ * @start: the offset 'from' which to invalidate
+ * @end: the offset 'to' which to invalidate (inclusive)
+ *
+ * This function only removes the unlocked pages, if you want to
+ * remove all the pages of one inode, you must call truncate_inode_pages.
+ *
+ * invalidate_mapping_pages() will not block on IO activity. It will not
+ * invalidate pages which are dirty, locked, under writeback or mapped into
+ * pagetables.
+ */
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+                                      pgoff_t start, pgoff_t end)
 {
        struct pagevec pvec;
        pgoff_t next = start;
@@ -309,30 +322,10 @@ unlock:
                                break;
                }
                pagevec_release(&pvec);
-               if (likely(!be_atomic))
-                       cond_resched();
+               cond_resched();
        }
        return ret;
 }
-
-/**
- * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
- * @mapping: the address_space which holds the pages to invalidate
- * @start: the offset 'from' which to invalidate
- * @end: the offset 'to' which to invalidate (inclusive)
- *
- * This function only removes the unlocked pages, if you want to
- * remove all the pages of one inode, you must call truncate_inode_pages.
- *
- * invalidate_mapping_pages() will not block on IO activity. It will not
- * invalidate pages which are dirty, locked, under writeback or mapped into
- * pagetables.
- */
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
-                               pgoff_t start, pgoff_t end)
-{
-       return __invalidate_mapping_pages(mapping, start, end, false);
-}
 EXPORT_SYMBOL(invalidate_mapping_pages);
 
 /*
index abc65aa7cdfc7bcfc76817afe2451cce7a4ece3e..d5d2213728c51725fb658b5e20225cb429d93015 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -233,13 +233,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  * @pages:     array that receives pointers to the pages pinned.
  *             Should be at least nr_pages long.
  *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
  * Returns number of pages pinned. This may be fewer than the number
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
+ *
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm, with force=0 and vma=NULL. However
+ * unlike get_user_pages, it must be called without mmap_sem held.
+ *
+ * get_user_pages_fast may take mmap_sem and page table locks, so no
+ * assumptions can be made about lack of locking. get_user_pages_fast is to be
+ * implemented in a way that is advantageous (vs get_user_pages()) when the
+ * user memory area is already faulted in and present in ptes. However if the
+ * pages have to be faulted in, it may turn out to be slightly slower so
+ * callers need to carefully consider what to use. On many architectures,
+ * get_user_pages_fast simply falls back to get_user_pages.
  */
 int __attribute__((weak)) get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
index 95c08a8cc2ba4fb015f71b605271f9b8804c68fb..4139aa52b941e16d95b20ae481a0775b02158518 100644 (file)
@@ -470,8 +470,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
                swp_entry_t swap = { .val = page_private(page) };
                __delete_from_swap_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
-               mem_cgroup_uncharge_swapcache(page, swap);
-               swap_free(swap);
+               swapcache_free(swap, page);
        } else {
                __remove_from_page_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
@@ -514,7 +513,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  *
  * lru_lock must not be held, interrupts must be enabled.
  */
-#ifdef CONFIG_UNEVICTABLE_LRU
 void putback_lru_page(struct page *page)
 {
        int lru;
@@ -568,20 +566,6 @@ redo:
        put_page(page);         /* drop ref from isolate */
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-void putback_lru_page(struct page *page)
-{
-       int lru;
-       VM_BUG_ON(PageLRU(page));
-
-       lru = !!TestClearPageActive(page) + page_is_file_cache(page);
-       lru_cache_add_lru(page, lru);
-       put_page(page);
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -593,6 +577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
        struct pagevec freed_pvec;
        int pgactivate = 0;
        unsigned long nr_reclaimed = 0;
+       unsigned long vm_flags;
 
        cond_resched();
 
@@ -643,7 +628,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
                }
 
-               referenced = page_referenced(page, 1, sc->mem_cgroup);
+               referenced = page_referenced(page, 1,
+                                               sc->mem_cgroup, &vm_flags);
                /* In active use or really unfreeable?  Activate it. */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
                                        referenced && page_mapping_inuse(page))
@@ -943,18 +929,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                        /* Check that we have not crossed a zone boundary. */
                        if (unlikely(page_zone_id(cursor_page) != zone_id))
                                continue;
-                       switch (__isolate_lru_page(cursor_page, mode, file)) {
-                       case 0:
+                       if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
                                nr_taken++;
                                scan++;
-                               break;
-
-                       case -EBUSY:
-                               /* else it is being freed elsewhere */
-                               list_move(&cursor_page->lru, src);
-                       default:
-                               break;  /* ! on LRU or wrong list */
                        }
                }
        }
@@ -1061,6 +1039,19 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
        unsigned long nr_scanned = 0;
        unsigned long nr_reclaimed = 0;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       int lumpy_reclaim = 0;
+
+       /*
+        * If we need a large contiguous chunk of memory, or have
+        * trouble getting a small set of contiguous pages, we
+        * will reclaim both active and inactive pages.
+        *
+        * We use the same threshold as pageout congestion_wait below.
+        */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               lumpy_reclaim = 1;
+       else if (sc->order && priority < DEF_PRIORITY - 2)
+               lumpy_reclaim = 1;
 
        pagevec_init(&pvec, 1);
 
@@ -1073,19 +1064,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_freed;
                unsigned long nr_active;
                unsigned int count[NR_LRU_LISTS] = { 0, };
-               int mode = ISOLATE_INACTIVE;
-
-               /*
-                * If we need a large contiguous chunk of memory, or have
-                * trouble getting a small set of contiguous pages, we
-                * will reclaim both active and inactive pages.
-                *
-                * We use the same threshold as pageout congestion_wait below.
-                */
-               if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-                       mode = ISOLATE_BOTH;
-               else if (sc->order && priority < DEF_PRIORITY - 2)
-                       mode = ISOLATE_BOTH;
+               int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 
                nr_taken = sc->isolate_pages(sc->swap_cluster_max,
                             &page_list, &nr_scan, sc->order, mode,
@@ -1122,7 +1101,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 * but that should be acceptable to the caller
                 */
                if (nr_freed < nr_taken && !current_is_kswapd() &&
-                                       sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+                   lumpy_reclaim) {
                        congestion_wait(WRITE, HZ/10);
 
                        /*
@@ -1217,18 +1196,54 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
  * But we had to alter page->flags anyway.
  */
 
+static void move_active_pages_to_lru(struct zone *zone,
+                                    struct list_head *list,
+                                    enum lru_list lru)
+{
+       unsigned long pgmoved = 0;
+       struct pagevec pvec;
+       struct page *page;
+
+       pagevec_init(&pvec, 1);
+
+       while (!list_empty(list)) {
+               page = lru_to_page(list);
+               prefetchw_prev_lru_page(page, list, flags);
+
+               VM_BUG_ON(PageLRU(page));
+               SetPageLRU(page);
+
+               VM_BUG_ON(!PageActive(page));
+               if (!is_active_lru(lru))
+                       ClearPageActive(page);  /* we are de-activating */
+
+               list_move(&page->lru, &zone->lru[lru].list);
+               mem_cgroup_add_lru_list(page, lru);
+               pgmoved++;
+
+               if (!pagevec_add(&pvec, page) || list_empty(list)) {
+                       spin_unlock_irq(&zone->lru_lock);
+                       if (buffer_heads_over_limit)
+                               pagevec_strip(&pvec);
+                       __pagevec_release(&pvec);
+                       spin_lock_irq(&zone->lru_lock);
+               }
+       }
+       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
+       if (!is_active_lru(lru))
+               __count_vm_events(PGDEACTIVATE, pgmoved);
+}
 
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        struct scan_control *sc, int priority, int file)
 {
        unsigned long pgmoved;
-       int pgdeactivate = 0;
        unsigned long pgscanned;
+       unsigned long vm_flags;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
+       LIST_HEAD(l_active);
        LIST_HEAD(l_inactive);
        struct page *page;
-       struct pagevec pvec;
-       enum lru_list lru;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
        lru_add_drain();
@@ -1245,13 +1260,14 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        }
        reclaim_stat->recent_scanned[!!file] += pgmoved;
 
+       __count_zone_vm_events(PGREFILL, zone, pgscanned);
        if (file)
                __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
        else
                __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
        spin_unlock_irq(&zone->lru_lock);
 
-       pgmoved = 0;
+       pgmoved = 0;  /* count referenced (mapping) mapped pages */
        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
@@ -1264,58 +1280,44 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
-                   page_referenced(page, 0, sc->mem_cgroup))
+                   page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
                        pgmoved++;
+                       /*
+                        * Identify referenced, file-backed active pages and
+                        * give them one more trip around the active list. So
+                        * that executable code get better chances to stay in
+                        * memory under moderate memory pressure.  Anon pages
+                        * are not likely to be evicted by use-once streaming
+                        * IO, plus JVM can create lots of anon VM_EXEC pages,
+                        * so we ignore them here.
+                        */
+                       if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
+                               list_add(&page->lru, &l_active);
+                               continue;
+                       }
+               }
 
                list_add(&page->lru, &l_inactive);
        }
 
        /*
-        * Move the pages to the [file or anon] inactive list.
+        * Move pages back to the lru list.
         */
-       pagevec_init(&pvec, 1);
-       lru = LRU_BASE + file * LRU_FILE;
-
        spin_lock_irq(&zone->lru_lock);
        /*
-        * Count referenced pages from currently used mappings as
-        * rotated, even though they are moved to the inactive list.
-        * This helps balance scan pressure between file and anonymous
-        * pages in get_scan_ratio.
+        * Count referenced pages from currently used mappings as rotated,
+        * even though only some of them are actually re-activated.  This
+        * helps balance scan pressure between file and anonymous pages in
+        * get_scan_ratio.
         */
        reclaim_stat->recent_rotated[!!file] += pgmoved;
 
-       pgmoved = 0;
-       while (!list_empty(&l_inactive)) {
-               page = lru_to_page(&l_inactive);
-               prefetchw_prev_lru_page(page, &l_inactive, flags);
-               VM_BUG_ON(PageLRU(page));
-               SetPageLRU(page);
-               VM_BUG_ON(!PageActive(page));
-               ClearPageActive(page);
+       move_active_pages_to_lru(zone, &l_active,
+                                               LRU_ACTIVE + file * LRU_FILE);
+       move_active_pages_to_lru(zone, &l_inactive,
+                                               LRU_BASE   + file * LRU_FILE);
 
-               list_move(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_add_lru_list(page, lru);
-               pgmoved++;
-               if (!pagevec_add(&pvec, page)) {
-                       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-                       spin_unlock_irq(&zone->lru_lock);
-                       pgdeactivate += pgmoved;
-                       pgmoved = 0;
-                       if (buffer_heads_over_limit)
-                               pagevec_strip(&pvec);
-                       __pagevec_release(&pvec);
-                       spin_lock_irq(&zone->lru_lock);
-               }
-       }
-       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-       pgdeactivate += pgmoved;
-       __count_zone_vm_events(PGREFILL, zone, pgscanned);
-       __count_vm_events(PGDEACTIVATE, pgdeactivate);
        spin_unlock_irq(&zone->lru_lock);
-       if (buffer_heads_over_limit)
-               pagevec_strip(&pvec);
-       pagevec_release(&pvec);
 }
 
 static int inactive_anon_is_low_global(struct zone *zone)
@@ -1350,12 +1352,48 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
        return low;
 }
 
+static int inactive_file_is_low_global(struct zone *zone)
+{
+       unsigned long active, inactive;
+
+       active = zone_page_state(zone, NR_ACTIVE_FILE);
+       inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+
+       return (active > inactive);
+}
+
+/**
+ * inactive_file_is_low - check if file pages need to be deactivated
+ * @zone: zone to check
+ * @sc:   scan control of this context
+ *
+ * When the system is doing streaming IO, memory pressure here
+ * ensures that active file pages get deactivated, until more
+ * than half of the file pages are on the inactive list.
+ *
+ * Once we get to that situation, protect the system's working
+ * set from being evicted by disabling active file page aging.
+ *
+ * This uses a different ratio than the anonymous pages, because
+ * the page cache uses a use-once replacement algorithm.
+ */
+static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
+{
+       int low;
+
+       if (scanning_global_lru(sc))
+               low = inactive_file_is_low_global(zone);
+       else
+               low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
+       return low;
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        struct zone *zone, struct scan_control *sc, int priority)
 {
        int file = is_file_lru(lru);
 
-       if (lru == LRU_ACTIVE_FILE) {
+       if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
                shrink_active_list(nr_to_scan, zone, sc, priority, file);
                return 0;
        }
@@ -1384,13 +1422,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        unsigned long ap, fp;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
-       /* If we have no swap space, do not bother scanning anon pages. */
-       if (!sc->may_swap || (nr_swap_pages <= 0)) {
-               percent[0] = 0;
-               percent[1] = 100;
-               return;
-       }
-
        anon  = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
                zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
        file  = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
@@ -1400,7 +1431,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
                   force-scan anon pages. */
-               if (unlikely(file + free <= zone->pages_high)) {
+               if (unlikely(file + free <= high_wmark_pages(zone))) {
                        percent[0] = 100;
                        percent[1] = 0;
                        return;
@@ -1455,6 +1486,26 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        percent[1] = 100 - percent[0];
 }
 
+/*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we collected @swap_cluster_max pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+                                      unsigned long *nr_saved_scan,
+                                      unsigned long swap_cluster_max)
+{
+       unsigned long nr;
+
+       *nr_saved_scan += nr_to_scan;
+       nr = *nr_saved_scan;
+
+       if (nr >= swap_cluster_max)
+               *nr_saved_scan = 0;
+       else
+               nr = 0;
+
+       return nr;
+}
 
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
@@ -1468,26 +1519,30 @@ static void shrink_zone(int priority, struct zone *zone,
        enum lru_list l;
        unsigned long nr_reclaimed = sc->nr_reclaimed;
        unsigned long swap_cluster_max = sc->swap_cluster_max;
+       int noswap = 0;
 
-       get_scan_ratio(zone, sc, percent);
+       /* If we have no swap space, do not bother scanning anon pages. */
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
+               noswap = 1;
+               percent[0] = 0;
+               percent[1] = 100;
+       } else
+               get_scan_ratio(zone, sc, percent);
 
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
                unsigned long scan;
 
                scan = zone_nr_pages(zone, sc, l);
-               if (priority) {
+               if (priority || noswap) {
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
                }
-               if (scanning_global_lru(sc)) {
-                       zone->lru[l].nr_scan += scan;
-                       nr[l] = zone->lru[l].nr_scan;
-                       if (nr[l] >= swap_cluster_max)
-                               zone->lru[l].nr_scan = 0;
-                       else
-                               nr[l] = 0;
-               } else
+               if (scanning_global_lru(sc))
+                       nr[l] = nr_scan_try_batch(scan,
+                                                 &zone->lru[l].nr_saved_scan,
+                                                 swap_cluster_max);
+               else
                        nr[l] = scan;
        }
 
@@ -1521,7 +1576,7 @@ static void shrink_zone(int priority, struct zone *zone,
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_anon_is_low(zone, sc))
+       if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
        throttle_vm_writeout(sc->gfp_mask);
@@ -1532,11 +1587,13 @@ static void shrink_zone(int priority, struct zone *zone,
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
  *
- * We reclaim from a zone even if that zone is over pages_high.  Because:
+ * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
+ * Because:
  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  *    allocation or
- * b) The zones may be over pages_high but they must go *over* pages_high to
- *    satisfy the `incremental min' zone defense algorithm.
+ * b) The target zone may be at high_wmark_pages(zone) but the lower zones
+ *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
+ *    zone defense algorithm.
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
@@ -1742,7 +1799,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
- * they are all at pages_high.
+ * they are all at high_wmark_pages(zone).
  *
  * Returns the number of pages which were actually freed.
  *
@@ -1755,11 +1812,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  * the zone for when the problem goes away.
  *
  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
- * zones which have free_pages > pages_high, but once a zone is found to have
- * free_pages <= pages_high, we scan that zone and the lower zones regardless
- * of the number of free pages in the lower zones.  This interoperates with
- * the page allocator fallback scheme to ensure that aging of pages is balanced
- * across the zones.
+ * zones which have free_pages > high_wmark_pages(zone), but once a zone is
+ * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
+ * lower zones regardless of the number of free pages in the lower zones. This
+ * interoperates with the page allocator fallback scheme to ensure that aging
+ * of pages is balanced across the zones.
  */
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 {
@@ -1780,7 +1837,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        };
        /*
         * temp_priority is used to remember the scanning priority at which
-        * this zone was successfully refilled to free_pages == pages_high.
+        * this zone was successfully refilled to
+        * free_pages == high_wmark_pages(zone).
         */
        int temp_priority[MAX_NR_ZONES];
 
@@ -1825,8 +1883,8 @@ loop_again:
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                        &sc, priority, 0);
 
-                       if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                              0, 0)) {
+                       if (!zone_watermark_ok(zone, order,
+                                       high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
                        }
@@ -1860,8 +1918,8 @@ loop_again:
                                        priority != DEF_PRIORITY)
                                continue;
 
-                       if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                              end_zone, 0))
+                       if (!zone_watermark_ok(zone, order,
+                                       high_wmark_pages(zone), end_zone, 0))
                                all_zones_ok = 0;
                        temp_priority[i] = priority;
                        sc.nr_scanned = 0;
@@ -1870,8 +1928,8 @@ loop_again:
                         * We put equal pressure on every zone, unless one
                         * zone has way too many pages free already.
                         */
-                       if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
-                                               end_zone, 0))
+                       if (!zone_watermark_ok(zone, order,
+                                       8*high_wmark_pages(zone), end_zone, 0))
                                shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -2037,7 +2095,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
 
        pgdat = zone->zone_pgdat;
-       if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
+       if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
@@ -2084,11 +2142,11 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
                                                l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
-                       if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                       zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
+                       if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
                                unsigned long nr_to_scan;
 
-                               zone->lru[l].nr_scan = 0;
+                               zone->lru[l].nr_saved_scan = 0;
                                nr_to_scan = min(nr_pages, lru_pages);
                                nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
@@ -2290,6 +2348,48 @@ int sysctl_min_unmapped_ratio = 1;
  */
 int sysctl_min_slab_ratio = 5;
 
+static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
+{
+       unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
+       unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
+               zone_page_state(zone, NR_ACTIVE_FILE);
+
+       /*
+        * It's possible for there to be more file mapped pages than
+        * accounted for by the pages on the file LRU lists because
+        * tmpfs pages accounted for as ANON can also be FILE_MAPPED
+        */
+       return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
+}
+
+/* Work out how many page cache pages we can reclaim in this reclaim_mode */
+static long zone_pagecache_reclaimable(struct zone *zone)
+{
+       long nr_pagecache_reclaimable;
+       long delta = 0;
+
+       /*
+        * If RECLAIM_SWAP is set, then all file pages are considered
+        * potentially reclaimable. Otherwise, we have to worry about
+        * pages like swapcache and zone_unmapped_file_pages() provides
+        * a better estimate
+        */
+       if (zone_reclaim_mode & RECLAIM_SWAP)
+               nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
+       else
+               nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
+
+       /* If we can't clean pages, remove dirty pages from consideration */
+       if (!(zone_reclaim_mode & RECLAIM_WRITE))
+               delta += zone_page_state(zone, NR_FILE_DIRTY);
+
+       /* Watch for any possible underflows due to delta */
+       if (unlikely(delta > nr_pagecache_reclaimable))
+               delta = nr_pagecache_reclaimable;
+
+       return nr_pagecache_reclaimable - delta;
+}
+
 /*
  * Try to free up some pages from this zone through reclaim.
  */
@@ -2324,9 +2424,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       if (zone_page_state(zone, NR_FILE_PAGES) -
-               zone_page_state(zone, NR_FILE_MAPPED) >
-               zone->min_unmapped_pages) {
+       if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
                /*
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
@@ -2384,20 +2482,18 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * if less than a specified percentage of the zone is used by
         * unmapped file backed pages.
         */
-       if (zone_page_state(zone, NR_FILE_PAGES) -
-           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
-           && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
-                       <= zone->min_slab_pages)
-               return 0;
+       if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
+           zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
+               return ZONE_RECLAIM_FULL;
 
        if (zone_is_all_unreclaimable(zone))
-               return 0;
+               return ZONE_RECLAIM_FULL;
 
        /*
         * Do not scan if the allocation should not be delayed.
         */
        if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
-                       return 0;
+               return ZONE_RECLAIM_NOSCAN;
 
        /*
         * Only run zone reclaim on the local zone or on zones that do not
@@ -2407,18 +2503,21 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         */
        node_id = zone_to_nid(zone);
        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
-               return 0;
+               return ZONE_RECLAIM_NOSCAN;
 
        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
-               return 0;
+               return ZONE_RECLAIM_NOSCAN;
+
        ret = __zone_reclaim(zone, gfp_mask, order);
        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 
+       if (!ret)
+               count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
+
        return ret;
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
@@ -2665,4 +2764,3 @@ void scan_unevictable_unregister_node(struct node *node)
        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
 
-#endif
index 74d66dba0cbe45429f7b663c783a6f839e2ceb3f..138bed53706ed325e2d4aac3ae0065cffccdda47 100644 (file)
@@ -629,10 +629,8 @@ static const char * const vmstat_text[] = {
        "nr_active_anon",
        "nr_inactive_file",
        "nr_active_file",
-#ifdef CONFIG_UNEVICTABLE_LRU
        "nr_unevictable",
        "nr_mlock",
-#endif
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
@@ -675,6 +673,9 @@ static const char * const vmstat_text[] = {
        TEXTS_FOR_ZONES("pgscan_kswapd")
        TEXTS_FOR_ZONES("pgscan_direct")
 
+#ifdef CONFIG_NUMA
+       "zone_reclaim_failed",
+#endif
        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
@@ -687,7 +688,6 @@ static const char * const vmstat_text[] = {
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
        "unevictable_pgs_culled",
        "unevictable_pgs_scanned",
        "unevictable_pgs_rescued",
@@ -697,7 +697,6 @@ static const char * const vmstat_text[] = {
        "unevictable_pgs_stranded",
        "unevictable_pgs_mlockfreed",
 #endif
-#endif
 };
 
 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
@@ -710,18 +709,14 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        min      %lu"
                   "\n        low      %lu"
                   "\n        high     %lu"
-                  "\n        scanned  %lu (aa: %lu ia: %lu af: %lu if: %lu)"
+                  "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
-                  zone->pages_min,
-                  zone->pages_low,
-                  zone->pages_high,
+                  min_wmark_pages(zone),
+                  low_wmark_pages(zone),
+                  high_wmark_pages(zone),
                   zone->pages_scanned,
-                  zone->lru[LRU_ACTIVE_ANON].nr_scan,
-                  zone->lru[LRU_INACTIVE_ANON].nr_scan,
-                  zone->lru[LRU_ACTIVE_FILE].nr_scan,
-                  zone->lru[LRU_INACTIVE_FILE].nr_scan,
                   zone->spanned_pages,
                   zone->present_pages);
 
index 1a94a3037370a4c70ee3862fb9676a4975eddeb1..5c93435b0347cc691db1ba36eeb1a23cb33809b6 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/in.h>
@@ -201,6 +202,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
+       kmemcheck_annotate_bitfield(skb, flags1);
+       kmemcheck_annotate_bitfield(skb, flags2);
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
@@ -217,6 +220,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
+               kmemcheck_annotate_bitfield(child, flags1);
+               kmemcheck_annotate_bitfield(child, flags2);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);
 
@@ -635,6 +640,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                if (!n)
                        return NULL;
+
+               kmemcheck_annotate_bitfield(n, flags1);
+               kmemcheck_annotate_bitfield(n, flags2);
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }
 
index 06e26b77ad9e78437948592eb1781539b6947703..b0ba569bc97361dec5b2619a46ffbeab50331dc2 100644 (file)
@@ -945,6 +945,8 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
                sk = kmalloc(prot->obj_size, priority);
 
        if (sk != NULL) {
+               kmemcheck_annotate_bitfield(sk, flags);
+
                if (security_sk_alloc(sk, family, priority))
                        goto out_free;
 
index 68a8d892c711e44764e8856ef265dbb60f1f915b..61283f9288250cc6b74d440abf7621947ed91313 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <net/inet_hashtables.h>
 #include <net/inet_timewait_sock.h>
 #include <net/ip.h>
@@ -120,6 +121,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
        if (tw != NULL) {
                const struct inet_sock *inet = inet_sk(sk);
 
+               kmemcheck_annotate_bitfield(tw, flags);
+
                /* Give us an identity. */
                tw->tw_daddr        = inet->daddr;
                tw->tw_rcv_saddr    = inet->rcv_saddr;
index 8847add6ca164a7aea722b96978c44abb5ba7158..5ed8931dfe98f8af810a28a046f9c6729f07e6a3 100644 (file)
@@ -124,7 +124,7 @@ svc_pool_map_choose_mode(void)
 {
        unsigned int node;
 
-       if (num_online_nodes() > 1) {
+       if (nr_online_nodes > 1) {
                /*
                 * Actually have multiple NUMA nodes,
                 * so split pools on NUMA node boundaries
index b75d28cba3f75fef74d90d01b2fb8990984a1c55..428b065ba6959791e4f7ab4c180a914c9e3ba917 100644 (file)
@@ -26,7 +26,8 @@ config SAMPLE_TRACE_EVENTS
          This build trace event example modules.
 
 config SAMPLE_KOBJECT
-       tristate "Build kobject examples"
+       tristate "Build kobject examples -- loadable modules only"
+       depends on m
        help
          This config option will allow you to build a number of
          different kobject sample modules showing how to use kobjects,
diff --git a/samples/firmware_class/firmware_sample_driver.c b/samples/firmware_class/firmware_sample_driver.c
deleted file mode 100644 (file)
index 219a298..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * firmware_sample_driver.c -
- *
- * Copyright (c) 2003 Manuel Estrada Sainz
- *
- * Sample code on how to use request_firmware() from drivers.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/string.h>
-#include <linux/firmware.h>
-
-static struct device ghost_device = {
-       .bus_id    = "ghost0",
-};
-
-
-static void sample_firmware_load(char *firmware, int size)
-{
-       u8 buf[size+1];
-       memcpy(buf, firmware, size);
-       buf[size] = '\0';
-       printk(KERN_INFO "firmware_sample_driver: firmware: %s\n", buf);
-}
-
-static void sample_probe_default(void)
-{
-       /* uses the default method to get the firmware */
-       const struct firmware *fw_entry;
-       int retval;
-
-       printk(KERN_INFO "firmware_sample_driver: "
-               "a ghost device got inserted :)\n");
-
-       retval = request_firmware(&fw_entry, "sample_driver_fw", &ghost_device);
-       if (retval) {
-               printk(KERN_ERR
-                      "firmware_sample_driver: Firmware not available\n");
-               return;
-       }
-
-       sample_firmware_load(fw_entry->data, fw_entry->size);
-
-       release_firmware(fw_entry);
-
-       /* finish setting up the device */
-}
-
-static void sample_probe_specific(void)
-{
-       int retval;
-       /* Uses some specific hotplug support to get the firmware from
-        * userspace  directly into the hardware, or via some sysfs file */
-
-       /* NOTE: This currently doesn't work */
-
-       printk(KERN_INFO "firmware_sample_driver: "
-               "a ghost device got inserted :)\n");
-
-       retval = request_firmware(NULL, "sample_driver_fw", &ghost_device);
-       if (retval) {
-               printk(KERN_ERR
-                      "firmware_sample_driver: Firmware load failed\n");
-               return;
-       }
-
-       /* request_firmware blocks until userspace finished, so at
-        * this point the firmware should be already in the device */
-
-       /* finish setting up the device */
-}
-
-static void sample_probe_async_cont(const struct firmware *fw, void *context)
-{
-       if (!fw) {
-               printk(KERN_ERR
-                      "firmware_sample_driver: firmware load failed\n");
-               return;
-       }
-
-       printk(KERN_INFO "firmware_sample_driver: device pointer \"%s\"\n",
-              (char *)context);
-       sample_firmware_load(fw->data, fw->size);
-}
-
-static void sample_probe_async(void)
-{
-       /* Let's say that I can't sleep */
-       int error;
-       error = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
-                                       "sample_driver_fw", &ghost_device,
-                                       "my device pointer",
-                                       sample_probe_async_cont);
-       if (error)
-               printk(KERN_ERR "firmware_sample_driver:"
-                      " request_firmware_nowait failed\n");
-}
-
-static int __init sample_init(void)
-{
-       device_initialize(&ghost_device);
-       /* since there is no real hardware insertion I just call the
-        * sample probe functions here */
-       sample_probe_specific();
-       sample_probe_default();
-       sample_probe_async();
-       return 0;
-}
-
-static void __exit sample_exit(void)
-{
-}
-
-module_init(sample_init);
-module_exit(sample_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/samples/firmware_class/firmware_sample_firmware_class.c b/samples/firmware_class/firmware_sample_firmware_class.c
deleted file mode 100644 (file)
index e6cf7a4..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * firmware_sample_firmware_class.c -
- *
- * Copyright (c) 2003 Manuel Estrada Sainz
- *
- * NOTE: This is just a probe of concept, if you think that your driver would
- * be well served by this mechanism please contact me first.
- *
- * DON'T USE THIS CODE AS IS
- *
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/firmware.h>
-
-
-MODULE_AUTHOR("Manuel Estrada Sainz");
-MODULE_DESCRIPTION("Hackish sample for using firmware class directly");
-MODULE_LICENSE("GPL");
-
-static inline struct class_device *to_class_dev(struct kobject *obj)
-{
-       return container_of(obj, struct class_device, kobj);
-}
-
-static inline
-struct class_device_attribute *to_class_dev_attr(struct attribute *_attr)
-{
-       return container_of(_attr, struct class_device_attribute, attr);
-}
-
-struct firmware_priv {
-       char fw_id[FIRMWARE_NAME_MAX];
-       s32 loading:2;
-       u32 abort:1;
-};
-
-static ssize_t firmware_loading_show(struct class_device *class_dev, char *buf)
-{
-       struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-       return sprintf(buf, "%d\n", fw_priv->loading);
-}
-
-static ssize_t firmware_loading_store(struct class_device *class_dev,
-                                     const char *buf, size_t count)
-{
-       struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-       int prev_loading = fw_priv->loading;
-
-       fw_priv->loading = simple_strtol(buf, NULL, 10);
-
-       switch (fw_priv->loading) {
-       case -1:
-               /* abort load an panic */
-               break;
-       case 1:
-               /* setup load */
-               break;
-       case 0:
-               if (prev_loading == 1) {
-                       /* finish load and get the device back to working
-                        * state */
-               }
-               break;
-       }
-
-       return count;
-}
-static CLASS_DEVICE_ATTR(loading, 0644,
-                        firmware_loading_show, firmware_loading_store);
-
-static ssize_t firmware_data_read(struct kobject *kobj,
-                                 struct bin_attribute *bin_attr,
-                                 char *buffer, loff_t offset, size_t count)
-{
-       struct class_device *class_dev = to_class_dev(kobj);
-       struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-
-       /* read from the devices firmware memory */
-
-       return count;
-}
-static ssize_t firmware_data_write(struct kobject *kobj,
-                                  struct bin_attribute *bin_attr,
-                                  char *buffer, loff_t offset, size_t count)
-{
-       struct class_device *class_dev = to_class_dev(kobj);
-       struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-
-       /* write to the devices firmware memory */
-
-       return count;
-}
-static struct bin_attribute firmware_attr_data = {
-       .attr = {.name = "data", .mode = 0644},
-       .size = 0,
-       .read = firmware_data_read,
-       .write = firmware_data_write,
-};
-static int fw_setup_class_device(struct class_device *class_dev,
-                                const char *fw_name,
-                                struct device *device)
-{
-       int retval;
-       struct firmware_priv *fw_priv;
-
-       fw_priv = kzalloc(sizeof(struct firmware_priv), GFP_KERNEL);
-       if (!fw_priv) {
-               retval = -ENOMEM;
-               goto out;
-       }
-
-       memset(class_dev, 0, sizeof(*class_dev));
-
-       strncpy(fw_priv->fw_id, fw_name, FIRMWARE_NAME_MAX);
-       fw_priv->fw_id[FIRMWARE_NAME_MAX-1] = '\0';
-
-       strncpy(class_dev->class_id, device->bus_id, BUS_ID_SIZE);
-       class_dev->class_id[BUS_ID_SIZE-1] = '\0';
-       class_dev->dev = device;
-
-       class_dev->class = &firmware_class;
-       class_set_devdata(class_dev, fw_priv);
-       retval = class_device_register(class_dev);
-       if (retval) {
-               printk(KERN_ERR "%s: class_device_register failed\n",
-                      __func__);
-               goto error_free_fw_priv;
-       }
-
-       retval = sysfs_create_bin_file(&class_dev->kobj, &firmware_attr_data);
-       if (retval) {
-               printk(KERN_ERR "%s: sysfs_create_bin_file failed\n",
-                      __func__);
-               goto error_unreg_class_dev;
-       }
-
-       retval = class_device_create_file(class_dev,
-                                         &class_device_attr_loading);
-       if (retval) {
-               printk(KERN_ERR "%s: class_device_create_file failed\n",
-                      __func__);
-               goto error_remove_data;
-       }
-
-       goto out;
-
-error_remove_data:
-       sysfs_remove_bin_file(&class_dev->kobj, &firmware_attr_data);
-error_unreg_class_dev:
-       class_device_unregister(class_dev);
-error_free_fw_priv:
-       kfree(fw_priv);
-out:
-       return retval;
-}
-static void fw_remove_class_device(struct class_device *class_dev)
-{
-       struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-
-       class_device_remove_file(class_dev, &class_device_attr_loading);
-       sysfs_remove_bin_file(&class_dev->kobj, &firmware_attr_data);
-       class_device_unregister(class_dev);
-}
-
-static struct class_device *class_dev;
-
-static struct device my_device = {
-       .bus_id    = "my_dev0",
-};
-
-static int __init firmware_sample_init(void)
-{
-       int error;
-
-       device_initialize(&my_device);
-       class_dev = kmalloc(sizeof(struct class_device), GFP_KERNEL);
-       if (!class_dev)
-               return -ENOMEM;
-
-       error = fw_setup_class_device(class_dev, "my_firmware_image",
-                                     &my_device);
-       if (error) {
-               kfree(class_dev);
-               return error;
-       }
-       return 0;
-
-}
-static void __exit firmware_sample_exit(void)
-{
-       struct firmware_priv *fw_priv = class_get_devdata(class_dev);
-       fw_remove_class_device(class_dev);
-       kfree(fw_priv);
-       kfree(class_dev);
-}
-
-module_init(firmware_sample_init);
-module_exit(firmware_sample_exit);
index 60dc0c48c929b15953a751a1875c1aebd767e80e..3e733146cd51c01acd792503162cc43e4b7a3d74 100755 (executable)
@@ -13,7 +13,7 @@
 use strict;
 
 my $P = $0;
-my $V = '0.15';
+my $V = '0.16';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -55,6 +55,10 @@ foreach my $chief (@penguin_chief) {
 }
 my $penguin_chiefs = "\(" . join("|",@penguin_chief_names) . "\)";
 
+# rfc822 email address - preloaded methods go here.
+my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
+my $rfc822_char = '[\\000-\\377]';
+
 if (!GetOptions(
                'email!' => \$email,
                'git!' => \$email_git,
@@ -161,7 +165,7 @@ foreach my $file (@ARGV) {
        }
        close(PATCH);
        if ($file_cnt == @files) {
-           die "$P: file '${file}' doesn't appear to be a patch.  "
+           warn "$P: file '${file}' doesn't appear to be a patch.  "
                . "Add -f to options?\n";
        }
        @files = sort_and_uniq(@files);
@@ -169,6 +173,7 @@ foreach my $file (@ARGV) {
 }
 
 my @email_to = ();
+my @list_to = ();
 my @scm = ();
 my @web = ();
 my @subsystem = ();
@@ -182,7 +187,7 @@ foreach my $file (@files) {
 
     my $exclude = 0;
     foreach my $line (@typevalue) {
-       if ($line =~ m/^(\C):(.*)/) {
+       if ($line =~ m/^(\C):\s*(.*)/) {
            my $type = $1;
            my $value = $2;
            if ($type eq 'X') {
@@ -196,7 +201,7 @@ foreach my $file (@files) {
     if (!$exclude) {
        my $tvi = 0;
        foreach my $line (@typevalue) {
-           if ($line =~ m/^(\C):(.*)/) {
+           if ($line =~ m/^(\C):\s*(.*)/) {
                my $type = $1;
                my $value = $2;
                if ($type eq 'F') {
@@ -215,29 +220,33 @@ foreach my $file (@files) {
 
 }
 
-if ($email_git_penguin_chiefs) {
+if ($email) {
     foreach my $chief (@penguin_chief) {
        if ($chief =~ m/^(.*):(.*)/) {
-           my $chief_name = $1;
-           my $chief_addr = $2;
+           my $email_address;
            if ($email_usename) {
-               push(@email_to, format_email($chief_name, $chief_addr));
+               $email_address = format_email($1, $2);
+           } else {
+               $email_address = $2;
+           }
+           if ($email_git_penguin_chiefs) {
+               push(@email_to, $email_address);
            } else {
-               push(@email_to, $chief_addr);
+               @email_to = grep(!/${email_address}/, @email_to);
            }
        }
     }
 }
 
-if ($email) {
-    my $address_cnt = @email_to;
-    if ($address_cnt == 0 && $email_list) {
-       push(@email_to, "linux-kernel\@vger.kernel.org");
+if ($email || $email_list) {
+    my @to = ();
+    if ($email) {
+       @to = (@to, @email_to);
     }
-
-#Don't sort email address list, but do remove duplicates
-    @email_to = uniq(@email_to);
-    output(@email_to);
+    if ($email_list) {
+       @to = (@to, @list_to);
+    }
+    output(uniq(@to));
 }
 
 if ($scm) {
@@ -307,10 +316,10 @@ Output type options:
   --multiline => print 1 entry per line
 
 Default options:
-  [--email --git --m --l --multiline]
+  [--email --git --m --n --l --multiline]
 
 Other options:
-  --version -> show version
+  --version => show version
   --help => show this help information
 
 EOT
@@ -347,6 +356,7 @@ sub format_email {
     my ($name, $email) = @_;
 
     $name =~ s/^\s+|\s+$//g;
+    $name =~ s/^\"|\"$//g;
     $email =~ s/^\s+|\s+$//g;
 
     my $formatted_email = "";
@@ -366,36 +376,41 @@ sub add_categories {
     $index = $index - 1;
     while ($index >= 0) {
        my $tv = $typevalue[$index];
-       if ($tv =~ m/^(\C):(.*)/) {
+       if ($tv =~ m/^(\C):\s*(.*)/) {
            my $ptype = $1;
            my $pvalue = $2;
            if ($ptype eq "L") {
-               my $subscr = $pvalue;
-               if ($subscr =~ m/\s*\(subscribers-only\)/) {
+               my $list_address = $pvalue;
+               my $list_additional = "";
+               if ($list_address =~ m/([^\s]+)\s+(.*)$/) {
+                   $list_address = $1;
+                   $list_additional = $2;
+               }
+               if ($list_additional =~ m/subscribers-only/) {
                    if ($email_subscriber_list) {
-                       $subscr =~ s/\s*\(subscribers-only\)//g;
-                       push(@email_to, $subscr);
+                       push(@list_to, $list_address);
                    }
                } else {
                    if ($email_list) {
-                       push(@email_to, $pvalue);
+                       push(@list_to, $list_address);
                    }
                }
            } elsif ($ptype eq "M") {
-               if ($email_maintainer) {
-                   if ($index >= 0) {
-                       my $tv = $typevalue[$index - 1];
-                       if ($tv =~ m/^(\C):(.*)/) {
-                           if ($1 eq "P" && $email_usename) {
-                               push(@email_to, format_email($2, $pvalue));
-                           } else {
-                               push(@email_to, $pvalue);
+               my $p_used = 0;
+               if ($index >= 0) {
+                   my $tv = $typevalue[$index - 1];
+                   if ($tv =~ m/^(\C):\s*(.*)/) {
+                       if ($1 eq "P") {
+                           if ($email_usename) {
+                               push_email_address(format_email($2, $pvalue));
+                               $p_used = 1;
                            }
                        }
-                   } else {
-                       push(@email_to, $pvalue);
                    }
                }
+               if (!$p_used) {
+                   push_email_addresses($pvalue);
+               }
            } elsif ($ptype eq "T") {
                push(@scm, $pvalue);
            } elsif ($ptype eq "W") {
@@ -412,10 +427,45 @@ sub add_categories {
     }
 }
 
+sub push_email_address {
+    my ($email_address) = @_;
+
+    my $email_name = "";
+    if ($email_address =~ m/([^<]+)<(.*\@.*)>$/) {
+       $email_name = $1;
+       $email_address = $2;
+    }
+
+    if ($email_maintainer) {
+       if ($email_usename && $email_name) {
+           push(@email_to, format_email($email_name, $email_address));
+       } else {
+           push(@email_to, $email_address);
+       }
+    }
+}
+
+sub push_email_addresses {
+    my ($address) = @_;
+
+    my @address_list = ();
+
+    if (rfc822_valid($address)) {
+       push_email_address($address);
+    } elsif (@address_list = rfc822_validlist($address)) {
+       my $array_count = shift(@address_list);
+       while (my $entry = shift(@address_list)) {
+           push_email_address($entry);
+       }
+    } else {
+       warn("Invalid MAINTAINERS address: '" . $address . "'\n");
+    }
+}
+
 sub which {
     my ($bin) = @_;
 
-    foreach my $path (split /:/, $ENV{PATH}) {
+    foreach my $path (split(/:/, $ENV{PATH})) {
        if (-e "$path/$bin") {
            return "$path/$bin";
        }
@@ -434,16 +484,21 @@ sub recent_git_signoffs {
     my @lines = ();
 
     if (which("git") eq "") {
-       die("$P: git not found.  Add --nogit to options?\n");
+       warn("$P: git not found.  Add --nogit to options?\n");
+       return;
+    }
+    if (!(-d ".git")) {
+       warn("$P: .git directory not found.  Use a git repository for better results.\n");
+       warn("$P: perhaps 'git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git'\n");
+       return;
     }
 
     $cmd = "git log --since=${email_git_since} -- ${file}";
-    $cmd .= " | grep -Pi \"^[-_        a-z]+by:.*\\\@\"";
+    $cmd .= " | grep -Ei \"^[-_        a-z]+by:.*\\\@.*\$\"";
     if (!$email_git_penguin_chiefs) {
-       $cmd .= " | grep -Pv \"${penguin_chiefs}\"";
+       $cmd .= " | grep -Ev \"${penguin_chiefs}\"";
     }
     $cmd .= " | cut -f2- -d\":\"";
-    $cmd .= " | sed -e \"s/^\\s+//g\"";
     $cmd .= " | sort | uniq -c | sort -rn";
 
     $output = `${cmd}`;
@@ -465,10 +520,6 @@ sub recent_git_signoffs {
        if ($line =~ m/(.+)<(.+)>/) {
            my $git_name = $1;
            my $git_addr = $2;
-           $git_name =~ tr/^\"//;
-           $git_name =~ tr/^\\s*//;
-           $git_name =~ tr/\"$//;
-           $git_name =~ tr/\\s*$//;
            if ($email_usename) {
                push(@email_to, format_email($git_name, $git_addr));
            } else {
@@ -481,7 +532,6 @@ sub recent_git_signoffs {
            push(@email_to, $line);
        }
     }
-    return $output;
 }
 
 sub uniq {
@@ -513,3 +563,97 @@ sub output {
        print("\n");
     }
 }
+
+my $rfc822re;
+
+sub make_rfc822re {
+#   Basic lexical tokens are specials, domain_literal, quoted_string, atom, and
+#   comment.  We must allow for rfc822_lwsp (or comments) after each of these.
+#   This regexp will only work on addresses which have had comments stripped
+#   and replaced with rfc822_lwsp.
+
+    my $specials = '()<>@,;:\\\\".\\[\\]';
+    my $controls = '\\000-\\037\\177';
+
+    my $dtext = "[^\\[\\]\\r\\\\]";
+    my $domain_literal = "\\[(?:$dtext|\\\\.)*\\]$rfc822_lwsp*";
+
+    my $quoted_string = "\"(?:[^\\\"\\r\\\\]|\\\\.|$rfc822_lwsp)*\"$rfc822_lwsp*";
+
+#   Use zero-width assertion to spot the limit of an atom.  A simple
+#   $rfc822_lwsp* causes the regexp engine to hang occasionally.
+    my $atom = "[^$specials $controls]+(?:$rfc822_lwsp+|\\Z|(?=[\\[\"$specials]))";
+    my $word = "(?:$atom|$quoted_string)";
+    my $localpart = "$word(?:\\.$rfc822_lwsp*$word)*";
+
+    my $sub_domain = "(?:$atom|$domain_literal)";
+    my $domain = "$sub_domain(?:\\.$rfc822_lwsp*$sub_domain)*";
+
+    my $addr_spec = "$localpart\@$rfc822_lwsp*$domain";
+
+    my $phrase = "$word*";
+    my $route = "(?:\@$domain(?:,\@$rfc822_lwsp*$domain)*:$rfc822_lwsp*)";
+    my $route_addr = "\\<$rfc822_lwsp*$route?$addr_spec\\>$rfc822_lwsp*";
+    my $mailbox = "(?:$addr_spec|$phrase$route_addr)";
+
+    my $group = "$phrase:$rfc822_lwsp*(?:$mailbox(?:,\\s*$mailbox)*)?;\\s*";
+    my $address = "(?:$mailbox|$group)";
+
+    return "$rfc822_lwsp*$address";
+}
+
+sub rfc822_strip_comments {
+    my $s = shift;
+#   Recursively remove comments, and replace with a single space.  The simpler
+#   regexps in the Email Addressing FAQ are imperfect - they will miss escaped
+#   chars in atoms, for example.
+
+    while ($s =~ s/^((?:[^"\\]|\\.)*
+                    (?:"(?:[^"\\]|\\.)*"(?:[^"\\]|\\.)*)*)
+                    \((?:[^()\\]|\\.)*\)/$1 /osx) {}
+    return $s;
+}
+
+#   valid: returns true if the parameter is an RFC822 valid address
+#
+sub rfc822_valid ($) {
+    my $s = rfc822_strip_comments(shift);
+
+    if (!$rfc822re) {
+        $rfc822re = make_rfc822re();
+    }
+
+    return $s =~ m/^$rfc822re$/so && $s =~ m/^$rfc822_char*$/;
+}
+
+#   validlist: In scalar context, returns true if the parameter is an RFC822
+#              valid list of addresses.
+#
+#              In list context, returns an empty list on failure (an invalid
+#              address was found); otherwise a list whose first element is the
+#              number of addresses found and whose remaining elements are the
+#              addresses.  This is needed to disambiguate failure (invalid)
+#              from success with no addresses found, because an empty string is
+#              a valid list.
+
+sub rfc822_validlist ($) {
+    my $s = rfc822_strip_comments(shift);
+
+    if (!$rfc822re) {
+        $rfc822re = make_rfc822re();
+    }
+    # * null list items are valid according to the RFC
+    # * the '1' business is to aid in distinguishing failure from no results
+
+    my @r;
+    if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so &&
+       $s =~ m/^$rfc822_char*$/) {
+        while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) {
+            push @r, $1;
+        }
+        return wantarray ? (scalar(@r), @r) : 1;
+    }
+    else {
+        return wantarray ? () : 0;
+    }
+}
diff --git a/scripts/gfp-translate b/scripts/gfp-translate
new file mode 100644 (file)
index 0000000..073cb6d
--- /dev/null
@@ -0,0 +1,81 @@
+#!/bin/bash
+# Translate the bits making up a GFP mask
+# (c) 2009, Mel Gorman <mel@csn.ul.ie>
+# Licensed under the terms of the GNU GPL License version 2
+SOURCE=
+GFPMASK=none
+
+# Helper function to report failures and exit
+die() {
+       echo ERROR: $@
+       if [ "$TMPFILE" != "" ]; then
+               rm -f $TMPFILE
+       fi
+       exit -1
+}
+
+usage() {
+       echo "usage: gfp-translate [-h] [ --source DIRECTORY ] gfpmask"
+       exit 0
+}
+
+# Parse command-line arguements
+while [ $# -gt 0 ]; do
+       case $1 in
+               --source)
+                       SOURCE=$2
+                       shift 2
+                       ;;
+               -h)
+                       usage
+                       ;;
+               --help)
+                       usage
+                       ;;
+               *)
+                       GFPMASK=$1
+                       shift
+                       ;;
+       esac
+done
+
+# Guess the kernel source directory if it's not set. Preference is in order of
+# o current directory
+# o /usr/src/linux
+if [ "$SOURCE" = "" ]; then
+       if [ -r "/usr/src/linux/Makefile" ]; then
+               SOURCE=/usr/src/linux
+       fi
+       if [ -r "`pwd`/Makefile" ]; then
+               SOURCE=`pwd`
+       fi
+fi
+
+# Confirm that a source directory exists
+if [ ! -r "$SOURCE/Makefile" ]; then
+       die "Could not locate kernel source directory or it is invalid"
+fi
+
+# Confirm that a GFP mask has been specified
+if [ "$GFPMASK" = "none" ]; then
+       usage
+fi
+
+# Extract GFP flags from the kernel source
+TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1
+grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
+
+# Parse the flags
+IFS="
+"
+echo Source: $SOURCE
+echo Parsing: $GFPMASK
+for LINE in `cat $TMPFILE`; do
+       MASK=`echo $LINE | awk '{print $3}'`
+       if [ $(($GFPMASK&$MASK)) -ne 0 ]; then
+               echo $LINE
+       fi
+done
+
+rm -f $TMPFILE
+exit 0
index 6aa2a2483f8df04abe75421f28d8deca7cff42c3..64f5ddb09ea626de131090d68aa5aeac10de060e 100644 (file)
@@ -237,22 +237,22 @@ static void write_header(void)
     fprintf(out, " *  Linux logo %s\n", logoname);
     fputs(" */\n\n", out);
     fputs("#include <linux/linux_logo.h>\n\n", out);
-    fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
+    fprintf(out, "static const unsigned char %s_data[] __initconst = {\n",
            logoname);
 }
 
 static void write_footer(void)
 {
     fputs("\n};\n\n", out);
-    fprintf(out, "struct linux_logo %s __initdata = {\n", logoname);
-    fprintf(out, "    .type\t= %s,\n", logo_types[logo_type]);
-    fprintf(out, "    .width\t= %d,\n", logo_width);
-    fprintf(out, "    .height\t= %d,\n", logo_height);
+    fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
+    fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
+    fprintf(out, "\t.width\t\t= %d,\n", logo_width);
+    fprintf(out, "\t.height\t\t= %d,\n", logo_height);
     if (logo_type == LINUX_LOGO_CLUT224) {
-       fprintf(out, "    .clutsize\t= %d,\n", logo_clutsize);
-       fprintf(out, "    .clut\t= %s_clut,\n", logoname);
+       fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize);
+       fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname);
     }
-    fprintf(out, "    .data\t= %s_data\n", logoname);
+    fprintf(out, "\t.data\t\t= %s_data\n", logoname);
     fputs("};\n\n", out);
 
     /* close logo file */
@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
     fputs("\n};\n\n", out);
 
     /* write logo clut */
-    fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
+    fprintf(out, "static const unsigned char %s_clut[] __initconst = {\n",
            logoname);
     write_hex_cnt = 0;
     for (i = 0; i < logo_clutsize; i++) {
index 902f9a992620446ed42acf46390226f71ac22fa7..db40fa04cd513f4a68d86af935fec02f27e507d2 100644 (file)
@@ -12,10 +12,9 @@ calls. Only the functions's names and the the call time are provided.
 
 Usage:
        Be sure that you have CONFIG_FUNCTION_TRACER
-       # mkdir /debugfs
-       # mount -t debug debug /debug
-       # echo function > /debug/tracing/current_tracer
-       $ cat /debug/tracing/trace_pipe > ~/raw_trace_func
+       # mount -t debugfs nodev /sys/kernel/debug
+       # echo function > /sys/kernel/debug/tracing/current_tracer
+       $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
        Wait some times but not too much, the script is a bit slow.
        Break the pipe (Ctrl + Z)
        $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
index cdef2664218ff7c27582d49d0a987bcc2f65d030..174dd2ff0f22ceec380458349ca099681e1d35ab 100644 (file)
@@ -10,6 +10,7 @@
 #define __PCSP_H__
 
 #include <linux/hrtimer.h>
+#include <linux/timex.h>
 #if defined(CONFIG_MIPS) || defined(CONFIG_X86)
 /* Use the global PIT lock ! */
 #include <asm/i8253.h>
index 36c3ea62086be882dbba7d630643b2f6ea110978..8f7d175767a21e75b5167bf7eb117295bef3d250 100644 (file)
@@ -17,7 +17,7 @@
 
 #include <linux/init.h>
 #include <linux/spinlock.h>
-#include <asm/timex.h>
+#include <linux/timex.h>
 #include "sound_config.h"
 
 #include "pas2.h"
index 2b302bbffe7361c6faccc1eb84099f2e1c5b1baf..12522e6913d92d068676f4b3916571b03dae2d87 100644 (file)
@@ -27,6 +27,11 @@ MODULE_DESCRIPTION("Core sound module");
 MODULE_AUTHOR("Alan Cox");
 MODULE_LICENSE("GPL");
 
+static char *sound_nodename(struct device *dev)
+{
+       return kasprintf(GFP_KERNEL, "snd/%s", dev_name(dev));
+}
+
 static int __init init_soundcore(void)
 {
        int rc;
@@ -41,6 +46,8 @@ static int __init init_soundcore(void)
                return PTR_ERR(sound_class);
        }
 
+       sound_class->nodename = sound_nodename;
+
        return 0;
 }