Merge branch 'sparc-perf-events-fixes-for-linus' of git://git.kernel.org/pub/scm...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 8 Oct 2009 19:05:50 +0000 (12:05 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 8 Oct 2009 19:05:50 +0000 (12:05 -0700)
* 'sparc-perf-events-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  mm, perf_event: Make vmalloc_user() align base kernel virtual address to SHMLBA
  perf_event: Provide vmalloc() based mmap() backing

728 files changed:
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
Documentation/SubmittingPatches
Documentation/arm/tcm.txt
Documentation/cgroups/cgroups.txt
Documentation/connector/cn_test.c
Documentation/connector/connector.txt
Documentation/filesystems/ext4.txt
Documentation/filesystems/proc.txt
Documentation/filesystems/vfat.txt
Documentation/hwmon/ltc4215
Documentation/hwmon/ltc4245
Documentation/i2c/chips/eeprom [deleted file]
Documentation/i2c/chips/max6875 [deleted file]
Documentation/i2c/instantiating-devices
Documentation/isdn/INTERFACE.CAPI
Documentation/kernel-parameters.txt
Documentation/misc-devices/eeprom [new file with mode: 0644]
Documentation/misc-devices/max6875 [new file with mode: 0644]
Documentation/networking/pktgen.txt
Documentation/networking/timestamping/timestamping.c
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/vm/ksm.txt
Documentation/vm/page-types.c
Documentation/vm/pagemap.txt
Documentation/w1/masters/ds2482
MAINTAINERS
Makefile
arch/arm/common/sa1111.c
arch/arm/configs/h3600_defconfig
arch/arm/configs/iop33x_defconfig
arch/arm/include/asm/glue.h
arch/arm/include/asm/hardware/iop3xx.h
arch/arm/include/asm/smp_plat.h [new file with mode: 0644]
arch/arm/include/asm/unistd.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/head-common.S
arch/arm/kernel/smp.c
arch/arm/kernel/smp_twd.c
arch/arm/kernel/traps.c
arch/arm/mach-bcmring/core.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-integrator/pci_v3.c
arch/arm/mach-iop32x/include/mach/iop32x.h
arch/arm/mach-iop33x/include/mach/iop33x.h
arch/arm/mach-ns9xxx/clock.c
arch/arm/mach-omap1/id.c
arch/arm/mach-omap2/board-3430sdp.c
arch/arm/mach-omap2/board-ldp.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/board-zoom2.c
arch/arm/mach-omap2/clock34xx.c
arch/arm/mach-omap2/cm4xxx.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/iommu2.c
arch/arm/mach-omap2/mailbox.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/pm-debug.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/powerdomain.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-sa1100/Kconfig
arch/arm/mach-sa1100/time.c
arch/arm/mach-u300/gpio.c
arch/arm/mach-u300/include/mach/gpio.h
arch/arm/mm/Kconfig
arch/arm/mm/Makefile
arch/arm/mm/fault.c
arch/arm/mm/mmap.c
arch/arm/mm/mmu.c
arch/arm/mm/pabort-legacy.S [new file with mode: 0644]
arch/arm/mm/pabort-v6.S [new file with mode: 0644]
arch/arm/mm/pabort-v7.S [new file with mode: 0644]
arch/arm/mm/proc-arm1020.S
arch/arm/mm/proc-arm1020e.S
arch/arm/mm/proc-arm1022.S
arch/arm/mm/proc-arm1026.S
arch/arm/mm/proc-arm6_7.S
arch/arm/mm/proc-arm720.S
arch/arm/mm/proc-arm740.S
arch/arm/mm/proc-arm7tdmi.S
arch/arm/mm/proc-arm920.S
arch/arm/mm/proc-arm922.S
arch/arm/mm/proc-arm925.S
arch/arm/mm/proc-arm926.S
arch/arm/mm/proc-arm940.S
arch/arm/mm/proc-arm946.S
arch/arm/mm/proc-arm9tdmi.S
arch/arm/mm/proc-fa526.S
arch/arm/mm/proc-feroceon.S
arch/arm/mm/proc-mohawk.S
arch/arm/mm/proc-sa110.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/proc-v6.S
arch/arm/mm/proc-v7.S
arch/arm/mm/proc-xsc3.S
arch/arm/mm/proc-xscale.S
arch/arm/plat-iop/pci.c
arch/arm/plat-iop/time.c
arch/arm/plat-omap/gpio.c
arch/arm/plat-omap/include/mach/cpu.h
arch/arm/plat-omap/include/mach/keypad.h
arch/arm/plat-omap/include/mach/mux.h
arch/arm/plat-omap/include/mach/powerdomain.h
arch/arm/plat-omap/iovmm.c
arch/arm/plat-omap/sram.c
arch/arm/plat-s3c24xx/include/plat/mci.h
arch/blackfin/mach-bf561/coreb.c
arch/cris/arch-v10/drivers/sync_serial.c
arch/cris/arch-v32/drivers/mach-fs/gpio.c
arch/m32r/include/asm/io.h
arch/m32r/kernel/m32r_ksyms.c
arch/m32r/kernel/time.c
arch/m32r/kernel/traps.c
arch/m32r/lib/delay.c
arch/m32r/mm/discontig.c
arch/m32r/mm/mmu.S
arch/m68k/include/asm/hardirq_mm.h
arch/m68knommu/kernel/asm-offsets.c
arch/m68knommu/kernel/entry.S
arch/m68knommu/mm/init.c
arch/m68knommu/platform/5206e/config.c
arch/m68knommu/platform/68328/entry.S
arch/m68knommu/platform/68360/entry.S
arch/m68knommu/platform/coldfire/entry.S
arch/microblaze/kernel/entry.S
arch/microblaze/kernel/hw_exception_handler.S
arch/microblaze/kernel/process.c
arch/mips/alchemy/common/dbdma.c
arch/mips/basler/excite/excite_iodev.c
arch/mips/bcm63xx/Makefile
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/dev-pcmcia.c [new file with mode: 0644]
arch/mips/bcm63xx/dev-uart.c [new file with mode: 0644]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h [new file with mode: 0644]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h [new file with mode: 0644]
arch/mips/include/asm/smp.h
arch/mips/include/asm/unaligned.h
arch/mips/kernel/kspd.c
arch/mips/kernel/rtlx.c
arch/mips/kernel/smp.c
arch/mips/kernel/smtc.c
arch/mips/kernel/vpe.c
arch/mips/mm/sc-mips.c
arch/mips/oprofile/op_model_loongson2.c
arch/mips/pci/ops-pmcmsp.c
arch/mips/sgi-ip27/ip27-smp.c
arch/mips/sibyte/bcm1480/irq.c
arch/mips/sibyte/common/sb_tbprof.c
arch/mips/sibyte/swarm/setup.c
arch/mn10300/include/asm/uaccess.h
arch/mn10300/unit-asb2303/include/unit/clock.h
arch/mn10300/unit-asb2305/include/unit/clock.h
arch/parisc/Kconfig
arch/parisc/include/asm/fixmap.h
arch/parisc/include/asm/hardirq.h
arch/parisc/include/asm/ptrace.h
arch/parisc/include/asm/syscall.h [new file with mode: 0644]
arch/parisc/include/asm/thread_info.h
arch/parisc/kernel/asm-offsets.c
arch/parisc/kernel/entry.S
arch/parisc/kernel/irq.c
arch/parisc/kernel/module.c
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/signal.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/vmlinux.lds.S
arch/parisc/mm/init.c
arch/powerpc/kvm/timing.c
arch/powerpc/platforms/cell/spufs/file.c
arch/powerpc/platforms/pseries/dtl.c
arch/s390/kvm/kvm-s390.h
arch/sparc/Kconfig
arch/sparc/include/asm/hardirq_32.h
arch/sparc/include/asm/irq_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/kernel/ktlb.S
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/smp_64.c
arch/sparc/oprofile/init.c
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/checksum_32.h
arch/x86/include/asm/cmpxchg_32.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/i386_ksyms_32.c
arch/x86/kernel/irq.c
arch/x86/kernel/smp.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/Makefile
arch/x86/lib/cmpxchg8b_emu.S [new file with mode: 0644]
arch/x86/xen/debugfs.c
block/blk-barrier.c
block/blk-core.c
block/blk-merge.c
block/blk-settings.c
block/blk-sysfs.c
block/cfq-iosched.c
block/compat_ioctl.c
block/genhd.c
block/ioctl.c
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/acpi_pad.c [new file with mode: 0644]
drivers/acpi/dock.c
drivers/acpi/ec.c
drivers/acpi/proc.c
drivers/acpi/processor_core.c
drivers/acpi/scan.c
drivers/acpi/video.c
drivers/atm/ambassador.c
drivers/atm/eni.c
drivers/atm/firestream.c
drivers/atm/fore200e.c
drivers/atm/he.c
drivers/atm/horizon.c
drivers/atm/iphase.c
drivers/atm/zatm.c
drivers/block/DAC960.c
drivers/block/cciss.c
drivers/block/cciss.h
drivers/block/cpqarray.c
drivers/char/agp/parisc-agp.c
drivers/char/apm-emulation.c
drivers/char/bfin-otp.c
drivers/char/cyclades.c
drivers/char/dtlk.c
drivers/char/hw_random/omap-rng.c
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/pty.c
drivers/char/serial167.c
drivers/char/tty_ldisc.c
drivers/char/vt_ioctl.c
drivers/char/xilinx_hwicap/xilinx_hwicap.c
drivers/connector/cn_proc.c
drivers/connector/cn_queue.c
drivers/connector/connector.c
drivers/edac/amd64_edac.c
drivers/edac/amd64_edac.h
drivers/edac/amd64_edac_inj.c
drivers/firewire/core-cdev.c
drivers/firmware/iscsi_ibft.c
drivers/firmware/iscsi_ibft_find.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/radeon/.gitignore [new file with mode: 0644]
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/avivod.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r100_track.h
drivers/gpu/drm/radeon/r100d.h
drivers/gpu/drm/radeon/r200.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r300d.h
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/r420d.h
drivers/gpu/drm/radeon/r500_reg.h
drivers/gpu/drm/radeon/r520.c
drivers/gpu/drm/radeon/r520d.h [new file with mode: 0644]
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_blit.c
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_clocks.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_reg.h
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs100d.h [new file with mode: 0644]
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs400d.h [new file with mode: 0644]
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs600d.h [new file with mode: 0644]
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rs690d.h [new file with mode: 0644]
drivers/gpu/drm/radeon/rs690r.h [deleted file]
drivers/gpu/drm/radeon/rv200d.h [new file with mode: 0644]
drivers/gpu/drm/radeon/rv250d.h [new file with mode: 0644]
drivers/gpu/drm/radeon/rv350d.h [new file with mode: 0644]
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv515d.h
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/ttm/ttm_global.c
drivers/hid/hidraw.c
drivers/hwmon/fschmd.c
drivers/hwmon/lis3lv02d_spi.c
drivers/hwmon/ltc4215.c
drivers/hwmon/ltc4245.c
drivers/i2c/busses/i2c-amd756.c
drivers/i2c/busses/i2c-amd8111.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-isch.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-sis96x.c
drivers/i2c/busses/i2c-viapro.c
drivers/ide/ide-proc.c
drivers/ide/sis5513.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/uverbs_main.c
drivers/input/evdev.c
drivers/input/input.c
drivers/input/joydev.c
drivers/input/misc/uinput.c
drivers/input/mousedev.c
drivers/isdn/capi/capi.c
drivers/isdn/capi/capidrv.c
drivers/isdn/divert/divert_procfs.c
drivers/isdn/gigaset/asyncdata.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/isdn/gigaset/common.c
drivers/isdn/gigaset/ev-layer.c
drivers/isdn/gigaset/i4l.c
drivers/isdn/gigaset/interface.c
drivers/isdn/gigaset/isocdata.c
drivers/isdn/hardware/mISDN/Kconfig
drivers/isdn/i4l/Kconfig
drivers/isdn/mISDN/socket.c
drivers/leds/leds-pca9532.c
drivers/lguest/lguest_user.c
drivers/macintosh/therm_adt746x.c
drivers/macintosh/therm_pm72.c
drivers/macintosh/windfarm_lm75_sensor.c
drivers/macintosh/windfarm_max6690_sensor.c
drivers/macintosh/windfarm_smu_sat.c
drivers/md/dm-log-userspace-transfer.c
drivers/md/dm.c
drivers/media/dvb/dvb-core/dmxdev.c
drivers/media/dvb/dvb-core/dvb_demux.c
drivers/media/dvb/firewire/firedtv-ci.c
drivers/media/radio/radio-cadet.c
drivers/media/video/cpia.c
drivers/mfd/ab3100-core.c
drivers/mfd/ucb1400_core.c
drivers/misc/eeprom/max6875.c
drivers/misc/phantom.c
drivers/misc/sgi-gru/grufile.c
drivers/mmc/core/debugfs.c
drivers/mmc/core/sdio_cis.c
drivers/mmc/host/Kconfig
drivers/mmc/host/mmci.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/s3cmci.h
drivers/mtd/mtd_blkdevs.c
drivers/net/3c59x.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/au1000_eth.c
drivers/net/bcm63xx_enet.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/cnic.c
drivers/net/cnic_if.h
drivers/net/e1000/e1000.h
drivers/net/e1000/e1000_ethtool.c
drivers/net/e1000/e1000_hw.c
drivers/net/e1000/e1000_hw.h
drivers/net/e1000/e1000_main.c
drivers/net/e1000/e1000_param.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/netdev.c
drivers/net/ethoc.c
drivers/net/hamradio/mkiss.c
drivers/net/igb/igb_main.c
drivers/net/iseries_veth.c
drivers/net/ixgbe/ixgbe_82598.c
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_common.c
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_type.h
drivers/net/ks8851_mll.c [new file with mode: 0644]
drivers/net/meth.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/pasemi_mac_ethtool.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/pppol2tp.c
drivers/net/qlge/qlge.h
drivers/net/qlge/qlge_ethtool.c
drivers/net/qlge/qlge_main.c
drivers/net/qlge/qlge_mpi.c
drivers/net/sgiseeq.c
drivers/net/skge.c
drivers/net/skge.h
drivers/net/sky2.c
drivers/net/sky2.h
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/usb/rndis_host.c
drivers/net/virtio_net.c
drivers/net/wireless/Kconfig
drivers/net/wireless/ath/ar9170/phy.c
drivers/net/wireless/b43/pio.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/net/wireless/iwlwifi/iwl-3945.h
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/pcmcia/Kconfig
drivers/pcmcia/Makefile
drivers/pcmcia/at91_cf.c
drivers/pcmcia/au1000_generic.c
drivers/pcmcia/bcm63xx_pcmcia.c [new file with mode: 0644]
drivers/pcmcia/bcm63xx_pcmcia.h [new file with mode: 0644]
drivers/pcmcia/bfin_cf_pcmcia.c
drivers/pcmcia/cs.c
drivers/pcmcia/i82092.c
drivers/pcmcia/i82365.c
drivers/pcmcia/m32r_cfc.c
drivers/pcmcia/m32r_pcc.c
drivers/pcmcia/m8xx_pcmcia.c
drivers/pcmcia/omap_cf.c
drivers/pcmcia/pd6729.c
drivers/pcmcia/pxa2xx_base.c
drivers/pcmcia/sa1100_assabet.c
drivers/pcmcia/sa1100_generic.c
drivers/pcmcia/sa1100_neponset.c
drivers/pcmcia/sa1111_generic.c
drivers/pcmcia/tcic.c
drivers/pcmcia/vrc4171_card.c
drivers/pcmcia/yenta_socket.c
drivers/platform/x86/sony-laptop.c
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_perf.c
drivers/scsi/sg.c
drivers/serial/8250.c
drivers/serial/Kconfig
drivers/serial/Makefile
drivers/serial/bcm63xx_uart.c [new file with mode: 0644]
drivers/serial/icom.c
drivers/serial/sa1100.c
drivers/serial/serial_cs.c
drivers/serial/serial_txx9.c
drivers/sfi/sfi_core.c
drivers/spi/Makefile
drivers/spi/mxc_spi.c [deleted file]
drivers/spi/spi_imx.c [new file with mode: 0644]
drivers/spi/spidev.c
drivers/staging/dst/dcore.c
drivers/staging/iio/light/tsl2561.c
drivers/staging/pohmelfs/config.c
drivers/usb/class/usbtmc.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/printer.c
drivers/usb/host/whci/debug.c
drivers/usb/misc/rio500.c
drivers/uwb/uwb-debug.c
drivers/video/da8xx-fb.c
drivers/video/fbmem.c
drivers/video/msm/mddi.c
drivers/video/omap/blizzard.c
drivers/video/omap/omapfb_main.c
drivers/video/uvesafb.c
drivers/w1/masters/ds2482.c
drivers/w1/w1_netlink.c
drivers/xen/xenfs/xenbus.c
firmware/Makefile
firmware/WHENCE
firmware/cis/COMpad2.cis.ihex [new file with mode: 0644]
firmware/cis/COMpad4.cis.ihex [new file with mode: 0644]
firmware/cis/DP83903.cis.ihex [new file with mode: 0644]
firmware/cis/NE2K.cis.ihex [new file with mode: 0644]
firmware/cis/tamarack.cis.ihex [new file with mode: 0644]
fs/afs/cache.h [deleted file]
fs/afs/internal.h
fs/anon_inodes.c
fs/bio.c
fs/btrfs/acl.c
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/btrfs/xattr.c
fs/coda/psdev.c
fs/ext4/Kconfig
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/fsync.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h
fs/ext4/migrate.c
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/super.c
fs/fat/fat.h
fs/fat/inode.c
fs/fat/misc.c
fs/fat/namei_vfat.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/nfsd/nfsctl.c
fs/nilfs2/btnode.c
fs/nilfs2/dir.c
fs/nilfs2/file.c
fs/nilfs2/inode.c
fs/nilfs2/mdt.c
fs/nilfs2/nilfs.h
fs/nls/nls_base.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/cluster/netdebug.c
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/super.c
fs/omfs/dir.c
fs/omfs/file.c
fs/omfs/omfs.h
fs/partitions/check.c
fs/proc/kcore.c
fs/proc/page.c
fs/select.c
include/asm-generic/gpio.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_fb_helper.h
include/drm/drm_pciids.h
include/linux/atmdev.h
include/linux/blkdev.h
include/linux/blktrace_api.h
include/linux/cgroup.h
include/linux/connector.h
include/linux/fs.h
include/linux/genhd.h
include/linux/if_tunnel.h
include/linux/jbd2.h
include/linux/mroute.h
include/linux/mroute6.h
include/linux/net.h
include/linux/netfilter.h
include/linux/poll.h
include/linux/res_counter.h
include/linux/serial_core.h
include/linux/socket.h
include/net/compat.h
include/net/inet_connection_sock.h
include/net/ip.h
include/net/ipip.h
include/net/ipv6.h
include/net/sctp/structs.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/net/wext.h
include/pcmcia/ss.h
include/trace/events/block.h
include/trace/events/ext4.h
include/trace/events/jbd2.h
kernel/cgroup.c
kernel/hrtimer.c
kernel/kprobes.c
kernel/module.c
kernel/rcutree_trace.c
kernel/res_counter.c
kernel/sched.c
kernel/sched_clock.c
kernel/time/tick-sched.c
kernel/time/timer_list.c
kernel/time/timer_stats.c
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/kmemtrace.c
lib/vsprintf.c
mm/Kconfig
mm/ksm.c
mm/memcontrol.c
mm/percpu.c
mm/rmap.c
mm/swapfile.c
mm/vmalloc.c
net/8021q/vlan_netlink.c
net/atm/common.c
net/atm/common.h
net/atm/pvc.c
net/atm/svc.c
net/ax25/af_ax25.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bridge/br_if.c
net/can/raw.c
net/compat.c
net/core/dev.c
net/core/net-sysfs.c
net/core/pktgen.c
net/core/sock.c
net/dcb/dcbnl.c
net/dccp/dccp.h
net/dccp/proto.c
net/decnet/af_decnet.c
net/ieee802154/dgram.c
net/ieee802154/raw.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ipmr.c
net/ipv4/raw.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_impl.h
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ndisc.c
net/ipv6/raw.c
net/ipv6/sit.c
net/ipv6/udp.c
net/ipv6/udp_impl.h
net/ipx/af_ipx.c
net/irda/af_irda.c
net/iucv/af_iucv.c
net/llc/af_llc.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/netfilter/nf_sockopt.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/packet/af_packet.c
net/phonet/pep.c
net/phonet/socket.c
net/rds/af_rds.c
net/rfkill/core.c
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/sctp/socket.c
net/socket.c
net/tipc/socket.c
net/wireless/sme.c
net/wireless/wext-sme.c
net/wireless/wext.c
net/x25/af_x25.c
samples/tracepoints/tracepoint-sample.c
security/integrity/ima/ima_fs.c
sound/aoa/codecs/tas.c
sound/drivers/opl3/opl3_midi.c
sound/mips/hal2.c
sound/mips/sgio2audio.c
sound/pci/ctxfi/ctatc.c
sound/pci/echoaudio/echoaudio.c
sound/pci/echoaudio/mia.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/ice1712/ice1712.c
sound/pci/ice1712/ice1724.c
sound/pci/intel8x0.c
sound/pci/via82xx.c
sound/ppc/keywest.c
sound/soc/blackfin/Kconfig
sound/soc/blackfin/bf5xx-i2s.c
sound/soc/blackfin/bf5xx-tdm.c
sound/soc/codecs/wm8350.c
sound/soc/codecs/wm8940.c
sound/soc/davinci/davinci-i2s.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/davinci/davinci-mcasp.h
sound/soc/davinci/davinci-pcm.c
sound/soc/davinci/davinci-pcm.h
sound/soc/imx/mxc-ssi.c
sound/soc/pxa/Kconfig
sound/soc/soc-dapm.c
sound/usb/usbmixer.c
virt/kvm/kvm_main.c

index 0a92a7c93a62067f4526fe581f4604c3eb9f5ee0..4f29e5f1ebfa5b19cd7851e6148b80219911e9c2 100644 (file)
@@ -31,3 +31,31 @@ Date:                March 2009
 Kernel Version: 2.6.30
 Contact:       iss_storagedev@hp.com
 Description:   A symbolic link to /sys/block/cciss!cXdY
+
+Where:         /sys/bus/pci/devices/<dev>/ccissX/rescan
+Date:          August 2009
+Kernel Version:        2.6.31
+Contact:       iss_storagedev@hp.com
+Description:   Kicks off a rescan of the controller to discover logical
+               drive topology changes.
+
+Where:         /sys/bus/pci/devices/<dev>/ccissX/cXdY/lunid
+Date:          August 2009
+Kernel Version: 2.6.31
+Contact:       iss_storagedev@hp.com
+Description:   Displays the 8-byte LUN ID used to address logical
+               drive Y of controller X.
+
+Where:         /sys/bus/pci/devices/<dev>/ccissX/cXdY/raid_level
+Date:          August 2009
+Kernel Version: 2.6.31
+Contact:       iss_storagedev@hp.com
+Description:   Displays the RAID level of logical drive Y of
+               controller X.
+
+Where:         /sys/bus/pci/devices/<dev>/ccissX/cXdY/usage_count
+Date:          August 2009
+Kernel Version: 2.6.31
+Contact:       iss_storagedev@hp.com
+Description:   Displays the usage count (number of opens) of logical drive Y
+               of controller X.
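
The attributes above are plain sysfs files, so they can be exercised from
ordinary user-space code. A minimal C sketch follows; the PCI address,
controller name cciss0 and drive c0d0 are illustrative placeholders, and the
assumption that any write to "rescan" triggers the scan should be verified
against the driver.

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          /* Placeholder paths: substitute the real PCI address and cXdY. */
          const char *rescan = "/sys/bus/pci/devices/0000:03:00.0/cciss0/rescan";
          const char *lunid  = "/sys/bus/pci/devices/0000:03:00.0/cciss0/c0d0/lunid";
          char buf[64];
          ssize_t n;
          int fd;

          /* Kick off a controller rescan (any write is assumed to do it). */
          fd = open(rescan, O_WRONLY);
          if (fd >= 0) {
                  if (write(fd, "1\n", 2) < 0)
                          perror("rescan");
                  close(fd);
          }

          /* Read back the 8-byte LUN ID of logical drive 0. */
          fd = open(lunid, O_RDONLY);
          if (fd >= 0) {
                  n = read(fd, buf, sizeof(buf) - 1);
                  if (n > 0) {
                          buf[n] = '\0';
                          printf("lunid: %s", buf);
                  }
                  close(fd);
          }
          return 0;
  }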
index b7f9d3b4bbf6b15934091f55b20e327419cddd6b..72651f788f4e3536149ef5e7ddfbed96a8f14d2f 100644 (file)
@@ -232,7 +232,7 @@ your e-mail client so that it sends your patches untouched.
 When sending patches to Linus, always follow step #7.
 
 Large changes are not appropriate for mailing lists, and some
-maintainers.  If your patch, uncompressed, exceeds 40 kB in size,
+maintainers.  If your patch, uncompressed, exceeds 300 kB in size,
 it is preferred that you store your patch on an Internet-accessible
 server, and provide instead a URL (link) pointing to your patch.
 
index 074f4be6667fa902662fb195e35ccc3988015eee..77fd9376e6d73b4cc47d39afb8d7b01ad4b00120 100644 (file)
@@ -29,11 +29,13 @@ TCM location and size. Notice that this is not a MMU table: you
 actually move the physical location of the TCM around. At the
 place you put it, it will mask any underlying RAM from the
 CPU so it is usually wise not to overlap any physical RAM with
-the TCM. The TCM memory exists totally outside the MMU and will
-override any MMU mappings.
+the TCM.
 
-Code executing inside the ITCM does not "see" any MMU mappings
-and e.g. register accesses must be made to physical addresses.
+The TCM memory can then be remapped to another address again using
+the MMU, but notice that the TCM is often used in situations where
+the MMU is turned off. To avoid confusion the current Linux
+implementation will map the TCM 1 to 1 from physical to virtual
+memory in the location specified by the machine.
 
 TCM is used for a few things:
 
index 455d4e6d346d839eb0bd8b811efed40afd3642fa..0b33bfe7dde99ecc98df6d0fa4466b3e79eb8f2b 100644 (file)
@@ -227,7 +227,14 @@ as the path relative to the root of the cgroup file system.
 Each cgroup is represented by a directory in the cgroup file system
 containing the following files describing that cgroup:
 
- - tasks: list of tasks (by pid) attached to that cgroup
+ - tasks: list of tasks (by pid) attached to that cgroup.  This list
+   is not guaranteed to be sorted.  Writing a thread id into this file
+   moves the thread into this cgroup.
+ - cgroup.procs: list of tgids in the cgroup.  This list is not
+   guaranteed to be sorted or free of duplicate tgids, and userspace
+   should sort/uniquify the list if this property is required.
+   Writing a tgid into this file moves all threads with that tgid into
+   this cgroup.
  - notify_on_release flag: run the release agent on exit?
  - release_agent: the path to use for release notifications (this file
    exists in the top cgroup only)
@@ -374,7 +381,7 @@ Now you want to do something with this cgroup.
 
 In this directory you can find several files:
 # ls
-notify_on_release tasks
+cgroup.procs notify_on_release tasks
 (plus whatever files added by the attached subsystems)
 
 Now attach your shell to this cgroup:
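
The tasks and cgroup.procs write semantics described above can also be driven
programmatically. A minimal user-space sketch, assuming a cgroup hierarchy
mounted at the illustrative path /cgroups with a child group named foo:

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  /* Move the calling process's whole thread group into /cgroups/foo. */
  int main(void)
  {
          char buf[32];
          int fd, len;

          fd = open("/cgroups/foo/cgroup.procs", O_WRONLY);
          if (fd < 0) {
                  perror("open cgroup.procs");
                  return 1;
          }
          /* Writing a tgid here moves every thread sharing that tgid. */
          len = snprintf(buf, sizeof(buf), "%d\n", getpid());
          if (write(fd, buf, len) != len)
                  perror("write cgroup.procs");
          close(fd);
          return 0;
  }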
index 1711adc3337349ddfbc8d2bb0958123602193d14..b07add3467f1b4d84e07d95a43fece6b8ec7f83e 100644 (file)
@@ -34,7 +34,7 @@ static char cn_test_name[] = "cn_test";
 static struct sock *nls;
 static struct timer_list cn_test_timer;
 
-static void cn_test_callback(struct cn_msg *msg)
+static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
        pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n",
                __func__, jiffies, msg->id.idx, msg->id.val,
index 81e6bf6ead57b172ad0b8822ae9a1ad972e05839..78c9466a9aa8005c4171bdb160c60183042318d6 100644 (file)
@@ -23,7 +23,7 @@ handling, etc...  The Connector driver allows any kernelspace agents to use
 netlink based networking for inter-process communication in a significantly
 easier way:
 
-int cn_add_callback(struct cb_id *id, char *name, void (*callback) (void *));
+int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
 void cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask);
 
 struct cb_id
@@ -53,15 +53,15 @@ struct cn_msg
 Connector interfaces.
 /*****************************************/
 
-int cn_add_callback(struct cb_id *id, char *name, void (*callback) (void *));
+int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
 
  Registers new callback with connector core.
 
  struct cb_id *id              - unique connector's user identifier.
                                  It must be registered in connector.h for legal in-kernel users.
  char *name                    - connector's callback symbolic name.
- void (*callback) (void *)     - connector's callback.
-                                 Argument must be dereferenced to struct cn_msg *.
+ void (*callback) (struct cn..)        - connector's callback.
+                                 cn_msg and the sender's credentials
 
 
 void cn_del_callback(struct cb_id *id);
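
Putting the pieces together, a hedged kernel-module sketch of registering a
callback with the new two-argument prototype. The idx/val pair and the name
"my_cn" are placeholders (in-kernel users must reserve their cb_id in
connector.h), and the pid field of netlink_skb_parms is as found in kernels of
this vintage.

  #include <linux/connector.h>
  #include <linux/module.h>
  #include <linux/netlink.h>

  /* Placeholder idx/val pair; real users reserve a cb_id in connector.h. */
  static struct cb_id my_cb_id = { .idx = 0x123, .val = 0x456 };

  /* New-style callback: gets the message plus the sender's netlink parms. */
  static void my_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
  {
          pr_info("my_cn: idx=%x val=%x seq=%u ack=%u len=%d pid=%u\n",
                  msg->id.idx, msg->id.val, msg->seq, msg->ack, msg->len,
                  nsp->pid);
  }

  static int __init my_cn_init(void)
  {
          return cn_add_callback(&my_cb_id, "my_cn", my_cn_callback);
  }

  static void __exit my_cn_exit(void)
  {
          cn_del_callback(&my_cb_id);
  }

  module_init(my_cn_init);
  module_exit(my_cn_exit);
  MODULE_LICENSE("GPL");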
index 18b5ec8cea45cd45b49eac4053646d9d33e3b2e1..bf4f4b7e11b38298c67d4eac41fec30e63d70c16 100644 (file)
@@ -282,9 +282,16 @@ stripe=n           Number of filesystem blocks that mballoc will try
                        to use for allocation size and alignment. For RAID5/6
                        systems this should be the number of data
                        disks *  RAID chunk size in file system blocks.
-delalloc       (*)     Deferring block allocation until write-out time.
-nodelalloc             Disable delayed allocation. Blocks are allocation
-                       when data is copied from user to page cache.
+
+delalloc       (*)     Defer block allocation until just before ext4
+                       writes out the block(s) in question.  This
+                       allows ext4 to make better allocation decisions
+                       more efficiently.
+nodelalloc             Disable delayed allocation.  Blocks are allocated
+                       when the data is copied from userspace to the
+                       page cache, either via the write(2) system call
+                       or when an mmap'ed page which was previously
+                       unallocated is written for the first time.
 
 max_batch_time=usec    Maximum amount of time ext4 should wait for
                        additional filesystem operations to be batch
index b5aee7838a001755350d7510de1ebd02f000c95d..2c48f945546b4dc3f6ebfa0463687445429ad5cb 100644 (file)
@@ -1113,7 +1113,6 @@ Table 1-12: Files in /proc/fs/ext4/<devname>
 ..............................................................................
  File            Content                                        
  mb_groups       details of multiblock allocator buddy cache of free blocks
- mb_history      multiblock allocation history
 ..............................................................................
 
 
index b58b84b50fa21944ccb98b87f5a2519f34a01705..eed520fd0c8e11c2d89f6a98f4753a73a37755e9 100644 (file)
@@ -102,7 +102,7 @@ shortname=lower|win95|winnt|mixed
                 winnt: emulate the Windows NT rule for display/create.
                 mixed: emulate the Windows NT rule for display,
                        emulate the Windows 95 rule for create.
-                Default setting is `lower'.
+                Default setting is `mixed'.
 
 tz=UTC        -- Interpret timestamps as UTC rather than local time.
                  This option disables the conversion of timestamps
index 2e6a21eb656c556ce4e4c154a6e1229c35275938..c196a18462592f842d7637e3b443c295092cb78b 100644 (file)
@@ -22,12 +22,13 @@ Usage Notes
 -----------
 
 This driver does not probe for LTC4215 devices, due to the fact that some
-of the possible addresses are unfriendly to probing. You will need to use
-the "force" parameter to tell the driver where to find the device.
+of the possible addresses are unfriendly to probing. You will have to
+instantiate the devices explicitly.
 
 Example: the following will load the driver for an LTC4215 at address 0x44
 on I2C bus #0:
-$ modprobe ltc4215 force=0,0x44
+$ modprobe ltc4215
+$ echo ltc4215 0x44 > /sys/bus/i2c/devices/i2c-0/new_device
 
 
 Sysfs entries
index bae7a3adc5d8ff72aded4e8abbd07c30de454f59..02838a47d86210fa8bee469f595c94851594bd44 100644 (file)
@@ -23,12 +23,13 @@ Usage Notes
 -----------
 
 This driver does not probe for LTC4245 devices, due to the fact that some
-of the possible addresses are unfriendly to probing. You will need to use
-the "force" parameter to tell the driver where to find the device.
+of the possible addresses are unfriendly to probing. You will have to
+instantiate the devices explicitly.
 
 Example: the following will load the driver for an LTC4245 at address 0x23
 on I2C bus #1:
-$ modprobe ltc4245 force=1,0x23
+$ modprobe ltc4245
+$ echo ltc4245 0x23 > /sys/bus/i2c/devices/i2c-1/new_device
 
 
 Sysfs entries
diff --git a/Documentation/i2c/chips/eeprom b/Documentation/i2c/chips/eeprom
deleted file mode 100644 (file)
index f7e8104..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-Kernel driver eeprom
-====================
-
-Supported chips:
-  * Any EEPROM chip in the designated address range
-    Prefix: 'eeprom'
-    Addresses scanned: I2C 0x50 - 0x57
-    Datasheets: Publicly available from:
-                Atmel (www.atmel.com),
-                Catalyst (www.catsemi.com),
-                Fairchild (www.fairchildsemi.com),
-                Microchip (www.microchip.com),
-                Philips (www.semiconductor.philips.com),
-                Rohm (www.rohm.com),
-                ST (www.st.com),
-                Xicor (www.xicor.com),
-                and others.
-
-        Chip     Size (bits)    Address
-        24C01     1K            0x50 (shadows at 0x51 - 0x57)
-        24C01A    1K            0x50 - 0x57 (Typical device on DIMMs)
-        24C02     2K            0x50 - 0x57
-        24C04     4K            0x50, 0x52, 0x54, 0x56
-                                (additional data at 0x51, 0x53, 0x55, 0x57)
-        24C08     8K            0x50, 0x54 (additional data at 0x51, 0x52,
-                                0x53, 0x55, 0x56, 0x57)
-        24C16    16K            0x50 (additional data at 0x51 - 0x57)
-        Sony      2K            0x57
-
-        Atmel     34C02B  2K    0x50 - 0x57, SW write protect at 0x30-37
-        Catalyst  34FC02  2K    0x50 - 0x57, SW write protect at 0x30-37
-        Catalyst  34RC02  2K    0x50 - 0x57, SW write protect at 0x30-37
-        Fairchild 34W02   2K    0x50 - 0x57, SW write protect at 0x30-37
-        Microchip 24AA52  2K    0x50 - 0x57, SW write protect at 0x30-37
-        ST        M34C02  2K    0x50 - 0x57, SW write protect at 0x30-37
-
-
-Authors:
-        Frodo Looijaard <frodol@dds.nl>,
-        Philip Edelbrock <phil@netroedge.com>,
-        Jean Delvare <khali@linux-fr.org>,
-        Greg Kroah-Hartman <greg@kroah.com>,
-        IBM Corp.
-
-Description
------------
-
-This is a simple EEPROM module meant to enable reading the first 256 bytes
-of an EEPROM (on a SDRAM DIMM for example). However, it will access serial
-EEPROMs on any I2C adapter. The supported devices are generically called
-24Cxx, and are listed above; however the numbering for these
-industry-standard devices may vary by manufacturer.
-
-This module was a programming exercise to get used to the new project
-organization laid out by Frodo, but it should be at least completely
-effective for decoding the contents of EEPROMs on DIMMs.
-
-DIMMS will typically contain a 24C01A or 24C02, or the 34C02 variants.
-The other devices will not be found on a DIMM because they respond to more
-than one address.
-
-DDC Monitors may contain any device. Often a 24C01, which responds to all 8
-addresses, is found.
-
-Recent Sony Vaio laptops have an EEPROM at 0x57. We couldn't get the
-specification, so it is guess work and far from being complete.
-
-The Microchip 24AA52/24LCS52, ST M34C02, and others support an additional
-software write protect register at 0x30 - 0x37 (0x20 less than the memory
-location). The chip responds to "write quick" detection at this address but
-does not respond to byte reads. If this register is present, the lower 128
-bytes of the memory array are not write protected. Any byte data write to
-this address will write protect the memory array permanently, and the
-device will no longer respond at the 0x30-37 address. The eeprom driver
-does not support this register.
-
-Lacking functionality:
-
-* Full support for larger devices (24C04, 24C08, 24C16). These are not
-typically found on a PC. These devices will appear as separate devices at
-multiple addresses.
-
-* Support for really large devices (24C32, 24C64, 24C128, 24C256, 24C512).
-These devices require two-byte address fields and are not supported.
-
-* Enable Writing. Again, no technical reason why not, but making it easy
-to change the contents of the EEPROMs (on DIMMs anyway) also makes it easy
-to disable the DIMMs (potentially preventing the computer from booting)
-until the values are restored somehow.
-
-Use:
-
-After inserting the module (and any other required SMBus/i2c modules), you
-should have some EEPROM directories in /sys/bus/i2c/devices/* of names such
-as "0-0050". Inside each of these is a series of files, the eeprom file
-contains the binary data from EEPROM.
diff --git a/Documentation/i2c/chips/max6875 b/Documentation/i2c/chips/max6875
deleted file mode 100644 (file)
index 10ca43c..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-Kernel driver max6875
-=====================
-
-Supported chips:
-  * Maxim MAX6874, MAX6875
-    Prefix: 'max6875'
-    Addresses scanned: None (see below)
-    Datasheet:
-        http://pdfserv.maxim-ic.com/en/ds/MAX6874-MAX6875.pdf
-
-Author: Ben Gardner <bgardner@wabtec.com>
-
-
-Description
------------
-
-The Maxim MAX6875 is an EEPROM-programmable power-supply sequencer/supervisor.
-It provides timed outputs that can be used as a watchdog, if properly wired.
-It also provides 512 bytes of user EEPROM.
-
-At reset, the MAX6875 reads the configuration EEPROM into its configuration
-registers.  The chip then begins to operate according to the values in the
-registers.
-
-The Maxim MAX6874 is a similar, mostly compatible device, with more intputs
-and outputs:
-             vin     gpi    vout
-MAX6874        6       4       8
-MAX6875        4       3       5
-
-See the datasheet for more information.
-
-
-Sysfs entries
--------------
-
-eeprom        - 512 bytes of user-defined EEPROM space.
-
-
-General Remarks
----------------
-
-Valid addresses for the MAX6875 are 0x50 and 0x52.
-Valid addresses for the MAX6874 are 0x50, 0x52, 0x54 and 0x56.
-The driver does not probe any address, so you must force the address.
-
-Example:
-$ modprobe max6875 force=0,0x50
-
-The MAX6874/MAX6875 ignores address bit 0, so this driver attaches to multiple
-addresses.  For example, for address 0x50, it also reserves 0x51.
-The even-address instance is called 'max6875', the odd one is 'dummy'.
-
-
-Programming the chip using i2c-dev
-----------------------------------
-
-Use the i2c-dev interface to access and program the chips.
-Reads and writes are performed differently depending on the address range.
-
-The configuration registers are at addresses 0x00 - 0x45.
-Use i2c_smbus_write_byte_data() to write a register and
-i2c_smbus_read_byte_data() to read a register.
-The command is the register number.
-
-Examples:
-To write a 1 to register 0x45:
-  i2c_smbus_write_byte_data(fd, 0x45, 1);
-
-To read register 0x45:
-  value = i2c_smbus_read_byte_data(fd, 0x45);
-
-
-The configuration EEPROM is at addresses 0x8000 - 0x8045.
-The user EEPROM is at addresses 0x8100 - 0x82ff.
-
-Use i2c_smbus_write_word_data() to write a byte to EEPROM.
-
-The command is the upper byte of the address: 0x80, 0x81, or 0x82.
-The data word is the lower part of the address or'd with data << 8.
-  cmd = address >> 8;
-  val = (address & 0xff) | (data << 8);
-
-Example:
-To write 0x5a to address 0x8003:
-  i2c_smbus_write_word_data(fd, 0x80, 0x5a03);
-
-
-Reading data from the EEPROM is a little more complicated.
-Use i2c_smbus_write_byte_data() to set the read address and then
-i2c_smbus_read_byte() or i2c_smbus_read_i2c_block_data() to read the data.
-
-Example:
-To read data starting at offset 0x8100, first set the address:
-  i2c_smbus_write_byte_data(fd, 0x81, 0x00);
-
-And then read the data
-  value = i2c_smbus_read_byte(fd);
-
-  or
-
-  count = i2c_smbus_read_i2c_block_data(fd, 0x84, 16, buffer);
-
-The block read should read 16 bytes.
-0x84 is the block read command.
-
-See the datasheet for more details.
-
index c740b7b410881a72f41256220c03b83c4796d523..e89490270abad1ece6f8003b16ac815490f8de17 100644 (file)
@@ -188,7 +188,7 @@ segment, the address is sufficient to uniquely identify the device to be
 deleted.
 
 Example:
-# echo eeprom 0x50 > /sys/class/i2c-adapter/i2c-3/new_device
+# echo eeprom 0x50 > /sys/bus/i2c/devices/i2c-3/new_device
 
 While this interface should only be used when in-kernel device declaration
 can't be done, there is a variety of cases where it can be helpful:
index 686e107923ec16f36a4f6a32fd49b791e36b30b9..5fe8de5cc7275f768b3c1106c5276f56ab35e256 100644 (file)
@@ -60,10 +60,9 @@ open() operation on regular files or character devices.
 
 After a successful return from register_appl(), CAPI messages from the
 application may be passed to the driver for the device via calls to the
-send_message() callback function. The CAPI message to send is stored in the
-data portion of an skb. Conversely, the driver may call Kernel CAPI's
-capi_ctr_handle_message() function to pass a received CAPI message to Kernel
-CAPI for forwarding to an application, specifying its ApplID.
+send_message() callback function. Conversely, the driver may call Kernel
+CAPI's capi_ctr_handle_message() function to pass a received CAPI message to
+Kernel CAPI for forwarding to an application, specifying its ApplID.
 
 Deregistration requests (CAPI operation CAPI_RELEASE) from applications are
 forwarded as calls to the release_appl() callback function, passing the same
@@ -142,6 +141,7 @@ u16  (*send_message)(struct capi_ctr *ctrlr, struct sk_buff *skb)
        to accepting or queueing the message. Errors occurring during the
        actual processing of the message should be signaled with an
        appropriate reply message.
+       May be called in process or interrupt context.
        Calls to this function are not serialized by Kernel CAPI, ie. it must
        be prepared to be re-entered.
 
@@ -154,7 +154,8 @@ read_proc_t *ctr_read_proc
        system entry, /proc/capi/controllers/<n>; will be called with a
        pointer to the device's capi_ctr structure as the last (data) argument
 
-Note: Callback functions are never called in interrupt context.
+Note: Callback functions except send_message() are never called in interrupt
+context.
 
 - to be filled in before calling capi_ctr_ready():
 
@@ -171,14 +172,40 @@ u8 serial[CAPI_SERIAL_LEN]
        value to return for CAPI_GET_SERIAL
 
 
-4.3 The _cmsg Structure
+4.3 SKBs
+
+CAPI messages are passed between Kernel CAPI and the driver via send_message()
+and capi_ctr_handle_message(), stored in the data portion of a socket buffer
+(skb).  Each skb contains a single CAPI message coded according to the CAPI 2.0
+standard.
+
+For the data transfer messages, DATA_B3_REQ and DATA_B3_IND, the actual
+payload data immediately follows the CAPI message itself within the same skb.
+The Data and Data64 parameters are not used for processing. The Data64
+parameter may be omitted by setting the length field of the CAPI message to 22
+instead of 30.
+
+
+4.4 The _cmsg Structure
 
 (declared in <linux/isdn/capiutil.h>)
 
 The _cmsg structure stores the contents of a CAPI 2.0 message in an easily
-accessible form. It contains members for all possible CAPI 2.0 parameters, of
-which only those appearing in the message type currently being processed are
-actually used. Unused members should be set to zero.
+accessible form. It contains members for all possible CAPI 2.0 parameters,
+including subparameters of the Additional Info and B Protocol structured
+parameters, with the following exceptions:
+
+* second Calling party number (CONNECT_IND)
+
+* Data64 (DATA_B3_REQ and DATA_B3_IND)
+
+* Sending complete (subparameter of Additional Info, CONNECT_REQ and INFO_REQ)
+
+* Global Configuration (subparameter of B Protocol, CONNECT_REQ, CONNECT_RESP
+  and SELECT_B_PROTOCOL_REQ)
+
+Only those parameters appearing in the message type currently being processed
+are actually used. Unused members should be set to zero.
 
 Members are named after the CAPI 2.0 standard names of the parameters they
 represent. See <linux/isdn/capiutil.h> for the exact spelling. Member data
@@ -190,18 +217,19 @@ u16         for CAPI parameters of type 'word'
 
 u32         for CAPI parameters of type 'dword'
 
-_cstruct    for CAPI parameters of type 'struct' not containing any
-           variably-sized (struct) subparameters (eg. 'Called Party Number')
+_cstruct    for CAPI parameters of type 'struct'
            The member is a pointer to a buffer containing the parameter in
            CAPI encoding (length + content). It may also be NULL, which will
            be taken to represent an empty (zero length) parameter.
+           Subparameters are stored in encoded form within the content part.
 
-_cmstruct   for CAPI parameters of type 'struct' containing 'struct'
-           subparameters ('Additional Info' and 'B Protocol')
+_cmstruct   alternative representation for CAPI parameters of type 'struct'
+           (used only for the 'Additional Info' and 'B Protocol' parameters)
            The representation is a single byte containing one of the values:
-           CAPI_DEFAULT: the parameter is empty
-           CAPI_COMPOSE: the values of the subparameters are stored
-           individually in the corresponding _cmsg structure members
+           CAPI_DEFAULT: The parameter is empty/absent.
+           CAPI_COMPOSE: The parameter is present.
+           Subparameter values are stored individually in the corresponding
+           _cmsg structure members.
 
 Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert
 messages between their transport encoding described in the CAPI 2.0 standard
@@ -297,3 +325,26 @@ char *capi_cmd2str(u8 Command, u8 Subcommand)
        be NULL if the command/subcommand is not one of those defined in the
        CAPI 2.0 standard.
 
+
+7. Debugging
+
+The module kernelcapi has a module parameter showcapimsgs controlling some
+debugging output produced by the module. It can only be set when the module is
+loaded, via a parameter "showcapimsgs=<n>" to the modprobe command, either on
+the command line or in the configuration file.
+
+If the lowest bit of showcapimsgs is set, kernelcapi logs controller and
+application up and down events.
+
+In addition, every registered CAPI controller has an associated traceflag
+parameter controlling how CAPI messages sent from and to that controller are
+logged. The traceflag parameter is initialized with the value of the
+showcapimsgs parameter when the controller is registered, but can later be
+changed via the MANUFACTURER_REQ command KCAPI_CMD_TRACE.
+
+If the value of traceflag is non-zero, CAPI messages are logged.
+DATA_B3 messages are only logged if the value of traceflag is > 2.
+
+If the lowest bit of traceflag is set, only the command/subcommand and message
+length are logged. Otherwise, kernelcapi logs a readable representation of
+the entire message.
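
To make section 4.3 concrete, below is a hedged kernel-side sketch of building
a DATA_B3_REQ skb with the payload stored directly behind the 22-byte CAPI
message (Data64 omitted). The field offsets follow the CAPI 2.0 DATA_B3_REQ
layout and use the capimsg_setu*() helpers from <linux/isdn/capiutil.h>; treat
it as an outline to check against the standard, not a drop-in implementation.

  #include <linux/gfp.h>
  #include <linux/isdn/capicmd.h>
  #include <linux/isdn/capiutil.h>
  #include <linux/skbuff.h>
  #include <linux/string.h>

  /* Sketch: DATA_B3_REQ with the 22-byte variant (no Data64 parameter). */
  static struct sk_buff *build_data_b3_req(const void *payload, u16 datalen,
                                           u16 applid, u16 msgnum, u32 ncci)
  {
          const u16 msglen = 22;          /* length field set to 22, not 30 */
          struct sk_buff *skb;
          u8 *p;

          skb = alloc_skb(msglen + datalen, GFP_ATOMIC);
          if (!skb)
                  return NULL;

          p = skb_put(skb, msglen);
          capimsg_setu16(p, 0, msglen);           /* total length */
          capimsg_setu16(p, 2, applid);           /* ApplID */
          capimsg_setu8(p, 4, CAPI_DATA_B3);      /* command */
          capimsg_setu8(p, 5, CAPI_REQ);          /* subcommand */
          capimsg_setu16(p, 6, msgnum);           /* message number */
          capimsg_setu32(p, 8, ncci);             /* NCCI */
          capimsg_setu32(p, 12, 0);               /* Data (unused) */
          capimsg_setu16(p, 16, datalen);         /* Data length */
          capimsg_setu16(p, 18, 0);               /* Data handle */
          capimsg_setu16(p, 20, 0);               /* Flags */

          /* Payload immediately follows the CAPI message in the same skb. */
          memcpy(skb_put(skb, datalen), payload, datalen);
          return skb;
  }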
index 6fa7292947e5c8d100d6e426013aa31aa421f115..9107b387e91fce095ee320dbeed53d5dfe5cb332 100644 (file)
@@ -671,6 +671,7 @@ and is between 256 and 4096 characters. It is defined in the file
        earlyprintk=    [X86,SH,BLACKFIN]
                        earlyprintk=vga
                        earlyprintk=serial[,ttySn[,baudrate]]
+                       earlyprintk=ttySn[,baudrate]
                        earlyprintk=dbgp[debugController#]
 
                        Append ",keep" to not disable it when the real console
diff --git a/Documentation/misc-devices/eeprom b/Documentation/misc-devices/eeprom
new file mode 100644 (file)
index 0000000..f7e8104
--- /dev/null
@@ -0,0 +1,96 @@
+Kernel driver eeprom
+====================
+
+Supported chips:
+  * Any EEPROM chip in the designated address range
+    Prefix: 'eeprom'
+    Addresses scanned: I2C 0x50 - 0x57
+    Datasheets: Publicly available from:
+                Atmel (www.atmel.com),
+                Catalyst (www.catsemi.com),
+                Fairchild (www.fairchildsemi.com),
+                Microchip (www.microchip.com),
+                Philips (www.semiconductor.philips.com),
+                Rohm (www.rohm.com),
+                ST (www.st.com),
+                Xicor (www.xicor.com),
+                and others.
+
+        Chip     Size (bits)    Address
+        24C01     1K            0x50 (shadows at 0x51 - 0x57)
+        24C01A    1K            0x50 - 0x57 (Typical device on DIMMs)
+        24C02     2K            0x50 - 0x57
+        24C04     4K            0x50, 0x52, 0x54, 0x56
+                                (additional data at 0x51, 0x53, 0x55, 0x57)
+        24C08     8K            0x50, 0x54 (additional data at 0x51, 0x52,
+                                0x53, 0x55, 0x56, 0x57)
+        24C16    16K            0x50 (additional data at 0x51 - 0x57)
+        Sony      2K            0x57
+
+        Atmel     34C02B  2K    0x50 - 0x57, SW write protect at 0x30-37
+        Catalyst  34FC02  2K    0x50 - 0x57, SW write protect at 0x30-37
+        Catalyst  34RC02  2K    0x50 - 0x57, SW write protect at 0x30-37
+        Fairchild 34W02   2K    0x50 - 0x57, SW write protect at 0x30-37
+        Microchip 24AA52  2K    0x50 - 0x57, SW write protect at 0x30-37
+        ST        M34C02  2K    0x50 - 0x57, SW write protect at 0x30-37
+
+
+Authors:
+        Frodo Looijaard <frodol@dds.nl>,
+        Philip Edelbrock <phil@netroedge.com>,
+        Jean Delvare <khali@linux-fr.org>,
+        Greg Kroah-Hartman <greg@kroah.com>,
+        IBM Corp.
+
+Description
+-----------
+
+This is a simple EEPROM module meant to enable reading the first 256 bytes
+of an EEPROM (on a SDRAM DIMM for example). However, it will access serial
+EEPROMs on any I2C adapter. The supported devices are generically called
+24Cxx, and are listed above; however the numbering for these
+industry-standard devices may vary by manufacturer.
+
+This module was a programming exercise to get used to the new project
+organization laid out by Frodo, but it should be at least completely
+effective for decoding the contents of EEPROMs on DIMMs.
+
+DIMMS will typically contain a 24C01A or 24C02, or the 34C02 variants.
+The other devices will not be found on a DIMM because they respond to more
+than one address.
+
+DDC Monitors may contain any device. Often a 24C01, which responds to all 8
+addresses, is found.
+
+Recent Sony Vaio laptops have an EEPROM at 0x57. We couldn't get the
+specification, so it is guess work and far from being complete.
+
+The Microchip 24AA52/24LCS52, ST M34C02, and others support an additional
+software write protect register at 0x30 - 0x37 (0x20 less than the memory
+location). The chip responds to "write quick" detection at this address but
+does not respond to byte reads. If this register is present, the lower 128
+bytes of the memory array are not write protected. Any byte data write to
+this address will write protect the memory array permanently, and the
+device will no longer respond at the 0x30-37 address. The eeprom driver
+does not support this register.
+
+Lacking functionality:
+
+* Full support for larger devices (24C04, 24C08, 24C16). These are not
+typically found on a PC. These devices will appear as separate devices at
+multiple addresses.
+
+* Support for really large devices (24C32, 24C64, 24C128, 24C256, 24C512).
+These devices require two-byte address fields and are not supported.
+
+* Enable Writing. Again, no technical reason why not, but making it easy
+to change the contents of the EEPROMs (on DIMMs anyway) also makes it easy
+to disable the DIMMs (potentially preventing the computer from booting)
+until the values are restored somehow.
+
+Use:
+
+After inserting the module (and any other required SMBus/i2c modules), you
+should have some EEPROM directories in /sys/bus/i2c/devices/* of names such
+as "0-0050". Inside each of these is a series of files, the eeprom file
+contains the binary data from EEPROM.
diff --git a/Documentation/misc-devices/max6875 b/Documentation/misc-devices/max6875
new file mode 100644 (file)
index 0000000..1e89ee3
--- /dev/null
@@ -0,0 +1,110 @@
+Kernel driver max6875
+=====================
+
+Supported chips:
+  * Maxim MAX6874, MAX6875
+    Prefix: 'max6875'
+    Addresses scanned: None (see below)
+    Datasheet:
+        http://pdfserv.maxim-ic.com/en/ds/MAX6874-MAX6875.pdf
+
+Author: Ben Gardner <bgardner@wabtec.com>
+
+
+Description
+-----------
+
+The Maxim MAX6875 is an EEPROM-programmable power-supply sequencer/supervisor.
+It provides timed outputs that can be used as a watchdog, if properly wired.
+It also provides 512 bytes of user EEPROM.
+
+At reset, the MAX6875 reads the configuration EEPROM into its configuration
+registers.  The chip then begins to operate according to the values in the
+registers.
+
+The Maxim MAX6874 is a similar, mostly compatible device, with more inputs
+and outputs:
+             vin     gpi    vout
+MAX6874        6       4       8
+MAX6875        4       3       5
+
+See the datasheet for more information.
+
+
+Sysfs entries
+-------------
+
+eeprom        - 512 bytes of user-defined EEPROM space.
+
+
+General Remarks
+---------------
+
+Valid addresses for the MAX6875 are 0x50 and 0x52.
+Valid addresses for the MAX6874 are 0x50, 0x52, 0x54 and 0x56.
+The driver does not probe any address, so you have to instantiate the
+devices explicitly.
+
+Example:
+$ modprobe max6875
+$ echo max6875 0x50 > /sys/bus/i2c/devices/i2c-0/new_device
+
+The MAX6874/MAX6875 ignores address bit 0, so this driver attaches to multiple
+addresses.  For example, for address 0x50, it also reserves 0x51.
+The even-address instance is called 'max6875', the odd one is 'dummy'.
+
+
+Programming the chip using i2c-dev
+----------------------------------
+
+Use the i2c-dev interface to access and program the chips.
+Reads and writes are performed differently depending on the address range.
+
+The configuration registers are at addresses 0x00 - 0x45.
+Use i2c_smbus_write_byte_data() to write a register and
+i2c_smbus_read_byte_data() to read a register.
+The command is the register number.
+
+Examples:
+To write a 1 to register 0x45:
+  i2c_smbus_write_byte_data(fd, 0x45, 1);
+
+To read register 0x45:
+  value = i2c_smbus_read_byte_data(fd, 0x45);
+
+
+The configuration EEPROM is at addresses 0x8000 - 0x8045.
+The user EEPROM is at addresses 0x8100 - 0x82ff.
+
+Use i2c_smbus_write_word_data() to write a byte to EEPROM.
+
+The command is the upper byte of the address: 0x80, 0x81, or 0x82.
+The data word is the lower part of the address or'd with data << 8.
+  cmd = address >> 8;
+  val = (address & 0xff) | (data << 8);
+
+Example:
+To write 0x5a to address 0x8003:
+  i2c_smbus_write_word_data(fd, 0x80, 0x5a03);
+
+
+Reading data from the EEPROM is a little more complicated.
+Use i2c_smbus_write_byte_data() to set the read address and then
+i2c_smbus_read_byte() or i2c_smbus_read_i2c_block_data() to read the data.
+
+Example:
+To read data starting at offset 0x8100, first set the address:
+  i2c_smbus_write_byte_data(fd, 0x81, 0x00);
+
+And then read the data
+  value = i2c_smbus_read_byte(fd);
+
+  or
+
+  count = i2c_smbus_read_i2c_block_data(fd, 0x84, 16, buffer);
+
+The block read should read 16 bytes.
+0x84 is the block read command.
+
+See the datasheet for more details.
+
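
Putting the calls above together, a hedged user-space sketch of the EEPROM
read sequence. It assumes the SMBus helper functions historically provided by
<linux/i2c-dev.h> (newer systems ship them with i2c-tools) and uses bus 0 and
address 0x50 as in the examples above.

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/i2c-dev.h>      /* I2C_SLAVE and the SMBus helpers */

  int main(void)
  {
          __u8 buf[16];
          int fd, i, count;

          fd = open("/dev/i2c-0", O_RDWR);        /* bus 0, as above */
          if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x50) < 0) {
                  perror("i2c setup");
                  return 1;
          }

          /* Set the user EEPROM read address to 0x8100 ... */
          i2c_smbus_write_byte_data(fd, 0x81, 0x00);

          /* ... then block-read 16 bytes (0x84 is the block read command). */
          count = i2c_smbus_read_i2c_block_data(fd, 0x84, 16, buf);
          for (i = 0; i < count; i++)
                  printf("%02x ", buf[i]);
          printf("\n");

          close(fd);
          return 0;
  }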
index c6cf4a3c16e029258e3c6790ee3b5da441259eff..61bb645d50e061d7450f3f75d8805d2ca9308501 100644 (file)
@@ -90,6 +90,11 @@ Examples:
  pgset "dstmac 00:00:00:00:00:00"    sets MAC destination address
  pgset "srcmac 00:00:00:00:00:00"    sets MAC source address
 
+ pgset "queue_map_min 0" Sets the min value of tx queue interval
+ pgset "queue_map_max 7" Sets the max value of tx queue interval, for multiqueue devices
+                         To select queue 1 of a given device,
+                         use queue_map_min=1 and queue_map_max=1
+
  pgset "src_mac_count 1" Sets the number of MACs we'll range through.  
                          The 'minimum' MAC is what you set with srcmac.
 
@@ -101,6 +106,9 @@ Examples:
                               IPDST_RND, UDPSRC_RND,
                               UDPDST_RND, MACSRC_RND, MACDST_RND 
                               MPLS_RND, VID_RND, SVID_RND
+                              QUEUE_MAP_RND # queue map random
+                              QUEUE_MAP_CPU # queue map mirrors smp_processor_id()
+
 
  pgset "udp_src_min 9"   set UDP source port min, If < udp_src_max, then
                          cycle through the port range.
index 43d143104210186b5f627b6eba211a180914ebe2..a7936fe8444a1830b5f57eccaf4ca610a42db358 100644 (file)
@@ -381,7 +381,7 @@ int main(int argc, char **argv)
        memset(&hwtstamp, 0, sizeof(hwtstamp));
        strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name));
        hwtstamp.ifr_data = (void *)&hwconfig;
-       memset(&hwconfig, 0, sizeof(&hwconfig));
+       memset(&hwconfig, 0, sizeof(hwconfig));
        hwconfig.tx_type =
                (so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
                HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
index f1708b79f9637a39fe2926bd519bb562a789b8d3..75fddb40f4163d44d33166a486094f1c593852a0 100644 (file)
@@ -209,6 +209,7 @@ AD1884A / AD1883 / AD1984A / AD1984B
   laptop       laptop with HP jack sensing
   mobile       mobile devices with HP jack sensing
   thinkpad     Lenovo Thinkpad X300
+  touchsmart   HP Touchsmart
 
 AD1884
 ======
index 72a22f65960ed942c24c43feb9c0be4b8adab233..262d8e6793a3e008c36bb39905f49634f9e570ae 100644 (file)
@@ -52,15 +52,15 @@ The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/,
 readable by all but writable only by root:
 
 max_kernel_pages - set to maximum number of kernel pages that KSM may use
-                   e.g. "echo 2000 > /sys/kernel/mm/ksm/max_kernel_pages"
+                   e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages"
                    Value 0 imposes no limit on the kernel pages KSM may use;
                    but note that any process using MADV_MERGEABLE can cause
                    KSM to allocate these pages, unswappable until it exits.
-                   Default: 2000 (chosen for demonstration purposes)
+                   Default: quarter of memory (chosen to not pin too much)
 
 pages_to_scan    - how many present pages to scan before ksmd goes to sleep
-                   e.g. "echo 200 > /sys/kernel/mm/ksm/pages_to_scan"
-                   Default: 200 (chosen for demonstration purposes)
+                   e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan"
+                   Default: 100 (chosen for demonstration purposes)
 
 sleep_millisecs  - how many milliseconds ksmd should sleep before next scan
                    e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs"
@@ -70,7 +70,8 @@ run              - set 0 to stop ksmd from running but keep merged pages,
                    set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run",
                    set 2 to stop ksmd and unmerge all pages currently merged,
                          but leave mergeable areas registered for next run
-                   Default: 1 (for immediate use by apps which register)
+                   Default: 0 (must be changed to 1 to activate KSM,
+                               except if CONFIG_SYSFS is disabled)
 
 The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
 
@@ -86,4 +87,4 @@ pages_volatile embraces several different kinds of activity, but a high
 proportion there would also indicate poor use of madvise MADV_MERGEABLE.
 
 Izik Eidus,
-Hugh Dickins, 30 July 2009
+Hugh Dickins, 24 Sept 2009
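
As an illustration of the MADV_MERGEABLE registration described above, a
minimal sketch of a process offering an anonymous mapping to KSM (the mapping
size is arbitrary, and ksmd must have been started by writing 1 to run):

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/mman.h>

  #ifndef MADV_MERGEABLE
  #define MADV_MERGEABLE 12       /* value from asm-generic/mman-common.h */
  #endif

  int main(void)
  {
          size_t len = 64 * 4096;         /* arbitrary: 64 pages */
          char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (p == MAP_FAILED)
                  return 1;

          memset(p, 0x5a, len);           /* identical content -> mergeable */

          /* tell the kernel this range may be merged by ksmd */
          if (madvise(p, len, MADV_MERGEABLE) != 0)
                  perror("madvise(MADV_MERGEABLE)");

          pause();                        /* keep the mapping alive for ksmd */
          return 0;
  }
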
index fa1a30d9e9d540a27324affbfe8aae579661c877..3ec4f2a2258537a80b77f1ca341b1df03ded6ae8 100644 (file)
@@ -2,7 +2,10 @@
  * page-types: Tool for querying page flags
  *
  * Copyright (C) 2009 Intel corporation
- * Copyright (C) 2009 Wu Fengguang <fengguang.wu@intel.com>
+ *
+ * Authors: Wu Fengguang <fengguang.wu@intel.com>
+ *
+ * Released under the General Public License (GPL).
  */
 
 #define _LARGEFILE64_SOURCE
@@ -69,7 +72,9 @@
 #define KPF_COMPOUND_TAIL      16
 #define KPF_HUGE               17
 #define KPF_UNEVICTABLE                18
+#define KPF_HWPOISON           19
 #define KPF_NOPAGE             20
+#define KPF_KSM                        21
 
 /* [32-] kernel hacking assistances */
 #define KPF_RESERVED           32
@@ -116,7 +121,9 @@ static char *page_flag_names[] = {
        [KPF_COMPOUND_TAIL]     = "T:compound_tail",
        [KPF_HUGE]              = "G:huge",
        [KPF_UNEVICTABLE]       = "u:unevictable",
+       [KPF_HWPOISON]          = "X:hwpoison",
        [KPF_NOPAGE]            = "n:nopage",
+       [KPF_KSM]               = "x:ksm",
 
        [KPF_RESERVED]          = "r:reserved",
        [KPF_MLOCKED]           = "m:mlocked",
@@ -152,9 +159,6 @@ static unsigned long        opt_size[MAX_ADDR_RANGES];
 static int             nr_vmas;
 static unsigned long   pg_start[MAX_VMAS];
 static unsigned long   pg_end[MAX_VMAS];
-static unsigned long   voffset;
-
-static int             pagemap_fd;
 
 #define MAX_BIT_FILTERS        64
 static int             nr_bit_filters;
@@ -163,9 +167,16 @@ static uint64_t            opt_bits[MAX_BIT_FILTERS];
 
 static int             page_size;
 
-#define PAGES_BATCH    (64 << 10)      /* 64k pages */
+static int             pagemap_fd;
 static int             kpageflags_fd;
 
+static int             opt_hwpoison;
+static int             opt_unpoison;
+
+static char            *hwpoison_debug_fs = "/debug/hwpoison";
+static int             hwpoison_inject_fd;
+static int             hwpoison_forget_fd;
+
 #define HASH_SHIFT     13
 #define HASH_SIZE      (1 << HASH_SHIFT)
 #define HASH_MASK      (HASH_SIZE - 1)
@@ -207,6 +218,74 @@ static void fatal(const char *x, ...)
        exit(EXIT_FAILURE);
 }
 
+int checked_open(const char *pathname, int flags)
+{
+       int fd = open(pathname, flags);
+
+       if (fd < 0) {
+               perror(pathname);
+               exit(EXIT_FAILURE);
+       }
+
+       return fd;
+}
+
+/*
+ * pagemap/kpageflags routines
+ */
+
+static unsigned long do_u64_read(int fd, char *name,
+                                uint64_t *buf,
+                                unsigned long index,
+                                unsigned long count)
+{
+       long bytes;
+
+       if (index > ULONG_MAX / 8)
+               fatal("index overflow: %lu\n", index);
+
+       if (lseek(fd, index * 8, SEEK_SET) < 0) {
+               perror(name);
+               exit(EXIT_FAILURE);
+       }
+
+       bytes = read(fd, buf, count * 8);
+       if (bytes < 0) {
+               perror(name);
+               exit(EXIT_FAILURE);
+       }
+       if (bytes % 8)
+               fatal("partial read: %lu bytes\n", bytes);
+
+       return bytes / 8;
+}
+
+static unsigned long kpageflags_read(uint64_t *buf,
+                                    unsigned long index,
+                                    unsigned long pages)
+{
+       return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages);
+}
+
+static unsigned long pagemap_read(uint64_t *buf,
+                                 unsigned long index,
+                                 unsigned long pages)
+{
+       return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages);
+}
+
+static unsigned long pagemap_pfn(uint64_t val)
+{
+       unsigned long pfn;
+
+       if (val & PM_PRESENT)
+               pfn = PM_PFRAME(val);
+       else
+               pfn = 0;
+
+       return pfn;
+}
+
 
 /*
  * page flag names
@@ -255,7 +334,8 @@ static char *page_flag_longname(uint64_t flags)
  * page list and summary
  */
 
-static void show_page_range(unsigned long offset, uint64_t flags)
+static void show_page_range(unsigned long voffset,
+                           unsigned long offset, uint64_t flags)
 {
        static uint64_t      flags0;
        static unsigned long voff;
@@ -281,7 +361,8 @@ static void show_page_range(unsigned long offset, uint64_t flags)
        count  = 1;
 }
 
-static void show_page(unsigned long offset, uint64_t flags)
+static void show_page(unsigned long voffset,
+                     unsigned long offset, uint64_t flags)
 {
        if (opt_pid)
                printf("%lx\t", voffset);
@@ -362,6 +443,62 @@ static uint64_t well_known_flags(uint64_t flags)
        return flags;
 }
 
+static uint64_t kpageflags_flags(uint64_t flags)
+{
+       flags = expand_overloaded_flags(flags);
+
+       if (!opt_raw)
+               flags = well_known_flags(flags);
+
+       return flags;
+}
+
+/*
+ * page actions
+ */
+
+static void prepare_hwpoison_fd(void)
+{
+       char buf[100];
+
+       if (opt_hwpoison && !hwpoison_inject_fd) {
+               sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs);
+               hwpoison_inject_fd = checked_open(buf, O_WRONLY);
+       }
+
+       if (opt_unpoison && !hwpoison_forget_fd) {
+               sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs);
+               hwpoison_forget_fd = checked_open(buf, O_WRONLY);
+       }
+}
+
+static int hwpoison_page(unsigned long offset)
+{
+       char buf[100];
+       int len;
+
+       len = sprintf(buf, "0x%lx\n", offset);
+       len = write(hwpoison_inject_fd, buf, len);
+       if (len < 0) {
+               perror("hwpoison inject");
+               return len;
+       }
+       return 0;
+}
+
+static int unpoison_page(unsigned long offset)
+{
+       char buf[100];
+       int len;
+
+       len = sprintf(buf, "0x%lx\n", offset);
+       len = write(hwpoison_forget_fd, buf, len);
+       if (len < 0) {
+               perror("hwpoison forget");
+               return len;
+       }
+       return 0;
+}
 
 /*
  * page frame walker
@@ -394,104 +531,83 @@ static int hash_slot(uint64_t flags)
        exit(EXIT_FAILURE);
 }
 
-static void add_page(unsigned long offset, uint64_t flags)
+static void add_page(unsigned long voffset,
+                    unsigned long offset, uint64_t flags)
 {
-       flags = expand_overloaded_flags(flags);
-
-       if (!opt_raw)
-               flags = well_known_flags(flags);
+       flags = kpageflags_flags(flags);
 
        if (!bit_mask_ok(flags))
                return;
 
+       if (opt_hwpoison)
+               hwpoison_page(offset);
+       if (opt_unpoison)
+               unpoison_page(offset);
+
        if (opt_list == 1)
-               show_page_range(offset, flags);
+               show_page_range(voffset, offset, flags);
        else if (opt_list == 2)
-               show_page(offset, flags);
+               show_page(voffset, offset, flags);
 
        nr_pages[hash_slot(flags)]++;
        total_pages++;
 }
 
-static void walk_pfn(unsigned long index, unsigned long count)
+#define KPAGEFLAGS_BATCH       (64 << 10)      /* 64k pages */
+static void walk_pfn(unsigned long voffset,
+                    unsigned long index,
+                    unsigned long count)
 {
+       uint64_t buf[KPAGEFLAGS_BATCH];
        unsigned long batch;
-       unsigned long n;
+       unsigned long pages;
        unsigned long i;
 
-       if (index > ULONG_MAX / KPF_BYTES)
-               fatal("index overflow: %lu\n", index);
-
-       lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET);
-
        while (count) {
-               uint64_t kpageflags_buf[KPF_BYTES * PAGES_BATCH];
-
-               batch = min_t(unsigned long, count, PAGES_BATCH);
-               n = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES);
-               if (n == 0)
+               batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH);
+               pages = kpageflags_read(buf, index, batch);
+               if (pages == 0)
                        break;
-               if (n < 0) {
-                       perror(PROC_KPAGEFLAGS);
-                       exit(EXIT_FAILURE);
-               }
 
-               if (n % KPF_BYTES != 0)
-                       fatal("partial read: %lu bytes\n", n);
-               n = n / KPF_BYTES;
+               for (i = 0; i < pages; i++)
+                       add_page(voffset + i, index + i, buf[i]);
 
-               for (i = 0; i < n; i++)
-                       add_page(index + i, kpageflags_buf[i]);
-
-               index += batch;
-               count -= batch;
+               index += pages;
+               count -= pages;
        }
 }
 
-
-#define PAGEMAP_BATCH  4096
-static unsigned long task_pfn(unsigned long pgoff)
+#define PAGEMAP_BATCH  (64 << 10)
+static void walk_vma(unsigned long index, unsigned long count)
 {
-       static uint64_t buf[PAGEMAP_BATCH];
-       static unsigned long start;
-       static long count;
-       uint64_t pfn;
+       uint64_t buf[PAGEMAP_BATCH];
+       unsigned long batch;
+       unsigned long pages;
+       unsigned long pfn;
+       unsigned long i;
 
-       if (pgoff < start || pgoff >= start + count) {
-               if (lseek64(pagemap_fd,
-                           (uint64_t)pgoff * PM_ENTRY_BYTES,
-                           SEEK_SET) < 0) {
-                       perror("pagemap seek");
-                       exit(EXIT_FAILURE);
-               }
-               count = read(pagemap_fd, buf, sizeof(buf));
-               if (count == 0)
-                       return 0;
-               if (count < 0) {
-                       perror("pagemap read");
-                       exit(EXIT_FAILURE);
-               }
-               if (count % PM_ENTRY_BYTES) {
-                       fatal("pagemap read not aligned.\n");
-                       exit(EXIT_FAILURE);
-               }
-               count /= PM_ENTRY_BYTES;
-               start = pgoff;
-       }
+       while (count) {
+               batch = min_t(unsigned long, count, PAGEMAP_BATCH);
+               pages = pagemap_read(buf, index, batch);
+               if (pages == 0)
+                       break;
 
-       pfn = buf[pgoff - start];
-       if (pfn & PM_PRESENT)
-               pfn = PM_PFRAME(pfn);
-       else
-               pfn = 0;
+               for (i = 0; i < pages; i++) {
+                       pfn = pagemap_pfn(buf[i]);
+                       if (pfn)
+                               walk_pfn(index + i, pfn, 1);
+               }
 
-       return pfn;
+               index += pages;
+               count -= pages;
+       }
 }
 
 static void walk_task(unsigned long index, unsigned long count)
 {
-       int i = 0;
        const unsigned long end = index + count;
+       unsigned long start;
+       int i = 0;
 
        while (index < end) {
 
@@ -501,15 +617,11 @@ static void walk_task(unsigned long index, unsigned long count)
                if (pg_start[i] >= end)
                        return;
 
-               voffset = max_t(unsigned long, pg_start[i], index);
-               index   = min_t(unsigned long, pg_end[i], end);
+               start = max_t(unsigned long, pg_start[i], index);
+               index = min_t(unsigned long, pg_end[i], end);
 
-               assert(voffset < index);
-               for (; voffset < index; voffset++) {
-                       unsigned long pfn = task_pfn(voffset);
-                       if (pfn)
-                               walk_pfn(pfn, 1);
-               }
+               assert(start < index);
+               walk_vma(start, index - start);
        }
 }
 
@@ -527,18 +639,14 @@ static void walk_addr_ranges(void)
 {
        int i;
 
-       kpageflags_fd = open(PROC_KPAGEFLAGS, O_RDONLY);
-       if (kpageflags_fd < 0) {
-               perror(PROC_KPAGEFLAGS);
-               exit(EXIT_FAILURE);
-       }
+       kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
 
        if (!nr_addr_ranges)
                add_addr_range(0, ULONG_MAX);
 
        for (i = 0; i < nr_addr_ranges; i++)
                if (!opt_pid)
-                       walk_pfn(opt_offset[i], opt_size[i]);
+                       walk_pfn(0, opt_offset[i], opt_size[i]);
                else
                        walk_task(opt_offset[i], opt_size[i]);
 
@@ -575,6 +683,8 @@ static void usage(void)
 "            -l|--list                 Show page details in ranges\n"
 "            -L|--list-each            Show page details one by one\n"
 "            -N|--no-summary           Don't show summay info\n"
+"            -X|--hwpoison             hwpoison pages\n"
+"            -x|--unpoison             unpoison pages\n"
 "            -h|--help                 Show this usage message\n"
 "addr-spec:\n"
 "            N                         one page at offset N (unit: pages)\n"
@@ -624,11 +734,7 @@ static void parse_pid(const char *str)
        opt_pid = parse_number(str);
 
        sprintf(buf, "/proc/%d/pagemap", opt_pid);
-       pagemap_fd = open(buf, O_RDONLY);
-       if (pagemap_fd < 0) {
-               perror(buf);
-               exit(EXIT_FAILURE);
-       }
+       pagemap_fd = checked_open(buf, O_RDONLY);
 
        sprintf(buf, "/proc/%d/maps", opt_pid);
        file = fopen(buf, "r");
@@ -788,6 +894,8 @@ static struct option opts[] = {
        { "list"      , 0, NULL, 'l' },
        { "list-each" , 0, NULL, 'L' },
        { "no-summary", 0, NULL, 'N' },
+       { "hwpoison"  , 0, NULL, 'X' },
+       { "unpoison"  , 0, NULL, 'x' },
        { "help"      , 0, NULL, 'h' },
        { NULL        , 0, NULL, 0 }
 };
@@ -799,7 +907,7 @@ int main(int argc, char *argv[])
        page_size = getpagesize();
 
        while ((c = getopt_long(argc, argv,
-                               "rp:f:a:b:lLNh", opts, NULL)) != -1) {
+                               "rp:f:a:b:lLNXxh", opts, NULL)) != -1) {
                switch (c) {
                case 'r':
                        opt_raw = 1;
@@ -825,6 +933,14 @@ int main(int argc, char *argv[])
                case 'N':
                        opt_no_summary = 1;
                        break;
+               case 'X':
+                       opt_hwpoison = 1;
+                       prepare_hwpoison_fd();
+                       break;
+               case 'x':
+                       opt_unpoison = 1;
+                       prepare_hwpoison_fd();
+                       break;
                case 'h':
                        usage();
                        exit(0);
@@ -844,7 +960,7 @@ int main(int argc, char *argv[])
        walk_addr_ranges();
 
        if (opt_list == 1)
-               show_page_range(0, 0);  /* drain the buffer */
+               show_page_range(0, 0, 0);  /* drain the buffer */
 
        if (opt_no_summary)
                return 0;
index 600a304a828cb2865e63a85e031fb881f27cbbc7..df09b9650a811045ee99708682b2172c0c8fee83 100644 (file)
@@ -57,7 +57,9 @@ There are three components to pagemap:
     16. COMPOUND_TAIL
     17. HUGE
     18. UNEVICTABLE
+    19. HWPOISON
     20. NOPAGE
+    21. KSM
 
 Short descriptions to the page flags:
 
@@ -86,9 +88,15 @@ Short descriptions to the page flags:
 17. HUGE
     this is an integral part of a HugeTLB page
 
+19. HWPOISON
+    hardware detected memory corruption on this page: don't touch the data!
+
 20. NOPAGE
     no page frame exists at the requested address
 
+21. KSM
+    identical memory pages dynamically shared between one or more processes
+
     [IO related page flags]
  1. ERROR     IO error occurred
  3. UPTODATE  page has up-to-date data
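
The new bits can be checked from user space by reading the 64-bit flags word
for a page frame out of /proc/kpageflags (root only); a minimal sketch, with
the pfn used here being just a placeholder:

  #include <fcntl.h>
  #include <stdio.h>
  #include <stdint.h>
  #include <unistd.h>

  #define KPF_HWPOISON   19
  #define KPF_KSM        21

  int main(void)
  {
          unsigned long pfn = 0x1234;     /* placeholder pfn */
          uint64_t flags;
          int fd = open("/proc/kpageflags", O_RDONLY);

          if (fd < 0)
                  return 1;
          /* one 64-bit flags word per pfn */
          if (pread(fd, &flags, 8, pfn * 8) != 8)
                  return 1;

          printf("pfn %#lx: hwpoison=%d ksm=%d\n", pfn,
                 !!(flags & (1ULL << KPF_HWPOISON)),
                 !!(flags & (1ULL << KPF_KSM)));
          close(fd);
          return 0;
  }
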
index 9210d6fa50247a687e0e51e242c15e3ecbbaad28..299b91c7609f2a5d8cd439a3d71d0f2694fea1a8 100644 (file)
@@ -24,8 +24,8 @@ General Remarks
 
 Valid addresses are 0x18, 0x19, 0x1a, and 0x1b.
 However, the device cannot be detected without writing to the i2c bus, so no
-detection is done.
-You should force the device address.
+detection is done. You should instantiate the device explicitly.
 
-$ modprobe ds2482 force=0,0x18
+$ modprobe ds2482
+$ echo ds2482 0x18 > /sys/bus/i2c/devices/i2c-0/new_device
 
index c450f3abb8c9b5f1fb037b4788665419c0e1238b..e1da925b38c867d8ec46e0611d48489c6a993e6b 100644 (file)
@@ -257,6 +257,13 @@ W: http://www.lesswatts.org/projects/acpi/
 S:     Supported
 F:     drivers/acpi/fan.c
 
+ACPI PROCESSOR AGGREGATOR DRIVER
+M:     Shaohua Li <shaohua.li@intel.com>
+L:     linux-acpi@vger.kernel.org
+W:     http://www.lesswatts.org/projects/acpi/
+S:     Supported
+F:     drivers/acpi/acpi_pad.c
+
 ACPI THERMAL DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
@@ -646,24 +653,24 @@ ARM/INTEL IOP32X ARM ARCHITECTURE
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 M:     Dan Williams <dan.j.williams@intel.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 
 ARM/INTEL IOP33X ARM ARCHITECTURE
 M:     Dan Williams <dan.j.williams@intel.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 
 ARM/INTEL IOP13XX ARM ARCHITECTURE
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 M:     Dan Williams <dan.j.williams@intel.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 
 ARM/INTEL IQ81342EX MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 M:     Dan Williams <dan.j.williams@intel.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 
 ARM/INTEL IXP2000 ARM ARCHITECTURE
 M:     Lennert Buytenhek <kernel@wantstofly.org>
@@ -691,7 +698,7 @@ ARM/INTEL XSC3 (MANZANO) ARM CORE
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 M:     Dan Williams <dan.j.williams@intel.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 
 ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
@@ -741,23 +748,36 @@ M:        Dirk Opfer <dirk@opfer-online.de>
 S:     Maintained
 
 ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
-P:     Marek Vasut
-M:     marek.vasut@gmail.com
+M:     Marek Vasut <marek.vasut@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org
 W:     http://hackndev.com
 S:     Maintained
+F:     arch/arm/mach-pxa/include/mach/palmtx.h
+F:     arch/arm/mach-pxa/palmtx.c
+F:     arch/arm/mach-pxa/include/mach/palmt5.h
+F:     arch/arm/mach-pxa/palmt5.c
+F:     arch/arm/mach-pxa/include/mach/palmld.h
+F:     arch/arm/mach-pxa/palmld.c
+F:     arch/arm/mach-pxa/include/mach/palmte2.h
+F:     arch/arm/mach-pxa/palmte2.c
+F:     arch/arm/mach-pxa/include/mach/palmtc.h
+F:     arch/arm/mach-pxa/palmtc.c
 
 ARM/PALM TREO 680 SUPPORT
 M:     Tomas Cech <sleep_walker@suse.cz>
 L:     linux-arm-kernel@lists.infradead.org
 W:     http://hackndev.com
 S:     Maintained
+F:     arch/arm/mach-pxa/include/mach/treo680.h
+F:     arch/arm/mach-pxa/treo680.c
 
 ARM/PALMZ72 SUPPORT
 M:     Sergey Lapin <slapin@ossfans.org>
 L:     linux-arm-kernel@lists.infradead.org
 W:     http://hackndev.com
 S:     Maintained
+F:     arch/arm/mach-pxa/include/mach/palmz72.h
+F:     arch/arm/mach-pxa/palmz72.c
 
 ARM/PLEB SUPPORT
 M:     Peter Chubb <pleb@gelato.unsw.edu.au>
@@ -2682,7 +2702,7 @@ F:        include/linux/intel-iommu.h
 
 INTEL IOP-ADMA DMA DRIVER
 M:     Dan Williams <dan.j.williams@intel.com>
-S:     Supported
+S:     Maintained
 F:     drivers/dma/iop-adma.c
 
 INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
@@ -3623,6 +3643,13 @@ F:       Documentation/blockdev/nbd.txt
 F:     drivers/block/nbd.c
 F:     include/linux/nbd.h
 
+NETWORK DROP MONITOR
+M:     Neil Horman <nhorman@tuxdriver.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+W:     https://fedorahosted.org/dropwatch/
+F:     net/core/drop_monitor.c
+
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
 L:     netdev@vger.kernel.org
@@ -3953,6 +3980,7 @@ F:        drivers/block/paride/
 PARISC ARCHITECTURE
 M:     Kyle McMartin <kyle@mcmartin.ca>
 M:     Helge Deller <deller@gmx.de>
+M:     "James E.J. Bottomley" <jejb@parisc-linux.org>
 L:     linux-parisc@vger.kernel.org
 W:     http://www.parisc-linux.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git
index 00444a8e304f04b67c9de2f29ea543912fd67f5d..e50569ab5fe8b656396afd38193fc9b847fe30b4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 32
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
index ef12794c3c68f7f2d6be4ec3dcda643540ed091d..8ba7044c554dc2d35b19bd2dbd8a98d6b3206162 100644 (file)
@@ -1032,6 +1032,7 @@ unsigned int sa1111_pll_clock(struct sa1111_dev *sadev)
 
        return __sa1111_pll_clock(sachip);
 }
+EXPORT_SYMBOL(sa1111_pll_clock);
 
 /**
  *     sa1111_select_audio_mode - select I2S or AC link mode
@@ -1059,6 +1060,7 @@ void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode)
 
        spin_unlock_irqrestore(&sachip->lock, flags);
 }
+EXPORT_SYMBOL(sa1111_select_audio_mode);
 
 /**
  *     sa1111_set_audio_rate - set the audio sample rate
@@ -1083,6 +1085,7 @@ int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate)
 
        return 0;
 }
+EXPORT_SYMBOL(sa1111_set_audio_rate);
 
 /**
  *     sa1111_get_audio_rate - get the audio sample rate
@@ -1100,6 +1103,7 @@ int sa1111_get_audio_rate(struct sa1111_dev *sadev)
 
        return __sa1111_pll_clock(sachip) / (256 * div);
 }
+EXPORT_SYMBOL(sa1111_get_audio_rate);
 
 void sa1111_set_io_dir(struct sa1111_dev *sadev,
                       unsigned int bits, unsigned int dir,
@@ -1128,6 +1132,7 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev,
        MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16);
        spin_unlock_irqrestore(&sachip->lock, flags);
 }
+EXPORT_SYMBOL(sa1111_set_io_dir);
 
 void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
 {
@@ -1142,6 +1147,7 @@ void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
        MODIFY_BITS(gpio + SA1111_GPIO_PCDWR, (bits >> 16) & 255, v >> 16);
        spin_unlock_irqrestore(&sachip->lock, flags);
 }
+EXPORT_SYMBOL(sa1111_set_io);
 
 void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
 {
@@ -1156,6 +1162,7 @@ void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned i
        MODIFY_BITS(gpio + SA1111_GPIO_PCSSR, (bits >> 16) & 255, v >> 16);
        spin_unlock_irqrestore(&sachip->lock, flags);
 }
+EXPORT_SYMBOL(sa1111_set_sleep_io);
 
 /*
  * Individual device operations.
@@ -1176,6 +1183,7 @@ void sa1111_enable_device(struct sa1111_dev *sadev)
        sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
        spin_unlock_irqrestore(&sachip->lock, flags);
 }
+EXPORT_SYMBOL(sa1111_enable_device);
 
 /**
  *     sa1111_disable_device - disable an on-chip SA1111 function block
@@ -1192,6 +1200,7 @@ void sa1111_disable_device(struct sa1111_dev *sadev)
        sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
        spin_unlock_irqrestore(&sachip->lock, flags);
 }
+EXPORT_SYMBOL(sa1111_disable_device);
 
 /*
  *     SA1111 "Register Access Bus."
@@ -1259,17 +1268,20 @@ struct bus_type sa1111_bus_type = {
        .suspend        = sa1111_bus_suspend,
        .resume         = sa1111_bus_resume,
 };
+EXPORT_SYMBOL(sa1111_bus_type);
 
 int sa1111_driver_register(struct sa1111_driver *driver)
 {
        driver->drv.bus = &sa1111_bus_type;
        return driver_register(&driver->drv);
 }
+EXPORT_SYMBOL(sa1111_driver_register);
 
 void sa1111_driver_unregister(struct sa1111_driver *driver)
 {
        driver_unregister(&driver->drv);
 }
+EXPORT_SYMBOL(sa1111_driver_unregister);
 
 static int __init sa1111_init(void)
 {
@@ -1290,16 +1302,3 @@ module_exit(sa1111_exit);
 
 MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(sa1111_select_audio_mode);
-EXPORT_SYMBOL(sa1111_set_audio_rate);
-EXPORT_SYMBOL(sa1111_get_audio_rate);
-EXPORT_SYMBOL(sa1111_set_io_dir);
-EXPORT_SYMBOL(sa1111_set_io);
-EXPORT_SYMBOL(sa1111_set_sleep_io);
-EXPORT_SYMBOL(sa1111_enable_device);
-EXPORT_SYMBOL(sa1111_disable_device);
-EXPORT_SYMBOL(sa1111_pll_clock);
-EXPORT_SYMBOL(sa1111_bus_type);
-EXPORT_SYMBOL(sa1111_driver_register);
-EXPORT_SYMBOL(sa1111_driver_unregister);
index 1502957db2c30eb801d8480703e7b318cf4b85b2..f6aed7747d4df59f15aa7af7fe0ac4a5d7021a43 100644 (file)
@@ -90,7 +90,6 @@ CONFIG_ARCH_SA1100=y
 # CONFIG_SA1100_COLLIE is not set
 # CONFIG_SA1100_H3100 is not set
 CONFIG_SA1100_H3600=y
-CONFIG_SA1100_H3XXX=y
 # CONFIG_SA1100_BADGE4 is not set
 # CONFIG_SA1100_JORNADA720 is not set
 # CONFIG_SA1100_HACKKIT is not set
index eec488298267551fb2755c68c88b5880eb91cbd2..ed2d59d01829f88cd9e7f7a4c76377902ad9f968 100644 (file)
@@ -1,29 +1,26 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24-rc5
-# Wed Dec 12 16:11:27 2007
+# Linux kernel version: 2.6.31-rc6
+# Tue Aug 18 13:41:41 2009
 #
 CONFIG_ARM=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
-# CONFIG_GENERIC_GPIO is not set
-# CONFIG_GENERIC_TIME is not set
-# CONFIG_GENERIC_CLOCKEVENTS is not set
+CONFIG_GENERIC_GPIO=y
 CONFIG_MMU=y
-# CONFIG_NO_IOPORT is not set
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 CONFIG_HARDIRQS_SW_RESEND=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_VECTORS_BASE=0xffff0000
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
 
 #
 # General setup
@@ -40,21 +37,39 @@ CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_BSD_PROCESS_ACCT=y
 # CONFIG_BSD_PROCESS_ACCT_V3 is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_CLASSIC_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
 # CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 # CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
@@ -67,29 +82,48 @@ CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Performance Counters
+#
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
 CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -103,6 +137,7 @@ CONFIG_IOSCHED_CFQ=y
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
 
 #
 # System Type
@@ -112,15 +147,15 @@ CONFIG_DEFAULT_IOSCHED="cfq"
 # CONFIG_ARCH_REALVIEW is not set
 # CONFIG_ARCH_VERSATILE is not set
 # CONFIG_ARCH_AT91 is not set
-# CONFIG_ARCH_CLPS7500 is not set
 # CONFIG_ARCH_CLPS711X is not set
-# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_GEMINI is not set
 # CONFIG_ARCH_EBSA110 is not set
 # CONFIG_ARCH_EP93XX is not set
 # CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
 # CONFIG_ARCH_NETX is not set
 # CONFIG_ARCH_H720X is not set
-# CONFIG_ARCH_IMX is not set
 # CONFIG_ARCH_IOP13XX is not set
 # CONFIG_ARCH_IOP32X is not set
 CONFIG_ARCH_IOP33X=y
@@ -128,19 +163,26 @@ CONFIG_ARCH_IOP33X=y
 # CONFIG_ARCH_IXP2000 is not set
 # CONFIG_ARCH_IXP4XX is not set
 # CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
 # CONFIG_ARCH_KS8695 is not set
 # CONFIG_ARCH_NS9XXX is not set
-# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_W90X900 is not set
 # CONFIG_ARCH_PNX4008 is not set
 # CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
 # CONFIG_ARCH_RPC is not set
 # CONFIG_ARCH_SA1100 is not set
 # CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
 # CONFIG_ARCH_SHARK is not set
 # CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
 # CONFIG_ARCH_DAVINCI is not set
 # CONFIG_ARCH_OMAP is not set
-CONFIG_IOP3XX_ATU=y
 
 #
 # IOP33x Implementation Options
@@ -151,14 +193,6 @@ CONFIG_IOP3XX_ATU=y
 #
 CONFIG_ARCH_IQ80331=y
 CONFIG_MACH_IQ80332=y
-
-#
-# Boot options
-#
-
-#
-# Power management
-#
 CONFIG_PLAT_IOP=y
 
 #
@@ -168,6 +202,7 @@ CONFIG_CPU_32=y
 CONFIG_CPU_XSCALE=y
 CONFIG_CPU_32v5=y
 CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_PABRT_NOIFAR=y
 CONFIG_CPU_CACHE_VIVT=y
 CONFIG_CPU_TLB_V4WBI=y
 CONFIG_CPU_CP15=y
@@ -178,7 +213,6 @@ CONFIG_CPU_CP15_MMU=y
 #
 # CONFIG_ARM_THUMB is not set
 # CONFIG_CPU_DCACHE_DISABLE is not set
-# CONFIG_OUTER_CACHE is not set
 # CONFIG_IWMMXT is not set
 CONFIG_XSCALE_PMU=y
 
@@ -190,40 +224,54 @@ CONFIG_PCI_SYSCALL=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 CONFIG_PCI_LEGACY=y
 # CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
 # CONFIG_PCCARD is not set
 
 #
 # Kernel Features
 #
-# CONFIG_TICK_ONESHOT is not set
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
 # CONFIG_PREEMPT is not set
 CONFIG_HZ=100
 # CONFIG_AEABI is not set
-# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4096
-# CONFIG_RESOURCES_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-CONFIG_BOUNCE=y
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
 
 #
 # Boot options
 #
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc"
+CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc iop3xx_init_atu=y"
 # CONFIG_XIP_KERNEL is not set
 # CONFIG_KEXEC is not set
 
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
 #
 # Floating point emulation
 #
@@ -239,6 +287,8 @@ CONFIG_FPE_NWFPE=y
 # Userspace binary formats
 #
 CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
 CONFIG_BINFMT_AOUT=y
 # CONFIG_BINFMT_MISC is not set
 # CONFIG_ARTHUR is not set
@@ -247,11 +297,7 @@ CONFIG_BINFMT_AOUT=y
 # Power management options
 #
 # CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
-
-#
-# Networking
-#
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
 CONFIG_NET=y
 
 #
@@ -264,6 +310,7 @@ CONFIG_XFRM=y
 # CONFIG_XFRM_USER is not set
 # CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -310,6 +357,7 @@ CONFIG_IPV6=y
 # CONFIG_IPV6_SIT is not set
 # CONFIG_IPV6_TUNNEL is not set
 # CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
 # CONFIG_IP_DCCP is not set
@@ -317,6 +365,7 @@ CONFIG_IPV6=y
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 # CONFIG_LLC2 is not set
@@ -326,24 +375,31 @@ CONFIG_IPV6=y
 # CONFIG_LAPB is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
 # CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
 
 #
 # Network testing
 #
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
 
 #
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
 #
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+CONFIG_MAC80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
 
@@ -357,7 +413,9 @@ CONFIG_IPV6=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
@@ -366,12 +424,14 @@ CONFIG_MTD=y
 # CONFIG_MTD_DEBUG is not set
 # CONFIG_MTD_CONCAT is not set
 CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
 CONFIG_MTD_REDBOOT_PARTS=y
 CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
 CONFIG_MTD_REDBOOT_PARTS_READONLY=y
 # CONFIG_MTD_CMDLINE_PARTS is not set
 # CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
 
 #
 # User Modules And Translation Layers
@@ -421,9 +481,7 @@ CONFIG_MTD_CFI_UTIL=y
 #
 # CONFIG_MTD_COMPLEX_MAPPINGS is not set
 CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x0
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=1
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
 # CONFIG_MTD_ARM_INTEGRATOR is not set
 # CONFIG_MTD_INTEL_VR_NOR is not set
 # CONFIG_MTD_PLATRAM is not set
@@ -446,6 +504,11 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=1
 # CONFIG_MTD_NAND is not set
 # CONFIG_MTD_ONENAND is not set
 
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
 #
 # UBI - Unsorted block images
 #
@@ -463,14 +526,29 @@ CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
 CONFIG_MISC_DEVICES=y
 # CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
+CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
 #
@@ -492,10 +570,6 @@ CONFIG_BLK_DEV_SD=y
 # CONFIG_BLK_DEV_SR is not set
 CONFIG_CHR_DEV_SG=y
 # CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
 # CONFIG_SCSI_MULTI_LUN is not set
 # CONFIG_SCSI_CONSTANTS is not set
 # CONFIG_SCSI_LOGGING is not set
@@ -512,6 +586,8 @@ CONFIG_SCSI_WAIT_SCAN=m
 # CONFIG_SCSI_SRP_ATTRS is not set
 CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
 # CONFIG_BLK_DEV_3W_XXXX_RAID is not set
 # CONFIG_SCSI_3W_9XXX is not set
 # CONFIG_SCSI_ACARD is not set
@@ -520,13 +596,18 @@ CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_SCSI_AIC7XXX_OLD is not set
 # CONFIG_SCSI_AIC79XX is not set
 # CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
 # CONFIG_SCSI_DPT_I2O is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_ARCMSR is not set
 # CONFIG_MEGARAID_NEWGEN is not set
 # CONFIG_MEGARAID_LEGACY is not set
 # CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
 # CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
 # CONFIG_SCSI_DMX3191D is not set
 # CONFIG_SCSI_FUTURE_DOMAIN is not set
 # CONFIG_SCSI_IPS is not set
@@ -543,15 +624,18 @@ CONFIG_SCSI_LOWLEVEL=y
 # CONFIG_SCSI_NSP32 is not set
 # CONFIG_SCSI_DEBUG is not set
 # CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
 # CONFIG_ATA is not set
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
 CONFIG_MD_LINEAR=y
 CONFIG_MD_RAID0=y
 CONFIG_MD_RAID1=y
 # CONFIG_MD_RAID10 is not set
 CONFIG_MD_RAID456=y
-# CONFIG_MD_RAID5_RESHAPE is not set
+CONFIG_MD_RAID6_PQ=y
 # CONFIG_MD_MULTIPATH is not set
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=y
@@ -568,27 +652,34 @@ CONFIG_BLK_DEV_DM=y
 #
 # IEEE 1394 (FireWire) support
 #
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
+#
 # CONFIG_FIREWIRE is not set
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
 # CONFIG_EQUALIZER is not set
 # CONFIG_TUN is not set
 # CONFIG_VETH is not set
-# CONFIG_IP1000 is not set
 # CONFIG_ARCNET is not set
 # CONFIG_NET_ETHERNET is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
-CONFIG_E1000_NAPI=y
-# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
 # CONFIG_NS83820 is not set
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
@@ -596,23 +687,34 @@ CONFIG_E1000_NAPI=y
 # CONFIG_SIS190 is not set
 # CONFIG_SKGE is not set
 # CONFIG_SKY2 is not set
-# CONFIG_SK98LIN is not set
 # CONFIG_VIA_VELOCITY is not set
 # CONFIG_TIGON3 is not set
 # CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
 # CONFIG_QLA3XXX is not set
 # CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_JME is not set
 CONFIG_NETDEV_10000=y
 # CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
 # CONFIG_CHELSIO_T3 is not set
+# CONFIG_ENIC is not set
 # CONFIG_IXGBE is not set
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
 # CONFIG_MYRI10GE is not set
 # CONFIG_NETXEN_NIC is not set
 # CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
 # CONFIG_MLX4_CORE is not set
 # CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
+# CONFIG_QLGE is not set
+# CONFIG_SFC is not set
+# CONFIG_BE2NET is not set
 # CONFIG_TR is not set
 
 #
@@ -620,13 +722,16 @@ CONFIG_NETDEV_10000=y
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
 # CONFIG_PPP is not set
 # CONFIG_SLIP is not set
 # CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
@@ -670,10 +775,13 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
 # Character devices
 #
 CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
 CONFIG_VT_CONSOLE=y
 CONFIG_HW_CONSOLE=y
 # CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
 # CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
 
 #
 # Serial drivers
@@ -692,11 +800,12 @@ CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
-# CONFIG_NVRAM is not set
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_RAW_DRIVER is not set
@@ -705,16 +814,14 @@ CONFIG_DEVPORT=y
 CONFIG_I2C=y
 CONFIG_I2C_BOARDINFO=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
 
 #
-# I2C Algorithms
+# I2C Hardware Bus support
 #
-# CONFIG_I2C_ALGOBIT is not set
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
 
 #
-# I2C Hardware Bus support
+# PC SMBus host controller drivers
 #
 # CONFIG_I2C_ALI1535 is not set
 # CONFIG_I2C_ALI1563 is not set
@@ -722,50 +829,82 @@ CONFIG_I2C_CHARDEV=y
 # CONFIG_I2C_AMD756 is not set
 # CONFIG_I2C_AMD8111 is not set
 # CONFIG_I2C_I801 is not set
-# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_ISCH is not set
 # CONFIG_I2C_PIIX4 is not set
-CONFIG_I2C_IOP3XX=y
 # CONFIG_I2C_NFORCE2 is not set
-# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
-# CONFIG_I2C_PROSAVAGE is not set
-# CONFIG_I2C_SAVAGE4 is not set
-# CONFIG_I2C_SIMTEC is not set
 # CONFIG_I2C_SIS5595 is not set
 # CONFIG_I2C_SIS630 is not set
 # CONFIG_I2C_SIS96X is not set
-# CONFIG_I2C_TAOS_EVM is not set
-# CONFIG_I2C_STUB is not set
 # CONFIG_I2C_VIA is not set
 # CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+CONFIG_I2C_IOP3XX=y
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Graphics adapter I2C/DDC channel drivers
+#
 # CONFIG_I2C_VOODOO3 is not set
 
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
 #
 # Miscellaneous I2C Chip support
 #
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
 # CONFIG_DS1682 is not set
-# CONFIG_EEPROM_LEGACY is not set
 # CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_MAX6875 is not set
 # CONFIG_SENSORS_TSL2550 is not set
 # CONFIG_I2C_DEBUG_CORE is not set
 # CONFIG_I2C_DEBUG_ALGO is not set
 # CONFIG_I2C_DEBUG_BUS is not set
 # CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
 
 #
-# SPI support
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+
+#
+# SPI GPIO expanders:
 #
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
 # CONFIG_SENSORS_AD7418 is not set
 # CONFIG_SENSORS_ADM1021 is not set
 # CONFIG_SENSORS_ADM1025 is not set
@@ -773,13 +912,17 @@ CONFIG_HWMON=y
 # CONFIG_SENSORS_ADM1029 is not set
 # CONFIG_SENSORS_ADM1031 is not set
 # CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
 # CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
 # CONFIG_SENSORS_ATXP1 is not set
 # CONFIG_SENSORS_DS1621 is not set
 # CONFIG_SENSORS_I5K_AMB is not set
 # CONFIG_SENSORS_F71805F is not set
 # CONFIG_SENSORS_F71882FG is not set
 # CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
 # CONFIG_SENSORS_GL518SM is not set
 # CONFIG_SENSORS_GL520SM is not set
 # CONFIG_SENSORS_IT87 is not set
@@ -794,16 +937,23 @@ CONFIG_HWMON=y
 # CONFIG_SENSORS_LM90 is not set
 # CONFIG_SENSORS_LM92 is not set
 # CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
 # CONFIG_SENSORS_MAX1619 is not set
 # CONFIG_SENSORS_MAX6650 is not set
 # CONFIG_SENSORS_PC87360 is not set
 # CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
 # CONFIG_SENSORS_SIS5595 is not set
 # CONFIG_SENSORS_DME1737 is not set
 # CONFIG_SENSORS_SMSC47M1 is not set
 # CONFIG_SENSORS_SMSC47M192 is not set
 # CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
 # CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
 # CONFIG_SENSORS_VIA686A is not set
 # CONFIG_SENSORS_VT1211 is not set
 # CONFIG_SENSORS_VT8231 is not set
@@ -812,28 +962,38 @@ CONFIG_HWMON=y
 # CONFIG_SENSORS_W83792D is not set
 # CONFIG_SENSORS_W83793 is not set
 # CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
 # CONFIG_SENSORS_W83627HF is not set
 # CONFIG_SENSORS_W83627EHF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
 # CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-CONFIG_DAB=y
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_MEDIA_SUPPORT is not set
 
 #
 # Graphics support
@@ -854,15 +1014,16 @@ CONFIG_DAB=y
 #
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 CONFIG_HID_SUPPORT=y
 CONFIG_HID=y
 # CONFIG_HID_DEBUG is not set
 # CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
@@ -870,14 +1031,21 @@ CONFIG_USB_ARCH_HAS_EHCI=y
 # CONFIG_USB is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# Enable Host or Gadget support to see Inventra options
 #
 
 #
-# USB Gadget Support
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
 #
 # CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
 # CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
 # CONFIG_NEW_LEDS is not set
 CONFIG_RTC_LIB=y
 # CONFIG_RTC_CLASS is not set
@@ -893,6 +1061,12 @@ CONFIG_DMA_ENGINE=y
 # DMA Clients
 #
 CONFIG_NET_DMA=y
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
@@ -901,10 +1075,11 @@ CONFIG_EXT2_FS=y
 # CONFIG_EXT2_FS_XATTR is not set
 # CONFIG_EXT2_FS_XIP is not set
 CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 CONFIG_EXT3_FS_XATTR=y
 # CONFIG_EXT3_FS_POSIX_ACL is not set
 # CONFIG_EXT3_FS_SECURITY is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_JBD=y
 CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
@@ -913,16 +1088,22 @@ CONFIG_FS_MBCACHE=y
 # CONFIG_XFS_FS is not set
 # CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
 # CONFIG_FUSE_FS is not set
 
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
 #
 # CD-ROM/DVD Filesystems
 #
@@ -941,15 +1122,13 @@ CONFIG_DNOTIFY=y
 #
 CONFIG_PROC_FS=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 # CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_HUGETLB_PAGE is not set
 # CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
 # CONFIG_HFS_FS is not set
@@ -959,29 +1138,31 @@ CONFIG_TMPFS=y
 # CONFIG_EFS_FS is not set
 # CONFIG_JFFS2_FS is not set
 CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
 # CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
 # CONFIG_HPFS_FS is not set
 # CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
+CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
 # CONFIG_NFSD_V3_ACL is not set
 # CONFIG_NFSD_V4 is not set
-# CONFIG_NFSD_TCP is not set
-CONFIG_ROOT_NFS=y
 CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_BIND34 is not set
 # CONFIG_RPCSEC_GSS_KRB5 is not set
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -1013,9 +1194,6 @@ CONFIG_MSDOS_PARTITION=y
 # CONFIG_SYSV68_PARTITION is not set
 # CONFIG_NLS is not set
 # CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 
 #
 # Kernel hacking
@@ -1023,6 +1201,7 @@ CONFIG_INSTRUMENTATION=y
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 # CONFIG_DEBUG_FS is not set
@@ -1030,10 +1209,17 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_SHIRQ is not set
 CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
 CONFIG_SCHED_DEBUG=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
 # CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
@@ -1047,16 +1233,41 @@ CONFIG_SCHED_DEBUG=y
 CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
 CONFIG_FRAME_POINTER=y
-# CONFIG_FORCED_INLINING is not set
 # CONFIG_BOOT_PRINTK_DELAY is not set
 # CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
 CONFIG_DEBUG_USER=y
 # CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
 CONFIG_DEBUG_LL=y
 # CONFIG_DEBUG_ICEDCC is not set
 
@@ -1065,24 +1276,117 @@ CONFIG_DEBUG_LL=y
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
 # CONFIG_SECURITY_FILE_CAPABILITIES is not set
 CONFIG_XOR_BLOCKS=y
 CONFIG_ASYNC_CORE=y
 CONFIG_ASYNC_MEMCPY=y
 CONFIG_ASYNC_XOR=y
-# CONFIG_CRYPTO is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
 
 #
 # Library routines
 #
+CONFIG_GENERIC_FIND_LAST_BIT=y
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 # CONFIG_CRC32 is not set
 # CONFIG_CRC7 is not set
 # CONFIG_LIBCRC32C is not set
 CONFIG_ZLIB_INFLATE=y
-CONFIG_PLIST=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
 CONFIG_HAS_IOMEM=y
 CONFIG_HAS_IOPORT=y
 CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
index a0e39d5d00c936a68ac4f99ca9a9b6559b61854f..234a3fc1c78ee43357dba69b4c850aa73c819541 100644 (file)
 #endif
 
 /*
- * Prefetch abort handler.  If the CPU has an IFAR use that, otherwise
- * use the address of the aborted instruction
+ *     Prefetch Abort Model
+ *     ====================
+ *
+ *     We have the following to choose from:
+ *       legacy        - no IFSR, no IFAR
+ *       v6            - ARMv6: IFSR, no IFAR
+ *       v7            - ARMv7: IFSR and IFAR
  */
+
 #undef CPU_PABORT_HANDLER
 #undef MULTI_PABORT
 
-#ifdef CONFIG_CPU_PABRT_IFAR
+#ifdef CONFIG_CPU_PABRT_LEGACY
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER legacy_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V6
 # ifdef CPU_PABORT_HANDLER
 #  define MULTI_PABORT 1
 # else
-#  define CPU_PABORT_HANDLER(reg, insn)        mrc p15, 0, reg, cr6, cr0, 2
+#  define CPU_PABORT_HANDLER v6_pabort
 # endif
 #endif
 
-#ifdef CONFIG_CPU_PABRT_NOIFAR
+#ifdef CONFIG_CPU_PABRT_V7
 # ifdef CPU_PABORT_HANDLER
 #  define MULTI_PABORT 1
 # else
-#  define CPU_PABORT_HANDLER(reg, insn)        mov reg, insn
+#  define CPU_PABORT_HANDLER v7_pabort
 # endif
 #endif
 
index 4b8e7f559929970b739af0f080b67a6c16247602..8d60ad267e3a0ab244f05866da614b13aeb4d05d 100644 (file)
@@ -215,6 +215,7 @@ extern int iop3xx_get_init_atu(void);
  * IOP3XX I/O and Mem space regions for PCI autoconfiguration
  */
 #define IOP3XX_PCI_LOWER_MEM_PA        0x80000000
+#define IOP3XX_PCI_MEM_WINDOW_SIZE     0x08000000
 
 #define IOP3XX_PCI_IO_WINDOW_SIZE      0x00010000
 #define IOP3XX_PCI_LOWER_IO_PA         0x90000000
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
new file mode 100644 (file)
index 0000000..59303e2
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * ARM-specific SMP header; this contains our implementation
+ * details.
+ */
+#ifndef __ASMARM_SMP_PLAT_H
+#define __ASMARM_SMP_PLAT_H
+
+#include <asm/cputype.h>
+
+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+       return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
+#endif
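
The helper above decodes the "maintenance broadcast" field, bits [15:12] of ID_MMFR3: a value of 2 or more means TLB maintenance operations are broadcast by the hardware, so software broadcasting via IPIs is unnecessary. A rough sketch of the kind of call site this supports (illustrative only, assuming the usual <linux/smp.h> and <asm/tlbflush.h> includes; ipi_flush_tlb_all and local_flush_tlb_all are the helpers visible in the smp.c hunk further down):

static void example_flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);  /* IPI every CPU */
        else
                local_flush_tlb_all();                    /* hardware broadcasts */
}
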
index 89f7eade20af86bda34d943963cef49d17b312f1..7020217fc49fb3919fda1bba63a7491c7b2dc85f 100644 (file)
  * Unimplemented (or alternatively implemented) syscalls
  */
 #define __IGNORE_fadvise64_64          1
+#define __IGNORE_migrate_pages         1
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_UNISTD_H */
index 0a2ba51cf35df1602db7d5a692c7c712a4bcf0d3..322410be573ca027bd8e03da67e134492423974d 100644 (file)
@@ -311,22 +311,16 @@ __pabt_svc:
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT
 
-       @
-       @ set args, then call main handler
-       @
-       @  r0 - address of faulting instruction
-       @  r1 - pointer to registers on stack
-       @
-#ifdef MULTI_PABORT
        mov     r0, r2                  @ pass address of aborted instruction.
+#ifdef MULTI_PABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4, #PROCESSOR_PABT_FUNC]
 #else
-       CPU_PABORT_HANDLER(r0, r2)
+       bl      CPU_PABORT_HANDLER
 #endif
        msr     cpsr_c, r9                      @ Maybe enable interrupts
-       mov     r1, sp                          @ regs
+       mov     r2, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler
 
        @
@@ -701,16 +695,16 @@ ENDPROC(__und_usr_unknown)
 __pabt_usr:
        usr_entry
 
-#ifdef MULTI_PABORT
        mov     r0, r2                  @ pass address of aborted instruction.
+#ifdef MULTI_PABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4, #PROCESSOR_PABT_FUNC]
 #else
-       CPU_PABORT_HANDLER(r0, r2)
+       bl      CPU_PABORT_HANDLER
 #endif
        enable_irq                              @ Enable interrupts
-       mov     r1, sp                          @ regs
+       mov     r2, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler
  UNWIND(.fnend         )
        /* fall through */
index 807cfebb0f44486d5b19b7869cf3b155e630ac7e..f0fe95b7085d8020682b297915ba1910af476828 100644 (file)
@@ -126,7 +126,7 @@ ENTRY(__gnu_mcount_nc)
        cmp r0, r2
        bne gnu_trace
        ldmia sp!, {r0-r3, ip, lr}
-       bx ip
+       mov pc, ip
 
 gnu_trace:
        ldr r1, [sp, #20]                       @ lr of instrumented routine
@@ -135,7 +135,7 @@ gnu_trace:
        mov lr, pc
        mov pc, r2
        ldmia sp!, {r0-r3, ip, lr}
-       bx ip
+       mov pc, ip
 
 ENTRY(mcount)
        stmdb sp!, {r0-r3, lr}
@@ -425,13 +425,6 @@ sys_mmap2:
 #endif
 ENDPROC(sys_mmap2)
 
-ENTRY(pabort_ifar)
-               mrc     p15, 0, r0, cr6, cr0, 2
-ENTRY(pabort_noifar)
-               mov     pc, lr
-ENDPROC(pabort_ifar)
-ENDPROC(pabort_noifar)
-
 #ifdef CONFIG_OABI_COMPAT
 
 /*
index 93ad576b2d74b311331542f6a79fc4054a0306d8..885a7214418d81877d3cec0eb2f3c4d2f0d07133 100644 (file)
@@ -13,6 +13,7 @@
 
 #define ATAG_CORE 0x54410001
 #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
+#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
 
        .align  2
        .type   __switch_data, %object
@@ -251,7 +252,8 @@ __vet_atags:
        bne     1f
 
        ldr     r5, [r2, #0]                    @ is first tag ATAG_CORE?
-       subs    r5, r5, #ATAG_CORE_SIZE
+       cmp     r5, #ATAG_CORE_SIZE
+       cmpne   r5, #ATAG_CORE_SIZE_EMPTY
        bne     1f
        ldr     r5, [r2, #4]
        ldr     r6, =ATAG_CORE
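
For reference, the two sizes compared above work out as follows: ATAG_CORE_SIZE = (2*4 + 3*4) >> 2 = 5 words (the two-word tag header plus the three-word core payload), while ATAG_CORE_SIZE_EMPTY = (2*4) >> 2 = 2 words (header only). Accepting either value lets __vet_atags pass an ATAG_CORE tag that carries no payload, which the previous subs/bne comparison against the full size rejected.
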
index e0d32770bb3d51c061554ba348c9c69beffd25ea..57162af53dc982cc0911e09537915eb1eddc1cbb 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
+#include <asm/smp_plat.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -153,7 +154,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
 {
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
@@ -200,7 +201,7 @@ int __cpuexit __cpu_disable(void)
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
@@ -214,7 +215,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void __cpuexit cpu_die(void)
+void __ref cpu_die(void)
 {
        unsigned int cpu = smp_processor_id();
 
@@ -586,12 +587,6 @@ struct tlb_args {
        unsigned long ta_end;
 };
 
-/* all SMP configurations have the extended CPUID registers */
-static inline int tlb_ops_need_broadcast(void)
-{
-       return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
-}
-
 static inline void ipi_flush_tlb_all(void *ignored)
 {
        local_flush_tlb_all();
index d8c88c633c6f0a880f5015992be409ffa68226de..a73a34dccf2a6de247f20bb5fc4e9622b03561a1 100644 (file)
@@ -166,10 +166,12 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
        clockevents_register_device(clk);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 /*
  * take a local timer down
  */
-void __cpuexit twd_timer_stop(void)
+void twd_timer_stop(void)
 {
        __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
 }
+#endif
index 57eb0f6f60053e6fd12fd48113e8a5fdb6b744b8..467b69ed1021b39fb87848b4009edfef9fb9f1d2 100644 (file)
@@ -418,12 +418,14 @@ static int bad_syscall(int n, struct pt_regs *regs)
 static inline void
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
+       struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *vma;
 
        if (end < start || flags)
                return;
 
-       vma = find_vma(current->active_mm, start);
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, start);
        if (vma && vma->vm_start < end) {
                if (start < vma->vm_start)
                        start = vma->vm_start;
@@ -432,6 +434,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 
                flush_cache_user_range(vma, start, end);
        }
+       up_read(&mm->mmap_sem);
 }
 
 /*
index 492c649f451ec357089b0dce22408d66788e0f8e..4b4f69251b313a07676a6dcef5b19418c9c3c379 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 
-#include <linux/amba/bus.h>
 #include <mach/csp/mm_addr.h>
 #include <mach/hardware.h>
 #include <asm/clkdev.h>
@@ -45,7 +44,6 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 #include <asm/mach/map.h>
-#include <asm/mach/mmc.h>
 
 #include <cfg_global.h>
 
index a1d5e7dac741004b076a416ac271f60aac921b30..52dd8046b3051eb05fefd2140586e3f19f9182a2 100644 (file)
@@ -35,7 +35,6 @@
 #include <mach/common.h>
 #include <mach/i2c.h>
 #include <mach/serial.h>
-#include <mach/common.h>
 #include <mach/mmc.h>
 #include <mach/nand.h>
 
index f1d72b225450951022aada4f49a50a46fb13d6cc..901cc205015e81a4149a4f56e87e056fe372951f 100644 (file)
@@ -486,7 +486,7 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
        return ret;
 }
 
-struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *sys)
+struct pci_bus * __init pci_v3_scan_bus(int nr, struct pci_sys_data *sys)
 {
        return pci_scan_bus(sys->busnr, &pci_v3_ops, sys);
 }
index abd9eb49f1033773dd0ca92cb02b7ce51842cda6..941f363aca56411bb26505bb776ceb3772dbbfe9 100644 (file)
@@ -31,7 +31,5 @@
 #define IOP32X_MAX_RAM_SIZE            0x40000000UL
 #define IOP3XX_MAX_RAM_SIZE            IOP32X_MAX_RAM_SIZE
 #define IOP3XX_PCI_LOWER_MEM_BA        0x80000000
-#define IOP32X_PCI_MEM_WINDOW_SIZE     0x04000000
-#define IOP3XX_PCI_MEM_WINDOW_SIZE     IOP32X_PCI_MEM_WINDOW_SIZE
 
 #endif
index 24567316ec88cc01646f61422b9bb47d2820dc57..a89c0a234bffff05baee92e08eb23e6a703a0103 100644 (file)
@@ -36,8 +36,6 @@
 #define IOP33X_MAX_RAM_SIZE            0x80000000UL
 #define IOP3XX_MAX_RAM_SIZE            IOP33X_MAX_RAM_SIZE
 #define IOP3XX_PCI_LOWER_MEM_BA        (PHYS_OFFSET + IOP33X_MAX_RAM_SIZE)
-#define IOP33X_PCI_MEM_WINDOW_SIZE     0x08000000
-#define IOP3XX_PCI_MEM_WINDOW_SIZE     IOP33X_PCI_MEM_WINDOW_SIZE
 
 
 #endif
index 44ed20d4a388a718bad140aeeabbfaeb70a1a04e..cf81cbc57544552970e88986efed07ad71f5c336 100644 (file)
@@ -195,7 +195,7 @@ static int clk_debugfs_open(struct inode *inode, struct file *file)
        return single_open(file, clk_debugfs_show, NULL);
 }
 
-static struct file_operations clk_debugfs_operations = {
+static const struct file_operations clk_debugfs_operations = {
        .open = clk_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
index 4ef26faf083e6d70bc5ae44c6587df28b7eca3c6..e5dcdf764c91888af5fa19fe93589dcd37e9931c 100644 (file)
@@ -38,7 +38,7 @@ static struct omap_id omap_ids[] __initdata = {
        { .jtag_id = 0xb574, .die_rev = 0x2, .omap_id = 0x03310315, .type = 0x03100000},
        { .jtag_id = 0x355f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300100},
        { .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300300},
-       { .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320500, .type = 0x08500000},
+       { .jtag_id = 0xb62c, .die_rev = 0x1, .omap_id = 0x03320500, .type = 0x08500000},
        { .jtag_id = 0xb470, .die_rev = 0x0, .omap_id = 0x03310100, .type = 0x15100000},
        { .jtag_id = 0xb576, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x16100000},
        { .jtag_id = 0xb576, .die_rev = 0x2, .omap_id = 0x03320100, .type = 0x16110000},
index bd57ec76dc5eedde9f672d34456ed843e0cb0f64..efaf053eba85371a5d59ed1c69f06b2af3436471 100644 (file)
@@ -54,7 +54,7 @@
 
 #define TWL4030_MSECURE_GPIO 22
 
-static int sdp3430_keymap[] = {
+static int board_keymap[] = {
        KEY(0, 0, KEY_LEFT),
        KEY(0, 1, KEY_RIGHT),
        KEY(0, 2, KEY_A),
@@ -88,11 +88,15 @@ static int sdp3430_keymap[] = {
        0
 };
 
+static struct matrix_keymap_data board_map_data = {
+       .keymap                 = board_keymap,
+       .keymap_size            = ARRAY_SIZE(board_keymap),
+};
+
 static struct twl4030_keypad_data sdp3430_kp_data = {
+       .keymap_data    = &board_map_data,
        .rows           = 5,
        .cols           = 6,
-       .keymap         = sdp3430_keymap,
-       .keymapsize     = ARRAY_SIZE(sdp3430_keymap),
        .rep            = 1,
 };
 
index ec6854cbdd9fcb968c5bbdb3ec3e4da0361b1f47..d110a7fdfbd800949aeac4e41a9a93fac121d2c6 100644 (file)
@@ -80,7 +80,7 @@ static struct platform_device ldp_smsc911x_device = {
        },
 };
 
-static int ldp_twl4030_keymap[] = {
+static int board_keymap[] = {
        KEY(0, 0, KEY_1),
        KEY(1, 0, KEY_2),
        KEY(2, 0, KEY_3),
@@ -101,11 +101,15 @@ static int ldp_twl4030_keymap[] = {
        0
 };
 
+static struct matrix_keymap_data board_map_data = {
+       .keymap                 = board_keymap,
+       .keymap_size            = ARRAY_SIZE(board_keymap),
+};
+
 static struct twl4030_keypad_data ldp_kp_twl4030_data = {
+       .keymap_data    = &board_map_data,
        .rows           = 6,
        .cols           = 6,
-       .keymap         = ldp_twl4030_keymap,
-       .keymapsize     = ARRAY_SIZE(ldp_twl4030_keymap),
        .rep            = 1,
 };
 
index 500c9956876de8c71fcaddd39d1dc2258719c6ca..70df6b4dbcd4c65c5d339c797a48c5263a876f79 100644 (file)
@@ -139,8 +139,13 @@ static struct gpio_led gpio_leds[];
 static int beagle_twl_gpio_setup(struct device *dev,
                unsigned gpio, unsigned ngpio)
 {
+       if (system_rev >= 0x20 && system_rev <= 0x34301000) {
+               omap_cfg_reg(AG9_34XX_GPIO23);
+               mmc[0].gpio_wp = 23;
+       } else {
+               omap_cfg_reg(AH8_34XX_GPIO29);
+       }
        /* gpio + 0 is "mmc0_cd" (input/IRQ) */
-       omap_cfg_reg(AH8_34XX_GPIO29);
        mmc[0].gpio_cd = gpio + 0;
        twl4030_mmc_init(mmc);
 
index d50b9be905802ce5e88bcef03d6dd44100b3ab50..e4ec0c591216c50a2d5d1fdf16c4cf36ecb53e15 100644 (file)
@@ -159,7 +159,7 @@ static struct twl4030_usb_data omap3evm_usb_data = {
        .usb_mode       = T2_USB_MODE_ULPI,
 };
 
-static int omap3evm_keymap[] = {
+static int board_keymap[] = {
        KEY(0, 0, KEY_LEFT),
        KEY(0, 1, KEY_RIGHT),
        KEY(0, 2, KEY_A),
@@ -178,11 +178,15 @@ static int omap3evm_keymap[] = {
        KEY(3, 3, KEY_P)
 };
 
+static struct matrix_keymap_data board_map_data = {
+       .keymap                 = board_keymap,
+       .keymap_size            = ARRAY_SIZE(board_keymap),
+};
+
 static struct twl4030_keypad_data omap3evm_kp_data = {
+       .keymap_data    = &board_map_data,
        .rows           = 4,
        .cols           = 4,
-       .keymap         = omap3evm_keymap,
-       .keymapsize     = ARRAY_SIZE(omap3evm_keymap),
        .rep            = 1,
 };
 
index b43f6e36b6d9d256768169b5760ab927a462cf5f..7f6bf8772af7e6c5ae961ce34d7ce7f041039c05 100644 (file)
@@ -133,7 +133,7 @@ static void __init pandora_keys_gpio_init(void)
        omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME);
 }
 
-static int pandora_keypad_map[] = {
+static int board_keymap[] = {
        /* col, row, code */
        KEY(0, 0, KEY_9),
        KEY(0, 1, KEY_0),
@@ -180,11 +180,15 @@ static int pandora_keypad_map[] = {
        KEY(5, 2, KEY_FN),
 };
 
+static struct matrix_keymap_data board_map_data = {
+       .keymap                 = board_keymap,
+       .keymap_size            = ARRAY_SIZE(board_keymap),
+};
+
 static struct twl4030_keypad_data pandora_kp_data = {
+       .keymap_data    = &board_map_data,
        .rows           = 8,
        .cols           = 6,
-       .keymap         = pandora_keypad_map,
-       .keymapsize     = ARRAY_SIZE(pandora_keypad_map),
        .rep            = 1,
 };
 
index e6e8290b7828613fd631abd6556f531f2c27852e..b45ad312c58732c480e6c5b96fb8a4b722e6f688 100644 (file)
@@ -36,7 +36,7 @@
 #define SYSTEM_REV_B_USES_VAUX3        0x1699
 #define SYSTEM_REV_S_USES_VAUX3 0x8
 
-static int rx51_keymap[] = {
+static int board_keymap[] = {
        KEY(0, 0, KEY_Q),
        KEY(0, 1, KEY_W),
        KEY(0, 2, KEY_E),
@@ -83,11 +83,15 @@ static int rx51_keymap[] = {
        KEY(0xff, 5, KEY_F10),
 };
 
+static struct matrix_keymap_data board_map_data = {
+       .keymap                 = board_keymap,
+       .keymap_size            = ARRAY_SIZE(board_keymap),
+};
+
 static struct twl4030_keypad_data rx51_kp_data = {
+       .keymap_data    = &board_map_data,
        .rows           = 8,
        .cols           = 8,
-       .keymap         = rx51_keymap,
-       .keymapsize     = ARRAY_SIZE(rx51_keymap),
        .rep            = 1,
 };
 
index 324009edbd53c7e711aad3ac4cbfc3df1ffb087a..b7b32208ced7768ff43253947916312515080917 100644 (file)
@@ -27,7 +27,7 @@
 #include "mmc-twl4030.h"
 
 /* Zoom2 has Qwerty keyboard*/
-static int zoom2_twl4030_keymap[] = {
+static int board_keymap[] = {
        KEY(0, 0, KEY_E),
        KEY(1, 0, KEY_R),
        KEY(2, 0, KEY_T),
@@ -82,11 +82,15 @@ static int zoom2_twl4030_keymap[] = {
        0
 };
 
+static struct matrix_keymap_data board_map_data = {
+       .keymap                 = board_keymap,
+       .keymap_size            = ARRAY_SIZE(board_keymap),
+};
+
 static struct twl4030_keypad_data zoom2_kp_twl4030_data = {
+       .keymap_data    = &board_map_data,
        .rows           = 8,
        .cols           = 8,
-       .keymap         = zoom2_twl4030_keymap,
-       .keymapsize     = ARRAY_SIZE(zoom2_twl4030_keymap),
        .rep            = 1,
 };
 
index fafcd32e6907a2a70fed302ef656c09f7112fe34..489556eecbd1189a7a61638deec0f33d3aff59ad 100644 (file)
@@ -338,6 +338,13 @@ static struct omap_clk omap34xx_clks[] = {
  */
 #define SDRC_MPURATE_LOOPS             96
 
+/*
+ * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define DPLL5_FREQ_FOR_USBHOST         120000000
+
 /**
  * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
  * @clk: struct clk * being enabled
@@ -1056,6 +1063,28 @@ void omap2_clk_prepare_for_reboot(void)
 #endif
 }
 
+static void omap3_clk_lock_dpll5(void)
+{
+       struct clk *dpll5_clk;
+       struct clk *dpll5_m2_clk;
+
+       dpll5_clk = clk_get(NULL, "dpll5_ck");
+       clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
+       clk_enable(dpll5_clk);
+
+       /* Enable autoidle to allow it to enter low power bypass */
+       omap3_dpll_allow_idle(dpll5_clk);
+
+       /* Program dpll5_m2_clk divider for no division */
+       dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
+       clk_enable(dpll5_m2_clk);
+       clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+
+       clk_disable(dpll5_m2_clk);
+       clk_disable(dpll5_clk);
+       return;
+}
+
 /* REVISIT: Move this init stuff out into clock.c */
 
 /*
@@ -1148,6 +1177,12 @@ int __init omap2_clk_init(void)
         */
        clk_enable_init_clocks();
 
+       /*
+        * Lock DPLL5 and put it in autoidle.
+        */
+       if (omap_rev() >= OMAP3430_REV_ES2_0)
+               omap3_clk_lock_dpll5();
+
        /* Avoid sleeping during omap2_clk_prepare_for_reboot() */
        /* REVISIT: not yet ready for 343x */
 #if 0
index e4ebd6d5313589d54bddc91858466ed4e0c097ee..4af76bb1003a779db9d08835de5322b4a5b90f29 100644 (file)
@@ -22,7 +22,6 @@
 #include <asm/atomic.h>
 
 #include "cm.h"
-#include "cm-regbits-4xxx.h"
 
 /* XXX move this to cm.h */
 /* MAX_MODULE_READY_TIME: max milliseconds for module to leave idle */
  */
 int omap4_cm_wait_idlest_ready(u32 prcm_mod, u8 prcm_dev_offs)
 {
-       int i = 0;
-       u8 cm_id;
-       u16 prcm_mod_offs;
-       u32 mask = OMAP4_PRCM_CM_CLKCTRL_IDLEST_MASK;
-
-       cm_id = prcm_mod >> OMAP4_PRCM_MOD_CM_ID_SHIFT;
-       prcm_mod_offs = prcm_mod & OMAP4_PRCM_MOD_OFFS_MASK;
-
-       while (((omap4_cm_read_mod_reg(cm_id, prcm_mod_offs, prcm_dev_offs,
-                                      OMAP4_CM_CLKCTRL_DREG) & mask) != 0) &&
-              (i++ < MAX_MODULE_READY_TIME))
-               udelay(1);
-
-       return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+       /* FIXME: Add clock manager related code */
+       return 0;
 }
 
index bcfcfc7fdb9bffe60d95a8865498cf72c7b5f0f3..faf7a1e0c525abacf0d62378687089216cf340f0 100644 (file)
@@ -355,29 +355,60 @@ static struct platform_device omap2_mcspi4 = {
 };
 #endif
 
-static void omap_init_mcspi(void)
+#ifdef CONFIG_ARCH_OMAP4
+static inline void omap4_mcspi_fixup(void)
 {
-       if (cpu_is_omap44xx()) {
-               omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE;
-               omap2_mcspi1_resources[0].end   = OMAP4_MCSPI1_BASE + 0xff;
-               omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE;
-               omap2_mcspi2_resources[0].end   = OMAP4_MCSPI2_BASE + 0xff;
-               omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE;
-               omap2_mcspi3_resources[0].end   = OMAP4_MCSPI3_BASE + 0xff;
-               omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE;
-               omap2_mcspi4_resources[0].end   = OMAP4_MCSPI4_BASE + 0xff;
-       }
-       platform_device_register(&omap2_mcspi1);
-       platform_device_register(&omap2_mcspi2);
+       omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE;
+       omap2_mcspi1_resources[0].end   = OMAP4_MCSPI1_BASE + 0xff;
+       omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE;
+       omap2_mcspi2_resources[0].end   = OMAP4_MCSPI2_BASE + 0xff;
+       omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE;
+       omap2_mcspi3_resources[0].end   = OMAP4_MCSPI3_BASE + 0xff;
+       omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE;
+       omap2_mcspi4_resources[0].end   = OMAP4_MCSPI4_BASE + 0xff;
+}
+#else
+static inline void omap4_mcspi_fixup(void)
+{
+}
+#endif
+
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
        defined(CONFIG_ARCH_OMAP4)
-       if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
-               platform_device_register(&omap2_mcspi3);
+static inline void omap2_mcspi3_init(void)
+{
+       platform_device_register(&omap2_mcspi3);
+}
+#else
+static inline void omap2_mcspi3_init(void)
+{
+}
 #endif
+
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-       if (cpu_is_omap343x() || cpu_is_omap44xx())
-               platform_device_register(&omap2_mcspi4);
+static inline void omap2_mcspi4_init(void)
+{
+       platform_device_register(&omap2_mcspi4);
+}
+#else
+static inline void omap2_mcspi4_init(void)
+{
+}
 #endif
+
+static void omap_init_mcspi(void)
+{
+       if (cpu_is_omap44xx())
+               omap4_mcspi_fixup();
+
+       platform_device_register(&omap2_mcspi1);
+       platform_device_register(&omap2_mcspi2);
+
+       if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
+               omap2_mcspi3_init();
+
+       if (cpu_is_omap343x() || cpu_is_omap44xx())
+               omap2_mcspi4_init();
 }
 
 #else
index 7574b6f20e8ed2314c9149dd4e3c7a3dba2907eb..e3a3bad1d84fee7e3c014119dff38a3228cf3f4b 100644 (file)
@@ -294,10 +294,10 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
        else if (cpu_is_omap34xx())
                hwmods = omap34xx_hwmods;
 
-       omap_hwmod_init(hwmods);
-       omap2_mux_init();
 #ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */
        /* The OPP tables have to be registered before a clk init */
+       omap_hwmod_init(hwmods);
+       omap2_mux_init();
        omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);
        pwrdm_init(powerdomains_omap);
        clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
index 2d9b5cc981cd8f27d37e6dfedf4dea43dfc10a1e..4a0e1cd5c1f4a4f5e65dbd2cbf06acefb6fceec6 100644 (file)
@@ -79,7 +79,7 @@ static int omap2_iommu_enable(struct iommu *obj)
                l = iommu_read_reg(obj, MMU_SYSSTATUS);
                if (l & MMU_SYS_RESETDONE)
                        break;
-       } while (time_after(jiffies, timeout));
+       } while (!time_after(jiffies, timeout));
 
        if (!(l & MMU_SYS_RESETDONE)) {
                dev_err(obj->dev, "can't take mmu out of reset\n");
index 6f71f3730c97789d461880912de06abc5861fa58..c035ad3426d0bf8998a9e981b2e327ab66da5ee7 100644 (file)
 #define MAILBOX_IRQ_NEWMSG(u)          (1 << (2 * (u)))
 #define MAILBOX_IRQ_NOTFULL(u)         (1 << (2 * (u) + 1))
 
+/* SYSCONFIG: register bit definition */
+#define AUTOIDLE       (1 << 0)
+#define SOFTRESET      (1 << 1)
+#define SMARTIDLE      (2 << 3)
+
+/* SYSSTATUS: register bit definition */
+#define RESETDONE      (1 << 0)
+
 #define MBOX_REG_SIZE                  0x120
 #define MBOX_NR_REGS                   (MBOX_REG_SIZE / sizeof(u32))
 
@@ -69,21 +77,33 @@ static inline void mbox_write_reg(u32 val, size_t ofs)
 /* Mailbox H/W preparations */
 static int omap2_mbox_startup(struct omap_mbox *mbox)
 {
-       unsigned int l;
+       u32 l;
+       unsigned long timeout;
 
        mbox_ick_handle = clk_get(NULL, "mailboxes_ick");
        if (IS_ERR(mbox_ick_handle)) {
-               printk("Could not get mailboxes_ick\n");
+               pr_err("Can't get mailboxes_ick\n");
                return -ENODEV;
        }
        clk_enable(mbox_ick_handle);
 
+       mbox_write_reg(SOFTRESET, MAILBOX_SYSCONFIG);
+       timeout = jiffies + msecs_to_jiffies(20);
+       do {
+               l = mbox_read_reg(MAILBOX_SYSSTATUS);
+               if (l & RESETDONE)
+                       break;
+       } while (!time_after(jiffies, timeout));
+
+       if (!(l & RESETDONE)) {
+               pr_err("Can't take mailbox out of reset\n");
+               return -ENODEV;
+       }
+
        l = mbox_read_reg(MAILBOX_REVISION);
        pr_info("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
 
-       /* set smart-idle & autoidle */
-       l = mbox_read_reg(MAILBOX_SYSCONFIG);
-       l |= 0x00000011;
+       l = SMARTIDLE | AUTOIDLE;
        mbox_write_reg(l, MAILBOX_SYSCONFIG);
 
        omap2_mbox_enable_irq(mbox, IRQ_RX);
@@ -156,6 +176,9 @@ static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
        u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
 
        mbox_write_reg(bit, p->irqstatus);
+
+       /* Flush posted write for irq status to avoid spurious interrupts */
+       mbox_read_reg(p->irqstatus);
 }
 
 static int omap2_mbox_is_irq(struct omap_mbox *mbox,
index 2daa595aaff40b7195772e5ce9aed761b20ee186..b5fac32aae704ea7515450779ffb1d7514b2a32e 100644 (file)
@@ -460,6 +460,8 @@ MUX_CFG_34XX("AF26_34XX_GPIO0", 0x1e0,
                OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
 MUX_CFG_34XX("AF22_34XX_GPIO9", 0xa18,
                OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
+MUX_CFG_34XX("AG9_34XX_GPIO23", 0x5ee,
+               OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
 MUX_CFG_34XX("AH8_34XX_GPIO29", 0x5fa,
                OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
 MUX_CFG_34XX("U8_34XX_GPIO54_OUT", 0x0b4,
@@ -472,6 +474,8 @@ MUX_CFG_34XX("G25_34XX_GPIO86_OUT", 0x0fc,
                OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
 MUX_CFG_34XX("AG4_34XX_GPIO134_OUT", 0x160,
                OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
+MUX_CFG_34XX("AF4_34XX_GPIO135_OUT", 0x162,
+               OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
 MUX_CFG_34XX("AE4_34XX_GPIO136_OUT", 0x164,
                OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
 MUX_CFG_34XX("AF6_34XX_GPIO140_UP", 0x16c,
index 1b4c1600f8d8c4c0f53ed5f70bd974d219777b83..2fc4d6abbd0a8a22a580542b93fb838a565b13b3 100644 (file)
@@ -541,7 +541,7 @@ static int __init pm_dbg_init(void)
                printk(KERN_ERR "%s: only OMAP3 supported\n", __func__);
                return -ENODEV;
        }
-               
+
        d = debugfs_create_dir("pm_debug", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);
@@ -551,7 +551,7 @@ static int __init pm_dbg_init(void)
        (void) debugfs_create_file("time", S_IRUGO,
                d, (void *)DEBUG_FILE_TIMERS, &debug_fops);
 
-       pwrdm_for_each(pwrdms_setup, (void *)d);
+       pwrdm_for_each_nolock(pwrdms_setup, (void *)d);
 
        pm_dbg_dir = debugfs_create_dir("registers", d);
        if (IS_ERR(pm_dbg_dir))
index 0ff5a6c53aa0e79fca0f58d90493273f7e1b123c..378c2f618358403fd90235ad630563378012f5aa 100644 (file)
@@ -51,97 +51,112 @@ static void (*_omap_sram_idle)(u32 *addr, int save_state);
 
 static struct powerdomain *mpu_pwrdm;
 
-/* PRCM Interrupt Handler for wakeups */
-static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
+/*
+ * PRCM Interrupt Handler Helper Function
+ *
+ * The purpose of this function is to clear any wake-up events latched
+ * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
+ * may occur whilst attempting to clear a PM_WKST_x register and thus
+ * set another bit in this register. A while loop is used to ensure
+ * that any peripheral wake-up events occurring while attempting to
+ * clear the PM_WKST_x are detected and cleared.
+ */
+static int prcm_clear_mod_irqs(s16 module, u8 regs)
 {
-       u32 wkst, irqstatus_mpu;
-       u32 fclk, iclk;
-
-       /* WKUP */
-       wkst = prm_read_mod_reg(WKUP_MOD, PM_WKST);
+       u32 wkst, fclk, iclk, clken;
+       u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
+       u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
+       u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
+       u16 grpsel_off = (regs == 3) ?
+               OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
+       int c = 0;
+
+       wkst = prm_read_mod_reg(module, wkst_off);
+       wkst &= prm_read_mod_reg(module, grpsel_off);
        if (wkst) {
-               iclk = cm_read_mod_reg(WKUP_MOD, CM_ICLKEN);
-               fclk = cm_read_mod_reg(WKUP_MOD, CM_FCLKEN);
-               cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_ICLKEN);
-               cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_FCLKEN);
-               prm_write_mod_reg(wkst, WKUP_MOD, PM_WKST);
-               while (prm_read_mod_reg(WKUP_MOD, PM_WKST))
-                       cpu_relax();
-               cm_write_mod_reg(iclk, WKUP_MOD, CM_ICLKEN);
-               cm_write_mod_reg(fclk, WKUP_MOD, CM_FCLKEN);
+               iclk = cm_read_mod_reg(module, iclk_off);
+               fclk = cm_read_mod_reg(module, fclk_off);
+               while (wkst) {
+                       clken = wkst;
+                       cm_set_mod_reg_bits(clken, module, iclk_off);
+                       /*
+                        * For USBHOST, we don't know whether HOST1 or
+                        * HOST2 woke us up, so enable both f-clocks
+                        */
+                       if (module == OMAP3430ES2_USBHOST_MOD)
+                               clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
+                       cm_set_mod_reg_bits(clken, module, fclk_off);
+                       prm_write_mod_reg(wkst, module, wkst_off);
+                       wkst = prm_read_mod_reg(module, wkst_off);
+                       c++;
+               }
+               cm_write_mod_reg(iclk, module, iclk_off);
+               cm_write_mod_reg(fclk, module, fclk_off);
        }
 
-       /* CORE */
-       wkst = prm_read_mod_reg(CORE_MOD, PM_WKST1);
-       if (wkst) {
-               iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
-               fclk = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
-               cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN1);
-               cm_set_mod_reg_bits(wkst, CORE_MOD, CM_FCLKEN1);
-               prm_write_mod_reg(wkst, CORE_MOD, PM_WKST1);
-               while (prm_read_mod_reg(CORE_MOD, PM_WKST1))
-                       cpu_relax();
-               cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN1);
-               cm_write_mod_reg(fclk, CORE_MOD, CM_FCLKEN1);
-       }
-       wkst = prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3);
-       if (wkst) {
-               iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
-               fclk = cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-               cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN3);
-               cm_set_mod_reg_bits(wkst, CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-               prm_write_mod_reg(wkst, CORE_MOD, OMAP3430ES2_PM_WKST3);
-               while (prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3))
-                       cpu_relax();
-               cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN3);
-               cm_write_mod_reg(fclk, CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-       }
+       return c;
+}
 
-       /* PER */
-       wkst = prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST);
-       if (wkst) {
-               iclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
-               fclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
-               cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_ICLKEN);
-               cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_FCLKEN);
-               prm_write_mod_reg(wkst, OMAP3430_PER_MOD, PM_WKST);
-               while (prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST))
-                       cpu_relax();
-               cm_write_mod_reg(iclk, OMAP3430_PER_MOD, CM_ICLKEN);
-               cm_write_mod_reg(fclk, OMAP3430_PER_MOD, CM_FCLKEN);
-       }
+static int _prcm_int_handle_wakeup(void)
+{
+       int c;
 
+       c = prcm_clear_mod_irqs(WKUP_MOD, 1);
+       c += prcm_clear_mod_irqs(CORE_MOD, 1);
+       c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
        if (omap_rev() > OMAP3430_REV_ES1_0) {
-               /* USBHOST */
-               wkst = prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKST);
-               if (wkst) {
-                       iclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-                                              CM_ICLKEN);
-                       fclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-                                              CM_FCLKEN);
-                       cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD,
-                                           CM_ICLKEN);
-                       cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD,
-                                           CM_FCLKEN);
-                       prm_write_mod_reg(wkst, OMAP3430ES2_USBHOST_MOD,
-                                         PM_WKST);
-                       while (prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-                                               PM_WKST))
-                               cpu_relax();
-                       cm_write_mod_reg(iclk, OMAP3430ES2_USBHOST_MOD,
-                                        CM_ICLKEN);
-                       cm_write_mod_reg(fclk, OMAP3430ES2_USBHOST_MOD,
-                                        CM_FCLKEN);
-               }
+               c += prcm_clear_mod_irqs(CORE_MOD, 3);
+               c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
        }
 
-       irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
-                                        OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
-       prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
-                         OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+       return c;
+}
+
+/*
+ * PRCM Interrupt Handler
+ *
+ * The PRM_IRQSTATUS_MPU register indicates if there are any pending
+ * interrupts from the PRCM for the MPU. These bits must be cleared in
+ * order to clear the PRCM interrupt. The PRCM interrupt handler is
+ * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
+ * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
+ * register indicates that a wake-up event is pending for the MPU and
+ * this bit can only be cleared if the all the wake-up events latched
+ * in the various PM_WKST_x registers have been cleared. The interrupt
+ * handler is implemented using a do-while loop so that if a wake-up
+ * event occurred during the processing of the prcm interrupt handler
+ * (setting a bit in the corresponding PM_WKST_x register and thus
+ * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
+ * this would be handled.
+ */
+static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
+{
+       u32 irqstatus_mpu;
+       int c = 0;
+
+       do {
+               irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
+                                       OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+
+               if (irqstatus_mpu & (OMAP3430_WKUP_ST | OMAP3430_IO_ST)) {
+                       c = _prcm_int_handle_wakeup();
+
+                       /*
+                        * Is the MPU PRCM interrupt handler racing with the
+                        * IVA2 PRCM interrupt handler ?
+                        */
+                       WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
+                            "but no wakeup sources are marked\n");
+               } else {
+                       /* XXX we need to expand our PRCM interrupt handler */
+                       WARN(1, "prcm: WARNING: PRCM interrupt received, but "
+                            "no code to handle it (%08x)\n", irqstatus_mpu);
+               }
+
+               prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
+                                       OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
-       while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET))
-               cpu_relax();
+       } while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET));
 
        return IRQ_HANDLED;
 }
@@ -624,6 +639,16 @@ static void __init prcm_setup_regs(void)
        prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN,
                          OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
 
+       /* Enable GPIO wakeups in PER */
+       prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 |
+                         OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 |
+                         OMAP3430_EN_GPIO6, OMAP3430_PER_MOD, PM_WKEN);
+       /* and allow them to wake up MPU */
+       prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 |
+                         OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 |
+                         OMAP3430_GRPSEL_GPIO6,
+                         OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
+
        /* Don't attach IVA interrupts */
        prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
        prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
index 2594cbff3947e12f8d37c966e22fd99a22c5a1d1..f00289abd30f6f51d015f6f3cf932d77eb868ffc 100644 (file)
@@ -273,35 +273,50 @@ struct powerdomain *pwrdm_lookup(const char *name)
 }
 
 /**
- * pwrdm_for_each - call function on each registered clockdomain
+ * pwrdm_for_each_nolock - call function on each registered powerdomain
  * @fn: callback function *
  *
  * Call the supplied function for each registered powerdomain.  The
  * callback function can return anything but 0 to bail out early from
- * the iterator.  The callback function is called with the pwrdm_rwlock
- * held for reading, so no powerdomain structure manipulation
- * functions should be called from the callback, although hardware
- * powerdomain control functions are fine.  Returns the last return
- * value of the callback function, which should be 0 for success or
- * anything else to indicate failure; or -EINVAL if the function
- * pointer is null.
+ * the iterator.  Returns the last return value of the callback function, which
+ * should be 0 for success or anything else to indicate failure; or -EINVAL if
+ * the function pointer is null.
  */
-int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
-                       void *user)
+int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
+                               void *user)
 {
        struct powerdomain *temp_pwrdm;
-       unsigned long flags;
        int ret = 0;
 
        if (!fn)
                return -EINVAL;
 
-       read_lock_irqsave(&pwrdm_rwlock, flags);
        list_for_each_entry(temp_pwrdm, &pwrdm_list, node) {
                ret = (*fn)(temp_pwrdm, user);
                if (ret)
                        break;
        }
+
+       return ret;
+}
+
+/**
+ * pwrdm_for_each - call function on each registered powerdomain
+ * @fn: callback function *
+ *
+ * This function is the same as 'pwrdm_for_each_nolock()', but keeps the
+ * &pwrdm_rwlock locked for reading, so no powerdomain structure manipulation
+ * functions should be called from the callback, although hardware powerdomain
+ * control functions are fine.
+ */
+int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
+                       void *user)
+{
+       unsigned long flags;
+       int ret;
+
+       read_lock_irqsave(&pwrdm_rwlock, flags);
+       ret = pwrdm_for_each_nolock(fn, user);
        read_unlock_irqrestore(&pwrdm_rwlock, flags);
 
        return ret;
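
As a usage sketch of the iterator documented above (the counting callback is hypothetical, not part of this patch, and assumes the header that declares struct powerdomain and the iterators): the callback runs once per registered powerdomain and may return non-zero to stop the walk early; the _nolock variant is for callers that already hold, or whose callbacks themselves take, the powerdomain lock.

/* Hypothetical example: count the registered powerdomains. */
static int pwrdm_count_one(struct powerdomain *pwrdm, void *user)
{
        (*(int *)user)++;
        return 0;               /* non-zero would end the iteration */
}

static int pwrdm_count(void)
{
        int n = 0;

        pwrdm_for_each(pwrdm_count_one, &n);
        return n;
}
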
index 3a529c77daa8137eaede310495d8d9b217dbb014..ae2186892c85f9728e1cfdee07861412281ae0f4 100644 (file)
@@ -110,7 +110,7 @@ static struct plat_serial8250_port serial_platform_data2[] = {
                .uartclk        = OMAP24XX_BASE_BAUD * 16,
        }, {
 #ifdef CONFIG_ARCH_OMAP4
-               .membase        = IO_ADDRESS(OMAP_UART4_BASE),
+               .membase        = OMAP2_IO_ADDRESS(OMAP_UART4_BASE),
                .mapbase        = OMAP_UART4_BASE,
                .irq            = 70,
                .flags          = UPF_BOOT_AUTOCONF,
@@ -126,7 +126,7 @@ static struct plat_serial8250_port serial_platform_data2[] = {
 #ifdef CONFIG_ARCH_OMAP4
 static struct plat_serial8250_port serial_platform_data3[] = {
        {
-               .membase        = IO_ADDRESS(OMAP_UART4_BASE),
+               .membase        = OMAP2_IO_ADDRESS(OMAP_UART4_BASE),
                .mapbase        = OMAP_UART4_BASE,
                .irq            = 70,
                .flags          = UPF_BOOT_AUTOCONF,
@@ -579,7 +579,7 @@ static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS] = {
        {
                .pdev = {
                        .name                   = "serial8250",
-                       .id                     = 3
+                       .id                     = 3,
                        .dev                    = {
                                .platform_data  = serial_platform_data3,
                        },
index 81ffff7ed498a1633dac73b69b088d5a3e64acc1..4e5c07f4e45654368e362a20794daa4c4c3709af 100644 (file)
@@ -71,11 +71,6 @@ config SA1100_H3600
          <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600>
          <http://www.compaq.com/products/handhelds/pocketpc/>
 
-config SA1100_H3XXX
-       bool
-       depends on SA1100_H3100 || SA1100_H3600
-       default y
-
 config SA1100_BADGE4
        bool "HP Labs BadgePAD 4"
        select SA1111
index 95d92e8e56a81c820979d373cd87e4d152b09235..b9cbb56d6e9d9dbd9c7605196488b0872fd8bc4d 100644 (file)
@@ -77,7 +77,7 @@ static struct clock_event_device ckevt_sa1100_osmr0 = {
        .set_mode       = sa1100_osmr0_set_mode,
 };
 
-static cycle_t sa1100_read_oscr(void)
+static cycle_t sa1100_read_oscr(struct clocksource *s)
 {
        return OSCR;
 }
index 63c8f27fb15a3bc6a5eba1ec4bb8742f02275081..0b35826b7d1d419a5daf875cdf1cfc215fac726c 100644 (file)
@@ -281,6 +281,16 @@ int gpio_unregister_callback(unsigned gpio)
 }
 EXPORT_SYMBOL(gpio_unregister_callback);
 
+/* Non-zero means valid */
+int gpio_is_valid(int number)
+{
+       if (number >= 0 &&
+           number < (U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT))
+               return 1;
+       return 0;
+}
+EXPORT_SYMBOL(gpio_is_valid);
+
 int gpio_request(unsigned gpio, const char *label)
 {
        if (gpio_pin[gpio].users)
index c8174128d7ebc167ec0e700894348b5f33ab9029..7b1fc984abb64c85dd3124eba6b6b108000835b9 100644 (file)
 #define PIN_TO_PORT(val) (val >> 3)
 
 /* These can be found in arch/arm/mach-u300/gpio.c */
+extern int gpio_is_valid(int number);
 extern int gpio_request(unsigned gpio, const char *label);
 extern void gpio_free(unsigned gpio);
 extern int gpio_direction_input(unsigned gpio);
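
A minimal usage sketch of the new helper (hypothetical board code, assuming <linux/errno.h> plus the prototypes declared above): validate a pin number taken from platform data before claiming it.

int example_claim_pin(int pin)
{
        int err;

        if (!gpio_is_valid(pin))        /* non-zero means a pin the U300 actually has */
                return -EINVAL;

        err = gpio_request(pin, "example-pin");
        if (err)
                return err;

        return gpio_direction_input(pin);
}
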
index 8d43e58f9244349601d6001ec6332d6909634017..e993140edd880e3f62be0eed4d6851f5496375ab 100644 (file)
@@ -17,7 +17,7 @@ config CPU_ARM610
        select CPU_CP15_MMU
        select CPU_COPY_V3 if MMU
        select CPU_TLB_V3 if MMU
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        help
          The ARM610 is the successor to the ARM3 processor
          and was produced by VLSI Technology Inc.
@@ -31,7 +31,7 @@ config CPU_ARM7TDMI
        depends on !MMU
        select CPU_32v4T
        select CPU_ABRT_LV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4
        help
          A 32-bit RISC microprocessor based on the ARM7 processor core
@@ -49,7 +49,7 @@ config CPU_ARM710
        select CPU_CP15_MMU
        select CPU_COPY_V3 if MMU
        select CPU_TLB_V3 if MMU
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        help
          A 32-bit RISC microprocessor based on the ARM7 processor core
          designed by Advanced RISC Machines Ltd. The ARM710 is the
@@ -64,7 +64,7 @@ config CPU_ARM720T
        bool "Support ARM720T processor" if ARCH_INTEGRATOR
        select CPU_32v4T
        select CPU_ABRT_LV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
@@ -83,7 +83,7 @@ config CPU_ARM740T
        depends on !MMU
        select CPU_32v4T
        select CPU_ABRT_LV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V3     # although the core is v4t
        select CPU_CP15_MPU
        help
@@ -100,7 +100,7 @@ config CPU_ARM9TDMI
        depends on !MMU
        select CPU_32v4T
        select CPU_ABRT_NOMMU
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4
        help
          A 32-bit RISC microprocessor based on the ARM9 processor core
@@ -114,7 +114,7 @@ config CPU_ARM920T
        bool "Support ARM920T processor" if ARCH_INTEGRATOR
        select CPU_32v4T
        select CPU_ABRT_EV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4WT
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
@@ -135,7 +135,7 @@ config CPU_ARM922T
        bool "Support ARM922T processor" if ARCH_INTEGRATOR
        select CPU_32v4T
        select CPU_ABRT_EV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4WT
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
@@ -154,7 +154,7 @@ config CPU_ARM925T
        bool "Support ARM925T processor" if ARCH_OMAP1
        select CPU_32v4T
        select CPU_ABRT_EV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4WT
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
@@ -173,7 +173,7 @@ config CPU_ARM926T
        bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
        select CPU_32v5
        select CPU_ABRT_EV5TJ
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
        select CPU_COPY_V4WB if MMU
@@ -191,7 +191,7 @@ config CPU_FA526
        bool
        select CPU_32v4
        select CPU_ABRT_EV4
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
        select CPU_CACHE_FA
@@ -210,7 +210,7 @@ config CPU_ARM940T
        depends on !MMU
        select CPU_32v4T
        select CPU_ABRT_NOMMU
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MPU
        help
@@ -228,7 +228,7 @@ config CPU_ARM946E
        depends on !MMU
        select CPU_32v5
        select CPU_ABRT_NOMMU
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MPU
        help
@@ -244,7 +244,7 @@ config CPU_ARM1020
        bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR
        select CPU_32v5
        select CPU_ABRT_EV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4WT
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
@@ -262,7 +262,7 @@ config CPU_ARM1020E
        bool "Support ARM1020E processor" if ARCH_INTEGRATOR
        select CPU_32v5
        select CPU_ABRT_EV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4WT
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
@@ -275,7 +275,7 @@ config CPU_ARM1022
        bool "Support ARM1022E processor" if ARCH_INTEGRATOR
        select CPU_32v5
        select CPU_ABRT_EV4T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
        select CPU_COPY_V4WB if MMU # can probably do better
@@ -293,7 +293,7 @@ config CPU_ARM1026
        bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR
        select CPU_32v5
        select CPU_ABRT_EV5T # Needs Jazelle, but EV5TJ ignores bit 10
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
        select CPU_COPY_V4WB if MMU # can probably do better
@@ -311,7 +311,7 @@ config CPU_SA110
        select CPU_32v3 if ARCH_RPC
        select CPU_32v4 if !ARCH_RPC
        select CPU_ABRT_EV4
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4WB
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
@@ -331,7 +331,7 @@ config CPU_SA1100
        bool
        select CPU_32v4
        select CPU_ABRT_EV4
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_V4WB
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
@@ -342,7 +342,7 @@ config CPU_XSCALE
        bool
        select CPU_32v5
        select CPU_ABRT_EV5T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
        select CPU_TLB_V4WBI if MMU
@@ -352,7 +352,7 @@ config CPU_XSC3
        bool
        select CPU_32v5
        select CPU_ABRT_EV5T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
        select CPU_TLB_V4WBI if MMU
@@ -363,7 +363,7 @@ config CPU_MOHAWK
        bool
        select CPU_32v5
        select CPU_ABRT_EV5T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
        select CPU_TLB_V4WBI if MMU
@@ -374,7 +374,7 @@ config CPU_FEROCEON
        bool
        select CPU_32v5
        select CPU_ABRT_EV5T
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_LEGACY
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
        select CPU_COPY_FEROCEON if MMU
@@ -394,7 +394,7 @@ config CPU_V6
        bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
        select CPU_32v6
        select CPU_ABRT_EV6
-       select CPU_PABRT_NOIFAR
+       select CPU_PABRT_V6
        select CPU_CACHE_V6
        select CPU_CACHE_VIPT
        select CPU_CP15_MMU
@@ -420,7 +420,7 @@ config CPU_V7
        select CPU_32v6K
        select CPU_32v7
        select CPU_ABRT_EV7
-       select CPU_PABRT_IFAR
+       select CPU_PABRT_V7
        select CPU_CACHE_V7
        select CPU_CACHE_VIPT
        select CPU_CP15_MMU
@@ -482,10 +482,13 @@ config CPU_ABRT_EV6
 config CPU_ABRT_EV7
        bool
 
-config CPU_PABRT_IFAR
+config CPU_PABRT_LEGACY
        bool
 
-config CPU_PABRT_NOIFAR
+config CPU_PABRT_V6
+       bool
+
+config CPU_PABRT_V7
        bool
 
 # The cache model
index 63e3f6dd0e2196f96eed8be436e6cc287234b72f..055cb2aa8134c63fba47cdfbc6192e3ec3ffd2f6 100644 (file)
@@ -27,6 +27,10 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
 obj-$(CONFIG_CPU_ABRT_EV6)     += abort-ev6.o
 obj-$(CONFIG_CPU_ABRT_EV7)     += abort-ev7.o
 
+obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
+obj-$(CONFIG_CPU_PABRT_V6)     += pabort-v6.o
+obj-$(CONFIG_CPU_PABRT_V7)     += pabort-v7.o
+
 obj-$(CONFIG_CPU_CACHE_V3)     += cache-v3.o
 obj-$(CONFIG_CPU_CACHE_V4)     += cache-v4.o
 obj-$(CONFIG_CPU_CACHE_V4WT)   += cache-v4wt.o
index 379f7855605554c9ae955cf7ffe44964374f297e..ae0e25f5a70eb57987c131f5fbfd4fe795df3302 100644 (file)
@@ -519,9 +519,58 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        arm_notify_die("", regs, &info, fsr, 0);
 }
 
+
+static struct fsr_info ifsr_info[] = {
+       { do_bad,               SIGBUS,  0,             "unknown 0"                        },
+       { do_bad,               SIGBUS,  0,             "unknown 1"                        },
+       { do_bad,               SIGBUS,  0,             "debug event"                      },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "section access flag fault"        },
+       { do_bad,               SIGBUS,  0,             "unknown 4"                        },
+       { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "section translation fault"        },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "page access flag fault"           },
+       { do_page_fault,        SIGSEGV, SEGV_MAPERR,   "page translation fault"           },
+       { do_bad,               SIGBUS,  0,             "external abort on non-linefetch"  },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "section domain fault"             },
+       { do_bad,               SIGBUS,  0,             "unknown 10"                       },
+       { do_bad,               SIGSEGV, SEGV_ACCERR,   "page domain fault"                },
+       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
+       { do_sect_fault,        SIGSEGV, SEGV_ACCERR,   "section permission fault"         },
+       { do_bad,               SIGBUS,  0,             "external abort on translation"    },
+       { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "page permission fault"            },
+       { do_bad,               SIGBUS,  0,             "unknown 16"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 17"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 18"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 19"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 20"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 21"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 22"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 23"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 24"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 25"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 26"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 27"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 28"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 29"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 30"                       },
+       { do_bad,               SIGBUS,  0,             "unknown 31"                       },
+};
+
 asmlinkage void __exception
-do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
+do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
-       do_translation_fault(addr, FSR_LNX_PF, regs);
+       const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+       struct siginfo info;
+
+       if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+               return;
+
+       printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+               inf->name, ifsr, addr);
+
+       info.si_signo = inf->sig;
+       info.si_errno = 0;
+       info.si_code  = inf->code;
+       info.si_addr  = (void __user *)addr;
+       arm_notify_die("", regs, &info, ifsr, 0);
 }
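The table above has 32 entries because the fault-status bits of the IFSR are folded into a 5-bit index before the lookup. The fsr_fs() helper is not part of this hunk; a minimal sketch of it, assuming the usual ARM encoding with FS[3:0] in bits 3:0 and FS[4] in bit 10, would look like:

/* Sketch only -- the real helper lives with the rest of the fault code. */
#define FSR_FS4         (1 << 10)               /* FS[4]   */
#define FSR_FS3_0       (15)                    /* FS[3:0] */

static inline int fsr_fs(unsigned int fsr)
{
        /* fold FS[4] down next to FS[3:0] to form a 0..31 index */
        return (fsr & FSR_FS3_0) | ((fsr & FSR_FS4) >> 6);
}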
 
index f7457fea6de8e84c5aefe949412845550b1bfa5e..2b7996401b0f9f602dfa2d809f86e40b57f79117 100644 (file)
@@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size)
 {
        if (addr < PHYS_OFFSET)
                return 0;
-       if (addr + size >= __pa(high_memory - 1))
+       if (addr + size > __pa(high_memory - 1) + 1)
                return 0;
 
        return 1;
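The comparison is rewritten against __pa(high_memory - 1) + 1 rather than __pa(high_memory), so no address past the last byte of lowmem is ever translated and the off-by-one rejection of ranges ending exactly on that last byte goes away. A worked example with illustrative addresses:

/*
 * Suppose __pa(high_memory - 1) == 0x0fffffff, the last valid byte.
 * A 0x1000-byte access at 0x0ffff000 gives addr + size == 0x10000000:
 *
 *   old test: 0x10000000 >= 0x0fffffff      -> rejected (off by one)
 *   new test: 0x10000000 >  0x0fffffff + 1  -> false, so accepted
 */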
index 4426ee67ceca5d3d74c9b0dbe0f39c5b7e2b8538..02243eeccf50d79b9175e085fa77069a3f349862 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/cachetype.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
+#include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
 
@@ -709,10 +710,6 @@ static void __init sanity_check_meminfo(void)
                        if (meminfo.nr_banks >= NR_BANKS) {
                                printk(KERN_CRIT "NR_BANKS too low, "
                                                 "ignoring high memory\n");
-                       } else if (cache_is_vipt_aliasing()) {
-                               printk(KERN_CRIT "HIGHMEM is not yet supported "
-                                                "with VIPT aliasing cache, "
-                                                "ignoring high memory\n");
                        } else {
                                memmove(bank + 1, bank,
                                        (meminfo.nr_banks - i) * sizeof(*bank));
@@ -726,6 +723,8 @@ static void __init sanity_check_meminfo(void)
                        bank->size = VMALLOC_MIN - __va(bank->start);
                }
 #else
+               bank->highmem = highmem;
+
                /*
                 * Check whether this memory bank would entirely overlap
                 * the vmalloc area.
@@ -754,6 +753,38 @@ static void __init sanity_check_meminfo(void)
 #endif
                j++;
        }
+#ifdef CONFIG_HIGHMEM
+       if (highmem) {
+               const char *reason = NULL;
+
+               if (cache_is_vipt_aliasing()) {
+                       /*
+                        * Interactions between kmap and other mappings
+                        * make highmem support with aliasing VIPT caches
+                        * rather difficult.
+                        */
+                       reason = "with VIPT aliasing cache";
+#ifdef CONFIG_SMP
+               } else if (tlb_ops_need_broadcast()) {
+                       /*
+                        * kmap_high needs to occasionally flush TLB entries,
+                        * however, if the TLB entries need to be broadcast
+                        * we may deadlock:
+                        *  kmap_high(irqs off)->flush_all_zero_pkmaps->
+                        *  flush_tlb_kernel_range->smp_call_function_many
+                        *   (must not be called with irqs off)
+                        */
+                       reason = "without hardware TLB ops broadcasting";
+#endif
+               }
+               if (reason) {
+                       printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
+                               reason);
+                       while (j > 0 && meminfo.bank[j - 1].highmem)
+                               j--;
+               }
+       }
+#endif
        meminfo.nr_banks = j;
 }
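tlb_ops_need_broadcast() comes from the newly included <asm/smp_plat.h> and is not shown in this hunk; a sketch of the kind of check it performs, with the exact MMFR3 field position treated as an assumption:

/* Sketch only; see the real <asm/smp_plat.h> for the exact definition. */
static inline int tlb_ops_need_broadcast(void)
{
        /* MMFR3 maintenance-broadcast field: a value >= 2 means the
         * hardware broadcasts TLB operations, so no IPIs are needed. */
        return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}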
 
diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S
new file mode 100644 (file)
index 0000000..87970eb
--- /dev/null
@@ -0,0 +1,19 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Function: legacy_pabort
+ *
+ * Params  : r0 = address of aborted instruction
+ *
+ * Returns : r0 = address of abort
+ *        : r1 = Simulated IFSR with section translation fault status
+ *
+ * Purpose : obtain information about current prefetch abort.
+ */
+
+       .align  5
+ENTRY(legacy_pabort)
+       mov     r1, #5
+       mov     pc, lr
+ENDPROC(legacy_pabort)
diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S
new file mode 100644 (file)
index 0000000..06e3d1e
--- /dev/null
@@ -0,0 +1,19 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Function: v6_pabort
+ *
+ * Params  : r0 = address of aborted instruction
+ *
+ * Returns : r0 = address of abort
+ *        : r1 = IFSR
+ *
+ * Purpose : obtain information about current prefetch abort.
+ */
+
+       .align  5
+ENTRY(v6_pabort)
+       mrc     p15, 0, r1, c5, c0, 1           @ get IFSR
+       mov     pc, lr
+ENDPROC(v6_pabort)
diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S
new file mode 100644 (file)
index 0000000..a8b3b30
--- /dev/null
@@ -0,0 +1,20 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Function: v7_pabort
+ *
+ * Params  : r0 = address of aborted instruction
+ *
+ * Returns : r0 = address of abort
+ *        : r1 = IFSR
+ *
+ * Purpose : obtain information about current prefetch abort.
+ */
+
+       .align  5
+ENTRY(v7_pabort)
+       mrc     p15, 0, r0, c6, c0, 2           @ get IFAR
+       mrc     p15, 0, r1, c5, c0, 1           @ get IFSR
+       mov     pc, lr
+ENDPROC(v7_pabort)
index b5551bf010aa33668d938e09d184d3df2bc1ec5d..d9fb4b98c49ff8866a1d36288d431ce364ad1180 100644 (file)
@@ -449,7 +449,7 @@ arm1020_crval:
        .type   arm1020_processor_functions, #object
 arm1020_processor_functions:
        .word   v4t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm1020_proc_init
        .word   cpu_arm1020_proc_fin
        .word   cpu_arm1020_reset
index 8bc6740c29eb68d8bcfdb97111281ecca87e0f41..7453b75dcea5f1527790e940263f113a70aec417 100644 (file)
@@ -430,7 +430,7 @@ arm1020e_crval:
        .type   arm1020e_processor_functions, #object
 arm1020e_processor_functions:
        .word   v4t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm1020e_proc_init
        .word   cpu_arm1020e_proc_fin
        .word   cpu_arm1020e_reset
index 2cd03e66c0a318f9b28c14bdf0749571604de303..8eb72d75a8b6fe2757e0c6c8bcfa7325519c2a11 100644 (file)
@@ -413,7 +413,7 @@ arm1022_crval:
        .type   arm1022_processor_functions, #object
 arm1022_processor_functions:
        .word   v4t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm1022_proc_init
        .word   cpu_arm1022_proc_fin
        .word   cpu_arm1022_reset
index ad961a897f6eae5cfad3d3bee1fc6fcd0d8d865f..3b59f0d6713962d3e83222d1d05dc2c10606157c 100644 (file)
@@ -408,7 +408,7 @@ arm1026_crval:
        .type   arm1026_processor_functions, #object
 arm1026_processor_functions:
        .word   v5t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm1026_proc_init
        .word   cpu_arm1026_proc_fin
        .word   cpu_arm1026_reset
index 80d6e1de069ada45c74cf6b665b91292d5990019..3f9cd3d8f6d508c447e46c5cf29ca078dd1b630b 100644 (file)
@@ -278,7 +278,7 @@ __arm7_setup:       mov     r0, #0
                .type   arm6_processor_functions, #object
 ENTRY(arm6_processor_functions)
                .word   cpu_arm6_data_abort
-               .word   pabort_noifar
+               .word   legacy_pabort
                .word   cpu_arm6_proc_init
                .word   cpu_arm6_proc_fin
                .word   cpu_arm6_reset
@@ -295,7 +295,7 @@ ENTRY(arm6_processor_functions)
                .type   arm7_processor_functions, #object
 ENTRY(arm7_processor_functions)
                .word   cpu_arm7_data_abort
-               .word   pabort_noifar
+               .word   legacy_pabort
                .word   cpu_arm7_proc_init
                .word   cpu_arm7_proc_fin
                .word   cpu_arm7_reset
index 85ae18695f1021a4cf2299fbcad8f382b9f7c03d..0b62de24466646d8b7e8aa65b84e8ac5191335f5 100644 (file)
@@ -181,7 +181,7 @@ arm720_crval:
                .type   arm720_processor_functions, #object
 ENTRY(arm720_processor_functions)
                .word   v4t_late_abort
-               .word   pabort_noifar
+               .word   legacy_pabort
                .word   cpu_arm720_proc_init
                .word   cpu_arm720_proc_fin
                .word   cpu_arm720_reset
index 4f95bee63e95c0454cc1fc9224a686316db37368..01860cdeb2ec3df451947f325c7d132449820717 100644 (file)
@@ -126,7 +126,7 @@ __arm740_setup:
        .type   arm740_processor_functions, #object
 ENTRY(arm740_processor_functions)
        .word   v4t_late_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm740_proc_init
        .word   cpu_arm740_proc_fin
        .word   cpu_arm740_reset
index 93e05fa7bed44933341d6d4c42db7655b6fafb15..1201b98638298087063d3ec02eefd3a7e58d9aa6 100644 (file)
@@ -64,7 +64,7 @@ __arm7tdmi_setup:
                .type   arm7tdmi_processor_functions, #object
 ENTRY(arm7tdmi_processor_functions)
                .word   v4t_late_abort
-               .word   pabort_noifar
+               .word   legacy_pabort
                .word   cpu_arm7tdmi_proc_init
                .word   cpu_arm7tdmi_proc_fin
                .word   cpu_arm7tdmi_reset
index 914d688394fc150f26c9e4987542dae74a24205a..2b7c197cc58d2ab4babcf39920220b96a973570e 100644 (file)
@@ -395,7 +395,7 @@ arm920_crval:
        .type   arm920_processor_functions, #object
 arm920_processor_functions:
        .word   v4t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm920_proc_init
        .word   cpu_arm920_proc_fin
        .word   cpu_arm920_reset
index 51c9c9859e58475232851e89c7fa032af52d72ad..06a1aa4e33989976465155586fe0390e19109bb9 100644 (file)
@@ -399,7 +399,7 @@ arm922_crval:
        .type   arm922_processor_functions, #object
 arm922_processor_functions:
        .word   v4t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm922_proc_init
        .word   cpu_arm922_proc_fin
        .word   cpu_arm922_reset
index 2724526d89c1ccb302055ebf0ef52378497189bc..cb53435a85aee2120f2e97f6b01c24561f32e860 100644 (file)
@@ -462,7 +462,7 @@ arm925_crval:
        .type   arm925_processor_functions, #object
 arm925_processor_functions:
        .word   v4t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm925_proc_init
        .word   cpu_arm925_proc_fin
        .word   cpu_arm925_reset
index 54466937bff994ea288ba5ece9c7c39ddacecbd8..1c4848704bb358c98a0e4c87141f7326e802d3fb 100644 (file)
@@ -415,7 +415,7 @@ arm926_crval:
        .type   arm926_processor_functions, #object
 arm926_processor_functions:
        .word   v5tj_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm926_proc_init
        .word   cpu_arm926_proc_fin
        .word   cpu_arm926_reset
index f595117caf55bb7db716b29e477c7c0f9f49bb95..5b0f8464c8f29f9cc908cd2bbff5a820652f7751 100644 (file)
@@ -322,7 +322,7 @@ __arm940_setup:
        .type   arm940_processor_functions, #object
 ENTRY(arm940_processor_functions)
        .word   nommu_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm940_proc_init
        .word   cpu_arm940_proc_fin
        .word   cpu_arm940_reset
index e03f6ff1fb2631284176fd5cc08c164f592a6f4d..40c0449a139b829a709525984442b36561494e13 100644 (file)
@@ -377,7 +377,7 @@ __arm946_setup:
        .type   arm946_processor_functions, #object
 ENTRY(arm946_processor_functions)
        .word   nommu_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_arm946_proc_init
        .word   cpu_arm946_proc_fin
        .word   cpu_arm946_reset
index be6c11d2b3fbf10092be644df37aba7787387004..28545c29dbcda317db178a564a7a894f3a846f96 100644 (file)
@@ -64,7 +64,7 @@ __arm9tdmi_setup:
                .type   arm9tdmi_processor_functions, #object
 ENTRY(arm9tdmi_processor_functions)
                .word   nommu_early_abort
-               .word   pabort_noifar
+               .word   legacy_pabort
                .word   cpu_arm9tdmi_proc_init
                .word   cpu_arm9tdmi_proc_fin
                .word   cpu_arm9tdmi_reset
index 08b8a955d5d7843baad616afd342707714160a0b..08f5ac237ad4e83e67b7540a9680d28c619edc41 100644 (file)
@@ -191,7 +191,7 @@ fa526_cr1_set:
        .type   fa526_processor_functions, #object
 fa526_processor_functions:
        .word   v4_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_fa526_proc_init
        .word   cpu_fa526_proc_fin
        .word   cpu_fa526_reset
index 0fe1f8fc348802777a02e42da50c58f56e2a2d9e..d0d7795200fc143a0362312c960f334deb5f1957 100644 (file)
@@ -499,7 +499,7 @@ feroceon_crval:
        .type   feroceon_processor_functions, #object
 feroceon_processor_functions:
        .word   v5t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_feroceon_proc_init
        .word   cpu_feroceon_proc_fin
        .word   cpu_feroceon_reset
index 540f5078496ba155e2cc91c3656097ebaddd9c0e..52b5fd74fbb3fcbfe3295d570a2e121001318562 100644 (file)
@@ -359,7 +359,7 @@ mohawk_crval:
        .type   mohawk_processor_functions, #object
 mohawk_processor_functions:
        .word   v5t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_mohawk_proc_init
        .word   cpu_mohawk_proc_fin
        .word   cpu_mohawk_reset
index 90a7e5279f296c1193cc0a389c9b9d8b1f272186..7b706b38990625c08ca3595ff19b6657fcbb0648 100644 (file)
@@ -199,7 +199,7 @@ sa110_crval:
        .type   sa110_processor_functions, #object
 ENTRY(sa110_processor_functions)
        .word   v4_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_sa110_proc_init
        .word   cpu_sa110_proc_fin
        .word   cpu_sa110_reset
index 451e2d953e2a0389ed462b253ca904053b2b24ff..ee7700242c19567b94ad256984225c66d5e57872 100644 (file)
@@ -214,7 +214,7 @@ sa1100_crval:
        .type   sa1100_processor_functions, #object
 ENTRY(sa1100_processor_functions)
        .word   v4_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_sa1100_proc_init
        .word   cpu_sa1100_proc_fin
        .word   cpu_sa1100_reset
index 524ddae92595052c19ef0e015ab9ed09b24d4026..194737d60a22691c263daf1cba31b0f7ae189e0d 100644 (file)
@@ -191,7 +191,7 @@ v6_crval:
        .type   v6_processor_functions, #object
 ENTRY(v6_processor_functions)
        .word   v6_early_abort
-       .word   pabort_noifar
+       .word   v6_pabort
        .word   cpu_v6_proc_init
        .word   cpu_v6_proc_fin
        .word   cpu_v6_reset
index f3fa1c32fe923cb93f0e73fc14f883182b55df57..23ebcf6eab9f63241e3743170c168e2e38ec0ea2 100644 (file)
@@ -295,7 +295,7 @@ __v7_setup_stack:
        .type   v7_processor_functions, #object
 ENTRY(v7_processor_functions)
        .word   v7_early_abort
-       .word   pabort_ifar
+       .word   v7_pabort
        .word   cpu_v7_proc_init
        .word   cpu_v7_proc_fin
        .word   cpu_v7_reset
index 33515c214b92d5483e7257adf076be3758332993..2028f370288113507d2d5dcfa6e5599629270df5 100644 (file)
@@ -428,7 +428,7 @@ xsc3_crval:
        .type   xsc3_processor_functions, #object
 ENTRY(xsc3_processor_functions)
        .word   v5t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_xsc3_proc_init
        .word   cpu_xsc3_proc_fin
        .word   cpu_xsc3_reset
index 423394260bcbd17fc863deecd225bf85f617025c..f056c283682db09b37b3b282a1cded25d9683367 100644 (file)
@@ -511,7 +511,7 @@ xscale_crval:
        .type   xscale_processor_functions, #object
 ENTRY(xscale_processor_functions)
        .word   v5t_early_abort
-       .word   pabort_noifar
+       .word   legacy_pabort
        .word   cpu_xscale_proc_init
        .word   cpu_xscale_proc_fin
        .word   cpu_xscale_reset
index 77fa7cc7d162dc28b903211ff5c3691e7e613e2e..ce31f316ac75c1e72af3693a58633a10e50e7f7a 100644 (file)
@@ -257,7 +257,8 @@ void __init iop3xx_atu_setup(void)
        *IOP3XX_OUMWTVR0 = 0;
 
        /* Outbound window 1 */
-       *IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA + IOP3XX_PCI_MEM_WINDOW_SIZE;
+       *IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA +
+                         IOP3XX_PCI_MEM_WINDOW_SIZE / 2;
        *IOP3XX_OUMWTVR1 = 0;
 
        /* BAR 3 ( Disabled ) */
index 3695bbe3ee280960599532b013b9c33c30d3bc69..8da95d57c21f6beef36ce80438c355bc73dbdf6d 100644 (file)
@@ -85,7 +85,7 @@ void __init iop_init_time(unsigned long tick_rate)
 {
        u32 timer_ctl;
 
-       ticks_per_jiffy = (tick_rate + HZ/2) / HZ;
+       ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
        ticks_per_usec = tick_rate / 1000000;
        next_jiffy_time = 0xffffffff;
        iop_tick_rate = tick_rate;
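The switch to DIV_ROUND_CLOSEST() does not change the computed value for these positive operands; the kernel.h macro is essentially the same round-to-nearest arithmetic as the open-coded (tick_rate + HZ/2) / HZ:

/* Paraphrased from include/linux/kernel.h, shown only for the equivalence. */
#define DIV_ROUND_CLOSEST(x, divisor) ({                \
        typeof(divisor) __d = (divisor);                \
        (((x) + ((__d) / 2)) / (__d));                  \
})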
index 693839c89ad08475cec23e2f74dc92755949ffc9..71ebd7fcfea158afa5d0f6eb14b4d9202f79be5a 100644 (file)
@@ -250,7 +250,7 @@ static struct gpio_bank gpio_bank_730[7] = {
 
 #ifdef CONFIG_ARCH_OMAP850
 static struct gpio_bank gpio_bank_850[7] = {
-       { OMAP1_MPUIO_BASE,     INT_850_MPUIO,      IH_MPUIO_BASE,      METHOD_MPUIO },
+       { OMAP1_MPUIO_VBASE,     INT_850_MPUIO,     IH_MPUIO_BASE,      METHOD_MPUIO },
        { OMAP850_GPIO1_BASE,  INT_850_GPIO_BANK1,  IH_GPIO_BASE,       METHOD_GPIO_850 },
        { OMAP850_GPIO2_BASE,  INT_850_GPIO_BANK2,  IH_GPIO_BASE + 32,  METHOD_GPIO_850 },
        { OMAP850_GPIO3_BASE,  INT_850_GPIO_BANK3,  IH_GPIO_BASE + 64,  METHOD_GPIO_850 },
index 11e73d9e8928458adc5656add696d835cf330d7a..f129efb3075ead421bc204dd9bef3a4aa39ecb2e 100644 (file)
@@ -303,32 +303,21 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define cpu_is_omap2430()              0
 #define cpu_is_omap3430()              0
 
-#if defined(MULTI_OMAP1)
-# if defined(CONFIG_ARCH_OMAP730)
-#  undef  cpu_is_omap730
-#  define cpu_is_omap730()             is_omap730()
-# endif
-# if defined(CONFIG_ARCH_OMAP850)
-#  undef  cpu_is_omap850
-#  define cpu_is_omap850()             is_omap850()
-# endif
-#else
-# if defined(CONFIG_ARCH_OMAP730)
-#  undef  cpu_is_omap730
-#  define cpu_is_omap730()             1
-# endif
-#endif
-#else
-# if defined(CONFIG_ARCH_OMAP850)
-#  undef  cpu_is_omap850
-#  define cpu_is_omap850()             1
-# endif
-#endif
-
 /*
  * Whether we have MULTI_OMAP1 or not, we still need to distinguish
- * between 330 vs. 1510 and 1611B/5912 vs. 1710.
+ * between 730 vs 850, 330 vs. 1510 and 1611B/5912 vs. 1710.
  */
+
+#if defined(CONFIG_ARCH_OMAP730)
+# undef  cpu_is_omap730
+# define cpu_is_omap730()              is_omap730()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP850)
+# undef  cpu_is_omap850
+# define cpu_is_omap850()              is_omap850()
+#endif
+
 #if defined(CONFIG_ARCH_OMAP15XX)
 # undef  cpu_is_omap310
 # undef  cpu_is_omap1510
@@ -433,3 +422,5 @@ IS_OMAP_TYPE(3430, 0x3430)
 
 int omap_chip_is(struct omap_chip_id oci);
 void omap2_check_revision(void);
+
+#endif
index 45ea3ae3c995ab7f0ad51386ad3282836c46419e..d91b9be334ff2ff1b80078f95c1d805c0d278224 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef ASMARM_ARCH_KEYPAD_H
 #define ASMARM_ARCH_KEYPAD_H
 
+#include <linux/input/matrix_keypad.h>
+
 struct omap_kp_platform_data {
        int rows;
        int cols;
@@ -35,9 +37,6 @@ struct omap_kp_platform_data {
 
 #define KEY_PERSISTENT         0x00800000
 #define KEYNUM_MASK            0x00EFFFFF
-#define KEY(col, row, val) (((col) << 28) | ((row) << 24) | (val))
-#define PERSISTENT_KEY(col, row) (((col) << 28) | ((row) << 24) | \
-                                               KEY_PERSISTENT)
 
 #endif
 
index 98dfab651dfc9198272af9f1f966d4b4b2fd1dbb..0f49d2d563d9cd99a10759f935fa7923f6213abd 100644 (file)
@@ -840,12 +840,14 @@ enum omap34xx_index {
         */
        AF26_34XX_GPIO0,
        AF22_34XX_GPIO9,
+       AG9_34XX_GPIO23,
        AH8_34XX_GPIO29,
        U8_34XX_GPIO54_OUT,
        U8_34XX_GPIO54_DOWN,
        L8_34XX_GPIO63,
        G25_34XX_GPIO86_OUT,
        AG4_34XX_GPIO134_OUT,
+       AF4_34XX_GPIO135_OUT,
        AE4_34XX_GPIO136_OUT,
        AF6_34XX_GPIO140_UP,
        AE6_34XX_GPIO141,
index 6271d8556a40f32d5ef8d360bc6bbf8b74265559..fa6461423bd06baea45768ae23090315ecb94990 100644 (file)
@@ -135,6 +135,8 @@ struct powerdomain *pwrdm_lookup(const char *name);
 
 int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
                        void *user);
+int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
+                       void *user);
 
 int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
 int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
index 6fc52fcbdc033b2a85e67d10e848d4e7c4812bc8..dc3fac3dd0eac8c40787ea8e3006e6a59ce19e80 100644 (file)
@@ -47,7 +47,7 @@
  *     'va':   mpu virtual address
  *
  *     'c':    contiguous memory area
- *     'd':    dicontiguous memory area
+ *     'd':    discontiguous memory area
  *     'a':    anonymous memory allocation
  *     '()':   optional feature
  *
@@ -199,7 +199,8 @@ static void *vmap_sg(const struct sg_table *sgt)
                va += bytes;
        }
 
-       flush_cache_vmap(new->addr, new->addr + total);
+       flush_cache_vmap((unsigned long)new->addr,
+                               (unsigned long)(new->addr + total));
        return new->addr;
 
 err_out:
@@ -362,8 +363,9 @@ void *da_to_va(struct iommu *obj, u32 da)
                goto out;
        }
        va = area->va;
-       mutex_unlock(&obj->mmap_lock);
 out:
+       mutex_unlock(&obj->mmap_lock);
+
        return va;
 }
 EXPORT_SYMBOL_GPL(da_to_va);
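The hunk above moves the mutex_unlock() below the out: label so that the early-exit path releases obj->mmap_lock as well. The shape of the fix is the usual single-unlock-exit pattern; the names below are placeholders, not driver code:

static void *lookup(struct some_obj *obj, u32 da)
{
        struct some_area *area;
        void *va = NULL;

        mutex_lock(&obj->mmap_lock);
        area = find_area(obj, da);
        if (!area)
                goto out;                       /* error path must unlock too */
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);          /* one unlock for both paths */
        return va;
}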
@@ -390,14 +392,14 @@ static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
        }
 
        va_end = _va + PAGE_SIZE * i;
-       flush_cache_vmap(_va, va_end);
+       flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
 }
 
 static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
 {
        /*
         * Actually this is not necessary at all, just exists for
-        * consistency of the code readibility.
+        * consistency of the code readability.
         */
        BUG_ON(!sgt);
 }
@@ -433,7 +435,7 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
 {
        /*
         * Actually this is not necessary at all, just exists for
-        * consistency of the code readibility
+        * consistency of the code readability
         */
        BUG_ON(!sgt);
 }
index 925f64711c37c7d2f37bc67bc31fc87264a81290..75d1f26e5b177055f352bfd2dc428eca39ee09ed 100644 (file)
@@ -270,7 +270,8 @@ void * omap_sram_push(void * start, unsigned long size)
        omap_sram_ceil -= size;
        omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *));
        memcpy((void *)omap_sram_ceil, start, size);
-       flush_icache_range((unsigned long)start, (unsigned long)(start + size));
+       flush_icache_range((unsigned long)omap_sram_ceil,
+               (unsigned long)(omap_sram_ceil + size));
 
        return (void *)omap_sram_ceil;
 }
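The block copied into SRAM is executed from omap_sram_ceil, not from its original location, so the instruction cache must be invalidated for the destination range; the old call flushed the source buffer instead. Schematically (a sketch, not the OMAP code):

static void *push_to_sram(void *dst, const void *src, unsigned long size)
{
        memcpy(dst, src, size);
        /* make the copy that will actually run visible to the I-cache */
        flush_icache_range((unsigned long)dst, (unsigned long)dst + size);
        return dst;
}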
index 2d0852ac3b272d57a66271ce4edee234ccb53e96..c2cef6139683e6c5c68096272feba6fe8f6ee417 100644 (file)
@@ -2,8 +2,11 @@
 #define _ARCH_MCI_H
 
 struct s3c24xx_mci_pdata {
+       unsigned int    no_wprotect : 1;
+       unsigned int    no_detect : 1;
        unsigned int    wprotect_invert : 1;
        unsigned int    detect_invert : 1;   /* set => detect active high. */
+       unsigned int    use_dma : 1;
 
        unsigned int    gpio_detect;
        unsigned int    gpio_wprotect;
index 93635a766f9cded6c93e8cc011e47e35e62da0c1..1e60a92dd6028d21ec02cccf885ee1b8841bf980 100644 (file)
@@ -48,7 +48,7 @@ coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned l
        return ret;
 }
 
-static struct file_operations coreb_fops = {
+static const struct file_operations coreb_fops = {
        .owner   = THIS_MODULE,
        .ioctl   = coreb_ioctl,
 };
index 6cc1a0319a5d89cd6907df11572480675e296195..562b9a7feae776c5f9ef024726e82ff3dc66774a 100644 (file)
@@ -244,7 +244,7 @@ static unsigned sync_serial_prescale_shadow;
 
 #define NUMBER_OF_PORTS 2
 
-static struct file_operations sync_serial_fops = {
+static const struct file_operations sync_serial_fops = {
        .owner   = THIS_MODULE,
        .write   = sync_serial_write,
        .read    = sync_serial_read,
index fe1fde893887ea76f90c699dd180c94625a704b2..d89ab80498edcafa6327bd74d6b0f404f54e5d32 100644 (file)
@@ -855,7 +855,7 @@ gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
        return 0;
 }
 
-struct file_operations gpio_fops = {
+static const struct file_operations gpio_fops = {
        .owner       = THIS_MODULE,
        .poll        = gpio_poll,
        .ioctl       = gpio_ioctl,
index d06933bd631825cfbbb096b92ecd0a166649aedc..4010f1fc5b65ba1fc01217f071d897af5a33c125 100644 (file)
@@ -162,6 +162,13 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define __raw_writew writew
 #define __raw_writel writel
 
+#define ioread8 readb
+#define ioread16 readw
+#define ioread32 readl
+#define iowrite8 writeb
+#define iowrite16 writew
+#define iowrite32 writel
+
 #define mmiowb()
 
 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
index 22624b51d4d3ecf3222ce82de0464719bb731d49..700570747a906ce5c3f4eb4218d2301723f5b4a5 100644 (file)
@@ -23,12 +23,6 @@ EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
 
-/* Networking helper routines. */
-/* Delay loops */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__delay);
-EXPORT_SYMBOL(__const_udelay);
-
 EXPORT_SYMBOL(strncpy_from_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(clear_user);
index ba61c4c73202769f947c87ace55c3d8453b2f336..e7fee0f198d51a22947a7ff4817a6d4e35d27438 100644 (file)
 
 #include <asm/hw_irq.h>
 
+#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_RTC_DRV_CMOS_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+#endif  /* pc-style 'CMOS' RTC support */
+
 #ifdef CONFIG_SMP
 extern void smp_local_timer_interrupt(void);
 #endif
index 03b14e55cd894791ab25e25fb5bfbc8827baf140..fbd109031df3aca4c83851494044492a0cc20965 100644 (file)
@@ -104,8 +104,8 @@ static void set_eit_vector_entries(void)
        eit_vector[186] = (unsigned long)smp_call_function_interrupt;
        eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
        eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
-       eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
-       eit_vector[190] = 0;
+       eit_vector[189] = 0;    /* CPU_BOOT_IPI */
+       eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
        eit_vector[191] = 0;
 #endif
        _flush_cache_copyback_all();
index ced549be80f5e9055528918394caaeca6d04270f..940f4837e42b92a359fe7d56fcdcda555f22e504 100644 (file)
@@ -122,4 +122,8 @@ void __ndelay(unsigned long nsecs)
 {
        __const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
 }
+
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
index b7a78ad429b78f270913e620ea875083467141d0..5d2858f6eede4283bbb7c61717d021e3cf60797c 100644 (file)
@@ -32,6 +32,9 @@ typedef struct {
 } mem_prof_t;
 static mem_prof_t mem_prof[MAX_NUMNODES];
 
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
 static void __init mem_prof_init(void)
 {
        unsigned long start_pfn, holes, free_pfn;
@@ -42,7 +45,7 @@ static void __init mem_prof_init(void)
        /* Node#0 SDRAM */
        mp = &mem_prof[0];
        mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
-       mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE);
+       mp->pages = PFN_DOWN(memory_end - memory_start);
        mp->holes = 0;
        mp->free_pfn = PFN_UP(__pa(_end));
 
index 49a6d16a3d585d5b2a4d567ce7256162340130cc..e9491a5ae827a99f530afd409f1800ff4e54a9fd 100644 (file)
@@ -150,9 +150,13 @@ ENTRY(tme_handler)
 
        ; pmd = pmd_offset(pgd, address);
        ld      r3, @r3                 ; r3: pmd data
-       ldi     r2, #-4096
        beqz    r3, 3f                  ; pmd_none(*pmd) ?
 
+       and3    r2, r3, #0xfff
+       add3    r2, r2, #-355           ; _KERNPG_TABLE(=0x163)
+       bnez    r2, 3f                  ; pmd_bad(*pmd) ?
+       ldi     r2, #-4096
+
        ; pte = pte_offset(pmd, address);
        and     r2, r3                  ; r2: pte base addr
        srl3    r3, r0, #10
@@ -263,9 +267,9 @@ ENTRY(tme_handler)
        ld      r1, @r3                 ; r1: pmd
        beqz    r1, 3f                  ; pmd_none(*pmd) ?
 ;
-       and3    r1, r1, #0xeff
-       ldi     r4, #611                ; _KERNPG_TABLE(=611)
-       bne     r1, r4, 3f              ; !pmd_bad(*pmd) ?
+       and3    r1, r1, #0x3ff
+       ldi     r4, #0x163              ; _KERNPG_TABLE(=0x163)
+       bne     r1, r4, 3f              ; pmd_bad(*pmd) ?
 
        .fillinsn
 4:
index 554f65b6cd3b38e7ba0575def0c56ce2083189e3..394ee946015c4284eea9ee0f668e1851ccfdd38b 100644 (file)
@@ -1,8 +1,16 @@
 #ifndef __M68K_HARDIRQ_H
 #define __M68K_HARDIRQ_H
 
-#define HARDIRQ_BITS   8
+#include <linux/threads.h>
+#include <linux/cache.h>
+
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+       unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
 
-#include <asm-generic/hardirq.h>
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#define HARDIRQ_BITS   8
 
 #endif
index 594ee0e657fe0da77cbd20740c202b044e0d7ded..9a8876f715d81d73214c97079d2a601460fe6234 100644 (file)
@@ -45,25 +45,25 @@ int main(void)
        DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
 
        /* offsets into the pt_regs */
-       DEFINE(PT_D0, offsetof(struct pt_regs, d0));
-       DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0));
-       DEFINE(PT_D1, offsetof(struct pt_regs, d1));
-       DEFINE(PT_D2, offsetof(struct pt_regs, d2));
-       DEFINE(PT_D3, offsetof(struct pt_regs, d3));
-       DEFINE(PT_D4, offsetof(struct pt_regs, d4));
-       DEFINE(PT_D5, offsetof(struct pt_regs, d5));
-       DEFINE(PT_A0, offsetof(struct pt_regs, a0));
-       DEFINE(PT_A1, offsetof(struct pt_regs, a1));
-       DEFINE(PT_A2, offsetof(struct pt_regs, a2));
-       DEFINE(PT_PC, offsetof(struct pt_regs, pc));
-       DEFINE(PT_SR, offsetof(struct pt_regs, sr));
+       DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+       DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+       DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
+       DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
+       DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
+       DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
+       DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
+       DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
+       DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
+       DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+       DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+       DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
 
 #ifdef CONFIG_COLDFIRE
        /* bitfields are a bit difficult */
-       DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
+       DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
 #else
        /* bitfields are a bit difficult */
-       DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
+       DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4);
 #endif
 
        /* signal defines */
index f56faa5c9cd91a06e2c24e153c3fd54fa72a183e..56043ade394197ebb011fcfcd94a50df9d181179 100644 (file)
@@ -46,7 +46,7 @@
 ENTRY(buserr)
        SAVE_ALL
        moveq   #-1,%d0
-       movel   %d0,%sp@(PT_ORIG_D0)
+       movel   %d0,%sp@(PT_OFF_ORIG_D0)
        movel   %sp,%sp@-               /* stack frame pointer argument */
        jsr     buserr_c
        addql   #4,%sp
@@ -55,7 +55,7 @@ ENTRY(buserr)
 ENTRY(trap)
        SAVE_ALL
        moveq   #-1,%d0
-       movel   %d0,%sp@(PT_ORIG_D0)
+       movel   %d0,%sp@(PT_OFF_ORIG_D0)
        movel   %sp,%sp@-               /* stack frame pointer argument */
        jsr     trap_c
        addql   #4,%sp
@@ -67,7 +67,7 @@ ENTRY(trap)
 ENTRY(dbginterrupt)
        SAVE_ALL
        moveq   #-1,%d0
-       movel   %d0,%sp@(PT_ORIG_D0)
+       movel   %d0,%sp@(PT_OFF_ORIG_D0)
        movel   %sp,%sp@-               /* stack frame pointer argument */
        jsr     dbginterrupt_c
        addql   #4,%sp
index b1703c67a4f195f54203e8cbeed93c3441b8d9a8..f3236d0b522ddceaa39a9a468b7cc4f02d27b5d1 100644 (file)
@@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
                totalram_pages++;
                pages++;
        }
-       printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages);
+       printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024));
 }
 #endif
 
index 0f41ba82a3b533d1ff6a3815f59916c6d3e5f77e..942397984c66ca685bd93e1d559585ecbabf71f5 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
 #include <asm/mcfdma.h>
-#include <asm/mcfuart.h>
 
 /***************************************************************************/
 
index b1aef72f3bafb23dcb5e3285be12029de801542b..9d80d2c4286668f2715d64a5026026e5f98cfae7 100644 (file)
 .globl inthandler7
 
 badsys:
-       movel   #-ENOSYS,%sp@(PT_D0)
+       movel   #-ENOSYS,%sp@(PT_OFF_D0)
        jra     ret_from_exception
 
 do_trace:
-       movel   #-ENOSYS,%sp@(PT_D0)    /* needed for strace*/
+       movel   #-ENOSYS,%sp@(PT_OFF_D0)        /* needed for strace*/
        subql   #4,%sp
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
        RESTORE_SWITCH_STACK
        addql   #4,%sp
-       movel   %sp@(PT_ORIG_D0),%d1
+       movel   %sp@(PT_OFF_ORIG_D0),%d1
        movel   #-ENOSYS,%d0
        cmpl    #NR_syscalls,%d1
        jcc     1f
@@ -57,7 +57,7 @@ do_trace:
        lea     sys_call_table, %a0
        jbsr    %a0@(%d1)
 
-1:     movel   %d0,%sp@(PT_D0)         /* save the return value */
+1:     movel   %d0,%sp@(PT_OFF_D0)             /* save the return value */
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
@@ -75,7 +75,7 @@ ENTRY(system_call)
        jbsr    set_esp0
        addql   #4,%sp
 
-       movel   %sp@(PT_ORIG_D0),%d0
+       movel   %sp@(PT_OFF_ORIG_D0),%d0
 
        movel   %sp,%d1                 /* get thread_info pointer */
        andl    #-THREAD_SIZE,%d1
@@ -88,10 +88,10 @@ ENTRY(system_call)
        lea     sys_call_table,%a0
        movel   %a0@(%d0), %a0
        jbsr    %a0@
-       movel   %d0,%sp@(PT_D0)         /* save the return value*/
+       movel   %d0,%sp@(PT_OFF_D0)             /* save the return value*/
 
 ret_from_exception:
-       btst    #5,%sp@(PT_SR)          /* check if returning to kernel*/
+       btst    #5,%sp@(PT_OFF_SR)              /* check if returning to kernel*/
        jeq     Luser_return            /* if so, skip resched, signals*/
 
 Lkernel_return:
@@ -133,7 +133,7 @@ Lreturn:
  */
 inthandler1:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and     #0x3ff, %d0
 
        movel   %sp,%sp@-
@@ -144,7 +144,7 @@ inthandler1:
 
 inthandler2:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and     #0x3ff, %d0
 
        movel   %sp,%sp@-
@@ -155,7 +155,7 @@ inthandler2:
 
 inthandler3:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and     #0x3ff, %d0
 
        movel   %sp,%sp@-
@@ -166,7 +166,7 @@ inthandler3:
 
 inthandler4:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and     #0x3ff, %d0
 
        movel   %sp,%sp@-
@@ -177,7 +177,7 @@ inthandler4:
 
 inthandler5:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and     #0x3ff, %d0
 
        movel   %sp,%sp@-
@@ -188,7 +188,7 @@ inthandler5:
 
 inthandler6:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and     #0x3ff, %d0
 
        movel   %sp,%sp@-
@@ -199,7 +199,7 @@ inthandler6:
 
 inthandler7:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and     #0x3ff, %d0
 
        movel   %sp,%sp@-
@@ -210,7 +210,7 @@ inthandler7:
 
 inthandler:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and     #0x3ff, %d0
 
        movel   %sp,%sp@-
@@ -224,7 +224,7 @@ ret_from_interrupt:
 2:
        RESTORE_ALL
 1:
-       moveb   %sp@(PT_SR), %d0
+       moveb   %sp@(PT_OFF_SR), %d0
        and     #7, %d0
        jhi     2b
 
index 55dfefe386427ea875bbf96b39dae967095ff514..6d3460a39cacf1383a16f2e66146c60aeecf6509 100644 (file)
 .globl inthandler
 
 badsys:
-       movel   #-ENOSYS,%sp@(PT_D0)
+       movel   #-ENOSYS,%sp@(PT_OFF_D0)
        jra     ret_from_exception
 
 do_trace:
-       movel   #-ENOSYS,%sp@(PT_D0)    /* needed for strace*/
+       movel   #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
        subql   #4,%sp
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
        RESTORE_SWITCH_STACK
        addql   #4,%sp
-       movel   %sp@(PT_ORIG_D0),%d1
+       movel   %sp@(PT_OFF_ORIG_D0),%d1
        movel   #-ENOSYS,%d0
        cmpl    #NR_syscalls,%d1
        jcc     1f
@@ -53,7 +53,7 @@ do_trace:
        lea     sys_call_table, %a0
        jbsr    %a0@(%d1)
 
-1:     movel   %d0,%sp@(PT_D0)         /* save the return value */
+1:     movel   %d0,%sp@(PT_OFF_D0)     /* save the return value */
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
@@ -79,10 +79,10 @@ ENTRY(system_call)
        lea     sys_call_table,%a0
        movel   %a0@(%d0), %a0
        jbsr    %a0@
-       movel   %d0,%sp@(PT_D0)         /* save the return value*/
+       movel   %d0,%sp@(PT_OFF_D0)     /* save the return value*/
 
 ret_from_exception:
-       btst    #5,%sp@(PT_SR)          /* check if returning to kernel*/
+       btst    #5,%sp@(PT_OFF_SR)      /* check if returning to kernel*/
        jeq     Luser_return            /* if so, skip resched, signals*/
 
 Lkernel_return:
@@ -124,7 +124,7 @@ Lreturn:
  */
 inthandler:
        SAVE_ALL
-       movew   %sp@(PT_VECTOR), %d0
+       movew   %sp@(PT_OFF_VECTOR), %d0
        and.l   #0x3ff, %d0
        lsr.l   #0x02,  %d0
 
@@ -139,7 +139,7 @@ ret_from_interrupt:
 2:
        RESTORE_ALL
 1:
-       moveb   %sp@(PT_SR), %d0
+       moveb   %sp@(PT_OFF_SR), %d0
        and     #7, %d0
        jhi     2b
        /* check if we need to do software interrupts */
index 3b471c0da24a0bd6232acbf98d5b7b6b0135217b..dd7d591f70eae6df0af6504428f2afbba503140d 100644 (file)
@@ -81,11 +81,11 @@ ENTRY(system_call)
 
        movel   %d3,%a0
        jbsr    %a0@
-       movel   %d0,%sp@(PT_D0)         /* save the return value */
+       movel   %d0,%sp@(PT_OFF_D0)     /* save the return value */
        jra     ret_from_exception
 1:
-       movel   #-ENOSYS,%d2            /* strace needs -ENOSYS in PT_D0 */
-       movel   %d2,PT_D0(%sp)          /* on syscall entry */
+       movel   #-ENOSYS,%d2            /* strace needs -ENOSYS in PT_OFF_D0 */
+       movel   %d2,PT_OFF_D0(%sp)      /* on syscall entry */
        subql   #4,%sp
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
@@ -93,7 +93,7 @@ ENTRY(system_call)
        addql   #4,%sp
        movel   %d3,%a0
        jbsr    %a0@
-       movel   %d0,%sp@(PT_D0)         /* save the return value */
+       movel   %d0,%sp@(PT_OFF_D0)             /* save the return value */
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace
@@ -104,7 +104,7 @@ ret_from_signal:
 
 ret_from_exception:
        move    #0x2700,%sr             /* disable intrs */
-       btst    #5,%sp@(PT_SR)          /* check if returning to kernel */
+       btst    #5,%sp@(PT_OFF_SR)      /* check if returning to kernel */
        jeq     Luser_return            /* if so, skip resched, signals */
 
 #ifdef CONFIG_PREEMPT
@@ -142,8 +142,8 @@ Luser_return:
 Lreturn:
        move    #0x2700,%sr             /* disable intrs */
        movel   sw_usp,%a0              /* get usp */
-       movel   %sp@(PT_PC),%a0@-       /* copy exception program counter */
-       movel   %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */
+       movel   %sp@(PT_OFF_PC),%a0@-   /* copy exception program counter */
+       movel   %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
        moveml  %sp@,%d1-%d5/%a0-%a2
        lea     %sp@(32),%sp            /* space for 8 regs */
        movel   %sp@+,%d0
@@ -181,9 +181,9 @@ Lsignal_return:
 ENTRY(inthandler)
        SAVE_ALL
        moveq   #-1,%d0
-       movel   %d0,%sp@(PT_ORIG_D0)
+       movel   %d0,%sp@(PT_OFF_ORIG_D0)
 
-       movew   %sp@(PT_FORMATVEC),%d0  /* put exception # in d0 */
+       movew   %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
        andl    #0x03fc,%d0             /* mask out vector only */
 
        movel   %sp,%sp@-               /* push regs arg */
@@ -203,7 +203,7 @@ ENTRY(inthandler)
 ENTRY(fasthandler)
        SAVE_LOCAL
 
-       movew   %sp@(PT_FORMATVEC),%d0
+       movew   %sp@(PT_OFF_FORMATVEC),%d0
        andl    #0x03fc,%d0             /* mask out vector only */
 
        movel   %sp,%sp@-               /* push regs arg */
index acc1f05d1e2c04665f2224d91bb9d61bb9929e48..e3ecb36dd554ebc3504e08ac369a29359e8c304f 100644 (file)
@@ -592,6 +592,8 @@ C_ENTRY(full_exception_trap):
        nop
        mfs     r7, rfsr;               /* save FSR */
        nop
+       mts     rfsr, r0;       /* Clear sticky fsr */
+       nop
        la      r12, r0, full_exception
        set_vms;
        rtbd    r12, 0;
index 6b0288ebccd6674c21c9ed0bee8a095903507412..2b86c03aa84187cb5857974fba2cb949e349ce93 100644 (file)
@@ -384,7 +384,7 @@ handle_other_ex: /* Handle Other exceptions here */
        addk    r8, r17, r0; /* Load exception address */
        bralid  r15, full_exception; /* Branch to the handler */
        nop;
-       mts     r0, rfsr;       /* Clear sticky fsr */
+       mts     rfsr, r0;       /* Clear sticky fsr */
        nop
 
        /*
index 4201c743cc9fb33e17a52ae057940345d79e2681..c592d475b3d890a50061f8f3b265c4516457d749 100644 (file)
@@ -235,7 +235,9 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
        regs->pc = pc;
        regs->r1 = usp;
        regs->pt_mode = 0;
+#ifdef CONFIG_MMU
        regs->msr |= MSR_UMS;
+#endif
 }
 
 #ifdef CONFIG_MMU
index 3ab6d80d150d742b42daea99d652b7a04734c662..19c1c82849ff53ecabd77c49a24eb51a5222f0ac 100644 (file)
@@ -175,7 +175,7 @@ static dbdev_tab_t dbdev_tab[] = {
 #define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab)
 
 #ifdef CONFIG_PM
-static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][8];
+static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][6];
 #endif
 
 
@@ -993,14 +993,13 @@ void au1xxx_dbdma_suspend(void)
        au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
 
        /* save channel configurations */
-       for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
+       for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
                au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
                au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
                au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
                au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
                au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
                au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
-               au1xxx_dbdma_pm_regs[i][6] = au_readl(addr + 0x18);
 
                /* halt channel */
                au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
@@ -1027,14 +1026,13 @@ void au1xxx_dbdma_resume(void)
        au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
 
        /* restore channel configurations */
-       for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
+       for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
                au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
                au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
                au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
                au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
                au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
                au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
-               au_writel(au1xxx_dbdma_pm_regs[i][6], addr + 0x18);
                au_sync();
                addr += 0x100;  /* next channel base */
        }
index dfbfd7e2ac088887cccfc35aac853cf9cbd07785..938b1d0b765215b9360daa1f2b1d26598114d121 100644 (file)
@@ -112,10 +112,8 @@ static int iodev_open(struct inode *i, struct file *f)
 {
        int ret;
 
-       lock_kernel();
        ret = request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
                           iodev_name, &miscdev);
-       unlock_kernel();
 
        return ret;
 }
index aaa585cf26e36aa9554fc6f4df8f542455f54049..c146d1ededede3e4a0eeeb37e268dc3472b753f5 100644 (file)
@@ -1,5 +1,5 @@
 obj-y          += clk.o cpu.o cs.o gpio.o irq.o prom.o setup.o timer.o \
-                  dev-dsp.o dev-enet.o
+                  dev-dsp.o dev-enet.o dev-pcmcia.o dev-uart.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
 
 obj-y          += boards/
index fd77f548207abeacf7f40f6cda06a5f0e4de86f4..78e155d21be61cdc8c59c4cf84395d6f54183805 100644 (file)
 #include <bcm63xx_cpu.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_io.h>
-#include <bcm63xx_board.h>
 #include <bcm63xx_dev_pci.h>
 #include <bcm63xx_dev_enet.h>
 #include <bcm63xx_dev_dsp.h>
+#include <bcm63xx_dev_pcmcia.h>
+#include <bcm63xx_dev_uart.h>
 #include <board_bcm963xx.h>
 
 #define PFX    "board_bcm963xx: "
@@ -793,6 +794,11 @@ int __init board_register_devices(void)
 {
        u32 val;
 
+       bcm63xx_uart_register();
+
+       if (board.has_pccard)
+               bcm63xx_pcmcia_register();
+
        if (board.has_enet0 &&
            !board_get_mac_address(board.enet0.mac_addr))
                bcm63xx_enet_register(0, &board.enet0);
diff --git a/arch/mips/bcm63xx/dev-pcmcia.c b/arch/mips/bcm63xx/dev-pcmcia.c
new file mode 100644 (file)
index 0000000..de4d917
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/bootinfo.h>
+#include <linux/platform_device.h>
+#include <bcm63xx_cs.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_pcmcia.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+static struct resource pcmcia_resources[] = {
+       /* pcmcia registers */
+       {
+               /* start & end filled at runtime */
+               .flags          = IORESOURCE_MEM,
+       },
+
+       /* pcmcia memory zone resources */
+       {
+               .start          = BCM_PCMCIA_COMMON_BASE_PA,
+               .end            = BCM_PCMCIA_COMMON_END_PA,
+               .flags          = IORESOURCE_MEM,
+       },
+       {
+               .start          = BCM_PCMCIA_ATTR_BASE_PA,
+               .end            = BCM_PCMCIA_ATTR_END_PA,
+               .flags          = IORESOURCE_MEM,
+       },
+       {
+               .start          = BCM_PCMCIA_IO_BASE_PA,
+               .end            = BCM_PCMCIA_IO_END_PA,
+               .flags          = IORESOURCE_MEM,
+       },
+
+       /* PCMCIA irq */
+       {
+               /* start filled at runtime */
+               .flags          = IORESOURCE_IRQ,
+       },
+
+       /* declare PCMCIA IO resource also */
+       {
+               .start          = BCM_PCMCIA_IO_BASE_PA,
+               .end            = BCM_PCMCIA_IO_END_PA,
+               .flags          = IORESOURCE_IO,
+       },
+};
+
+static struct bcm63xx_pcmcia_platform_data pd;
+
+static struct platform_device bcm63xx_pcmcia_device = {
+       .name           = "bcm63xx_pcmcia",
+       .id             = 0,
+       .num_resources  = ARRAY_SIZE(pcmcia_resources),
+       .resource       = pcmcia_resources,
+       .dev            = {
+               .platform_data = &pd,
+       },
+};
+
+static int __init config_pcmcia_cs(unsigned int cs,
+                                  u32 base, unsigned int size)
+{
+       int ret;
+
+       ret = bcm63xx_set_cs_status(cs, 0);
+       if (!ret)
+               ret = bcm63xx_set_cs_base(cs, base, size);
+       if (!ret)
+               ret = bcm63xx_set_cs_status(cs, 1);
+       return ret;
+}
+
+static const __initdata struct {
+       unsigned int    cs;
+       unsigned int    base;
+       unsigned int    size;
+} pcmcia_cs[3] = {
+       {
+               .cs     = MPI_CS_PCMCIA_COMMON,
+               .base   = BCM_PCMCIA_COMMON_BASE_PA,
+               .size   = BCM_PCMCIA_COMMON_SIZE
+       },
+       {
+               .cs     = MPI_CS_PCMCIA_ATTR,
+               .base   = BCM_PCMCIA_ATTR_BASE_PA,
+               .size   = BCM_PCMCIA_ATTR_SIZE
+       },
+       {
+               .cs     = MPI_CS_PCMCIA_IO,
+               .base   = BCM_PCMCIA_IO_BASE_PA,
+               .size   = BCM_PCMCIA_IO_SIZE
+       },
+};
+
+int __init bcm63xx_pcmcia_register(void)
+{
+       int ret, i;
+
+       if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358())
+               return 0;
+
+       /* use correct pcmcia ready gpio depending on processor */
+       switch (bcm63xx_get_cpu_id()) {
+       case BCM6348_CPU_ID:
+               pd.ready_gpio = 22;
+               break;
+
+       case BCM6358_CPU_ID:
+               pd.ready_gpio = 18;
+               break;
+
+       default:
+               return -ENODEV;
+       }
+
+       pcmcia_resources[0].start = bcm63xx_regset_address(RSET_PCMCIA);
+       pcmcia_resources[0].end = pcmcia_resources[0].start +
+               RSET_PCMCIA_SIZE - 1;
+       pcmcia_resources[4].start = bcm63xx_get_irq_number(IRQ_PCMCIA);
+
+       /* configure pcmcia chip selects */
+       for (i = 0; i < 3; i++) {
+               ret = config_pcmcia_cs(pcmcia_cs[i].cs,
+                                      pcmcia_cs[i].base,
+                                      pcmcia_cs[i].size);
+               if (ret)
+                       goto out_err;
+       }
+
+       return platform_device_register(&bcm63xx_pcmcia_device);
+
+out_err:
+       printk(KERN_ERR "unable to set pcmcia chip select\n");
+       return ret;
+}
diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c
new file mode 100644 (file)
index 0000000..5f3d89c
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_uart.h>
+
+static struct resource uart_resources[] = {
+       {
+               .start          = -1, /* filled at runtime */
+               .end            = -1, /* filled at runtime */
+               .flags          = IORESOURCE_MEM,
+       },
+       {
+               .start          = -1, /* filled at runtime */
+               .flags          = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device bcm63xx_uart_device = {
+       .name           = "bcm63xx_uart",
+       .id             = 0,
+       .num_resources  = ARRAY_SIZE(uart_resources),
+       .resource       = uart_resources,
+};
+
+int __init bcm63xx_uart_register(void)
+{
+       uart_resources[0].start = bcm63xx_regset_address(RSET_UART0);
+       uart_resources[0].end = uart_resources[0].start;
+       uart_resources[0].end += RSET_UART_SIZE - 1;
+       uart_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0);
+       return platform_device_register(&bcm63xx_uart_device);
+}
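On the consumer side, a driver matching the "bcm63xx_uart" platform device would read back the resources filled in at runtime above; a hypothetical probe (driver internals are an assumption, not part of this patch) might begin:

static int bcm63xx_uart_probe(struct platform_device *pdev)
{
        struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int irq = platform_get_irq(pdev, 0);

        if (!mem || irq < 0)
                return -ENODEV;

        /* ioremap(mem->start, resource_size(mem)) and request_irq(irq, ...)
         * would follow here in a real driver. */
        return 0;
}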
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_pcmcia.h
new file mode 100644 (file)
index 0000000..2beb396
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef BCM63XX_DEV_PCMCIA_H_
+#define BCM63XX_DEV_PCMCIA_H_
+
+/*
+ * PCMCIA driver platform data
+ */
+struct bcm63xx_pcmcia_platform_data {
+       unsigned int ready_gpio;
+};
+
+int bcm63xx_pcmcia_register(void);
+
+#endif /* BCM63XX_DEV_PCMCIA_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_uart.h
new file mode 100644 (file)
index 0000000..bf348f5
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef BCM63XX_DEV_UART_H_
+#define BCM63XX_DEV_UART_H_
+
+int bcm63xx_uart_register(void);
+
+#endif /* BCM63XX_DEV_UART_H_ */
index e15f11a09311c282d4f9b3ccad8a13731ca35f6b..af42385245d5d44099289fafbf678e72d27a39c7 100644 (file)
@@ -77,7 +77,18 @@ extern void play_dead(void);
 
 extern asmlinkage void smp_call_function_interrupt(void);
 
-extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+       extern struct plat_smp_ops *mp_ops;     /* private */
+
+       mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       extern struct plat_smp_ops *mp_ops;     /* private */
+
+       mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
+}
 
 #endif /* __ASM_SMP_H */
index 792404948571847f8b9feb6067a7f63bb862e013..42f66c311473abb98fe7a845e360e365ccfed837 100644 (file)
 #if defined(__MIPSEB__)
 # include <linux/unaligned/be_struct.h>
 # include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
 # define get_unaligned __get_unaligned_be
 # define put_unaligned __put_unaligned_be
 #elif defined(__MIPSEL__)
 # include <linux/unaligned/le_struct.h>
 # include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
 # define get_unaligned __get_unaligned_le
 # define put_unaligned __put_unaligned_le
 #else
 #  error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???"
 #endif
 
+# include <linux/unaligned/generic.h>
+
 #endif /* _ASM_MIPS_UNALIGNED_H */
index f2397f00db439df62acc67424ac781692371d8e6..ad4e017ed2f322e40d98f8c10304edb1be0e7970 100644 (file)
@@ -172,13 +172,20 @@ static unsigned int translate_open_flags(int flags)
 }
 
 
-static void sp_setfsuidgid( uid_t uid, gid_t gid)
+static int sp_setfsuidgid(uid_t uid, gid_t gid)
 {
-       current->cred->fsuid = uid;
-       current->cred->fsgid = gid;
+       struct cred *new;
 
-       key_fsuid_changed(current);
-       key_fsgid_changed(current);
+       new = prepare_creds();
+       if (!new)
+               return -ENOMEM;
+
+       new->fsuid = uid;
+       new->fsgid = gid;
+
+       commit_creds(new);
+
+       return 0;
 }
 
 /*
@@ -196,7 +203,7 @@ void sp_work_handle_request(void)
        mm_segment_t old_fs;
        struct timeval tv;
        struct timezone tz;
-       int cmd;
+       int err, cmd;
 
        char *vcwd;
        int size;
@@ -225,8 +232,11 @@ void sp_work_handle_request(void)
        /* Run the syscall at the privilege of the user who loaded the
           SP program */
 
-       if (vpe_getuid(tclimit))
-               sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
+       if (vpe_getuid(tclimit)) {
+               err = sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
+               if (!err)
+                       pr_err("Change of creds failed\n");
+               if (err)
 
        switch (sc.cmd) {
        /* needs the flags argument translating from SDE kit to
@@ -283,8 +293,11 @@ void sp_work_handle_request(void)
                break;
        } /* switch */
 
-       if (vpe_getuid(tclimit))
-               sp_setfsuidgid( 0, 0);
+       if (vpe_getuid(tclimit)) {
+               err = sp_setfsuidgid(0, 0);
+               if (err)
+                       pr_err("restoring old creds failed\n");
+       }
 
        old_fs = get_fs();
        set_fs(KERNEL_DS);
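Because the cred structure attached to current is immutable once committed, the patch builds a fresh cred with prepare_creds() and installs it with commit_creds(). For a strictly temporary identity switch there is also the override_creds()/revert_creds() pattern, sketched here for comparison only (not what this patch uses):

static int with_fs_ids(uid_t uid, gid_t gid, int (*fn)(void *), void *arg)
{
        const struct cred *old;
        struct cred *new = prepare_creds();
        int ret;

        if (!new)
                return -ENOMEM;
        new->fsuid = uid;
        new->fsgid = gid;
        old = override_creds(new);      /* temporarily assume the new ids */
        ret = fn(arg);
        revert_creds(old);              /* restore the original creds */
        put_cred(new);                  /* drop the prepare_creds() reference */
        return ret;
}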
index a10ebfdc28ae91ad4a7241e95d7d6bcac9caadf4..364f066cb4979533336a37ea437d424047111402 100644 (file)
@@ -72,8 +72,9 @@ static void rtlx_dispatch(void)
 */
 static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
 {
+       unsigned int vpeflags;
+       unsigned long flags;
        int i;
-       unsigned int flags, vpeflags;
 
        /* Ought not to be strictly necessary for SMTC builds */
        local_irq_save(flags);
@@ -392,20 +393,12 @@ out:
 
 static int file_open(struct inode *inode, struct file *filp)
 {
-       int minor = iminor(inode);
-       int err;
-
-       lock_kernel();
-       err = rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
-       unlock_kernel();
-       return err;
+       return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
 }
 
 static int file_release(struct inode *inode, struct file *filp)
 {
-       int minor = iminor(inode);
-
-       return rtlx_release(minor);
+       return rtlx_release(iminor(inode));
 }
 
 static unsigned int file_poll(struct file *file, poll_table * wait)
index 4eb106c6a3ec59c71ad6fcda41e3df2520f31c11..e72e6844d134759218227bee63faac5f2d9bbea8 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/cpumask.h>
 #include <linux/cpu.h>
 #include <linux/err.h>
-#include <linux/smp.h>
 
 #include <asm/atomic.h>
 #include <asm/cpu.h>
@@ -128,19 +127,6 @@ asmlinkage __cpuinit void start_secondary(void)
        cpu_idle();
 }
 
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-       mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-}
-
-/*
- * We reuse the same vector for the single IPI
- */
-void arch_send_call_function_single_ipi(int cpu)
-{
-       mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
-}
-
 /*
  * Call into both interrupt handlers, as we share the IPI for them
  */
index 67153a0dc267a28cc3c64014933a0a1567b638e3..4d181df44a407bc487d8362ec12640cd2655b72f 100644 (file)
@@ -1098,9 +1098,8 @@ static void ipi_irq_dispatch(void)
 
 static struct irqaction irq_ipi = {
        .handler        = ipi_interrupt,
-       .flags          = IRQF_DISABLED,
-       .name           = "SMTC_IPI",
-       .flags          = IRQF_PERCPU
+       .flags          = IRQF_DISABLED | IRQF_PERCPU,
+       .name           = "SMTC_IPI"
 };
 
 static void setup_cross_vpe_interrupts(unsigned int nvpe)
index eb6c4c5b7fbe468176e74e47a71458a6afaa830f..03092ab2a296fcb89472f89ce45c44275bf922d7 100644 (file)
@@ -144,14 +144,15 @@ struct tc {
 };
 
 struct {
-       /* Virtual processing elements */
-       struct list_head vpe_list;
-
-       /* Thread contexts */
-       struct list_head tc_list;
+       spinlock_t vpe_list_lock;
+       struct list_head vpe_list;      /* Virtual processing elements */
+       spinlock_t tc_list_lock;
+       struct list_head tc_list;       /* Thread contexts */
 } vpecontrol = {
-       .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
-       .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
+       .vpe_list_lock  = SPIN_LOCK_UNLOCKED,
+       .vpe_list       = LIST_HEAD_INIT(vpecontrol.vpe_list),
+       .tc_list_lock   = SPIN_LOCK_UNLOCKED,
+       .tc_list        = LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
 static void release_progmem(void *ptr);
@@ -159,28 +160,38 @@ static void release_progmem(void *ptr);
 /* get the vpe associated with this minor */
 static struct vpe *get_vpe(int minor)
 {
-       struct vpe *v;
+       struct vpe *res, *v;
 
        if (!cpu_has_mipsmt)
                return NULL;
 
+       res = NULL;
+       spin_lock(&vpecontrol.vpe_list_lock);
        list_for_each_entry(v, &vpecontrol.vpe_list, list) {
-               if (v->minor == minor)
-                       return v;
+               if (v->minor == minor) {
+                       res = v;
+                       break;
+               }
        }
+       spin_unlock(&vpecontrol.vpe_list_lock);
 
-       return NULL;
+       return res;
 }
 
 /* get the vpe associated with this minor */
 static struct tc *get_tc(int index)
 {
-       struct tc *t;
+       struct tc *res, *t;
 
+       res = NULL;
+       spin_lock(&vpecontrol.tc_list_lock);
        list_for_each_entry(t, &vpecontrol.tc_list, list) {
-               if (t->index == index)
-                       return t;
+               if (t->index == index) {
+                       res = t;
+                       break;
+               }
        }
+       spin_unlock(&vpecontrol.tc_list_lock);
 
-       return NULL;
+       return res;
 }
@@ -190,15 +201,17 @@ static struct vpe *alloc_vpe(int minor)
 {
        struct vpe *v;
 
-       if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
+       if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
                return NULL;
-       }
 
        INIT_LIST_HEAD(&v->tc);
+       spin_lock(&vpecontrol.vpe_list_lock);
        list_add_tail(&v->list, &vpecontrol.vpe_list);
+       spin_unlock(&vpecontrol.vpe_list_lock);
 
        INIT_LIST_HEAD(&v->notify);
        v->minor = minor;
+
        return v;
 }
 
@@ -212,7 +225,10 @@ static struct tc *alloc_tc(int index)
 
        INIT_LIST_HEAD(&tc->tc);
        tc->index = index;
+
+       spin_lock(&vpecontrol.tc_list_lock);
        list_add_tail(&tc->list, &vpecontrol.tc_list);
+       spin_unlock(&vpecontrol.tc_list_lock);
 
 out:
        return tc;
@@ -227,7 +243,7 @@ static void release_vpe(struct vpe *v)
        kfree(v);
 }
 
-static void dump_mtregs(void)
+static void __maybe_unused dump_mtregs(void)
 {
        unsigned long val;
 
@@ -1048,20 +1064,19 @@ static int vpe_open(struct inode *inode, struct file *filp)
        enum vpe_state state;
        struct vpe_notifications *not;
        struct vpe *v;
-       int ret, err = 0;
+       int ret;
 
-       lock_kernel();
        if (minor != iminor(inode)) {
                /* assume only 1 device at the moment. */
-               printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
-               err = -ENODEV;
-               goto out;
+               pr_warning("VPE loader: only vpe1 is supported\n");
+
+               return -ENODEV;
        }
 
        if ((v = get_vpe(tclimit)) == NULL) {
-               printk(KERN_WARNING "VPE loader: unable to get vpe\n");
-               err = -ENODEV;
-               goto out;
+               pr_warning("VPE loader: unable to get vpe\n");
+
+               return -ENODEV;
        }
 
        state = xchg(&v->state, VPE_STATE_INUSE);
@@ -1101,8 +1116,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
        v->shared_ptr = NULL;
        v->__start = 0;
 
-out:
-       unlock_kernel();
+
        return 0;
 }
 
@@ -1594,14 +1609,14 @@ static void __exit vpe_module_exit(void)
 {
        struct vpe *v, *n;
 
+       device_del(&vpe_device);
+       unregister_chrdev(major, module_name);
+
+       /* No locking needed here */
        list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
-               if (v->state != VPE_STATE_UNUSED) {
+               if (v->state != VPE_STATE_UNUSED)
                        release_vpe(v);
-               }
        }
-
-       device_del(&vpe_device);
-       unregister_chrdev(major, module_name);
 }
 
 module_init(vpe_module_init);
index b55c2d1b998fee54a905b4cc816d3c7196209673..5ab5fa8c1d8229659a4ba30f204943e5fcf70416 100644 (file)
@@ -32,6 +32,11 @@ static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
  */
 static void mips_sc_inv(unsigned long addr, unsigned long size)
 {
+       unsigned long lsize = cpu_scache_line_size();
+       unsigned long almask = ~(lsize - 1);
+
+       cache_op(Hit_Writeback_Inv_SD, addr & almask);
+       cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
        blast_inv_scache_range(addr, addr + size);
 }
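
The two added cache_op() calls write back and invalidate the cache lines holding the first and last byte of the range before blast_inv_scache_range() invalidates the rest; when addr or addr + size is not line-aligned, those boundary lines can also contain unrelated dirty data that a plain invalidate would silently drop. A worked example of the address arithmetic, assuming a 32-byte L2 line size:

#include <stdio.h>

int main(void)
{
        unsigned long lsize = 32;                       /* assumed line size */
        unsigned long almask = ~(lsize - 1);
        unsigned long addr = 0x1234, size = 0x100;

        printf("write back line at %#lx (covers start)\n", addr & almask);
        printf("write back line at %#lx (covers end)\n", (addr + size - 1) & almask);
        printf("then invalidate [%#lx, %#lx)\n", addr, addr + size);
        return 0;
}
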
 
index 655cb8dec34037bf7ab5b1139c000d7873c5083d..deed1d5d4982b1ecf5bd6f0ad0e66dda7583b917 100644 (file)
@@ -44,7 +44,7 @@ static struct loongson2_register_config {
        unsigned int ctrl;
        unsigned long long reset_counter1;
        unsigned long long reset_counter2;
-       int cnt1_enalbed, cnt2_enalbed;
+       int cnt1_enabled, cnt2_enabled;
 } reg;
 
 DEFINE_SPINLOCK(sample_lock);
@@ -81,8 +81,8 @@ static void loongson2_reg_setup(struct op_counter_config *cfg)
 
        reg.ctrl = ctrl;
 
-       reg.cnt1_enalbed = cfg[0].enabled;
-       reg.cnt2_enalbed = cfg[1].enabled;
+       reg.cnt1_enabled = cfg[0].enabled;
+       reg.cnt2_enabled = cfg[1].enabled;
 
 }
 
@@ -99,7 +99,7 @@ static void loongson2_cpu_setup(void *args)
 static void loongson2_cpu_start(void *args)
 {
        /* Start all counters on current CPU */
-       if (reg.cnt1_enalbed || reg.cnt2_enalbed)
+       if (reg.cnt1_enabled || reg.cnt2_enabled)
                write_c0_perfctrl(reg.ctrl);
 }
 
@@ -125,7 +125,7 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
         */
 
        /* Check whether the irq belongs to me */
-       enabled = reg.cnt1_enalbed | reg.cnt2_enalbed;
+       enabled = reg.cnt1_enabled | reg.cnt2_enabled;
        if (!enabled)
                return IRQ_NONE;
 
@@ -136,12 +136,12 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
        spin_lock_irqsave(&sample_lock, flags);
 
        if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
-               if (reg.cnt1_enalbed)
+               if (reg.cnt1_enabled)
                        oprofile_add_sample(regs, 0);
                counter1 = reg.reset_counter1;
        }
        if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
-               if (reg.cnt2_enalbed)
+               if (reg.cnt2_enabled)
                        oprofile_add_sample(regs, 1);
                counter2 = reg.reset_counter2;
        }
index 109c95ca698bb9cb3aefde8b584d4f1bfa7d834d..32548b5d68d6738ec7bf5709cf0a26f6055da3f4 100644 (file)
@@ -385,6 +385,7 @@ int msp_pcibios_config_access(unsigned char access_type,
        unsigned long intr;
        unsigned long value;
        static char pciirqflag;
+       int ret;
 #if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
        unsigned int    vpe_status;
 #endif
@@ -402,11 +403,13 @@ int msp_pcibios_config_access(unsigned char access_type,
         * allocation assigns an interrupt handler to the interrupt.
         */
        if (pciirqflag == 0) {
-               request_irq(MSP_INT_PCI,/* Hardcoded internal MSP7120 wiring */
+               ret = request_irq(MSP_INT_PCI,/* Hardcoded internal MSP7120 wiring */
                                bpci_interrupt,
                                IRQF_SHARED | IRQF_DISABLED,
                                "PMC MSP PCI Host",
                                preg);
+               if (ret != 0)
+                       return ret;
                pciirqflag = ~0;
        }
 
index 9aa8f2951df6efd64a79137c83de3109a376bac5..c6851df9ab741a4979ca373bc32b32862bfb5038 100644 (file)
@@ -165,7 +165,7 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
        REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
 }
 
-static void ip27_send_ipi(const struct cpumask *mask, unsigned int action)
+static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
        unsigned int i;
 
index ba59839a021ee688b37761e54b8cd966abc537e5..4070268aa769826c46bb3c5328f398a71abc0283 100644 (file)
@@ -117,10 +117,6 @@ static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
        unsigned long flags;
        unsigned int irq_dirty;
 
-       if (cpumask_weight(mask) != 1) {
-               printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
-               return -1;
-       }
        i = cpumask_first(mask);
 
        /* Convert logical CPU to physical CPU */
index 637a194e5cd5f95e0ee2de2687ddbab05ac45cec..15ea778b5e6664c0f305681b3c9a0c3c2fad3f2e 100644 (file)
@@ -403,36 +403,31 @@ static int sbprof_zbprof_stop(void)
 static int sbprof_tb_open(struct inode *inode, struct file *filp)
 {
        int minor;
-       int err = 0;
 
-       lock_kernel();
        minor = iminor(inode);
-       if (minor != 0) {
-               err = -ENODEV;
-               goto out;
-       }
+       if (minor != 0)
+               return -ENODEV;
 
-       if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED) {
-               err = -EBUSY;
-               goto out;
-       }
+       if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
+               return -EBUSY;
 
        memset(&sbp, 0, sizeof(struct sbprof_tb));
        sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
        if (!sbp.sbprof_tbbuf) {
-               err = -ENOMEM;
-               goto out;
+               sbp.open = SB_CLOSED;
+               wmb();
+               return -ENOMEM;
        }
+
        memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
        init_waitqueue_head(&sbp.tb_sync);
        init_waitqueue_head(&sbp.tb_read);
        mutex_init(&sbp.lock);
 
        sbp.open = SB_OPEN;
+       wmb();
 
-  out:
-       unlock_kernel();
-       return err;
+       return 0;
 }
 
 static int sbprof_tb_release(struct inode *inode, struct file *filp)
@@ -440,7 +435,7 @@ static int sbprof_tb_release(struct inode *inode, struct file *filp)
        int minor;
 
        minor = iminor(inode);
-       if (minor != 0 || !sbp.open)
+       if (minor != 0 || sbp.open != SB_CLOSED)
                return -ENODEV;
 
        mutex_lock(&sbp.lock);
@@ -449,7 +444,8 @@ static int sbprof_tb_release(struct inode *inode, struct file *filp)
                sbprof_zbprof_stop();
 
        vfree(sbp.sbprof_tbbuf);
-       sbp.open = 0;
+       sbp.open = SB_CLOSED;
+       wmb();
 
        mutex_unlock(&sbp.lock);
 
@@ -583,7 +579,8 @@ static int __init sbprof_tb_init(void)
        }
        tb_dev = dev;
 
-       sbp.open = 0;
+       sbp.open = SB_CLOSED;
+       wmb();
        tb_period = zbbus_mhz * 10000LL;
        pr_info(DEVNAME ": initialized - tb_period = %lld\n",
                (long long) tb_period);
index 623ffc933c4cf5d3e662a8e81d3cde2a72120fe0..5277aac96b0f47730df589d1b3e4c5e972f80f84 100644 (file)
@@ -106,7 +106,7 @@ void read_persistent_clock(struct timespec *ts)
                break;
        }
        ts->tv_sec = sec;
-       tv->tv_nsec = 0;
+       ts->tv_nsec = 0;
 }
 
 int rtc_mips_set_time(unsigned long sec)
index 8a3a4dd55763eba744552c46c825831569e90610..167e10ff06d99ddb57175117e552bb9b4835faa1 100644 (file)
@@ -129,42 +129,47 @@ extern int fixup_exception(struct pt_regs *regs);
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) (*(struct __large_struct *)(x))
 
-#define __get_user_nocheck(x, ptr, size)       \
-({                                             \
-       __typeof(*(ptr)) __gu_val;              \
-       unsigned long __gu_addr;                \
-       int __gu_err;                           \
-       __gu_addr = (unsigned long) (ptr);      \
-       switch (size) {                         \
-       case 1:  __get_user_asm("bu"); break;   \
-       case 2:  __get_user_asm("hu"); break;   \
-       case 4:  __get_user_asm(""  ); break;   \
-       default: __get_user_unknown(); break;   \
-       }                                       \
-       x = (__typeof__(*(ptr))) __gu_val;      \
-       __gu_err;                               \
+#define __get_user_nocheck(x, ptr, size)                               \
+({                                                                     \
+       unsigned long __gu_addr;                                        \
+       int __gu_err;                                                   \
+       __gu_addr = (unsigned long) (ptr);                              \
+       switch (size) {                                                 \
+       case 1: {                                                       \
+               unsigned char __gu_val;                                 \
+               __get_user_asm("bu");                                   \
+               (x) = *(__force __typeof__(*(ptr))*) &__gu_val;         \
+               break;                                                  \
+       }                                                               \
+       case 2: {                                                       \
+               unsigned short __gu_val;                                \
+               __get_user_asm("hu");                                   \
+               (x) = *(__force __typeof__(*(ptr))*) &__gu_val;         \
+               break;                                                  \
+       }                                                               \
+       case 4: {                                                       \
+               unsigned int __gu_val;                                  \
+               __get_user_asm("");                                     \
+               (x) = *(__force __typeof__(*(ptr))*) &__gu_val;         \
+               break;                                                  \
+       }                                                               \
+       default:                                                        \
+               __get_user_unknown();                                   \
+               break;                                                  \
+       }                                                               \
+       __gu_err;                                                       \
 })
 
-#define __get_user_check(x, ptr, size)                 \
-({                                                     \
-       __typeof__(*(ptr)) __gu_val;                    \
-       unsigned long __gu_addr;                        \
-       int __gu_err;                                   \
-       __gu_addr = (unsigned long) (ptr);              \
-       if (likely(__access_ok(__gu_addr,size))) {      \
-               switch (size) {                         \
-               case 1:  __get_user_asm("bu"); break;   \
-               case 2:  __get_user_asm("hu"); break;   \
-               case 4:  __get_user_asm(""  ); break;   \
-               default: __get_user_unknown(); break;   \
-               }                                       \
-       }                                               \
-       else {                                          \
-               __gu_err = -EFAULT;                     \
-               __gu_val = 0;                           \
-       }                                               \
-       x = (__typeof__(*(ptr))) __gu_val;              \
-       __gu_err;                                       \
+#define __get_user_check(x, ptr, size)                                 \
+({                                                                     \
+       int _e;                                                         \
+       if (likely(__access_ok((unsigned long) (ptr), (size))))         \
+               _e = __get_user_nocheck((x), (ptr), (size));            \
+       else {                                                          \
+               _e = -EFAULT;                                           \
+               (x) = (__typeof__(x))0;                                 \
+       }                                                               \
+       _e;                                                             \
 })
 
 #define __get_user_asm(INSN)                                   \
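
The rewritten __get_user_nocheck() loads into a temporary whose C type matches the access size and only then reinterprets those bytes as the pointee type through a __force cast, while __get_user_check() is reduced to an __access_ok() test layered on top of it. A userspace mock of that sized-temporary-plus-cast pattern (GNU statement expressions, no fault handling, all names invented):

#include <stdio.h>

/* Only the type plumbing is mocked here.  Reading through an explicitly
 * sized temporary and casting back also works when *(ptr) is
 * const-qualified, where a plain "__typeof__(*(ptr)) val; val = ...;"
 * temporary would itself be const and refuse the assignment.
 */
#define mock_get_user(x, ptr) ({                                        \
        unsigned int __gu_val = *(const unsigned int *)(ptr);           \
        (x) = *(__typeof__(*(ptr)) *)&__gu_val;                         \
        0;                                                              \
})

int main(void)
{
        const unsigned int src = 42;
        unsigned int dst = 0;

        (void)mock_get_user(dst, &src);
        printf("%u\n", dst);            /* 42 */
        return 0;
}
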
index 8b450e920af1503132b1b73ac05a023043843c51..2a0bf79ab968162eb514c2190dc9a8e136845645 100644 (file)
@@ -20,9 +20,9 @@ extern unsigned long mn10300_ioclk;   /* IOCLK (crystal speed) in HZ */
 extern unsigned long mn10300_iobclk;
 extern unsigned long mn10300_tsc_per_HZ;
 
-#define MN10300_IOCLK          ((unsigned long)mn10300_ioclk)
+#define MN10300_IOCLK          mn10300_ioclk
 /* If this processor has another clock, uncomment the below. */
-/* #define MN10300_IOBCLK      ((unsigned long)mn10300_iobclk) */
+/* #define MN10300_IOBCLK      mn10300_iobclk */
 
 #else /* !CONFIG_MN10300_RTC */
 
@@ -35,7 +35,7 @@ extern unsigned long mn10300_tsc_per_HZ;
 #define MN10300_TSCCLK         MN10300_IOCLK
 
 #ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ     ((unsigned long)mn10300_tsc_per_HZ)
+#define MN10300_TSC_PER_HZ     mn10300_tsc_per_HZ
 #else /* !CONFIG_MN10300_RTC */
 #define MN10300_TSC_PER_HZ     (MN10300_TSCCLK/HZ)
 #endif /* !CONFIG_MN10300_RTC */
index 7d514841ffda7ca790fdd07ee1a6522f4ce39203..67be3f2eb18e928b68363601ae5440bb737b9e2b 100644 (file)
@@ -20,9 +20,9 @@ extern unsigned long mn10300_ioclk;   /* IOCLK (crystal speed) in HZ */
 extern unsigned long mn10300_iobclk;
 extern unsigned long mn10300_tsc_per_HZ;
 
-#define MN10300_IOCLK          ((unsigned long)mn10300_ioclk)
+#define MN10300_IOCLK          mn10300_ioclk
 /* If this processor has another clock, uncomment the below. */
-/* #define MN10300_IOBCLK      ((unsigned long)mn10300_iobclk) */
+/* #define MN10300_IOBCLK      mn10300_iobclk */
 
 #else /* !CONFIG_MN10300_RTC */
 
@@ -35,7 +35,7 @@ extern unsigned long mn10300_tsc_per_HZ;
 #define MN10300_TSCCLK         MN10300_IOCLK
 
 #ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ     ((unsigned long)mn10300_tsc_per_HZ)
+#define MN10300_TSC_PER_HZ     mn10300_tsc_per_HZ
 #else /* !CONFIG_MN10300_RTC */
 #define MN10300_TSC_PER_HZ     (MN10300_TSCCLK/HZ)
 #endif /* !CONFIG_MN10300_RTC */
index f388dc68f60544cf941dfd66267b2a6f7167db1f..524d9352f17e56a4120008d601d219590fad6a23 100644 (file)
@@ -18,6 +18,7 @@ config PARISC
        select BUG
        select HAVE_PERF_EVENTS
        select GENERIC_ATOMIC64 if !64BIT
+       select HAVE_ARCH_TRACEHOOK
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
          in many of their workstations & servers (HP9000 700 and 800 series,
index de3fe3a18229a23e99cea0ec2ec6a7cc7a9e6669..6fec4d4a1a1831f72650cabbdc94c89d4eaec2bb 100644 (file)
@@ -21,9 +21,9 @@
 #define KERNEL_MAP_END         (TMPALIAS_MAP_START)
 
 #ifndef __ASSEMBLY__
-extern void *vmalloc_start;
+extern void *parisc_vmalloc_start;
 #define PCXL_DMA_MAP_SIZE      (8*1024*1024)
-#define VMALLOC_START          ((unsigned long)vmalloc_start)
+#define VMALLOC_START          ((unsigned long)parisc_vmalloc_start)
 #define VMALLOC_END            (KERNEL_MAP_END)
 #endif /*__ASSEMBLY__*/
 
index ce93133d511243104080f57a2fb6be59e415bd86..0d68184a76cb54006aeb5142c54315564eabd7aa 100644 (file)
@@ -1,29 +1,11 @@
 /* hardirq.h: PA-RISC hard IRQ support.
  *
  * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
- *
- * The locking is really quite interesting.  There's a cpu-local
- * count of how many interrupts are being handled, and a global
- * lock.  An interrupt can only be serviced if the global lock
- * is free.  You can't be sure no more interrupts are being
- * serviced until you've acquired the lock and then checked
- * all the per-cpu interrupt counts are all zero.  It's a specialised
- * br_lock, and that's exactly how Sparc does it.  We don't because
- * it's more locking for us.  This way is lock-free in the interrupt path.
  */
 
 #ifndef _PARISC_HARDIRQ_H
 #define _PARISC_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-       unsigned long __softirq_pending; /* set_bit is used on this */
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-void ack_bad_irq(unsigned int irq);
+#include <asm-generic/hardirq.h>
 
 #endif /* _PARISC_HARDIRQ_H */
index 302f68dc889cb0e3c5ddc04a1a42ad19e5e58998..aead40b16dd8f4d01206fba9a1a7b5328a321de1 100644 (file)
@@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task);
 #define user_mode(regs)                        (((regs)->iaoq[0] & 3) ? 1 : 0)
 #define user_space(regs)               (((regs)->iasq[1] != 0) ? 1 : 0)
 #define instruction_pointer(regs)      ((regs)->iaoq[0] & ~3)
+#define user_stack_pointer(regs)       ((regs)->gr[30])
 unsigned long profile_pc(struct pt_regs *);
 extern void show_regs(struct pt_regs *);
-#endif
+
+
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
new file mode 100644 (file)
index 0000000..8bdfd2c
--- /dev/null
@@ -0,0 +1,40 @@
+/* syscall.h */
+
+#ifndef _ASM_PARISC_SYSCALL_H_
+#define _ASM_PARISC_SYSCALL_H_
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+static inline long syscall_get_nr(struct task_struct *tsk,
+                                 struct pt_regs *regs)
+{
+       return regs->gr[20];
+}
+
+static inline void syscall_get_arguments(struct task_struct *tsk,
+                                        struct pt_regs *regs, unsigned int i,
+                                        unsigned int n, unsigned long *args)
+{
+       BUG_ON(i);
+
+       switch (n) {
+       case 6:
+               args[5] = regs->gr[21];
+       case 5:
+               args[4] = regs->gr[22];
+       case 4:
+               args[3] = regs->gr[23];
+       case 3:
+               args[2] = regs->gr[24];
+       case 2:
+               args[1] = regs->gr[25];
+       case 1:
+               args[0] = regs->gr[26];
+               break;
+       default:
+               BUG();
+       }
+}
+
+#endif /*_ASM_PARISC_SYSCALL_H_*/
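
syscall_get_arguments() relies on deliberate fall-through: gr[26] down to gr[21] hold syscall arguments 0 through 5, so entering the switch at case n and dropping through to case 1 copies exactly the last n of them. A standalone sketch of that copy, with only the register numbering taken from the header above and everything else mocked:

#include <stdio.h>

static void copy_args(const unsigned long *gr, unsigned int n,
                      unsigned long *args)
{
        switch (n) {
        case 6: args[5] = gr[21];       /* fall through */
        case 5: args[4] = gr[22];       /* fall through */
        case 4: args[3] = gr[23];       /* fall through */
        case 3: args[2] = gr[24];       /* fall through */
        case 2: args[1] = gr[25];       /* fall through */
        case 1: args[0] = gr[26];
                break;
        default:
                break;
        }
}

int main(void)
{
        unsigned long gr[32] = { 0 }, args[6] = { 0 };

        gr[26] = 111; gr[25] = 222; gr[24] = 333;
        copy_args(gr, 3, args);
        printf("%lu %lu %lu\n", args[0], args[1], args[2]);     /* 111 222 333 */
        return 0;
}
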
index ac775a76bff71508a939a08fc983d9d4b8e272f5..7ecc1039cfedfcb130c83da82fd64ed325b68364 100644 (file)
@@ -32,6 +32,11 @@ struct thread_info {
 #define init_thread_info        (init_thread_union.thread_info)
 #define init_stack              (init_thread_union.stack)
 
+/* how to get the thread information struct from C */
+#define current_thread_info()  ((struct thread_info *)mfctl(30))
+
+#endif /* !__ASSEMBLY */
+
 /* thread information allocation */
 
 #define THREAD_SIZE_ORDER            2
@@ -40,11 +45,6 @@ struct thread_info {
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define THREAD_SHIFT            (PAGE_SHIFT + THREAD_SIZE_ORDER)
 
-/* how to get the thread information struct from C */
-#define current_thread_info()  ((struct thread_info *)mfctl(30))
-
-#endif /* !__ASSEMBLY */
-
 #define PREEMPT_ACTIVE_BIT     28
 #define PREEMPT_ACTIVE         (1 << PREEMPT_ACTIVE_BIT)
 
@@ -60,6 +60,8 @@ struct thread_info {
 #define TIF_RESTORE_SIGMASK    6       /* restore saved signal mask */
 #define TIF_FREEZE             7       /* is freezing for suspend */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
+#define TIF_SINGLESTEP         9       /* single stepping? */
+#define TIF_BLOCKSTEP          10      /* branch stepping? */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
@@ -69,6 +71,8 @@ struct thread_info {
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_FREEZE            (1 << TIF_FREEZE)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
+#define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
 
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                                  _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
index 699cf8ef211816576c5ee10d29c7bc67257580fc..fcd3c707bf126005c3fdb75325a65540890f9923 100644 (file)
@@ -270,8 +270,8 @@ int main(void)
        DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
        DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
        BLANK();
-       DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT);
-       DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT);
+       DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
+       DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
        BLANK();
        DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
        DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
index 8c4712b74dc13b626f449e699ee31aca15701315..3a44f7f704fad67792fa0cc6325060adb6db933c 100644 (file)
@@ -2047,12 +2047,13 @@ syscall_do_signal:
        b,n     syscall_check_sig
 
 syscall_restore:
-       /* Are we being ptraced? */
        LDREG   TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
 
-       ldw     TASK_PTRACE(%r1), %r19
-       bb,<    %r19,31,syscall_restore_rfi
-       nop
+       /* Are we being ptraced? */
+       ldw     TASK_FLAGS(%r1),%r19
+       ldi     (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
+       and,COND(=)     %r19,%r2,%r0
+       b,n     syscall_restore_rfi
 
        ldo     TASK_PT_FR31(%r1),%r19             /* reload fpregs */
        rest_fp %r19
@@ -2113,16 +2114,16 @@ syscall_restore_rfi:
        ldi     0x0b,%r20                          /* Create new PSW */
        depi    -1,13,1,%r20                       /* C, Q, D, and I bits */
 
-       /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
-        * set in include/linux/ptrace.h and converted to PA bitmap
+       /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
+        * set in thread_info.h and converted to PA bitmap
         * numbers in asm-offsets.c */
 
-       /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
-       extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
+       /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
+       extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
        depi    -1,27,1,%r20                       /* R bit */
 
-       /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
-       extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
+       /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
+       extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
        depi    -1,7,1,%r20                        /* T bit */
 
        STREG   %r20,TASK_PT_PSW(%r1)
index 330f536a9324798afda4580d152c09eb3b04c141..2e7610cb33d5be4afa18daae6a5f9187078a2bf4 100644 (file)
@@ -423,8 +423,3 @@ void __init init_IRQ(void)
         set_eiem(cpu_eiem);    /* EIEM : enable all external intr */
 
 }
-
-void ack_bad_irq(unsigned int irq)
-{
-       printk(KERN_WARNING "unexpected IRQ %d\n", irq);
-}
index 61ee0eec4e699b93f3555770c18bc2b221c8f0de..212074653df7fc5a109da9222137a13226cc7bff 100644 (file)
@@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr,
         * ourselves */
        for (i = 1; i < hdr->e_shnum; i++) {
                if(sechdrs[i].sh_type == SHT_SYMTAB
-                  && (sechdrs[i].sh_type & SHF_ALLOC)) {
+                  && (sechdrs[i].sh_flags & SHF_ALLOC)) {
                        int strindex = sechdrs[i].sh_link;
                        /* FIXME: AWFUL HACK
                         * The cast is to drop the const from
index 927db3668b6ffd6bec55e8a1b90d099f5cd7a1dc..c4f49e45129dee568e4d75c99e26934fac16e523 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/user.h>
 #include <linux/personality.h>
 #include <linux/security.h>
@@ -35,7 +36,8 @@
  */
 void ptrace_disable(struct task_struct *task)
 {
-       task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP);
+       clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+       clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
 
        /* make sure the trap bits are not set */
        pa_psw(task)->r = 0;
@@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task)
 
 void user_enable_single_step(struct task_struct *task)
 {
-       task->ptrace &= ~PT_BLOCKSTEP;
-       task->ptrace |= PT_SINGLESTEP;
+       clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+       set_tsk_thread_flag(task, TIF_SINGLESTEP);
 
        if (pa_psw(task)->n) {
                struct siginfo si;
@@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task)
 
 void user_enable_block_step(struct task_struct *task)
 {
-       task->ptrace &= ~PT_SINGLESTEP;
-       task->ptrace |= PT_BLOCKSTEP;
+       clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+       set_tsk_thread_flag(task, TIF_BLOCKSTEP);
 
        /* Enable taken branch trap. */
        pa_psw(task)->r = 0;
@@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 }
 #endif
 
+long do_syscall_trace_enter(struct pt_regs *regs)
+{
+       if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+           tracehook_report_syscall_entry(regs))
+               return -1L;
+
+       return regs->gr[20];
+}
 
-void syscall_trace(void)
+void do_syscall_trace_exit(struct pt_regs *regs)
 {
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               return;
-       if (!(current->ptrace & PT_PTRACED))
-               return;
-       ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-                                ? 0x80 : 0));
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
+       int stepping = test_thread_flag(TIF_SINGLESTEP) ||
+               test_thread_flag(TIF_BLOCKSTEP);
+
+       if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
+               tracehook_report_syscall_exit(regs, stepping);
 }
index 8eb3c63c407a43e5aaac3e96654ce391cd8764a5..e8467e4aa8d16b054aabac097bc19210ae4a409c 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/compat.h>
@@ -34,7 +35,6 @@
 #include <asm/asm-offsets.h>
 
 #ifdef CONFIG_COMPAT
-#include <linux/compat.h>
 #include "signal32.h"
 #endif
 
@@ -468,6 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
                sigaddset(&current->blocked,sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
+
+       tracehook_signal_handler(sig, info, ka, regs, 0);
+
        return 1;
 }
 
index 59fc1a43ec3ef094ae514694e549ab6b518aa05a..f5f96021caa0deedec2257af361dc95ba0307ea5 100644 (file)
@@ -288,18 +288,23 @@ tracesys:
        STREG   %r18,PT_GR18(%r2)
        /* Finished saving things for the debugger */
 
-       ldil    L%syscall_trace,%r1
+       copy    %r2,%r26
+       ldil    L%do_syscall_trace_enter,%r1
        ldil    L%tracesys_next,%r2
-       be      R%syscall_trace(%sr7,%r1)
+       be      R%do_syscall_trace_enter(%sr7,%r1)
        ldo     R%tracesys_next(%r2),%r2
        
-tracesys_next: 
+tracesys_next:
+       /* do_syscall_trace_enter either returned the syscallno, or -1L,
+        *  so we skip restoring the PT_GR20 below, since we pulled it from
+        *  task->thread.regs.gr[20] above.
+        */
+       copy    %ret0,%r20
        ldil    L%sys_call_table,%r1
        ldo     R%sys_call_table(%r1), %r19
 
        ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
        LDREG   TI_TASK(%r1), %r1
-       LDREG   TASK_PT_GR20(%r1), %r20
        LDREG   TASK_PT_GR26(%r1), %r26         /* Restore the users args */
        LDREG   TASK_PT_GR25(%r1), %r25
        LDREG   TASK_PT_GR24(%r1), %r24
@@ -336,7 +341,8 @@ tracesys_exit:
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
 #endif
-       bl      syscall_trace, %r2
+       ldo     TASK_REGS(%r1),%r26
+       bl      do_syscall_trace_exit,%r2
        STREG   %r28,TASK_PT_GR28(%r1)          /* save return value now */
        ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
        LDREG   TI_TASK(%r1), %r1
@@ -353,12 +359,12 @@ tracesys_exit:
 
 tracesys_sigexit:
        ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
-       LDREG   0(%r1), %r1
+       LDREG   TI_TASK(%r1), %r1
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
 #endif
-       bl      syscall_trace, %r2
-       nop
+       bl      do_syscall_trace_exit,%r2
+       ldo     TASK_REGS(%r1),%r26
 
        ldil    L%syscall_exit_rfi,%r1
        be,n    R%syscall_exit_rfi(%sr7,%r1)
index 775be2791bc27e1313e73bf49d1e4d1ee8984dcd..fda4baa059b5002a99f64fdb832cf134b20756ee 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/cache.h>
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
        
 /* ld script to make hppa Linux kernel */
 #ifndef CONFIG_64BIT
@@ -134,6 +135,15 @@ SECTIONS
        __init_begin = .;
        INIT_TEXT_SECTION(16384)
        INIT_DATA_SECTION(16)
+       /* we have to discard exit text and such at runtime, not link time */
+       .exit.text :
+       {
+               EXIT_TEXT
+       }
+       .exit.data :
+       {
+               EXIT_DATA
+       }
 
        PERCPU(PAGE_SIZE)
        . = ALIGN(PAGE_SIZE);
index d5aca31fddbb5a59e8d8100750167c12fb13f38c..13b6e3e59b994762a7efb47483c4b144ca0c5feb 100644 (file)
@@ -434,8 +434,8 @@ void mark_rodata_ro(void)
 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                     & ~(VM_MAP_OFFSET-1)))
 
-void *vmalloc_start __read_mostly;
-EXPORT_SYMBOL(vmalloc_start);
+void *parisc_vmalloc_start __read_mostly;
+EXPORT_SYMBOL(parisc_vmalloc_start);
 
 #ifdef CONFIG_PA11
 unsigned long pcxl_dma_start __read_mostly;
@@ -496,13 +496,14 @@ void __init mem_init(void)
 #ifdef CONFIG_PA11
        if (hppa_dma_ops == &pcxl_dma_ops) {
                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
-               vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
+               parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+                                               + PCXL_DMA_MAP_SIZE);
        } else {
                pcxl_dma_start = 0;
-               vmalloc_start = SET_MAP_OFFSET(MAP_START);
+               parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
        }
 #else
-       vmalloc_start = SET_MAP_OFFSET(MAP_START);
+       parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
 #endif
 
        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
index 47ee603f558ed1022f4ce466b232b2b92a544eb6..2aa371e30079ced2c7938133b355ae7ca54b7343 100644 (file)
@@ -201,7 +201,7 @@ static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
        return single_open(file, kvmppc_exit_timing_show, inode->i_private);
 }
 
-static struct file_operations kvmppc_exit_timing_fops = {
+static const struct file_operations kvmppc_exit_timing_fops = {
        .owner   = THIS_MODULE,
        .open    = kvmppc_exit_timing_open,
        .read    = seq_read,
index 961309446170b6550f96ac471ea428fd75cfee49..884e8bcec499ff13718b7bb09f4148467cb6d22d 100644 (file)
@@ -147,7 +147,7 @@ static int __fops ## _open(struct inode *inode, struct file *file)  \
        __simple_attr_check_format(__fmt, 0ull);                        \
        return spufs_attr_open(inode, file, __get, __set, __fmt);       \
 }                                                                      \
-static struct file_operations __fops = {                               \
+static const struct file_operations __fops = {                         \
        .owner   = THIS_MODULE,                                         \
        .open    = __fops ## _open,                                     \
        .release = spufs_attr_release,                                  \
index ab69925d579b7c8fe586a1cc1357cab2dbd83fb3..937a544a236d5e1a58be64b821d23bb20e965c63 100644 (file)
@@ -209,7 +209,7 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
        return n_read * sizeof(struct dtl_entry);
 }
 
-static struct file_operations dtl_fops = {
+static const struct file_operations dtl_fops = {
        .open           = dtl_file_open,
        .release        = dtl_file_release,
        .read           = dtl_file_read,
index ec5eee7c25d83e4dcc2245f80382b13938c6a263..06cce8285ba01883ee2e4f97a126ff6b50ea11d5 100644 (file)
@@ -58,7 +58,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
 int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
 
-static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
+static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.sie_block->gmslm
                - vcpu->arch.sie_block->gmsor
index 9b70a2f28dc75ac7fbdfc06aa37499c4cbfe332d..05ef5380a687e4773ffa88de981c6a895c104c1c 100644 (file)
@@ -104,6 +104,9 @@ config HAVE_SETUP_PER_CPU_AREA
 config NEED_PER_CPU_EMBED_FIRST_CHUNK
        def_bool y if SPARC64
 
+config NEED_PER_CPU_PAGE_FIRST_CHUNK
+       def_bool y if SPARC64
+
 config GENERIC_HARDIRQS_NO__DO_IRQ
        bool
        def_bool y if SPARC64
index 4f63ed8df55191517f819cacd3ec54f886a19941..162007643cdcb94faac5ceec465c095a2520be51 100644 (file)
@@ -7,17 +7,7 @@
 #ifndef __SPARC_HARDIRQ_H
 #define __SPARC_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/spinlock.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
-typedef struct {
-       unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
 #define HARDIRQ_BITS    8
+#include <asm-generic/hardirq.h>
 
 #endif /* __SPARC_HARDIRQ_H */
index ea43057d47633164b0100367a01f4c9f3ff67e12..cbf4801deaafc9c9bbb3cb11a3383c5e209a240d 100644 (file)
@@ -6,10 +6,10 @@
 #ifndef _SPARC_IRQ_H
 #define _SPARC_IRQ_H
 
-#include <linux/interrupt.h>
-
 #define NR_IRQS    16
 
+#include <linux/interrupt.h>
+
 #define irq_canonicalize(irq)  (irq)
 
 extern void __init init_IRQ(void);
index 0ff92fa22064f6a335233acfa39a72a6bfb88a4d..f3cb790fa2ae0771a4ac7b2b0ba7c73b14338111 100644 (file)
@@ -41,8 +41,8 @@
 #define LOW_OBP_ADDRESS                _AC(0x00000000f0000000,UL)
 #define HI_OBP_ADDRESS         _AC(0x0000000100000000,UL)
 #define VMALLOC_START          _AC(0x0000000100000000,UL)
-#define VMALLOC_END            _AC(0x0000000200000000,UL)
-#define VMEMMAP_BASE           _AC(0x0000000200000000,UL)
+#define VMALLOC_END            _AC(0x0000010000000000,UL)
+#define VMEMMAP_BASE           _AC(0x0000010000000000,UL)
 
 #define vmemmap                        ((struct page *)VMEMMAP_BASE)
 
index 3ea6e8cde8c5beba98f7460d199d8a9f128057c0..1d361477d7d639ebe871a8b36243cd42d689b54d 100644 (file)
@@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear:
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* Do not use the TSB for vmemmap.  */
-       mov             (VMEMMAP_BASE >> 24), %g5
-       sllx            %g5, 24, %g5
+       mov             (VMEMMAP_BASE >> 40), %g5
+       sllx            %g5, 40, %g5
        cmp             %g4,%g5
        bgeu,pn         %xcc, kvmap_vmemmap
         nop
@@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss:
        sethi           %hi(MODULES_VADDR), %g5
        cmp             %g4, %g5
        blu,pn          %xcc, kvmap_dtlb_longpath
-        mov            (VMALLOC_END >> 24), %g5
-       sllx            %g5, 24, %g5
+        mov            (VMALLOC_END >> 40), %g5
+       sllx            %g5, 40, %g5
        cmp             %g4, %g5
        bgeu,pn         %xcc, kvmap_dtlb_longpath
         nop
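
The wider shifts track the new constants: the old VMALLOC_END/VMEMMAP_BASE of _AC(0x0000000200000000,UL) is 1 << 33, so (base >> 24) == 512 still fit sparc's 13-bit signed mov immediate, while the new 0x0000010000000000 is 1 << 40 and only (base >> 40) == 1 still does. A quick check of that arithmetic (assuming 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
        unsigned long old_base = 0x0000000200000000UL;  /* 1UL << 33 */
        unsigned long new_base = 0x0000010000000000UL;  /* 1UL << 40 */

        printf("old >> 24 = %lu\n", old_base >> 24);    /* 512, fits simm13 */
        printf("new >> 24 = %lu\n", new_base >> 24);    /* 65536, does not */
        printf("new >> 40 = %lu\n", new_base >> 40);    /* 1 */
        printf("rebuilt   = %#lx\n", (new_base >> 40) << 40);
        return 0;
}
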
index 2d6a1b10c81da93a67d426fe1f1f4937e3eebc5a..04db9274389609e811ce2ba0b5a9e98cb3c57af1 100644 (file)
@@ -56,7 +56,8 @@ struct cpu_hw_events {
        struct perf_event       *events[MAX_HWEVENTS];
        unsigned long           used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
        unsigned long           active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
-       int enabled;
+       u64                     pcr;
+       int                     enabled;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
@@ -68,8 +69,30 @@ struct perf_event_map {
 #define PIC_LOWER      0x02
 };
 
+static unsigned long perf_event_encode(const struct perf_event_map *pmap)
+{
+       return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
+}
+
+static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
+{
+       *msk = val & 0xff;
+       *enc = val >> 16;
+}
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+#define CACHE_OP_UNSUPPORTED   0xfffe
+#define CACHE_OP_NONSENSE      0xffff
+
+typedef struct perf_event_map cache_map_t
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
 struct sparc_pmu {
        const struct perf_event_map     *(*event_map)(int);
+       const cache_map_t               *cache_map;
        int                             max_events;
        int                             upper_shift;
        int                             lower_shift;
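
perf_event_encode() and perf_event_decode() above simply pack the two fields of a perf_event_map entry (a 16-bit event encoding and an 8-bit PIC mask) into one unsigned long and peel them apart again. A standalone round-trip check, using the 0x0009/PIC_LOWER pair that appears in the ultra3 tables below:

#include <stdio.h>

struct mock_event_map {
        unsigned short  encoding;
        unsigned char   pic_mask;
};

static unsigned long encode(const struct mock_event_map *m)
{
        return ((unsigned long)m->encoding << 16) | m->pic_mask;
}

static void decode(unsigned long val, unsigned short *enc, unsigned char *msk)
{
        *msk = val & 0xff;
        *enc = val >> 16;
}

int main(void)
{
        struct mock_event_map m = { .encoding = 0x0009, .pic_mask = 0x02 };
        unsigned short enc;
        unsigned char msk;

        decode(encode(&m), &enc, &msk);
        printf("val=%#lx enc=%#x msk=%#x\n", encode(&m), enc, msk);
        /* val=0x90002 enc=0x9 msk=0x2 */
        return 0;
}
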
@@ -80,21 +103,109 @@ struct sparc_pmu {
        int                             lower_nop;
 };
 
-static const struct perf_event_map ultra3i_perfmon_event_map[] = {
+static const struct perf_event_map ultra3_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
 };
 
-static const struct perf_event_map *ultra3i_event_map(int event_id)
+static const struct perf_event_map *ultra3_event_map(int event_id)
 {
-       return &ultra3i_perfmon_event_map[event_id];
+       return &ultra3_perfmon_event_map[event_id];
 }
 
-static const struct sparc_pmu ultra3i_pmu = {
-       .event_map      = ultra3i_event_map,
-       .max_events     = ARRAY_SIZE(ultra3i_perfmon_event_map),
+static const cache_map_t ultra3_cache_map = {
+[C(L1D)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
+               [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
+               [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(L1I)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
+               [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(LL)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
+               [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
+               [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(DTLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(ITLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(BPU)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+};
+
+static const struct sparc_pmu ultra3_pmu = {
+       .event_map      = ultra3_event_map,
+       .cache_map      = &ultra3_cache_map,
+       .max_events     = ARRAY_SIZE(ultra3_perfmon_event_map),
        .upper_shift    = 11,
        .lower_shift    = 4,
        .event_mask     = 0x3f,
@@ -102,6 +213,121 @@ static const struct sparc_pmu ultra3i_pmu = {
        .lower_nop      = 0x14,
 };
 
+/* Niagara1 is very limited.  The upper PIC is hard-locked to count
+ * only instructions, so it is free running which creates all kinds of
+ * problems.  Some hardware designs make one wonder if the creator
+ * even looked at how this stuff gets used by software.
+ */
+static const struct perf_event_map niagara1_perfmon_event_map[] = {
+       [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
+       [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
+       [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
+       [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
+};
+
+static const struct perf_event_map *niagara1_event_map(int event_id)
+{
+       return &niagara1_perfmon_event_map[event_id];
+}
+
+static const cache_map_t niagara1_cache_map = {
+[C(L1D)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(L1I)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
+               [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(LL)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(DTLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(ITLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(BPU)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+};
+
+static const struct sparc_pmu niagara1_pmu = {
+       .event_map      = niagara1_event_map,
+       .cache_map      = &niagara1_cache_map,
+       .max_events     = ARRAY_SIZE(niagara1_perfmon_event_map),
+       .upper_shift    = 0,
+       .lower_shift    = 4,
+       .event_mask     = 0x7,
+       .upper_nop      = 0x0,
+       .lower_nop      = 0x0,
+};
+
 static const struct perf_event_map niagara2_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
@@ -116,8 +342,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id)
        return &niagara2_perfmon_event_map[event_id];
 }
 
+static const cache_map_t niagara2_cache_map = {
+[C(L1D)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
+               [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
+               [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(L1I)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
+               [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(LL)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
+               [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
+       },
+       [C(OP_WRITE)] = {
+               [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
+               [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
+       },
+       [C(OP_PREFETCH)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(DTLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(ITLB)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+[C(BPU)] = {
+       [C(OP_READ)] = {
+               [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
+               [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
+               [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
+       },
+},
+};
+
 static const struct sparc_pmu niagara2_pmu = {
        .event_map      = niagara2_event_map,
+       .cache_map      = &niagara2_cache_map,
        .max_events     = ARRAY_SIZE(niagara2_perfmon_event_map),
        .upper_shift    = 19,
        .lower_shift    = 6,
@@ -151,23 +465,30 @@ static u64 nop_for_index(int idx)
                              sparc_pmu->lower_nop, idx);
 }
 
-static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
-                                           int idx)
+static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
        u64 val, mask = mask_for_index(idx);
 
-       val = pcr_ops->read();
-       pcr_ops->write((val & ~mask) | hwc->config);
+       val = cpuc->pcr;
+       val &= ~mask;
+       val |= hwc->config;
+       cpuc->pcr = val;
+
+       pcr_ops->write(cpuc->pcr);
 }
 
-static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
-                                            int idx)
+static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
        u64 mask = mask_for_index(idx);
        u64 nop = nop_for_index(idx);
-       u64 val = pcr_ops->read();
+       u64 val;
 
-       pcr_ops->write((val & ~mask) | nop);
+       val = cpuc->pcr;
+       val &= ~mask;
+       val |= nop;
+       cpuc->pcr = val;
+
+       pcr_ops->write(cpuc->pcr);
 }
 
 void hw_perf_enable(void)
@@ -182,7 +503,7 @@ void hw_perf_enable(void)
        cpuc->enabled = 1;
        barrier();
 
-       val = pcr_ops->read();
+       val = cpuc->pcr;
 
        for (i = 0; i < MAX_HWEVENTS; i++) {
                struct perf_event *cp = cpuc->events[i];
@@ -194,7 +515,9 @@ void hw_perf_enable(void)
                val |= hwc->config_base;
        }
 
-       pcr_ops->write(val);
+       cpuc->pcr = val;
+
+       pcr_ops->write(cpuc->pcr);
 }
 
 void hw_perf_disable(void)
@@ -207,10 +530,12 @@ void hw_perf_disable(void)
 
        cpuc->enabled = 0;
 
-       val = pcr_ops->read();
+       val = cpuc->pcr;
        val &= ~(PCR_UTRACE | PCR_STRACE |
                 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
-       pcr_ops->write(val);
+       cpuc->pcr = val;
+
+       pcr_ops->write(cpuc->pcr);
 }
 
 static u32 read_pmc(int idx)
@@ -242,7 +567,7 @@ static void write_pmc(int idx, u64 val)
 }
 
 static int sparc_perf_event_set_period(struct perf_event *event,
-                                        struct hw_perf_event *hwc, int idx)
+                                      struct hw_perf_event *hwc, int idx)
 {
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
@@ -282,19 +607,19 @@ static int sparc_pmu_enable(struct perf_event *event)
        if (test_and_set_bit(idx, cpuc->used_mask))
                return -EAGAIN;
 
-       sparc_pmu_disable_event(hwc, idx);
+       sparc_pmu_disable_event(cpuc, hwc, idx);
 
        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);
 
        sparc_perf_event_set_period(event, hwc, idx);
-       sparc_pmu_enable_event(hwc, idx);
+       sparc_pmu_enable_event(cpuc, hwc, idx);
        perf_event_update_userpage(event);
        return 0;
 }
 
 static u64 sparc_perf_event_update(struct perf_event *event,
-                                    struct hw_perf_event *hwc, int idx)
+                                  struct hw_perf_event *hwc, int idx)
 {
        int shift = 64 - 32;
        u64 prev_raw_count, new_raw_count;
@@ -324,7 +649,7 @@ static void sparc_pmu_disable(struct perf_event *event)
        int idx = hwc->idx;
 
        clear_bit(idx, cpuc->active_mask);
-       sparc_pmu_disable_event(hwc, idx);
+       sparc_pmu_disable_event(cpuc, hwc, idx);
 
        barrier();
 
@@ -338,18 +663,29 @@ static void sparc_pmu_disable(struct perf_event *event)
 static void sparc_pmu_read(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
+
        sparc_perf_event_update(event, hwc, hwc->idx);
 }
 
 static void sparc_pmu_unthrottle(struct perf_event *event)
 {
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       sparc_pmu_enable_event(hwc, hwc->idx);
+
+       sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
 }
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
+static void perf_stop_nmi_watchdog(void *unused)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       stop_nmi_watchdog(NULL);
+       cpuc->pcr = pcr_ops->read();
+}
+
 void perf_event_grab_pmc(void)
 {
        if (atomic_inc_not_zero(&active_events))
@@ -358,7 +694,7 @@ void perf_event_grab_pmc(void)
        mutex_lock(&pmc_grab_mutex);
        if (atomic_read(&active_events) == 0) {
                if (atomic_read(&nmi_active) > 0) {
-                       on_each_cpu(stop_nmi_watchdog, NULL, 1);
+                       on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
                        BUG_ON(atomic_read(&nmi_active) != 0);
                }
                atomic_inc(&active_events);
@@ -375,30 +711,160 @@ void perf_event_release_pmc(void)
        }
 }
 
+static const struct perf_event_map *sparc_map_cache_event(u64 config)
+{
+       unsigned int cache_type, cache_op, cache_result;
+       const struct perf_event_map *pmap;
+
+       if (!sparc_pmu->cache_map)
+               return ERR_PTR(-ENOENT);
+
+       cache_type = (config >>  0) & 0xff;
+       if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+               return ERR_PTR(-EINVAL);
+
+       cache_op = (config >>  8) & 0xff;
+       if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+               return ERR_PTR(-EINVAL);
+
+       cache_result = (config >> 16) & 0xff;
+       if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+               return ERR_PTR(-EINVAL);
+
+       pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
+
+       if (pmap->encoding == CACHE_OP_UNSUPPORTED)
+               return ERR_PTR(-ENOENT);
+
+       if (pmap->encoding == CACHE_OP_NONSENSE)
+               return ERR_PTR(-EINVAL);
+
+       return pmap;
+}
+
 static void hw_perf_event_destroy(struct perf_event *event)
 {
        perf_event_release_pmc();
 }
 
+/* Make sure all events can be scheduled into the hardware at
+ * the same time.  This is simplified by the fact that we only
+ * need to support 2 simultaneous HW events.
+ */
+static int sparc_check_constraints(unsigned long *events, int n_ev)
+{
+       if (n_ev <= perf_max_events) {
+               u8 msk1, msk2;
+               u16 dummy;
+
+               if (n_ev == 1)
+                       return 0;
+               BUG_ON(n_ev != 2);
+               perf_event_decode(events[0], &dummy, &msk1);
+               perf_event_decode(events[1], &dummy, &msk2);
+
+               /* If both events can go on any counter, OK.  */
+               if (msk1 == (PIC_UPPER | PIC_LOWER) &&
+                   msk2 == (PIC_UPPER | PIC_LOWER))
+                       return 0;
+
+               /* If one event is limited to a specific counter,
+                * and the other can go on both, OK.
+                */
+               if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
+                   msk2 == (PIC_UPPER | PIC_LOWER))
+                       return 0;
+               if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
+                   msk1 == (PIC_UPPER | PIC_LOWER))
+                       return 0;
+
+               /* If the events are fixed to different counters, OK.  */
+               if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
+                   (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
+                       return 0;
+
+               /* Otherwise, there is a conflict.  */
+       }
+
+       return -1;
+}
+
+static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
+{
+       int eu = 0, ek = 0, eh = 0;
+       struct perf_event *event;
+       int i, n, first;
+
+       n = n_prev + n_new;
+       if (n <= 1)
+               return 0;
+
+       first = 1;
+       for (i = 0; i < n; i++) {
+               event = evts[i];
+               if (first) {
+                       eu = event->attr.exclude_user;
+                       ek = event->attr.exclude_kernel;
+                       eh = event->attr.exclude_hv;
+                       first = 0;
+               } else if (event->attr.exclude_user != eu ||
+                          event->attr.exclude_kernel != ek ||
+                          event->attr.exclude_hv != eh) {
+                       return -EAGAIN;
+               }
+       }
+
+       return 0;
+}
+
+static int collect_events(struct perf_event *group, int max_count,
+                         struct perf_event *evts[], unsigned long *events)
+{
+       struct perf_event *event;
+       int n = 0;
+
+       if (!is_software_event(group)) {
+               if (n >= max_count)
+                       return -1;
+               evts[n] = group;
+               events[n++] = group->hw.event_base;
+       }
+       list_for_each_entry(event, &group->sibling_list, group_entry) {
+               if (!is_software_event(event) &&
+                   event->state != PERF_EVENT_STATE_OFF) {
+                       if (n >= max_count)
+                               return -1;
+                       evts[n] = event;
+                       events[n++] = event->hw.event_base;
+               }
+       }
+       return n;
+}
+
 static int __hw_perf_event_init(struct perf_event *event)
 {
        struct perf_event_attr *attr = &event->attr;
+       struct perf_event *evts[MAX_HWEVENTS];
        struct hw_perf_event *hwc = &event->hw;
+       unsigned long events[MAX_HWEVENTS];
        const struct perf_event_map *pmap;
        u64 enc;
+       int n;
 
        if (atomic_read(&nmi_active) < 0)
                return -ENODEV;
 
-       if (attr->type != PERF_TYPE_HARDWARE)
+       if (attr->type == PERF_TYPE_HARDWARE) {
+               if (attr->config >= sparc_pmu->max_events)
+                       return -EINVAL;
+               pmap = sparc_pmu->event_map(attr->config);
+       } else if (attr->type == PERF_TYPE_HW_CACHE) {
+               pmap = sparc_map_cache_event(attr->config);
+               if (IS_ERR(pmap))
+                       return PTR_ERR(pmap);
+       } else
                return -EOPNOTSUPP;
 
-       if (attr->config >= sparc_pmu->max_events)
-               return -EINVAL;
-
-       perf_event_grab_pmc();
-       event->destroy = hw_perf_event_destroy;
-
        /* We save the enable bits in the config_base.  So to
         * turn off sampling just write 'config', and to enable
         * things write 'config | config_base'.
@@ -411,15 +877,39 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (!attr->exclude_hv)
                hwc->config_base |= sparc_pmu->hv_bit;
 
+       hwc->event_base = perf_event_encode(pmap);
+
+       enc = pmap->encoding;
+
+       n = 0;
+       if (event->group_leader != event) {
+               n = collect_events(event->group_leader,
+                                  perf_max_events - 1,
+                                  evts, events);
+               if (n < 0)
+                       return -EINVAL;
+       }
+       events[n] = hwc->event_base;
+       evts[n] = event;
+
+       if (check_excludes(evts, n, 1))
+               return -EINVAL;
+
+       if (sparc_check_constraints(events, n + 1))
+               return -EINVAL;
+
+       /* Try to do all error checking before this point, as unwinding
+        * state after grabbing the PMC is difficult.
+        */
+       perf_event_grab_pmc();
+       event->destroy = hw_perf_event_destroy;
+
        if (!hwc->sample_period) {
                hwc->sample_period = MAX_PERIOD;
                hwc->last_period = hwc->sample_period;
                atomic64_set(&hwc->period_left, hwc->sample_period);
        }
 
-       pmap = sparc_pmu->event_map(attr->config);
-
-       enc = pmap->encoding;
        if (pmap->pic_mask & PIC_UPPER) {
                hwc->idx = PIC_UPPER_INDEX;
                enc <<= sparc_pmu->upper_shift;
@@ -472,7 +962,7 @@ void perf_event_print_debug(void)
 }
 
 static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
-                                             unsigned long cmd, void *__args)
+                                           unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
        struct perf_sample_data data;
@@ -513,7 +1003,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       sparc_pmu_disable_event(hwc, idx);
+                       sparc_pmu_disable_event(cpuc, hwc, idx);
        }
 
        return NOTIFY_STOP;
@@ -525,8 +1015,15 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
 
 static bool __init supported_pmu(void)
 {
-       if (!strcmp(sparc_pmu_type, "ultra3i")) {
-               sparc_pmu = &ultra3i_pmu;
+       if (!strcmp(sparc_pmu_type, "ultra3") ||
+           !strcmp(sparc_pmu_type, "ultra3+") ||
+           !strcmp(sparc_pmu_type, "ultra3i") ||
+           !strcmp(sparc_pmu_type, "ultra4+")) {
+               sparc_pmu = &ultra3_pmu;
+               return true;
+       }
+       if (!strcmp(sparc_pmu_type, "niagara")) {
+               sparc_pmu = &niagara1_pmu;
                return true;
        }
        if (!strcmp(sparc_pmu_type, "niagara2")) {
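
The new sparc_map_cache_event() above unpacks the standard perf hw-cache
config layout: cache type in bits 0-7, operation in bits 8-15 and result in
bits 16-23.  A minimal user-space sketch of how such a config word is built
(the helper name and the lack of error handling are illustrative only, not
part of this patch):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Count L1D read misses on the calling task; type/op/result are packed
 * into attr.config exactly as sparc_map_cache_event() unpacks them. */
static long open_l1d_read_miss_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HW_CACHE;
	attr.config = PERF_COUNT_HW_CACHE_L1D |
		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	/* pid 0 = current task, cpu -1 = any CPU, no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
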
index ff68373ce6d67615f4ad0bf88061bd04730a1673..aa36223497b931c9dc96c1ef2d536672aa36248d 100644 (file)
@@ -1420,7 +1420,7 @@ static void __init pcpu_free_bootmem(void *ptr, size_t size)
        free_bootmem(__pa(ptr), size);
 }
 
-static int pcpu_cpu_distance(unsigned int from, unsigned int to)
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
        if (cpu_to_node(from) == cpu_to_node(to))
                return LOCAL_DISTANCE;
@@ -1428,18 +1428,53 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
                return REMOTE_DISTANCE;
 }
 
+static void __init pcpu_populate_pte(unsigned long addr)
+{
+       pgd_t *pgd = pgd_offset_k(addr);
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pud = pud_offset(pgd, addr);
+       if (pud_none(*pud)) {
+               pmd_t *new;
+
+               new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+               pud_populate(&init_mm, pud, new);
+       }
+
+       pmd = pmd_offset(pud, addr);
+       if (!pmd_present(*pmd)) {
+               pte_t *new;
+
+               new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+               pmd_populate_kernel(&init_mm, pmd, new);
+       }
+}
+
 void __init setup_per_cpu_areas(void)
 {
        unsigned long delta;
        unsigned int cpu;
-       int rc;
-
-       rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
-                                   PERCPU_DYNAMIC_RESERVE, 4 << 20,
-                                   pcpu_cpu_distance, pcpu_alloc_bootmem,
-                                   pcpu_free_bootmem);
-       if (rc)
-               panic("failed to initialize first chunk (%d)", rc);
+       int rc = -EINVAL;
+
+       if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+               rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+                                           PERCPU_DYNAMIC_RESERVE, 4 << 20,
+                                           pcpu_cpu_distance,
+                                           pcpu_alloc_bootmem,
+                                           pcpu_free_bootmem);
+               if (rc)
+                       pr_warning("PERCPU: %s allocator failed (%d), "
+                                  "falling back to page size\n",
+                                  pcpu_fc_names[pcpu_chosen_fc], rc);
+       }
+       if (rc < 0)
+               rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
+                                          pcpu_alloc_bootmem,
+                                          pcpu_free_bootmem,
+                                          pcpu_populate_pte);
+       if (rc < 0)
+               panic("cannot initialize percpu area (err=%d)", rc);
 
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu)
index f97cb8b6ee5fec51b5f20d572357442e340115be..f9024bccff163f06fcca11054865b885086dc8c0 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/oprofile.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/param.h>       /* for HZ */
  
 #ifdef CONFIG_SPARC64
 #include <linux/notifier.h>
index 8da93745c08790f8e8fa431b010f3ba08fd49a9b..c876bace8fdc95a1bc9ce31abbfe68247c1ac309 100644 (file)
@@ -86,10 +86,6 @@ config STACKTRACE_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
        def_bool y
 
-config FAST_CMPXCHG_LOCAL
-       bool
-       default y
-
 config MMU
        def_bool y
 
index 527519b8a9f9997241eb5897f316cdbe855bb341..f2824fb8c79cad23ad747977e1e32ab4d6b555a1 100644 (file)
@@ -400,7 +400,7 @@ config X86_TSC
 
 config X86_CMPXCHG64
        def_bool y
-       depends on X86_PAE || X86_64
+       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
 
 # this should be set for all -march=.. options where the compiler
 # generates cmov.
@@ -412,6 +412,7 @@ config X86_MINIMUM_CPU_FAMILY
        int
        default "64" if X86_64
        default "6" if X86_32 && X86_P6_NOP
+       default "5" if X86_32 && X86_CMPXCHG64
        default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK)
        default "3"
 
index 74619c4f9fda82652b37cfd3edbd6a52f6845fa7..1733f9f65e8259d74fc74fc40b466bd69dc6fa87 100644 (file)
@@ -21,8 +21,8 @@
 #define __AUDIT_ARCH_LE           0x40000000
 
 #ifndef CONFIG_AUDITSYSCALL
-#define sysexit_audit int_ret_from_sys_call
-#define sysretl_audit int_ret_from_sys_call
+#define sysexit_audit ia32_ret_from_sys_call
+#define sysretl_audit ia32_ret_from_sys_call
 #endif
 
 #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
        .endm 
 
        /* clobbers %eax */     
-       .macro  CLEAR_RREGS _r9=rax
+       .macro  CLEAR_RREGS offset=0, _r9=rax
        xorl    %eax,%eax
-       movq    %rax,R11(%rsp)
-       movq    %rax,R10(%rsp)
-       movq    %\_r9,R9(%rsp)
-       movq    %rax,R8(%rsp)
+       movq    %rax,\offset+R11(%rsp)
+       movq    %rax,\offset+R10(%rsp)
+       movq    %\_r9,\offset+R9(%rsp)
+       movq    %rax,\offset+R8(%rsp)
        .endm
 
        /*
@@ -172,6 +172,10 @@ sysexit_from_sys_call:
        movl    RIP-R11(%rsp),%edx              /* User %eip */
        CFI_REGISTER rip,rdx
        RESTORE_ARGS 1,24,1,1,1,1
+       xorq    %r8,%r8
+       xorq    %r9,%r9
+       xorq    %r10,%r10
+       xorq    %r11,%r11
        popfq
        CFI_ADJUST_CFA_OFFSET -8
        /*CFI_RESTORE rflags*/
@@ -202,7 +206,7 @@ sysexit_from_sys_call:
 
        .macro auditsys_exit exit,ebpsave=RBP
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
-       jnz int_ret_from_sys_call
+       jnz ia32_ret_from_sys_call
        TRACE_IRQS_ON
        sti
        movl %eax,%esi          /* second arg, syscall return value */
@@ -218,8 +222,9 @@ sysexit_from_sys_call:
        cli
        TRACE_IRQS_OFF
        testl %edi,TI_flags(%r10)
-       jnz int_with_check
-       jmp \exit
+       jz \exit
+       CLEAR_RREGS -ARGOFFSET
+       jmp int_with_check
        .endm
 
 sysenter_auditsys:
@@ -329,6 +334,9 @@ sysretl_from_sys_call:
        CFI_REGISTER rip,rcx
        movl EFLAGS-ARGOFFSET(%rsp),%r11d       
        /*CFI_REGISTER rflags,r11*/
+       xorq    %r10,%r10
+       xorq    %r9,%r9
+       xorq    %r8,%r8
        TRACE_IRQS_ON
        movl RSP-ARGOFFSET(%rsp),%esp
        CFI_RESTORE rsp
@@ -353,7 +361,7 @@ cstar_tracesys:
 #endif
        xchgl %r9d,%ebp
        SAVE_REST
-       CLEAR_RREGS r9
+       CLEAR_RREGS 0, r9
        movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
        movq %rsp,%rdi        /* &pt_regs -> arg1 */
        call syscall_trace_enter
@@ -425,6 +433,8 @@ ia32_do_call:
        call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
 ia32_sysret:
        movq %rax,RAX-ARGOFFSET(%rsp)
+ia32_ret_from_sys_call:
+       CLEAR_RREGS -ARGOFFSET
        jmp int_ret_from_sys_call 
 
 ia32_tracesys:                  
@@ -442,8 +452,8 @@ END(ia32_syscall)
 
 ia32_badsys:
        movq $0,ORIG_RAX-ARGOFFSET(%rsp)
-       movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-       jmp int_ret_from_sys_call
+       movq $-ENOSYS,%rax
+       jmp ia32_sysret
 
 quiet_ni_syscall:
        movq $-ENOSYS,%rax
index 7c5ef8b14d925624e85fd3f2d589cbbbb98a4292..46fc474fd81960e7f1c345dd926fefd316cd2f04 100644 (file)
@@ -161,7 +161,8 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
            "adcl $0, %0        ;\n"
            : "=&r" (sum)
            : "r" (saddr), "r" (daddr),
-             "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
+             "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
+           : "memory");
 
        return csum_fold(sum);
 }
index 82ceb788a981e7c73a7cb06e3d0bec311e205be7..ee1931be6593aba1d344b7644340a1422d7a4f6d 100644 (file)
@@ -312,19 +312,23 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
 
 extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
 
-#define cmpxchg64(ptr, o, n)                                           \
-({                                                                     \
-       __typeof__(*(ptr)) __ret;                                       \
-       if (likely(boot_cpu_data.x86 > 4))                              \
-               __ret = (__typeof__(*(ptr)))__cmpxchg64((ptr),          \
-                               (unsigned long long)(o),                \
-                               (unsigned long long)(n));               \
-       else                                                            \
-               __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),      \
-                               (unsigned long long)(o),                \
-                               (unsigned long long)(n));               \
-       __ret;                                                          \
-})
+#define cmpxchg64(ptr, o, n)                                   \
+({                                                             \
+       __typeof__(*(ptr)) __ret;                               \
+       __typeof__(*(ptr)) __old = (o);                         \
+       __typeof__(*(ptr)) __new = (n);                         \
+       alternative_io("call cmpxchg8b_emu",                    \
+                       "lock; cmpxchg8b (%%esi)" ,             \
+                      X86_FEATURE_CX8,                         \
+                      "=A" (__ret),                            \
+                      "S" ((ptr)), "0" (__old),                \
+                      "b" ((unsigned int)__new),               \
+                      "c" ((unsigned int)(__new>>32))          \
+                      : "memory");                             \
+       __ret; })
+
+
+
 #define cmpxchg64_local(ptr, o, n)                                     \
 ({                                                                     \
        __typeof__(*(ptr)) __ret;                                       \
index 3be000435fadd763c806460d54a6c0893a1245fa..d83892226f73c799c7d11e32254ea1cd7ff429ec 100644 (file)
@@ -796,6 +796,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
index 4b2af86e3e8d2388a74b2dec2b1145253b43427a..183c3457d2f4bf6af422a9f7e7e6f0ffac587a04 100644 (file)
@@ -204,10 +204,7 @@ static void print_mce_head(void)
 static void print_mce_tail(void)
 {
        printk(KERN_EMERG "This is not a software problem!\n"
-#if (!defined(CONFIG_EDAC) || !defined(CONFIG_CPU_SUP_AMD))
-              "Run through mcelog --ascii to decode and contact your hardware vendor\n"
-#endif
-              );
+              "Run through mcelog --ascii to decode and contact your hardware vendor\n");
 }
 
 #define PANIC_TIMEOUT 5 /* 5 seconds */
index 41fd965c80c6d41caefd801ee16948d23214d905..b9c830c12b4ac1de45d137a736be6de7fbd19fb4 100644 (file)
@@ -206,8 +206,11 @@ static int __init setup_early_printk(char *buf)
 
        while (*buf != '\0') {
                if (!strncmp(buf, "serial", 6)) {
-                       early_serial_init(buf + 6);
+                       buf += 6;
+                       early_serial_init(buf);
                        early_console_register(&early_serial_console, keep);
+                       if (!strncmp(buf, ",ttyS", 5))
+                               buf += 5;
                }
                if (!strncmp(buf, "ttyS", 4)) {
                        early_serial_init(buf + 4);
index 43cec6bdda637aa48aef7f590aa48b9df1300396..9c3bd4a2050e802a62ff13dfc52e855e641a802b 100644 (file)
 EXPORT_SYMBOL(mcount);
 #endif
 
+/*
+ * Note: this is a prototype to get at the symbol for
+ * the export, but don't use it from C code; it is used
+ * by assembly code and does not follow the C calling convention!
+ */
+#ifndef CONFIG_X86_CMPXCHG64
+extern void cmpxchg8b_emu(void);
+EXPORT_SYMBOL(cmpxchg8b_emu);
+#endif
+
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
 
index 74656d1d4e30f8fa560391253bb6b541b74e2b08..391206199515651717fb88e8645c02e6ce482f21 100644 (file)
@@ -244,6 +244,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
                                __func__, smp_processor_id(), vector, irq);
        }
 
+       run_local_timers();
        irq_exit();
 
        set_irq_regs(old_regs);
@@ -268,6 +269,7 @@ void smp_generic_interrupt(struct pt_regs *regs)
        if (generic_interrupt_extension)
                generic_interrupt_extension();
 
+       run_local_timers();
        irq_exit();
 
        set_irq_regs(old_regs);
index ec1de97600e70638bcfdcb53da52a83bc1288829..d915d956e66de6777450e2b6725a96799efe25c6 100644 (file)
@@ -198,6 +198,7 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 {
        ack_APIC_irq();
        inc_irq_stat(irq_resched_count);
+       run_local_timers();
        /*
         * KVM uses this interrupt to force a cpu out of guest mode
         */
index 1ae5ceba7eb2f0f4426f5c1a9c5b8c073375f511..7024224f0fc8037cf913dbbde9d2cb9d8f2cc640 100644 (file)
@@ -664,7 +664,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 {
        ktime_t now = apic->lapic_timer.timer.base->get_time();
 
-       apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) *
+       apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) *
                    APIC_BUS_CYCLE_NS * apic->divide_count;
        atomic_set(&apic->lapic_timer.pending, 0);
 
index eca41ae9f453cda6dc61112ab925ce66cdb7fef3..685a4ffac8e6a66cf3241775768c175a90d2962a 100644 (file)
@@ -156,6 +156,8 @@ module_param(oos_shadow, bool, 0644);
 #define CREATE_TRACE_POINTS
 #include "mmutrace.h"
 
+#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
@@ -634,9 +636,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        if (*spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (is_writeble_pte(*spte))
-               kvm_release_pfn_dirty(pfn);
-       else
-               kvm_release_pfn_clean(pfn);
+               kvm_set_pfn_dirty(pfn);
        rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -748,7 +748,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
        return write_protected;
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
 {
        u64 *spte;
        int need_tlb_flush = 0;
@@ -763,8 +763,45 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
        return need_tlb_flush;
 }
 
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
-                         int (*handler)(struct kvm *kvm, unsigned long *rmapp))
+static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+{
+       int need_flush = 0;
+       u64 *spte, new_spte;
+       pte_t *ptep = (pte_t *)data;
+       pfn_t new_pfn;
+
+       WARN_ON(pte_huge(*ptep));
+       new_pfn = pte_pfn(*ptep);
+       spte = rmap_next(kvm, rmapp, NULL);
+       while (spte) {
+               BUG_ON(!is_shadow_present_pte(*spte));
+               rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
+               need_flush = 1;
+               if (pte_write(*ptep)) {
+                       rmap_remove(kvm, spte);
+                       __set_spte(spte, shadow_trap_nonpresent_pte);
+                       spte = rmap_next(kvm, rmapp, NULL);
+               } else {
+                       new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
+                       new_spte |= (u64)new_pfn << PAGE_SHIFT;
+
+                       new_spte &= ~PT_WRITABLE_MASK;
+                       new_spte &= ~SPTE_HOST_WRITEABLE;
+                       if (is_writeble_pte(*spte))
+                               kvm_set_pfn_dirty(spte_to_pfn(*spte));
+                       __set_spte(spte, new_spte);
+                       spte = rmap_next(kvm, rmapp, spte);
+               }
+       }
+       if (need_flush)
+               kvm_flush_remote_tlbs(kvm);
+
+       return 0;
+}
+
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data,
+                         int (*handler)(struct kvm *kvm, unsigned long *rmapp,
+                                        u64 data))
 {
        int i, j;
        int retval = 0;
@@ -786,13 +823,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
 
-                       retval |= handler(kvm, &memslot->rmap[gfn_offset]);
+                       retval |= handler(kvm, &memslot->rmap[gfn_offset],
+                                         data);
 
                        for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
                                int idx = gfn_offset;
                                idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
                                retval |= handler(kvm,
-                                       &memslot->lpage_info[j][idx].rmap_pde);
+                                       &memslot->lpage_info[j][idx].rmap_pde,
+                                       data);
                        }
                }
        }
@@ -802,10 +841,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
-       return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+       return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+       kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp);
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
 {
        u64 *spte;
        int young = 0;
@@ -841,13 +885,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
        gfn = unalias_gfn(vcpu->kvm, gfn);
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
-       kvm_unmap_rmapp(vcpu->kvm, rmapp);
+       kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
        kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-       return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+       return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
 #ifdef MMU_DEBUG
@@ -1756,7 +1800,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int level,
                    gfn_t gfn, pfn_t pfn, bool speculative,
-                   bool can_unsync)
+                   bool can_unsync, bool reset_host_protection)
 {
        u64 spte;
        int ret = 0;
@@ -1783,6 +1827,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
                        kvm_is_mmio_pfn(pfn));
 
+       if (reset_host_protection)
+               spte |= SPTE_HOST_WRITEABLE;
+
        spte |= (u64)pfn << PAGE_SHIFT;
 
        if ((pte_access & ACC_WRITE_MASK)
@@ -1828,7 +1875,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
                         int *ptwrite, int level, gfn_t gfn,
-                        pfn_t pfn, bool speculative)
+                        pfn_t pfn, bool speculative,
+                        bool reset_host_protection)
 {
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*sptep);
@@ -1860,7 +1908,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        }
 
        if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
-                     dirty, level, gfn, pfn, speculative, true)) {
+                     dirty, level, gfn, pfn, speculative, true,
+                     reset_host_protection)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
@@ -1877,8 +1926,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        page_header_update_slot(vcpu->kvm, sptep, gfn);
        if (!was_rmapped) {
                rmap_count = rmap_add(vcpu, sptep, gfn);
-               if (!is_rmap_spte(*sptep))
-                       kvm_release_pfn_clean(pfn);
+               kvm_release_pfn_clean(pfn);
                if (rmap_count > RMAP_RECYCLE_THRESHOLD)
                        rmap_recycle(vcpu, sptep, gfn);
        } else {
@@ -1909,7 +1957,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                if (iterator.level == level) {
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write,
-                                    level, gfn, pfn, false);
+                                    level, gfn, pfn, false, true);
                        ++vcpu->stat.pf_fixed;
                        break;
                }
index d2fec9c12d224e60ce5e8755383e4387ea642ca7..72558f8ff3f5a999a3f7192dd3117fe667d72098 100644 (file)
@@ -273,9 +273,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
        if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
                return;
        kvm_get_pfn(pfn);
+       /*
+        * We call mmu_set_spte() with reset_host_protection = true because
+        * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
+        */
        mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
                     gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
-                    gpte_to_gfn(gpte), pfn, true);
+                    gpte_to_gfn(gpte), pfn, true, true);
 }
 
 /*
@@ -308,7 +312,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                                     user_fault, write_fault,
                                     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
                                     ptwrite, level,
-                                    gw->gfn, pfn, false);
+                                    gw->gfn, pfn, false, true);
                        break;
                }
 
@@ -558,6 +562,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
        int i, offset, nr_present;
+       bool reset_host_protection;
 
        offset = nr_present = 0;
 
@@ -595,9 +600,16 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                nr_present++;
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+               if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
+                       pte_access &= ~ACC_WRITE_MASK;
+                       reset_host_protection = 0;
+               } else {
+                       reset_host_protection = 1;
+               }
                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                         is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
-                        spte_to_pfn(sp->spt[i]), true, false);
+                        spte_to_pfn(sp->spt[i]), true, false,
+                        reset_host_protection);
        }
 
        return !nr_present;
index 944cc9c04b3cea96be481838bf2fd890ed1edc5a..c17404add91febfd66d12570057a12695e972d9b 100644 (file)
@@ -767,6 +767,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                rdtscll(tsc_this);
                delta = vcpu->arch.host_tsc - tsc_this;
                svm->vmcb->control.tsc_offset += delta;
+               if (is_nested(svm))
+                       svm->nested.hsave->control.tsc_offset += delta;
                vcpu->cpu = cpu;
                kvm_migrate_timers(vcpu);
                svm->asid_generation = 0;
@@ -2057,10 +2059,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 
        switch (ecx) {
        case MSR_IA32_TSC: {
-               u64 tsc;
+               u64 tsc_offset;
+
+               if (is_nested(svm))
+                       tsc_offset = svm->nested.hsave->control.tsc_offset;
+               else
+                       tsc_offset = svm->vmcb->control.tsc_offset;
 
-               rdtscll(tsc);
-               *data = svm->vmcb->control.tsc_offset + tsc;
+               *data = tsc_offset + native_read_tsc();
                break;
        }
        case MSR_K6_STAR:
@@ -2146,10 +2152,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 
        switch (ecx) {
        case MSR_IA32_TSC: {
-               u64 tsc;
+               u64 tsc_offset = data - native_read_tsc();
+               u64 g_tsc_offset = 0;
+
+               if (is_nested(svm)) {
+                       g_tsc_offset = svm->vmcb->control.tsc_offset -
+                                      svm->nested.hsave->control.tsc_offset;
+                       svm->nested.hsave->control.tsc_offset = tsc_offset;
+               }
+
+               svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
 
-               rdtscll(tsc);
-               svm->vmcb->control.tsc_offset = data - tsc;
                break;
        }
        case MSR_K6_STAR:
index f3812014bd0b3e4d04ecd5c0645eb0dad9baa0e9..ed53b42caba119bb7b488efdf79170b44ba922e4 100644 (file)
@@ -709,7 +709,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        if (vcpu->cpu != cpu) {
                vcpu_clear(vmx);
                kvm_migrate_timers(vcpu);
-               vpid_sync_vcpu_all(vmx);
+               set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
                local_irq_disable();
                list_add(&vmx->local_vcpus_link,
                         &per_cpu(vcpus_on_cpu, cpu));
index be451ee44249be80cdc4829df1f8a6ccc1f5d2f5..9b9695322f567875169c4d934e4100cb266e5d52 100644 (file)
@@ -1591,6 +1591,8 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
 
        if (cpuid->nent < 1)
                goto out;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               cpuid->nent = KVM_MAX_CPUID_ENTRIES;
        r = -ENOMEM;
        cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
        if (!cpuid_entries)
index 9e609206fac9b668b906c5949a3b018f9804752c..85f5db95c60f03718f292080587f7faa337ef570 100644 (file)
@@ -16,7 +16,9 @@ ifeq ($(CONFIG_X86_32),y)
         lib-y += checksum_32.o
         lib-y += strstr_32.o
         lib-y += semaphore_32.o string_32.o
-
+ifneq ($(CONFIG_X86_CMPXCHG64),y)
+        lib-y += cmpxchg8b_emu.o
+endif
         lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
 else
         obj-y += io_64.o iomap_copy_64.o
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
new file mode 100644 (file)
index 0000000..828cb71
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; version 2
+ *     of the License.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+
+.text
+
+/*
+ * Inputs:
+ * %esi : memory location to compare
+ * %eax : low 32 bits of old value
+ * %edx : high 32 bits of old value
+ * %ebx : low 32 bits of new value
+ * %ecx : high 32 bits of new value
+ */
+ENTRY(cmpxchg8b_emu)
+CFI_STARTPROC
+
+#
+# Emulate 'cmpxchg8b (%esi)' on UP except we don't
+# set the whole ZF thing (caller will just compare
+# eax:edx with the expected value)
+#
+cmpxchg8b_emu:
+       pushfl
+       cli
+
+       cmpl  (%esi), %eax
+       jne not_same
+       cmpl 4(%esi), %edx
+       jne half_same
+
+       movl %ebx,  (%esi)
+       movl %ecx, 4(%esi)
+
+       popfl
+       ret
+
+ not_same:
+       movl  (%esi), %eax
+ half_same:
+       movl 4(%esi), %edx
+
+       popfl
+       ret
+
+CFI_ENDPROC
+ENDPROC(cmpxchg8b_emu)
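
For reference, a C-level sketch of the semantics the routine above provides
(illustration only, not code from the patch): with interrupts disabled it
compares the 64-bit value at the pointer with the expected old value, stores
the new value on a match, and returns whatever was found there in edx:eax,
leaving ZF untouched so the caller compares the returned value itself.

/* Illustrative C equivalent; the real asm takes ptr in %esi, old in
 * %edx:%eax and new in %ecx:%ebx, and relies on pushfl/cli ... popfl
 * for atomicity on UP. */
static unsigned long long cmpxchg8b_emu_sketch(volatile unsigned long long *ptr,
					       unsigned long long old,
					       unsigned long long new)
{
	unsigned long long cur = *ptr;	/* IRQs are off around this section */

	if (cur == old)
		*ptr = new;
	return cur;	/* caller compares this with 'old' instead of ZF */
}
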
index b53225d2cac39e690d2648d717c34fe8b872d7c5..e133ce25e29022d20bdd5e954c9a6697bfc7ab91 100644 (file)
@@ -100,7 +100,7 @@ static int xen_array_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static struct file_operations u32_array_fops = {
+static const struct file_operations u32_array_fops = {
        .owner  = THIS_MODULE,
        .open   = u32_array_open,
        .release= xen_array_release,
index 6593ab39cfe9223b106d3b5a897937e58176e6b1..8873b9b439ffdb0bf695b0077f84bb28c1046a38 100644 (file)
@@ -350,6 +350,7 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
 
        if (bio->bi_private)
                complete(bio->bi_private);
+       __free_page(bio_page(bio));
 
        bio_put(bio);
 }
@@ -372,30 +373,50 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        struct request_queue *q = bdev_get_queue(bdev);
        int type = flags & DISCARD_FL_BARRIER ?
                DISCARD_BARRIER : DISCARD_NOBARRIER;
+       struct bio *bio;
+       struct page *page;
        int ret = 0;
 
        if (!q)
                return -ENXIO;
 
-       if (!q->prepare_discard_fn)
+       if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
 
        while (nr_sects && !ret) {
-               struct bio *bio = bio_alloc(gfp_mask, 0);
-               if (!bio)
-                       return -ENOMEM;
+               unsigned int sector_size = q->limits.logical_block_size;
+               unsigned int max_discard_sectors =
+                       min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
+               bio = bio_alloc(gfp_mask, 1);
+               if (!bio)
+                       goto out;
+               bio->bi_sector = sector;
                bio->bi_end_io = blkdev_discard_end_io;
                bio->bi_bdev = bdev;
                if (flags & DISCARD_FL_WAIT)
                        bio->bi_private = &wait;
 
-               bio->bi_sector = sector;
+               /*
+                * Add a zeroed one-sector payload as that's what
+                * our current implementations need.  If we ever need
+                * more, the interface will need revisiting.
+                */
+               page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!page)
+                       goto out_free_bio;
+               if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
+                       goto out_free_page;
 
-               if (nr_sects > queue_max_hw_sectors(q)) {
-                       bio->bi_size = queue_max_hw_sectors(q) << 9;
-                       nr_sects -= queue_max_hw_sectors(q);
-                       sector += queue_max_hw_sectors(q);
+               /*
+                * And override the bio size - the way discard works, we
+                * touch many more blocks on disk than the actual payload
+                * length.
+                */
+               if (nr_sects > max_discard_sectors) {
+                       bio->bi_size = max_discard_sectors << 9;
+                       nr_sects -= max_discard_sectors;
+                       sector += max_discard_sectors;
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;
@@ -414,5 +435,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                bio_put(bio);
        }
        return ret;
+out_free_page:
+       __free_page(page);
+out_free_bio:
+       bio_put(bio);
+out:
+       return -ENOMEM;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
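
With prepare_discard_fn gone, a driver that supports discard is expected to
advertise it through the queue flag and the per-command limit instead.  A
hypothetical driver-init sketch (the function name and the limit value are
placeholders; the two calls are the interface used above and added later in
this series):

/* Hypothetical setup path for a discard-capable device. */
static void example_enable_discard(struct request_queue *q)
{
	/* largest single discard the device accepts, in 512-byte sectors */
	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
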
index 8135228e4b2907e15f8a8ff5199de3fa8e5f40cd..81f34311659a21cc172d3ce3dde996643941e5eb 100644 (file)
@@ -34,6 +34,7 @@
 #include "blk.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
 static int __make_request(struct request_queue *q, struct bio *bio);
@@ -69,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
                part_stat_inc(cpu, part, merges[rw]);
        else {
                part_round_stats(cpu, part);
-               part_inc_in_flight(part, rw);
+               part_inc_in_flight(part);
        }
 
        part_stat_unlock();
@@ -1031,7 +1032,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 
        if (part->in_flight) {
                __part_stat_add(cpu, part, time_in_queue,
-                               part_in_flight(part) * (now - part->stamp));
+                               part->in_flight * (now - part->stamp));
                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
        }
        part->stamp = now;
@@ -1124,7 +1125,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
                req->cmd_flags |= REQ_DISCARD;
                if (bio_rw_flagged(bio, BIO_RW_BARRIER))
                        req->cmd_flags |= REQ_SOFTBARRIER;
-               req->q->prepare_discard_fn(req->q, req);
        } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
                req->cmd_flags |= REQ_HARDBARRIER;
 
@@ -1437,7 +1437,8 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
                }
 
-               if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
+               if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                            nr_sectors > queue_max_hw_sectors(q))) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                               bdevname(bio->bi_bdev, b),
                               bio_sectors(bio),
@@ -1470,7 +1471,7 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
 
                if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-                   !q->prepare_discard_fn) {
+                   !blk_queue_discard(q)) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
@@ -1738,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
                part_stat_inc(cpu, part, ios[rw]);
                part_stat_add(cpu, part, ticks[rw], duration);
                part_round_stats(cpu, part);
-               part_dec_in_flight(part, rw);
+               part_dec_in_flight(part);
 
                part_stat_unlock();
        }
@@ -2491,6 +2492,14 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
+int kblockd_schedule_delayed_work(struct request_queue *q,
+                                 struct delayed_work *work,
+                                 unsigned long delay)
+{
+       return queue_delayed_work(kblockd_workqueue, work, delay);
+}
+EXPORT_SYMBOL(kblockd_schedule_delayed_work);
+
 int __init blk_dev_init(void)
 {
        BUILD_BUG_ON(__REQ_NR_BITS > 8 *
index 99cb5cf1f447fa8759d73f06bf64fe4b20eec580..b0de8574fdc84e40e1bdf87dea4eb1af98769bb4 100644 (file)
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                part_round_stats(cpu, part);
-               part_dec_in_flight(part, rq_data_dir(req));
+               part_dec_in_flight(part);
 
                part_stat_unlock();
        }
index 83413ff837397a88bf8e5a8bbc1cb9652c0dd50f..e0695bca702777d7455e9d4266590be47977443e 100644 (file)
@@ -33,23 +33,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 }
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
-/**
- * blk_queue_set_discard - set a discard_sectors function for queue
- * @q:         queue
- * @dfn:       prepare_discard function
- *
- * It's possible for a queue to register a discard callback which is used
- * to transform a discard request into the appropriate type for the
- * hardware. If none is registered, then discard requests are failed
- * with %EOPNOTSUPP.
- *
- */
-void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
-{
-       q->prepare_discard_fn = dfn;
-}
-EXPORT_SYMBOL(blk_queue_set_discard);
-
 /**
  * blk_queue_merge_bvec - set a merge_bvec function for queue
  * @q:         queue
@@ -111,7 +94,9 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->max_hw_segments = MAX_HW_SEGMENTS;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = MAX_SEGMENT_SIZE;
-       lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
+       lim->max_sectors = BLK_DEF_MAX_SECTORS;
+       lim->max_hw_sectors = INT_MAX;
+       lim->max_discard_sectors = SAFE_MAX_SECTORS;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
@@ -164,6 +149,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
        q->unplug_timer.data = (unsigned long)q;
 
        blk_set_default_limits(&q->limits);
+       blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 
        /*
         * If the caller didn't supply a lock, fall back to our embedded
@@ -253,6 +239,18 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
+/**
+ * blk_queue_max_discard_sectors - set max sectors for a single discard
+ * @q:  the request queue for the device
+ * @max_discard_sectors: maximum number of sectors to discard
+ **/
+void blk_queue_max_discard_sectors(struct request_queue *q,
+               unsigned int max_discard_sectors)
+{
+       q->limits.max_discard_sectors = max_discard_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_discard_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
index b78c9c3e26705f45fb1e90ac116dc8bb96c8122b..8a6d81afb284ebbf7111653393b6594d91924491 100644 (file)
@@ -452,6 +452,7 @@ int blk_register_queue(struct gendisk *disk)
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
+               blk_trace_remove_sysfs(disk_to_dev(disk));
                return ret;
        }
 
@@ -465,11 +466,11 @@ void blk_unregister_queue(struct gendisk *disk)
        if (WARN_ON(!q))
                return;
 
-       if (q->request_fn) {
+       if (q->request_fn)
                elv_unregister_queue(q);
 
-               kobject_uevent(&q->kobj, KOBJ_REMOVE);
-               kobject_del(&q->kobj);
-               kobject_put(&disk_to_dev(disk)->kobj);
-       }
+       kobject_uevent(&q->kobj, KOBJ_REMOVE);
+       kobject_del(&q->kobj);
+       blk_trace_remove_sysfs(disk_to_dev(disk));
+       kobject_put(&disk_to_dev(disk)->kobj);
 }
index 1ca813b16e7840cf10086d79b7e2bb7b0c07005e..9c4b679908f41cf034cdddc79a9bff8bcb1f4e85 100644 (file)
@@ -150,7 +150,7 @@ struct cfq_data {
         * idle window management
         */
        struct timer_list idle_slice_timer;
-       struct work_struct unplug_work;
+       struct delayed_work unplug_work;
 
        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;
@@ -173,6 +173,7 @@ struct cfq_data {
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
+       unsigned int cfq_latency;
 
        struct list_head cic_list;
 
@@ -180,6 +181,8 @@ struct cfq_data {
         * Fallback dummy cfqq for extreme OOM conditions
         */
        struct cfq_queue oom_cfqq;
+
+       unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
@@ -265,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio)
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
+                                        unsigned long delay)
 {
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
-               kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+               kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
+                                               delay);
        }
 }
 
@@ -1326,12 +1331,30 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
                        return 0;
 
                /*
-                * we are the only queue, allow up to 4 times of 'quantum'
+                * Sole queue user, allow bigger slice
                 */
-               if (cfqq->dispatched >= 4 * max_dispatch)
-                       return 0;
+               max_dispatch *= 4;
+       }
+
+       /*
+        * Async queues must wait a bit before being allowed to dispatch.
+        * We also ramp up the dispatch depth gradually for async IO,
+        * based on the last sync IO we serviced.
+        */
+       if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+               unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+               unsigned int depth;
+
+               depth = last_sync / cfqd->cfq_slice[1];
+               if (!depth && !cfqq->dispatched)
+                       depth = 1;
+               if (depth < max_dispatch)
+                       max_dispatch = depth;
        }
 
+       if (cfqq->dispatched >= max_dispatch)
+               return 0;
+
        /*
         * Dispatch a request from this cfqq
         */
@@ -1376,7 +1399,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 
        if (unlikely(cfqd->active_queue == cfqq)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
-               cfq_schedule_dispatch(cfqd);
+               cfq_schedule_dispatch(cfqd, 0);
        }
 
        kmem_cache_free(cfq_pool, cfqq);
@@ -1471,7 +1494,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
        if (unlikely(cfqq == cfqd->active_queue)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
-               cfq_schedule_dispatch(cfqd);
+               cfq_schedule_dispatch(cfqd, 0);
        }
 
        cfq_put_queue(cfqq);
@@ -1951,7 +1974,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
        if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-           (cfqd->hw_tag && CIC_SEEKY(cic)))
+           (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
                if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -2157,8 +2180,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight--;
 
-       if (sync)
+       if (sync) {
                RQ_CIC(rq)->last_end_request = now;
+               cfqd->last_end_sync_rq = now;
+       }
 
        /*
         * If this is the active queue, check if it needs to be expired,
@@ -2186,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        }
 
        if (!rq_in_driver(cfqd))
-               cfq_schedule_dispatch(cfqd);
+               cfq_schedule_dispatch(cfqd, 0);
 }
 
 /*
@@ -2316,7 +2341,7 @@ queue_fail:
        if (cic)
                put_io_context(cic->ioc);
 
-       cfq_schedule_dispatch(cfqd);
+       cfq_schedule_dispatch(cfqd, 0);
        spin_unlock_irqrestore(q->queue_lock, flags);
        cfq_log(cfqd, "set_request fail");
        return 1;
@@ -2325,7 +2350,7 @@ queue_fail:
 static void cfq_kick_queue(struct work_struct *work)
 {
        struct cfq_data *cfqd =
-               container_of(work, struct cfq_data, unplug_work);
+               container_of(work, struct cfq_data, unplug_work.work);
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
@@ -2379,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 expire:
        cfq_slice_expired(cfqd, timed_out);
 out_kick:
-       cfq_schedule_dispatch(cfqd);
+       cfq_schedule_dispatch(cfqd, 0);
 out_cont:
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2387,7 +2412,7 @@ out_cont:
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
        del_timer_sync(&cfqd->idle_slice_timer);
-       cancel_work_sync(&cfqd->unplug_work);
+       cancel_delayed_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2469,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
        cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-       INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+       INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2480,8 +2505,9 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_latency = 1;
        cfqd->hw_tag = 1;
-
+       cfqd->last_end_sync_rq = jiffies;
        return cfqd;
 }
 
@@ -2549,6 +2575,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
@@ -2580,6 +2607,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2595,6 +2623,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
+       CFQ_ATTR(low_latency),
        __ATTR_NULL
 };
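
For readers tracing the new low_latency behaviour, the following is a minimal user-space sketch of the async dispatch depth ramp introduced above; it is not part of the patch, the function and variable names are illustrative, and plain integers stand in for jiffies.

#include <stdio.h>

/* Mirrors the depth calculation added to cfq_dispatch_requests() above.
 * last_end_sync_rq and cfq_slice_sync stand in for cfqd->last_end_sync_rq
 * and cfqd->cfq_slice[1]; all time values are in jiffies. */
static unsigned int async_depth(unsigned long now,
                                unsigned long last_end_sync_rq,
                                unsigned long cfq_slice_sync,
                                unsigned int dispatched,
                                unsigned int max_dispatch)
{
        unsigned long last_sync = now - last_end_sync_rq;
        unsigned int depth = last_sync / cfq_slice_sync;

        if (!depth && !dispatched)
                depth = 1;              /* always allow one request through */
        if (depth < max_dispatch)
                max_dispatch = depth;   /* throttle async while sync IO is recent */
        return max_dispatch;
}

int main(void)
{
        /* Right after sync IO the async queue is limited to one request;
         * 250 jiffies after the last sync completion (with slice_sync 100)
         * it may have two in flight, and so on up to max_dispatch. */
        printf("%u\n", async_depth(1000,  990, 100, 0, 4)); /* prints 1 */
        printf("%u\n", async_depth(1250, 1000, 100, 1, 4)); /* prints 2 */
        return 0;
}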
 
index 7865a34e0faaaba07cb9e4946dca6406b303bef1..9bd086c1a4d591b59eea0e684debac6e6b8e52d9 100644 (file)
@@ -21,6 +21,11 @@ static int compat_put_int(unsigned long arg, int val)
        return put_user(val, (compat_int_t __user *)compat_ptr(arg));
 }
 
+static int compat_put_uint(unsigned long arg, unsigned int val)
+{
+       return put_user(val, (compat_uint_t __user *)compat_ptr(arg));
+}
+
 static int compat_put_long(unsigned long arg, long val)
 {
        return put_user(val, (compat_long_t __user *)compat_ptr(arg));
@@ -734,6 +739,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        switch (cmd) {
        case HDIO_GETGEO:
                return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
+       case BLKPBSZGET:
+               return compat_put_uint(arg, bdev_physical_block_size(bdev));
+       case BLKIOMIN:
+               return compat_put_uint(arg, bdev_io_min(bdev));
+       case BLKIOOPT:
+               return compat_put_uint(arg, bdev_io_opt(bdev));
+       case BLKALIGNOFF:
+               return compat_put_int(arg, bdev_alignment_offset(bdev));
        case BLKFLSBUF:
        case BLKROSET:
        case BLKDISCARD:
index 517e4332cb37481dcad965e8f9986f029b42e64a..5a0861da324d76f975e2cb281f1fcc4e88f98606 100644 (file)
@@ -869,7 +869,6 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
-static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
        __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -889,7 +888,6 @@ static struct attribute *disk_attrs[] = {
        &dev_attr_alignment_offset.attr,
        &dev_attr_capability.attr,
        &dev_attr_stat.attr,
-       &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
        &dev_attr_fail.attr,
 #endif
@@ -1055,7 +1053,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
                           part_stat_read(hd, merges[1]),
                           (unsigned long long)part_stat_read(hd, sectors[1]),
                           jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-                          part_in_flight(hd),
+                          hd->in_flight,
                           jiffies_to_msecs(part_stat_read(hd, io_ticks)),
                           jiffies_to_msecs(part_stat_read(hd, time_in_queue))
                        );
index d3e6b5827a3425de1871ec4dfa9f5ca04e8744f4..1f4d1de12b0957c9e8a2897f1aa89d5b96f170a5 100644 (file)
@@ -138,6 +138,11 @@ static int put_int(unsigned long arg, int val)
        return put_user(val, (int __user *)arg);
 }
 
+static int put_uint(unsigned long arg, unsigned int val)
+{
+       return put_user(val, (unsigned int __user *)arg);
+}
+
 static int put_long(unsigned long arg, long val)
 {
        return put_user(val, (long __user *)arg);
@@ -263,10 +268,18 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
        case BLKROGET:
                return put_int(arg, bdev_read_only(bdev) != 0);
-       case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+       case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
                return put_int(arg, block_size(bdev));
-       case BLKSSZGET: /* get block device hardware sector size */
+       case BLKSSZGET: /* get block device logical block size */
                return put_int(arg, bdev_logical_block_size(bdev));
+       case BLKPBSZGET: /* get block device physical block size */
+               return put_uint(arg, bdev_physical_block_size(bdev));
+       case BLKIOMIN:
+               return put_uint(arg, bdev_io_min(bdev));
+       case BLKIOOPT:
+               return put_uint(arg, bdev_io_opt(bdev));
+       case BLKALIGNOFF:
+               return put_int(arg, bdev_alignment_offset(bdev));
        case BLKSECTGET:
                return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
        case BLKRASET:
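
The hunks above wire up BLKPBSZGET, BLKIOMIN, BLKIOOPT and BLKALIGNOFF in both the native and compat ioctl paths. A small user-space sketch of how they might be consumed follows; it assumes a kernel carrying this change and a <linux/fs.h> that defines the macros, and /dev/sda is only an example device.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
        unsigned int pbsz = 0, io_min = 0, io_opt = 0;
        int align = 0;
        int fd = open("/dev/sda", O_RDONLY);    /* example device only */

        if (fd < 0)
                return 1;
        ioctl(fd, BLKPBSZGET, &pbsz);   /* physical block size */
        ioctl(fd, BLKIOMIN, &io_min);   /* minimum I/O size */
        ioctl(fd, BLKIOOPT, &io_opt);   /* optimal I/O size */
        ioctl(fd, BLKALIGNOFF, &align); /* alignment offset */
        printf("pbsz=%u io_min=%u io_opt=%u align=%d\n",
               pbsz, io_min, io_opt, align);
        close(fd);
        return 0;
}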
index dd8729d674e56ef2dfcdc7a7fa320f399b4542c6..0ed42d8870c7cdde61dfa686475a4b45edd469e4 100644 (file)
@@ -211,6 +211,18 @@ config ACPI_HOTPLUG_CPU
        select ACPI_CONTAINER
        default y
 
+config ACPI_PROCESSOR_AGGREGATOR
+       tristate "Processor Aggregator"
+       depends on ACPI_PROCESSOR
+       depends on EXPERIMENTAL
+       depends on X86
+       help
+         ACPI 4.0 defines the Processor Aggregator device, which enables the
+         OS to perform specific processor configuration and control that
+         applies to all processors in the platform. Currently only logical
+         processor idling is defined, which is used to reduce power
+         consumption. This driver supports the new device.
+
 config ACPI_THERMAL
        tristate "Thermal Zone"
        depends on ACPI_PROCESSOR
index 82cd49dc603b6a5712a380eddbb54237a33a4ec2..7702118509a0a6e848bee454a983f0ec42e9522b 100644 (file)
@@ -62,3 +62,5 @@ obj-$(CONFIG_ACPI_POWER_METER)        += power_meter.o
 processor-y                    := processor_core.o processor_throttling.o
 processor-y                    += processor_idle.o processor_thermal.o
 processor-$(CONFIG_CPU_FREQ)   += processor_perflib.o
+
+obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
new file mode 100644 (file)
index 0000000..0d2cdb8
--- /dev/null
@@ -0,0 +1,514 @@
+/*
+ * acpi_pad.c ACPI Processor Aggregator Driver
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/clockchips.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define ACPI_PROCESSOR_AGGREGATOR_CLASS        "processor_aggregator"
+#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
+#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
+static DEFINE_MUTEX(isolated_cpus_lock);
+
+#define MWAIT_SUBSTATE_MASK    (0xf)
+#define MWAIT_CSTATE_MASK      (0xf)
+#define MWAIT_SUBSTATE_SIZE    (4)
+#define CPUID_MWAIT_LEAF (5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK     (0x2)
+static unsigned long power_saving_mwait_eax;
+static void power_saving_mwait_init(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int highest_cstate = 0;
+       unsigned int highest_subcstate = 0;
+       int i;
+
+       if (!boot_cpu_has(X86_FEATURE_MWAIT))
+               return;
+       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+               return;
+
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+           !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+               return;
+
+       edx >>= MWAIT_SUBSTATE_SIZE;
+       for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+               if (edx & MWAIT_SUBSTATE_MASK) {
+                       highest_cstate = i;
+                       highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+               }
+       }
+       power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+               (highest_subcstate - 1);
+
+       for_each_online_cpu(i)
+               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);
+
+#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+       case X86_VENDOR_INTEL:
+               /*
+                * AMD Fam10h TSC will tick in all
+                * C/P/S0/S1 states when this bit is set.
+                */
+               if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+                       return;
+
+               /*FALL THROUGH*/
+       default:
+               /* TSC could halt in idle, so notify users */
+               mark_tsc_unstable("TSC halts in idle");
+       }
+#endif
+}
+
+static unsigned long cpu_weight[NR_CPUS];
+static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
+static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
+static void round_robin_cpu(unsigned int tsk_index)
+{
+       struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
+       cpumask_var_t tmp;
+       int cpu;
+       unsigned long min_weight = -1, preferred_cpu;
+
+       if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+               return;
+
+       mutex_lock(&isolated_cpus_lock);
+       cpumask_clear(tmp);
+       for_each_cpu(cpu, pad_busy_cpus)
+               cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
+       cpumask_andnot(tmp, cpu_online_mask, tmp);
+       /* avoid HT siblings if possible */
+       if (cpumask_empty(tmp))
+               cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
+       if (cpumask_empty(tmp)) {
+               mutex_unlock(&isolated_cpus_lock);
+               return;
+       }
+       for_each_cpu(cpu, tmp) {
+               if (cpu_weight[cpu] < min_weight) {
+                       min_weight = cpu_weight[cpu];
+                       preferred_cpu = cpu;
+               }
+       }
+
+       if (tsk_in_cpu[tsk_index] != -1)
+               cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+       tsk_in_cpu[tsk_index] = preferred_cpu;
+       cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
+       cpu_weight[preferred_cpu]++;
+       mutex_unlock(&isolated_cpus_lock);
+
+       set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
+}
+
+static void exit_round_robin(unsigned int tsk_index)
+{
+       struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
+       cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+       tsk_in_cpu[tsk_index] = -1;
+}
+
+static unsigned int idle_pct = 5; /* percentage */
+static unsigned int round_robin_time = 10; /* seconds */
+static int power_saving_thread(void *data)
+{
+       struct sched_param param = {.sched_priority = 1};
+       int do_sleep;
+       unsigned int tsk_index = (unsigned long)data;
+       u64 last_jiffies = 0;
+
+       sched_setscheduler(current, SCHED_RR, &param);
+
+       while (!kthread_should_stop()) {
+               int cpu;
+               u64 expire_time;
+
+               try_to_freeze();
+
+               /* round robin to cpus */
+               if (last_jiffies + round_robin_time * HZ < jiffies) {
+                       last_jiffies = jiffies;
+                       round_robin_cpu(tsk_index);
+               }
+
+               do_sleep = 0;
+
+               current_thread_info()->status &= ~TS_POLLING;
+               /*
+                * TS_POLLING-cleared state must be visible before we test
+                * NEED_RESCHED:
+                */
+               smp_mb();
+
+               expire_time = jiffies + HZ * (100 - idle_pct) / 100;
+
+               while (!need_resched()) {
+                       local_irq_disable();
+                       cpu = smp_processor_id();
+                       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+                               &cpu);
+                       stop_critical_timings();
+
+                       __monitor((void *)&current_thread_info()->flags, 0, 0);
+                       smp_mb();
+                       if (!need_resched())
+                               __mwait(power_saving_mwait_eax, 1);
+
+                       start_critical_timings();
+                       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+                               &cpu);
+                       local_irq_enable();
+
+                       if (jiffies > expire_time) {
+                               do_sleep = 1;
+                               break;
+                       }
+               }
+
+               current_thread_info()->status |= TS_POLLING;
+
+               /*
+                * The current sched_rt has a threshold for RT task running
+                * time: when an RT task has used 95% of the CPU time, it is
+                * scheduled out for 5% of the CPU time so that other tasks
+                * are not starved. But that mechanism only works when every
+                * CPU has an RT task running; if one CPU has none, RT tasks
+                * from other CPUs borrow its CPU time and end up using more
+                * than 95%. To keep the starvation avoidance working, take a
+                * nap here.
+                */
+               if (do_sleep)
+                       schedule_timeout_killable(HZ * idle_pct / 100);
+       }
+
+       exit_round_robin(tsk_index);
+       return 0;
+}
+
+static struct task_struct *ps_tsks[NR_CPUS];
+static unsigned int ps_tsk_num;
+static int create_power_saving_task(void)
+{
+       ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
+               (void *)(unsigned long)ps_tsk_num,
+               "power_saving/%d", ps_tsk_num);
+       if (ps_tsks[ps_tsk_num]) {
+               ps_tsk_num++;
+               return 0;
+       }
+       return -EINVAL;
+}
+
+static void destroy_power_saving_task(void)
+{
+       if (ps_tsk_num > 0) {
+               ps_tsk_num--;
+               kthread_stop(ps_tsks[ps_tsk_num]);
+       }
+}
+
+static void set_power_saving_task_num(unsigned int num)
+{
+       if (num > ps_tsk_num) {
+               while (ps_tsk_num < num) {
+                       if (create_power_saving_task())
+                               return;
+               }
+       } else if (num < ps_tsk_num) {
+               while (ps_tsk_num > num)
+                       destroy_power_saving_task();
+       }
+}
+
+static int acpi_pad_idle_cpus(unsigned int num_cpus)
+{
+       get_online_cpus();
+
+       num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
+       set_power_saving_task_num(num_cpus);
+
+       put_online_cpus();
+       return 0;
+}
+
+static uint32_t acpi_pad_idle_cpus_num(void)
+{
+       return ps_tsk_num;
+}
+
+static ssize_t acpi_pad_rrtime_store(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       unsigned long num;
+       if (strict_strtoul(buf, 0, &num))
+               return -EINVAL;
+       if (num < 1 || num >= 100)
+               return -EINVAL;
+       mutex_lock(&isolated_cpus_lock);
+       round_robin_time = num;
+       mutex_unlock(&isolated_cpus_lock);
+       return count;
+}
+
+static ssize_t acpi_pad_rrtime_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time);
+}
+static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
+       acpi_pad_rrtime_show,
+       acpi_pad_rrtime_store);
+
+static ssize_t acpi_pad_idlepct_store(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       unsigned long num;
+       if (strict_strtoul(buf, 0, &num))
+               return -EINVAL;
+       if (num < 1 || num >= 100)
+               return -EINVAL;
+       mutex_lock(&isolated_cpus_lock);
+       idle_pct = num;
+       mutex_unlock(&isolated_cpus_lock);
+       return count;
+}
+
+static ssize_t acpi_pad_idlepct_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       return scnprintf(buf, PAGE_SIZE, "%d", idle_pct);
+}
+static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
+       acpi_pad_idlepct_show,
+       acpi_pad_idlepct_store);
+
+static ssize_t acpi_pad_idlecpus_store(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       unsigned long num;
+       if (strict_strtoul(buf, 0, &num))
+               return -EINVAL;
+       mutex_lock(&isolated_cpus_lock);
+       acpi_pad_idle_cpus(num);
+       mutex_unlock(&isolated_cpus_lock);
+       return count;
+}
+
+static ssize_t acpi_pad_idlecpus_show(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+       return cpumask_scnprintf(buf, PAGE_SIZE,
+               to_cpumask(pad_busy_cpus_bits));
+}
+static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
+       acpi_pad_idlecpus_show,
+       acpi_pad_idlecpus_store);
+
+static int acpi_pad_add_sysfs(struct acpi_device *device)
+{
+       int result;
+
+       result = device_create_file(&device->dev, &dev_attr_idlecpus);
+       if (result)
+               return -ENODEV;
+       result = device_create_file(&device->dev, &dev_attr_idlepct);
+       if (result) {
+               device_remove_file(&device->dev, &dev_attr_idlecpus);
+               return -ENODEV;
+       }
+       result = device_create_file(&device->dev, &dev_attr_rrtime);
+       if (result) {
+               device_remove_file(&device->dev, &dev_attr_idlecpus);
+               device_remove_file(&device->dev, &dev_attr_idlepct);
+               return -ENODEV;
+       }
+       return 0;
+}
+
+static void acpi_pad_remove_sysfs(struct acpi_device *device)
+{
+       device_remove_file(&device->dev, &dev_attr_idlecpus);
+       device_remove_file(&device->dev, &dev_attr_idlepct);
+       device_remove_file(&device->dev, &dev_attr_rrtime);
+}
+
+/* Query firmware how many CPUs should be idle */
+static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
+{
+       struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+       acpi_status status;
+       union acpi_object *package;
+       int rev, num, ret = -EINVAL;
+
+       status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
+       if (ACPI_FAILURE(status))
+               return -EINVAL;
+       package = buffer.pointer;
+       if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
+               goto out;
+       rev = package->package.elements[0].integer.value;
+       num = package->package.elements[1].integer.value;
+       if (rev != 1)
+               goto out;
+       *num_cpus = num;
+       ret = 0;
+out:
+       kfree(buffer.pointer);
+       return ret;
+}
+
+/* Notify firmware how many CPUs are idle */
+static void acpi_pad_ost(acpi_handle handle, int stat,
+       uint32_t idle_cpus)
+{
+       union acpi_object params[3] = {
+               {.type = ACPI_TYPE_INTEGER,},
+               {.type = ACPI_TYPE_INTEGER,},
+               {.type = ACPI_TYPE_BUFFER,},
+       };
+       struct acpi_object_list arg_list = {3, params};
+
+       params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
+       params[1].integer.value =  stat;
+       params[2].buffer.length = 4;
+       params[2].buffer.pointer = (void *)&idle_cpus;
+       acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+}
+
+static void acpi_pad_handle_notify(acpi_handle handle)
+{
+       int num_cpus, ret;
+       uint32_t idle_cpus;
+
+       mutex_lock(&isolated_cpus_lock);
+       if (acpi_pad_pur(handle, &num_cpus)) {
+               mutex_unlock(&isolated_cpus_lock);
+               return;
+       }
+       ret = acpi_pad_idle_cpus(num_cpus);
+       idle_cpus = acpi_pad_idle_cpus_num();
+       if (!ret)
+               acpi_pad_ost(handle, 0, idle_cpus);
+       else
+               acpi_pad_ost(handle, 1, 0);
+       mutex_unlock(&isolated_cpus_lock);
+}
+
+static void acpi_pad_notify(acpi_handle handle, u32 event,
+       void *data)
+{
+       struct acpi_device *device = data;
+
+       switch (event) {
+       case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
+               acpi_pad_handle_notify(handle);
+               acpi_bus_generate_proc_event(device, event, 0);
+               acpi_bus_generate_netlink_event(device->pnp.device_class,
+                       dev_name(&device->dev), event, 0);
+               break;
+       default:
+               printk(KERN_WARNING"Unsupported event [0x%x]\n", event);
+               break;
+       }
+}
+
+static int acpi_pad_add(struct acpi_device *device)
+{
+       acpi_status status;
+
+       strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
+       strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
+
+       if (acpi_pad_add_sysfs(device))
+               return -ENODEV;
+
+       status = acpi_install_notify_handler(device->handle,
+               ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
+       if (ACPI_FAILURE(status)) {
+               acpi_pad_remove_sysfs(device);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int acpi_pad_remove(struct acpi_device *device,
+       int type)
+{
+       mutex_lock(&isolated_cpus_lock);
+       acpi_pad_idle_cpus(0);
+       mutex_unlock(&isolated_cpus_lock);
+
+       acpi_remove_notify_handler(device->handle,
+               ACPI_DEVICE_NOTIFY, acpi_pad_notify);
+       acpi_pad_remove_sysfs(device);
+       return 0;
+}
+
+static const struct acpi_device_id pad_device_ids[] = {
+       {"ACPI000C", 0},
+       {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, pad_device_ids);
+
+static struct acpi_driver acpi_pad_driver = {
+       .name = "processor_aggregator",
+       .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
+       .ids = pad_device_ids,
+       .ops = {
+               .add = acpi_pad_add,
+               .remove = acpi_pad_remove,
+       },
+};
+
+static int __init acpi_pad_init(void)
+{
+       power_saving_mwait_init();
+       if (power_saving_mwait_eax == 0)
+               return -EINVAL;
+
+       return acpi_bus_register_driver(&acpi_pad_driver);
+}
+
+static void __exit acpi_pad_exit(void)
+{
+       acpi_bus_unregister_driver(&acpi_pad_driver);
+}
+
+module_init(acpi_pad_init);
+module_exit(acpi_pad_exit);
+MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>");
+MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
+MODULE_LICENSE("GPL");
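
To make the hint computed by power_saving_mwait_init() above easier to follow, here is a stand-alone sketch of the same packing logic. The sample EDX value is invented for illustration and does not describe real hardware.

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK    0xf
#define MWAIT_SUBSTATE_SIZE    4

/* Same loop as power_saving_mwait_init(): pick the deepest C-state that
 * advertises at least one MWAIT sub-state and encode it as an EAX hint. */
static unsigned long mwait_hint(unsigned int edx)
{
        unsigned int highest_cstate = 0, highest_subcstate = 0;
        int i;

        edx >>= MWAIT_SUBSTATE_SIZE;    /* skip the C0 sub-state count */
        for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
                if (edx & MWAIT_SUBSTATE_MASK) {
                        highest_cstate = i;
                        highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
                }
        }
        return (highest_cstate << MWAIT_SUBSTATE_SIZE) |
               (highest_subcstate - 1);
}

int main(void)
{
        /* Invented CPUID.5 EDX: two C1 sub-states, one C2, one C3.
         * The deepest advertised state is C3 (hint index 2), sub-state 0,
         * so the hint is 0x20. */
        printf("0x%lx\n", mwait_hint(0x00001120));
        return 0;
}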
index 3a2cfefc71ab1ab5da484b47bd9bd7f28aee988a..7338b6a3e049aa061d00bf13b469ffb086d5276a 100644 (file)
@@ -67,7 +67,7 @@ struct dock_station {
        struct list_head dependent_devices;
        struct list_head hotplug_devices;
 
-       struct list_head sibiling;
+       struct list_head sibling;
        struct platform_device *dock_device;
 };
 static LIST_HEAD(dock_stations);
@@ -275,7 +275,7 @@ int is_dock_device(acpi_handle handle)
 
        if (is_dock(handle))
                return 1;
-       list_for_each_entry(dock_station, &dock_stations, sibiling) {
+       list_for_each_entry(dock_station, &dock_stations, sibling) {
                if (find_dock_dependent_device(dock_station, handle))
                        return 1;
        }
@@ -619,7 +619,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
         * make sure this handle is for a device dependent on the dock,
         * this would include the dock station itself
         */
-       list_for_each_entry(dock_station, &dock_stations, sibiling) {
+       list_for_each_entry(dock_station, &dock_stations, sibling) {
                /*
                 * An ATA bay can be in a dock and itself can be ejected
                 * separately, so there are two 'dock stations' which need the
@@ -651,7 +651,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
        if (!dock_station_count)
                return;
 
-       list_for_each_entry(dock_station, &dock_stations, sibiling) {
+       list_for_each_entry(dock_station, &dock_stations, sibling) {
                dd = find_dock_dependent_device(dock_station, handle);
                if (dd)
                        dock_del_hotplug_device(dock_station, dd);
@@ -787,7 +787,7 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
        if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
           && event != ACPI_NOTIFY_EJECT_REQUEST)
                return 0;
-       list_for_each_entry(dock_station, &dock_stations, sibiling) {
+       list_for_each_entry(dock_station, &dock_stations, sibling) {
                if (dock_station->handle == handle) {
                        struct dock_data *dock_data;
 
@@ -958,7 +958,7 @@ static int dock_add(acpi_handle handle)
        dock_station->last_dock_time = jiffies - HZ;
        INIT_LIST_HEAD(&dock_station->dependent_devices);
        INIT_LIST_HEAD(&dock_station->hotplug_devices);
-       INIT_LIST_HEAD(&dock_station->sibiling);
+       INIT_LIST_HEAD(&dock_station->sibling);
        spin_lock_init(&dock_station->dd_lock);
        mutex_init(&dock_station->hp_lock);
        ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
@@ -1044,7 +1044,7 @@ static int dock_add(acpi_handle handle)
        add_dock_dependent_device(dock_station, dd);
 
        dock_station_count++;
-       list_add(&dock_station->sibiling, &dock_stations);
+       list_add(&dock_station->sibling, &dock_stations);
        return 0;
 
 dock_add_err_unregister:
@@ -1149,7 +1149,7 @@ static void __exit dock_exit(void)
        struct dock_station *tmp;
 
        unregister_acpi_bus_notifier(&dock_acpi_notifier);
-       list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling)
+       list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
                dock_remove(dock_station);
 }
 
index f70796081c4c0ad93c0e5c751cc2297b81dea9f4..baef28c1e63090c8b10c5e661257390c52f275b7 100644 (file)
@@ -119,6 +119,8 @@ static struct acpi_ec {
 } *boot_ec, *first_ec;
 
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+static int EC_FLAGS_VALIDATE_ECDT; /* ASUSTek ECDTs need to be validated */
+static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -232,10 +234,8 @@ static int ec_poll(struct acpi_ec *ec)
                        }
                        advance_transaction(ec, acpi_ec_read_status(ec));
                } while (time_before(jiffies, delay));
-               if (!ec->curr->irq_count ||
-                   (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
+               if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
                        break;
-               /* try restart command if we get any false interrupts */
                pr_debug(PREFIX "controller reset, restart transaction\n");
                spin_lock_irqsave(&ec->curr_lock, flags);
                start_transaction(ec);
@@ -899,6 +899,44 @@ static const struct acpi_device_id ec_device_ids[] = {
        {"", 0},
 };
 
+/* Some BIOS do not survive early DSDT scan, skip it */
+static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
+{
+       EC_FLAGS_SKIP_DSDT_SCAN = 1;
+       return 0;
+}
+
+/* ASUStek often supplies us with broken ECDT, validate it */
+static int ec_validate_ecdt(const struct dmi_system_id *id)
+{
+       EC_FLAGS_VALIDATE_ECDT = 1;
+       return 0;
+}
+
+/* MSI EC needs special treatment, enable it */
+static int ec_flag_msi(const struct dmi_system_id *id)
+{
+       EC_FLAGS_MSI = 1;
+       EC_FLAGS_VALIDATE_ECDT = 1;
+       return 0;
+}
+
+static struct dmi_system_id __initdata ec_dmi_table[] = {
+       {
+       ec_skip_dsdt_scan, "Compal JFL92", {
+       DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
+       DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
+       {
+       ec_flag_msi, "MSI hardware", {
+       DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
+       DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
+       {
+       ec_validate_ecdt, "ASUS hardware", {
+       DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
+       {},
+};
+
+
 int __init acpi_ec_ecdt_probe(void)
 {
        acpi_status status;
@@ -911,11 +949,7 @@ int __init acpi_ec_ecdt_probe(void)
        /*
         * Generate a boot ec context
         */
-       if (dmi_name_in_vendors("Micro-Star") ||
-           dmi_name_in_vendors("Notebook")) {
-               pr_info(PREFIX "Enabling special treatment for EC from MSI.\n");
-               EC_FLAGS_MSI = 1;
-       }
+       dmi_check_system(ec_dmi_table);
        status = acpi_get_table(ACPI_SIG_ECDT, 1,
                                (struct acpi_table_header **)&ecdt_ptr);
        if (ACPI_SUCCESS(status)) {
@@ -926,7 +960,7 @@ int __init acpi_ec_ecdt_probe(void)
                boot_ec->handle = ACPI_ROOT_OBJECT;
                acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
                /* Don't trust ECDT, which comes from ASUSTek */
-               if (!dmi_name_in_vendors("ASUS") && EC_FLAGS_MSI == 0)
+               if (!EC_FLAGS_VALIDATE_ECDT)
                        goto install;
                saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
                if (!saved_ec)
@@ -934,6 +968,10 @@ int __init acpi_ec_ecdt_probe(void)
                memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
        /* fall through */
        }
+
+       if (EC_FLAGS_SKIP_DSDT_SCAN)
+               return -ENODEV;
+
        /* This workaround is needed only on some broken machines,
         * which require early EC, but fail to provide ECDT */
        printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
index d0d550d22a6d43a14ccecec6658e9fb2773d6b70..f8b6f555ba52ba328d43efca6c5926831a663d06 100644 (file)
@@ -398,6 +398,8 @@ acpi_system_write_wakeup_device(struct file *file,
 
        if (len > 4)
                len = 4;
+       if (len < 0)
+               return -EFAULT;
 
        if (copy_from_user(strbuf, buffer, len))
                return -EFAULT;
index c2d4d6e0936469a8ef5e80a11dd36a99d7be761e..c567b46dfa0f78a83bc1e0d0feb77e79516cfb42 100644 (file)
@@ -863,13 +863,6 @@ static int acpi_processor_add(struct acpi_device *device)
                goto err_remove_sysfs;
        }
 
-       if (pr->flags.throttling) {
-               printk(KERN_INFO PREFIX "%s [%s] (supports",
-                      acpi_device_name(device), acpi_device_bid(device));
-               printk(" %d throttling states", pr->throttling.state_count);
-               printk(")\n");
-       }
-
        return 0;
 
 err_remove_sysfs:
index 468921bed22f4e33f9012592fe906c3ca9ba3a1d..14a7481c97d71bdc2abd18e3c695cf72e8a9aee5 100644 (file)
@@ -1052,6 +1052,8 @@ static void acpi_device_set_id(struct acpi_device *device)
                        device->flags.bus_address = 1;
                }
 
+               kfree(info);
+
                /*
                 * Some devices don't reliably have _HIDs & _CIDs, so add
                 * synthetic HIDs to make sure drivers can find them.
@@ -1325,13 +1327,8 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
                         struct acpi_device **child)
 {
        acpi_status status;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        void *device = NULL;
 
-       acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-       printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n",
-              (char *) buffer.pointer);
-
        status = acpi_bus_check_add(handle, 0, ops, &device);
        if (ACPI_SUCCESS(status))
                acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
index a4fddb24476fdba0bc1ebbafd510aaa3544595d1..f6e54bf8dd96e5d5767547bad753a22da03a52e0 100644 (file)
@@ -285,7 +285,7 @@ static int acpi_video_device_brightness_open_fs(struct inode *inode,
                                                struct file *file);
 static ssize_t acpi_video_device_write_brightness(struct file *file,
        const char __user *buffer, size_t count, loff_t *data);
-static struct file_operations acpi_video_device_brightness_fops = {
+static const struct file_operations acpi_video_device_brightness_fops = {
        .owner = THIS_MODULE,
        .open = acpi_video_device_brightness_open_fs,
        .read = seq_read,
index 703364b52170fa2f4728ff135d38bc2be8cb6e48..66e181345b3af8355287f006a622b874cb2a2f78 100644 (file)
@@ -1306,14 +1306,6 @@ static void amb_close (struct atm_vcc * atm_vcc) {
   return;
 }
 
-/********** Set socket options for a VC **********/
-
-// int amb_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
-
-/********** Set socket options for a VC **********/
-
-// int amb_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
-
 /********** Send **********/
 
 static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
index 5503bfc8e1320e0a362b2f4ffd57e043104ca1d7..0c302614544382992bf67fb656a001cffa9c5edd 100644 (file)
@@ -2031,7 +2031,7 @@ static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname,
 
 
 static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname,
-    void __user *optval,int optlen)
+    void __user *optval,unsigned int optlen)
 {
        return -EINVAL;
 }
index b119640e1ee9d0f6403aae0a3156405e6a72c7bc..cd5049af47a9a11144d58e8cceed873c2bfb2483 100644 (file)
@@ -1244,7 +1244,7 @@ static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname,
 
 
 static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname,
-                        void __user *optval,int optlen)
+                        void __user *optval,unsigned int optlen)
 {
        func_enter ();
        func_exit ();
index 10f000dbe448e149e599caf2eeda9f7ec1b2a6bf..f766cc46b4c4346ae3a08897af1563097001ec0a 100644 (file)
@@ -1795,7 +1795,7 @@ fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *op
 
 
 static int
-fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
+fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
 {
     /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
     
index 29e66d603d3c6977d55a44611a0baa05f106331e..70667033a5687ee38e07a2ce220f2f91fbb2fa2a 100644 (file)
@@ -921,9 +921,9 @@ out_free_rbpq_base:
                        he_dev->rbrq_phys);
        i = CONFIG_RBPL_SIZE;
 out_free_rbpl_virt:
-       while (--i)
-               pci_pool_free(he_dev->rbps_pool, he_dev->rbpl_virt[i].virt,
-                               he_dev->rbps_base[i].phys);
+       while (i--)
+               pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
+                               he_dev->rbpl_base[i].phys);
        kfree(he_dev->rbpl_virt);
 
 out_free_rbpl_base:
@@ -933,11 +933,11 @@ out_free_rbpl_base:
 out_destroy_rbpl_pool:
        pci_pool_destroy(he_dev->rbpl_pool);
 
-       i = CONFIG_RBPL_SIZE;
+       i = CONFIG_RBPS_SIZE;
 out_free_rbps_virt:
-       while (--i)
-               pci_pool_free(he_dev->rbpl_pool, he_dev->rbps_virt[i].virt,
-                               he_dev->rbpl_base[i].phys);
+       while (i--)
+               pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
+                               he_dev->rbps_base[i].phys);
        kfree(he_dev->rbps_virt);
 
 out_free_rbps_base:
index 01ce241dbeae81423aed5bd2a49e784f9cff057c..4e49021e67ee465b5ecc8ffb312e1c7a88399431 100644 (file)
@@ -2590,7 +2590,7 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
 }
 
 static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
-                          void *optval, int optlen) {
+                          void *optval, unsigned int optlen) {
   hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
   PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt");
   switch (level) {
index 78c9736c3579beba974dc546cbd4bfbe8298bcf4..b2c1b37ab2e4dbb2ea5370a8637c9975af9e3146 100644 (file)
@@ -2862,7 +2862,7 @@ static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
 }  
   
 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
-       void __user *optval, int optlen)  
+       void __user *optval, unsigned int optlen)  
 {  
        IF_EVENT(printk(">ia_setsockopt\n");)  
        return -EINVAL;  
index 752b1ba81f7e9bbd5ff56944723ae6740318dad5..2e9635be048c2fdb4e8b4773a49464558ba4f351 100644 (file)
@@ -1517,7 +1517,7 @@ static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
 
 
 static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
-    void __user *optval,int optlen)
+    void __user *optval,unsigned int optlen)
 {
        return -EINVAL;
 }
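
The ATM driver hunks above only widen the optlen argument to unsigned int so that the drivers match the updated atmdev_ops prototype. As a rough sketch with illustrative names (not taken from any real driver), a stub with the new signature looks like this:

#include <linux/atmdev.h>

/* No driver-specific socket options in this hypothetical driver. */
static int example_setsockopt(struct atm_vcc *vcc, int level, int optname,
                              void __user *optval, unsigned int optlen)
{
        return -EINVAL;
}

static const struct atmdev_ops example_dev_ops = {
        .setsockopt = example_setsockopt,
        /* .open, .close, .send, .getsockopt, ... as before */
};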
index 6fa7b0fdbdfd172f2e10f27bc82660c9201f3234..eb4fa19439444097875d8492ceb6996569e40d45 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/reboot.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
@@ -6422,16 +6423,10 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
   return true;
 }
 
-
-/*
-  DAC960_ProcReadStatus implements reading /proc/rd/status.
-*/
-
-static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
-                                int Count, int *EOF, void *Data)
+static int dac960_proc_show(struct seq_file *m, void *v)
 {
   unsigned char *StatusMessage = "OK\n";
-  int ControllerNumber, BytesAvailable;
+  int ControllerNumber;
   for (ControllerNumber = 0;
        ControllerNumber < DAC960_ControllerCount;
        ControllerNumber++)
@@ -6444,52 +6439,49 @@ static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
          break;
        }
     }
-  BytesAvailable = strlen(StatusMessage) - Offset;
-  if (Count >= BytesAvailable)
-    {
-      Count = BytesAvailable;
-      *EOF = true;
-    }
-  if (Count <= 0) return 0;
-  *Start = Page;
-  memcpy(Page, &StatusMessage[Offset], Count);
-  return Count;
+  seq_puts(m, StatusMessage);
+  return 0;
 }
 
+static int dac960_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dac960_proc_show, NULL);
+}
 
-/*
-  DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status.
-*/
+static const struct file_operations dac960_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = dac960_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 
-static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset,
-                                       int Count, int *EOF, void *Data)
+static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
-  int BytesAvailable = Controller->InitialStatusLength - Offset;
-  if (Count >= BytesAvailable)
-    {
-      Count = BytesAvailable;
-      *EOF = true;
-    }
-  if (Count <= 0) return 0;
-  *Start = Page;
-  memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count);
-  return Count;
+       DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
+       seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
+       return 0;
 }
 
+static int dac960_initial_status_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data);
+}
 
-/*
-  DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status.
-*/
+static const struct file_operations dac960_initial_status_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = dac960_initial_status_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 
-static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
-                                       int Count, int *EOF, void *Data)
+static int dac960_current_status_proc_show(struct seq_file *m, void *v)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
   unsigned char *StatusMessage =
     "No Rebuild or Consistency Check in Progress\n";
   int ProgressMessageLength = strlen(StatusMessage);
-  int BytesAvailable;
   if (jiffies != Controller->LastCurrentStatusTime)
     {
       Controller->CurrentStatusLength = 0;
@@ -6513,49 +6505,41 @@ static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
        }
       Controller->LastCurrentStatusTime = jiffies;
     }
-  BytesAvailable = Controller->CurrentStatusLength - Offset;
-  if (Count >= BytesAvailable)
-    {
-      Count = BytesAvailable;
-      *EOF = true;
-    }
-  if (Count <= 0) return 0;
-  *Start = Page;
-  memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count);
-  return Count;
+       seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer);
+       return 0;
 }
 
+static int dac960_current_status_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dac960_current_status_proc_show, PDE(inode)->data);
+}
 
-/*
-  DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command.
-*/
+static const struct file_operations dac960_current_status_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = dac960_current_status_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 
-static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset,
-                                     int Count, int *EOF, void *Data)
+static int dac960_user_command_proc_show(struct seq_file *m, void *v)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
-  int BytesAvailable = Controller->UserStatusLength - Offset;
-  if (Count >= BytesAvailable)
-    {
-      Count = BytesAvailable;
-      *EOF = true;
-    }
-  if (Count <= 0) return 0;
-  *Start = Page;
-  memcpy(Page, &Controller->UserStatusBuffer[Offset], Count);
-  return Count;
-}
+       DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
 
+       seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer);
+       return 0;
+}
 
-/*
-  DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command.
-*/
+static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dac960_user_command_proc_show, PDE(inode)->data);
+}
 
-static int DAC960_ProcWriteUserCommand(struct file *file,
+static ssize_t dac960_user_command_proc_write(struct file *file,
                                       const char __user *Buffer,
-                                      unsigned long Count, void *Data)
+                                      size_t Count, loff_t *pos)
 {
-  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data;
   unsigned char CommandBuffer[80];
   int Length;
   if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
@@ -6572,6 +6556,14 @@ static int DAC960_ProcWriteUserCommand(struct file *file,
            ? Count : -EBUSY);
 }
 
+static const struct file_operations dac960_user_command_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = dac960_user_command_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+       .write          = dac960_user_command_proc_write,
+};
 
 /*
   DAC960_CreateProcEntries creates the /proc/rd/... entries for the
@@ -6586,23 +6578,17 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
 
        if (DAC960_ProcDirectoryEntry == NULL) {
                DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
-               StatusProcEntry = create_proc_read_entry("status", 0,
+               StatusProcEntry = proc_create("status", 0,
                                           DAC960_ProcDirectoryEntry,
-                                          DAC960_ProcReadStatus, NULL);
+                                          &dac960_proc_fops);
        }
 
       sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
       ControllerProcEntry = proc_mkdir(Controller->ControllerName,
                                       DAC960_ProcDirectoryEntry);
-      create_proc_read_entry("initial_status", 0, ControllerProcEntry,
-                            DAC960_ProcReadInitialStatus, Controller);
-      create_proc_read_entry("current_status", 0, ControllerProcEntry,
-                            DAC960_ProcReadCurrentStatus, Controller);
-      UserCommandProcEntry =
-       create_proc_read_entry("user_command", S_IWUSR | S_IRUSR,
-                              ControllerProcEntry, DAC960_ProcReadUserCommand,
-                              Controller);
-      UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand;
+      proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
+      proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
+      UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
       Controller->ControllerProcEntry = ControllerProcEntry;
 }
 
index 24c3e21ab263a6a685356f7d17ac5c0c097e60b6..fb5be2d95d52c57a8ac8aab4f0375579a8852de6 100644 (file)
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
+#include <linux/jiffies.h>
 #include <linux/hdreg.h>
 #include <linux/spinlock.h>
 #include <linux/compat.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -155,6 +157,10 @@ static struct board_type products[] = {
 
 static ctlr_info_t *hba[MAX_CTLR];
 
+static struct task_struct *cciss_scan_thread;
+static DEFINE_MUTEX(scan_mutex);
+static LIST_HEAD(scan_q);
+
 static void do_cciss_request(struct request_queue *q);
 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
 static int cciss_open(struct block_device *bdev, fmode_t mode);
@@ -164,9 +170,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
 static int cciss_revalidate(struct gendisk *disk);
-static int rebuild_lun_table(ctlr_info_t *h, int first_time);
+static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
 static int deregister_disk(ctlr_info_t *h, int drv_index,
-                          int clear_all);
+                          int clear_all, int via_ioctl);
 
 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
                        sector_t *total_size, unsigned int *block_size);
@@ -189,8 +195,13 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
 
 static void fail_all_cmds(unsigned long ctlr);
+static int add_to_scan_list(struct ctlr_info *h);
 static int scan_thread(void *data);
 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
+static void cciss_hba_release(struct device *dev);
+static void cciss_device_release(struct device *dev);
+static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
+static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
 
 #ifdef CONFIG_PROC_FS
 static void cciss_procinit(int i);
@@ -245,7 +256,10 @@ static inline void removeQ(CommandList_struct *c)
 
 #include "cciss_scsi.c"                /* For SCSI tape support */
 
-#define RAID_UNKNOWN 6
+static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
+       "UNKNOWN"
+};
+#define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1)
 
 #ifdef CONFIG_PROC_FS
 
@@ -255,9 +269,6 @@ static inline void removeQ(CommandList_struct *c)
 #define ENG_GIG 1000000000
 #define ENG_GIG_FACTOR (ENG_GIG/512)
 #define ENGAGE_SCSI    "engage scsi"
-static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
-       "UNKNOWN"
-};
 
 static struct proc_dir_entry *proc_cciss;
 
@@ -318,7 +329,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
        ctlr_info_t *h = seq->private;
        unsigned ctlr = h->ctlr;
        loff_t *pos = v;
-       drive_info_struct *drv = &h->drv[*pos];
+       drive_info_struct *drv = h->drv[*pos];
 
        if (*pos > h->highest_lun)
                return 0;
@@ -331,7 +342,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
        vol_sz_frac *= 100;
        sector_div(vol_sz_frac, ENG_GIG_FACTOR);
 
-       if (drv->raid_level > 5)
+       if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
                drv->raid_level = RAID_UNKNOWN;
        seq_printf(seq, "cciss/c%dd%d:"
                        "\t%4u.%02uGB\tRAID %s\n",
@@ -426,7 +437,7 @@ out:
        return err;
 }
 
-static struct file_operations cciss_proc_fops = {
+static const struct file_operations cciss_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = cciss_seq_open,
        .read    = seq_read,
@@ -454,9 +465,19 @@ static void __devinit cciss_procinit(int i)
 #define to_hba(n) container_of(n, struct ctlr_info, dev)
 #define to_drv(n) container_of(n, drive_info_struct, dev)
 
-static struct device_type cciss_host_type = {
-       .name           = "cciss_host",
-};
+static ssize_t host_store_rescan(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct ctlr_info *h = to_hba(dev);
+
+       add_to_scan_list(h);
+       wake_up_process(cciss_scan_thread);
+       wait_for_completion_interruptible(&h->scan_wait);
+
+       return count;
+}
+DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
 
 static ssize_t dev_show_unique_id(struct device *dev,
                                 struct device_attribute *attr,
@@ -560,11 +581,101 @@ static ssize_t dev_show_rev(struct device *dev,
 }
 DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
 
+static ssize_t cciss_show_lunid(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       drive_info_struct *drv = to_drv(dev);
+       struct ctlr_info *h = to_hba(drv->dev.parent);
+       unsigned long flags;
+       unsigned char lunid[8];
+
+       spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+       if (h->busy_configuring) {
+               spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+               return -EBUSY;
+       }
+       if (!drv->heads) {
+               spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+               return -ENOTTY;
+       }
+       memcpy(lunid, drv->LunID, sizeof(lunid));
+       spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+       return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+               lunid[0], lunid[1], lunid[2], lunid[3],
+               lunid[4], lunid[5], lunid[6], lunid[7]);
+}
+DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
+
+static ssize_t cciss_show_raid_level(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       drive_info_struct *drv = to_drv(dev);
+       struct ctlr_info *h = to_hba(drv->dev.parent);
+       int raid;
+       unsigned long flags;
+
+       spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+       if (h->busy_configuring) {
+               spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+               return -EBUSY;
+       }
+       raid = drv->raid_level;
+       spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+       if (raid < 0 || raid > RAID_UNKNOWN)
+               raid = RAID_UNKNOWN;
+
+       return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
+                       raid_label[raid]);
+}
+DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
+
+static ssize_t cciss_show_usage_count(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       drive_info_struct *drv = to_drv(dev);
+       struct ctlr_info *h = to_hba(drv->dev.parent);
+       unsigned long flags;
+       int count;
+
+       spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+       if (h->busy_configuring) {
+               spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+               return -EBUSY;
+       }
+       count = drv->usage_count;
+       spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+       return snprintf(buf, 20, "%d\n", count);
+}
+DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
+
+static struct attribute *cciss_host_attrs[] = {
+       &dev_attr_rescan.attr,
+       NULL
+};
+
+static struct attribute_group cciss_host_attr_group = {
+       .attrs = cciss_host_attrs,
+};
+
+static const struct attribute_group *cciss_host_attr_groups[] = {
+       &cciss_host_attr_group,
+       NULL
+};
+
+static struct device_type cciss_host_type = {
+       .name           = "cciss_host",
+       .groups         = cciss_host_attr_groups,
+       .release        = cciss_hba_release,
+};
+
 static struct attribute *cciss_dev_attrs[] = {
        &dev_attr_unique_id.attr,
        &dev_attr_model.attr,
        &dev_attr_vendor.attr,
        &dev_attr_rev.attr,
+       &dev_attr_lunid.attr,
+       &dev_attr_raid_level.attr,
+       &dev_attr_usage_count.attr,
        NULL
 };
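
The three new per-drive attributes above (lunid, raid_level, usage_count) are plain read-only sysfs files, so any userspace tool can poll them. As a rough sketch of a consumer — the /sys/block/cciss!c0d1/device path below is an assumed example location for logical drive c0d1, not something spelled out in this hunk — a minimal reader might look like:

#include <stdio.h>

int main(void)
{
	/* assumed example path; substitute the real controller/drive */
	const char *attrs[] = { "lunid", "raid_level", "usage_count" };
	char path[128], line[64];
	FILE *f;
	int i;

	for (i = 0; i < 3; i++) {
		snprintf(path, sizeof(path),
			 "/sys/block/cciss!c0d1/device/%s", attrs[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute or drive not present */
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", attrs[i], line);
		fclose(f);
	}
	return 0;
}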
 
@@ -580,12 +691,24 @@ static const struct attribute_group *cciss_dev_attr_groups[] = {
 static struct device_type cciss_dev_type = {
        .name           = "cciss_device",
        .groups         = cciss_dev_attr_groups,
+       .release        = cciss_device_release,
 };
 
 static struct bus_type cciss_bus_type = {
        .name           = "cciss",
 };
 
+/*
+ * cciss_hba_release is called when the reference count
+ * of h->dev goes to zero.
+ */
+static void cciss_hba_release(struct device *dev)
+{
+       /*
+        * nothing to do, but need this to avoid a warning
+        * about not having a release handler from lib/kref.c.
+        */
+}
 
 /*
  * Initialize sysfs entry for each controller.  This sets up and registers
@@ -609,6 +732,16 @@ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
 static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
 {
        device_del(&h->dev);
+       put_device(&h->dev); /* final put. */
+}
+
+/* cciss_device_release is called when the reference count
+ * of h->drv[x]->dev goes to zero.
+ */
+static void cciss_device_release(struct device *dev)
+{
+       drive_info_struct *drv = to_drv(dev);
+       kfree(drv);
 }
 
 /*
@@ -617,24 +750,39 @@ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
  * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from
  * /sys/block/cciss!c#d# to this entry.
  */
-static int cciss_create_ld_sysfs_entry(struct ctlr_info *h,
-                                      drive_info_struct *drv,
+static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
                                       int drv_index)
 {
-       device_initialize(&drv->dev);
-       drv->dev.type = &cciss_dev_type;
-       drv->dev.bus = &cciss_bus_type;
-       dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index);
-       drv->dev.parent = &h->dev;
-       return device_add(&drv->dev);
+       struct device *dev;
+
+       if (h->drv[drv_index]->device_initialized)
+               return 0;
+
+       dev = &h->drv[drv_index]->dev;
+       device_initialize(dev);
+       dev->type = &cciss_dev_type;
+       dev->bus = &cciss_bus_type;
+       dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
+       dev->parent = &h->dev;
+       h->drv[drv_index]->device_initialized = 1;
+       return device_add(dev);
 }
 
 /*
  * Remove sysfs entries for a logical drive.
  */
-static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv)
+static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
+       int ctlr_exiting)
 {
-       device_del(&drv->dev);
+       struct device *dev = &h->drv[drv_index]->dev;
+
+       /* special case for c*d0, we only destroy it on controller exit */
+       if (drv_index == 0 && !ctlr_exiting)
+               return;
+
+       device_del(dev);
+       put_device(dev); /* the "final" put. */
+       h->drv[drv_index] = NULL;
 }
 
 /*
@@ -751,7 +899,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
        printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
 #endif                         /* CCISS_DEBUG */
 
-       if (host->busy_initializing || drv->busy_configuring)
+       if (drv->busy_configuring)
                return -EBUSY;
        /*
         * Root is allowed to open raw volume zero even if it's not configured
@@ -767,7 +915,8 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
                        if (MINOR(bdev->bd_dev) & 0x0f) {
                                return -ENXIO;
                                /* if it is, make sure we have a LUN ID */
-                       } else if (drv->LunID == 0) {
+                       } else if (memcmp(drv->LunID, CTLR_LUNID,
+                               sizeof(drv->LunID))) {
                                return -ENXIO;
                        }
                }
@@ -1132,12 +1281,13 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
        case CCISS_DEREGDISK:
        case CCISS_REGNEWD:
        case CCISS_REVALIDVOLS:
-               return rebuild_lun_table(host, 0);
+               return rebuild_lun_table(host, 0, 1);
 
        case CCISS_GETLUNINFO:{
                        LogvolInfo_struct luninfo;
 
-                       luninfo.LunID = drv->LunID;
+                       memcpy(&luninfo.LunID, drv->LunID,
+                               sizeof(luninfo.LunID));
                        luninfo.num_opens = drv->usage_count;
                        luninfo.num_parts = 0;
                        if (copy_to_user(argp, &luninfo,
@@ -1475,7 +1625,10 @@ static void cciss_check_queues(ctlr_info_t *h)
                /* make sure the disk has been added and the drive is real
                 * because this can be called from the middle of init_one.
                 */
-               if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
+               if (!h->drv[curr_queue])
+                       continue;
+               if (!(h->drv[curr_queue]->queue) ||
+                       !(h->drv[curr_queue]->heads))
                        continue;
                blk_start_queue(h->gendisk[curr_queue]->queue);
 
@@ -1532,13 +1685,11 @@ static void cciss_softirq_done(struct request *rq)
        spin_unlock_irqrestore(&h->lock, flags);
 }
 
-static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[],
-       uint32_t log_unit)
+static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
+       unsigned char scsi3addr[], uint32_t log_unit)
 {
-       log_unit = h->drv[log_unit].LunID & 0x03fff;
-       memset(&scsi3addr[4], 0, 4);
-       memcpy(&scsi3addr[0], &log_unit, 4);
-       scsi3addr[3] |= 0x40;
+       memcpy(scsi3addr, h->drv[log_unit]->LunID,
+               sizeof(h->drv[log_unit]->LunID));
 }
 
 /* This function gets the SCSI vendor, model, and revision of a logical drive
@@ -1615,16 +1766,23 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
        return;
 }
 
-static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
+/*
+ * cciss_add_disk sets up the block device queue for a logical drive
+ */
+static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
                                int drv_index)
 {
        disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+       if (!disk->queue)
+               goto init_queue_failure;
        sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
        disk->major = h->major;
        disk->first_minor = drv_index << NWD_SHIFT;
        disk->fops = &cciss_fops;
-       disk->private_data = &h->drv[drv_index];
-       disk->driverfs_dev = &h->drv[drv_index].dev;
+       if (cciss_create_ld_sysfs_entry(h, drv_index))
+               goto cleanup_queue;
+       disk->private_data = h->drv[drv_index];
+       disk->driverfs_dev = &h->drv[drv_index]->dev;
 
        /* Set up queue information */
        blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1642,14 +1800,21 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
        disk->queue->queuedata = h;
 
        blk_queue_logical_block_size(disk->queue,
-                                    h->drv[drv_index].block_size);
+                                    h->drv[drv_index]->block_size);
 
        /* Make sure all queue data is written out before */
-       /* setting h->drv[drv_index].queue, as setting this */
+       /* setting h->drv[drv_index]->queue, as setting this */
        /* allows the interrupt handler to start the queue */
        wmb();
-       h->drv[drv_index].queue = disk->queue;
+       h->drv[drv_index]->queue = disk->queue;
        add_disk(disk);
+       return 0;
+
+cleanup_queue:
+       blk_cleanup_queue(disk->queue);
+       disk->queue = NULL;
+init_queue_failure:
+       return -1;
 }
 
 /* This function will check the usage_count of the drive to be updated/added.
@@ -1662,7 +1827,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
  * is also the controller node.  Any changes to disk 0 will show up on
  * the next reboot.
  */
-static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
+static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
+       int via_ioctl)
 {
        ctlr_info_t *h = hba[ctlr];
        struct gendisk *disk;
@@ -1672,21 +1838,13 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
        unsigned long flags = 0;
        int ret = 0;
        drive_info_struct *drvinfo;
-       int was_only_controller_node;
 
        /* Get information about the disk and modify the driver structure */
        inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-       drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL);
+       drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
        if (inq_buff == NULL || drvinfo == NULL)
                goto mem_msg;
 
-       /* See if we're trying to update the "controller node"
-        * this will happen the when the first logical drive gets
-        * created by ACU.
-        */
-       was_only_controller_node = (drv_index == 0 &&
-                               h->drv[0].raid_level == -1);
-
        /* testing to see if 16-byte CDBs are already being used */
        if (h->cciss_read == CCISS_READ_16) {
                cciss_read_capacity_16(h->ctlr, drv_index, 1,
@@ -1719,16 +1877,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
                                drvinfo->model, drvinfo->rev);
        cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
                        sizeof(drvinfo->serial_no));
+       /* Save the lunid in case we deregister the disk, below. */
+       memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
+               sizeof(drvinfo->LunID));
 
        /* Is it the same disk we already know, and nothing's changed? */
-       if (h->drv[drv_index].raid_level != -1 &&
+       if (h->drv[drv_index]->raid_level != -1 &&
                ((memcmp(drvinfo->serial_no,
-                               h->drv[drv_index].serial_no, 16) == 0) &&
-               drvinfo->block_size == h->drv[drv_index].block_size &&
-               drvinfo->nr_blocks == h->drv[drv_index].nr_blocks &&
-               drvinfo->heads == h->drv[drv_index].heads &&
-               drvinfo->sectors == h->drv[drv_index].sectors &&
-               drvinfo->cylinders == h->drv[drv_index].cylinders))
+                               h->drv[drv_index]->serial_no, 16) == 0) &&
+               drvinfo->block_size == h->drv[drv_index]->block_size &&
+               drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
+               drvinfo->heads == h->drv[drv_index]->heads &&
+               drvinfo->sectors == h->drv[drv_index]->sectors &&
+               drvinfo->cylinders == h->drv[drv_index]->cylinders))
                        /* The disk is unchanged, nothing to update */
                        goto freeret;
 
@@ -1738,18 +1899,17 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
         * If the disk already exists then deregister it before proceeding
         * (unless it's the first disk (for the controller node).
         */
-       if (h->drv[drv_index].raid_level != -1 && drv_index != 0) {
+       if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
                printk(KERN_WARNING "disk %d has changed.\n", drv_index);
                spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-               h->drv[drv_index].busy_configuring = 1;
+               h->drv[drv_index]->busy_configuring = 1;
                spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 
-               /* deregister_disk sets h->drv[drv_index].queue = NULL
+               /* deregister_disk sets h->drv[drv_index]->queue = NULL
                 * which keeps the interrupt handler from starting
                 * the queue.
                 */
-               ret = deregister_disk(h, drv_index, 0);
-               h->drv[drv_index].busy_configuring = 0;
+               ret = deregister_disk(h, drv_index, 0, via_ioctl);
        }
 
        /* If the disk is in use return */
@@ -1757,22 +1917,31 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
                goto freeret;
 
        /* Save the new information from cciss_geometry_inquiry
-        * and serial number inquiry.
+        * and serial number inquiry.  If the disk was deregistered
+        * above, then h->drv[drv_index] will be NULL.
         */
-       h->drv[drv_index].block_size = drvinfo->block_size;
-       h->drv[drv_index].nr_blocks = drvinfo->nr_blocks;
-       h->drv[drv_index].heads = drvinfo->heads;
-       h->drv[drv_index].sectors = drvinfo->sectors;
-       h->drv[drv_index].cylinders = drvinfo->cylinders;
-       h->drv[drv_index].raid_level = drvinfo->raid_level;
-       memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
-       memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1);
-       memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1);
-       memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1);
+       if (h->drv[drv_index] == NULL) {
+               drvinfo->device_initialized = 0;
+               h->drv[drv_index] = drvinfo;
+               drvinfo = NULL; /* so it won't be freed below. */
+       } else {
+               /* special case for cxd0 */
+               h->drv[drv_index]->block_size = drvinfo->block_size;
+               h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
+               h->drv[drv_index]->heads = drvinfo->heads;
+               h->drv[drv_index]->sectors = drvinfo->sectors;
+               h->drv[drv_index]->cylinders = drvinfo->cylinders;
+               h->drv[drv_index]->raid_level = drvinfo->raid_level;
+               memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
+               memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
+                       VENDOR_LEN + 1);
+               memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
+               memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
+       }
 
        ++h->num_luns;
        disk = h->gendisk[drv_index];
-       set_capacity(disk, h->drv[drv_index].nr_blocks);
+       set_capacity(disk, h->drv[drv_index]->nr_blocks);
 
        /* If it's not disk 0 (drv_index != 0)
         * or if it was disk 0, but there was previously
@@ -1780,8 +1949,15 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
         * (raid_level == -1) then we want to update the
         * logical drive's information.
         */
-       if (drv_index || first_time)
-               cciss_add_disk(h, disk, drv_index);
+       if (drv_index || first_time) {
+               if (cciss_add_disk(h, disk, drv_index) != 0) {
+                       cciss_free_gendisk(h, drv_index);
+                       cciss_free_drive_info(h, drv_index);
+                       printk(KERN_WARNING "cciss:%d could not update "
+                               "disk %d\n", h->ctlr, drv_index);
+                       --h->num_luns;
+               }
+       }
 
 freeret:
        kfree(inq_buff);
@@ -1793,28 +1969,70 @@ mem_msg:
 }
 
 /* This function will find the first index of the controller's drive array
- * that has a -1 for the raid_level and will return that index.  This is
- * where new drives will be added.  If the index to be returned is greater
- * than the highest_lun index for the controller then highest_lun is set
- * to this new index.  If there are no available indexes then -1 is returned.
- * "controller_node" is used to know if this is a real logical drive, or just
- * the controller node, which determines if this counts towards highest_lun.
+ * that has a null drv pointer, allocate the drive info struct for it, and
+ * return that index.  This is where new drives will be added.
+ * If the index to be returned is greater than the highest_lun index for
+ * the controller then highest_lun is set to this new index.
+ * If there are no available indexes or if the allocation fails, then -1
+ * is returned.  "controller_node" is used to know if this is a real
+ * logical drive, or just the controller node, which determines if this
+ * counts towards highest_lun.
  */
-static int cciss_find_free_drive_index(int ctlr, int controller_node)
+static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
 {
        int i;
+       drive_info_struct *drv;
 
+       /* Search for an empty slot for our drive info */
        for (i = 0; i < CISS_MAX_LUN; i++) {
-               if (hba[ctlr]->drv[i].raid_level == -1) {
-                       if (i > hba[ctlr]->highest_lun)
-                               if (!controller_node)
-                                       hba[ctlr]->highest_lun = i;
+
+               /* if not cxd0 case, and it's occupied, skip it. */
+               if (h->drv[i] && i != 0)
+                       continue;
+               /*
+                * If it's cxd0 case, and drv is alloc'ed already, and a
+                * disk is configured there, skip it.
+                */
+               if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
+                       continue;
+
+               /*
+                * We've found an empty slot.  Update highest_lun
+                * provided this isn't just the fake cxd0 controller node.
+                */
+               if (i > h->highest_lun && !controller_node)
+                       h->highest_lun = i;
+
+               /* If adding a real disk at cxd0, and it's already alloc'ed */
+               if (i == 0 && h->drv[i] != NULL)
                        return i;
-               }
+
+               /*
+                * Found an empty slot, not already alloc'ed.  Allocate it.
+                * Mark it with raid_level == -1, so we know it's new later on.
+                */
+               drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+               if (!drv)
+                       return -1;
+               drv->raid_level = -1; /* so we know it's new */
+               h->drv[i] = drv;
+               return i;
        }
        return -1;
 }
 
+static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
+{
+       kfree(h->drv[drv_index]);
+       h->drv[drv_index] = NULL;
+}
+
+static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
+{
+       put_disk(h->gendisk[drv_index]);
+       h->gendisk[drv_index] = NULL;
+}
+
 /* cciss_add_gendisk finds a free hba[]->drv structure
  * and allocates a gendisk if needed, and sets the lunid
  * in the drvinfo structure.   It returns the index into
@@ -1824,13 +2042,15 @@ static int cciss_find_free_drive_index(int ctlr, int controller_node)
  * a means to talk to the controller in case no logical
  * drives have yet been configured.
  */
-static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
+static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
+       int controller_node)
 {
        int drv_index;
 
-       drv_index = cciss_find_free_drive_index(h->ctlr, controller_node);
+       drv_index = cciss_alloc_drive_info(h, controller_node);
        if (drv_index == -1)
                return -1;
+
        /*Check if the gendisk needs to be allocated */
        if (!h->gendisk[drv_index]) {
                h->gendisk[drv_index] =
@@ -1839,23 +2059,24 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
                        printk(KERN_ERR "cciss%d: could not "
                                "allocate a new disk %d\n",
                                h->ctlr, drv_index);
-                       return -1;
+                       goto err_free_drive_info;
                }
        }
-       h->drv[drv_index].LunID = lunid;
-       if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index))
+       memcpy(h->drv[drv_index]->LunID, lunid,
+               sizeof(h->drv[drv_index]->LunID));
+       if (cciss_create_ld_sysfs_entry(h, drv_index))
                goto err_free_disk;
-
        /* Don't need to mark this busy because nobody */
        /* else knows about this disk yet to contend */
        /* for access to it. */
-       h->drv[drv_index].busy_configuring = 0;
+       h->drv[drv_index]->busy_configuring = 0;
        wmb();
        return drv_index;
 
 err_free_disk:
-       put_disk(h->gendisk[drv_index]);
-       h->gendisk[drv_index] = NULL;
+       cciss_free_gendisk(h, drv_index);
+err_free_drive_info:
+       cciss_free_drive_info(h, drv_index);
        return -1;
 }
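
cciss_add_gendisk() above (and cciss_add_disk() earlier) unwind failures with the usual goto-label idiom: each label frees exactly what was acquired before the failing step, in reverse order. A stripped-down userspace sketch of that idiom, with invented names and nothing driver-specific:

#include <stdio.h>
#include <stdlib.h>

static int acquire_two(void)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto err_a;
	b = malloc(16);
	if (!b)
		goto err_b;

	printf("both resources acquired\n");
	free(b);
	free(a);
	return 0;

err_b:
	free(a);		/* undo step one only */
err_a:
	return -1;		/* nothing acquired yet, nothing to undo */
}

int main(void)
{
	return acquire_two() ? 1 : 0;
}

The payoff in the driver is the same: one success path, and one cleanup label per resource (gendisk, drive_info, request queue).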
 
@@ -1872,21 +2093,25 @@ static void cciss_add_controller_node(ctlr_info_t *h)
        if (h->gendisk[0] != NULL) /* already did this? Then bail. */
                return;
 
-       drv_index = cciss_add_gendisk(h, 0, 1);
-       if (drv_index == -1) {
-               printk(KERN_WARNING "cciss%d: could not "
-                       "add disk 0.\n", h->ctlr);
-               return;
-       }
-       h->drv[drv_index].block_size = 512;
-       h->drv[drv_index].nr_blocks = 0;
-       h->drv[drv_index].heads = 0;
-       h->drv[drv_index].sectors = 0;
-       h->drv[drv_index].cylinders = 0;
-       h->drv[drv_index].raid_level = -1;
-       memset(h->drv[drv_index].serial_no, 0, 16);
+       drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
+       if (drv_index == -1)
+               goto error;
+       h->drv[drv_index]->block_size = 512;
+       h->drv[drv_index]->nr_blocks = 0;
+       h->drv[drv_index]->heads = 0;
+       h->drv[drv_index]->sectors = 0;
+       h->drv[drv_index]->cylinders = 0;
+       h->drv[drv_index]->raid_level = -1;
+       memset(h->drv[drv_index]->serial_no, 0, 16);
        disk = h->gendisk[drv_index];
-       cciss_add_disk(h, disk, drv_index);
+       if (cciss_add_disk(h, disk, drv_index) == 0)
+               return;
+       cciss_free_gendisk(h, drv_index);
+       cciss_free_drive_info(h, drv_index);
+error:
+       printk(KERN_WARNING "cciss%d: could not "
+               "add disk 0.\n", h->ctlr);
+       return;
 }
 
 /* This function will add and remove logical drives from the Logical
@@ -1897,7 +2122,8 @@ static void cciss_add_controller_node(ctlr_info_t *h)
  * INPUT
  * h           = The controller to perform the operations on
  */
-static int rebuild_lun_table(ctlr_info_t *h, int first_time)
+static int rebuild_lun_table(ctlr_info_t *h, int first_time,
+       int via_ioctl)
 {
        int ctlr = h->ctlr;
        int num_luns;
@@ -1907,7 +2133,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
        int i;
        int drv_found;
        int drv_index = 0;
-       __u32 lunid = 0;
+       unsigned char lunid[8] = CTLR_LUNID;
        unsigned long flags;
 
        if (!capable(CAP_SYS_RAWIO))
@@ -1960,13 +2186,13 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
                drv_found = 0;
 
                /* skip holes in the array from already deleted drives */
-               if (h->drv[i].raid_level == -1)
+               if (h->drv[i] == NULL)
                        continue;
 
                for (j = 0; j < num_luns; j++) {
-                       memcpy(&lunid, &ld_buff->LUN[j][0], 4);
-                       lunid = le32_to_cpu(lunid);
-                       if (h->drv[i].LunID == lunid) {
+                       memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
+                       if (memcmp(h->drv[i]->LunID, lunid,
+                               sizeof(lunid)) == 0) {
                                drv_found = 1;
                                break;
                        }
@@ -1974,11 +2200,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
                if (!drv_found) {
                        /* Deregister it from the OS, it's gone. */
                        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-                       h->drv[i].busy_configuring = 1;
+                       h->drv[i]->busy_configuring = 1;
                        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-                       return_code = deregister_disk(h, i, 1);
-                       cciss_destroy_ld_sysfs_entry(&h->drv[i]);
-                       h->drv[i].busy_configuring = 0;
+                       return_code = deregister_disk(h, i, 1, via_ioctl);
+                       if (h->drv[i] != NULL)
+                               h->drv[i]->busy_configuring = 0;
                }
        }
 
@@ -1992,17 +2218,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
 
                drv_found = 0;
 
-               memcpy(&lunid, &ld_buff->LUN[i][0], 4);
-               lunid = le32_to_cpu(lunid);
-
+               memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
                /* Find if the LUN is already in the drive array
                 * of the driver.  If so then update its info
                 * if not in use.  If it does not exist then find
                 * the first free index and add it.
                 */
                for (j = 0; j <= h->highest_lun; j++) {
-                       if (h->drv[j].raid_level != -1 &&
-                               h->drv[j].LunID == lunid) {
+                       if (h->drv[j] != NULL &&
+                               memcmp(h->drv[j]->LunID, lunid,
+                                       sizeof(h->drv[j]->LunID)) == 0) {
                                drv_index = j;
                                drv_found = 1;
                                break;
@@ -2015,7 +2240,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
                        if (drv_index == -1)
                                goto freeret;
                }
-               cciss_update_drive_info(ctlr, drv_index, first_time);
+               cciss_update_drive_info(ctlr, drv_index, first_time,
+                       via_ioctl);
        }               /* end for */
 
 freeret:
@@ -2032,6 +2258,25 @@ mem_msg:
        goto freeret;
 }
 
+static void cciss_clear_drive_info(drive_info_struct *drive_info)
+{
+       /* zero out the disk size info */
+       drive_info->nr_blocks = 0;
+       drive_info->block_size = 0;
+       drive_info->heads = 0;
+       drive_info->sectors = 0;
+       drive_info->cylinders = 0;
+       drive_info->raid_level = -1;
+       memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
+       memset(drive_info->model, 0, sizeof(drive_info->model));
+       memset(drive_info->rev, 0, sizeof(drive_info->rev));
+       memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
+       /*
+        * don't clear the LUNID though, we need to remember which
+        * one this one is.
+        */
+}
+
 /* This function will deregister the disk and its queue from the
  * kernel.  It must be called with the controller lock held and the
  * drv structure's busy_configuring flag set.  Its parameters are:
@@ -2046,43 +2291,48 @@ mem_msg:
  *             the disk in preparation for re-adding it.  In this case
  *             the highest_lun should be left unchanged and the LunID
  *             should not be cleared.
+ * via_ioctl
+ *    This indicates whether we've reached this path via ioctl.
+ *    This affects the maximum usage count allowed for c0d0 to be messed with.
+ *    If this path is reached via ioctl(), then the max_usage_count will
+ *    be 1, as the process calling ioctl() has got to have the device open.
+ *    If we get here via sysfs, then the max usage count will be zero.
 */
 static int deregister_disk(ctlr_info_t *h, int drv_index,
-                          int clear_all)
+                          int clear_all, int via_ioctl)
 {
        int i;
        struct gendisk *disk;
        drive_info_struct *drv;
+       int recalculate_highest_lun;
 
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
 
-       drv = &h->drv[drv_index];
+       drv = h->drv[drv_index];
        disk = h->gendisk[drv_index];
 
        /* make sure logical volume is NOT is use */
        if (clear_all || (h->gendisk[0] == disk)) {
-               if (drv->usage_count > 1)
+               if (drv->usage_count > via_ioctl)
                        return -EBUSY;
        } else if (drv->usage_count > 0)
                return -EBUSY;
 
+       recalculate_highest_lun = (drv == h->drv[h->highest_lun]);
+
        /* invalidate the devices and deregister the disk.  If it is disk
         * zero do not deregister it but just zero out its values.  This
         * allows us to delete disk zero but keep the controller registered.
         */
        if (h->gendisk[0] != disk) {
                struct request_queue *q = disk->queue;
-               if (disk->flags & GENHD_FL_UP)
+               if (disk->flags & GENHD_FL_UP) {
+                       cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
                        del_gendisk(disk);
-               if (q) {
-                       blk_cleanup_queue(q);
-                       /* Set drv->queue to NULL so that we do not try
-                        * to call blk_start_queue on this queue in the
-                        * interrupt handler
-                        */
-                       drv->queue = NULL;
                }
+               if (q)
+                       blk_cleanup_queue(q);
                /* If clear_all is set then we are deleting the logical
                 * drive, not just refreshing its info.  For drives
                 * other than disk 0 we will call put_disk.  We do not
@@ -2105,34 +2355,20 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
                }
        } else {
                set_capacity(disk, 0);
+               cciss_clear_drive_info(drv);
        }
 
        --h->num_luns;
-       /* zero out the disk size info */
-       drv->nr_blocks = 0;
-       drv->block_size = 0;
-       drv->heads = 0;
-       drv->sectors = 0;
-       drv->cylinders = 0;
-       drv->raid_level = -1;   /* This can be used as a flag variable to
-                                * indicate that this element of the drive
-                                * array is free.
-                                */
-
-       if (clear_all) {
-               /* check to see if it was the last disk */
-               if (drv == h->drv + h->highest_lun) {
-                       /* if so, find the new hightest lun */
-                       int i, newhighest = -1;
-                       for (i = 0; i <= h->highest_lun; i++) {
-                               /* if the disk has size > 0, it is available */
-                               if (h->drv[i].heads)
-                                       newhighest = i;
-                       }
-                       h->highest_lun = newhighest;
-               }
 
-               drv->LunID = 0;
+       /* if it was the last disk, find the new highest lun */
+       if (clear_all && recalculate_highest_lun) {
+               int i, newhighest = -1;
+               for (i = 0; i <= h->highest_lun; i++) {
+                       /* if the disk has size > 0, it is available */
+                       if (h->drv[i] && h->drv[i]->heads)
+                               newhighest = i;
+               }
+               h->highest_lun = newhighest;
        }
        return 0;
 }
@@ -2479,8 +2715,6 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
        } else {                /* Get geometry failed */
                printk(KERN_WARNING "cciss: reading geometry failed\n");
        }
-       printk(KERN_INFO "      heads=%d, sectors=%d, cylinders=%d\n\n",
-              drv->heads, drv->sectors, drv->cylinders);
 }
 
 static void
@@ -2514,9 +2748,6 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
                *total_size = 0;
                *block_size = BLOCK_SIZE;
        }
-       if (*total_size != 0)
-               printk(KERN_INFO "      blocks= %llu block_size= %d\n",
-               (unsigned long long)*total_size+1, *block_size);
        kfree(buf);
 }
 
@@ -2568,7 +2799,8 @@ static int cciss_revalidate(struct gendisk *disk)
        InquiryData_struct *inq_buff = NULL;
 
        for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
-               if (h->drv[logvol].LunID == drv->LunID) {
+               if (memcmp(h->drv[logvol]->LunID, drv->LunID,
+                       sizeof(drv->LunID)) == 0) {
                        FOUND = 1;
                        break;
                }
@@ -3053,8 +3285,7 @@ static void do_cciss_request(struct request_queue *q)
        /* The first 2 bits are reserved for controller error reporting. */
        c->Header.Tag.lower = (c->cmdindex << 3);
        c->Header.Tag.lower |= 0x04;    /* flag for direct lookup. */
-       c->Header.LUN.LogDev.VolId = drv->LunID;
-       c->Header.LUN.LogDev.Mode = 1;
+       memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
        c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
        c->Request.Type.Type = TYPE_CMD;        // It is a command.
        c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3232,20 +3463,121 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+/**
+ * add_to_scan_list() - add controller to rescan queue
+ * @h:               Pointer to the controller.
+ *
+ * Adds the controller to the rescan queue if not already on the queue.
+ *
+ * returns 1 if added to the queue, 0 if skipped (could be on the
+ * queue already, or the controller could be initializing or shutting
+ * down).
+ **/
+static int add_to_scan_list(struct ctlr_info *h)
+{
+       struct ctlr_info *test_h;
+       int found = 0;
+       int ret = 0;
+
+       if (h->busy_initializing)
+               return 0;
+
+       if (!mutex_trylock(&h->busy_shutting_down))
+               return 0;
+
+       mutex_lock(&scan_mutex);
+       list_for_each_entry(test_h, &scan_q, scan_list) {
+               if (test_h == h) {
+                       found = 1;
+                       break;
+               }
+       }
+       if (!found && !h->busy_scanning) {
+               INIT_COMPLETION(h->scan_wait);
+               list_add_tail(&h->scan_list, &scan_q);
+               ret = 1;
+       }
+       mutex_unlock(&scan_mutex);
+       mutex_unlock(&h->busy_shutting_down);
+
+       return ret;
+}
+
+/**
+ * remove_from_scan_list() - remove controller from rescan queue
+ * @h:                    Pointer to the controller.
+ *
+ * Removes the controller from the rescan queue if present. Blocks if
+ * the controller is currently conducting a rescan.
+ **/
+static void remove_from_scan_list(struct ctlr_info *h)
+{
+       struct ctlr_info *test_h, *tmp_h;
+       int scanning = 0;
+
+       mutex_lock(&scan_mutex);
+       list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
+               if (test_h == h) {
+                       list_del(&h->scan_list);
+                       complete_all(&h->scan_wait);
+                       mutex_unlock(&scan_mutex);
+                       return;
+               }
+       }
+       if (h->busy_scanning)
+               scanning = 1;
+       mutex_unlock(&scan_mutex);
+
+       if (scanning)
+               wait_for_completion(&h->scan_wait);
+}
+
+/**
+ * scan_thread() - kernel thread used to rescan controllers
+ * @data:       Ignored.
+ *
+ * A kernel thread used to scan for drive topology changes on
+ * controllers. The thread processes only one controller at a time
+ * using a queue.  Controllers are added to the queue using
+ * add_to_scan_list() and removed from the queue either after being
+ * processed or via remove_from_scan_list().
+ *
+ * returns 0.
+ **/
 static int scan_thread(void *data)
 {
-       ctlr_info_t *h = data;
-       int rc;
-       DECLARE_COMPLETION_ONSTACK(wait);
-       h->rescan_wait = &wait;
+       struct ctlr_info *h;
 
-       for (;;) {
-               rc = wait_for_completion_interruptible(&wait);
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
                if (kthread_should_stop())
                        break;
-               if (!rc)
-                       rebuild_lun_table(h, 0);
+
+               while (1) {
+                       mutex_lock(&scan_mutex);
+                       if (list_empty(&scan_q)) {
+                               mutex_unlock(&scan_mutex);
+                               break;
+                       }
+
+                       h = list_entry(scan_q.next,
+                                      struct ctlr_info,
+                                      scan_list);
+                       list_del(&h->scan_list);
+                       h->busy_scanning = 1;
+                       mutex_unlock(&scan_mutex);
+
+                       if (h) {
+                               rebuild_lun_table(h, 0, 0);
+                               complete_all(&h->scan_wait);
+                               mutex_lock(&scan_mutex);
+                               h->busy_scanning = 0;
+                               mutex_unlock(&scan_mutex);
+                       }
+               }
        }
+
        return 0;
 }
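
Taken together, add_to_scan_list(), remove_from_scan_list() and scan_thread() implement a single worker draining a mutex-protected queue that gets fed from the command-completion path. For orientation only, here is a userspace pthreads analogue of that producer/consumer shape; every type and helper in it is a stand-in (the names mirror the driver's only for readability), and it deliberately leaves out the de-duplication, busy_scanning and scan_wait handshakes the driver needs:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctlr {
	int id;
	struct ctlr *next;		/* singly linked scan queue */
};

static struct ctlr *scan_q;
static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t scan_wake = PTHREAD_COND_INITIALIZER;

static void add_to_scan_list(struct ctlr *c)
{
	pthread_mutex_lock(&scan_mutex);
	c->next = scan_q;		/* no "already queued" check, for brevity */
	scan_q = c;
	pthread_cond_signal(&scan_wake);	/* stands in for wake_up_process() */
	pthread_mutex_unlock(&scan_mutex);
}

static void *scan_thread(void *unused)
{
	struct ctlr *c;

	/* the real thread loops forever; one pass is enough for the demo */
	pthread_mutex_lock(&scan_mutex);
	while (!scan_q)
		pthread_cond_wait(&scan_wake, &scan_mutex);
	c = scan_q;
	scan_q = c->next;
	pthread_mutex_unlock(&scan_mutex);

	printf("rescanning controller %d\n", c->id);	/* rebuild_lun_table() stand-in */
	free(c);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct ctlr *c = malloc(sizeof(*c));

	c->id = 0;
	c->next = NULL;
	pthread_create(&tid, NULL, scan_thread, NULL);
	add_to_scan_list(c);
	pthread_join(tid, NULL);
	return 0;
}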
 
@@ -3268,8 +3600,8 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
        case REPORT_LUNS_CHANGED:
                printk(KERN_WARNING "cciss%d: report LUN data "
                        "changed\n", h->ctlr);
-               if (h->rescan_wait)
-                       complete(h->rescan_wait);
+               add_to_scan_list(h);
+               wake_up_process(cciss_scan_thread);
                return 1;
        break;
        case POWER_OR_RESET:
@@ -3489,7 +3821,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
                if (scratchpad == CCISS_FIRMWARE_READY)
                        break;
                set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(HZ / 10);      /* wait 100ms */
+               schedule_timeout(msecs_to_jiffies(100));        /* wait 100ms */
        }
        if (scratchpad != CCISS_FIRMWARE_READY) {
                printk(KERN_WARNING "cciss: Board not ready.  Timed out.\n");
@@ -3615,7 +3947,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
                        break;
                /* delay and try again */
                set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(10);
+               schedule_timeout(msecs_to_jiffies(1));
        }
 
 #ifdef CCISS_DEBUG
@@ -3669,15 +4001,16 @@ Enomem:
        return -1;
 }
 
-static void free_hba(int i)
+static void free_hba(int n)
 {
-       ctlr_info_t *p = hba[i];
-       int n;
+       ctlr_info_t *h = hba[n];
+       int i;
 
-       hba[i] = NULL;
-       for (n = 0; n < CISS_MAX_LUN; n++)
-               put_disk(p->gendisk[n]);
-       kfree(p);
+       hba[n] = NULL;
+       for (i = 0; i < h->highest_lun + 1; i++)
+               if (h->gendisk[i] != NULL)
+                       put_disk(h->gendisk[i]);
+       kfree(h);
 }
 
 /* Send a message CDB to the firmware. */
@@ -3918,6 +4251,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        hba[i]->busy_initializing = 1;
        INIT_HLIST_HEAD(&hba[i]->cmpQ);
        INIT_HLIST_HEAD(&hba[i]->reqQ);
+       mutex_init(&hba[i]->busy_shutting_down);
 
        if (cciss_pci_init(hba[i], pdev) != 0)
                goto clean0;
@@ -3926,6 +4260,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        hba[i]->ctlr = i;
        hba[i]->pdev = pdev;
 
+       init_completion(&hba[i]->scan_wait);
+
        if (cciss_create_hba_sysfs_entry(hba[i]))
                goto clean0;
 
@@ -4001,8 +4337,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        hba[i]->num_luns = 0;
        hba[i]->highest_lun = -1;
        for (j = 0; j < CISS_MAX_LUN; j++) {
-               hba[i]->drv[j].raid_level = -1;
-               hba[i]->drv[j].queue = NULL;
+               hba[i]->drv[j] = NULL;
                hba[i]->gendisk[j] = NULL;
        }
 
@@ -4035,14 +4370,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 
        hba[i]->cciss_max_sectors = 2048;
 
+       rebuild_lun_table(hba[i], 1, 0);
        hba[i]->busy_initializing = 0;
-
-       rebuild_lun_table(hba[i], 1);
-       hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i],
-                               "cciss_scan%02d", i);
-       if (IS_ERR(hba[i]->cciss_scan_thread))
-               return PTR_ERR(hba[i]->cciss_scan_thread);
-
        return 1;
 
 clean4:
@@ -4063,12 +4392,7 @@ clean1:
        cciss_destroy_hba_sysfs_entry(hba[i]);
 clean0:
        hba[i]->busy_initializing = 0;
-       /* cleanup any queues that may have been initialized */
-       for (j=0; j <= hba[i]->highest_lun; j++){
-               drive_info_struct *drv = &(hba[i]->drv[j]);
-               if (drv->queue)
-                       blk_cleanup_queue(drv->queue);
-       }
+
        /*
         * Deliberately omit pci_disable_device(): it does something nasty to
         * Smart Array controllers that pci_enable_device does not undo
@@ -4125,8 +4449,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
                return;
        }
 
-       kthread_stop(hba[i]->cciss_scan_thread);
+       mutex_lock(&hba[i]->busy_shutting_down);
 
+       remove_from_scan_list(hba[i]);
        remove_proc_entry(hba[i]->devname, proc_cciss);
        unregister_blkdev(hba[i]->major, hba[i]->devname);
 
@@ -4136,8 +4461,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
                if (disk) {
                        struct request_queue *q = disk->queue;
 
-                       if (disk->flags & GENHD_FL_UP)
+                       if (disk->flags & GENHD_FL_UP) {
+                               cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
                                del_gendisk(disk);
+                       }
                        if (q)
                                blk_cleanup_queue(q);
                }
@@ -4170,6 +4497,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        cciss_destroy_hba_sysfs_entry(hba[i]);
+       mutex_unlock(&hba[i]->busy_shutting_down);
        free_hba(i);
 }
 
@@ -4202,15 +4530,25 @@ static int __init cciss_init(void)
        if (err)
                return err;
 
+       /* Start the scan thread */
+       cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
+       if (IS_ERR(cciss_scan_thread)) {
+               err = PTR_ERR(cciss_scan_thread);
+               goto err_bus_unregister;
+       }
+
        /* Register for our PCI devices */
        err = pci_register_driver(&cciss_pci_driver);
        if (err)
-               goto err_bus_register;
+               goto err_thread_stop;
 
-       return 0;
+       return err;
 
-err_bus_register:
+err_thread_stop:
+       kthread_stop(cciss_scan_thread);
+err_bus_unregister:
        bus_unregister(&cciss_bus_type);
+
        return err;
 }
 
@@ -4227,6 +4565,7 @@ static void __exit cciss_cleanup(void)
                        cciss_remove_one(hba[i]->pdev);
                }
        }
+       kthread_stop(cciss_scan_thread);
        remove_proc_entry("driver/cciss", NULL);
        bus_unregister(&cciss_bus_type);
 }
index 06a5db25b298ebf400c99ac1c8fa200e208d10ae..31524cf42c77360645424a9c85e68a9651ba8422 100644 (file)
@@ -2,6 +2,7 @@
 #define CCISS_H
 
 #include <linux/genhd.h>
+#include <linux/mutex.h>
 
 #include "cciss_cmd.h"
 
@@ -29,7 +30,7 @@ struct access_method {
 };
 typedef struct _drive_info_struct
 {
-       __u32   LunID;  
+       unsigned char LunID[8];
        int     usage_count;
        struct request_queue *queue;
        sector_t nr_blocks;
@@ -51,6 +52,7 @@ typedef struct _drive_info_struct
        char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
        char model[MODEL_LEN + 1];   /* SCSI model string */
        char rev[REV_LEN + 1];       /* SCSI revision string */
+       char device_initialized;     /* indicates whether dev is initialized */
 } drive_info_struct;
 
 struct ctlr_info 
@@ -86,7 +88,7 @@ struct ctlr_info
        BYTE    cciss_read_capacity;
 
        // information about each logical volume
-       drive_info_struct drv[CISS_MAX_LUN];
+       drive_info_struct *drv[CISS_MAX_LUN];
 
        struct access_method access;
 
@@ -108,6 +110,8 @@ struct ctlr_info
        int                     nr_frees; 
        int                     busy_configuring;
        int                     busy_initializing;
+       int                     busy_scanning;
+       struct mutex            busy_shutting_down;
 
        /* This element holds the zero based queue number of the last
         * queue to be started.  It is used for fairness.
@@ -122,8 +126,8 @@ struct ctlr_info
        /* and saved for later processing */
 #endif
        unsigned char alive;
-       struct completion *rescan_wait;
-       struct task_struct *cciss_scan_thread;
+       struct list_head scan_list;
+       struct completion scan_wait;
        struct device dev;
 };
 
index b82d438e2607cc2c430e4b170470ac29e47c3fb2..6422651ec364197ed9418f4a3bafb966208a8417 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/blkpg.h>
 #include <linux/timer.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/hdreg.h>
 #include <linux/spinlock.h>
@@ -177,7 +178,6 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
 
 #ifdef CONFIG_PROC_FS
 static void ida_procinit(int i);
-static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
 #else
 static void ida_procinit(int i) {}
 #endif
@@ -206,6 +206,7 @@ static const struct block_device_operations ida_fops  = {
 #ifdef CONFIG_PROC_FS
 
 static struct proc_dir_entry *proc_array;
+static const struct file_operations ida_proc_fops;
 
 /*
  * Get us a file in /proc/array that says something about each controller.
@@ -218,19 +219,16 @@ static void __init ida_procinit(int i)
                if (!proc_array) return;
        }
 
-       create_proc_read_entry(hba[i]->devname, 0, proc_array,
-                              ida_proc_get_info, hba[i]);
+       proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
 }
 
 /*
  * Report information about this controller.
  */
-static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+static int ida_proc_show(struct seq_file *m, void *v)
 {
-       off_t pos = 0;
-       off_t len = 0;
-       int size, i, ctlr;
-       ctlr_info_t *h = (ctlr_info_t*)data;
+       int i, ctlr;
+       ctlr_info_t *h = (ctlr_info_t*)m->private;
        drv_info_t *drv;
 #ifdef CPQ_PROC_PRINT_QUEUES
        cmdlist_t *c;
@@ -238,7 +236,7 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt
 #endif
 
        ctlr = h->ctlr;
-       size = sprintf(buffer, "%s:  Compaq %s Controller\n"
+       seq_printf(m, "%s:  Compaq %s Controller\n"
                "       Board ID: 0x%08lx\n"
                "       Firmware Revision: %c%c%c%c\n"
                "       Controller Sig: 0x%08lx\n"
@@ -258,55 +256,54 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt
                h->log_drives, h->phys_drives,
                h->Qdepth, h->maxQsinceinit);
 
-       pos += size; len += size;
-       
-       size = sprintf(buffer+len, "Logical Drive Info:\n");
-       pos += size; len += size;
+       seq_puts(m, "Logical Drive Info:\n");
 
        for(i=0; i<h->log_drives; i++) {
                drv = &h->drv[i];
-               size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
+               seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
                                ctlr, i, drv->blk_size, drv->nr_blks);
-               pos += size; len += size;
        }
 
 #ifdef CPQ_PROC_PRINT_QUEUES
        spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); 
-       size = sprintf(buffer+len, "\nCurrent Queues:\n");
-       pos += size; len += size;
+       seq_puts(m, "\nCurrent Queues:\n");
 
        c = h->reqQ;
-       size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
+       seq_printf(m, "reqQ = %p", c);
        if (c) c=c->next;
        while(c && c != h->reqQ) {
-               size = sprintf(buffer+len, "->%p", c);
-               pos += size; len += size;
+               seq_printf(m, "->%p", c);
                c=c->next;
        }
 
        c = h->cmpQ;
-       size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
+       seq_printf(m, "\ncmpQ = %p", c);
        if (c) c=c->next;
        while(c && c != h->cmpQ) {
-               size = sprintf(buffer+len, "->%p", c);
-               pos += size; len += size;
+               seq_printf(m, "->%p", c);
                c=c->next;
        }
 
-       size = sprintf(buffer+len, "\n"); pos += size; len += size;
+       seq_putc(m, '\n');
        spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 
 #endif
-       size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
+       seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
                        h->nr_allocs, h->nr_frees);
-       pos += size; len += size;
-
-       *eof = 1;
-       *start = buffer+offset;
-       len -= offset;
-       if (len>length)
-               len = length;
-       return len;
+       return 0;
+}
+
+static int ida_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ida_proc_show, PDE(inode)->data);
 }
+
+static const struct file_operations ida_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ida_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 #endif /* CONFIG_PROC_FS */
 
 module_param_array(eisa, int, NULL, 0);
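
The conversion above is the standard single_open()/seq_file recipe of this kernel generation: a *_show() routine that emits everything through seq_* helpers, an open() that binds it to the entry's private data via PDE(inode)->data, and proc_create_data() in place of create_proc_read_entry(). A bare-bones sketch of the same recipe follows; all names and the "seqfile_demo" entry are made up, it illustrates the pattern rather than the cpqarray code, and it would be built as an out-of-tree module against a kernel of this vintage:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello from %s\n", (char *)m->private);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is whatever was passed to proc_create_data() */
	return single_open(file, demo_show, PDE(inode)->data);
}

static const struct file_operations demo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	proc_create_data("seqfile_demo", 0, NULL, &demo_proc_fops, "demo");
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("seqfile_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");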
index 60ab75104da93b4aae627ee292cf2aff2a231686..1c129211302d34e15c6fce2a4475fce8b15a32d5 100644 (file)
@@ -217,7 +217,7 @@ static const struct agp_bridge_driver parisc_agp_driver = {
        .configure              = parisc_agp_configure,
        .fetch_size             = parisc_agp_fetch_size,
        .tlb_flush              = parisc_agp_tlbflush,
-       .mask_memory            = parisc_agp_page_mask_memory,
+       .mask_memory            = parisc_agp_mask_memory,
        .masks                  = parisc_agp_masks,
        .agp_enable             = parisc_agp_enable,
        .cache_flush            = global_cache_flush,
index aaca40283be932811cefe4ed3c1109100d990f81..4f568cb9af3f90b62c52de1af32170ee4650d29d 100644 (file)
@@ -393,7 +393,7 @@ static int apm_open(struct inode * inode, struct file * filp)
        return as ? 0 : -ENOMEM;
 }
 
-static struct file_operations apm_bios_fops = {
+static const struct file_operations apm_bios_fops = {
        .owner          = THIS_MODULE,
        .read           = apm_read,
        .poll           = apm_poll,
index e3dd24bff5143206df8c4345fc1fce56fe492cda..836d4f0a876f5ca53b03f8314acea1a0f23f5ca3 100644 (file)
@@ -217,7 +217,7 @@ static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
 # define bfin_otp_ioctl NULL
 #endif
 
-static struct file_operations bfin_otp_fops = {
+static const struct file_operations bfin_otp_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = bfin_otp_ioctl,
        .read           = bfin_otp_read,
index df5038bbcbc28647cc64eed72f0fee6fbc8c2568..4254457d391186429590769d05334d01c459bd75 100644 (file)
@@ -3354,7 +3354,7 @@ static int __init cy_detect_isa(void)
                        continue;
                }
 #ifdef MODULE
-               if (isparam && irq[i])
+               if (isparam && i < NR_CARDS && irq[i])
                        cy_isa_irq = irq[i];
                else
 #endif
index 52e06589821d8ed53edef8bf96e02e0a95a9d430..045c930e6320c89e9ac2f28fadee74e1c3607790 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/errno.h>       /* for -EBUSY */
 #include <linux/ioport.h>      /* for request_region */
 #include <linux/delay.h>       /* for loops_per_jiffy */
+#include <linux/sched.h>
 #include <linux/smp_lock.h>    /* cycle_kernel_lock() */
 #include <asm/io.h>            /* for inb_p, outb_p, inb, outb, etc. */
 #include <asm/uaccess.h>       /* for get_user, etc. */
index 00dd3de1be51ef341c7c7a3819f0537d529a2ebd..06aad0831c73f5ec4c08746798fba27f59c8ff98 100644 (file)
@@ -116,7 +116,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
        if (!res)
                return -ENOENT;
 
-       mem = request_mem_region(res->start, res->end - res->start + 1,
+       mem = request_mem_region(res->start, resource_size(res),
                                 pdev->name);
        if (mem == NULL) {
                ret = -EBUSY;
@@ -124,7 +124,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
        }
 
        dev_set_drvdata(&pdev->dev, mem);
-       rng_base = ioremap(res->start, res->end - res->start + 1);
+       rng_base = ioremap(res->start, resource_size(res));
        if (!rng_base) {
                ret = -ENOMEM;
                goto err_ioremap;
index 41fc11dc921c70260e89551f111a8f73d97a3b10..65545de3dbf4ded375e55467fcaf5a8360000dfd 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/errno.h>
 #include <asm/system.h>
 #include <linux/poll.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/ipmi.h>
index 09050797c76a26546ca5fa2d4ed3e165cc9455b9..ec5e3f8df648ba1e719836a9b8349246005ba685 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/errno.h>
 #include <asm/system.h>
 #include <linux/poll.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
index 53761cefa9154bb541d676b432550d61fc579efb..e066c4fdf81be774e4be3317b8aa2e26a3f65d61 100644 (file)
@@ -261,6 +261,9 @@ done:
        return 0;
 }
 
+/* Traditional BSD devices */
+#ifdef CONFIG_LEGACY_PTYS
+
 static int pty_install(struct tty_driver *driver, struct tty_struct *tty)
 {
        struct tty_struct *o_tty;
@@ -310,24 +313,6 @@ free_mem_out:
        return -ENOMEM;
 }
 
-
-static const struct tty_operations pty_ops = {
-       .install = pty_install,
-       .open = pty_open,
-       .close = pty_close,
-       .write = pty_write,
-       .write_room = pty_write_room,
-       .flush_buffer = pty_flush_buffer,
-       .chars_in_buffer = pty_chars_in_buffer,
-       .unthrottle = pty_unthrottle,
-       .set_termios = pty_set_termios,
-       .resize = pty_resize
-};
-
-/* Traditional BSD devices */
-#ifdef CONFIG_LEGACY_PTYS
-static struct tty_driver *pty_driver, *pty_slave_driver;
-
 static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
                         unsigned int cmd, unsigned long arg)
 {
@@ -341,7 +326,12 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
 static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
 module_param(legacy_count, int, 0);
 
-static const struct tty_operations pty_ops_bsd = {
+/*
+ * The master side of a pty can do TIOCSPTLCK and thus
+ * has pty_bsd_ioctl.
+ */
+static const struct tty_operations master_pty_ops_bsd = {
+       .install = pty_install,
        .open = pty_open,
        .close = pty_close,
        .write = pty_write,
@@ -354,8 +344,23 @@ static const struct tty_operations pty_ops_bsd = {
        .resize = pty_resize
 };
 
+static const struct tty_operations slave_pty_ops_bsd = {
+       .install = pty_install,
+       .open = pty_open,
+       .close = pty_close,
+       .write = pty_write,
+       .write_room = pty_write_room,
+       .flush_buffer = pty_flush_buffer,
+       .chars_in_buffer = pty_chars_in_buffer,
+       .unthrottle = pty_unthrottle,
+       .set_termios = pty_set_termios,
+       .resize = pty_resize
+};
+
 static void __init legacy_pty_init(void)
 {
+       struct tty_driver *pty_driver, *pty_slave_driver;
+
        if (legacy_count <= 0)
                return;
 
@@ -383,7 +388,7 @@ static void __init legacy_pty_init(void)
        pty_driver->init_termios.c_ospeed = 38400;
        pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW;
        pty_driver->other = pty_slave_driver;
-       tty_set_operations(pty_driver, &pty_ops);
+       tty_set_operations(pty_driver, &master_pty_ops_bsd);
 
        pty_slave_driver->owner = THIS_MODULE;
        pty_slave_driver->driver_name = "pty_slave";
@@ -399,7 +404,7 @@ static void __init legacy_pty_init(void)
        pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS |
                                        TTY_DRIVER_REAL_RAW;
        pty_slave_driver->other = pty_driver;
-       tty_set_operations(pty_slave_driver, &pty_ops);
+       tty_set_operations(pty_slave_driver, &slave_pty_ops_bsd);
 
        if (tty_register_driver(pty_driver))
                panic("Couldn't register pty driver");
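The hunks above replace the single shared pty_ops table with separate master_pty_ops_bsd and slave_pty_ops_bsd tables so that, as the new comment says, only the master side carries the BSD ioctl handler (TIOCSPTLCK). A minimal standalone C sketch of that "shared handlers, separate ops tables" pattern; the names and the ioctl number below are illustrative, not kernel code:

#include <stdio.h>

/* Two function-pointer tables sharing most handlers; only the master
 * table has an ioctl entry, mirroring master_pty_ops_bsd vs
 * slave_pty_ops_bsd in the patch above. */
struct chan_ops {
        int (*open)(void);
        int (*ioctl)(unsigned int cmd);         /* NULL on the slave side */
};

static int chan_open(void) { return 0; }

static int master_ioctl(unsigned int cmd)
{
        printf("master handles ioctl 0x%x\n", cmd);
        return 0;
}

static const struct chan_ops master_ops = { .open = chan_open, .ioctl = master_ioctl };
static const struct chan_ops slave_ops  = { .open = chan_open, .ioctl = NULL };

int main(void)
{
        const struct chan_ops *tables[] = { &master_ops, &slave_ops };

        for (int i = 0; i < 2; i++) {
                tables[i]->open();
                if (tables[i]->ioctl)
                        tables[i]->ioctl(0x1234);       /* made-up cmd */
                else
                        printf("slave side: no ioctl handler\n");
        }
        return 0;
}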
index 5942a9d674c0051885bd796fc005b39f14622657..452370af95def5a352dd4753107ce3fd4154fb93 100644 (file)
@@ -220,8 +220,7 @@ static inline int serial_paranoia_check(struct cyclades_port *info, char *name,
                return 1;
        }
 
-       if ((long)info < (long)(&cy_port[0])
-           || (long)(&cy_port[NR_PORTS]) < (long)info) {
+       if (info < &cy_port[0] || info >= &cy_port[NR_PORTS]) {
                printk("Warning: cyclades_port out of range for (%s) in %s\n",
                                name, routine);
                return 1;
@@ -520,15 +519,13 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
                panic("TxInt on debug port!!!");
        }
 #endif
-
-       info = &cy_port[channel];
-
        /* validate the port number (as configured and open) */
        if ((channel < 0) || (NR_PORTS <= channel)) {
                base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
                base_addr[CyTEOIR] = CyNOTRANS;
                return IRQ_HANDLED;
        }
+       info = &cy_port[channel];
        info->last_active = jiffies;
        if (info->tty == 0) {
                base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
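The first hunk above replaces the casts to long with a direct pointer comparison and uses >= so the one-past-the-end address is rejected too; the second hunk only takes &cy_port[channel] after the channel number has been validated. A small standalone sketch of the pointer range check, with illustrative names:

#include <stdio.h>

#define NR_PORTS 4

static int cy_port[NR_PORTS];

/* Valid element pointers lie in [&cy_port[0], &cy_port[NR_PORTS]);
 * comparing pointers directly avoids the (long) casts removed above. */
static int out_of_range(const int *info)
{
        return info < &cy_port[0] || info >= &cy_port[NR_PORTS];
}

int main(void)
{
        printf("last element ok: %d\n", !out_of_range(&cy_port[NR_PORTS - 1]));
        printf("one past end rejected: %d\n", out_of_range(&cy_port[NR_PORTS]));
        return 0;
}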
index aafdbaebc16a8697df6f16efbaadbce255e05d86..feb55075819bab4f7d9f75ce11f7aa7232ba9dc1 100644 (file)
@@ -518,7 +518,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
 static int tty_ldisc_halt(struct tty_struct *tty)
 {
        clear_bit(TTY_LDISC, &tty->flags);
-       return cancel_delayed_work(&tty->buf.work);
+       return cancel_delayed_work_sync(&tty->buf.work);
 }
 
 /**
@@ -756,12 +756,9 @@ void tty_ldisc_hangup(struct tty_struct *tty)
         * N_TTY.
         */
        if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
-               /* Make sure the old ldisc is quiescent */
-               tty_ldisc_halt(tty);
-               flush_scheduled_work();
-
                /* Avoid racing set_ldisc or tty_ldisc_release */
                mutex_lock(&tty->ldisc_mutex);
+               tty_ldisc_halt(tty);
                if (tty->ldisc) {       /* Not yet closed */
                        /* Switch back to N_TTY */
                        tty_ldisc_reinit(tty);
index 29c651ab0d7891f1504cf5a222a11a6f269c261b..6b36ee56e6fe972db14dd1f4785e5cf173468acf 100644 (file)
@@ -981,8 +981,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        goto eperm;
 
                if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg,
-                                               sizeof(struct vt_setactivate)))
-                       return -EFAULT;
+                                       sizeof(struct vt_setactivate))) {
+                       ret = -EFAULT;
+                       goto out;
+               }
                if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
                        ret = -ENXIO;
                else {
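The hunk above turns a direct "return -EFAULT" into "ret = -EFAULT; goto out;" so the VT_SETACTIVATE error path leaves through the function's common exit, where the lock taken at the start of vt_ioctl is released. A minimal sketch of that single-exit pattern with an ordinary pthread mutex standing in for the kernel lock; the function name and error value are illustrative:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_request(int bad_user_buffer)
{
        int ret = 0;

        pthread_mutex_lock(&lock);

        if (bad_user_buffer) {
                ret = -14;              /* stand-in for -EFAULT */
                goto out;               /* do not skip the unlock below */
        }
        /* ... normal processing ... */
out:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("good: %d, bad: %d\n", do_request(0), do_request(1));
        return 0;
}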
index f40ab699860f4645a693f76a239473812e767493..4846d50199f3d1c917d23d2a13aa1108d28cace9 100644 (file)
@@ -559,7 +559,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
        return status;
 }
 
-static struct file_operations hwicap_fops = {
+static const struct file_operations hwicap_fops = {
        .owner = THIS_MODULE,
        .write = hwicap_write,
        .read = hwicap_read,
index abf4a2529f8011c6d4dbd87dc4412c8925dca41c..60697909ebdb00c9e0a4546764fe7e283e4fe800 100644 (file)
@@ -227,7 +227,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
  * cn_proc_mcast_ctl
  * @data: message sent from userspace via the connector
  */
-static void cn_proc_mcast_ctl(struct cn_msg *msg)
+static void cn_proc_mcast_ctl(struct cn_msg *msg,
+                             struct netlink_skb_parms *nsp)
 {
        enum proc_cn_mcast_op *mc_op = NULL;
        int err = 0;
index 4a1dfe1f4ba9c1704b3f1cc56dbc2931a21c32c4..210338ea222f38e408d093f2aceaf9e0c539e5bc 100644 (file)
@@ -78,18 +78,20 @@ void cn_queue_wrapper(struct work_struct *work)
        struct cn_callback_entry *cbq =
                container_of(work, struct cn_callback_entry, work);
        struct cn_callback_data *d = &cbq->data;
+       struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
+       struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
 
-       d->callback(d->callback_priv);
+       d->callback(msg, nsp);
 
-       d->destruct_data(d->ddata);
-       d->ddata = NULL;
+       kfree_skb(d->skb);
+       d->skb = NULL;
 
        kfree(d->free);
 }
 
 static struct cn_callback_entry *
 cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
-                             void (*callback)(struct cn_msg *))
+                             void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
        struct cn_callback_entry *cbq;
 
@@ -123,7 +125,7 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
 }
 
 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
-                         void (*callback)(struct cn_msg *))
+                         void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
        struct cn_callback_entry *cbq, *__cbq;
        int found = 0;
index 74f52af79563ea18558ecd862df9693ffbb22e5c..f06024668f99d2abfd73d560804cf172d9ea19a8 100644 (file)
@@ -129,21 +129,19 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
 /*
  * Callback helper - queues work and setup destructor for given data.
  */
-static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data)
+static int cn_call_callback(struct sk_buff *skb)
 {
        struct cn_callback_entry *__cbq, *__new_cbq;
        struct cn_dev *dev = &cdev;
+       struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
        int err = -ENODEV;
 
        spin_lock_bh(&dev->cbdev->queue_lock);
        list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
                        if (likely(!work_pending(&__cbq->work) &&
-                                       __cbq->data.ddata == NULL)) {
-                               __cbq->data.callback_priv = msg;
-
-                               __cbq->data.ddata = data;
-                               __cbq->data.destruct_data = destruct_data;
+                                       __cbq->data.skb == NULL)) {
+                               __cbq->data.skb = skb;
 
                                if (queue_cn_work(__cbq, &__cbq->work))
                                        err = 0;
@@ -156,10 +154,8 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
                                __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
                                if (__new_cbq) {
                                        d = &__new_cbq->data;
-                                       d->callback_priv = msg;
+                                       d->skb = skb;
                                        d->callback = __cbq->data.callback;
-                                       d->ddata = data;
-                                       d->destruct_data = destruct_data;
                                        d->free = __new_cbq;
 
                                        __new_cbq->pdev = __cbq->pdev;
@@ -191,7 +187,6 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
  */
 static void cn_rx_skb(struct sk_buff *__skb)
 {
-       struct cn_msg *msg;
        struct nlmsghdr *nlh;
        int err;
        struct sk_buff *skb;
@@ -208,8 +203,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
                        return;
                }
 
-               msg = NLMSG_DATA(nlh);
-               err = cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
+               err = cn_call_callback(skb);
                if (err < 0)
                        kfree_skb(skb);
        }
@@ -270,7 +264,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
  * May sleep.
  */
 int cn_add_callback(struct cb_id *id, char *name,
-                   void (*callback)(struct cn_msg *))
+                   void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
        int err;
        struct cn_dev *dev = &cdev;
@@ -352,7 +346,7 @@ static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
  *
  * Used for notification of a request's processing.
  */
-static void cn_callback(struct cn_msg *msg)
+static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
        struct cn_ctl_msg *ctl;
        struct cn_ctl_entry *ent;
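Taken together, the connector hunks above change the callback signature from callback(msg) to callback(msg, netlink_skb_parms) and keep the originating skb around until the callback has run, so a handler also sees the sender's netlink metadata. A plain-C sketch of that "payload plus sender context" shape; every type and name below is an illustrative stand-in, not the kernel connector API:

#include <stdio.h>

struct msg    { int id; const char *data; };
struct sender { unsigned int pid; };            /* stand-in for the skb metadata */

typedef void (*cb_t)(struct msg *m, struct sender *s);

static void my_handler(struct msg *m, struct sender *s)
{
        /* With the sender context available, a handler can look at who
         * issued the request before acting on it. */
        printf("msg %d (\"%s\") from pid %u\n", m->id, m->data, s->pid);
}

static void dispatch(cb_t cb, struct msg *m, struct sender *s)
{
        cb(m, s);
}

int main(void)
{
        struct msg m = { 1, "hello" };
        struct sender s = { 1234 };

        dispatch(my_handler, &m, &s);
        return 0;
}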
index 4e551e63b6dc5fffd25c80b53b61e66a646772bf..4f4ac82382f7726a5496a5441e4bc20fe0932c69 100644 (file)
@@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644);
 
 /* Lookup table for all possible MC control instances */
 struct amd64_pvt;
-static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
-static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];
+static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
+static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
 
 /*
  * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
@@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 /* Map from a CSROW entry to the mask entry that operates on it */
 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 {
-       return csrow >> (pvt->num_dcsm >> 3);
+       if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+               return csrow;
+       else
+               return csrow >> 1;
 }
 
 /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
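The rewritten helper above encodes a fixed relationship: parts older than rev F have one DCS mask register per chip select (identity mapping), while rev F and later pair two chip selects per mask (csrow >> 1). The resulting table, printed by a standalone sketch:

#include <stdio.h>

static int map_to_dcs_mask(int csrow, int pre_rev_f)
{
        /* identity map before rev F, two chip selects per mask after */
        return pre_rev_f ? csrow : csrow >> 1;
}

int main(void)
{
        for (int csrow = 0; csrow < 8; csrow++)
                printf("csrow %d -> mask %d (pre rev F) / mask %d (rev F and later)\n",
                       csrow, map_to_dcs_mask(csrow, 1), map_to_dcs_mask(csrow, 0));
        return 0;
}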
@@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
        intlv_en = pvt->dram_IntlvEn[0];
 
        if (intlv_en == 0) {
-               for (node_id = 0; ) {
+               for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
                        if (amd64_base_limit_match(pvt, sys_addr, node_id))
-                               break;
-
-                       if (++node_id >= DRAM_REG_COUNT)
-                               goto err_no_match;
+                               goto found;
                }
-               goto found;
+               goto err_no_match;
        }
 
-       if (unlikely((intlv_en != (0x01 << 8)) &&
-                    (intlv_en != (0x03 << 8)) &&
-                    (intlv_en != (0x07 << 8)))) {
+       if (unlikely((intlv_en != 0x01) &&
+                    (intlv_en != 0x03) &&
+                    (intlv_en != 0x07))) {
                amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
                             "IntlvEn field of DRAM Base Register for node 0: "
-                            "This probably indicates a BIOS bug.\n", intlv_en);
+                            "this probably indicates a BIOS bug.\n", intlv_en);
                return NULL;
        }
 
        bits = (((u32) sys_addr) >> 12) & intlv_en;
 
        for (node_id = 0; ; ) {
-               if ((pvt->dram_limit[node_id] & intlv_en) == bits)
+               if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
                        break;  /* intlv_sel field matches */
 
                if (++node_id >= DRAM_REG_COUNT)
@@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
        /* sanity test for sys_addr */
        if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
                amd64_printk(KERN_WARNING,
-                         "%s(): sys_addr 0x%lx falls outside base/limit "
-                         "address range for node %d with node interleaving "
-                         "enabled.\n", __func__, (unsigned long)sys_addr,
-                         node_id);
+                            "%s(): sys_addr 0x%llx falls outside base/limit "
+                            "address range for node %d with node interleaving "
+                            "enabled.\n",
+                            __func__, sys_addr, node_id);
                return NULL;
        }
 
@@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
         * base/mask register pair, test the condition shown near the start of
         * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
         */
-       for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+       for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 
                /* This DRAM chip select is disabled on this node */
                if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
@@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
        u64 base, mask;
 
        pvt = mci->pvt_info;
-       BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));
+       BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
 
        base = base_from_dct_base(pvt, csrow);
        mask = mask_from_dct_mask(pvt, csrow);
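The find_mc_by_sys_addr() hunks above tighten node-interleave handling: IntlvEn is now compared against 0x1, 0x3 or 0x7 (2-, 4- or 8-node interleave) and the matching node is the one whose IntlvSel bits equal the address bits starting at bit 12. A standalone sketch of that selection; the register and address values are made up for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t intlv_en     = 0x03;              /* 4-node interleave        */
        uint32_t intlv_sel[4] = { 0, 1, 2, 3 };    /* per-node IntlvSel values */
        uint64_t sys_addr     = 0x12345000;        /* example system address   */

        /* address bits 13:12 pick the node when IntlvEn == 0x3 */
        uint32_t bits = (uint32_t)(sys_addr >> 12) & intlv_en;

        for (int node = 0; node < 4; node++)
                if ((intlv_sel[node] & intlv_en) == bits)
                        printf("sys_addr 0x%llx -> node %d\n",
                               (unsigned long long)sys_addr, node);
        return 0;
}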
@@ -962,35 +962,27 @@ err_reg:
  */
 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 {
-       if (pvt->ext_model >= OPTERON_CPU_REV_F) {
+
+       if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+               pvt->dcsb_base          = REV_E_DCSB_BASE_BITS;
+               pvt->dcsm_mask          = REV_E_DCSM_MASK_BITS;
+               pvt->dcs_mask_notused   = REV_E_DCS_NOTUSED_BITS;
+               pvt->dcs_shift          = REV_E_DCS_SHIFT;
+               pvt->cs_count           = 8;
+               pvt->num_dcsm           = 8;
+       } else {
                pvt->dcsb_base          = REV_F_F1Xh_DCSB_BASE_BITS;
                pvt->dcsm_mask          = REV_F_F1Xh_DCSM_MASK_BITS;
                pvt->dcs_mask_notused   = REV_F_F1Xh_DCS_NOTUSED_BITS;
                pvt->dcs_shift          = REV_F_F1Xh_DCS_SHIFT;
 
-               switch (boot_cpu_data.x86) {
-               case 0xf:
-                       pvt->num_dcsm = REV_F_DCSM_COUNT;
-                       break;
-
-               case 0x10:
-                       pvt->num_dcsm = F10_DCSM_COUNT;
-                       break;
-
-               case 0x11:
-                       pvt->num_dcsm = F11_DCSM_COUNT;
-                       break;
-
-               default:
-                       amd64_printk(KERN_ERR, "Unsupported family!\n");
-                       break;
+               if (boot_cpu_data.x86 == 0x11) {
+                       pvt->cs_count = 4;
+                       pvt->num_dcsm = 2;
+               } else {
+                       pvt->cs_count = 8;
+                       pvt->num_dcsm = 4;
                }
-       } else {
-               pvt->dcsb_base          = REV_E_DCSB_BASE_BITS;
-               pvt->dcsm_mask          = REV_E_DCSM_MASK_BITS;
-               pvt->dcs_mask_notused   = REV_E_DCS_NOTUSED_BITS;
-               pvt->dcs_shift          = REV_E_DCS_SHIFT;
-               pvt->num_dcsm           = REV_E_DCSM_COUNT;
        }
 }
 
@@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
        amd64_set_dct_base_and_mask(pvt);
 
-       for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
+       for (cs = 0; cs < pvt->cs_count; cs++) {
                reg = K8_DCSB0 + (cs * 4);
                err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
                                                &pvt->dcsb0[cs]);
@@ -1130,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
                debugf0("Reading K8_DRAM_BASE_LOW failed\n");
 
        /* Extract parts into separate data entries */
-       pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
+       pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24;
        pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
        pvt->dram_rw_en[dram] = (low & 0x3);
 
@@ -1143,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
         * Extract parts into separate data entries. Limit is the HIGHEST memory
         * location of the region, so lower 24 bits need to be all ones
         */
-       pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
+       pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF;
        pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
        pvt->dram_DstNode[dram] = (low & 0x7);
 }
@@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
         * different from the node that detected the error.
         */
        src_mci = find_mc_by_sys_addr(mci, SystemAddress);
-       if (src_mci) {
+       if (!src_mci) {
                amd64_mc_printk(mci, KERN_ERR,
                             "failed to map error address 0x%lx to a node\n",
                             (unsigned long)SystemAddress);
@@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 
        pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
 
-       pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) |
-                               ((u64) low_base & 0xFFFF0000))) << 8;
+       pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
+                              (((u64)low_base  & 0xFFFF0000) << 24);
 
        low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
        high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
@@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
         * Extract address values and form a LIMIT address. Limit is the HIGHEST
         * memory location of the region, so low 24 bits need to be all ones.
         */
-       low_limit |= 0x0000FFFF;
-       pvt->dram_limit[dram] =
-               ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF);
+       pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
+                               (((u64) low_limit & 0xFFFF0000) << 24) |
+                               0x00FFFFFF;
 }
 
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
@@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
 
        debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);
 
-       for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {
+       for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 
                cs_base = amd64_get_dct_base(pvt, cs, csrow);
                if (!(cs_base & K8_DCSB_CS_ENABLE))
@@ -2497,7 +2489,7 @@ err_reg:
  * NOTE: CPU Revision Dependent code
  *
  * Input:
- *     @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1)
+ *     @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
  *     k8 private pointer to -->
  *                     DRAM Bank Address mapping register
  *                     node_id
@@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
                (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
                );
 
-       for (i = 0; i < CHIPSELECT_COUNT; i++) {
+       for (i = 0; i < pvt->cs_count; i++) {
                csrow = &mci->csrows[i];
 
                if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
@@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
                goto err_exit;
 
        ret = -ENOMEM;
-       mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id);
+       mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
        if (!mci)
                goto err_exit;
 
index 8ea07e2715dcad85ec1faa03e4921066ce6fb514..c6f359a85207e24552baac189270cb3702173b51 100644 (file)
 #define EDAC_AMD64_VERSION             " Ver: 3.2.0 " __DATE__
 #define EDAC_MOD_STR                   "amd64_edac"
 
+#define EDAC_MAX_NUMNODES              8
+
 /* Extended Model from CPUID, for CPU Revision numbers */
 #define OPTERON_CPU_LE_REV_C           0
 #define OPTERON_CPU_REV_D              1
 #define OPTERON_CPU_REV_FA             5
 
 /* Hardware limit on ChipSelect rows per MC and processors per system */
-#define CHIPSELECT_COUNT               8
+#define MAX_CS_COUNT                   8
 #define DRAM_REG_COUNT                 8
 
 
  */
 #define REV_E_DCSB_BASE_BITS           (0xFFE0FE00ULL)
 #define REV_E_DCS_SHIFT                        4
-#define REV_E_DCSM_COUNT               8
 
 #define REV_F_F1Xh_DCSB_BASE_BITS      (0x1FF83FE0ULL)
 #define REV_F_F1Xh_DCS_SHIFT           8
  */
 #define REV_F_DCSB_BASE_BITS           (0x1FF83FE0ULL)
 #define REV_F_DCS_SHIFT                        8
-#define REV_F_DCSM_COUNT               4
-#define F10_DCSM_COUNT                 4
-#define F11_DCSM_COUNT                 2
 
 /* DRAM CS Mask Registers */
 #define K8_DCSM0                       0x60
@@ -374,13 +372,11 @@ enum {
 
 #define SET_NB_DRAM_INJECTION_WRITE(word, bits)  \
                                        (BIT(((word) & 0xF) + 20) | \
-                                       BIT(17) |  \
-                                       ((bits) & 0xF))
+                                       BIT(17) | bits)
 
 #define SET_NB_DRAM_INJECTION_READ(word, bits)  \
                                        (BIT(((word) & 0xF) + 20) | \
-                                       BIT(16) |  \
-                                       ((bits) & 0xF))
+                                       BIT(16) |  bits)
 
 #define K8_NBCAP                       0xE8
 #define K8_NBCAP_CORES                 (BIT(12)|BIT(13))
@@ -445,12 +441,12 @@ struct amd64_pvt {
        u32 dbam1;              /* DRAM Base Address Mapping reg for DCT1 */
 
        /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
-       u32 dcsb0[CHIPSELECT_COUNT];
-       u32 dcsb1[CHIPSELECT_COUNT];
+       u32 dcsb0[MAX_CS_COUNT];
+       u32 dcsb1[MAX_CS_COUNT];
 
        /* DRAM CS Mask Registers F2x[1,0][6C:60] */
-       u32 dcsm0[CHIPSELECT_COUNT];
-       u32 dcsm1[CHIPSELECT_COUNT];
+       u32 dcsm0[MAX_CS_COUNT];
+       u32 dcsm1[MAX_CS_COUNT];
 
        /*
         * Decoded parts of DRAM BASE and LIMIT Registers
@@ -470,6 +466,7 @@ struct amd64_pvt {
         */
        u32 dcsb_base;          /* DCSB base bits */
        u32 dcsm_mask;          /* DCSM mask bits */
+       u32 cs_count;           /* num chip selects (== num DCSB registers) */
        u32 num_dcsm;           /* Number of DCSM registers */
        u32 dcs_mask_notused;   /* DCSM notused mask bits */
        u32 dcs_shift;          /* DCSB and DCSM shift value */
index d3675b76b3a7d0792033abfb460f802b78136efb..29f1f7a612d92671976b356b48efd10594f09d6f 100644 (file)
@@ -1,5 +1,11 @@
 #include "amd64_edac.h"
 
+static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
+{
+       struct amd64_pvt *pvt = mci->pvt_info;
+       return sprintf(buf, "0x%x\n", pvt->injection.section);
+}
+
 /*
  * store error injection section value which refers to one of 4 16-byte sections
  * within a 64-byte cacheline
@@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
 
        ret = strict_strtoul(data, 10, &value);
        if (ret != -EINVAL) {
+
+               if (value > 3) {
+                       amd64_printk(KERN_WARNING,
+                                    "%s: invalid section 0x%lx\n",
+                                    __func__, value);
+                       return -EINVAL;
+               }
+
                pvt->injection.section = (u32) value;
                return count;
        }
        return ret;
 }
 
+static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
+{
+       struct amd64_pvt *pvt = mci->pvt_info;
+       return sprintf(buf, "0x%x\n", pvt->injection.word);
+}
+
 /*
  * store error injection word value which refers to one of 9 16-bit word of the
  * 16-byte (128-bit + ECC bits) section
@@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
        ret = strict_strtoul(data, 10, &value);
        if (ret != -EINVAL) {
 
-               value = (value <= 8) ? value : 0;
-               pvt->injection.word = (u32) value;
+               if (value > 8) {
+                       amd64_printk(KERN_WARNING,
+                                    "%s: invalid word 0x%lx\n",
+                                    __func__, value);
+                       return -EINVAL;
+               }
 
+               pvt->injection.word = (u32) value;
                return count;
        }
        return ret;
 }
 
+static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
+{
+       struct amd64_pvt *pvt = mci->pvt_info;
+       return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
+}
+
 /*
  * store 16 bit error injection vector which enables injecting errors to the
  * corresponding bit within the error injection word above. When used during a
@@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
        ret = strict_strtoul(data, 16, &value);
        if (ret != -EINVAL) {
 
-               pvt->injection.bit_map = (u32) value & 0xFFFF;
+               if (value & 0xFFFF0000) {
+                       amd64_printk(KERN_WARNING,
+                                    "%s: invalid EccVector: 0x%lx\n",
+                                    __func__, value);
+                       return -EINVAL;
+               }
 
+               pvt->injection.bit_map = (u32) value;
                return count;
        }
        return ret;
@@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
                        .name = "inject_section",
                        .mode = (S_IRUGO | S_IWUSR)
                },
-               .show = NULL,
+               .show = amd64_inject_section_show,
                .store = amd64_inject_section_store,
        },
        {
@@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
                        .name = "inject_word",
                        .mode = (S_IRUGO | S_IWUSR)
                },
-               .show = NULL,
+               .show = amd64_inject_word_show,
                .store = amd64_inject_word_store,
        },
        {
@@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
                        .name = "inject_ecc_vector",
                        .mode = (S_IRUGO | S_IWUSR)
                },
-               .show = NULL,
+               .show = amd64_inject_ecc_vector_show,
                .store = amd64_inject_ecc_vector_store,
        },
        {
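The new show handlers and the stricter store handlers above make the injection files symmetric: values are printed back in hex, and out-of-range input (section above 3, word above 8, an ECC vector wider than 16 bits) is rejected instead of silently masked. A userspace sketch of the same validate-then-store idea using strtoul; the bounds follow the patch, the function name is illustrative:

#include <stdio.h>
#include <stdlib.h>

static int store_ecc_vector(const char *data, unsigned int *bit_map)
{
        char *end;
        unsigned long value = strtoul(data, &end, 16);

        if (end == data || (value & 0xFFFF0000))
                return -1;              /* reject junk and values wider than 16 bits */

        *bit_map = (unsigned int)value;
        return 0;
}

int main(void)
{
        unsigned int bm = 0;

        printf("0x00ff accepted: %d\n", store_ecc_vector("0x00ff", &bm) == 0);
        printf("0x10000 rejected: %d\n", store_ecc_vector("0x10000", &bm) != 0);
        return 0;
}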
index ced186d7e9a9d16552fcf3d73a95203303a8251a..5089331544ed5336e682fdec039be94068314102 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/preempt.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
index 420a96e7f2dbb1a31d299f70daa07f22e8b95db3..051d1ebbd2876b68db9df2209161b13da7e8d7bb 100644 (file)
@@ -939,7 +939,7 @@ static int __init ibft_init(void)
 
        if (ibft_addr) {
                printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-                      (u64)virt_to_phys((void *)ibft_addr));
+                      (u64)isa_virt_to_bus(ibft_addr));
 
                rc = ibft_check_device();
                if (rc)
index d53fbbfefa3ee2893ece684f2a5cb25eddf77b04..dfb15c06c88ff8482de2b03164679633748848b7 100644 (file)
@@ -65,10 +65,10 @@ void __init reserve_ibft_region(void)
                 * so skip that area */
                if (pos == VGA_MEM)
                        pos += VGA_SIZE;
-               virt = phys_to_virt(pos);
+               virt = isa_bus_to_virt(pos);
                if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) {
                        unsigned long *addr =
-                           (unsigned long *)phys_to_virt(pos + 4);
+                           (unsigned long *)isa_bus_to_virt(pos + 4);
                        len = *addr;
                        /* if the length of the table extends past 1M,
                         * the table cannot be valid. */
index bb11a429394ab865f7e07cfc289d8a32330bd320..662ed923d9eb0be22a49c0a1db2ae5c14e11d49d 100644 (file)
@@ -1487,7 +1487,7 @@ static int gpiolib_open(struct inode *inode, struct file *file)
        return single_open(file, gpiolib_show, NULL);
 }
 
-static struct file_operations gpiolib_operations = {
+static const struct file_operations gpiolib_operations = {
        .open           = gpiolib_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
index ba728ad77f2a6611409a5f90c0f639bfa10aef9b..5cae0b3eee9be491c1a9b23a1a1ba0b156ff19bd 100644 (file)
@@ -482,6 +482,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
        list_for_each_entry_safe(mode, t, &connector->user_modes, head)
                drm_mode_remove(connector, mode);
 
+       kfree(connector->fb_helper_private);
        mutex_lock(&dev->mode_config.mutex);
        drm_mode_object_put(dev, &connector->base);
        list_del(&connector->head);
@@ -1555,8 +1556,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
        struct drm_crtc *crtc;
        int ret = 0;
 
-       DRM_DEBUG_KMS("\n");
-
        if (!req->flags) {
                DRM_ERROR("no operation set\n");
                return -EINVAL;
index fe8697447f327abaa9072bd623b5a755aef33cd7..1fe4e1d344fdb1e6f21910d87b4795cd5986f933 100644 (file)
@@ -32,6 +32,7 @@
 #include "drmP.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
 
 static void drm_mode_validate_flag(struct drm_connector *connector,
                                   int flags)
@@ -90,7 +91,15 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
        list_for_each_entry_safe(mode, t, &connector->modes, head)
                mode->status = MODE_UNVERIFIED;
 
-       connector->status = connector->funcs->detect(connector);
+       if (connector->force) {
+               if (connector->force == DRM_FORCE_ON)
+                       connector->status = connector_status_connected;
+               else
+                       connector->status = connector_status_disconnected;
+               if (connector->funcs->force)
+                       connector->funcs->force(connector);
+       } else
+               connector->status = connector->funcs->detect(connector);
 
        if (connector->status == connector_status_disconnected) {
                DRM_DEBUG_KMS("%s is disconnected\n",
@@ -267,6 +276,65 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *con
        return NULL;
 }
 
+static bool drm_has_cmdline_mode(struct drm_connector *connector)
+{
+       struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+       struct drm_fb_helper_cmdline_mode *cmdline_mode;
+
+       if (!fb_help_conn)
+               return false;
+
+       cmdline_mode = &fb_help_conn->cmdline_mode;
+       return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
+{
+       struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+       struct drm_fb_helper_cmdline_mode *cmdline_mode;
+       struct drm_display_mode *mode = NULL;
+
+       if (!fb_help_conn)
+               return mode;
+
+       cmdline_mode = &fb_help_conn->cmdline_mode;
+       if (cmdline_mode->specified == false)
+               return mode;
+
+       /* attempt to find a matching mode in the list of modes
+        *  we have gotten so far, if not add a CVT mode that conforms
+        */
+       if (cmdline_mode->rb || cmdline_mode->margins)
+               goto create_mode;
+
+       list_for_each_entry(mode, &connector->modes, head) {
+               /* check width/height */
+               if (mode->hdisplay != cmdline_mode->xres ||
+                   mode->vdisplay != cmdline_mode->yres)
+                       continue;
+
+               if (cmdline_mode->refresh_specified) {
+                       if (mode->vrefresh != cmdline_mode->refresh)
+                               continue;
+               }
+
+               if (cmdline_mode->interlace) {
+                       if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+                               continue;
+               }
+               return mode;
+       }
+
+create_mode:
+       mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
+                           cmdline_mode->yres,
+                           cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
+                           cmdline_mode->rb, cmdline_mode->interlace,
+                           cmdline_mode->margins);
+       list_add(&mode->head, &connector->modes);
+       return mode;
+}
+
 static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
 {
        bool enable;
@@ -317,10 +385,16 @@ static bool drm_target_preferred(struct drm_device *dev,
                        continue;
                }
 
-               DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
-                         connector->base.id);
+               DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+                             connector->base.id);
 
-               modes[i] = drm_has_preferred_mode(connector, width, height);
+               /* go for the command line mode first */
+               modes[i] = drm_pick_cmdline_mode(connector, width, height);
+               if (!modes[i]) {
+                       DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+                                     connector->base.id);
+                       modes[i] = drm_has_preferred_mode(connector, width, height);
+               }
                /* No preferred modes, pick one off the list */
                if (!modes[i] && !list_empty(&connector->modes)) {
                        list_for_each_entry(modes[i], &connector->modes, head)
@@ -369,6 +443,8 @@ static int drm_pick_crtcs(struct drm_device *dev,
        my_score = 1;
        if (connector->status == connector_status_connected)
                my_score++;
+       if (drm_has_cmdline_mode(connector))
+               my_score++;
        if (drm_has_preferred_mode(connector, width, height))
                my_score++;
 
@@ -943,6 +1019,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
 {
        int count = 0;
 
+       drm_fb_helper_parse_command_line(dev);
+
        count = drm_helper_probe_connector_modes(dev,
                                                 dev->mode_config.max_width,
                                                 dev->mode_config.max_height);
@@ -950,7 +1028,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
        /*
         * we shouldn't end up with no modes here.
         */
-       WARN(!count, "Connected connector with 0 modes\n");
+       WARN(!count, "No connectors reported connected with modes\n");
 
        drm_setup_crtcs(dev);
 
index 90d76bacff17dc5048d9ec9cdbf97e3f55567955..3c0d2b3aed76c0ee92320074c4395541d9712cec 100644 (file)
@@ -109,7 +109,9 @@ static struct edid_quirk {
 
 
 /* Valid EDID header has these bytes */
-static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+static const u8 edid_header[] = {
+       0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
 
 /**
  * edid_is_valid - sanity check EDID data
@@ -500,6 +502,19 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
        }
        return mode;
 }
+
+/*
+ * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
+ * monitors fill with ascii space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+       return (a == 0x00 && b == 0x00) ||
+              (a == 0x01 && b == 0x01) ||
+              (a == 0x20 && b == 0x20);
+}
+
 /**
  * drm_mode_std - convert standard mode info (width, height, refresh) into mode
  * @t: standard timing params
@@ -513,6 +528,7 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
  */
 struct drm_display_mode *drm_mode_std(struct drm_device *dev,
                                      struct std_timing *t,
+                                     int revision,
                                      int timing_level)
 {
        struct drm_display_mode *mode;
@@ -523,14 +539,20 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
        unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
                >> EDID_TIMING_VFREQ_SHIFT;
 
+       if (bad_std_timing(t->hsize, t->vfreq_aspect))
+               return NULL;
+
        /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
        hsize = t->hsize * 8 + 248;
        /* vrefresh_rate = vfreq + 60 */
        vrefresh_rate = vfreq + 60;
        /* the vdisplay is calculated based on the aspect ratio */
-       if (aspect_ratio == 0)
-               vsize = (hsize * 10) / 16;
-       else if (aspect_ratio == 1)
+       if (aspect_ratio == 0) {
+               if (revision < 3)
+                       vsize = hsize;
+               else
+                       vsize = (hsize * 10) / 16;
+       } else if (aspect_ratio == 1)
                vsize = (hsize * 3) / 4;
        else if (aspect_ratio == 2)
                vsize = (hsize * 4) / 5;
@@ -538,7 +560,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
                vsize = (hsize * 9) / 16;
        /* HDTV hack */
        if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
-               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+                                   false);
                mode->hdisplay = 1366;
                mode->vsync_start = mode->vsync_start - 1;
                mode->vsync_end = mode->vsync_end - 1;
@@ -557,7 +580,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
                mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
                break;
        case LEVEL_CVT:
-               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+               mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+                                   false);
                break;
        }
        return mode;
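drm_mode_std() above derives the resolution from a standard-timing entry as hdisplay = hsize * 8 + 248 and picks vdisplay from the 2-bit aspect-ratio code, where, per the fix, code 0 means 1:1 on EDID revisions before 1.3 and 16:10 from 1.3 on. A standalone sketch of that arithmetic with a made-up hsize byte:

#include <stdio.h>

static int std_vsize(int hsize, int aspect, int revision)
{
        switch (aspect) {
        case 0:  return revision < 3 ? hsize : hsize * 10 / 16;
        case 1:  return hsize * 3 / 4;
        case 2:  return hsize * 4 / 5;
        default: return hsize * 9 / 16;
        }
}

int main(void)
{
        int hsize_byte = 0x81;                  /* example EDID byte    */
        int hsize = hsize_byte * 8 + 248;       /* 129 * 8 + 248 = 1280 */

        for (int aspect = 0; aspect < 4; aspect++)
                printf("aspect code %d: %dx%d (EDID 1.3+)\n",
                       aspect, hsize, std_vsize(hsize, aspect, 3));
        return 0;
}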
@@ -779,7 +803,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
                        continue;
 
                newmode = drm_mode_std(dev, &edid->standard_timings[i],
-                                       timing_level);
+                                      edid->revision, timing_level);
                if (newmode) {
                        drm_mode_probed_add(connector, newmode);
                        modes++;
@@ -829,13 +853,13 @@ static int add_detailed_info(struct drm_connector *connector,
                        case EDID_DETAIL_MONITOR_CPDATA:
                                break;
                        case EDID_DETAIL_STD_MODES:
-                               /* Five modes per detailed section */
-                               for (j = 0; j < 5; i++) {
+                               for (j = 0; j < 6; i++) {
                                        struct std_timing *std;
                                        struct drm_display_mode *newmode;
 
                                        std = &data->data.timings[j];
                                        newmode = drm_mode_std(dev, std,
+                                                              edid->revision,
                                                               timing_level);
                                        if (newmode) {
                                                drm_mode_probed_add(connector, newmode);
@@ -964,7 +988,9 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
                                struct drm_display_mode *newmode;
 
                                std = &data->data.timings[j];
-                               newmode = drm_mode_std(dev, std, timing_level);
+                               newmode = drm_mode_std(dev, std,
+                                                      edid->revision,
+                                                      timing_level);
                                if (newmode) {
                                        drm_mode_probed_add(connector, newmode);
                                        modes++;
index 2c467131488438e3801ca63a0b65e210c7afc201..23dc9c115fd99dc4577e6444e11e866ceca488e8 100644 (file)
@@ -40,6 +40,199 @@ MODULE_LICENSE("GPL and additional rights");
 
 static LIST_HEAD(kernel_fb_helper_list);
 
+int drm_fb_helper_add_connector(struct drm_connector *connector)
+{
+       connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+       if (!connector->fb_helper_private)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_add_connector);
+
+static int my_atoi(const char *name)
+{
+       int val = 0;
+
+       for (;; name++) {
+               switch (*name) {
+               case '0' ... '9':
+                       val = 10*val+(*name-'0');
+                       break;
+               default:
+                       return val;
+               }
+       }
+}
+
+/**
+ * drm_fb_helper_connector_parse_command_line - parse command line for connector
+ * @connector - connector to parse line for
+ * @mode_option - per connector mode option
+ *
+ * This parses the connector specific then generic command lines for
+ * modes and options to configure the connector.
+ *
+ * This uses the same parameters as the fb modedb.c, except for an extra
+ *     <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ *
+ * format with an optional e (enable) / D (enable Digital) / d (disable) flag at the end.
+ */
+static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
+                                                      const char *mode_option)
+{
+       const char *name;
+       unsigned int namelen;
+       int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+       unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+       int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+       int i;
+       enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+       struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+       struct drm_fb_helper_cmdline_mode *cmdline_mode;
+
+       if (!fb_help_conn)
+               return false;
+
+       cmdline_mode = &fb_help_conn->cmdline_mode;
+       if (!mode_option)
+               mode_option = fb_mode_option;
+
+       if (!mode_option) {
+               cmdline_mode->specified = false;
+               return false;
+       }
+
+       name = mode_option;
+       namelen = strlen(name);
+       for (i = namelen-1; i >= 0; i--) {
+               switch (name[i]) {
+               case '@':
+                       namelen = i;
+                       if (!refresh_specified && !bpp_specified &&
+                           !yres_specified) {
+                               refresh = my_atoi(&name[i+1]);
+                               refresh_specified = 1;
+                               if (cvt || rb)
+                                       cvt = 0;
+                       } else
+                               goto done;
+                       break;
+               case '-':
+                       namelen = i;
+                       if (!bpp_specified && !yres_specified) {
+                               bpp = my_atoi(&name[i+1]);
+                               bpp_specified = 1;
+                               if (cvt || rb)
+                                       cvt = 0;
+                       } else
+                               goto done;
+                       break;
+               case 'x':
+                       if (!yres_specified) {
+                               yres = my_atoi(&name[i+1]);
+                               yres_specified = 1;
+                       } else
+                               goto done;
+               case '0' ... '9':
+                       break;
+               case 'M':
+                       if (!yres_specified)
+                               cvt = 1;
+                       break;
+               case 'R':
+                       if (!cvt)
+                               rb = 1;
+                       break;
+               case 'm':
+                       if (!cvt)
+                               margins = 1;
+                       break;
+               case 'i':
+                       if (!cvt)
+                               interlace = 1;
+                       break;
+               case 'e':
+                       force = DRM_FORCE_ON;
+                       break;
+               case 'D':
+                       if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
+                           (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+                               force = DRM_FORCE_ON;
+                       else
+                               force = DRM_FORCE_ON_DIGITAL;
+                       break;
+               case 'd':
+                       force = DRM_FORCE_OFF;
+                       break;
+               default:
+                       goto done;
+               }
+       }
+       if (i < 0 && yres_specified) {
+               xres = my_atoi(name);
+               res_specified = 1;
+       }
+done:
+
+       DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+               drm_get_connector_name(connector), xres, yres,
+               (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
+               "", (margins) ? " with margins" : "", (interlace) ?
+               " interlaced" : "");
+
+       if (force) {
+               const char *s;
+               switch (force) {
+               case DRM_FORCE_OFF: s = "OFF"; break;
+               case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+               default:
+               case DRM_FORCE_ON: s = "ON"; break;
+               }
+
+               DRM_INFO("forcing %s connector %s\n",
+                        drm_get_connector_name(connector), s);
+               connector->force = force;
+       }
+
+       if (res_specified) {
+               cmdline_mode->specified = true;
+               cmdline_mode->xres = xres;
+               cmdline_mode->yres = yres;
+       }
+
+       if (refresh_specified) {
+               cmdline_mode->refresh_specified = true;
+               cmdline_mode->refresh = refresh;
+       }
+
+       if (bpp_specified) {
+               cmdline_mode->bpp_specified = true;
+               cmdline_mode->bpp = bpp;
+       }
+       cmdline_mode->rb = rb ? true : false;
+       cmdline_mode->cvt = cvt  ? true : false;
+       cmdline_mode->interlace = interlace ? true : false;
+
+       return true;
+}
+
+int drm_fb_helper_parse_command_line(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               char *option = NULL;
+
+               /* do something on return - turn off connector maybe */
+               if (fb_get_options(drm_get_connector_name(connector), &option))
+                       continue;
+
+               drm_fb_helper_connector_parse_command_line(connector, option);
+       }
+       return 0;
+}
+
 bool drm_fb_helper_force_kernel_mode(void)
 {
        int i = 0;
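drm_fb_helper_connector_parse_command_line() above walks the per-connector option string backwards, pulling out xres, yres, bpp, refresh and the single-letter flags described in its comment before recording them in cmdline_mode. A reduced standalone sketch that extracts only the core <xres>x<yres>@<refresh> fields with sscanf; the real parser also handles -<bpp>, M, R, i, m and the e/D/d force flags:

#include <stdio.h>

int main(void)
{
        const char *mode_option = "1280x1024@60";       /* e.g. handed back by fb_get_options() */
        unsigned int xres = 0, yres = 0, refresh = 0;

        if (sscanf(mode_option, "%ux%u@%u", &xres, &yres, &refresh) >= 2)
                printf("cmdline mode %ux%u@%uHz\n",
                       xres, yres, refresh ? refresh : 60);
        return 0;
}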
@@ -87,6 +280,7 @@ void drm_fb_helper_restore(void)
 }
 EXPORT_SYMBOL(drm_fb_helper_restore);
 
+#ifdef CONFIG_MAGIC_SYSRQ
 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 {
        drm_fb_helper_restore();
@@ -103,6 +297,7 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
        .help_msg = "force-fb(V)",
        .action_msg = "Restore framebuffer console",
 };
+#endif
 
 static void drm_fb_helper_on(struct fb_info *info)
 {
@@ -259,6 +454,96 @@ out_free:
 }
 EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
 
+static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
+                    u16 blue, u16 regno, struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_framebuffer *fb = fb_helper->fb;
+       int pindex;
+
+       pindex = regno;
+
+       if (fb->bits_per_pixel == 16) {
+               pindex = regno << 3;
+
+               if (fb->depth == 16 && regno > 63)
+                       return;
+               if (fb->depth == 15 && regno > 31)
+                       return;
+
+               if (fb->depth == 16) {
+                       u16 r, g, b;
+                       int i;
+                       if (regno < 32) {
+                               for (i = 0; i < 8; i++)
+                                       fb_helper->funcs->gamma_set(crtc, red,
+                                               green, blue, pindex + i);
+                       }
+
+                       fb_helper->funcs->gamma_get(crtc, &r,
+                                                   &g, &b,
+                                                   pindex >> 1);
+
+                       for (i = 0; i < 4; i++)
+                               fb_helper->funcs->gamma_set(crtc, r,
+                                                           green, b,
+                                                           (pindex >> 1) + i);
+               }
+       }
+
+       if (fb->depth != 16)
+               fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
+
+       if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+               ((u32 *) fb->pseudo_palette)[regno] =
+                       (regno << info->var.red.offset) |
+                       (regno << info->var.green.offset) |
+                       (regno << info->var.blue.offset);
+       }
+}
+
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_device *dev = fb_helper->dev;
+       u16 *red, *green, *blue, *transp;
+       struct drm_crtc *crtc;
+       int i, rc = 0;
+       int start;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+               for (i = 0; i < fb_helper->crtc_count; i++) {
+                       if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
+                               break;
+               }
+               if (i == fb_helper->crtc_count)
+                       continue;
+
+               red = cmap->red;
+               green = cmap->green;
+               blue = cmap->blue;
+               transp = cmap->transp;
+               start = cmap->start;
+
+               for (i = 0; i < cmap->len; i++) {
+                       u16 hred, hgreen, hblue, htransp = 0xffff;
+
+                       hred = *red++;
+                       hgreen = *green++;
+                       hblue = *blue++;
+
+                       if (transp)
+                               htransp = *transp++;
+
+                       setcolreg(crtc, hred, hgreen, hblue, start++, info);
+               }
+               crtc_funcs->load_lut(crtc);
+       }
+       return rc;
+}
+EXPORT_SYMBOL(drm_fb_helper_setcmap);
+
 int drm_fb_helper_setcolreg(unsigned regno,
                            unsigned red,
                            unsigned green,
@@ -271,9 +556,11 @@ int drm_fb_helper_setcolreg(unsigned regno,
        struct drm_crtc *crtc;
        int i;
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct drm_framebuffer *fb = fb_helper->fb;
+       if (regno > 255)
+               return 1;
 
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
                for (i = 0; i < fb_helper->crtc_count; i++) {
                        if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
                                break;
@@ -281,35 +568,9 @@ int drm_fb_helper_setcolreg(unsigned regno,
                if (i == fb_helper->crtc_count)
                        continue;
 
-               if (regno > 255)
-                       return 1;
 
-               if (fb->depth == 8) {
-                       fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
-                       return 0;
-               }
-
-               if (regno < 16) {
-                       switch (fb->depth) {
-                       case 15:
-                               fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
-                                       ((green & 0xf800) >>  6) |
-                                       ((blue & 0xf800) >> 11);
-                               break;
-                       case 16:
-                               fb->pseudo_palette[regno] = (red & 0xf800) |
-                                       ((green & 0xfc00) >>  5) |
-                                       ((blue  & 0xf800) >> 11);
-                               break;
-                       case 24:
-                       case 32:
-                               fb->pseudo_palette[regno] =
-                                       (((red >> 8) & 0xff) << info->var.red.offset) |
-                                       (((green >> 8) & 0xff) << info->var.green.offset) |
-                                       (((blue >> 8) & 0xff) << info->var.blue.offset);
-                               break;
-                       }
-               }
+               setcolreg(crtc, red, green, blue, regno, info);
+               crtc_funcs->load_lut(crtc);
        }
        return 0;
 }
@@ -479,11 +740,14 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
 EXPORT_SYMBOL(drm_fb_helper_pan_display);
 
 int drm_fb_helper_single_fb_probe(struct drm_device *dev,
+                                 int preferred_bpp,
                                  int (*fb_create)(struct drm_device *dev,
                                                   uint32_t fb_width,
                                                   uint32_t fb_height,
                                                   uint32_t surface_width,
                                                   uint32_t surface_height,
+                                                  uint32_t surface_depth,
+                                                  uint32_t surface_bpp,
                                                   struct drm_framebuffer **fb_ptr))
 {
        struct drm_crtc *crtc;
@@ -497,8 +761,48 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
        struct drm_framebuffer *fb;
        struct drm_mode_set *modeset = NULL;
        struct drm_fb_helper *fb_helper;
+       uint32_t surface_depth = 24, surface_bpp = 32;
 
+       /* if driver picks 8 or 16 by default use that
+          for both depth/bpp */
+       if (preferred_bpp != surface_bpp) {
+               surface_depth = surface_bpp = preferred_bpp;
+       }
        /* first up get a count of crtcs now in use and new min/maxes width/heights */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
+
+               struct drm_fb_helper_cmdline_mode *cmdline_mode;
+
+               if (!fb_help_conn)
+                       continue;
+               
+               cmdline_mode = &fb_help_conn->cmdline_mode;
+
+               if (cmdline_mode->bpp_specified) {
+                       switch (cmdline_mode->bpp) {
+                       case 8:
+                               surface_depth = surface_bpp = 8;
+                               break;
+                       case 15:
+                               surface_depth = 15;
+                               surface_bpp = 16;
+                               break;
+                       case 16:
+                               surface_depth = surface_bpp = 16;
+                               break;
+                       case 24:
+                               surface_depth = surface_bpp = 24;
+                               break;
+                       case 32:
+                               surface_depth = 24;
+                               surface_bpp = 32;
+                               break;
+                       }
+                       break;
+               }
+       }
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (drm_helper_crtc_in_use(crtc)) {
                        if (crtc->desired_mode) {
@@ -527,7 +831,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
        /* do we have an fb already? */
        if (list_empty(&dev->mode_config.fb_kernel_list)) {
                ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
-                                  surface_height, &fb);
+                                  surface_height, surface_depth, surface_bpp,
+                                  &fb);
                if (ret)
                        return -EINVAL;
                new_fb = 1;
@@ -618,10 +923,12 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_free);
 
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch)
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+                           uint32_t depth)
 {
        info->fix.type = FB_TYPE_PACKED_PIXELS;
-       info->fix.visual = FB_VISUAL_TRUECOLOR;
+       info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+               FB_VISUAL_DIRECTCOLOR;
        info->fix.type_aux = 0;
        info->fix.xpanstep = 1; /* doing it in hw */
        info->fix.ypanstep = 1; /* doing it in hw */
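
A minimal standalone sketch of the bpp-to-depth mapping the new cmdline handling above performs; pick_surface_format is a hypothetical name, not a DRM helper, and the code only restates the switch in drm_fb_helper_single_fb_probe:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the cmdline bpp switch: choose a surface
 * depth/bpp pair for a requested bpp, defaulting to depth 24 / bpp 32. */
static void pick_surface_format(int requested_bpp, uint32_t *depth, uint32_t *bpp)
{
        *depth = 24;
        *bpp = 32;
        switch (requested_bpp) {
        case 8:
                *depth = *bpp = 8;      /* palettized */
                break;
        case 15:
                *depth = 15;            /* xRGB1555 in a 16-bit word */
                *bpp = 16;
                break;
        case 16:
                *depth = *bpp = 16;     /* RGB565 */
                break;
        case 24:
                *depth = *bpp = 24;     /* packed RGB888 */
                break;
        case 32:
                *depth = 24;            /* xRGB8888 */
                *bpp = 32;
                break;
        }
}

int main(void)
{
        static const int req[] = { 8, 15, 16, 24, 32 };
        uint32_t depth, bpp;
        unsigned int i;

        for (i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
                pick_surface_format(req[i], &depth, &bpp);
                printf("bpp=%d -> depth=%u, bpp=%u\n", req[i], depth, bpp);
        }
        return 0;
}
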
index 49404ce1666ea320e38c0e0f00b7668437aa224d..51f677215f1dbcaa72e3f4ffe25142c32ee9120c 100644 (file)
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
 #define HV_FACTOR                      1000
 struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
                                      int vdisplay, int vrefresh,
-                                     bool reduced, bool interlaced)
+                                     bool reduced, bool interlaced, bool margins)
 {
        /* 1) top/bottom margin size (% of height) - default: 1.8, */
 #define        CVT_MARGIN_PERCENTAGE           18
@@ -101,7 +101,6 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
        /* Pixel Clock step (kHz) */
 #define CVT_CLOCK_STEP                 250
        struct drm_display_mode *drm_mode;
-       bool margins = false;
        unsigned int vfieldrate, hperiod;
        int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
        int interlace;
index 93ff6c03733e6359b69e720dc48f3193272ce04f..ffa39671751fab815e76ef164cf860eccaae8549 100644 (file)
@@ -3244,6 +3244,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
        intel_crtc->lut_b[regno] = blue >> 8;
 }
 
+void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+                            u16 *blue, int regno)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       *red = intel_crtc->lut_r[regno] << 8;
+       *green = intel_crtc->lut_g[regno] << 8;
+       *blue = intel_crtc->lut_b[regno] << 8;
+}
+
 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                                 u16 *blue, uint32_t size)
 {
@@ -3835,6 +3845,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
        .mode_set_base = intel_pipe_set_base,
        .prepare = intel_crtc_prepare,
        .commit = intel_crtc_commit,
+       .load_lut = intel_crtc_load_lut,
 };
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
index 8aa4b7f30daa58bea5280bc9569041e4dc25c157..ef61fe9507e2e86a67ffc2dea1503ac409dbc855 100644 (file)
@@ -175,6 +175,8 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
 extern void intelfb_restore(void);
 extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                    u16 blue, int regno);
+extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                   u16 *blue, int regno);
 
 extern int intel_framebuffer_create(struct drm_device *dev,
                                    struct drm_mode_fb_cmd *mode_cmd,
index 7ba4a232a97fb597db0add8ec079cb299f747906..2b0fe54cd92c7394763add4cf95409b443c05199 100644 (file)
@@ -60,10 +60,12 @@ static struct fb_ops intelfb_ops = {
        .fb_imageblit = cfb_imageblit,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
 };
 
 static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
        .gamma_set = intel_crtc_fb_gamma_set,
+       .gamma_get = intel_crtc_fb_gamma_get,
 };
 
 
@@ -110,6 +112,7 @@ EXPORT_SYMBOL(intelfb_resize);
 static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
                          uint32_t fb_height, uint32_t surface_width,
                          uint32_t surface_height,
+                         uint32_t surface_depth, uint32_t surface_bpp,
                          struct drm_framebuffer **fb_p)
 {
        struct fb_info *info;
@@ -122,12 +125,16 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
        struct device *device = &dev->pdev->dev;
        int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
 
+       /* we don't do packed 24bpp */
+       if (surface_bpp == 24)
+               surface_bpp = 32;
+
        mode_cmd.width = surface_width;
        mode_cmd.height = surface_height;
 
-       mode_cmd.bpp = 32;
+       mode_cmd.bpp = surface_bpp;
        mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
-       mode_cmd.depth = 24;
+       mode_cmd.depth = surface_depth;
 
        size = mode_cmd.pitch * mode_cmd.height;
        size = ALIGN(size, PAGE_SIZE);
@@ -205,7 +212,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 
 //     memset(info->screen_base, 0, size);
 
-       drm_fb_helper_fill_fix(info, fb->pitch);
+       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
        drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
 
        /* FIXME: we really shouldn't expose mmio space at all */
@@ -243,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
        int ret;
 
        DRM_DEBUG("\n");
-       ret = drm_fb_helper_single_fb_probe(dev, intelfb_create);
+       ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
        return ret;
 }
 EXPORT_SYMBOL(intelfb_probe);
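
With surface_depth and surface_bpp now passed through, intelfb_create() derives the pitch and buffer size from the requested bpp instead of hard-coding 32/24. A standalone sketch of that arithmetic; ALIGN_UP and PAGE_SZ are local stand-ins for the kernel's ALIGN and PAGE_SIZE, and the mode values are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint32_t)(a) - 1))  /* a must be a power of two */
#define PAGE_SZ         4096u

int main(void)
{
        uint32_t width = 1280, height = 1024, bpp = 32;
        uint32_t pitch, size;

        if (bpp == 24)          /* the driver refuses packed 24bpp and promotes it to 32 */
                bpp = 32;
        /* bytes per pixel, pitch aligned to 64 bytes, size rounded up to a page */
        pitch = ALIGN_UP(width * ((bpp + 1) / 8), 64);
        size = ALIGN_UP(pitch * height, PAGE_SZ);
        printf("pitch=%u bytes, size=%u bytes\n", pitch, size);
        return 0;
}
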
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
new file mode 100644 (file)
index 0000000..403eb3a
--- /dev/null
@@ -0,0 +1,3 @@
+mkregtable
+*_reg_safe.h
+
index 6a015929deee77ab89779d7e27ee0489e58e59da..14fa9701aeb39a351c0467d618c9d837a373d6c8 100644 (file)
@@ -733,6 +733,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
        .mode_set_base = atombios_crtc_set_base,
        .prepare = atombios_crtc_prepare,
        .commit = atombios_crtc_commit,
+       .load_lut = radeon_crtc_load_lut,
 };
 
 void radeon_atombios_init_crtc(struct drm_device *dev,
index e2b92c445baba24a8f650b2636ce2e8405723b0c..d4e6e6e4a93888a14a738dd3484279d4f5e28d9c 100644 (file)
 #define        VGA_RENDER_CONTROL                              0x0300
 #define                VGA_VSTATUS_CNTL_MASK                           0x00030000
 
-/* AVIVO disable VGA rendering */
-static inline void radeon_avivo_vga_render_disable(struct radeon_device *rdev)
-{
-       u32 vga_render;
-       vga_render = RREG32(VGA_RENDER_CONTROL);
-       vga_render &= ~VGA_VSTATUS_CNTL_MASK;
-       WREG32(VGA_RENDER_CONTROL, vga_render);
-}
-
 #endif
index be51c5f7d0f659f9baa1efaae4dfe662c498d6e5..161094c07d9443957b8f193386e28db258244620 100644 (file)
@@ -32,6 +32,9 @@
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "r100d.h"
+#include "rs100d.h"
+#include "rv200d.h"
+#include "rv250d.h"
 
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
@@ -60,18 +63,7 @@ MODULE_FIRMWARE(FIRMWARE_R520);
 
 /* This files gather functions specifics to:
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
- *
- * Some of these functions might be used by newer ASICs.
  */
-int r200_init(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-void r100_gpu_init(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-int r100_mc_wait_for_idle(struct radeon_device *rdev);
-void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
-void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
-int r100_debugfs_mc_info_init(struct radeon_device *rdev);
-
 
 /*
  * PCI GART
@@ -152,136 +144,6 @@ void r100_pci_gart_fini(struct radeon_device *rdev)
        radeon_gart_fini(rdev);
 }
 
-
-/*
- * MC
- */
-void r100_mc_disable_clients(struct radeon_device *rdev)
-{
-       uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
-
-       /* FIXME: is this function correct for rs100,rs200,rs300 ? */
-       if (r100_gui_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait GUI idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-
-       /* stop display and memory access */
-       ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
-       WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
-       crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
-       WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
-       crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
-
-       r100_gpu_wait_for_vsync(rdev);
-
-       WREG32(RADEON_CRTC_GEN_CNTL,
-              (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
-              RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
-
-       if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
-               crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-
-               r100_gpu_wait_for_vsync2(rdev);
-               WREG32(RADEON_CRTC2_GEN_CNTL,
-                      (crtc2_gen_cntl &
-                       ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
-                      RADEON_CRTC2_DISP_REQ_EN_B);
-       }
-
-       udelay(500);
-}
-
-void r100_mc_setup(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-       int r;
-
-       r = r100_debugfs_mc_info_init(rdev);
-       if (r) {
-               DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
-       }
-       /* Write VRAM size in case we are limiting it */
-       WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
-       /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
-        * if the aperture is 64MB but we have 32MB VRAM
-        * we report only 32MB VRAM but we have to set MC_FB_LOCATION
-        * to 64MB, otherwise the gpu accidentially dies */
-       tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-       tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
-       tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
-       WREG32(RADEON_MC_FB_LOCATION, tmp);
-
-       /* Enable bus mastering */
-       tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
-       WREG32(RADEON_BUS_CNTL, tmp);
-
-       if (rdev->flags & RADEON_IS_AGP) {
-               tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-               tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
-               tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
-               WREG32(RADEON_MC_AGP_LOCATION, tmp);
-               WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
-       } else {
-               WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
-               WREG32(RADEON_AGP_BASE, 0);
-       }
-
-       tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
-       tmp |= (7 << 28);
-       WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
-       (void)RREG32(RADEON_HOST_PATH_CNTL);
-       WREG32(RADEON_HOST_PATH_CNTL, tmp);
-       (void)RREG32(RADEON_HOST_PATH_CNTL);
-}
-
-int r100_mc_init(struct radeon_device *rdev)
-{
-       int r;
-
-       if (r100_debugfs_rbbm_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-       }
-
-       r100_gpu_init(rdev);
-       /* Disable gart which also disable out of gart access */
-       r100_pci_gart_disable(rdev);
-
-       /* Setup GPU memory space */
-       rdev->mc.gtt_location = 0xFFFFFFFFUL;
-       if (rdev->flags & RADEON_IS_AGP) {
-               r = radeon_agp_init(rdev);
-               if (r) {
-                       printk(KERN_WARNING "[drm] Disabling AGP\n");
-                       rdev->flags &= ~RADEON_IS_AGP;
-                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-               } else {
-                       rdev->mc.gtt_location = rdev->mc.agp_base;
-               }
-       }
-       r = radeon_mc_setup(rdev);
-       if (r) {
-               return r;
-       }
-
-       r100_mc_disable_clients(rdev);
-       if (r100_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-
-       r100_mc_setup(rdev);
-       return 0;
-}
-
-void r100_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Interrupts
- */
 int r100_irq_set(struct radeon_device *rdev)
 {
        uint32_t tmp = 0;
@@ -358,10 +220,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
                return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
-
-/*
- * Fence emission
- */
 void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
@@ -377,10 +235,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
 
-
-/*
- * Writeback
- */
 int r100_wb_init(struct radeon_device *rdev)
 {
        int r;
@@ -504,10 +358,6 @@ int r100_copy_blit(struct radeon_device *rdev,
        return r;
 }
 
-
-/*
- * CP
- */
 static int r100_cp_wait_for_idle(struct radeon_device *rdev)
 {
        unsigned i;
@@ -612,6 +462,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
        }
        return err;
 }
+
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
@@ -863,13 +714,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_chunk *ib_chunk;
        volatile uint32_t *ib;
        unsigned i;
        unsigned idx;
 
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx;
        for (i = 0; i <= (pkt->count + 1); i++, idx++) {
                DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -896,7 +745,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
-       header = ib_chunk->kdata[idx];
+       header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = CP_PACKET_GET_TYPE(header);
        pkt->count = CP_PACKET_GET_COUNT(header);
@@ -939,7 +788,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
  */
 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct drm_mode_object *obj;
        struct drm_crtc *crtc;
        struct radeon_crtc *radeon_crtc;
@@ -947,8 +795,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
        int crtc_id;
        int r;
        uint32_t header, h_idx, reg;
+       volatile uint32_t *ib;
 
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       ib = p->ib->ptr;
 
        /* parse the wait until */
        r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -963,24 +812,24 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
                return r;
        }
 
-       if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
+       if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
                DRM_ERROR("vline wait had illegal wait until\n");
                r = -EINVAL;
                return r;
        }
 
        /* jump over the NOP */
-       r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+       r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
        if (r)
                return r;
 
        h_idx = p->idx - 2;
-       p->idx += waitreloc.count;
-       p->idx += p3reloc.count;
+       p->idx += waitreloc.count + 2;
+       p->idx += p3reloc.count + 2;
 
-       header = ib_chunk->kdata[h_idx];
-       crtc_id = ib_chunk->kdata[h_idx + 5];
-       reg = ib_chunk->kdata[h_idx] >> 2;
+       header = radeon_get_ib_value(p, h_idx);
+       crtc_id = radeon_get_ib_value(p, h_idx + 5);
+       reg = CP_PACKET0_GET_REG(header);
        mutex_lock(&p->rdev->ddev->mode_config.mutex);
        obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
@@ -994,16 +843,16 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 
        if (!crtc->enabled) {
                /* if the CRTC isn't enabled - we need to nop out the wait until */
-               ib_chunk->kdata[h_idx + 2] = PACKET2(0);
-               ib_chunk->kdata[h_idx + 3] = PACKET2(0);
+               ib[h_idx + 2] = PACKET2(0);
+               ib[h_idx + 3] = PACKET2(0);
        } else if (crtc_id == 1) {
                switch (reg) {
                case AVIVO_D1MODE_VLINE_START_END:
-                       header &= R300_CP_PACKET0_REG_MASK;
+                       header &= ~R300_CP_PACKET0_REG_MASK;
                        header |= AVIVO_D2MODE_VLINE_START_END >> 2;
                        break;
                case RADEON_CRTC_GUI_TRIG_VLINE:
-                       header &= R300_CP_PACKET0_REG_MASK;
+                       header &= ~R300_CP_PACKET0_REG_MASK;
                        header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
                        break;
                default:
@@ -1011,8 +860,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
                        r = -EINVAL;
                        goto out;
                }
-               ib_chunk->kdata[h_idx] = header;
-               ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+               ib[h_idx] = header;
+               ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
        }
 out:
        mutex_unlock(&p->rdev->ddev->mode_config.mutex);
@@ -1033,7 +882,6 @@ out:
 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                              struct radeon_cs_reloc **cs_reloc)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
@@ -1044,7 +892,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                return -EINVAL;
        }
        *cs_reloc = NULL;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r100_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
@@ -1057,7 +904,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                r100_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
-       idx = ib_chunk->kdata[p3reloc.idx + 1];
+       idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
@@ -1126,7 +973,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
@@ -1134,11 +980,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
        int r;
        int i, face;
        u32 tile_flags = 0;
+       u32 idx_value;
 
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        track = (struct r100_cs_track *)p->track;
 
+       idx_value = radeon_get_ib_value(p, idx);
+
        switch (reg) {
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
@@ -1166,8 +1014,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->zb.robj = reloc->robj;
-               track->zb.offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->zb.offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_RB3D_COLOROFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1178,8 +1026,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->cb[0].robj = reloc->robj;
-               track->cb[0].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->cb[0].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_PP_TXOFFSET_0:
        case RADEON_PP_TXOFFSET_1:
@@ -1192,7 +1040,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T0_0:
@@ -1208,8 +1056,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->textures[0].cube_info[i].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[0].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T1_0:
@@ -1225,8 +1073,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->textures[1].cube_info[i].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[1].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_PP_CUBIC_OFFSET_T2_0:
@@ -1242,12 +1090,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->textures[2].cube_info[i].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[2].cube_info[i].robj = reloc->robj;
                break;
        case RADEON_RE_WIDTH_HEIGHT:
-               track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
+               track->maxy = ((idx_value >> 16) & 0x7FF);
                break;
        case RADEON_RB3D_COLORPITCH:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1263,17 +1111,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
-               tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+               tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;
 
-               track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
+               track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
                break;
        case RADEON_RB3D_DEPTHPITCH:
-               track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
+               track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
                break;
        case RADEON_RB3D_CNTL:
-               switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+               switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
                case 7:
                case 8:
                case 9:
@@ -1291,13 +1139,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
-                                 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+                                 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
                        return -EINVAL;
                }
-               track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
+               track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
                break;
        case RADEON_RB3D_ZSTENCILCNTL:
-               switch (ib_chunk->kdata[idx] & 0xf) {
+               switch (idx_value & 0xf) {
                case 0:
                        track->zb.cpp = 2;
                        break;
@@ -1321,44 +1169,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_PP_CNTL:
                {
-                       uint32_t temp = ib_chunk->kdata[idx] >> 4;
+                       uint32_t temp = idx_value >> 4;
                        for (i = 0; i < track->num_texture; i++)
                                track->textures[i].enabled = !!(temp & (1 << i));
                }
                break;
        case RADEON_SE_VF_CNTL:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = idx_value;
                break;
        case RADEON_SE_VTX_FMT:
-               track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
+               track->vtx_size = r100_get_vtx_size(idx_value);
                break;
        case RADEON_PP_TEX_SIZE_0:
        case RADEON_PP_TEX_SIZE_1:
        case RADEON_PP_TEX_SIZE_2:
                i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
-               track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
-               track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+               track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+               track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
                break;
        case RADEON_PP_TEX_PITCH_0:
        case RADEON_PP_TEX_PITCH_1:
        case RADEON_PP_TEX_PITCH_2:
                i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
-               track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
+               track->textures[i].pitch = idx_value + 32;
                break;
        case RADEON_PP_TXFILTER_0:
        case RADEON_PP_TXFILTER_1:
        case RADEON_PP_TXFILTER_2:
                i = (reg - RADEON_PP_TXFILTER_0) / 24;
-               track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
+               track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
                                                 >> RADEON_MAX_MIP_LEVEL_SHIFT);
-               tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
+               tmp = (idx_value >> 23) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_w = false;
-               tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
+               tmp = (idx_value >> 27) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_h = false;
                break;
@@ -1366,16 +1214,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
        case RADEON_PP_TXFORMAT_1:
        case RADEON_PP_TXFORMAT_2:
                i = (reg - RADEON_PP_TXFORMAT_0) / 24;
-               if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
+               if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
                        track->textures[i].use_pitch = 1;
                } else {
                        track->textures[i].use_pitch = 0;
-                       track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-                       track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+                       track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+                       track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
                }
-               if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+               if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
                        track->textures[i].tex_coord_type = 2;
-               switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
+               switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
                case RADEON_TXFORMAT_I8:
                case RADEON_TXFORMAT_RGB332:
                case RADEON_TXFORMAT_Y8:
@@ -1402,13 +1250,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].cpp = 4;
                        break;
                }
-               track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
-               track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
+               track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+               track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
                break;
        case RADEON_PP_CUBIC_FACES_0:
        case RADEON_PP_CUBIC_FACES_1:
        case RADEON_PP_CUBIC_FACES_2:
-               tmp = ib_chunk->kdata[idx];
+               tmp = idx_value;
                i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
                for (face = 0; face < 4; face++) {
                        track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@ -1427,15 +1275,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
                                         struct radeon_cs_packet *pkt,
                                         struct radeon_object *robj)
 {
-       struct radeon_cs_chunk *ib_chunk;
        unsigned idx;
-
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       u32 value;
        idx = pkt->idx + 1;
-       if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
+       value = radeon_get_ib_value(p, idx + 2);
+       if ((value + 1) > radeon_object_size(robj)) {
                DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
                          "(need %u have %lu) !\n",
-                         ib_chunk->kdata[idx+2] + 1,
+                         value + 1,
                          radeon_object_size(robj));
                return -EINVAL;
        }
@@ -1445,59 +1292,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 static int r100_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        unsigned idx;
-       unsigned i, c;
        volatile uint32_t *ib;
        int r;
 
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
-               c = ib_chunk->kdata[idx++];
-               track->num_arrays = c;
-               for (i = 0; i < (c - 1); i += 2, idx += 3) {
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 0].robj = reloc->robj;
-                       track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-                       track->arrays[i + 0].esize &= 0x7F;
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 1].robj = reloc->robj;
-                       track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
-                       track->arrays[i + 1].esize &= 0x7F;
-               }
-               if (c & 1) {
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 0].robj = reloc->robj;
-                       track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-                       track->arrays[i + 0].esize &= 0x7F;
-               }
+               r = r100_packet3_load_vbpntr(p, pkt, idx);
+               if (r)
+                       return r;
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1506,7 +1314,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
@@ -1520,27 +1328,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
                track->num_arrays = 1;
-               track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);
+               track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
 
                track->arrays[0].robj = reloc->robj;
                track->arrays[0].esize = track->vtx_size;
 
-               track->max_indx = ib_chunk->kdata[idx+1];
+               track->max_indx = radeon_get_ib_value(p, idx+1);
 
-               track->vap_vf_cntl = ib_chunk->kdata[idx+3];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
        case PACKET3_3D_DRAW_IMMD:
-               if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
+               if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
-               track->vap_vf_cntl = ib_chunk->kdata[idx+1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
@@ -1548,11 +1356,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
                break;
                /* triggers drawing using in-packet vertex data */
        case PACKET3_3D_DRAW_IMMD_2:
-               if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
+               if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
                if (r)
@@ -1560,28 +1368,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
                break;
                /* triggers drawing using in-packet vertex data */
        case PACKET3_3D_DRAW_VBUF_2:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing of vertex buffers setup elsewhere */
        case PACKET3_3D_DRAW_INDX_2:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing using indices to vertex buffer */
        case PACKET3_3D_DRAW_VBUF:
-               track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
                break;
                /* triggers drawing of vertex buffers setup elsewhere */
        case PACKET3_3D_DRAW_INDX:
-               track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r)
                        return r;
@@ -2033,7 +1841,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
        r100_pll_errata_after_data(rdev);
 }
 
-int r100_init(struct radeon_device *rdev)
+void r100_set_safe_registers(struct radeon_device *rdev)
 {
        if (ASIC_IS_RN50(rdev)) {
                rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
@@ -2042,9 +1850,8 @@ int r100_init(struct radeon_device *rdev)
                rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
                rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
        } else {
-               return r200_init(rdev);
+               r200_set_safe_registers(rdev);
        }
-       return 0;
 }
 
 /*
@@ -2342,9 +2149,11 @@ void r100_bandwidth_update(struct radeon_device *rdev)
                mode1 = &rdev->mode_info.crtcs[0]->base.mode;
                pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
        }
-       if (rdev->mode_info.crtcs[1]->base.enabled) {
-               mode2 = &rdev->mode_info.crtcs[1]->base.mode;
-               pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+       if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+               if (rdev->mode_info.crtcs[1]->base.enabled) {
+                       mode2 = &rdev->mode_info.crtcs[1]->base.mode;
+                       pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+               }
        }
 
        min_mem_eff.full = rfixed_const_8(0);
@@ -3157,7 +2966,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
        WREG32(R_000740_CP_CSQ_CNTL, 0);
 
        /* Save few CRTC registers */
-       save->GENMO_WT = RREG32(R_0003C0_GENMO_WT);
+       save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
        save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
        save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
        save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
@@ -3167,7 +2976,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
        }
 
        /* Disable VGA aperture access */
-       WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT);
+       WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
        /* Disable cursor, overlay, crtc */
        WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
        WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
@@ -3199,10 +3008,264 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
                                rdev->mc.vram_location);
        }
        /* Restore CRTC registers */
-       WREG32(R_0003C0_GENMO_WT, save->GENMO_WT);
+       WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
        WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
        WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
        if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
                WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
        }
 }
+
+void r100_vga_render_disable(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       tmp = RREG8(R_0003C2_GENMO_WT);
+       WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
+}
+
+static void r100_debugfs(struct radeon_device *rdev)
+{
+       int r;
+
+       r = r100_debugfs_mc_info_init(rdev);
+       if (r)
+               dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+}
+
+static void r100_mc_program(struct radeon_device *rdev)
+{
+       struct r100_mc_save save;
+
+       /* Stops all mc clients */
+       r100_mc_stop(rdev, &save);
+       if (rdev->flags & RADEON_IS_AGP) {
+               WREG32(R_00014C_MC_AGP_LOCATION,
+                       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+                       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+               WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+               if (rdev->family > CHIP_RV200)
+                       WREG32(R_00015C_AGP_BASE_2,
+                               upper_32_bits(rdev->mc.agp_base) & 0xff);
+       } else {
+               WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+               WREG32(R_000170_AGP_BASE, 0);
+               if (rdev->family > CHIP_RV200)
+                       WREG32(R_00015C_AGP_BASE_2, 0);
+       }
+       /* Wait for mc idle */
+       if (r100_mc_wait_for_idle(rdev))
+               dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
+       /* Program MC, should be a 32bits limited address space */
+       WREG32(R_000148_MC_FB_LOCATION,
+               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+       r100_mc_resume(rdev, &save);
+}
+
+void r100_clock_startup(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       if (radeon_dynclks != -1 && radeon_dynclks)
+               radeon_legacy_set_clock_gating(rdev, 1);
+       /* We need to force on some of the block */
+       tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+       tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+       if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
+               tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
+       WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r100_startup(struct radeon_device *rdev)
+{
+       int r;
+
+       r100_mc_program(rdev);
+       /* Resume clock */
+       r100_clock_startup(rdev);
+       /* Initialize GPU configuration (# pipes, ...) */
+       r100_gpu_init(rdev);
+       /* Initialize GART (initialize after TTM so we can allocate
+        * memory through TTM but finalize after TTM) */
+       if (rdev->flags & RADEON_IS_PCI) {
+               r = r100_pci_gart_enable(rdev);
+               if (r)
+                       return r;
+       }
+       /* Enable IRQ */
+       rdev->irq.sw_int = true;
+       r100_irq_set(rdev);
+       /* 1M ring buffer */
+       r = r100_cp_init(rdev, 1024 * 1024);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+               return r;
+       }
+       r = r100_wb_init(rdev);
+       if (r)
+               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
+       r = r100_ib_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               return r;
+       }
+       return 0;
+}
+
+int r100_resume(struct radeon_device *rdev)
+{
+       /* Make sure the GART is not enabled */
+       if (rdev->flags & RADEON_IS_PCI)
+               r100_pci_gart_disable(rdev);
+       /* Resume clock before doing reset */
+       r100_clock_startup(rdev);
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* post */
+       radeon_combios_asic_init(rdev->ddev);
+       /* Resume clock after posting */
+       r100_clock_startup(rdev);
+       return r100_startup(rdev);
+}
+
+int r100_suspend(struct radeon_device *rdev)
+{
+       r100_cp_disable(rdev);
+       r100_wb_disable(rdev);
+       r100_irq_disable(rdev);
+       if (rdev->flags & RADEON_IS_PCI)
+               r100_pci_gart_disable(rdev);
+       return 0;
+}
+
+void r100_fini(struct radeon_device *rdev)
+{
+       r100_suspend(rdev);
+       r100_cp_fini(rdev);
+       r100_wb_fini(rdev);
+       r100_ib_fini(rdev);
+       radeon_gem_fini(rdev);
+       if (rdev->flags & RADEON_IS_PCI)
+               r100_pci_gart_fini(rdev);
+       radeon_irq_kms_fini(rdev);
+       radeon_fence_driver_fini(rdev);
+       radeon_object_fini(rdev);
+       radeon_atombios_fini(rdev);
+       kfree(rdev->bios);
+       rdev->bios = NULL;
+}
+
+int r100_mc_init(struct radeon_device *rdev)
+{
+       int r;
+       u32 tmp;
+
+       /* Setup GPU memory space */
+       rdev->mc.vram_location = 0xFFFFFFFFUL;
+       rdev->mc.gtt_location = 0xFFFFFFFFUL;
+       if (rdev->flags & RADEON_IS_IGP) {
+               tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+               rdev->mc.vram_location = tmp << 16;
+       }
+       if (rdev->flags & RADEON_IS_AGP) {
+               r = radeon_agp_init(rdev);
+               if (r) {
+                       printk(KERN_WARNING "[drm] Disabling AGP\n");
+                       rdev->flags &= ~RADEON_IS_AGP;
+                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+               } else {
+                       rdev->mc.gtt_location = rdev->mc.agp_base;
+               }
+       }
+       r = radeon_mc_setup(rdev);
+       if (r)
+               return r;
+       return 0;
+}
+
+int r100_init(struct radeon_device *rdev)
+{
+       int r;
+
+       /* Register debugfs file specific to this group of asics */
+       r100_debugfs(rdev);
+       /* Disable VGA */
+       r100_vga_render_disable(rdev);
+       /* Initialize scratch registers */
+       radeon_scratch_init(rdev);
+       /* Initialize surface registers */
+       radeon_surface_init(rdev);
+       /* TODO: disabling VGA needs to use VGA request */
+       /* BIOS*/
+       if (!radeon_get_bios(rdev)) {
+               if (ASIC_IS_AVIVO(rdev))
+                       return -EINVAL;
+       }
+       if (rdev->is_atom_bios) {
+               dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+               return -EINVAL;
+       } else {
+               r = radeon_combios_init(rdev);
+               if (r)
+                       return r;
+       }
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev,
+                       "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* check if cards are posted or not */
+       if (!radeon_card_posted(rdev) && rdev->bios) {
+               DRM_INFO("GPU not posted. posting now...\n");
+               radeon_combios_asic_init(rdev->ddev);
+       }
+       /* Set asic errata */
+       r100_errata(rdev);
+       /* Initialize clocks */
+       radeon_get_clock_info(rdev->ddev);
+       /* Get vram information */
+       r100_vram_info(rdev);
+       /* Initialize memory controller (also test AGP) */
+       r = r100_mc_init(rdev);
+       if (r)
+               return r;
+       /* Fence driver */
+       r = radeon_fence_driver_init(rdev);
+       if (r)
+               return r;
+       r = radeon_irq_kms_init(rdev);
+       if (r)
+               return r;
+       /* Memory manager */
+       r = radeon_object_init(rdev);
+       if (r)
+               return r;
+       if (rdev->flags & RADEON_IS_PCI) {
+               r = r100_pci_gart_init(rdev);
+               if (r)
+                       return r;
+       }
+       r100_set_safe_registers(rdev);
+       rdev->accel_working = true;
+       r = r100_startup(rdev);
+       if (r) {
+               /* Something went wrong with the accel init, stop acceleration */
+               dev_err(rdev->dev, "Disabling GPU acceleration\n");
+               r100_suspend(rdev);
+               r100_cp_fini(rdev);
+               r100_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               if (rdev->flags & RADEON_IS_PCI)
+                       r100_pci_gart_fini(rdev);
+               radeon_irq_kms_fini(rdev);
+               rdev->accel_working = false;
+       }
+       return 0;
+}
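
The r100.c rework above replaces the old r100_mc_init()/r100_mc_setup() pair with an r100_init() -> r100_startup() flow that unwinds and clears accel_working when a late step fails. A hypothetical userspace sketch of that ordering; every function here is a stub standing in for the corresponding r100_* call:

#include <stdio.h>
#include <stdbool.h>

static int cp_init(void)  { puts("cp init");  return 0; }
static int wb_init(void)  { puts("wb init");  return 0; }
static int ib_init(void)  { puts("ib init");  return -1; }      /* simulate a failure */
static void cp_fini(void) { puts("cp fini"); }
static void wb_fini(void) { puts("wb fini"); }
static void ib_fini(void) { puts("ib fini"); }

static bool accel_working;

static int startup(void)
{
        if (cp_init())
                return -1;
        if (wb_init())
                puts("wb init failed, continuing");     /* the driver only warns here */
        if (ib_init())
                return -1;
        return 0;
}

int main(void)
{
        accel_working = true;
        if (startup()) {
                /* mirror the tail of r100_init(): tear the partial state down
                 * and keep running without acceleration instead of failing */
                puts("Disabling GPU acceleration");
                cp_fini();
                wb_fini();
                ib_fini();
                accel_working = false;
        }
        return 0;
}
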
index 70a82eda394a6e0c70a2676840eb117aa31fd3a3..0daf0d76a891b240874ba9637ff2fc4c9dda5b6a 100644 (file)
@@ -84,6 +84,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                       struct radeon_cs_packet *pkt,
                       unsigned idx, unsigned reg);
 
+
+
 static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
                                          struct radeon_cs_packet *pkt,
                                          unsigned idx,
@@ -93,9 +95,7 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
        u32 tile_flags = 0;
        u32 tmp;
        struct radeon_cs_reloc *reloc;
-       struct radeon_cs_chunk *ib_chunk;
-
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
+       u32 value;
 
        r = r100_cs_packet_next_reloc(p, &reloc);
        if (r) {
@@ -104,7 +104,8 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
                r100_cs_dump_packet(p, pkt);
                return r;
        }
-       tmp = ib_chunk->kdata[idx] & 0x003fffff;
+       value = radeon_get_ib_value(p, idx);
+       tmp = value & 0x003fffff;
        tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
 
        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
@@ -119,6 +120,64 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
        }
 
        tmp |= tile_flags;
-       p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
+       p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
        return 0;
 }
+
+static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+                                          struct radeon_cs_packet *pkt,
+                                          int idx)
+{
+       unsigned c, i;
+       struct radeon_cs_reloc *reloc;
+       struct r100_cs_track *track;
+       int r = 0;
+       volatile uint32_t *ib;
+       u32 idx_value;
+
+       ib = p->ib->ptr;
+       track = (struct r100_cs_track *)p->track;
+       c = radeon_get_ib_value(p, idx++) & 0x1F;
+       track->num_arrays = c;
+       for (i = 0; i < (c - 1); i+=2, idx+=3) {
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for packet3 %d\n",
+                                 pkt->opcode);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               idx_value = radeon_get_ib_value(p, idx);
+               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+
+               track->arrays[i + 0].esize = idx_value >> 8;
+               track->arrays[i + 0].robj = reloc->robj;
+               track->arrays[i + 0].esize &= 0x7F;
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for packet3 %d\n",
+                                 pkt->opcode);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+               track->arrays[i + 1].robj = reloc->robj;
+               track->arrays[i + 1].esize = idx_value >> 24;
+               track->arrays[i + 1].esize &= 0x7F;
+       }
+       if (c & 1) {
+               r = r100_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       DRM_ERROR("No reloc for packet3 %d\n",
+                                         pkt->opcode);
+                       r100_cs_dump_packet(p, pkt);
+                       return r;
+               }
+               idx_value = radeon_get_ib_value(p, idx);
+               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+               track->arrays[i + 0].robj = reloc->robj;
+               track->arrays[i + 0].esize = idx_value >> 8;
+               track->arrays[i + 0].esize &= 0x7F;
+       }
+       return r;
+}
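
The parser changes above route every indirect-buffer read through radeon_get_ib_value(p, idx) instead of indexing ib_chunk->kdata directly, so call sites no longer depend on how the IB dwords are stored. A hypothetical standalone sketch of that accessor pattern; the struct below is illustrative, not the real radeon_cs_parser layout:

#include <stdio.h>
#include <stdint.h>

/* Two possible backings for the command stream: a kernel-side copy or a
 * directly mapped buffer.  Call sites only ever use the accessor. */
struct fake_parser {
        const uint32_t *kdata;  /* copied dwords, may be NULL */
        const uint32_t *mapped; /* mapped dwords, used when kdata is NULL */
};

static uint32_t get_ib_value(const struct fake_parser *p, unsigned int idx)
{
        return p->kdata ? p->kdata[idx] : p->mapped[idx];
}

int main(void)
{
        uint32_t words[4] = { 0x11, 0x22, 0x33, 0x44 };
        struct fake_parser p = { .kdata = NULL, .mapped = words };

        printf("ib[2] = 0x%02x\n", get_ib_value(&p, 2));
        return 0;
}
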
index c4b257ec920e29e5760ac4819c54cfd0f8c25185..df29a630c466454d0ee78a87f3577d824f18b6b3 100644 (file)
 #define   S_000054_VCRTC_IDX_MASTER(x)                 (((x) & 0x7F) << 24)
 #define   G_000054_VCRTC_IDX_MASTER(x)                 (((x) >> 24) & 0x7F)
 #define   C_000054_VCRTC_IDX_MASTER                    0x80FFFFFF
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
+#define R_00014C_MC_AGP_LOCATION                     0x00014C
+#define   S_00014C_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_00014C_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_00014C_MC_AGP_START                        0xFFFF0000
+#define   S_00014C_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_00014C_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_00014C_MC_AGP_TOP                          0x0000FFFF
+#define R_000170_AGP_BASE                            0x000170
+#define   S_000170_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000170_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000170_AGP_BASE_ADDR                       0x00000000
 #define R_00023C_DISPLAY_BASE_ADDR                   0x00023C
 #define   S_00023C_DISPLAY_BASE_ADDR(x)                (((x) & 0xFFFFFFFF) << 0)
 #define   G_00023C_DISPLAY_BASE_ADDR(x)                (((x) >> 0) & 0xFFFFFFFF)
 #define   S_000360_CUR2_LOCK(x)                        (((x) & 0x1) << 31)
 #define   G_000360_CUR2_LOCK(x)                        (((x) >> 31) & 0x1)
 #define   C_000360_CUR2_LOCK                           0x7FFFFFFF
-#define R_0003C0_GENMO_WT                            0x0003C0
-#define   S_0003C0_GENMO_MONO_ADDRESS_B(x)             (((x) & 0x1) << 0)
-#define   G_0003C0_GENMO_MONO_ADDRESS_B(x)             (((x) >> 0) & 0x1)
-#define   C_0003C0_GENMO_MONO_ADDRESS_B                0xFFFFFFFE
-#define   S_0003C0_VGA_RAM_EN(x)                       (((x) & 0x1) << 1)
-#define   G_0003C0_VGA_RAM_EN(x)                       (((x) >> 1) & 0x1)
-#define   C_0003C0_VGA_RAM_EN                          0xFFFFFFFD
-#define   S_0003C0_VGA_CKSEL(x)                        (((x) & 0x3) << 2)
-#define   G_0003C0_VGA_CKSEL(x)                        (((x) >> 2) & 0x3)
-#define   C_0003C0_VGA_CKSEL                           0xFFFFFFF3
-#define   S_0003C0_ODD_EVEN_MD_PGSEL(x)                (((x) & 0x1) << 5)
-#define   G_0003C0_ODD_EVEN_MD_PGSEL(x)                (((x) >> 5) & 0x1)
-#define   C_0003C0_ODD_EVEN_MD_PGSEL                   0xFFFFFFDF
-#define   S_0003C0_VGA_HSYNC_POL(x)                    (((x) & 0x1) << 6)
-#define   G_0003C0_VGA_HSYNC_POL(x)                    (((x) >> 6) & 0x1)
-#define   C_0003C0_VGA_HSYNC_POL                       0xFFFFFFBF
-#define   S_0003C0_VGA_VSYNC_POL(x)                    (((x) & 0x1) << 7)
-#define   G_0003C0_VGA_VSYNC_POL(x)                    (((x) >> 7) & 0x1)
-#define   C_0003C0_VGA_VSYNC_POL                       0xFFFFFF7F
+#define R_0003C2_GENMO_WT                            0x0003C0
+#define   S_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) & 0x1) << 0)
+#define   G_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) >> 0) & 0x1)
+#define   C_0003C2_GENMO_MONO_ADDRESS_B                0xFE
+#define   S_0003C2_VGA_RAM_EN(x)                       (((x) & 0x1) << 1)
+#define   G_0003C2_VGA_RAM_EN(x)                       (((x) >> 1) & 0x1)
+#define   C_0003C2_VGA_RAM_EN                          0xFD
+#define   S_0003C2_VGA_CKSEL(x)                        (((x) & 0x3) << 2)
+#define   G_0003C2_VGA_CKSEL(x)                        (((x) >> 2) & 0x3)
+#define   C_0003C2_VGA_CKSEL                           0xF3
+#define   S_0003C2_ODD_EVEN_MD_PGSEL(x)                (((x) & 0x1) << 5)
+#define   G_0003C2_ODD_EVEN_MD_PGSEL(x)                (((x) >> 5) & 0x1)
+#define   C_0003C2_ODD_EVEN_MD_PGSEL                   0xDF
+#define   S_0003C2_VGA_HSYNC_POL(x)                    (((x) & 0x1) << 6)
+#define   G_0003C2_VGA_HSYNC_POL(x)                    (((x) >> 6) & 0x1)
+#define   C_0003C2_VGA_HSYNC_POL                       0xBF
+#define   S_0003C2_VGA_VSYNC_POL(x)                    (((x) & 0x1) << 7)
+#define   G_0003C2_VGA_VSYNC_POL(x)                    (((x) >> 7) & 0x1)
+#define   C_0003C2_VGA_VSYNC_POL                       0x7F
 #define R_0003F8_CRTC2_GEN_CNTL                      0x0003F8
 #define   S_0003F8_CRTC2_DBL_SCAN_EN(x)                (((x) & 0x1) << 0)
 #define   G_0003F8_CRTC2_DBL_SCAN_EN(x)                (((x) >> 0) & 0x1)
 #define   S_000774_SCRATCH_ADDR(x)                     (((x) & 0x7FFFFFF) << 5)
 #define   G_000774_SCRATCH_ADDR(x)                     (((x) >> 5) & 0x7FFFFFF)
 #define   C_000774_SCRATCH_ADDR                        0x0000001F
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
 #define R_000E40_RBBM_STATUS                         0x000E40
 #define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
 #define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
 #define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
 #define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
 
+
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_TCLK_SRC_SEL(x)                     (((x) & 0x7) << 8)
+#define   G_00000D_TCLK_SRC_SEL(x)                     (((x) >> 8) & 0x7)
+#define   C_00000D_TCLK_SRC_SEL                        0xFFFFF8FF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP(x)                       (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP(x)                       (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP                          0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+
+
 #endif
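These register headers follow one naming convention: S_<offset>_<FIELD>(x) shifts a value into the field, G_<offset>_<FIELD>(x) extracts it from a register word, and C_<offset>_<FIELD> is the inverse mask used to clear it. Below is a minimal standalone sketch of a read-modify-write with the SCLK_CNTL macros defined above (assumed in scope); fake_pll_regs, rreg32_pll() and wreg32_pll() are illustrative stand-ins for the driver's PLL accessors (RREG32_PLL/WREG32_PLL), and the chosen field values are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Stand-in PLL register file; the real driver uses RREG32_PLL/WREG32_PLL. */
static uint32_t fake_pll_regs[0x100];

static uint32_t rreg32_pll(uint32_t reg)
{
	return fake_pll_regs[reg & 0xFF];
}

static void wreg32_pll(uint32_t reg, uint32_t v)
{
	fake_pll_regs[reg & 0xFF] = v;
}

int main(void)
{
	uint32_t tmp = rreg32_pll(R_00000D_SCLK_CNTL);

	tmp &= C_00000D_SCLK_SRC_SEL;		/* clear the 3-bit source-select field */
	tmp |= S_00000D_SCLK_SRC_SEL(1);	/* pick source 1 (arbitrary) */
	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
	wreg32_pll(R_00000D_SCLK_CNTL, tmp);

	/* G_ macros pull a field back out of a raw register value. */
	printf("src_sel=%u cp_forced=%u\n",
	       (unsigned)G_00000D_SCLK_SRC_SEL(tmp),
	       (unsigned)G_00000D_FORCE_CP(tmp));
	return 0;
}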
index 568c74bfba3de4c7648e1cf8baee6537b545679d..eb740fc3549f82c5530c5450f4862ca8cf764fa3 100644 (file)
@@ -96,7 +96,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                       struct radeon_cs_packet *pkt,
                       unsigned idx, unsigned reg)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
@@ -105,11 +104,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        int i;
        int face;
        u32 tile_flags = 0;
+       u32 idx_value;
 
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        track = (struct r100_cs_track *)p->track;
-
+       idx_value = radeon_get_ib_value(p, idx);
        switch (reg) {
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
@@ -137,8 +136,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->zb.robj = reloc->robj;
-               track->zb.offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->zb.offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_RB3D_COLOROFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -149,8 +148,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->cb[0].robj = reloc->robj;
-               track->cb[0].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->cb[0].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R200_PP_TXOFFSET_0:
        case R200_PP_TXOFFSET_1:
@@ -166,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
                break;
        case R200_PP_CUBIC_OFFSET_F1_0:
@@ -208,12 +207,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->textures[i].cube_info[face - 1].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].cube_info[face - 1].robj = reloc->robj;
                break;
        case RADEON_RE_WIDTH_HEIGHT:
-               track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
+               track->maxy = ((idx_value >> 16) & 0x7FF);
                break;
        case RADEON_RB3D_COLORPITCH:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -229,17 +228,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
-               tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+               tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;
 
-               track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
+               track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
                break;
        case RADEON_RB3D_DEPTHPITCH:
-               track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
+               track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
                break;
        case RADEON_RB3D_CNTL:
-               switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+               switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
                case 7:
                case 8:
                case 9:
@@ -257,18 +256,18 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
-                                 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+                                 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
                        return -EINVAL;
                }
-               if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) {
+               if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
                        DRM_ERROR("No support for depth xy offset in kms\n");
                        return -EINVAL;
                }
 
-               track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
+               track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
                break;
        case RADEON_RB3D_ZSTENCILCNTL:
-               switch (ib_chunk->kdata[idx] & 0xf) {
+               switch (idx_value & 0xf) {
                case 0:
                        track->zb.cpp = 2;
                        break;
@@ -292,27 +291,27 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case RADEON_PP_CNTL:
                {
-                       uint32_t temp = ib_chunk->kdata[idx] >> 4;
+                       uint32_t temp = idx_value >> 4;
                        for (i = 0; i < track->num_texture; i++)
                                track->textures[i].enabled = !!(temp & (1 << i));
                }
                break;
        case RADEON_SE_VF_CNTL:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = idx_value;
                break;
        case 0x210c:
                /* VAP_VF_MAX_VTX_INDX */
-               track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
+               track->max_indx = idx_value & 0x00FFFFFFUL;
                break;
        case R200_SE_VTX_FMT_0:
-               track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]);
+               track->vtx_size = r200_get_vtx_size_0(idx_value);
                break;
        case R200_SE_VTX_FMT_1:
-               track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]);
+               track->vtx_size += r200_get_vtx_size_1(idx_value);
                break;
        case R200_PP_TXSIZE_0:
        case R200_PP_TXSIZE_1:
@@ -321,8 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        case R200_PP_TXSIZE_4:
        case R200_PP_TXSIZE_5:
                i = (reg - R200_PP_TXSIZE_0) / 32;
-               track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
-               track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+               track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+               track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
                break;
        case R200_PP_TXPITCH_0:
        case R200_PP_TXPITCH_1:
@@ -331,7 +330,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        case R200_PP_TXPITCH_4:
        case R200_PP_TXPITCH_5:
                i = (reg - R200_PP_TXPITCH_0) / 32;
-               track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
+               track->textures[i].pitch = idx_value + 32;
                break;
        case R200_PP_TXFILTER_0:
        case R200_PP_TXFILTER_1:
@@ -340,12 +339,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        case R200_PP_TXFILTER_4:
        case R200_PP_TXFILTER_5:
                i = (reg - R200_PP_TXFILTER_0) / 32;
-               track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK)
+               track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
                                                 >> R200_MAX_MIP_LEVEL_SHIFT);
-               tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
+               tmp = (idx_value >> 23) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_w = false;
-               tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
+               tmp = (idx_value >> 27) & 0x7;
                if (tmp == 2 || tmp == 6)
                        track->textures[i].roundup_h = false;
                break;
@@ -364,8 +363,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        case R200_PP_TXFORMAT_X_4:
        case R200_PP_TXFORMAT_X_5:
                i = (reg - R200_PP_TXFORMAT_X_0) / 32;
-               track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7;
-               tmp = (ib_chunk->kdata[idx] >> 16) & 0x3;
+               track->textures[i].txdepth = idx_value & 0x7;
+               tmp = (idx_value >> 16) & 0x3;
                /* 2D, 3D, CUBE */
                switch (tmp) {
                case 0:
@@ -389,14 +388,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        case R200_PP_TXFORMAT_4:
        case R200_PP_TXFORMAT_5:
                i = (reg - R200_PP_TXFORMAT_0) / 32;
-               if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) {
+               if (idx_value & R200_TXFORMAT_NON_POWER2) {
                        track->textures[i].use_pitch = 1;
                } else {
                        track->textures[i].use_pitch = 0;
-                       track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
-                       track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+                       track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+                       track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
                }
-               switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
+               switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
                case R200_TXFORMAT_I8:
                case R200_TXFORMAT_RGB332:
                case R200_TXFORMAT_Y8:
@@ -424,8 +423,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
                        track->textures[i].cpp = 4;
                        break;
                }
-               track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
-               track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
+               track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+               track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
                break;
        case R200_PP_CUBIC_FACES_0:
        case R200_PP_CUBIC_FACES_1:
@@ -433,7 +432,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        case R200_PP_CUBIC_FACES_3:
        case R200_PP_CUBIC_FACES_4:
        case R200_PP_CUBIC_FACES_5:
-               tmp = ib_chunk->kdata[idx];
+               tmp = idx_value;
                i = (reg - R200_PP_CUBIC_FACES_0) / 32;
                for (face = 0; face < 4; face++) {
                        track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@ -448,9 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
        return 0;
 }
 
-int r200_init(struct radeon_device *rdev)
+void r200_set_safe_registers(struct radeon_device *rdev)
 {
        rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
        rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
-       return 0;
 }
index bb151ecdf8fcb1ec80e8c22421f53bdc7f14b291..e08c4a8974ca2130245ea4c34f5273e9e3d882d0 100644 (file)
 #include "radeon_drm.h"
 #include "r100_track.h"
 #include "r300d.h"
-
+#include "rv350d.h"
 #include "r300_reg_safe.h"
 
-/* r300,r350,rv350,rv370,rv380 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_cp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
-int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-int r100_pci_gart_enable(struct radeon_device *rdev);
-void r100_mc_setup(struct radeon_device *rdev);
-void r100_mc_disable_clients(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-int r100_cs_packet_parse(struct radeon_cs_parser *p,
-                        struct radeon_cs_packet *pkt,
-                        unsigned idx);
-int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
-int r100_cs_parse_packet0(struct radeon_cs_parser *p,
-                         struct radeon_cs_packet *pkt,
-                         const unsigned *auth, unsigned n,
-                         radeon_packet0_check_t check);
-int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
-                                        struct radeon_cs_packet *pkt,
-                                        struct radeon_object *robj);
-
-/* This files gather functions specifics to:
- * r300,r350,rv350,rv370,rv380
- *
- * Some of these functions might be used by newer ASICs.
- */
-void r300_gpu_init(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
-
+/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380 */
 
 /*
  * rv370,rv380 PCIE GART
  */
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
 void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 {
        uint32_t tmp;
@@ -182,59 +155,6 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
        radeon_gart_fini(rdev);
 }
 
-/*
- * MC
- */
-int r300_mc_init(struct radeon_device *rdev)
-{
-       int r;
-
-       if (r100_debugfs_rbbm_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-       }
-
-       r300_gpu_init(rdev);
-       r100_pci_gart_disable(rdev);
-       if (rdev->flags & RADEON_IS_PCIE) {
-               rv370_pcie_gart_disable(rdev);
-       }
-
-       /* Setup GPU memory space */
-       rdev->mc.vram_location = 0xFFFFFFFFUL;
-       rdev->mc.gtt_location = 0xFFFFFFFFUL;
-       if (rdev->flags & RADEON_IS_AGP) {
-               r = radeon_agp_init(rdev);
-               if (r) {
-                       printk(KERN_WARNING "[drm] Disabling AGP\n");
-                       rdev->flags &= ~RADEON_IS_AGP;
-                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-               } else {
-                       rdev->mc.gtt_location = rdev->mc.agp_base;
-               }
-       }
-       r = radeon_mc_setup(rdev);
-       if (r) {
-               return r;
-       }
-
-       /* Program GPU memory space */
-       r100_mc_disable_clients(rdev);
-       if (r300_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-       r100_mc_setup(rdev);
-       return 0;
-}
-
-void r300_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Fence emission
- */
 void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
@@ -260,10 +180,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
 
-
-/*
- * Global GPU functions
- */
 int r300_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
@@ -582,11 +498,6 @@ void r300_vram_info(struct radeon_device *rdev)
        r100_vram_init_sizes(rdev);
 }
 
-
-/*
- * PCIE Lanes
- */
-
 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
 {
        uint32_t link_width_cntl, mask;
@@ -646,10 +557,6 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
 
 }
 
-
-/*
- * Debugfs info
- */
 #if defined(CONFIG_DEBUG_FS)
 static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
 {
@@ -680,7 +587,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = {
 };
 #endif
 
-int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
@@ -689,25 +596,22 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
 #endif
 }
 
-
-/*
- * CS functions
- */
 static int r300_packet0_check(struct radeon_cs_parser *p,
                struct radeon_cs_packet *pkt,
                unsigned idx, unsigned reg)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;
        unsigned i;
        int r;
+       u32 idx_value;
 
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        track = (struct r100_cs_track *)p->track;
+       idx_value = radeon_get_ib_value(p, idx);
+
        switch(reg) {
        case AVIVO_D1MODE_VLINE_START_END:
        case RADEON_CRTC_GUI_TRIG_VLINE:
@@ -738,8 +642,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->cb[i].robj = reloc->robj;
-               track->cb[i].offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->cb[i].offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -750,8 +654,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        return r;
                }
                track->zb.robj = reloc->robj;
-               track->zb.offset = ib_chunk->kdata[idx];
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               track->zb.offset = idx_value;
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
@@ -777,32 +681,32 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
                break;
        /* Tracked registers */
        case 0x2084:
                /* VAP_VF_CNTL */
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = idx_value;
                break;
        case 0x20B4:
                /* VAP_VTX_SIZE */
-               track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
+               track->vtx_size = idx_value & 0x7F;
                break;
        case 0x2134:
                /* VAP_VF_MAX_VTX_INDX */
-               track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
+               track->max_indx = idx_value & 0x00FFFFFFUL;
                break;
        case 0x43E4:
                /* SC_SCISSOR1 */
-               track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
+               track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
                if (p->rdev->family < CHIP_RV515) {
                        track->maxy -= 1440;
                }
                break;
        case 0x4E00:
                /* RB3D_CCTL */
-               track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
+               track->num_cb = ((idx_value >> 5) & 0x3) + 1;
                break;
        case 0x4E38:
        case 0x4E3C:
@@ -825,13 +729,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_COLOR_MICROTILE_ENABLE;
 
-               tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+               tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;
 
                i = (reg - 0x4E38) >> 2;
-               track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
-               switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
+               track->cb[i].pitch = idx_value & 0x3FFE;
+               switch (((idx_value >> 21) & 0xF)) {
                case 9:
                case 11:
                case 12:
@@ -854,13 +758,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
-                                 ((ib_chunk->kdata[idx] >> 21) & 0xF));
+                                 ((idx_value >> 21) & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F00:
                /* ZB_CNTL */
-               if (ib_chunk->kdata[idx] & 2) {
+               if (idx_value & 2) {
                        track->z_enabled = true;
                } else {
                        track->z_enabled = false;
@@ -868,7 +772,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                break;
        case 0x4F10:
                /* ZB_FORMAT */
-               switch ((ib_chunk->kdata[idx] & 0xF)) {
+               switch ((idx_value & 0xF)) {
                case 0:
                case 1:
                        track->zb.cpp = 2;
@@ -878,7 +782,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        break;
                default:
                        DRM_ERROR("Invalid z buffer format (%d) !\n",
-                                 (ib_chunk->kdata[idx] & 0xF));
+                                 (idx_value & 0xF));
                        return -EINVAL;
                }
                break;
@@ -897,17 +801,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_DEPTHMICROTILE_TILED;;
 
-               tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+               tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;
 
-               track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
+               track->zb.pitch = idx_value & 0x3FFC;
                break;
        case 0x4104:
                for (i = 0; i < 16; i++) {
                        bool enabled;
 
-                       enabled = !!(ib_chunk->kdata[idx] & (1 << i));
+                       enabled = !!(idx_value & (1 << i));
                        track->textures[i].enabled = enabled;
                }
                break;
@@ -929,9 +833,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
        case 0x44FC:
                /* TX_FORMAT1_[0-15] */
                i = (reg - 0x44C0) >> 2;
-               tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
+               tmp = (idx_value >> 25) & 0x3;
                track->textures[i].tex_coord_type = tmp;
-               switch ((ib_chunk->kdata[idx] & 0x1F)) {
+               switch ((idx_value & 0x1F)) {
                case R300_TX_FORMAT_X8:
                case R300_TX_FORMAT_Y4X4:
                case R300_TX_FORMAT_Z3Y3X2:
@@ -971,7 +875,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        break;
                default:
                        DRM_ERROR("Invalid texture format %u\n",
-                                 (ib_chunk->kdata[idx] & 0x1F));
+                                 (idx_value & 0x1F));
                        return -EINVAL;
                        break;
                }
@@ -994,11 +898,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
        case 0x443C:
                /* TX_FILTER0_[0-15] */
                i = (reg - 0x4400) >> 2;
-               tmp = ib_chunk->kdata[idx] & 0x7;
+               tmp = idx_value & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_w = false;
                }
-               tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
+               tmp = (idx_value >> 3) & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_h = false;
                }
@@ -1021,12 +925,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
        case 0x453C:
                /* TX_FORMAT2_[0-15] */
                i = (reg - 0x4500) >> 2;
-               tmp = ib_chunk->kdata[idx] & 0x3FFF;
+               tmp = idx_value & 0x3FFF;
                track->textures[i].pitch = tmp + 1;
                if (p->rdev->family >= CHIP_RV515) {
-                       tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
+                       tmp = ((idx_value >> 15) & 1) << 11;
                        track->textures[i].width_11 = tmp;
-                       tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
+                       tmp = ((idx_value >> 16) & 1) << 11;
                        track->textures[i].height_11 = tmp;
                }
                break;
@@ -1048,15 +952,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
        case 0x44BC:
                /* TX_FORMAT0_[0-15] */
                i = (reg - 0x4480) >> 2;
-               tmp = ib_chunk->kdata[idx] & 0x7FF;
+               tmp = idx_value & 0x7FF;
                track->textures[i].width = tmp + 1;
-               tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
+               tmp = (idx_value >> 11) & 0x7FF;
                track->textures[i].height = tmp + 1;
-               tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
+               tmp = (idx_value >> 26) & 0xF;
                track->textures[i].num_levels = tmp;
-               tmp = ib_chunk->kdata[idx] & (1 << 31);
+               tmp = idx_value & (1 << 31);
                track->textures[i].use_pitch = !!tmp;
-               tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
+               tmp = (idx_value >> 22) & 0xF;
                track->textures[i].txdepth = tmp;
                break;
        case R300_ZB_ZPASS_ADDR:
@@ -1067,7 +971,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case 0x4be8:
                /* valid register only on RV530 */
@@ -1085,60 +989,20 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_chunk *ib_chunk;
-
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
-       unsigned i, c;
        int r;
 
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
        switch(pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
-               c = ib_chunk->kdata[idx++] & 0x1F;
-               track->num_arrays = c;
-               for (i = 0; i < (c - 1); i+=2, idx+=3) {
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 0].robj = reloc->robj;
-                       track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-                       track->arrays[i + 0].esize &= 0x7F;
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 1].robj = reloc->robj;
-                       track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
-                       track->arrays[i + 1].esize &= 0x7F;
-               }
-               if (c & 1) {
-                       r = r100_cs_packet_next_reloc(p, &reloc);
-                       if (r) {
-                               DRM_ERROR("No reloc for packet3 %d\n",
-                                         pkt->opcode);
-                               r100_cs_dump_packet(p, pkt);
-                               return r;
-                       }
-                       ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
-                       track->arrays[i + 0].robj = reloc->robj;
-                       track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
-                       track->arrays[i + 0].esize &= 0x7F;
-               }
+               r = r100_packet3_load_vbpntr(p, pkt, idx);
+               if (r)
+                       return r;
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1147,7 +1011,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
-               ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+               ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
@@ -1158,11 +1022,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
                /* Number of dwords is vtx_size * (num_vertices - 1)
                 * PRIM_WALK must be equal to 3 vertex data in embedded
                 * in cmd stream */
-               if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
+               if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
-               track->vap_vf_cntl = ib_chunk->kdata[idx+1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
@@ -1173,11 +1037,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
                /* Number of dwords is vtx_size * (num_vertices - 1)
                 * PRIM_WALK must be equal to 3 vertex data in embedded
                 * in cmd stream */
-               if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
+               if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
@@ -1185,28 +1049,28 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
                }
                break;
        case PACKET3_3D_DRAW_VBUF:
-               track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF_2:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX:
-               track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX_2:
-               track->vap_vf_cntl = ib_chunk->kdata[idx];
+               track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
@@ -1265,12 +1129,6 @@ void r300_set_reg_safe(struct radeon_device *rdev)
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
 }
 
-int r300_init(struct radeon_device *rdev)
-{
-       r300_set_reg_safe(rdev);
-       return 0;
-}
-
 void r300_mc_program(struct radeon_device *rdev)
 {
        struct r100_mc_save save;
@@ -1304,3 +1162,198 @@ void r300_mc_program(struct radeon_device *rdev)
                S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
        r100_mc_resume(rdev, &save);
 }
+
+void r300_clock_startup(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       if (radeon_dynclks != -1 && radeon_dynclks)
+               radeon_legacy_set_clock_gating(rdev, 1);
+       /* We need to force on some of the blocks */
+       tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+       tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+       if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
+               tmp |= S_00000D_FORCE_VAP(1);
+       WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r300_startup(struct radeon_device *rdev)
+{
+       int r;
+
+       r300_mc_program(rdev);
+       /* Resume clock */
+       r300_clock_startup(rdev);
+       /* Initialize GPU configuration (# pipes, ...) */
+       r300_gpu_init(rdev);
+       /* Initialize GART (initialize after TTM so we can allocate
+        * memory through TTM but finalize after TTM) */
+       if (rdev->flags & RADEON_IS_PCIE) {
+               r = rv370_pcie_gart_enable(rdev);
+               if (r)
+                       return r;
+       }
+       if (rdev->flags & RADEON_IS_PCI) {
+               r = r100_pci_gart_enable(rdev);
+               if (r)
+                       return r;
+       }
+       /* Enable IRQ */
+       rdev->irq.sw_int = true;
+       r100_irq_set(rdev);
+       /* 1M ring buffer */
+       r = r100_cp_init(rdev, 1024 * 1024);
+       if (r) {
+               dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+               return r;
+       }
+       r = r100_wb_init(rdev);
+       if (r)
+               dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
+       r = r100_ib_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+               return r;
+       }
+       return 0;
+}
+
+int r300_resume(struct radeon_device *rdev)
+{
+       /* Make sure GART is not working */
+       if (rdev->flags & RADEON_IS_PCIE)
+               rv370_pcie_gart_disable(rdev);
+       if (rdev->flags & RADEON_IS_PCI)
+               r100_pci_gart_disable(rdev);
+       /* Resume clock before doing reset */
+       r300_clock_startup(rdev);
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* post */
+       radeon_combios_asic_init(rdev->ddev);
+       /* Resume clock after posting */
+       r300_clock_startup(rdev);
+       return r300_startup(rdev);
+}
+
+int r300_suspend(struct radeon_device *rdev)
+{
+       r100_cp_disable(rdev);
+       r100_wb_disable(rdev);
+       r100_irq_disable(rdev);
+       if (rdev->flags & RADEON_IS_PCIE)
+               rv370_pcie_gart_disable(rdev);
+       if (rdev->flags & RADEON_IS_PCI)
+               r100_pci_gart_disable(rdev);
+       return 0;
+}
+
+void r300_fini(struct radeon_device *rdev)
+{
+       r300_suspend(rdev);
+       r100_cp_fini(rdev);
+       r100_wb_fini(rdev);
+       r100_ib_fini(rdev);
+       radeon_gem_fini(rdev);
+       if (rdev->flags & RADEON_IS_PCIE)
+               rv370_pcie_gart_fini(rdev);
+       if (rdev->flags & RADEON_IS_PCI)
+               r100_pci_gart_fini(rdev);
+       radeon_irq_kms_fini(rdev);
+       radeon_fence_driver_fini(rdev);
+       radeon_object_fini(rdev);
+       radeon_atombios_fini(rdev);
+       kfree(rdev->bios);
+       rdev->bios = NULL;
+}
+
+int r300_init(struct radeon_device *rdev)
+{
+       int r;
+
+       /* Disable VGA */
+       r100_vga_render_disable(rdev);
+       /* Initialize scratch registers */
+       radeon_scratch_init(rdev);
+       /* Initialize surface registers */
+       radeon_surface_init(rdev);
+       /* TODO: disable VGA; need to use VGA request */
+       /* BIOS */
+       if (!radeon_get_bios(rdev)) {
+               if (ASIC_IS_AVIVO(rdev))
+                       return -EINVAL;
+       }
+       if (rdev->is_atom_bios) {
+               dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+               return -EINVAL;
+       } else {
+               r = radeon_combios_init(rdev);
+               if (r)
+                       return r;
+       }
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev,
+                       "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* check if cards are posted or not */
+       if (!radeon_card_posted(rdev) && rdev->bios) {
+               DRM_INFO("GPU not posted. posting now...\n");
+               radeon_combios_asic_init(rdev->ddev);
+       }
+       /* Set asic errata */
+       r300_errata(rdev);
+       /* Initialize clocks */
+       radeon_get_clock_info(rdev->ddev);
+       /* Get vram information */
+       r300_vram_info(rdev);
+       /* Initialize memory controller (also test AGP) */
+       r = r420_mc_init(rdev);
+       if (r)
+               return r;
+       /* Fence driver */
+       r = radeon_fence_driver_init(rdev);
+       if (r)
+               return r;
+       r = radeon_irq_kms_init(rdev);
+       if (r)
+               return r;
+       /* Memory manager */
+       r = radeon_object_init(rdev);
+       if (r)
+               return r;
+       if (rdev->flags & RADEON_IS_PCIE) {
+               r = rv370_pcie_gart_init(rdev);
+               if (r)
+                       return r;
+       }
+       if (rdev->flags & RADEON_IS_PCI) {
+               r = r100_pci_gart_init(rdev);
+               if (r)
+                       return r;
+       }
+       r300_set_reg_safe(rdev);
+       rdev->accel_working = true;
+       r = r300_startup(rdev);
+       if (r) {
+               /* Something went wrong with the accel init; stop accel */
+               dev_err(rdev->dev, "Disabling GPU acceleration\n");
+               r300_suspend(rdev);
+               r100_cp_fini(rdev);
+               r100_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               if (rdev->flags & RADEON_IS_PCIE)
+                       rv370_pcie_gart_fini(rdev);
+               if (rdev->flags & RADEON_IS_PCI)
+                       r100_pci_gart_fini(rdev);
+               radeon_irq_kms_fini(rdev);
+               rdev->accel_working = false;
+       }
+       return 0;
+}
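For reference on the MC_FB_LOCATION write seen in r300_mc_program above: the 32-bit register packs the start and top of the visible VRAM aperture as two 16-bit fields holding the address shifted right by 16. Below is a minimal standalone sketch using the R_000148_* macros from the header diff above (assumed in scope); the 64 MB aperture at address 0 is an arbitrary example value, not taken from real hardware.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative 64 MB aperture starting at address 0. */
	uint64_t vram_start = 0x00000000;
	uint64_t vram_end   = vram_start + (64ULL << 20) - 1;

	/* Pack start/top (address >> 16) into the MC_FB_LOCATION layout. */
	uint32_t fb_location = S_000148_MC_FB_START(vram_start >> 16) |
			       S_000148_MC_FB_TOP(vram_end >> 16);

	printf("MC_FB_LOCATION=0x%08X start=0x%04X top=0x%04X\n",
	       (unsigned)fb_location,
	       (unsigned)G_000148_MC_FB_START(fb_location),
	       (unsigned)G_000148_MC_FB_TOP(fb_location));
	return 0;
}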
index d4fa3eb1074f1d41a151cee005e651b5d41adc6d..4c73114f0de98362f86aa45e7914bac072f50063 100644 (file)
 #define   S_000170_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
 #define   G_000170_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
 #define   C_000170_AGP_BASE_ADDR                       0x00000000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
 
 
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
 #endif
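
Every register block in these new headers follows the same accessor convention: for each field, S_<reg>_<FIELD>(v) shifts a value into position, G_<reg>_<FIELD>(reg) extracts it, and C_<reg>_<FIELD> is the complement mask that clears it, so a read-modify-write never disturbs neighbouring fields. A minimal sketch of how they combine, in the style of the r420_clock_resume() hunk below (illustrative only, assuming the driver's usual radeon.h context; WREG32_PLL is assumed here as the write-side counterpart of the RREG32_PLL accessor used in that hunk):

    /* Illustrative sketch, not part of the patch. */
    static void example_sclk_rmw(struct radeon_device *rdev)
    {
            u32 sclk_cntl;

            sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
            /* C_* clears the whole SCLK_SRC_SEL field... */
            sclk_cntl &= C_00000D_SCLK_SRC_SEL;
            /* ...and S_*(v) writes a new value into it */
            sclk_cntl |= S_00000D_SCLK_SRC_SEL(0);
            /* single-bit fields work the same way */
            sclk_cntl |= S_00000D_FORCE_CP(1);
            WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
    }
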
index 49a2fdc57d27e72d3a98ebe69b6b4272e4a1ae78..5c7fe52de30e2688132f6cb5e188f9b6df101cc8 100644 (file)
@@ -155,6 +155,9 @@ static void r420_debugfs(struct radeon_device *rdev)
 static void r420_clock_resume(struct radeon_device *rdev)
 {
        u32 sclk_cntl;
+
+       if (radeon_dynclks != -1 && radeon_dynclks)
+               radeon_atom_set_clock_gating(rdev, 1);
        sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
        sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if (rdev->family == CHIP_R420)
@@ -167,6 +170,8 @@ static int r420_startup(struct radeon_device *rdev)
        int r;
 
        r300_mc_program(rdev);
+       /* Resume clock */
+       r420_clock_resume(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
@@ -267,7 +272,6 @@ int r420_init(struct radeon_device *rdev)
 {
        int r;
 
-       rdev->new_init_path = true;
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
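
The new guard at the top of r420_clock_resume() treats the radeon_dynclks module parameter as a tri-state: -1 means leave the driver default, 0 disables dynamic clock gating, and any other value enables it. A one-line helper spelling out that convention (illustrative only, not part of the patch):

    /* Illustrative sketch: -1 = auto, 0 = off, anything else = on. */
    static bool example_dynclks_requested(int radeon_dynclks)
    {
            return radeon_dynclks != -1 && radeon_dynclks;
    }
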
index a48a7db1e2aa81dda2a169fad8f8e1246892fff0..fc78d31a0b4afe727d0e8bc987ba803e10fede02 100644 (file)
 #define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
 #define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
 #define   C_00000D_FORCE_E2                            0xFFEFFFFF
-#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
-#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
-#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_VAP(x)                        (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_VAP(x)                        (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_VAP                           0xFFDFFFFF
 #define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
 #define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
 #define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
 #define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
 #define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
 #define   C_00000D_FORCE_RE                            0xFEFFFFFF
-#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
-#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
-#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_SR(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_SR(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_SR                            0xFDFFFFFF
 #define   S_00000D_FORCE_PX(x)                         (((x) & 0x1) << 26)
 #define   G_00000D_FORCE_PX(x)                         (((x) >> 26) & 0x1)
 #define   C_00000D_FORCE_PX                            0xFBFFFFFF
 #define   S_00000D_FORCE_TX(x)                         (((x) & 0x1) << 27)
 #define   G_00000D_FORCE_TX(x)                         (((x) >> 27) & 0x1)
 #define   C_00000D_FORCE_TX                            0xF7FFFFFF
-#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
-#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
-#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_US(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_US(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_US                            0xEFFFFFFF
 #define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
 #define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
 #define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
-#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
-#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
-#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_SU(x)                         (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SU(x)                         (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SU                            0xBFFFFFFF
 #define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
 #define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
 #define   C_00000D_FORCE_OV0                           0x7FFFFFFF
index e1d5e0331e19095b863d0a613c9a87318ee7d082..868add6e166dc5428a64c97a5608874b12e22137 100644 (file)
 #define AVIVO_D1MODE_VBLANK_STATUS              0x6534
 #       define AVIVO_VBLANK_ACK                 (1 << 4)
 #define AVIVO_D1MODE_VLINE_START_END            0x6538
+#define AVIVO_D1MODE_VLINE_STATUS               0x653c
+#       define AVIVO_D1MODE_VLINE_STAT          (1 << 12)
 #define AVIVO_DxMODE_INT_MASK                   0x6540
 #       define AVIVO_D1MODE_INT_MASK            (1 << 0)
 #       define AVIVO_D2MODE_INT_MASK            (1 << 8)
 
 #define AVIVO_D2MODE_VBLANK_STATUS              0x6d34
 #define AVIVO_D2MODE_VLINE_START_END            0x6d38
+#define AVIVO_D2MODE_VLINE_STATUS               0x6d3c
 #define AVIVO_D2MODE_VIEWPORT_START             0x6d80
 #define AVIVO_D2MODE_VIEWPORT_SIZE              0x6d84
 #define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6d88
index d4b0b9d2e39b28e35ee3b4d1405b25c59ea99de3..a555b7b19b48aefd3da033a2717b414fdbbccf97 100644 (file)
  *          Jerome Glisse
  */
 #include "drmP.h"
-#include "radeon_reg.h"
 #include "radeon.h"
+#include "atom.h"
+#include "r520d.h"
 
-/* r520,rv530,rv560,rv570,r580 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
-void rs600_mc_disable_clients(struct radeon_device *rdev);
-void rs600_disable_vga(struct radeon_device *rdev);
-int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
-int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+/* This file gathers functions specific to: r520,rv530,rv560,rv570,r580 */
 
-/* This files gather functions specifics to:
- * r520,rv530,rv560,rv570,r580
- *
- * Some of these functions might be used by newer ASICs.
- */
-void r520_gpu_init(struct radeon_device *rdev);
-int r520_mc_wait_for_idle(struct radeon_device *rdev);
-
-
-/*
- * MC
- */
-int r520_mc_init(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-       int r;
-
-       if (r100_debugfs_rbbm_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-       }
-       if (rv515_debugfs_pipes_info_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for pipes !\n");
-       }
-       if (rv515_debugfs_ga_info_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for pipes !\n");
-       }
-
-       r520_gpu_init(rdev);
-       rv370_pcie_gart_disable(rdev);
-
-       /* Setup GPU memory space */
-       rdev->mc.vram_location = 0xFFFFFFFFUL;
-       rdev->mc.gtt_location = 0xFFFFFFFFUL;
-       if (rdev->flags & RADEON_IS_AGP) {
-               r = radeon_agp_init(rdev);
-               if (r) {
-                       printk(KERN_WARNING "[drm] Disabling AGP\n");
-                       rdev->flags &= ~RADEON_IS_AGP;
-                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-               } else {
-                       rdev->mc.gtt_location = rdev->mc.agp_base;
-               }
-       }
-       r = radeon_mc_setup(rdev);
-       if (r) {
-               return r;
-       }
-
-       /* Program GPU memory space */
-       rs600_mc_disable_clients(rdev);
-       if (r520_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-       /* Write VRAM size in case we are limiting it */
-       WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
-       tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-       tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
-       tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
-       WREG32_MC(R520_MC_FB_LOCATION, tmp);
-       WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
-       WREG32(0x310, rdev->mc.vram_location);
-       if (rdev->flags & RADEON_IS_AGP) {
-               tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-               tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
-               tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
-               WREG32_MC(R520_MC_AGP_LOCATION, tmp);
-               WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
-               WREG32_MC(R520_MC_AGP_BASE_2, 0);
-       } else {
-               WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
-               WREG32_MC(R520_MC_AGP_BASE, 0);
-               WREG32_MC(R520_MC_AGP_BASE_2, 0);
-       }
-       return 0;
-}
-
-void r520_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Global GPU functions
- */
-void r520_errata(struct radeon_device *rdev)
-{
-       rdev->pll_errata = 0;
-}
-
-int r520_mc_wait_for_idle(struct radeon_device *rdev)
+static int r520_mc_wait_for_idle(struct radeon_device *rdev)
 {
        unsigned i;
        uint32_t tmp;
@@ -143,12 +48,12 @@ int r520_mc_wait_for_idle(struct radeon_device *rdev)
        return -1;
 }
 
-void r520_gpu_init(struct radeon_device *rdev)
+static void r520_gpu_init(struct radeon_device *rdev)
 {
        unsigned pipe_select_current, gb_pipe_select, tmp;
 
        r100_hdp_reset(rdev);
-       rs600_disable_vga(rdev);
+       rv515_vga_render_disable(rdev);
        /*
         * DST_PIPE_CONFIG              0x170C
         * GB_TILE_CONFIG               0x4018
@@ -186,10 +91,6 @@ void r520_gpu_init(struct radeon_device *rdev)
        }
 }
 
-
-/*
- * VRAM info
- */
 static void r520_vram_get_type(struct radeon_device *rdev)
 {
        uint32_t tmp;
@@ -233,7 +134,167 @@ void r520_vram_info(struct radeon_device *rdev)
        rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
 }
 
-void r520_bandwidth_update(struct radeon_device *rdev)
+void r520_mc_program(struct radeon_device *rdev)
+{
+       struct rv515_mc_save save;
+
+       /* Stops all mc clients */
+       rv515_mc_stop(rdev, &save);
+
+       /* Wait for mc idle */
+       if (r520_mc_wait_for_idle(rdev))
+               dev_warn(rdev->dev, "Wait for MC idle timed out before updating MC.\n");
+       /* Write VRAM size in case we are limiting it */
+       WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+       /* Program MC, should be a 32bits limited address space */
+       WREG32_MC(R_000004_MC_FB_LOCATION,
+                       S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+                       S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+       WREG32(R_000134_HDP_FB_LOCATION,
+               S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+       if (rdev->flags & RADEON_IS_AGP) {
+               WREG32_MC(R_000005_MC_AGP_LOCATION,
+                       S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+                       S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+               WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+               WREG32_MC(R_000007_AGP_BASE_2,
+                       S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+       } else {
+               WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
+               WREG32_MC(R_000006_AGP_BASE, 0);
+               WREG32_MC(R_000007_AGP_BASE_2, 0);
+       }
+
+       rv515_mc_resume(rdev, &save);
+}
+
+static int r520_startup(struct radeon_device *rdev)
+{
+       int r;
+
+       r520_mc_program(rdev);
+       /* Resume clock */
+       rv515_clock_startup(rdev);
+       /* Initialize GPU configuration (# pipes, ...) */
+       r520_gpu_init(rdev);
+       /* Initialize GART (initialize after TTM so we can allocate
+        * memory through TTM but finalize after TTM) */
+       if (rdev->flags & RADEON_IS_PCIE) {
+               r = rv370_pcie_gart_enable(rdev);
+               if (r)
+                       return r;
+       }
+       /* Enable IRQ */
+       rdev->irq.sw_int = true;
+       rs600_irq_set(rdev);
+       /* 1M ring buffer */
+       r = r100_cp_init(rdev, 1024 * 1024);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+               return r;
+       }
+       r = r100_wb_init(rdev);
+       if (r)
+               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
+       r = r100_ib_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               return r;
+       }
+       return 0;
+}
+
+int r520_resume(struct radeon_device *rdev)
 {
-       rv515_bandwidth_avivo_update(rdev);
+       /* Make sure GART is not working */
+       if (rdev->flags & RADEON_IS_PCIE)
+               rv370_pcie_gart_disable(rdev);
+       /* Resume clock before doing reset */
+       rv515_clock_startup(rdev);
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* post */
+       atom_asic_init(rdev->mode_info.atom_context);
+       /* Resume clock after posting */
+       rv515_clock_startup(rdev);
+       return r520_startup(rdev);
+}
+
+int r520_init(struct radeon_device *rdev)
+{
+       int r;
+
+       /* Initialize scratch registers */
+       radeon_scratch_init(rdev);
+       /* Initialize surface registers */
+       radeon_surface_init(rdev);
+       /* TODO: disable VGA need to use VGA request */
+       /* BIOS*/
+       if (!radeon_get_bios(rdev)) {
+               if (ASIC_IS_AVIVO(rdev))
+                       return -EINVAL;
+       }
+       if (rdev->is_atom_bios) {
+               r = radeon_atombios_init(rdev);
+               if (r)
+                       return r;
+       } else {
+               dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+               return -EINVAL;
+       }
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev,
+                       "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* check if cards are posted or not */
+       if (!radeon_card_posted(rdev) && rdev->bios) {
+               DRM_INFO("GPU not posted. posting now...\n");
+               atom_asic_init(rdev->mode_info.atom_context);
+       }
+       /* Initialize clocks */
+       radeon_get_clock_info(rdev->ddev);
+       /* Get vram information */
+       r520_vram_info(rdev);
+       /* Initialize memory controller (also test AGP) */
+       r = r420_mc_init(rdev);
+       if (r)
+               return r;
+       rv515_debugfs(rdev);
+       /* Fence driver */
+       r = radeon_fence_driver_init(rdev);
+       if (r)
+               return r;
+       r = radeon_irq_kms_init(rdev);
+       if (r)
+               return r;
+       /* Memory manager */
+       r = radeon_object_init(rdev);
+       if (r)
+               return r;
+       r = rv370_pcie_gart_init(rdev);
+       if (r)
+               return r;
+       rv515_set_safe_registers(rdev);
+       rdev->accel_working = true;
+       r = r520_startup(rdev);
+       if (r) {
+               /* Something went wrong with the accel init, stop accel */
+               dev_err(rdev->dev, "Disabling GPU acceleration\n");
+               rv515_suspend(rdev);
+               r100_cp_fini(rdev);
+               r100_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               rv370_pcie_gart_fini(rdev);
+               radeon_agp_fini(rdev);
+               radeon_irq_kms_fini(rdev);
+               rdev->accel_working = false;
+       }
+       return 0;
 }
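
The rewritten r520_startup()/r520_resume() paths above lean on the driver's usual bounded busy-wait idiom: poll a status register, test one busy bit with a G_ getter, and give up after a fixed number of microsecond delays so a hung engine cannot stall initialization forever. A hedged sketch of that pattern, polling the GUI_ACTIVE bit of RBBM_STATUS (the real r520_mc_wait_for_idle() polls an MC status register not shown in this hunk and uses the driver's own timeout rather than the arbitrary bound below):

    /* Illustrative sketch of the wait-for-idle idiom, not the patch's code. */
    static int example_wait_gui_idle(struct radeon_device *rdev)
    {
            unsigned i;

            for (i = 0; i < 10000; i++) {
                    if (!G_000E40_GUI_ACTIVE(RREG32(R_000E40_RBBM_STATUS)))
                            return 0;       /* engine went idle */
                    udelay(1);
            }
            return -1;      /* timed out; caller prints a warning */
    }
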
diff --git a/drivers/gpu/drm/radeon/r520d.h b/drivers/gpu/drm/radeon/r520d.h
new file mode 100644 (file)
index 0000000..61af61f
--- /dev/null
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R520D_H__
+#define __R520D_H__
+
+/* Registers */
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_RBBM_HIBUSY(x)                      (((x) & 0x1) << 28)
+#define   G_000E40_RBBM_HIBUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_000E40_RBBM_HIBUSY                         0xEFFFFFFF
+#define   S_000E40_SKID_CFBUSY(x)                      (((x) & 0x1) << 29)
+#define   G_000E40_SKID_CFBUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_000E40_SKID_CFBUSY                         0xDFFFFFFF
+#define   S_000E40_VAP_VF_BUSY(x)                      (((x) & 0x1) << 30)
+#define   G_000E40_VAP_VF_BUSY(x)                      (((x) >> 30) & 0x1)
+#define   C_000E40_VAP_VF_BUSY                         0xBFFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+
+#define R_000004_MC_FB_LOCATION                      0x000004
+#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000004_MC_FB_START                         0xFFFF0000
+#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000004_MC_FB_TOP                           0x0000FFFF
+#define R_000005_MC_AGP_LOCATION                     0x000005
+#define   S_000005_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000005_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000005_MC_AGP_START                        0xFFFF0000
+#define   S_000005_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000005_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000005_MC_AGP_TOP                          0x0000FFFF
+#define R_000006_AGP_BASE                            0x000006
+#define   S_000006_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000006_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000006_AGP_BASE_ADDR                       0x00000000
+#define R_000007_AGP_BASE_2                          0x000007
+#define   S_000007_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000007_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000007_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+#endif
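
r520_mc_program() packs the VRAM range into MC_FB_LOCATION as two 16-bit fields, each holding a byte address shifted right by 16 (64KB granularity); the G_ getters defined above recover them. A small hedged sketch of reading the register back (RREG32_MC is assumed here as the read-side counterpart of the WREG32_MC accessor used in r520.c):

    /* Illustrative sketch, not part of the patch. */
    static void example_dump_fb_location(struct radeon_device *rdev)
    {
            u32 tmp = RREG32_MC(R_000004_MC_FB_LOCATION);

            /* fields hold addr >> 16, so shift back up for byte addresses */
            dev_info(rdev->dev, "FB start 0x%08X, FB top 0x%08X\n",
                     G_000004_MC_FB_START(tmp) << 16,
                     G_000004_MC_FB_TOP(tmp) << 16);
    }
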
index eab31c1d6df1612c4b91a80f4eb3de876e803c6d..609719490ec28c26b064df9d43eb642954fbb442 100644 (file)
@@ -33,8 +33,8 @@
 #include "radeon.h"
 #include "radeon_mode.h"
 #include "r600d.h"
-#include "avivod.h"
 #include "atom.h"
+#include "avivod.h"
 
 #define PFP_UCODE_SIZE 576
 #define PM4_UCODE_SIZE 1792
@@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
-/* This files gather functions specifics to:
- * r600,rv610,rv630,rv620,rv635,rv670
- *
- * Some of these functions might be used by newer ASICs.
- */
+/* r600,rv610,rv630,rv620,rv635,rv670 */
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
 
-
 /*
  * R600 PCIE GART
  */
@@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev)
        radeon_gart_fini(rdev);
 }
 
+void r600_agp_enable(struct radeon_device *rdev)
+{
+       u32 tmp;
+       int i;
+
+       /* Setup L2 cache */
+       WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+                               ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+                               EFFECTIVE_L2_QUEUE_SIZE(7));
+       WREG32(VM_L2_CNTL2, 0);
+       WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+       /* Setup TLB control */
+       tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+               EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+               ENABLE_WAIT_L2_QUERY;
+       WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+       WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+       WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+       WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+       for (i = 0; i < 7; i++)
+               WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
 int r600_mc_wait_for_idle(struct radeon_device *rdev)
 {
        unsigned i;
@@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
        return -1;
 }
 
-static void r600_mc_resume(struct radeon_device *rdev)
+static void r600_mc_program(struct radeon_device *rdev)
 {
-       u32 d1vga_control, d2vga_control;
-       u32 vga_render_control, vga_hdp_control;
-       u32 d1crtc_control, d2crtc_control;
-       u32 new_d1grph_primary, new_d1grph_secondary;
-       u32 new_d2grph_primary, new_d2grph_secondary;
-       u64 old_vram_start;
+       struct rv515_mc_save save;
        u32 tmp;
        int i, j;
 
@@ -261,88 +285,54 @@ static void r600_mc_resume(struct radeon_device *rdev)
        }
        WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-       d1vga_control = RREG32(D1VGA_CONTROL);
-       d2vga_control = RREG32(D2VGA_CONTROL);
-       vga_render_control = RREG32(VGA_RENDER_CONTROL);
-       vga_hdp_control = RREG32(VGA_HDP_CONTROL);
-       d1crtc_control = RREG32(D1CRTC_CONTROL);
-       d2crtc_control = RREG32(D2CRTC_CONTROL);
-       old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
-       new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
-       new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
-       new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
-       new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
-       new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
-       new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
-       new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
-       new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
-
-       /* Stop all video */
-       WREG32(D1VGA_CONTROL, 0);
-       WREG32(D2VGA_CONTROL, 0);
-       WREG32(VGA_RENDER_CONTROL, 0);
-       WREG32(D1CRTC_UPDATE_LOCK, 1);
-       WREG32(D2CRTC_UPDATE_LOCK, 1);
-       WREG32(D1CRTC_CONTROL, 0);
-       WREG32(D2CRTC_CONTROL, 0);
-       WREG32(D1CRTC_UPDATE_LOCK, 0);
-       WREG32(D2CRTC_UPDATE_LOCK, 0);
-
-       mdelay(1);
+       rv515_mc_stop(rdev, &save);
        if (r600_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "[drm] MC not idle !\n");
+               dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
-
-       /* Lockout access through VGA aperture*/
+       /* Lockout access through VGA aperture (doesn't exist before R600) */
        WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
-
        /* Update configuration */
-       WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
-       WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
+       if (rdev->flags & RADEON_IS_AGP) {
+               if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+                       /* VRAM before AGP */
+                       WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+                               rdev->mc.vram_start >> 12);
+                       WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+                               rdev->mc.gtt_end >> 12);
+               } else {
+                       /* VRAM after AGP */
+                       WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+                               rdev->mc.gtt_start >> 12);
+                       WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+                               rdev->mc.vram_end >> 12);
+               }
+       } else {
+               WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
+               WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
+       }
        WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
-       tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
+       tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
        tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
        WREG32(MC_VM_FB_LOCATION, tmp);
        WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
        WREG32(HDP_NONSURFACE_INFO, (2 << 7));
-       WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+       WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
        if (rdev->flags & RADEON_IS_AGP) {
-               WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
-               WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+               WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
+               WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
                WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
        } else {
                WREG32(MC_VM_AGP_BASE, 0);
                WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
                WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
        }
-       WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
-       WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
-       WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
-       WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
-       WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-
-       /* Unlock host access */
-       WREG32(VGA_HDP_CONTROL, vga_hdp_control);
-
-       mdelay(1);
        if (r600_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "[drm] MC not idle !\n");
+               dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
-
-       /* Restore video state */
-       WREG32(D1CRTC_UPDATE_LOCK, 1);
-       WREG32(D2CRTC_UPDATE_LOCK, 1);
-       WREG32(D1CRTC_CONTROL, d1crtc_control);
-       WREG32(D2CRTC_CONTROL, d2crtc_control);
-       WREG32(D1CRTC_UPDATE_LOCK, 0);
-       WREG32(D2CRTC_UPDATE_LOCK, 0);
-       WREG32(D1VGA_CONTROL, d1vga_control);
-       WREG32(D2VGA_CONTROL, d2vga_control);
-       WREG32(VGA_RENDER_CONTROL, vga_render_control);
-
+       rv515_mc_resume(rdev, &save);
        /* we need to own VRAM, so turn off the VGA renderer here
         * to stop it overwriting our objects */
-       radeon_avivo_vga_render_disable(rdev);
+       rv515_vga_render_disable(rdev);
 }
 
 int r600_mc_init(struct radeon_device *rdev)
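
The aperture update in r600_mc_program() above simply makes the system aperture cover both VRAM and the AGP window when AGP is enabled: the two branches pick whichever range starts lower and ends higher. Assuming the two ranges do not overlap, the same bounds can be computed as an explicit min/max, which may make the intent clearer (illustrative only, not the patch's code; the u64 locals are just for the sketch):

    /* Illustrative sketch: compute the covering [low, high] range. */
    static void example_system_aperture(struct radeon_device *rdev)
    {
            u64 low = rdev->mc.vram_start;
            u64 high = rdev->mc.vram_end;

            if (rdev->flags & RADEON_IS_AGP) {
                    if (rdev->mc.gtt_start < low)
                            low = rdev->mc.gtt_start;
                    if (rdev->mc.gtt_end > high)
                            high = rdev->mc.gtt_end;
            }
            WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, low >> 12);
            WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, high >> 12);
    }
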
@@ -380,6 +370,13 @@ int r600_mc_init(struct radeon_device *rdev)
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+
+       if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+               rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+       if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+               rdev->mc.real_vram_size = rdev->mc.aper_size;
+
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r)
@@ -438,9 +435,9 @@ int r600_mc_init(struct radeon_device *rdev)
                }
        }
        rdev->mc.vram_start = rdev->mc.vram_location;
-       rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+       rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
        rdev->mc.gtt_start = rdev->mc.gtt_location;
-       rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
+       rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
        /* FIXME: we should enforce default clock in case GPU is not in
         * default setup
         */
@@ -456,6 +453,7 @@ int r600_mc_init(struct radeon_device *rdev)
  */
 int r600_gpu_soft_reset(struct radeon_device *rdev)
 {
+       struct rv515_mc_save save;
        u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
                                S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
                                S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -473,13 +471,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
                        S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
                        S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
        u32 srbm_reset = 0;
+       u32 tmp;
 
+       dev_info(rdev->dev, "GPU soft reset\n");
+       dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
+               RREG32(R_008010_GRBM_STATUS));
+       dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
+               RREG32(R_008014_GRBM_STATUS2));
+       dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
+               RREG32(R_000E50_SRBM_STATUS));
+       rv515_mc_stop(rdev, &save);
+       if (r600_mc_wait_for_idle(rdev)) {
+               dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+       }
        /* Disable CP parsing/prefetching */
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
        /* Check if any of the rendering block is busy and reset it */
        if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
            (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
-               WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) |
+               tmp = S_008020_SOFT_RESET_CR(1) |
                        S_008020_SOFT_RESET_DB(1) |
                        S_008020_SOFT_RESET_CB(1) |
                        S_008020_SOFT_RESET_PA(1) |
@@ -491,14 +501,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
                        S_008020_SOFT_RESET_TC(1) |
                        S_008020_SOFT_RESET_TA(1) |
                        S_008020_SOFT_RESET_VC(1) |
-                       S_008020_SOFT_RESET_VGT(1));
+                       S_008020_SOFT_RESET_VGT(1);
+               dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+               WREG32(R_008020_GRBM_SOFT_RESET, tmp);
                (void)RREG32(R_008020_GRBM_SOFT_RESET);
                udelay(50);
                WREG32(R_008020_GRBM_SOFT_RESET, 0);
                (void)RREG32(R_008020_GRBM_SOFT_RESET);
        }
        /* Reset CP (we always reset CP) */
-       WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1));
+       tmp = S_008020_SOFT_RESET_CP(1);
+       dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+       WREG32(R_008020_GRBM_SOFT_RESET, tmp);
        (void)RREG32(R_008020_GRBM_SOFT_RESET);
        udelay(50);
        WREG32(R_008020_GRBM_SOFT_RESET, 0);
@@ -526,6 +540,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
                srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
        if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
                srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
+       if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
+               srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
+       dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
+       WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
+       (void)RREG32(R_000E60_SRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(R_000E60_SRBM_SOFT_RESET, 0);
+       (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
        (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        udelay(50);
@@ -533,6 +555,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
        (void)RREG32(R_000E60_SRBM_SOFT_RESET);
        /* Wait a little for things to settle down */
        udelay(50);
+       dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
+               RREG32(R_008010_GRBM_STATUS));
+       dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
+               RREG32(R_008014_GRBM_STATUS2));
+       dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
+               RREG32(R_000E50_SRBM_STATUS));
+       /* After reset we need to reinit the asic as the GPU often ends up
+        * in an incoherent state.
+        */
+       atom_asic_init(rdev->mode_info.atom_context);
+       rv515_mc_resume(rdev, &save);
        return 0;
 }
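
The soft-reset sequence added above repeats one small idiom for each block: write the SOFT_RESET bits, read the register back so the write is posted before the delay, wait, then clear the bits and read back again. Pulled out on its own it looks like this (illustrative only, not a helper the patch actually adds):

    /* Illustrative sketch of the reset-pulse idiom used by r600_gpu_soft_reset(). */
    static void example_grbm_reset_pulse(struct radeon_device *rdev, u32 bits)
    {
            WREG32(R_008020_GRBM_SOFT_RESET, bits);
            (void)RREG32(R_008020_GRBM_SOFT_RESET); /* post the write */
            udelay(50);
            WREG32(R_008020_GRBM_SOFT_RESET, 0);    /* release reset */
            (void)RREG32(R_008020_GRBM_SOFT_RESET);
    }
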
 
@@ -1343,32 +1376,47 @@ int r600_ring_test(struct radeon_device *rdev)
        return r;
 }
 
-/*
- * Writeback
- */
-int r600_wb_init(struct radeon_device *rdev)
+void r600_wb_disable(struct radeon_device *rdev)
+{
+       WREG32(SCRATCH_UMSK, 0);
+       if (rdev->wb.wb_obj) {
+               radeon_object_kunmap(rdev->wb.wb_obj);
+               radeon_object_unpin(rdev->wb.wb_obj);
+       }
+}
+
+void r600_wb_fini(struct radeon_device *rdev)
+{
+       r600_wb_disable(rdev);
+       if (rdev->wb.wb_obj) {
+               radeon_object_unref(&rdev->wb.wb_obj);
+               rdev->wb.wb = NULL;
+               rdev->wb.wb_obj = NULL;
+       }
+}
+
+int r600_wb_enable(struct radeon_device *rdev)
 {
        int r;
 
        if (rdev->wb.wb_obj == NULL) {
-               r = radeon_object_create(rdev, NULL, 4096,
-                                        true,
-                                        RADEON_GEM_DOMAIN_GTT,
-                                        false, &rdev->wb.wb_obj);
+               r = radeon_object_create(rdev, NULL, 4096, true,
+                               RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
                if (r) {
-                       DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+                       dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
                        return r;
                }
-               r = radeon_object_pin(rdev->wb.wb_obj,
-                                     RADEON_GEM_DOMAIN_GTT,
-                                     &rdev->wb.gpu_addr);
+               r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+                               &rdev->wb.gpu_addr);
                if (r) {
-                       DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+                       dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+                       r600_wb_fini(rdev);
                        return r;
                }
                r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
                if (r) {
-                       DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+                       dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+                       r600_wb_fini(rdev);
                        return r;
                }
        }
@@ -1379,21 +1427,6 @@ int r600_wb_init(struct radeon_device *rdev)
        return 0;
 }
 
-void r600_wb_fini(struct radeon_device *rdev)
-{
-       if (rdev->wb.wb_obj) {
-               radeon_object_kunmap(rdev->wb.wb_obj);
-               radeon_object_unpin(rdev->wb.wb_obj);
-               radeon_object_unref(&rdev->wb.wb_obj);
-               rdev->wb.wb = NULL;
-               rdev->wb.wb_obj = NULL;
-       }
-}
-
-
-/*
- * CS
- */
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
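
The write-back refactor above splits teardown in two: r600_wb_disable() only unmaps and unpins the buffer so suspend can turn write-back off cheaply, r600_wb_fini() additionally drops the object for full driver teardown, and r600_wb_enable() creates and pins the buffer the first time it is needed. A hedged sketch of the intended call pattern (illustrative sequence only, not code from the patch):

    /* Illustrative sketch of the wb lifecycle introduced by this patch. */
    static void example_wb_lifecycle(struct radeon_device *rdev)
    {
            r600_wb_enable(rdev);   /* startup: create the BO on first use */
            r600_wb_disable(rdev);  /* suspend: unmap + unpin, keep the BO */
            r600_wb_enable(rdev);   /* resume: re-enable using the kept BO */
            r600_wb_fini(rdev);     /* unload: disable and free the BO */
    }
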
@@ -1470,11 +1503,14 @@ int r600_startup(struct radeon_device *rdev)
 {
        int r;
 
-       r600_gpu_reset(rdev);
-       r600_mc_resume(rdev);
-       r = r600_pcie_gart_enable(rdev);
-       if (r)
-               return r;
+       r600_mc_program(rdev);
+       if (rdev->flags & RADEON_IS_AGP) {
+               r600_agp_enable(rdev);
+       } else {
+               r = r600_pcie_gart_enable(rdev);
+               if (r)
+                       return r;
+       }
        r600_gpu_init(rdev);
 
        r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -1493,9 +1529,8 @@ int r600_startup(struct radeon_device *rdev)
        r = r600_cp_resume(rdev);
        if (r)
                return r;
-       r = r600_wb_init(rdev);
-       if (r)
-               return r;
+       /* write back buffer is not vital so don't worry about failure */
+       r600_wb_enable(rdev);
        return 0;
 }
 
@@ -1517,15 +1552,12 @@ int r600_resume(struct radeon_device *rdev)
 {
        int r;
 
-       if (radeon_gpu_reset(rdev)) {
-               /* FIXME: what do we want to do here ? */
-       }
+       /* Do not reset the GPU before posting; on r600 hw, unlike on r500 hw,
+        * posting performs the necessary tasks to bring the GPU back into
+        * good shape.
+        */
        /* post card */
-       if (rdev->is_atom_bios) {
-               atom_asic_init(rdev->mode_info.atom_context);
-       } else {
-               radeon_combios_asic_init(rdev->ddev);
-       }
+       atom_asic_init(rdev->mode_info.atom_context);
        /* Initialize clocks */
        r = radeon_clocks_init(rdev);
        if (r) {
@@ -1538,7 +1570,7 @@ int r600_resume(struct radeon_device *rdev)
                return r;
        }
 
-       r = radeon_ib_test(rdev);
+       r = r600_ib_test(rdev);
        if (r) {
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                return r;
@@ -1546,13 +1578,12 @@ int r600_resume(struct radeon_device *rdev)
        return r;
 }
 
-
 int r600_suspend(struct radeon_device *rdev)
 {
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
        rdev->cp.ready = false;
-
+       r600_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
        /* unpin shaders bo */
        radeon_object_unpin(rdev->r600_blit.shader_obj);
@@ -1569,7 +1600,6 @@ int r600_init(struct radeon_device *rdev)
 {
        int r;
 
-       rdev->new_init_path = true;
        r = radeon_dummy_page_init(rdev);
        if (r)
                return r;
@@ -1586,8 +1616,10 @@ int r600_init(struct radeon_device *rdev)
                        return -EINVAL;
        }
        /* Must be an ATOMBIOS */
-       if (!rdev->is_atom_bios)
+       if (!rdev->is_atom_bios) {
+               dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
                return -EINVAL;
+       }
        r = radeon_atombios_init(rdev);
        if (r)
                return r;
@@ -1609,15 +1641,8 @@ int r600_init(struct radeon_device *rdev)
        if (r)
                return r;
        r = r600_mc_init(rdev);
-       if (r) {
-               if (rdev->flags & RADEON_IS_AGP) {
-                       /* Retry with disabling AGP */
-                       r600_fini(rdev);
-                       rdev->flags &= ~RADEON_IS_AGP;
-                       return r600_init(rdev);
-               }
+       if (r)
                return r;
-       }
        /* Memory manager */
        r = radeon_object_init(rdev);
        if (r)
@@ -1646,12 +1671,10 @@ int r600_init(struct radeon_device *rdev)
 
        r = r600_startup(rdev);
        if (r) {
-               if (rdev->flags & RADEON_IS_AGP) {
-                       /* Retry with disabling AGP */
-                       r600_fini(rdev);
-                       rdev->flags &= ~RADEON_IS_AGP;
-                       return r600_init(rdev);
-               }
+               r600_suspend(rdev);
+               r600_wb_fini(rdev);
+               radeon_ring_fini(rdev);
+               r600_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
        if (rdev->accel_working) {
@@ -1660,7 +1683,7 @@ int r600_init(struct radeon_device *rdev)
                        DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
                        rdev->accel_working = false;
                }
-               r = radeon_ib_test(rdev);
+               r = r600_ib_test(rdev);
                if (r) {
                        DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                        rdev->accel_working = false;
@@ -1676,19 +1699,15 @@ void r600_fini(struct radeon_device *rdev)
 
        r600_blit_fini(rdev);
        radeon_ring_fini(rdev);
+       r600_wb_fini(rdev);
        r600_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_clocks_fini(rdev);
-#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP)
                radeon_agp_fini(rdev);
-#endif
        radeon_object_fini(rdev);
-       if (rdev->is_atom_bios)
-               radeon_atombios_fini(rdev);
-       else
-               radeon_combios_fini(rdev);
+       radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
        radeon_dummy_page_fini(rdev);
index d988eece0187253787397d3265bf07f0a6fe1e89..dec501081608364ce09c45c615ec12aeadc6f14b 100644 (file)
@@ -582,8 +582,6 @@ r600_blit_copy(struct drm_device *dev,
        u64 vb_addr;
        u32 *vb;
 
-       vb = r600_nomm_get_vb_ptr(dev);
-
        if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
                max_bytes = 8192;
 
@@ -619,8 +617,8 @@ r600_blit_copy(struct drm_device *dev,
                                if (!dev_priv->blit_vb)
                                        return;
                                set_shaders(dev);
-                               vb = r600_nomm_get_vb_ptr(dev);
                        }
+                       vb = r600_nomm_get_vb_ptr(dev);
 
                        vb[0] = i2f(dst_x);
                        vb[1] = 0;
@@ -708,8 +706,8 @@ r600_blit_copy(struct drm_device *dev,
                                        return;
 
                                set_shaders(dev);
-                               vb = r600_nomm_get_vb_ptr(dev);
                        }
+                       vb = r600_nomm_get_vb_ptr(dev);
 
                        vb[0] = i2f(dst_x / 4);
                        vb[1] = 0;
@@ -777,8 +775,6 @@ r600_blit_swap(struct drm_device *dev,
        u64 vb_addr;
        u32 *vb;
 
-       vb = r600_nomm_get_vb_ptr(dev);
-
        if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
 
                r600_nomm_put_vb(dev);
@@ -787,8 +783,8 @@ r600_blit_swap(struct drm_device *dev,
                        return;
 
                set_shaders(dev);
-               vb = r600_nomm_get_vb_ptr(dev);
        }
+       vb = r600_nomm_get_vb_ptr(dev);
 
        if (cpp == 4) {
                cb_format = COLOR_8_8_8_8;
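
A note on the pattern repeated in these r600_blit hunks: the vb pointer is now fetched once, after the possible flush, instead of once at the top of the function and again inside the flush branch. Presumably this is because flushing a full vertex buffer and picking up a fresh one invalidates any pointer cached earlier. A sketch of the corrected ordering (`needed` stands in for the 48-byte check above):

	if (dev_priv->blit_vb->used + needed > dev_priv->blit_vb->total) {
		r600_nomm_put_vb(dev);          /* flush the full vertex buffer */
		/* ... acquire a fresh vb, bail out on failure ... */
		set_shaders(dev);
	}
	vb = r600_nomm_get_vb_ptr(dev);         /* pointer into the current vb  */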
index acae33e2ad51cd26c9bb2152c4b261b279c0aff7..93108bb31d1d950637f52e9ab7e87be2b9c7ae77 100644 (file)
@@ -610,7 +610,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 
        DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
                  size_bytes, rdev->r600_blit.vb_used);
-       vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
        if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
                max_bytes = 8192;
 
@@ -653,6 +652,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
                                vb = r600_nomm_get_vb_ptr(dev);
 #endif
                        }
+                       vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
 
                        vb[0] = i2f(dst_x);
                        vb[1] = 0;
@@ -747,6 +747,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
                                vb = r600_nomm_get_vb_ptr(dev);
                        }
 #endif
+                       vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
 
                        vb[0] = i2f(dst_x / 4);
                        vb[1] = 0;
index 33b89cd8743ec9ea552fd12a5188d23d33e629b6..17e42195c632eab62ddef0db40aaf713c9fc067f 100644 (file)
@@ -28,7 +28,6 @@
 #include "drmP.h"
 #include "radeon.h"
 #include "r600d.h"
-#include "avivod.h"
 
 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
                                        struct radeon_cs_reloc **cs_reloc);
@@ -57,7 +56,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
-       header = ib_chunk->kdata[idx];
+       header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = CP_PACKET_GET_TYPE(header);
        pkt->count = CP_PACKET_GET_COUNT(header);
@@ -98,7 +97,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
                                        struct radeon_cs_reloc **cs_reloc)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
@@ -109,7 +107,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
                return -EINVAL;
        }
        *cs_reloc = NULL;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r600_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
@@ -121,7 +118,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
                          p3reloc.idx);
                return -EINVAL;
        }
-       idx = ib_chunk->kdata[p3reloc.idx + 1];
+       idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
@@ -146,7 +143,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
                                        struct radeon_cs_reloc **cs_reloc)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
@@ -157,7 +153,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
                return -EINVAL;
        }
        *cs_reloc = NULL;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = r600_cs_packet_parse(p, &p3reloc, p->idx);
        if (r) {
@@ -169,7 +164,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
                          p3reloc.idx);
                return -EINVAL;
        }
-       idx = ib_chunk->kdata[p3reloc.idx + 1];
+       idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
@@ -181,13 +176,136 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
        return 0;
 }
 
+/**
+ * r600_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser:            parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+       struct drm_mode_object *obj;
+       struct drm_crtc *crtc;
+       struct radeon_crtc *radeon_crtc;
+       struct radeon_cs_packet p3reloc, wait_reg_mem;
+       int crtc_id;
+       int r;
+       uint32_t header, h_idx, reg, wait_reg_mem_info;
+       volatile uint32_t *ib;
+
+       ib = p->ib->ptr;
+
+       /* parse the WAIT_REG_MEM */
+       r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
+       if (r)
+               return r;
+
+       /* check it's a WAIT_REG_MEM */
+       if (wait_reg_mem.type != PACKET_TYPE3 ||
+           wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+               DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+               r = -EINVAL;
+               return r;
+       }
+
+       wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+       /* bit 4 is reg (0) or mem (1) */
+       if (wait_reg_mem_info & 0x10) {
+               DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+               r = -EINVAL;
+               return r;
+       }
+       /* waiting for value to be equal */
+       if ((wait_reg_mem_info & 0x7) != 0x3) {
+               DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+               r = -EINVAL;
+               return r;
+       }
+       if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
+               DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+               r = -EINVAL;
+               return r;
+       }
+
+       if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
+               DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+               r = -EINVAL;
+               return r;
+       }
+
+       /* jump over the NOP */
+       r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+       if (r)
+               return r;
+
+       h_idx = p->idx - 2;
+       p->idx += wait_reg_mem.count + 2;
+       p->idx += p3reloc.count + 2;
+
+       header = radeon_get_ib_value(p, h_idx);
+       crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+       reg = CP_PACKET0_GET_REG(header);
+       mutex_lock(&p->rdev->ddev->mode_config.mutex);
+       obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+       if (!obj) {
+               DRM_ERROR("cannot find crtc %d\n", crtc_id);
+               r = -EINVAL;
+               goto out;
+       }
+       crtc = obj_to_crtc(obj);
+       radeon_crtc = to_radeon_crtc(crtc);
+       crtc_id = radeon_crtc->crtc_id;
+
+       if (!crtc->enabled) {
+               /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+               ib[h_idx + 2] = PACKET2(0);
+               ib[h_idx + 3] = PACKET2(0);
+               ib[h_idx + 4] = PACKET2(0);
+               ib[h_idx + 5] = PACKET2(0);
+               ib[h_idx + 6] = PACKET2(0);
+               ib[h_idx + 7] = PACKET2(0);
+               ib[h_idx + 8] = PACKET2(0);
+       } else if (crtc_id == 1) {
+               switch (reg) {
+               case AVIVO_D1MODE_VLINE_START_END:
+                       header &= ~R600_CP_PACKET0_REG_MASK;
+                       header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+                       break;
+               default:
+                       DRM_ERROR("unknown crtc reloc\n");
+                       r = -EINVAL;
+                       goto out;
+               }
+               ib[h_idx] = header;
+               ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
+       }
+out:
+       mutex_unlock(&p->rdev->ddev->mode_config.mutex);
+       return r;
+}
+
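
To make the stream this parser expects concrete, here is a rough dword map reconstructed from the index arithmetic above (h_idx = p->idx - 2, the status register rewrite at h_idx + 4, the crtc_id fetch at h_idx + 2 + 7 + 1); treat the exact offsets as a sketch, not a spec:

/*
 * ib[h_idx + 0]      PACKET0(AVIVO_D1MODE_VLINE_START_END, 0) header,
 *                    retargeted to the D2 register when crtc_id == 1
 * ib[h_idx + 1]      vline start/end value
 * ib[h_idx + 2]      PACKET3 WAIT_REG_MEM header (count 5)
 * ib[h_idx + 3]      info dword: poll a register, function "equal"
 * ib[h_idx + 4]      AVIVO_D1MODE_VLINE_STATUS >> 2 (D2 status for crtc 1)
 * ib[h_idx + 5..8]   remaining WAIT_REG_MEM operands (mask at h_idx + 7)
 * ib[h_idx + 9..10]  NOP packet carrying the crtc_id reloc
 *
 * dwords h_idx + 2 .. h_idx + 8 are replaced with PACKET2(0) when the
 * target crtc is switched off.
 */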
 static int r600_packet0_check(struct radeon_cs_parser *p,
                                struct radeon_cs_packet *pkt,
                                unsigned idx, unsigned reg)
 {
+       int r;
+
        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
-       case AVIVO_D2MODE_VLINE_START_END:
+               r = r600_cs_packet_parse_vline(p);
+               if (r) {
+                       DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+                                       idx, reg);
+                       return r;
+               }
                break;
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -218,17 +336,18 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
 static int r600_packet3_check(struct radeon_cs_parser *p,
                                struct radeon_cs_packet *pkt)
 {
-       struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        volatile u32 *ib;
        unsigned idx;
        unsigned i;
        unsigned start_reg, end_reg, reg;
        int r;
+       u32 idx_value;
 
        ib = p->ib->ptr;
-       ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
+       idx_value = radeon_get_ib_value(p, idx);
+
        switch (pkt->opcode) {
        case PACKET3_START_3D_CMDBUF:
                if (p->family >= CHIP_RV770 || pkt->count) {
@@ -259,8 +378,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        DRM_ERROR("bad DRAW_INDEX\n");
                        return -EINVAL;
                }
-               ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-               ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+               ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+               ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                break;
        case PACKET3_DRAW_INDEX_AUTO:
                if (pkt->count != 1) {
@@ -281,14 +400,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                /* bit 4 is reg (0) or mem (1) */
-               if (ib_chunk->kdata[idx+0] & 0x10) {
+               if (idx_value & 0x10) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("bad WAIT_REG_MEM\n");
                                return -EINVAL;
                        }
                        ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-                       ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+                       ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                }
                break;
        case PACKET3_SURFACE_SYNC:
@@ -297,8 +416,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                /* 0xffffffff/0x0 is flush all cache flag */
-               if (ib_chunk->kdata[idx+1] != 0xffffffff ||
-                   ib_chunk->kdata[idx+2] != 0) {
+               if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+                   radeon_get_ib_value(p, idx + 2) != 0) {
                        r = r600_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("bad SURFACE_SYNC\n");
@@ -319,7 +438,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                return -EINVAL;
                        }
                        ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-                       ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+                       ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                }
                break;
        case PACKET3_EVENT_WRITE_EOP:
@@ -333,10 +452,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
-               ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+               ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                break;
        case PACKET3_SET_CONFIG_REG:
-               start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
+               start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
                    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
@@ -356,7 +475,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                }
                break;
        case PACKET3_SET_CONTEXT_REG:
-               start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
+               start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
                    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
@@ -421,7 +540,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        DRM_ERROR("bad SET_RESOURCE\n");
                        return -EINVAL;
                }
-               start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET;
+               start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
                    (start_reg >= PACKET3_SET_RESOURCE_END) ||
@@ -430,7 +549,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        return -EINVAL;
                }
                for (i = 0; i < (pkt->count / 7); i++) {
-                       switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) {
+                       switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
                        case SQ_TEX_VTX_VALID_TEXTURE:
                                /* tex base */
                                r = r600_cs_packet_next_reloc(p, &reloc);
@@ -455,7 +574,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                                        return -EINVAL;
                                }
                                ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
-                               ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                                break;
                        case SQ_TEX_VTX_INVALID_TEXTURE:
                        case SQ_TEX_VTX_INVALID_BUFFER:
@@ -466,7 +585,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                }
                break;
        case PACKET3_SET_ALU_CONST:
-               start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+               start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
                    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
@@ -476,7 +595,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                }
                break;
        case PACKET3_SET_BOOL_CONST:
-               start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
+               start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
                    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
@@ -486,7 +605,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                }
                break;
        case PACKET3_SET_LOOP_CONST:
-               start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
+               start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
                    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
@@ -496,7 +615,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                }
                break;
        case PACKET3_SET_CTL_CONST:
-               start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET;
+               start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
                    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
@@ -510,7 +629,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        DRM_ERROR("bad SET_SAMPLER\n");
                        return -EINVAL;
                }
-               start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET;
+               start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
                end_reg = 4 * pkt->count + start_reg - 4;
                if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
                    (start_reg >= PACKET3_SET_SAMPLER_END) ||
@@ -602,6 +721,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
        kfree(parser->relocs);
        for (i = 0; i < parser->nchunks; i++) {
                kfree(parser->chunks[i].kdata);
+               kfree(parser->chunks[i].kpage[0]);
+               kfree(parser->chunks[i].kpage[1]);
        }
        kfree(parser->chunks);
        kfree(parser->chunks_array);
@@ -639,7 +760,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
         * uncached). */
        ib_chunk = &parser.chunks[parser.chunk_ib_idx];
        parser.ib->length_dw = ib_chunk->length_dw;
-       memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
        *l = parser.ib->length_dw;
        r = r600_cs_parse(&parser);
        if (r) {
@@ -647,6 +767,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
                r600_cs_parser_fini(&parser, r);
                return r;
        }
+       r = radeon_cs_finish_pages(&parser);
+       if (r) {
+               DRM_ERROR("Invalid command stream !\n");
+               r600_cs_parser_fini(&parser, r);
+               return r;
+       }
        r600_cs_parser_fini(&parser, r);
        return r;
 }
index 4a9028a85c9b225ce4cda0ccf8e82a77fa03fa26..9b64d47f1f82ff4116f0c8b25872730843f92c0a 100644 (file)
 #define                G_000E50_MCDW_BUSY(x)                   (((x) >> 13) & 1)
 #define                G_000E50_SEM_BUSY(x)                    (((x) >> 14) & 1)
 #define                G_000E50_RLC_BUSY(x)                    (((x) >> 15) & 1)
+#define                G_000E50_BIF_BUSY(x)                    (((x) >> 29) & 1)
 #define        R_000E60_SRBM_SOFT_RESET                        0x0E60
 #define                S_000E60_SOFT_RESET_BIF(x)              (((x) & 1) << 1)
 #define                S_000E60_SOFT_RESET_CG(x)               (((x) & 1) << 2)
index 6311b1362594bfa55c8d569464253ffb31299fa8..5ab35b81c86bfdeedf71ec6ff2b03e114bcfb4ec 100644 (file)
  *     - TESTING, TESTING, TESTING
  */
 
+/* Initialization path:
+ *  We expect that acceleration initialization might fail for various
+ *  reasons even though we work hard to make it work on most
+ *  configurations. In order to still have a working userspace in such
+ *  a situation, the init path must succeed up to the memory controller
+ *  initialization point. Failures before this point are considered
+ *  fatal errors. Here is the init callchain:
+ *      radeon_device_init  performs common structure and mutex initialization
+ *      asic_init           sets up the GPU memory layout and performs all
+ *                          one-time initialization (failures in this
+ *                          function are considered fatal)
+ *      asic_startup        sets up GPU acceleration; to follow the
+ *                          guideline, the first thing this function
+ *                          should do is set up the GPU memory
+ *                          controller (only MC setup failures are
+ *                          considered fatal)
+ */
+
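
A condensed sketch of the error-handling split this comment describes, mirroring what the r600_init()/r600_startup() hunks earlier in this diff actually do (asic_init/asic_startup stand for the per-ASIC callbacks, not literal function names):

	/* sketch: everything up to and including MC setup must succeed */
	r = asic_init(rdev);                    /* memory layout, one-time setup */
	if (r)
		return r;                       /* fatal: no usable device       */
	r = asic_startup(rdev);                 /* program the MC, then accel    */
	if (r) {
		/* past the MC, failure is soft: keep modesetting, drop accel */
		rdev->accel_working = false;
	}
	return 0;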
 #include <asm/atomic.h>
 #include <linux/wait.h>
 #include <linux/list.h>
@@ -342,7 +360,7 @@ struct radeon_ib {
        unsigned long           idx;
        uint64_t                gpu_addr;
        struct radeon_fence     *fence;
-       volatile uint32_t       *ptr;
+       uint32_t        *ptr;
        uint32_t                length_dw;
 };
 
@@ -415,7 +433,12 @@ struct radeon_cs_reloc {
 struct radeon_cs_chunk {
        uint32_t                chunk_id;
        uint32_t                length_dw;
+       int kpage_idx[2];
+       uint32_t                *kpage[2];
        uint32_t                *kdata;
+       void __user *user_ptr;
+       int last_copied_page;
+       int last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -438,8 +461,38 @@ struct radeon_cs_parser {
        struct radeon_ib        *ib;
        void                    *track;
        unsigned                family;
+       int parser_error;
 };
 
+extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
+extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
+
+
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+       u32 pg_idx, pg_offset;
+       u32 idx_value = 0;
+       int new_page;
+
+       pg_idx = (idx * 4) / PAGE_SIZE;
+       pg_offset = (idx * 4) % PAGE_SIZE;
+
+       if (ibc->kpage_idx[0] == pg_idx)
+               return ibc->kpage[0][pg_offset/4];
+       if (ibc->kpage_idx[1] == pg_idx)
+               return ibc->kpage[1][pg_offset/4];
+
+       new_page = radeon_cs_update_pages(p, pg_idx);
+       if (new_page < 0) {
+               p->parser_error = new_page;
+               return 0;
+       }
+
+       idx_value = ibc->kpage[new_page][pg_offset/4];
+       return idx_value;
+}
+
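
This helper is what lets the CS parsers above drop their direct ib_chunk->kdata reads: at most two pages of the userspace IB are kept copied in (kpage[0]/kpage[1]) and dwords are fetched on demand. A sketch of the calling pattern (where p->parser_error ultimately gets checked is up to the parse loop, which this excerpt does not show):

	/* sketch: pull packet words straight from the paged IB chunk */
	u32 header = radeon_get_ib_value(p, pkt->idx);
	u32 count  = CP_PACKET_GET_COUNT(header);
	u32 first  = radeon_get_ib_value(p, pkt->idx + 1);
	if (p->parser_error)
		return p->parser_error;         /* a page copy-in failed */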
 struct radeon_cs_packet {
        unsigned        idx;
        unsigned        type;
@@ -537,18 +590,8 @@ struct radeon_asic {
        void (*fini)(struct radeon_device *rdev);
        int (*resume)(struct radeon_device *rdev);
        int (*suspend)(struct radeon_device *rdev);
-       void (*errata)(struct radeon_device *rdev);
-       void (*vram_info)(struct radeon_device *rdev);
        void (*vga_set_state)(struct radeon_device *rdev, bool state);
        int (*gpu_reset)(struct radeon_device *rdev);
-       int (*mc_init)(struct radeon_device *rdev);
-       void (*mc_fini)(struct radeon_device *rdev);
-       int (*wb_init)(struct radeon_device *rdev);
-       void (*wb_fini)(struct radeon_device *rdev);
-       int (*gart_init)(struct radeon_device *rdev);
-       void (*gart_fini)(struct radeon_device *rdev);
-       int (*gart_enable)(struct radeon_device *rdev);
-       void (*gart_disable)(struct radeon_device *rdev);
        void (*gart_tlb_flush)(struct radeon_device *rdev);
        int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
        int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -558,7 +601,6 @@ struct radeon_asic {
        void (*ring_start)(struct radeon_device *rdev);
        int (*ring_test)(struct radeon_device *rdev);
        void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
-       int (*ib_test)(struct radeon_device *rdev);
        int (*irq_set)(struct radeon_device *rdev);
        int (*irq_process)(struct radeon_device *rdev);
        u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
@@ -736,7 +778,6 @@ struct radeon_device {
        bool                            shutdown;
        bool                            suspend;
        bool                            need_dma32;
-       bool                            new_init_path;
        bool                            accel_working;
        struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
        const struct firmware *me_fw;   /* all family ME firmware */
@@ -896,28 +937,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_resume(rdev) (rdev)->asic->resume((rdev))
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
-#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
-#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
-#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
-#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
-#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
-#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
-#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev))
-#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev))
-#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
-#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
-#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
-#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
 #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
 #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
 #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
 #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
-#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev))
 #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
@@ -943,6 +970,8 @@ extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
 extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
+extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
 struct r100_mc_save {
@@ -974,23 +1003,71 @@ extern void r100_vram_init_sizes(struct radeon_device *rdev);
 extern void r100_wb_disable(struct radeon_device *rdev);
 extern void r100_wb_fini(struct radeon_device *rdev);
 extern int r100_wb_init(struct radeon_device *rdev);
+extern void r100_hdp_reset(struct radeon_device *rdev);
+extern int r100_rb2d_reset(struct radeon_device *rdev);
+extern int r100_cp_reset(struct radeon_device *rdev);
+extern void r100_vga_render_disable(struct radeon_device *rdev);
+extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+                                               struct radeon_cs_packet *pkt,
+                                               struct radeon_object *robj);
+extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+                               struct radeon_cs_packet *pkt,
+                               const unsigned *auth, unsigned n,
+                               radeon_packet0_check_t check);
+extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
+                               struct radeon_cs_packet *pkt,
+                               unsigned idx);
+
+/* rv200,rv250,rv280 */
+extern void r200_set_safe_registers(struct radeon_device *rdev);
 
 /* r300,r350,rv350,rv370,rv380 */
 extern void r300_set_reg_safe(struct radeon_device *rdev);
 extern void r300_mc_program(struct radeon_device *rdev);
 extern void r300_vram_info(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
 extern int rv370_pcie_gart_init(struct radeon_device *rdev);
 extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
 extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
 extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
 
 /* r420,r423,rv410 */
+extern int r420_mc_init(struct radeon_device *rdev);
 extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
 extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
 
 /* rv515 */
+struct rv515_mc_save {
+       u32 d1vga_control;
+       u32 d2vga_control;
+       u32 vga_render_control;
+       u32 vga_hdp_control;
+       u32 d1crtc_control;
+       u32 d2crtc_control;
+};
 extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+extern void rv515_vga_render_disable(struct radeon_device *rdev);
+extern void rv515_set_safe_registers(struct radeon_device *rdev);
+extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+extern void rv515_clock_startup(struct radeon_device *rdev);
+extern void rv515_debugfs(struct radeon_device *rdev);
+extern int rv515_suspend(struct radeon_device *rdev);
+
+/* rs400 */
+extern int rs400_gart_init(struct radeon_device *rdev);
+extern int rs400_gart_enable(struct radeon_device *rdev);
+extern void rs400_gart_adjust_size(struct radeon_device *rdev);
+extern void rs400_gart_disable(struct radeon_device *rdev);
+extern void rs400_gart_fini(struct radeon_device *rdev);
+
+/* rs600 */
+extern void rs600_set_safe_registers(struct radeon_device *rdev);
+extern int rs600_irq_set(struct radeon_device *rdev);
+extern void rs600_irq_disable(struct radeon_device *rdev);
 
 /* rs690, rs740 */
 extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -1009,8 +1086,9 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev);
 extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
 extern int r600_ib_test(struct radeon_device *rdev);
 extern int r600_ring_test(struct radeon_device *rdev);
-extern int r600_wb_init(struct radeon_device *rdev);
 extern void r600_wb_fini(struct radeon_device *rdev);
+extern int r600_wb_enable(struct radeon_device *rdev);
+extern void r600_wb_disable(struct radeon_device *rdev);
 extern void r600_scratch_init(struct radeon_device *rdev);
 extern int r600_blit_init(struct radeon_device *rdev);
 extern void r600_blit_fini(struct radeon_device *rdev);
index 8968f78fa1e30e8424b101a8ccf477d4885286d5..c3532c7a6f3f04f74d26d6ef97ece8fea7da053e 100644 (file)
@@ -41,28 +41,17 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
 /*
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
-int r100_init(struct radeon_device *rdev);
-int r200_init(struct radeon_device *rdev);
+extern int r100_init(struct radeon_device *rdev);
+extern void r100_fini(struct radeon_device *rdev);
+extern int r100_suspend(struct radeon_device *rdev);
+extern int r100_resume(struct radeon_device *rdev);
 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-void r100_errata(struct radeon_device *rdev);
-void r100_vram_info(struct radeon_device *rdev);
 void r100_vga_set_state(struct radeon_device *rdev, bool state);
 int r100_gpu_reset(struct radeon_device *rdev);
-int r100_mc_init(struct radeon_device *rdev);
-void r100_mc_fini(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
-int r100_wb_init(struct radeon_device *rdev);
-void r100_wb_fini(struct radeon_device *rdev);
-int r100_pci_gart_init(struct radeon_device *rdev);
-void r100_pci_gart_fini(struct radeon_device *rdev);
-int r100_pci_gart_enable(struct radeon_device *rdev);
-void r100_pci_gart_disable(struct radeon_device *rdev);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-void r100_cp_fini(struct radeon_device *rdev);
-void r100_cp_disable(struct radeon_device *rdev);
 void r100_cp_commit(struct radeon_device *rdev);
 void r100_ring_start(struct radeon_device *rdev);
 int r100_irq_set(struct radeon_device *rdev);
@@ -83,33 +72,21 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r100_ib_test(struct radeon_device *rdev);
 int r100_ring_test(struct radeon_device *rdev);
 
 static struct radeon_asic r100_asic = {
        .init = &r100_init,
-       .errata = &r100_errata,
-       .vram_info = &r100_vram_info,
+       .fini = &r100_fini,
+       .suspend = &r100_suspend,
+       .resume = &r100_resume,
        .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r100_gpu_reset,
-       .mc_init = &r100_mc_init,
-       .mc_fini = &r100_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
-       .gart_init = &r100_pci_gart_init,
-       .gart_fini = &r100_pci_gart_fini,
-       .gart_enable = &r100_pci_gart_enable,
-       .gart_disable = &r100_pci_gart_disable,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r100_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
@@ -131,55 +108,38 @@ static struct radeon_asic r100_asic = {
 /*
  * r300,r350,rv350,rv380
  */
-int r300_init(struct radeon_device *rdev);
-void r300_errata(struct radeon_device *rdev);
-void r300_vram_info(struct radeon_device *rdev);
-int r300_gpu_reset(struct radeon_device *rdev);
-int r300_mc_init(struct radeon_device *rdev);
-void r300_mc_fini(struct radeon_device *rdev);
-void r300_ring_start(struct radeon_device *rdev);
-void r300_fence_ring_emit(struct radeon_device *rdev,
-                         struct radeon_fence *fence);
-int r300_cs_parse(struct radeon_cs_parser *p);
-int rv370_pcie_gart_init(struct radeon_device *rdev);
-void rv370_pcie_gart_fini(struct radeon_device *rdev);
-int rv370_pcie_gart_enable(struct radeon_device *rdev);
-void rv370_pcie_gart_disable(struct radeon_device *rdev);
-void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
-int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
-int r300_copy_dma(struct radeon_device *rdev,
-                 uint64_t src_offset,
-                 uint64_t dst_offset,
-                 unsigned num_pages,
-                 struct radeon_fence *fence);
-
+extern int r300_init(struct radeon_device *rdev);
+extern void r300_fini(struct radeon_device *rdev);
+extern int r300_suspend(struct radeon_device *rdev);
+extern int r300_resume(struct radeon_device *rdev);
+extern int r300_gpu_reset(struct radeon_device *rdev);
+extern void r300_ring_start(struct radeon_device *rdev);
+extern void r300_fence_ring_emit(struct radeon_device *rdev,
+                               struct radeon_fence *fence);
+extern int r300_cs_parse(struct radeon_cs_parser *p);
+extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r300_copy_dma(struct radeon_device *rdev,
+                       uint64_t src_offset,
+                       uint64_t dst_offset,
+                       unsigned num_pages,
+                       struct radeon_fence *fence);
 static struct radeon_asic r300_asic = {
        .init = &r300_init,
-       .errata = &r300_errata,
-       .vram_info = &r300_vram_info,
+       .fini = &r300_fini,
+       .suspend = &r300_suspend,
+       .resume = &r300_resume,
        .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
-       .mc_init = &r300_mc_init,
-       .mc_fini = &r300_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
-       .gart_init = &r100_pci_gart_init,
-       .gart_fini = &r100_pci_gart_fini,
-       .gart_enable = &r100_pci_gart_enable,
-       .gart_disable = &r100_pci_gart_disable,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
@@ -209,26 +169,14 @@ static struct radeon_asic r420_asic = {
        .fini = &r420_fini,
        .suspend = &r420_suspend,
        .resume = &r420_resume,
-       .errata = NULL,
-       .vram_info = NULL,
        .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
-       .mc_init = NULL,
-       .mc_fini = NULL,
-       .wb_init = NULL,
-       .wb_fini = NULL,
-       .gart_enable = NULL,
-       .gart_disable = NULL,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_init = NULL,
-       .cp_fini = NULL,
-       .cp_disable = NULL,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = NULL,
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
@@ -250,42 +198,27 @@ static struct radeon_asic r420_asic = {
 /*
  * rs400,rs480
  */
-void rs400_errata(struct radeon_device *rdev);
-void rs400_vram_info(struct radeon_device *rdev);
-int rs400_mc_init(struct radeon_device *rdev);
-void rs400_mc_fini(struct radeon_device *rdev);
-int rs400_gart_init(struct radeon_device *rdev);
-void rs400_gart_fini(struct radeon_device *rdev);
-int rs400_gart_enable(struct radeon_device *rdev);
-void rs400_gart_disable(struct radeon_device *rdev);
+extern int rs400_init(struct radeon_device *rdev);
+extern void rs400_fini(struct radeon_device *rdev);
+extern int rs400_suspend(struct radeon_device *rdev);
+extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 static struct radeon_asic rs400_asic = {
-       .init = &r300_init,
-       .errata = &rs400_errata,
-       .vram_info = &rs400_vram_info,
+       .init = &rs400_init,
+       .fini = &rs400_fini,
+       .suspend = &rs400_suspend,
+       .resume = &rs400_resume,
        .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
-       .mc_init = &rs400_mc_init,
-       .mc_fini = &rs400_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
-       .gart_init = &rs400_gart_init,
-       .gart_fini = &rs400_gart_fini,
-       .gart_enable = &rs400_gart_enable,
-       .gart_disable = &rs400_gart_disable,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
@@ -307,18 +240,13 @@ static struct radeon_asic rs400_asic = {
 /*
  * rs600.
  */
-int rs600_init(struct radeon_device *rdev);
-void rs600_errata(struct radeon_device *rdev);
-void rs600_vram_info(struct radeon_device *rdev);
-int rs600_mc_init(struct radeon_device *rdev);
-void rs600_mc_fini(struct radeon_device *rdev);
+extern int rs600_init(struct radeon_device *rdev);
+extern void rs600_fini(struct radeon_device *rdev);
+extern int rs600_suspend(struct radeon_device *rdev);
+extern int rs600_resume(struct radeon_device *rdev);
 int rs600_irq_set(struct radeon_device *rdev);
 int rs600_irq_process(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
-int rs600_gart_init(struct radeon_device *rdev);
-void rs600_gart_fini(struct radeon_device *rdev);
-int rs600_gart_enable(struct radeon_device *rdev);
-void rs600_gart_disable(struct radeon_device *rdev);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
 int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -326,28 +254,17 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
 static struct radeon_asic rs600_asic = {
        .init = &rs600_init,
-       .errata = &rs600_errata,
-       .vram_info = &rs600_vram_info,
+       .fini = &rs600_fini,
+       .suspend = &rs600_suspend,
+       .resume = &rs600_resume,
        .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
-       .mc_init = &rs600_mc_init,
-       .mc_fini = &rs600_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
-       .gart_init = &rs600_gart_init,
-       .gart_fini = &rs600_gart_fini,
-       .gart_enable = &rs600_gart_enable,
-       .gart_disable = &rs600_gart_disable,
        .gart_tlb_flush = &rs600_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
@@ -367,37 +284,26 @@ static struct radeon_asic rs600_asic = {
 /*
  * rs690,rs740
  */
-void rs690_errata(struct radeon_device *rdev);
-void rs690_vram_info(struct radeon_device *rdev);
-int rs690_mc_init(struct radeon_device *rdev);
-void rs690_mc_fini(struct radeon_device *rdev);
+int rs690_init(struct radeon_device *rdev);
+void rs690_fini(struct radeon_device *rdev);
+int rs690_resume(struct radeon_device *rdev);
+int rs690_suspend(struct radeon_device *rdev);
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs690_bandwidth_update(struct radeon_device *rdev);
 static struct radeon_asic rs690_asic = {
-       .init = &rs600_init,
-       .errata = &rs690_errata,
-       .vram_info = &rs690_vram_info,
+       .init = &rs690_init,
+       .fini = &rs690_fini,
+       .suspend = &rs690_suspend,
+       .resume = &rs690_resume,
        .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &r300_gpu_reset,
-       .mc_init = &rs690_mc_init,
-       .mc_fini = &rs690_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
-       .gart_init = &rs400_gart_init,
-       .gart_fini = &rs400_gart_fini,
-       .gart_enable = &rs400_gart_enable,
-       .gart_disable = &rs400_gart_disable,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
@@ -420,41 +326,29 @@ static struct radeon_asic rs690_asic = {
  * rv515
  */
 int rv515_init(struct radeon_device *rdev);
-void rv515_errata(struct radeon_device *rdev);
-void rv515_vram_info(struct radeon_device *rdev);
+void rv515_fini(struct radeon_device *rdev);
 int rv515_gpu_reset(struct radeon_device *rdev);
-int rv515_mc_init(struct radeon_device *rdev);
-void rv515_mc_fini(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_ring_start(struct radeon_device *rdev);
 uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
 void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rv515_bandwidth_update(struct radeon_device *rdev);
+int rv515_resume(struct radeon_device *rdev);
+int rv515_suspend(struct radeon_device *rdev);
 static struct radeon_asic rv515_asic = {
        .init = &rv515_init,
-       .errata = &rv515_errata,
-       .vram_info = &rv515_vram_info,
+       .fini = &rv515_fini,
+       .suspend = &rv515_suspend,
+       .resume = &rv515_resume,
        .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &rv515_gpu_reset,
-       .mc_init = &rv515_mc_init,
-       .mc_fini = &rv515_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
-       .gart_init = &rv370_pcie_gart_init,
-       .gart_fini = &rv370_pcie_gart_fini,
-       .gart_enable = &rv370_pcie_gart_enable,
-       .gart_disable = &rv370_pcie_gart_disable,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
@@ -476,35 +370,21 @@ static struct radeon_asic rv515_asic = {
 /*
  * r520,rv530,rv560,rv570,r580
  */
-void r520_errata(struct radeon_device *rdev);
-void r520_vram_info(struct radeon_device *rdev);
-int r520_mc_init(struct radeon_device *rdev);
-void r520_mc_fini(struct radeon_device *rdev);
-void r520_bandwidth_update(struct radeon_device *rdev);
+int r520_init(struct radeon_device *rdev);
+int r520_resume(struct radeon_device *rdev);
 static struct radeon_asic r520_asic = {
-       .init = &rv515_init,
-       .errata = &r520_errata,
-       .vram_info = &r520_vram_info,
+       .init = &r520_init,
+       .fini = &rv515_fini,
+       .suspend = &rv515_suspend,
+       .resume = &r520_resume,
        .vga_set_state = &r100_vga_set_state,
        .gpu_reset = &rv515_gpu_reset,
-       .mc_init = &r520_mc_init,
-       .mc_fini = &r520_mc_fini,
-       .wb_init = &r100_wb_init,
-       .wb_fini = &r100_wb_fini,
-       .gart_init = &rv370_pcie_gart_init,
-       .gart_fini = &rv370_pcie_gart_fini,
-       .gart_enable = &rv370_pcie_gart_enable,
-       .gart_disable = &rv370_pcie_gart_disable,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
-       .cp_init = &r100_cp_init,
-       .cp_fini = &r100_cp_fini,
-       .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
-       .ib_test = &r100_ib_test,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
@@ -519,7 +399,7 @@ static struct radeon_asic r520_asic = {
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
-       .bandwidth_update = &r520_bandwidth_update,
+       .bandwidth_update = &rv515_bandwidth_update,
 };
 
 /*
@@ -552,37 +432,23 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t offset, uint32_t obj_size);
 int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r600_ib_test(struct radeon_device *rdev);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_pages, struct radeon_fence *fence);
 
 static struct radeon_asic r600_asic = {
-       .errata = NULL,
        .init = &r600_init,
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
        .cp_commit = &r600_cp_commit,
-       .vram_info = NULL,
        .vga_set_state = &r600_vga_set_state,
        .gpu_reset = &r600_gpu_reset,
-       .mc_init = NULL,
-       .mc_fini = NULL,
-       .wb_init = &r600_wb_init,
-       .wb_fini = &r600_wb_fini,
-       .gart_enable = NULL,
-       .gart_disable = NULL,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
-       .cp_init = NULL,
-       .cp_fini = NULL,
-       .cp_disable = NULL,
-       .ring_start = NULL,
        .ring_test = &r600_ring_test,
        .ring_ib_execute = &r600_ring_ib_execute,
-       .ib_test = &r600_ib_test,
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .fence_ring_emit = &r600_fence_ring_emit,
@@ -596,7 +462,7 @@ static struct radeon_asic r600_asic = {
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r600_set_surface_reg,
        .clear_surface_reg = r600_clear_surface_reg,
-       .bandwidth_update = &r520_bandwidth_update,
+       .bandwidth_update = &rv515_bandwidth_update,
 };
 
 /*
@@ -609,30 +475,17 @@ int rv770_resume(struct radeon_device *rdev);
 int rv770_gpu_reset(struct radeon_device *rdev);
 
 static struct radeon_asic rv770_asic = {
-       .errata = NULL,
        .init = &rv770_init,
        .fini = &rv770_fini,
        .suspend = &rv770_suspend,
        .resume = &rv770_resume,
        .cp_commit = &r600_cp_commit,
-       .vram_info = NULL,
        .gpu_reset = &rv770_gpu_reset,
        .vga_set_state = &r600_vga_set_state,
-       .mc_init = NULL,
-       .mc_fini = NULL,
-       .wb_init = &r600_wb_init,
-       .wb_fini = &r600_wb_fini,
-       .gart_enable = NULL,
-       .gart_disable = NULL,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
-       .cp_init = NULL,
-       .cp_fini = NULL,
-       .cp_disable = NULL,
-       .ring_start = NULL,
        .ring_test = &r600_ring_test,
        .ring_ib_execute = &r600_ring_ib_execute,
-       .ib_test = &r600_ib_test,
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .fence_ring_emit = &r600_fence_ring_emit,
@@ -646,7 +499,7 @@ static struct radeon_asic rv770_asic = {
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r600_set_surface_reg,
        .clear_surface_reg = r600_clear_surface_reg,
-       .bandwidth_update = &r520_bandwidth_update,
+       .bandwidth_update = &rv515_bandwidth_update,
 };
 
 #endif
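
Reviewer sketch: the radeon_asic.h hunks above collapse the per-ASIC dispatch table from many fine-grained hooks (mc_init, gart_enable, cp_init, ib_test, ...) into coarse per-family init/fini/suspend/resume entry points. A minimal, self-contained illustration of that ops-table pattern follows; all names are invented for the sketch and are not the driver's real types.

#include <stdio.h>

/* One coarse entry point per lifecycle stage, mirroring the
 * init/fini/suspend/resume consolidation in the diff above. */
struct chip_ops {
        int  (*init)(void);
        void (*fini)(void);
        int  (*suspend)(void);
        int  (*resume)(void);
};

static int  demo_init(void)    { puts("init: bring up MC, GART, CP, IB pool"); return 0; }
static void demo_fini(void)    { puts("fini: tear everything down"); }
static int  demo_suspend(void) { puts("suspend: stop CP, disable GART"); return 0; }
static int  demo_resume(void)  { puts("resume: re-post and restart engines"); return 0; }

static const struct chip_ops demo_ops = {
        .init    = demo_init,
        .fini    = demo_fini,
        .suspend = demo_suspend,
        .resume  = demo_resume,
};

int main(void)
{
        const struct chip_ops *ops = &demo_ops; /* would be chosen per chip family */

        if (ops->init())
                return 1;
        ops->suspend();
        ops->resume();
        ops->fini();
        return 0;
}
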
index 743742128307b4de0ebae64c6d64e6984d56c3a3..5b6c08cee40ee8fa3d01a155005791c83ab4fe75 100644 (file)
@@ -272,12 +272,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                            (le16_to_cpu(path->usConnObjectId) &
                             OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
-                       if ((le16_to_cpu(path->usDeviceTag) ==
-                            ATOM_DEVICE_TV1_SUPPORT)
-                           || (le16_to_cpu(path->usDeviceTag) ==
-                               ATOM_DEVICE_TV2_SUPPORT)
-                           || (le16_to_cpu(path->usDeviceTag) ==
-                               ATOM_DEVICE_CV_SUPPORT))
+                       /* TODO CV support */
+                       if (le16_to_cpu(path->usDeviceTag) ==
+                               ATOM_DEVICE_CV_SUPPORT)
                                continue;
 
                        if ((rdev->family == CHIP_RS780) &&
index 96e37a6e7ce4d7e28c85a3e42a0445d57f7120ef..34a9b9119518273c97cb1df5e1b966709a838c7b 100644 (file)
 /*
  * BIOS.
  */
+
+/* If you boot an IGP board with a discrete card as the primary,
+ * the IGP ROM is not accessible via the ROM BAR, because the IGP ROM
+ * is part of the system BIOS.  On boot, the system BIOS places a
+ * copy of the IGP ROM at the start of VRAM if a discrete card is
+ * present.
+ */
+static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+{
+       uint8_t __iomem *bios;
+       resource_size_t vram_base;
+       resource_size_t size = 256 * 1024; /* ??? */
+
+       rdev->bios = NULL;
+       vram_base = drm_get_resource_start(rdev->ddev, 0);
+       bios = ioremap(vram_base, size);
+       if (!bios) {
+               DRM_ERROR("Unable to mmap vram\n");
+               return false;
+       }
+
+       if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+               iounmap(bios);
+               DRM_ERROR("bad rom signature\n");
+               return false;
+       }
+       rdev->bios = kmalloc(size, GFP_KERNEL);
+       if (rdev->bios == NULL) {
+               iounmap(bios);
+               DRM_ERROR("kmalloc failed\n");
+               return false;
+       }
+       memcpy(rdev->bios, bios, size);
+       iounmap(bios);
+       return true;
+}
+
 static bool radeon_read_bios(struct radeon_device *rdev)
 {
        uint8_t __iomem *bios;
        size_t size;
 
        rdev->bios = NULL;
+       /* XXX: some cards may return 0 for rom size? ddx has a workaround */
        bios = pci_map_rom(rdev->pdev, &size);
        if (!bios) {
                return false;
@@ -341,7 +379,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
 
 static bool radeon_read_disabled_bios(struct radeon_device *rdev)
 {
-       if (rdev->family >= CHIP_RV770)
+       if (rdev->flags & RADEON_IS_IGP)
+               return igp_read_bios_from_vram(rdev);
+       else if (rdev->family >= CHIP_RV770)
                return r700_read_disabled_bios(rdev);
        else if (rdev->family >= CHIP_R600)
                return r600_read_disabled_bios(rdev);
@@ -356,7 +396,12 @@ bool radeon_get_bios(struct radeon_device *rdev)
        bool r;
        uint16_t tmp;
 
-       r = radeon_read_bios(rdev);
+       if (rdev->flags & RADEON_IS_IGP) {
+               r = igp_read_bios_from_vram(rdev);
+               if (r == false)
+                       r = radeon_read_bios(rdev);
+       } else
+               r = radeon_read_bios(rdev);
        if (r == false) {
                r = radeon_read_disabled_bios(rdev);
        }
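
The new igp_read_bios_from_vram() above hinges on the standard PC option-ROM signature: a valid ROM image begins with the bytes 0x55, 0xAA. A small user-space sketch of that check over an in-memory buffer (illustrative only; the helper name is invented, not kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A ROM image is considered valid only if it starts with 0x55 0xAA. */
static bool rom_signature_ok(const uint8_t *image, size_t size)
{
        return size >= 2 && image[0] == 0x55 && image[1] == 0xaa;
}

int main(void)
{
        uint8_t good[] = { 0x55, 0xaa, 0x00 };
        uint8_t bad[]  = { 0xff, 0xff, 0x00 };

        printf("good image: %s\n", rom_signature_ok(good, sizeof(good)) ? "valid" : "bad rom signature");
        printf("bad image:  %s\n", rom_signature_ok(bad, sizeof(bad))   ? "valid" : "bad rom signature");
        return 0;
}
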
index 152eef13197a1721602841511467652756e09a62..f5c32a766b10d772b01d12b19660c11a4ce208e2 100644 (file)
@@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
                                        R300_PIXCLK_TRANS_ALWAYS_ONb |
                                        R300_PIXCLK_TVO_ALWAYS_ONb |
                                        R300_P2G2CLK_ALWAYS_ONb |
-                                       R300_P2G2CLK_ALWAYS_ONb);
+                                       R300_P2G2CLK_DAC_ALWAYS_ONb);
                                WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
                        } else if (rdev->family >= CHIP_RV350) {
                                tmp = RREG32_PLL(R300_SCLK_CNTL2);
@@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
                                        R300_PIXCLK_TRANS_ALWAYS_ONb |
                                        R300_PIXCLK_TVO_ALWAYS_ONb |
                                        R300_P2G2CLK_ALWAYS_ONb |
-                                       R300_P2G2CLK_ALWAYS_ONb);
+                                       R300_P2G2CLK_DAC_ALWAYS_ONb);
                                WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
 
                                tmp = RREG32_PLL(RADEON_MCLK_MISC);
@@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
                                 R300_PIXCLK_TRANS_ALWAYS_ONb |
                                 R300_PIXCLK_TVO_ALWAYS_ONb |
                                 R300_P2G2CLK_ALWAYS_ONb |
-                                R300_P2G2CLK_ALWAYS_ONb |
+                                R300_P2G2CLK_DAC_ALWAYS_ONb |
                                 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
                        WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
                } else if (rdev->family >= CHIP_RV350) {
@@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
                                 R300_PIXCLK_TRANS_ALWAYS_ONb |
                                 R300_PIXCLK_TVO_ALWAYS_ONb |
                                 R300_P2G2CLK_ALWAYS_ONb |
-                                R300_P2G2CLK_ALWAYS_ONb |
+                                R300_P2G2CLK_DAC_ALWAYS_ONb |
                                 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
                        WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
                } else {
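
The radeon_legacy_clocks hunks above all fix the same copy-paste bug: the mask OR'd R300_P2G2CLK_ALWAYS_ONb twice and never set the DAC variant. OR-ing a bit that is already present is a no-op, so the intended second bit was silently missing. A tiny sketch (bit values made up for illustration):

#include <stdio.h>

#define BIT_A (1u << 3)
#define BIT_B (1u << 4)

int main(void)
{
        unsigned buggy = BIT_A | BIT_A;   /* second operand was meant to be BIT_B */
        unsigned fixed = BIT_A | BIT_B;

        printf("buggy mask: 0x%02x\n", buggy);   /* 0x08 - BIT_B never set */
        printf("fixed mask: 0x%02x\n", fixed);   /* 0x18 */
        return 0;
}
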
index af1d551f1a8f6a877197409a3e6482521f4c2def..e376be47a4a0a67e54d2089b4947d562ac4e9fad 100644 (file)
@@ -26,6 +26,7 @@
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
 #include "radeon_drm.h"
 #include "radeon.h"
 #include "atom.h"
@@ -245,7 +246,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
                if (common_modes[i].w < 320 || common_modes[i].h < 200)
                        continue;
 
-               mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false);
+               mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
                drm_mode_probed_add(connector, mode);
        }
 }
@@ -559,7 +560,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
                radeon_add_common_modes(encoder, connector);
        else {
                /* only 800x600 is supported right now on pre-avivo chips */
-               tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false);
+               tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
                tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
                drm_mode_probed_add(connector, tv_mode);
        }
@@ -743,6 +744,15 @@ struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
        return NULL;
 }
 
+static void radeon_dvi_force(struct drm_connector *connector)
+{
+       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+       if (connector->force == DRM_FORCE_ON)
+               radeon_connector->use_digital = false;
+       if (connector->force == DRM_FORCE_ON_DIGITAL)
+               radeon_connector->use_digital = true;
+}
+
 struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
        .get_modes = radeon_dvi_get_modes,
        .mode_valid = radeon_vga_mode_valid,
@@ -755,6 +765,7 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = radeon_connector_set_property,
        .destroy = radeon_connector_destroy,
+       .force = radeon_dvi_force,
 };
 
 void
@@ -771,6 +782,7 @@ radeon_add_atom_connector(struct drm_device *dev,
        struct radeon_connector *radeon_connector;
        struct radeon_connector_atom_dig *radeon_dig_connector;
        uint32_t subpixel_order = SubPixelNone;
+       int ret;
 
        /* fixme - tv/cv/din */
        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -796,24 +808,30 @@ radeon_add_atom_connector(struct drm_device *dev,
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_VGA:
                drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
                        if (!radeon_connector->ddc_bus)
                                goto failed;
                }
+               radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.load_detect_property,
                                              1);
                break;
        case DRM_MODE_CONNECTOR_DVIA:
                drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
                        if (!radeon_connector->ddc_bus)
                                goto failed;
                }
+               radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.load_detect_property,
                                              1);
@@ -827,7 +845,9 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
                        if (!radeon_connector->ddc_bus)
@@ -837,6 +857,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.coherent_mode_property,
                                              1);
+               radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.load_detect_property,
                                              1);
@@ -850,7 +871,9 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
                        if (!radeon_connector->ddc_bus)
@@ -869,7 +892,9 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
                        if (!radeon_connector->ddc_bus)
@@ -882,11 +907,14 @@ radeon_add_atom_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_9PinDIN:
                if (radeon_tv == 1) {
                        drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-                       drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+                       ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+                       if (ret)
+                               goto failed;
+                       radeon_connector->dac_load_detect = true;
+                       drm_connector_attach_property(&radeon_connector->base,
+                                                     rdev->mode_info.load_detect_property,
+                                                     1);
                }
-               drm_connector_attach_property(&radeon_connector->base,
-                                             rdev->mode_info.load_detect_property,
-                                             1);
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -896,7 +924,9 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
                        if (!radeon_connector->ddc_bus)
@@ -932,6 +962,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        uint32_t subpixel_order = SubPixelNone;
+       int ret;
 
        /* fixme - tv/cv/din */
        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -957,24 +988,30 @@ radeon_add_legacy_connector(struct drm_device *dev,
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_VGA:
                drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
                        if (!radeon_connector->ddc_bus)
                                goto failed;
                }
+               radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.load_detect_property,
                                              1);
                break;
        case DRM_MODE_CONNECTOR_DVIA:
                drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
                        if (!radeon_connector->ddc_bus)
                                goto failed;
                }
+               radeon_connector->dac_load_detect = true;
                drm_connector_attach_property(&radeon_connector->base,
                                              rdev->mode_info.load_detect_property,
                                              1);
@@ -982,11 +1019,14 @@ radeon_add_legacy_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_DVID:
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
                        if (!radeon_connector->ddc_bus)
                                goto failed;
+                       radeon_connector->dac_load_detect = true;
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
@@ -998,7 +1038,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_9PinDIN:
                if (radeon_tv == 1) {
                        drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-                       drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+                       ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+                       if (ret)
+                               goto failed;
+                       radeon_connector->dac_load_detect = true;
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
@@ -1006,7 +1049,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
-               drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+               ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+               if (ret)
+                       goto failed;
                if (i2c_bus->valid) {
                        radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
                        if (!radeon_connector->ddc_bus)
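
A recurring change in the connector hunks above is that the helper-add return value is now checked and failures jump to the connector's cleanup label, alongside the new dac_load_detect flag. A minimal sketch of that goto-based unwind idiom, using invented helper names rather than the DRM API:

#include <stdio.h>
#include <stdlib.h>

/* Pretend setup step that can fail. */
static int helper_add(int simulate_failure) { return simulate_failure ? -1 : 0; }

static int add_connector(int simulate_failure)
{
        char *ddc_bus = NULL;

        if (helper_add(simulate_failure))
                goto failed;

        ddc_bus = malloc(16);
        if (!ddc_bus)
                goto failed;

        puts("connector registered");
        free(ddc_bus);
        return 0;

failed:
        free(ddc_bus);          /* free(NULL) is a no-op */
        puts("connector setup failed, cleaned up");
        return -1;
}

int main(void)
{
        add_connector(0);
        add_connector(1);
        return 0;
}
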
index 12f5990c2d2a443d954ffcaff1a88eb05afe40b1..5ab2cf96a26498a3d29aba7fe18ed6c46716b161 100644 (file)
@@ -142,15 +142,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                }
 
                p->chunks[i].length_dw = user_chunk.length_dw;
-               cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+               p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
 
-               size = p->chunks[i].length_dw * sizeof(uint32_t);
-               p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
-               if (p->chunks[i].kdata == NULL) {
-                       return -ENOMEM;
-               }
-               if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
-                       return -EFAULT;
+               cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+               if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
+                       size = p->chunks[i].length_dw * sizeof(uint32_t);
+                       p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
+                       if (p->chunks[i].kdata == NULL) {
+                               return -ENOMEM;
+                       }
+                       if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
+                                              p->chunks[i].user_ptr, size)) {
+                               return -EFAULT;
+                       }
+               } else {
+                       p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+                       p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+                       if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
+                               kfree(p->chunks[i].kpage[0]);
+                               kfree(p->chunks[i].kpage[1]);
+                               return -ENOMEM;
+                       }
+                       p->chunks[i].kpage_idx[0] = -1;
+                       p->chunks[i].kpage_idx[1] = -1;
+                       p->chunks[i].last_copied_page = -1;
+                       p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
                }
        }
        if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
@@ -190,6 +206,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
        kfree(parser->relocs_ptr);
        for (i = 0; i < parser->nchunks; i++) {
                kfree(parser->chunks[i].kdata);
+               kfree(parser->chunks[i].kpage[0]);
+               kfree(parser->chunks[i].kpage[1]);
        }
        kfree(parser->chunks);
        kfree(parser->chunks_array);
@@ -238,8 +256,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         * uncached). */
        ib_chunk = &parser.chunks[parser.chunk_ib_idx];
        parser.ib->length_dw = ib_chunk->length_dw;
-       memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
        r = radeon_cs_parse(&parser);
+       if (r || parser.parser_error) {
+               DRM_ERROR("Invalid command stream !\n");
+               radeon_cs_parser_fini(&parser, r);
+               mutex_unlock(&rdev->cs_mutex);
+               return r;
+       }
+       r = radeon_cs_finish_pages(&parser);
        if (r) {
                DRM_ERROR("Invalid command stream !\n");
                radeon_cs_parser_fini(&parser, r);
@@ -254,3 +278,64 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        mutex_unlock(&rdev->cs_mutex);
        return r;
 }
+
+int radeon_cs_finish_pages(struct radeon_cs_parser *p)
+{
+       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+       int i;
+       int size = PAGE_SIZE;
+
+       for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
+               if (i == ibc->last_page_index) {
+                       size = (ibc->length_dw * 4) % PAGE_SIZE;
+                       if (size == 0)
+                               size = PAGE_SIZE;
+               }
+
+               if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+                                      ibc->user_ptr + (i * PAGE_SIZE),
+                                      size))
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+{
+       int new_page;
+       struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+       int i;
+       int size = PAGE_SIZE;
+
+       for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
+               if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+                                      ibc->user_ptr + (i * PAGE_SIZE),
+                                      PAGE_SIZE)) {
+                       p->parser_error = -EFAULT;
+                       return 0;
+               }
+       }
+
+       new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
+
+       if (pg_idx == ibc->last_page_index) {
+               size = (ibc->length_dw * 4) % PAGE_SIZE;
+               if (size == 0)
+                       size = PAGE_SIZE;
+       }
+
+       if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
+                              ibc->user_ptr + (pg_idx * PAGE_SIZE),
+                              size)) {
+               p->parser_error = -EFAULT;
+               return 0;
+       }
+
+       /* copy to IB here */
+       memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+
+       ibc->last_copied_page = pg_idx;
+       ibc->kpage_idx[new_page] = pg_idx;
+
+       return new_page;
+}
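
radeon_cs_update_pages() and radeon_cs_finish_pages() above replace the old "copy the whole IB chunk up front" approach with a two-page window: kpage[0]/kpage[1] cache user pages on demand, and the slot holding the older page is recycled. A self-contained user-space sketch of that two-slot cache (names and sizes are illustrative, not the kernel structures):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Two-slot page cache over a large source buffer: pages are copied in
 * on demand and the slot holding the older page is recycled, mirroring
 * the kpage[]/kpage_idx[] scheme added to the CS parser. */
struct page_cache {
        char page[2][PAGE_SIZE];
        int  page_idx[2];        /* which source page each slot holds, -1 = empty */
};

static const char *cache_get(struct page_cache *c, const char *src, int pg)
{
        int slot;

        if (c->page_idx[0] == pg)
                return c->page[0];
        if (c->page_idx[1] == pg)
                return c->page[1];

        /* recycle the slot holding the lower (older) page index */
        slot = c->page_idx[0] < c->page_idx[1] ? 0 : 1;
        memcpy(c->page[slot], src + (size_t)pg * PAGE_SIZE, PAGE_SIZE);
        c->page_idx[slot] = pg;
        return c->page[slot];
}

int main(void)
{
        static char src[4 * PAGE_SIZE];
        struct page_cache c = { .page_idx = { -1, -1 } };
        int i;

        for (i = 0; i < 4; i++)
                memset(src + (size_t)i * PAGE_SIZE, 'a' + i, PAGE_SIZE);

        printf("page 2 starts with '%c'\n", cache_get(&c, src, 2)[0]);
        printf("page 0 starts with '%c'\n", cache_get(&c, src, 0)[0]);
        printf("page 3 starts with '%c'\n", cache_get(&c, src, 3)[0]); /* recycles a slot */
        return 0;
}
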
index daf5db780956c2bc7738912a48f5407e1d1182ca..3d667031de6ea17e23ff08d0d50ee66d3ae1d47f 100644 (file)
@@ -322,10 +322,6 @@ int radeon_asic_init(struct radeon_device *rdev)
        case CHIP_RV380:
                rdev->asic = &r300_asic;
                if (rdev->flags & RADEON_IS_PCIE) {
-                       rdev->asic->gart_init = &rv370_pcie_gart_init;
-                       rdev->asic->gart_fini = &rv370_pcie_gart_fini;
-                       rdev->asic->gart_enable = &rv370_pcie_gart_enable;
-                       rdev->asic->gart_disable = &rv370_pcie_gart_disable;
                        rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
                        rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
                }
@@ -485,7 +481,6 @@ void radeon_combios_fini(struct radeon_device *rdev)
 static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 {
        struct radeon_device *rdev = cookie;
-
        radeon_vga_set_state(rdev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -493,6 +488,29 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
+
+void radeon_agp_disable(struct radeon_device *rdev)
+{
+       rdev->flags &= ~RADEON_IS_AGP;
+       if (rdev->family >= CHIP_R600) {
+               DRM_INFO("Forcing AGP to PCIE mode\n");
+               rdev->flags |= RADEON_IS_PCIE;
+       } else if (rdev->family >= CHIP_RV515 ||
+                       rdev->family == CHIP_RV380 ||
+                       rdev->family == CHIP_RV410 ||
+                       rdev->family == CHIP_R423) {
+               DRM_INFO("Forcing AGP to PCIE mode\n");
+               rdev->flags |= RADEON_IS_PCIE;
+               rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+               rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+       } else {
+               DRM_INFO("Forcing AGP to PCI mode\n");
+               rdev->flags |= RADEON_IS_PCI;
+               rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+               rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+       }
+}
+
 /*
  * Radeon device.
  */
@@ -531,29 +549,7 @@ int radeon_device_init(struct radeon_device *rdev,
        }
 
        if (radeon_agpmode == -1) {
-               rdev->flags &= ~RADEON_IS_AGP;
-               if (rdev->family >= CHIP_RV515 ||
-                   rdev->family == CHIP_RV380 ||
-                   rdev->family == CHIP_RV410 ||
-                   rdev->family == CHIP_R423) {
-                       DRM_INFO("Forcing AGP to PCIE mode\n");
-                       rdev->flags |= RADEON_IS_PCIE;
-                       rdev->asic->gart_init = &rv370_pcie_gart_init;
-                       rdev->asic->gart_fini = &rv370_pcie_gart_fini;
-                       rdev->asic->gart_enable = &rv370_pcie_gart_enable;
-                       rdev->asic->gart_disable = &rv370_pcie_gart_disable;
-                       rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-                       rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-               } else {
-                       DRM_INFO("Forcing AGP to PCI mode\n");
-                       rdev->flags |= RADEON_IS_PCI;
-                       rdev->asic->gart_init = &r100_pci_gart_init;
-                       rdev->asic->gart_fini = &r100_pci_gart_fini;
-                       rdev->asic->gart_enable = &r100_pci_gart_enable;
-                       rdev->asic->gart_disable = &r100_pci_gart_disable;
-                       rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
-                       rdev->asic->gart_set_page = &r100_pci_gart_set_page;
-               }
+               radeon_agp_disable(rdev);
        }
 
        /* set DMA mask + need_dma32 flags.
@@ -585,111 +581,27 @@ int radeon_device_init(struct radeon_device *rdev,
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
 
-       rdev->new_init_path = false;
-       r = radeon_init(rdev);
-       if (r) {
-               return r;
-       }
-
        /* if we have > 1 VGA cards, then disable the radeon VGA resources */
        r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
        if (r) {
                return -EINVAL;
        }
 
-       if (!rdev->new_init_path) {
-               /* Setup errata flags */
-               radeon_errata(rdev);
-               /* Initialize scratch registers */
-               radeon_scratch_init(rdev);
-               /* Initialize surface registers */
-               radeon_surface_init(rdev);
-
-               /* BIOS*/
-               if (!radeon_get_bios(rdev)) {
-                       if (ASIC_IS_AVIVO(rdev))
-                               return -EINVAL;
-               }
-               if (rdev->is_atom_bios) {
-                       r = radeon_atombios_init(rdev);
-                       if (r) {
-                               return r;
-                       }
-               } else {
-                       r = radeon_combios_init(rdev);
-                       if (r) {
-                               return r;
-                       }
-               }
-               /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-               if (radeon_gpu_reset(rdev)) {
-                       /* FIXME: what do we want to do here ? */
-               }
-               /* check if cards are posted or not */
-               if (!radeon_card_posted(rdev) && rdev->bios) {
-                       DRM_INFO("GPU not posted. posting now...\n");
-                       if (rdev->is_atom_bios) {
-                               atom_asic_init(rdev->mode_info.atom_context);
-                       } else {
-                               radeon_combios_asic_init(rdev->ddev);
-                       }
-               }
-               /* Get clock & vram information */
-               radeon_get_clock_info(rdev->ddev);
-               radeon_vram_info(rdev);
-               /* Initialize clocks */
-               r = radeon_clocks_init(rdev);
-               if (r) {
-                       return r;
-               }
+       r = radeon_init(rdev);
+       if (r)
+               return r;
 
-               /* Initialize memory controller (also test AGP) */
-               r = radeon_mc_init(rdev);
-               if (r) {
-                       return r;
-               }
-               /* Fence driver */
-               r = radeon_fence_driver_init(rdev);
-               if (r) {
-                       return r;
-               }
-               r = radeon_irq_kms_init(rdev);
-               if (r) {
-                       return r;
-               }
-               /* Memory manager */
-               r = radeon_object_init(rdev);
-               if (r) {
-                       return r;
-               }
-               r = radeon_gpu_gart_init(rdev);
+       if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
+               /* Acceleration is not working on the AGP card; try again
+                * with a fallback to PCI or PCIE GART
+                */
+               radeon_gpu_reset(rdev);
+               radeon_fini(rdev);
+               radeon_agp_disable(rdev);
+               r = radeon_init(rdev);
                if (r)
                        return r;
-               /* Initialize GART (initialize after TTM so we can allocate
-                * memory through TTM but finalize after TTM) */
-               r = radeon_gart_enable(rdev);
-               if (r)
-                       return 0;
-                       r = radeon_gem_init(rdev);
-               if (r)
-                       return 0;
-
-               /* 1M ring buffer */
-               r = radeon_cp_init(rdev, 1024 * 1024);
-               if (r)
-                       return 0;
-               r = radeon_wb_init(rdev);
-               if (r)
-                       DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
-               r = radeon_ib_pool_init(rdev);
-               if (r)
-                       return 0;
-               r = radeon_ib_test(rdev);
-               if (r)
-                       return 0;
-               rdev->accel_working = true;
        }
-       DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
        if (radeon_testing) {
                radeon_test_moves(rdev);
        }
@@ -703,32 +615,8 @@ void radeon_device_fini(struct radeon_device *rdev)
 {
        DRM_INFO("radeon: finishing device.\n");
        rdev->shutdown = true;
-       /* Order matter so becarefull if you rearrange anythings */
-       if (!rdev->new_init_path) {
-               radeon_ib_pool_fini(rdev);
-               radeon_cp_fini(rdev);
-               radeon_wb_fini(rdev);
-               radeon_gpu_gart_fini(rdev);
-               radeon_gem_fini(rdev);
-               radeon_mc_fini(rdev);
-#if __OS_HAS_AGP
-               radeon_agp_fini(rdev);
-#endif
-               radeon_irq_kms_fini(rdev);
-               vga_client_register(rdev->pdev, NULL, NULL, NULL);
-               radeon_fence_driver_fini(rdev);
-               radeon_clocks_fini(rdev);
-               radeon_object_fini(rdev);
-               if (rdev->is_atom_bios) {
-                       radeon_atombios_fini(rdev);
-               } else {
-                       radeon_combios_fini(rdev);
-               }
-               kfree(rdev->bios);
-               rdev->bios = NULL;
-       } else {
-               radeon_fini(rdev);
-       }
+       radeon_fini(rdev);
+       vga_client_register(rdev->pdev, NULL, NULL, NULL);
        iounmap(rdev->rmmio);
        rdev->rmmio = NULL;
 }
@@ -768,14 +656,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 
        radeon_save_bios_scratch_regs(rdev);
 
-       if (!rdev->new_init_path) {
-               radeon_cp_disable(rdev);
-               radeon_gart_disable(rdev);
-               rdev->irq.sw_int = false;
-               radeon_irq_set(rdev);
-       } else {
-               radeon_suspend(rdev);
-       }
+       radeon_suspend(rdev);
        /* evict remaining vram memory */
        radeon_object_evict_vram(rdev);
 
@@ -794,7 +675,6 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 int radeon_resume_kms(struct drm_device *dev)
 {
        struct radeon_device *rdev = dev->dev_private;
-       int r;
 
        acquire_console_sem();
        pci_set_power_state(dev->pdev, PCI_D0);
@@ -804,43 +684,7 @@ int radeon_resume_kms(struct drm_device *dev)
                return -1;
        }
        pci_set_master(dev->pdev);
-       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-       if (!rdev->new_init_path) {
-               if (radeon_gpu_reset(rdev)) {
-                       /* FIXME: what do we want to do here ? */
-               }
-               /* post card */
-               if (rdev->is_atom_bios) {
-                       atom_asic_init(rdev->mode_info.atom_context);
-               } else {
-                       radeon_combios_asic_init(rdev->ddev);
-               }
-               /* Initialize clocks */
-               r = radeon_clocks_init(rdev);
-               if (r) {
-                       release_console_sem();
-                       return r;
-               }
-               /* Enable IRQ */
-               rdev->irq.sw_int = true;
-               radeon_irq_set(rdev);
-               /* Initialize GPU Memory Controller */
-               r = radeon_mc_init(rdev);
-               if (r) {
-                       goto out;
-               }
-               r = radeon_gart_enable(rdev);
-               if (r) {
-                       goto out;
-               }
-               r = radeon_cp_init(rdev, rdev->cp.ring_size);
-               if (r) {
-                       goto out;
-               }
-       } else {
-               radeon_resume(rdev);
-       }
-out:
+       radeon_resume(rdev);
        radeon_restore_bios_scratch_regs(rdev);
        fb_set_suspend(rdev->fbdev_info, 0);
        release_console_sem();
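
The radeon_device.c hunks above also add a retry path: if an AGP board comes up with acceleration not working, the driver resets, tears down, forces PCI/PCIE GART via radeon_agp_disable(), and initializes again. A hedged user-space sketch of that retry-with-fallback shape (all names invented):

#include <stdbool.h>
#include <stdio.h>

enum bus_mode { BUS_AGP, BUS_PCIE };

static bool accel_working;

static int bring_up(enum bus_mode bus)
{
        /* pretend AGP bringup succeeds but acceleration stays broken */
        accel_working = (bus != BUS_AGP);
        printf("init over %s, accel %s\n",
               bus == BUS_AGP ? "AGP" : "PCIE",
               accel_working ? "working" : "not working");
        return 0;
}

static void tear_down(void) { puts("fini"); }

int main(void)
{
        enum bus_mode bus = BUS_AGP;

        if (bring_up(bus))
                return 1;
        if (bus == BUS_AGP && !accel_working) {
                tear_down();
                bus = BUS_PCIE;          /* the fallback bus */
                if (bring_up(bus))
                        return 1;
        }
        return 0;
}
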
index 5d8141b13765c9234f6c1011fc5dcc6526ac4225..3655d91993a6bd8d1d46034b6262e93553a8ef81 100644 (file)
@@ -106,24 +106,33 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
                legacy_crtc_load_lut(crtc);
 }
 
-/** Sets the color ramps on behalf of RandR */
+/** Sets the color ramps on behalf of fbcon */
 void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                              u16 blue, int regno)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
-       if (regno == 0)
-               DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
        radeon_crtc->lut_r[regno] = red >> 6;
        radeon_crtc->lut_g[regno] = green >> 6;
        radeon_crtc->lut_b[regno] = blue >> 6;
 }
 
+/** Gets the color ramps on behalf of fbcon */
+void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+                             u16 *blue, int regno)
+{
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+       *red = radeon_crtc->lut_r[regno] << 6;
+       *green = radeon_crtc->lut_g[regno] << 6;
+       *blue = radeon_crtc->lut_b[regno] << 6;
+}
+
 static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                                  u16 *blue, uint32_t size)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       int i, j;
+       int i;
 
        if (size != 256) {
                return;
@@ -132,23 +141,11 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                return;
        }
 
-       if (crtc->fb->depth == 16) {
-               for (i = 0; i < 64; i++) {
-                       if (i <= 31) {
-                               for (j = 0; j < 8; j++) {
-                                       radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6;
-                                       radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
-                               }
-                       }
-                       for (j = 0; j < 4; j++)
-                               radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
-               }
-       } else {
-               for (i = 0; i < 256; i++) {
-                       radeon_crtc->lut_r[i] = red[i] >> 6;
-                       radeon_crtc->lut_g[i] = green[i] >> 6;
-                       radeon_crtc->lut_b[i] = blue[i] >> 6;
-               }
+       /* userspace palettes are always correct as is */
+       for (i = 0; i < 256; i++) {
+               radeon_crtc->lut_r[i] = red[i] >> 6;
+               radeon_crtc->lut_g[i] = green[i] >> 6;
+               radeon_crtc->lut_b[i] = blue[i] >> 6;
        }
 
        radeon_crtc_load_lut(crtc);
@@ -724,7 +721,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
        if (ret) {
                return ret;
        }
-       /* allocate crtcs - TODO single crtc */
+
+       if (rdev->flags & RADEON_SINGLE_CRTC)
+               num_crtc = 1;
+
+       /* allocate crtcs */
        for (i = 0; i < num_crtc; i++) {
                radeon_crtc_init(rdev->ddev, i);
        }
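
The gamma hunks above pass fbcon's 16-bit ramps straight through, shifting by 6 because the legacy LUT stores 10 bits per component; the new get path shifts back. A tiny sketch of that 16-bit/10-bit round trip:

#include <stdint.h>
#include <stdio.h>

static uint16_t lut_store(uint16_t v16) { return v16 >> 6; }   /* 16-bit -> 10-bit */
static uint16_t lut_load(uint16_t v10)  { return v10 << 6; }   /* 10-bit -> 16-bit */

int main(void)
{
        uint16_t red = 0xFFFF;

        /* the low 6 bits are lost, as expected for a 10-bit LUT */
        printf("stored: 0x%03x, read back: 0x%04x\n",
               lut_store(red), lut_load(lut_store(red)));      /* 0x3ff, 0xffc0 */
        return 0;
}
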
index 50fce498910c6aec1548714cb727507545d858f3..7f50fb864af8e6e2804f266d24a60aac1194051e 100644 (file)
@@ -62,9 +62,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
-void radeon_master_destroy_kms(struct drm_device *dev,
-                              struct drm_master *master);
 int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
 int radeon_gem_object_init(struct drm_gem_object *obj);
@@ -260,8 +257,6 @@ static struct drm_driver kms_driver = {
        .get_vblank_counter = radeon_get_vblank_counter_kms,
        .enable_vblank = radeon_enable_vblank_kms,
        .disable_vblank = radeon_disable_vblank_kms,
-       .master_create = radeon_master_create_kms,
-       .master_destroy = radeon_master_destroy_kms,
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = radeon_debugfs_init,
        .debugfs_cleanup = radeon_debugfs_cleanup,
index 621646752cd2520e6275a8b6ebcd7d79a8b3b174..a65ab1a0dad20c06338c521bff3eac5505981f81 100644 (file)
@@ -1345,6 +1345,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
 void
 radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
 {
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_encoder *encoder;
        struct radeon_encoder *radeon_encoder;
 
@@ -1364,7 +1365,10 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
                return;
 
        encoder = &radeon_encoder->base;
-       encoder->possible_crtcs = 0x3;
+       if (rdev->flags & RADEON_SINGLE_CRTC)
+               encoder->possible_crtcs = 0x1;
+       else
+               encoder->possible_crtcs = 0x3;
        encoder->possible_clones = 0;
 
        radeon_encoder->enc_priv = NULL;
index 944e4fa78db51dc66f753bee297ac8916735ddd7..b38c4c8e2c611f9d8f6d9276bc69d8f199e056c0 100644 (file)
@@ -55,6 +55,7 @@ static struct fb_ops radeonfb_ops = {
        .fb_imageblit = cfb_imageblit,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_blank = drm_fb_helper_blank,
+       .fb_setcmap = drm_fb_helper_setcmap,
 };
 
 /**
@@ -123,11 +124,13 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
 
 static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
        .gamma_set = radeon_crtc_fb_gamma_set,
+       .gamma_get = radeon_crtc_fb_gamma_get,
 };
 
 int radeonfb_create(struct drm_device *dev,
                    uint32_t fb_width, uint32_t fb_height,
                    uint32_t surface_width, uint32_t surface_height,
+                   uint32_t surface_depth, uint32_t surface_bpp,
                    struct drm_framebuffer **fb_p)
 {
        struct radeon_device *rdev = dev->dev_private;
@@ -145,13 +148,19 @@ int radeonfb_create(struct drm_device *dev,
        unsigned long tmp;
        bool fb_tiled = false; /* useful for testing */
        u32 tiling_flags = 0;
+       int crtc_count;
 
        mode_cmd.width = surface_width;
        mode_cmd.height = surface_height;
-       mode_cmd.bpp = 32;
+
+       /* avivo can't scanout real 24bpp */
+       if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+               surface_bpp = 32;
+
+       mode_cmd.bpp = surface_bpp;
        /* need to align pitch with crtc limits */
        mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
-       mode_cmd.depth = 24;
+       mode_cmd.depth = surface_depth;
 
        size = mode_cmd.pitch * mode_cmd.height;
        aligned_size = ALIGN(size, PAGE_SIZE);
@@ -216,7 +225,11 @@ int radeonfb_create(struct drm_device *dev,
        rfbdev = info->par;
        rfbdev->helper.funcs = &radeon_fb_helper_funcs;
        rfbdev->helper.dev = dev;
-       ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2,
+       if (rdev->flags & RADEON_SINGLE_CRTC)
+               crtc_count = 1;
+       else
+               crtc_count = 2;
+       ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
                                            RADEONFB_CONN_LIMIT);
        if (ret)
                goto out_unref;
@@ -233,7 +246,7 @@ int radeonfb_create(struct drm_device *dev,
 
        strcpy(info->fix.id, "radeondrmfb");
 
-       drm_fb_helper_fill_fix(info, fb->pitch);
+       drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 
        info->flags = FBINFO_DEFAULT;
        info->fbops = &radeonfb_ops;
@@ -290,13 +303,26 @@ out:
        return ret;
 }
 
+static char *mode_option;
+int radeon_parse_options(char *options)
+{
+       char *this_opt;
+
+       if (!options || !*options)
+               return 0;
+
+       while ((this_opt = strsep(&options, ",")) != NULL) {
+               if (!*this_opt)
+                       continue;
+               mode_option = this_opt;
+       }
+       return 0;
+}
+
 int radeonfb_probe(struct drm_device *dev)
 {
-       int ret;
-       ret = drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
-       return ret;
+       return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
 }
-EXPORT_SYMBOL(radeonfb_probe);
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 {
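
radeon_parse_options() above walks a comma-separated option string with strsep() and keeps the last non-empty token as the mode option. A user-space sketch of the same loop (the sample option string is invented):

#define _DEFAULT_SOURCE          /* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char options[] = "tv=off,,1024x768-24@60";
        char *opts = options;
        char *this_opt;
        char *mode_option = NULL;

        while ((this_opt = strsep(&opts, ",")) != NULL) {
                if (!*this_opt)
                        continue;        /* skip empty tokens */
                mode_option = this_opt;  /* last non-empty token wins */
        }

        printf("mode option: %s\n", mode_option ? mode_option : "(none)");
        return 0;
}
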
index 1841145a7c4f873a6445ea7a6780046a76d8e9f1..8e0a8759e428e85a62289b6616352c22d2376e57 100644 (file)
@@ -83,8 +83,12 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
        int r = 0;
+       int num_crtc = 2;
 
-       r = drm_vblank_init(rdev->ddev, 2);
+       if (rdev->flags & RADEON_SINGLE_CRTC)
+               num_crtc = 1;
+
+       r = drm_vblank_init(rdev->ddev, num_crtc);
        if (r) {
                return r;
        }
index 709bd892b3a9ec316de781e22d03016611eb4773..ba128621057a64eb0801e56fae4093dfb51f46f2 100644 (file)
@@ -200,55 +200,6 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
 }
 
 
-/*
- * For multiple master (like multiple X).
- */
-struct drm_radeon_master_private {
-       drm_local_map_t *sarea;
-       drm_radeon_sarea_t *sarea_priv;
-};
-
-int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
-{
-       struct drm_radeon_master_private *master_priv;
-       unsigned long sareapage;
-       int ret;
-
-       master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
-       if (master_priv == NULL) {
-               return -ENOMEM;
-       }
-       /* prebuild the SAREA */
-       sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
-       ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
-                        _DRM_CONTAINS_LOCK,
-                        &master_priv->sarea);
-       if (ret) {
-               DRM_ERROR("SAREA setup failed\n");
-               return ret;
-       }
-       master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
-       master_priv->sarea_priv->pfCurrentPage = 0;
-       master->driver_priv = master_priv;
-       return 0;
-}
-
-void radeon_master_destroy_kms(struct drm_device *dev,
-                              struct drm_master *master)
-{
-       struct drm_radeon_master_private *master_priv = master->driver_priv;
-
-       if (master_priv == NULL) {
-               return;
-       }
-       if (master_priv->sarea) {
-               drm_rmmap_locked(dev, master_priv->sarea);
-       }
-       kfree(master_priv);
-       master->driver_priv = NULL;
-}
-
-
 /*
  * IOCTL.
  */
index 2b997a15fb1f5bb8b46746ba0dc1d856a368ae52..36410f85d7059a73830137d26a3287f51535febd 100644 (file)
@@ -1053,6 +1053,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
        .mode_set_base = radeon_crtc_set_base,
        .prepare = radeon_crtc_prepare,
        .commit = radeon_crtc_commit,
+       .load_lut = radeon_crtc_load_lut,
 };
 
 
index b1547f700d739eeb9787b2e55a09b826e6af3d57..6ceb958fd194cda46f729a0ce64d80cc189fe6f9 100644 (file)
@@ -881,7 +881,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
                                        R420_TV_DAC_DACADJ_MASK |
                                        R420_TV_DAC_RDACPD |
                                        R420_TV_DAC_GDACPD |
-                                       R420_TV_DAC_GDACPD |
+                                       R420_TV_DAC_BDACPD |
                                        R420_TV_DAC_TVENABLE);
                } else {
                        tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
@@ -889,7 +889,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
                                        RADEON_TV_DAC_DACADJ_MASK |
                                        RADEON_TV_DAC_RDACPD |
                                        RADEON_TV_DAC_GDACPD |
-                                       RADEON_TV_DAC_GDACPD);
+                                       RADEON_TV_DAC_BDACPD);
                }
 
                /*  FIXME TV */
@@ -1318,7 +1318,10 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
                return;
 
        encoder = &radeon_encoder->base;
-       encoder->possible_crtcs = 0x3;
+       if (rdev->flags & RADEON_SINGLE_CRTC)
+               encoder->possible_crtcs = 0x1;
+       else
+               encoder->possible_crtcs = 0x3;
        encoder->possible_clones = 0;
 
        radeon_encoder->enc_priv = NULL;
index 570a58729daffd0aeff3178c252f4d2feb24110c..e61226817ccf1812a00fe442532d7e567db3e203 100644 (file)
@@ -407,6 +407,8 @@ extern void
 radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
 extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                                     u16 blue, int regno);
+extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                    u16 *blue, int regno);
 struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
                                                  struct drm_mode_fb_cmd *mode_cmd,
                                                  struct drm_gem_object *obj);
index 73af463b7a59f734c245a52076f4c007c0b0b869..1f056dadc5c2243339c0562801669bf28a903b1b 100644 (file)
@@ -400,11 +400,9 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
 int radeon_object_list_reserve(struct list_head *head)
 {
        struct radeon_object_list *lobj;
-       struct list_head *i;
        int r;
 
-       list_for_each(i, head) {
-               lobj = list_entry(i, struct radeon_object_list, list);
+       list_for_each_entry(lobj, head, list) {
                if (!lobj->robj->pin_count) {
                        r = radeon_object_reserve(lobj->robj, true);
                        if (unlikely(r != 0)) {
@@ -420,13 +418,10 @@ int radeon_object_list_reserve(struct list_head *head)
 void radeon_object_list_unreserve(struct list_head *head)
 {
        struct radeon_object_list *lobj;
-       struct list_head *i;
 
-       list_for_each(i, head) {
-               lobj = list_entry(i, struct radeon_object_list, list);
+       list_for_each_entry(lobj, head, list) {
                if (!lobj->robj->pin_count) {
                        radeon_object_unreserve(lobj->robj);
-               } else {
                }
        }
 }
@@ -436,7 +431,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
        struct radeon_object_list *lobj;
        struct radeon_object *robj;
        struct radeon_fence *old_fence = NULL;
-       struct list_head *i;
        int r;
 
        r = radeon_object_list_reserve(head);
@@ -444,8 +438,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
                radeon_object_list_unreserve(head);
                return r;
        }
-       list_for_each(i, head) {
-               lobj = list_entry(i, struct radeon_object_list, list);
+       list_for_each_entry(lobj, head, list) {
                robj = lobj->robj;
                if (!robj->pin_count) {
                        if (lobj->wdomain) {
@@ -482,10 +475,8 @@ void radeon_object_list_unvalidate(struct list_head *head)
 {
        struct radeon_object_list *lobj;
        struct radeon_fence *old_fence = NULL;
-       struct list_head *i;
 
-       list_for_each(i, head) {
-               lobj = list_entry(i, struct radeon_object_list, list);
+       list_for_each_entry(lobj, head, list) {
                old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
                lobj->robj->tobj.sync_obj = NULL;
                if (old_fence) {
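
The radeon_object.c hunks above switch from list_for_each() plus an explicit list_entry() to list_for_each_entry(), dropping the separate cursor variable. A self-contained sketch of why that works, using a simplified container_of-style macro (this is not the kernel's macro, which infers the type with typeof):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified: takes an explicit type; the kernel version does not need it. */
#define list_for_each_entry(pos, head, type, member)                 \
        for (pos = container_of((head)->next, type, member);         \
             &pos->member != (head);                                 \
             pos = container_of(pos->member.next, type, member))

struct item {
        int value;
        struct list_head list;
};

int main(void)
{
        struct list_head head;
        struct item a = { .value = 1 }, b = { .value = 2 };
        struct item *pos;

        /* singly linked circular list: head -> a -> b -> head */
        head.next = &a.list;
        a.list.next = &b.list;
        b.list.next = &head;

        list_for_each_entry(pos, &head, struct item, list)
                printf("value = %d\n", pos->value);
        return 0;
}
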
index 21da871a793c64aec6ce16ac4b1d820aa0628faa..bfa1ab9c93e123bd0e066319822af3ba8e880112 100644 (file)
 #       define RADEON_CP_PACKET_MAX_DWORDS          (1 << 12)
 #       define RADEON_CP_PACKET0_REG_MASK           0x000007ff
 #       define R300_CP_PACKET0_REG_MASK             0x00001fff
+#       define R600_CP_PACKET0_REG_MASK             0x0000ffff
 #       define RADEON_CP_PACKET1_REG0_MASK          0x000007ff
 #       define RADEON_CP_PACKET1_REG1_MASK          0x003ff800
 
index 5b1cf04a011adf4fe26e46311f4df6b285e4dd1f..765bd184b6fc15382fc10b2dfaadd91414709054 100644 (file)
@@ -689,9 +689,6 @@ struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
 
 #define RADEON_DEBUGFS_MEM_TYPES 2
 
-static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
-static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
-
 #if defined(CONFIG_DEBUG_FS)
 static int radeon_mm_dump_table(struct seq_file *m, void *data)
 {
@@ -711,9 +708,11 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
 {
+#if defined(CONFIG_DEBUG_FS)
+       static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
+       static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
        unsigned i;
 
-#if defined(CONFIG_DEBUG_FS)
        for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
                if (i == 0)
                        sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
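
The radeon_ttm.c hunk above moves the two static debugfs tables from file scope into radeon_ttm_debugfs_init(), inside the CONFIG_DEBUG_FS block, so they are only emitted in builds that actually register them. A stripped-down sketch of the pattern; the demo_* names are placeholders and only radeon_debugfs_add_files() is taken from the driver:

#if defined(CONFIG_DEBUG_FS)
static int demo_dump(struct seq_file *m, void *data)
{
        seq_printf(m, "demo\n");
        return 0;
}
#endif

static int demo_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        /* Function-local statics keep static storage duration, but are
         * compiled out entirely when debugfs support is disabled. */
        static struct drm_info_list demo_list[2];
        static char demo_names[2][32];
        unsigned i;

        for (i = 0; i < 2; i++) {
                sprintf(demo_names[i], "demo_entry_%u", i);
                demo_list[i].name = demo_names[i];
                demo_list[i].show = &demo_dump;
                demo_list[i].driver_features = 0;
                demo_list[i].data = NULL;
        }
        return radeon_debugfs_add_files(rdev, demo_list, 2);
#else
        return 0;
#endif
}
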
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h
new file mode 100644 (file)
index 0000000..48a913a
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS100D_H__
+#define __RS100D_H__
+
+/* Registers */
+#define R_00015C_NB_TOM                              0x00015C
+#define   S_00015C_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_00015C_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_00015C_MC_FB_START                         0xFFFF0000
+#define   S_00015C_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_00015C_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_00015C_MC_FB_TOP                           0x0000FFFF
+
+#endif
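
The new rs100d.h above (like the other *d.h headers added in this series) follows one naming scheme per register field: S_<reg>_<FIELD>(x) packs a value into position, G_<reg>_<FIELD>(x) extracts it, and C_<reg>_<FIELD> is the AND-mask that clears it. A small usage sketch against NB_TOM, assuming the driver's usual RREG32/WREG32 accessors and a demo_* helper name that is not part of the patch:

static void demo_nb_tom_rmw(struct radeon_device *rdev)
{
        u32 tom, fb_start, fb_top;

        tom = RREG32(R_00015C_NB_TOM);
        /* G_... pulls the two 16-bit fields out of the register value. */
        fb_start = G_00015C_MC_FB_START(tom);
        fb_top = G_00015C_MC_FB_TOP(tom);
        DRM_INFO("NB_TOM: fb 0x%04X-0x%04X\n", fb_start, fb_top);

        /* Read-modify-write: C_... clears a field, S_... packs the
         * replacement value into the right bit position. */
        tom &= C_00015C_MC_FB_TOP;
        tom |= S_00015C_MC_FB_TOP(fb_top);
        WREG32(R_00015C_NB_TOM, tom);
}
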
index a3fbdad938c71bc75d98dec1ac8282b7ddb2361a..a769c296f6a615463746145ecba7239f92fb04da 100644 (file)
  */
 #include <linux/seq_file.h>
 #include <drm/drmP.h>
-#include "radeon_reg.h"
 #include "radeon.h"
+#include "rs400d.h"
 
-/* rs400,rs480 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-void r100_mc_disable_clients(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
+/* This file gathers functions specific to: rs400, rs480 */
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
 
-/* This files gather functions specifics to :
- * rs400,rs480
- *
- * Some of these functions might be used by newer ASICs.
- */
-void rs400_gpu_init(struct radeon_device *rdev);
-int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
-
-
-/*
- * GART functions.
- */
 void rs400_gart_adjust_size(struct radeon_device *rdev)
 {
        /* Check gart size */
@@ -238,61 +223,6 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
        return 0;
 }
 
-
-/*
- * MC functions.
- */
-int rs400_mc_init(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-       int r;
-
-       if (r100_debugfs_rbbm_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-       }
-
-       rs400_gpu_init(rdev);
-       rs400_gart_disable(rdev);
-       rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-       rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
-       rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
-       r = radeon_mc_setup(rdev);
-       if (r) {
-               return r;
-       }
-
-       r100_mc_disable_clients(rdev);
-       if (r300_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-
-       tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-       tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
-       tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
-       WREG32(RADEON_MC_FB_LOCATION, tmp);
-       tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
-       WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
-       (void)RREG32(RADEON_HOST_PATH_CNTL);
-       WREG32(RADEON_HOST_PATH_CNTL, tmp);
-       (void)RREG32(RADEON_HOST_PATH_CNTL);
-
-       return 0;
-}
-
-void rs400_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Global GPU functions
- */
-void rs400_errata(struct radeon_device *rdev)
-{
-       rdev->pll_errata = 0;
-}
-
 void rs400_gpu_init(struct radeon_device *rdev)
 {
        /* FIXME: HDP same place on rs400 ? */
@@ -305,10 +235,6 @@ void rs400_gpu_init(struct radeon_device *rdev)
        }
 }
 
-
-/*
- * VRAM info.
- */
 void rs400_vram_info(struct radeon_device *rdev)
 {
        rs400_gart_adjust_size(rdev);
@@ -319,10 +245,6 @@ void rs400_vram_info(struct radeon_device *rdev)
        r100_vram_init_sizes(rdev);
 }
 
-
-/*
- * Indirect registers accessor
- */
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
        uint32_t r;
@@ -340,10 +262,6 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
        WREG32(RS480_NB_MC_INDEX, 0xff);
 }
 
-
-/*
- * Debugfs info
- */
 #if defined(CONFIG_DEBUG_FS)
 static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
 {
@@ -419,7 +337,7 @@ static struct drm_info_list rs400_gart_info_list[] = {
 };
 #endif
 
-int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
@@ -427,3 +345,188 @@ int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
        return 0;
 #endif
 }
+
+static int rs400_mc_init(struct radeon_device *rdev)
+{
+       int r;
+       u32 tmp;
+
+       /* Setup GPU memory space */
+       tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+       rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
+       rdev->mc.gtt_location = 0xFFFFFFFFUL;
+       r = radeon_mc_setup(rdev);
+       if (r)
+               return r;
+       return 0;
+}
+
+void rs400_mc_program(struct radeon_device *rdev)
+{
+       struct r100_mc_save save;
+
+       /* Stops all mc clients */
+       r100_mc_stop(rdev, &save);
+
+       /* Wait for mc idle */
+       if (r300_mc_wait_for_idle(rdev))
+               dev_warn(rdev->dev, "Timeout waiting for MC idle before updating MC.\n");
+       WREG32(R_000148_MC_FB_LOCATION,
+               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+
+       r100_mc_resume(rdev, &save);
+}
+
+static int rs400_startup(struct radeon_device *rdev)
+{
+       int r;
+
+       rs400_mc_program(rdev);
+       /* Resume clock */
+       r300_clock_startup(rdev);
+       /* Initialize GPU configuration (# pipes, ...) */
+       rs400_gpu_init(rdev);
+       /* Initialize GART (initialize after TTM so we can allocate
+        * memory through TTM but finalize after TTM) */
+       r = rs400_gart_enable(rdev);
+       if (r)
+               return r;
+       /* Enable IRQ */
+       rdev->irq.sw_int = true;
+       r100_irq_set(rdev);
+       /* 1M ring buffer */
+       r = r100_cp_init(rdev, 1024 * 1024);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+               return r;
+       }
+       r = r100_wb_init(rdev);
+       if (r)
+               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
+       r = r100_ib_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               return r;
+       }
+       return 0;
+}
+
+int rs400_resume(struct radeon_device *rdev)
+{
+       /* Make sure the GART is not active */
+       rs400_gart_disable(rdev);
+       /* Resume clock before doing reset */
+       r300_clock_startup(rdev);
+       /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* post */
+       radeon_combios_asic_init(rdev->ddev);
+       /* Resume clock after posting */
+       r300_clock_startup(rdev);
+       return rs400_startup(rdev);
+}
+
+int rs400_suspend(struct radeon_device *rdev)
+{
+       r100_cp_disable(rdev);
+       r100_wb_disable(rdev);
+       r100_irq_disable(rdev);
+       rs400_gart_disable(rdev);
+       return 0;
+}
+
+void rs400_fini(struct radeon_device *rdev)
+{
+       rs400_suspend(rdev);
+       r100_cp_fini(rdev);
+       r100_wb_fini(rdev);
+       r100_ib_fini(rdev);
+       radeon_gem_fini(rdev);
+       rs400_gart_fini(rdev);
+       radeon_irq_kms_fini(rdev);
+       radeon_fence_driver_fini(rdev);
+       radeon_object_fini(rdev);
+       radeon_atombios_fini(rdev);
+       kfree(rdev->bios);
+       rdev->bios = NULL;
+}
+
+int rs400_init(struct radeon_device *rdev)
+{
+       int r;
+
+       /* Disable VGA */
+       r100_vga_render_disable(rdev);
+       /* Initialize scratch registers */
+       radeon_scratch_init(rdev);
+       /* Initialize surface registers */
+       radeon_surface_init(rdev);
+       /* TODO: disabling VGA needs to use a VGA request */
+       /* BIOS*/
+       if (!radeon_get_bios(rdev)) {
+               if (ASIC_IS_AVIVO(rdev))
+                       return -EINVAL;
+       }
+       if (rdev->is_atom_bios) {
+               dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+               return -EINVAL;
+       } else {
+               r = radeon_combios_init(rdev);
+               if (r)
+                       return r;
+       }
+       /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev,
+                       "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* Check if the card is posted */
+       if (!radeon_card_posted(rdev) && rdev->bios) {
+               DRM_INFO("GPU not posted. Posting now...\n");
+               radeon_combios_asic_init(rdev->ddev);
+       }
+       /* Initialize clocks */
+       radeon_get_clock_info(rdev->ddev);
+       /* Get vram information */
+       rs400_vram_info(rdev);
+       /* Initialize memory controller (also test AGP) */
+       r = rs400_mc_init(rdev);
+       if (r)
+               return r;
+       /* Fence driver */
+       r = radeon_fence_driver_init(rdev);
+       if (r)
+               return r;
+       r = radeon_irq_kms_init(rdev);
+       if (r)
+               return r;
+       /* Memory manager */
+       r = radeon_object_init(rdev);
+       if (r)
+               return r;
+       r = rs400_gart_init(rdev);
+       if (r)
+               return r;
+       r300_set_reg_safe(rdev);
+       rdev->accel_working = true;
+       r = rs400_startup(rdev);
+       if (r) {
+               /* Something went wrong with the accel init, stop accel */
+               dev_err(rdev->dev, "Disabling GPU acceleration\n");
+               rs400_suspend(rdev);
+               r100_cp_fini(rdev);
+               r100_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               rs400_gart_fini(rdev);
+               radeon_irq_kms_fini(rdev);
+               rdev->accel_working = false;
+       }
+       return 0;
+}
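
One detail worth noting in the new rs400_init() above: when rs400_startup() fails, only the acceleration-related pieces (CP, WB, IB, GART, IRQ) are torn down and accel_working is cleared, yet the function still returns 0, so modesetting stays usable without acceleration. A condensed sketch of that shape, with entirely hypothetical names:

struct demo_device {
        bool accel_working;
};

static int demo_setup_core(struct demo_device *ddev);    /* MC, fences, ... */
static int demo_startup(struct demo_device *ddev);       /* CP/WB/IB bring-up */
static void demo_teardown_accel(struct demo_device *ddev);

static int demo_init(struct demo_device *ddev)
{
        int r;

        r = demo_setup_core(ddev);
        if (r)
                return r;               /* core failures are fatal */

        ddev->accel_working = true;
        r = demo_startup(ddev);
        if (r) {
                /* Acceleration failed: unwind the accel subsystems but
                 * keep the device alive for plain modesetting. */
                demo_teardown_accel(ddev);
                ddev->accel_working = false;
        }
        return 0;                       /* still usable without accel */
}
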
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h
new file mode 100644 (file)
index 0000000..6d8bac5
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS400D_H__
+#define __RS400D_H__
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
+#define R_00015C_NB_TOM                              0x00015C
+#define   S_00015C_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_00015C_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_00015C_MC_FB_START                         0xFFFF0000
+#define   S_00015C_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_00015C_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_00015C_MC_FB_TOP                           0x0000FFFF
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+#endif
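
The R_000E40_RBBM_STATUS definitions above are what the error paths in this patch print raw; the G_000E40_* accessors can also be used to decode individual busy bits. An illustrative decoder, not part of the patch:

static void demo_print_rbbm_busy(struct radeon_device *rdev)
{
        u32 status = RREG32(R_000E40_RBBM_STATUS);

        /* Each G_000E40_* accessor extracts one busy/status bit. */
        dev_info(rdev->dev,
                 "RBBM_STATUS=0x%08X CP=%u GA=%u VAP=%u GUI_ACTIVE=%u\n",
                 status,
                 G_000E40_CP_CMDSTRM_BUSY(status),
                 G_000E40_GA_BUSY(status),
                 G_000E40_VAP_BUSY(status),
                 G_000E40_GUI_ACTIVE(status));
}
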
index 0e791e26def32f14a9496a2ad3af15b4dd8b1ccf..10dfa78762da22cbac4bd95ba366ae6644417e18 100644 (file)
  *          Alex Deucher
  *          Jerome Glisse
  */
+/* RS600 / Radeon X1250/X1270 integrated GPU
+ *
+ * This file gathers functions specific to RS600, which is the IGP of
+ * the X1250/X1270 family supporting Intel CPUs (while RS690/RS740 is
+ * the X1250/X1270 variant supporting AMD CPUs). The display engine is
+ * the Avivo one, the BIOS is an ATOM BIOS, and the 3D block is the one
+ * of the R4XX family. The GART is different from the RS400 one and is
+ * very close to the one of the R600 family (R600 likely being an
+ * evolution of the RS600 GART block).
+ */
 #include "drmP.h"
-#include "radeon_reg.h"
 #include "radeon.h"
-#include "avivod.h"
+#include "atom.h"
+#include "rs600d.h"
 
 #include "rs600_reg_safe.h"
 
-/* rs600 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
-
-/* This files gather functions specifics to :
- * rs600
- *
- * Some of these functions might be used by newer ASICs.
- */
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
-void rs600_disable_vga(struct radeon_device *rdev);
-
 
 /*
  * GART.
@@ -55,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
 {
        uint32_t tmp;
 
-       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
-       tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
-       WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+       tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+       tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+       WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
 
-       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
-       tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
-       WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+       tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+       tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
+       WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
 
-       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
-       tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
-       WREG32_MC(RS600_MC_PT0_CNTL, tmp);
-       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+       tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+       tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+       WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+       tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
 }
 
 int rs600_gart_init(struct radeon_device *rdev)
@@ -88,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev)
 
 int rs600_gart_enable(struct radeon_device *rdev)
 {
-       uint32_t tmp;
+       u32 tmp;
        int r, i;
 
        if (rdev->gart.table.vram.robj == NULL) {
@@ -98,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev)
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
+       /* Enable bus master */
+       tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
+       WREG32(R_00004C_BUS_CNTL, tmp);
        /* FIXME: setup default page */
-       WREG32_MC(RS600_MC_PT0_CNTL,
-                (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
-                 RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
+       WREG32_MC(R_000100_MC_PT0_CNTL,
+                (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+                 S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
        for (i = 0; i < 19; i++) {
-               WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
-                        (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
-                         RS600_SYSTEM_ACCESS_MODE_IN_SYS |
-                         RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
-                         RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
-                         RS600_ENABLE_FRAGMENT_PROCESSING |
-                         RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
+               WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
+                       S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+                       S_00016C_SYSTEM_ACCESS_MODE_MASK(
+                               V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
+                       S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+                               V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
+                       S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
+                       S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+                       S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
        }
 
        /* System context map to GART space */
-       WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
-       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-       WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+       WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
+       WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
 
        /* enable first context */
-       WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
-       tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-       WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
-       WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
-                (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
+       WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+       WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+       WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
+                       S_000102_ENABLE_PAGE_TABLE(1) |
+                       S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
        /* disable all other contexts */
        for (i = 1; i < 8; i++) {
-               WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
+               WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
        }
 
        /* setup the page table */
-       WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
-                rdev->gart.table_addr);
-       WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+       WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+                       rdev->gart.table_addr);
+       WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
 
        /* enable page tables */
-       tmp = RREG32_MC(RS600_MC_PT0_CNTL);
-       WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
-       tmp = RREG32_MC(RS600_MC_CNTL1);
-       WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
+       tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+       WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
+       tmp = RREG32_MC(R_000009_MC_CNTL1);
+       WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
        rs600_gart_tlb_flush(rdev);
        rdev->gart.ready = true;
        return 0;
@@ -148,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
        uint32_t tmp;
 
        /* FIXME: disable out of gart access */
-       WREG32_MC(RS600_MC_PT0_CNTL, 0);
-       tmp = RREG32_MC(RS600_MC_CNTL1);
-       tmp &= ~RS600_ENABLE_PAGE_TABLES;
-       WREG32_MC(RS600_MC_CNTL1, tmp);
+       WREG32_MC(R_000100_MC_PT0_CNTL, 0);
+       tmp = RREG32_MC(R_000009_MC_CNTL1);
+       WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
        if (rdev->gart.table.vram.robj) {
                radeon_object_kunmap(rdev->gart.table.vram.robj);
                radeon_object_unpin(rdev->gart.table.vram.robj);
@@ -185,129 +185,61 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
        return 0;
 }
 
-
-/*
- * MC.
- */
-void rs600_mc_disable_clients(struct radeon_device *rdev)
-{
-       unsigned tmp;
-
-       if (r100_gui_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait GUI idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-
-       radeon_avivo_vga_render_disable(rdev);
-
-       tmp = RREG32(AVIVO_D1VGA_CONTROL);
-       WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
-       tmp = RREG32(AVIVO_D2VGA_CONTROL);
-       WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
-
-       tmp = RREG32(AVIVO_D1CRTC_CONTROL);
-       WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
-       tmp = RREG32(AVIVO_D2CRTC_CONTROL);
-       WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
-
-       /* make sure all previous write got through */
-       tmp = RREG32(AVIVO_D2CRTC_CONTROL);
-
-       mdelay(1);
-}
-
-int rs600_mc_init(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-       int r;
-
-       if (r100_debugfs_rbbm_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-       }
-
-       rs600_gpu_init(rdev);
-       rs600_gart_disable(rdev);
-
-       /* Setup GPU memory space */
-       rdev->mc.vram_location = 0xFFFFFFFFUL;
-       rdev->mc.gtt_location = 0xFFFFFFFFUL;
-       r = radeon_mc_setup(rdev);
-       if (r) {
-               return r;
-       }
-
-       /* Program GPU memory space */
-       /* Enable bus master */
-       tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
-       WREG32(RADEON_BUS_CNTL, tmp);
-       /* FIXME: What does AGP means for such chipset ? */
-       WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
-       /* FIXME: are this AGP reg in indirect MC range ? */
-       WREG32_MC(RS600_MC_AGP_BASE, 0);
-       WREG32_MC(RS600_MC_AGP_BASE_2, 0);
-       rs600_mc_disable_clients(rdev);
-       if (rs600_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-       tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-       tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
-       tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
-       WREG32_MC(RS600_MC_FB_LOCATION, tmp);
-       WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
-       return 0;
-}
-
-void rs600_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Interrupts
- */
 int rs600_irq_set(struct radeon_device *rdev)
 {
        uint32_t tmp = 0;
        uint32_t mode_int = 0;
 
        if (rdev->irq.sw_int) {
-               tmp |= RADEON_SW_INT_ENABLE;
+               tmp |= S_000040_SW_INT_EN(1);
        }
        if (rdev->irq.crtc_vblank_int[0]) {
-               mode_int |= AVIVO_D1MODE_INT_MASK;
+               mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
        }
        if (rdev->irq.crtc_vblank_int[1]) {
-               mode_int |= AVIVO_D2MODE_INT_MASK;
+               mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
        }
-       WREG32(RADEON_GEN_INT_CNTL, tmp);
-       WREG32(AVIVO_DxMODE_INT_MASK, mode_int);
+       WREG32(R_000040_GEN_INT_CNTL, tmp);
+       WREG32(R_006540_DxMODE_INT_MASK, mode_int);
        return 0;
 }
 
 static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
 {
-       uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
-       uint32_t irq_mask = RADEON_SW_INT_TEST;
-
-       if (irqs & AVIVO_DISPLAY_INT_STATUS) {
-               *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS);
-               if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
-                       WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
+       uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
+       uint32_t irq_mask = ~C_000044_SW_INT;
+
+       if (G_000044_DISPLAY_INT_STAT(irqs)) {
+               *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+               if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+                       WREG32(R_006534_D1MODE_VBLANK_STATUS,
+                               S_006534_D1MODE_VBLANK_ACK(1));
                }
-               if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
-                       WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
+               if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+                       WREG32(R_006D34_D2MODE_VBLANK_STATUS,
+                               S_006D34_D2MODE_VBLANK_ACK(1));
                }
        } else {
                *r500_disp_int = 0;
        }
 
        if (irqs) {
-               WREG32(RADEON_GEN_INT_STATUS, irqs);
+               WREG32(R_000044_GEN_INT_STATUS, irqs);
        }
        return irqs & irq_mask;
 }
 
+void rs600_irq_disable(struct radeon_device *rdev)
+{
+       u32 tmp;
+
+       WREG32(R_000040_GEN_INT_CNTL, 0);
+       WREG32(R_006540_DxMODE_INT_MASK, 0);
+       /* Wait and acknowledge irq */
+       mdelay(1);
+       rs600_irq_ack(rdev, &tmp);
+}
+
 int rs600_irq_process(struct radeon_device *rdev)
 {
        uint32_t status;
@@ -319,16 +251,13 @@ int rs600_irq_process(struct radeon_device *rdev)
        }
        while (status || r500_disp_int) {
                /* SW interrupt */
-               if (status & RADEON_SW_INT_TEST) {
+               if (G_000040_SW_INT_EN(status))
                        radeon_fence_process(rdev);
-               }
                /* Vertical blank interrupts */
-               if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
+               if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
                        drm_handle_vblank(rdev->ddev, 0);
-               }
-               if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
+               if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
                        drm_handle_vblank(rdev->ddev, 1);
-               }
                status = rs600_irq_ack(rdev, &r500_disp_int);
        }
        return IRQ_HANDLED;
@@ -337,67 +266,34 @@ int rs600_irq_process(struct radeon_device *rdev)
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
 {
        if (crtc == 0)
-               return RREG32(AVIVO_D1CRTC_FRAME_COUNT);
+               return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
        else
-               return RREG32(AVIVO_D2CRTC_FRAME_COUNT);
-}
-
-
-/*
- * Global GPU functions
- */
-void rs600_disable_vga(struct radeon_device *rdev)
-{
-       unsigned tmp;
-
-       WREG32(0x330, 0);
-       WREG32(0x338, 0);
-       tmp = RREG32(0x300);
-       tmp &= ~(3 << 16);
-       WREG32(0x300, tmp);
-       WREG32(0x308, (1 << 8));
-       WREG32(0x310, rdev->mc.vram_location);
-       WREG32(0x594, 0);
+               return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
 }
 
 int rs600_mc_wait_for_idle(struct radeon_device *rdev)
 {
        unsigned i;
-       uint32_t tmp;
 
        for (i = 0; i < rdev->usec_timeout; i++) {
-               /* read MC_STATUS */
-               tmp = RREG32_MC(RS600_MC_STATUS);
-               if (tmp & RS600_MC_STATUS_IDLE) {
+               if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
                        return 0;
-               }
-               DRM_UDELAY(1);
+               udelay(1);
        }
        return -1;
 }
 
-void rs600_errata(struct radeon_device *rdev)
-{
-       rdev->pll_errata = 0;
-}
-
 void rs600_gpu_init(struct radeon_device *rdev)
 {
        /* FIXME: HDP same place on rs600 ? */
        r100_hdp_reset(rdev);
-       rs600_disable_vga(rdev);
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
-       if (rs600_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
+       /* Wait for mc idle */
+       if (rs600_mc_wait_for_idle(rdev))
+               dev_warn(rdev->dev, "Timeout waiting for MC idle before updating MC.\n");
 }
 
-
-/*
- * VRAM info.
- */
 void rs600_vram_info(struct radeon_device *rdev)
 {
        /* FIXME: to do or is these values sane ? */
@@ -410,31 +306,206 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
        /* FIXME: implement, should this be like rs690 ? */
 }
 
-
-/*
- * Indirect registers accessor
- */
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
-       uint32_t r;
-
-       WREG32(RS600_MC_INDEX,
-              ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
-       r = RREG32(RS600_MC_DATA);
-       return r;
+       WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+               S_000070_MC_IND_CITF_ARB0(1));
+       return RREG32(R_000074_MC_IND_DATA);
 }
 
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
-       WREG32(RS600_MC_INDEX,
-               RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
-               ((reg) & RS600_MC_ADDR_MASK));
-       WREG32(RS600_MC_DATA, v);
+       WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+               S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
+       WREG32(R_000074_MC_IND_DATA, v);
 }
 
-int rs600_init(struct radeon_device *rdev)
+void rs600_debugfs(struct radeon_device *rdev)
+{
+       if (r100_debugfs_rbbm_init(rdev))
+               DRM_ERROR("Failed to register debugfs file for RBBM!\n");
+}
+
+void rs600_set_safe_registers(struct radeon_device *rdev)
 {
        rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+}
+
+static void rs600_mc_program(struct radeon_device *rdev)
+{
+       struct rv515_mc_save save;
+
+       /* Stops all mc clients */
+       rv515_mc_stop(rdev, &save);
+
+       /* Wait for mc idle */
+       if (rs600_mc_wait_for_idle(rdev))
+               dev_warn(rdev->dev, "Timeout waiting for MC idle before updating MC.\n");
+
+       /* FIXME: What does AGP mean for such a chipset? */
+       WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
+       WREG32_MC(R_000006_AGP_BASE, 0);
+       WREG32_MC(R_000007_AGP_BASE_2, 0);
+       /* Program MC */
+       WREG32_MC(R_000004_MC_FB_LOCATION,
+                       S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+                       S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+       WREG32(R_000134_HDP_FB_LOCATION,
+               S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+       rv515_mc_resume(rdev, &save);
+}
+
+static int rs600_startup(struct radeon_device *rdev)
+{
+       int r;
+
+       rs600_mc_program(rdev);
+       /* Resume clock */
+       rv515_clock_startup(rdev);
+       /* Initialize GPU configuration (# pipes, ...) */
+       rs600_gpu_init(rdev);
+       /* Initialize GART (initialize after TTM so we can allocate
+        * memory through TTM but finalize after TTM) */
+       r = rs600_gart_enable(rdev);
+       if (r)
+               return r;
+       /* Enable IRQ */
+       rdev->irq.sw_int = true;
+       rs600_irq_set(rdev);
+       /* 1M ring buffer */
+       r = r100_cp_init(rdev, 1024 * 1024);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+               return r;
+       }
+       r = r100_wb_init(rdev);
+       if (r)
+               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
+       r = r100_ib_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               return r;
+       }
+       return 0;
+}
+
+int rs600_resume(struct radeon_device *rdev)
+{
+       /* Make sure the GART is not active */
+       rs600_gart_disable(rdev);
+       /* Resume clock before doing reset */
+       rv515_clock_startup(rdev);
+       /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* post */
+       atom_asic_init(rdev->mode_info.atom_context);
+       /* Resume clock after posting */
+       rv515_clock_startup(rdev);
+       return rs600_startup(rdev);
+}
+
+int rs600_suspend(struct radeon_device *rdev)
+{
+       r100_cp_disable(rdev);
+       r100_wb_disable(rdev);
+       rs600_irq_disable(rdev);
+       rs600_gart_disable(rdev);
+       return 0;
+}
+
+void rs600_fini(struct radeon_device *rdev)
+{
+       rs600_suspend(rdev);
+       r100_cp_fini(rdev);
+       r100_wb_fini(rdev);
+       r100_ib_fini(rdev);
+       radeon_gem_fini(rdev);
+       rs600_gart_fini(rdev);
+       radeon_irq_kms_fini(rdev);
+       radeon_fence_driver_fini(rdev);
+       radeon_object_fini(rdev);
+       radeon_atombios_fini(rdev);
+       kfree(rdev->bios);
+       rdev->bios = NULL;
+}
+
+int rs600_init(struct radeon_device *rdev)
+{
+       int r;
+
+       /* Disable VGA */
+       rv515_vga_render_disable(rdev);
+       /* Initialize scratch registers */
+       radeon_scratch_init(rdev);
+       /* Initialize surface registers */
+       radeon_surface_init(rdev);
+       /* BIOS */
+       if (!radeon_get_bios(rdev)) {
+               if (ASIC_IS_AVIVO(rdev))
+                       return -EINVAL;
+       }
+       if (rdev->is_atom_bios) {
+               r = radeon_atombios_init(rdev);
+               if (r)
+                       return r;
+       } else {
+               dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
+               return -EINVAL;
+       }
+       /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev,
+                       "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* Check if the card is posted */
+       if (!radeon_card_posted(rdev) && rdev->bios) {
+               DRM_INFO("GPU not posted. Posting now...\n");
+               atom_asic_init(rdev->mode_info.atom_context);
+       }
+       /* Initialize clocks */
+       radeon_get_clock_info(rdev->ddev);
+       /* Get vram information */
+       rs600_vram_info(rdev);
+       /* Initialize memory controller (also test AGP) */
+       r = r420_mc_init(rdev);
+       if (r)
+               return r;
+       rs600_debugfs(rdev);
+       /* Fence driver */
+       r = radeon_fence_driver_init(rdev);
+       if (r)
+               return r;
+       r = radeon_irq_kms_init(rdev);
+       if (r)
+               return r;
+       /* Memory manager */
+       r = radeon_object_init(rdev);
+       if (r)
+               return r;
+       r = rs600_gart_init(rdev);
+       if (r)
+               return r;
+       rs600_set_safe_registers(rdev);
+       rdev->accel_working = true;
+       r = rs600_startup(rdev);
+       if (r) {
+               /* Something went wrong with the accel init, stop accel */
+               dev_err(rdev->dev, "Disabling GPU acceleration\n");
+               rs600_suspend(rdev);
+               r100_cp_fini(rdev);
+               r100_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               rs600_gart_fini(rdev);
+               radeon_irq_kms_fini(rdev);
+               rdev->accel_working = false;
+       }
        return 0;
 }
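
The rs600_mc_rreg()/rs600_mc_wreg() accessors above go through MC_IND_INDEX/MC_IND_DATA, and the GART code pairs them (via the RREG32_MC/WREG32_MC wrappers) with the S_/C_ helpers for read-modify-write updates of indirect MC registers. A reduced sketch of toggling one field of MC_PT0_CNTL that way; the helper name is illustrative, and the C_000100_ENABLE_PT clear mask is assumed to exist in rs600d.h following the header's naming scheme:

static void demo_enable_pt(struct radeon_device *rdev)
{
        u32 tmp;

        /* Read the indirect register, update one field, write it back. */
        tmp = rs600_mc_rreg(rdev, R_000100_MC_PT0_CNTL);
        tmp &= C_000100_ENABLE_PT;              /* clear the field */
        tmp |= S_000100_ENABLE_PT(1);           /* set it to 1 */
        rs600_mc_wreg(rdev, R_000100_MC_PT0_CNTL, tmp);
}
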
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
new file mode 100644 (file)
index 0000000..8130892
--- /dev/null
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS600D_H__
+#define __RS600D_H__
+
+/* Registers */
+#define R_000040_GEN_INT_CNTL                        0x000040
+#define   S_000040_DISPLAY_INT_STATUS(x)               (((x) & 0x1) << 0)
+#define   G_000040_DISPLAY_INT_STATUS(x)               (((x) >> 0) & 0x1)
+#define   C_000040_DISPLAY_INT_STATUS                  0xFFFFFFFE
+#define   S_000040_DMA_VIPH0_INT_EN(x)                 (((x) & 0x1) << 12)
+#define   G_000040_DMA_VIPH0_INT_EN(x)                 (((x) >> 12) & 0x1)
+#define   C_000040_DMA_VIPH0_INT_EN                    0xFFFFEFFF
+#define   S_000040_CRTC2_VSYNC(x)                      (((x) & 0x1) << 6)
+#define   G_000040_CRTC2_VSYNC(x)                      (((x) >> 6) & 0x1)
+#define   C_000040_CRTC2_VSYNC                         0xFFFFFFBF
+#define   S_000040_SNAPSHOT2(x)                        (((x) & 0x1) << 7)
+#define   G_000040_SNAPSHOT2(x)                        (((x) >> 7) & 0x1)
+#define   C_000040_SNAPSHOT2                           0xFFFFFF7F
+#define   S_000040_CRTC2_VBLANK(x)                     (((x) & 0x1) << 9)
+#define   G_000040_CRTC2_VBLANK(x)                     (((x) >> 9) & 0x1)
+#define   C_000040_CRTC2_VBLANK                        0xFFFFFDFF
+#define   S_000040_FP2_DETECT(x)                       (((x) & 0x1) << 10)
+#define   G_000040_FP2_DETECT(x)                       (((x) >> 10) & 0x1)
+#define   C_000040_FP2_DETECT                          0xFFFFFBFF
+#define   S_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) & 0x1) << 11)
+#define   G_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) >> 11) & 0x1)
+#define   C_000040_VSYNC_DIFF_OVER_LIMIT               0xFFFFF7FF
+#define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
+#define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
+#define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
+#define   S_000040_DMA_VIPH2_INT_EN(x)                 (((x) & 0x1) << 14)
+#define   G_000040_DMA_VIPH2_INT_EN(x)                 (((x) >> 14) & 0x1)
+#define   C_000040_DMA_VIPH2_INT_EN                    0xFFFFBFFF
+#define   S_000040_DMA_VIPH3_INT_EN(x)                 (((x) & 0x1) << 15)
+#define   G_000040_DMA_VIPH3_INT_EN(x)                 (((x) >> 15) & 0x1)
+#define   C_000040_DMA_VIPH3_INT_EN                    0xFFFF7FFF
+#define   S_000040_I2C_INT_EN(x)                       (((x) & 0x1) << 17)
+#define   G_000040_I2C_INT_EN(x)                       (((x) >> 17) & 0x1)
+#define   C_000040_I2C_INT_EN                          0xFFFDFFFF
+#define   S_000040_GUI_IDLE(x)                         (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE(x)                         (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE                            0xFFF7FFFF
+#define   S_000040_VIPH_INT_EN(x)                      (((x) & 0x1) << 24)
+#define   G_000040_VIPH_INT_EN(x)                      (((x) >> 24) & 0x1)
+#define   C_000040_VIPH_INT_EN                         0xFEFFFFFF
+#define   S_000040_SW_INT_EN(x)                        (((x) & 0x1) << 25)
+#define   G_000040_SW_INT_EN(x)                        (((x) >> 25) & 0x1)
+#define   C_000040_SW_INT_EN                           0xFDFFFFFF
+#define   S_000040_GEYSERVILLE(x)                      (((x) & 0x1) << 27)
+#define   G_000040_GEYSERVILLE(x)                      (((x) >> 27) & 0x1)
+#define   C_000040_GEYSERVILLE                         0xF7FFFFFF
+#define   S_000040_HDCP_AUTHORIZED_INT(x)              (((x) & 0x1) << 28)
+#define   G_000040_HDCP_AUTHORIZED_INT(x)              (((x) >> 28) & 0x1)
+#define   C_000040_HDCP_AUTHORIZED_INT                 0xEFFFFFFF
+#define   S_000040_DVI_I2C_INT(x)                      (((x) & 0x1) << 29)
+#define   G_000040_DVI_I2C_INT(x)                      (((x) >> 29) & 0x1)
+#define   C_000040_DVI_I2C_INT                         0xDFFFFFFF
+#define   S_000040_GUIDMA(x)                           (((x) & 0x1) << 30)
+#define   G_000040_GUIDMA(x)                           (((x) >> 30) & 0x1)
+#define   C_000040_GUIDMA                              0xBFFFFFFF
+#define   S_000040_VIDDMA(x)                           (((x) & 0x1) << 31)
+#define   G_000040_VIDDMA(x)                           (((x) >> 31) & 0x1)
+#define   C_000040_VIDDMA                              0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS                      0x000044
+#define   S_000044_DISPLAY_INT_STAT(x)                 (((x) & 0x1) << 0)
+#define   G_000044_DISPLAY_INT_STAT(x)                 (((x) >> 0) & 0x1)
+#define   C_000044_DISPLAY_INT_STAT                    0xFFFFFFFE
+#define   S_000044_VGA_INT_STAT(x)                     (((x) & 0x1) << 1)
+#define   G_000044_VGA_INT_STAT(x)                     (((x) >> 1) & 0x1)
+#define   C_000044_VGA_INT_STAT                        0xFFFFFFFD
+#define   S_000044_CAP0_INT_ACTIVE(x)                  (((x) & 0x1) << 8)
+#define   G_000044_CAP0_INT_ACTIVE(x)                  (((x) >> 8) & 0x1)
+#define   C_000044_CAP0_INT_ACTIVE                     0xFFFFFEFF
+#define   S_000044_DMA_VIPH0_INT(x)                    (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT(x)                    (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT                       0xFFFFEFFF
+#define   S_000044_DMA_VIPH1_INT(x)                    (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT(x)                    (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT                       0xFFFFDFFF
+#define   S_000044_DMA_VIPH2_INT(x)                    (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT(x)                    (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT                       0xFFFFBFFF
+#define   S_000044_DMA_VIPH3_INT(x)                    (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT(x)                    (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT                       0xFFFF7FFF
+#define   S_000044_MC_PROBE_FAULT_STAT(x)              (((x) & 0x1) << 16)
+#define   G_000044_MC_PROBE_FAULT_STAT(x)              (((x) >> 16) & 0x1)
+#define   C_000044_MC_PROBE_FAULT_STAT                 0xFFFEFFFF
+#define   S_000044_I2C_INT(x)                          (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT(x)                          (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT                             0xFFFDFFFF
+#define   S_000044_SCRATCH_INT_STAT(x)                 (((x) & 0x1) << 18)
+#define   G_000044_SCRATCH_INT_STAT(x)                 (((x) >> 18) & 0x1)
+#define   C_000044_SCRATCH_INT_STAT                    0xFFFBFFFF
+#define   S_000044_GUI_IDLE_STAT(x)                    (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT(x)                    (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT                       0xFFF7FFFF
+#define   S_000044_ATI_OVERDRIVE_INT_STAT(x)           (((x) & 0x1) << 20)
+#define   G_000044_ATI_OVERDRIVE_INT_STAT(x)           (((x) >> 20) & 0x1)
+#define   C_000044_ATI_OVERDRIVE_INT_STAT              0xFFEFFFFF
+#define   S_000044_MC_PROTECTION_FAULT_STAT(x)         (((x) & 0x1) << 21)
+#define   G_000044_MC_PROTECTION_FAULT_STAT(x)         (((x) >> 21) & 0x1)
+#define   C_000044_MC_PROTECTION_FAULT_STAT            0xFFDFFFFF
+#define   S_000044_RBBM_READ_INT_STAT(x)               (((x) & 0x1) << 22)
+#define   G_000044_RBBM_READ_INT_STAT(x)               (((x) >> 22) & 0x1)
+#define   C_000044_RBBM_READ_INT_STAT                  0xFFBFFFFF
+#define   S_000044_CB_CONTEXT_SWITCH_STAT(x)           (((x) & 0x1) << 23)
+#define   G_000044_CB_CONTEXT_SWITCH_STAT(x)           (((x) >> 23) & 0x1)
+#define   C_000044_CB_CONTEXT_SWITCH_STAT              0xFF7FFFFF
+#define   S_000044_VIPH_INT(x)                         (((x) & 0x1) << 24)
+#define   G_000044_VIPH_INT(x)                         (((x) >> 24) & 0x1)
+#define   C_000044_VIPH_INT                            0xFEFFFFFF
+#define   S_000044_SW_INT(x)                           (((x) & 0x1) << 25)
+#define   G_000044_SW_INT(x)                           (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT                              0xFDFFFFFF
+#define   S_000044_SW_INT_SET(x)                       (((x) & 0x1) << 26)
+#define   G_000044_SW_INT_SET(x)                       (((x) >> 26) & 0x1)
+#define   C_000044_SW_INT_SET                          0xFBFFFFFF
+#define   S_000044_IDCT_INT_STAT(x)                    (((x) & 0x1) << 27)
+#define   G_000044_IDCT_INT_STAT(x)                    (((x) >> 27) & 0x1)
+#define   C_000044_IDCT_INT_STAT                       0xF7FFFFFF
+#define   S_000044_GUIDMA_STAT(x)                      (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_STAT(x)                      (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_STAT                         0xBFFFFFFF
+#define   S_000044_VIDDMA_STAT(x)                      (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_STAT(x)                      (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_STAT                         0x7FFFFFFF
+#define R_00004C_BUS_CNTL                            0x00004C
+#define   S_00004C_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 14)
+#define   G_00004C_BUS_MASTER_DIS(x)                   (((x) >> 14) & 0x1)
+#define   C_00004C_BUS_MASTER_DIS                      0xFFFFBFFF
+#define   S_00004C_BUS_MSI_REARM(x)                    (((x) & 0x1) << 20)
+#define   G_00004C_BUS_MSI_REARM(x)                    (((x) >> 20) & 0x1)
+#define   C_00004C_BUS_MSI_REARM                       0xFFEFFFFF
+#define R_000070_MC_IND_INDEX                        0x000070
+#define   S_000070_MC_IND_ADDR(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000070_MC_IND_ADDR(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000070_MC_IND_ADDR                         0xFFFF0000
+#define   S_000070_MC_IND_SEQ_RBS_0(x)                 (((x) & 0x1) << 16)
+#define   G_000070_MC_IND_SEQ_RBS_0(x)                 (((x) >> 16) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_0                    0xFFFEFFFF
+#define   S_000070_MC_IND_SEQ_RBS_1(x)                 (((x) & 0x1) << 17)
+#define   G_000070_MC_IND_SEQ_RBS_1(x)                 (((x) >> 17) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_1                    0xFFFDFFFF
+#define   S_000070_MC_IND_SEQ_RBS_2(x)                 (((x) & 0x1) << 18)
+#define   G_000070_MC_IND_SEQ_RBS_2(x)                 (((x) >> 18) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_2                    0xFFFBFFFF
+#define   S_000070_MC_IND_SEQ_RBS_3(x)                 (((x) & 0x1) << 19)
+#define   G_000070_MC_IND_SEQ_RBS_3(x)                 (((x) >> 19) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_3                    0xFFF7FFFF
+#define   S_000070_MC_IND_AIC_RBS(x)                   (((x) & 0x1) << 20)
+#define   G_000070_MC_IND_AIC_RBS(x)                   (((x) >> 20) & 0x1)
+#define   C_000070_MC_IND_AIC_RBS                      0xFFEFFFFF
+#define   S_000070_MC_IND_CITF_ARB0(x)                 (((x) & 0x1) << 21)
+#define   G_000070_MC_IND_CITF_ARB0(x)                 (((x) >> 21) & 0x1)
+#define   C_000070_MC_IND_CITF_ARB0                    0xFFDFFFFF
+#define   S_000070_MC_IND_CITF_ARB1(x)                 (((x) & 0x1) << 22)
+#define   G_000070_MC_IND_CITF_ARB1(x)                 (((x) >> 22) & 0x1)
+#define   C_000070_MC_IND_CITF_ARB1                    0xFFBFFFFF
+#define   S_000070_MC_IND_WR_EN(x)                     (((x) & 0x1) << 23)
+#define   G_000070_MC_IND_WR_EN(x)                     (((x) >> 23) & 0x1)
+#define   C_000070_MC_IND_WR_EN                        0xFF7FFFFF
+#define   S_000070_MC_IND_RD_INV(x)                    (((x) & 0x1) << 24)
+#define   G_000070_MC_IND_RD_INV(x)                    (((x) >> 24) & 0x1)
+#define   C_000070_MC_IND_RD_INV                       0xFEFFFFFF
+#define R_000074_MC_IND_DATA                         0x000074
+#define   S_000074_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_000074_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000074_MC_IND_DATA                         0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT           0x0060A4
+#define   S_0060A4_D1CRTC_FRAME_COUNT(x)               (((x) & 0xFFFFFF) << 0)
+#define   G_0060A4_D1CRTC_FRAME_COUNT(x)               (((x) >> 0) & 0xFFFFFF)
+#define   C_0060A4_D1CRTC_FRAME_COUNT                  0xFF000000
+#define R_006534_D1MODE_VBLANK_STATUS                0x006534
+#define   S_006534_D1MODE_VBLANK_OCCURRED(x)           (((x) & 0x1) << 0)
+#define   G_006534_D1MODE_VBLANK_OCCURRED(x)           (((x) >> 0) & 0x1)
+#define   C_006534_D1MODE_VBLANK_OCCURRED              0xFFFFFFFE
+#define   S_006534_D1MODE_VBLANK_ACK(x)                (((x) & 0x1) << 4)
+#define   G_006534_D1MODE_VBLANK_ACK(x)                (((x) >> 4) & 0x1)
+#define   C_006534_D1MODE_VBLANK_ACK                   0xFFFFFFEF
+#define   S_006534_D1MODE_VBLANK_STAT(x)               (((x) & 0x1) << 12)
+#define   G_006534_D1MODE_VBLANK_STAT(x)               (((x) >> 12) & 0x1)
+#define   C_006534_D1MODE_VBLANK_STAT                  0xFFFFEFFF
+#define   S_006534_D1MODE_VBLANK_INTERRUPT(x)          (((x) & 0x1) << 16)
+#define   G_006534_D1MODE_VBLANK_INTERRUPT(x)          (((x) >> 16) & 0x1)
+#define   C_006534_D1MODE_VBLANK_INTERRUPT             0xFFFEFFFF
+#define R_006540_DxMODE_INT_MASK                     0x006540
+#define   S_006540_D1MODE_VBLANK_INT_MASK(x)           (((x) & 0x1) << 0)
+#define   G_006540_D1MODE_VBLANK_INT_MASK(x)           (((x) >> 0) & 0x1)
+#define   C_006540_D1MODE_VBLANK_INT_MASK              0xFFFFFFFE
+#define   S_006540_D1MODE_VLINE_INT_MASK(x)            (((x) & 0x1) << 4)
+#define   G_006540_D1MODE_VLINE_INT_MASK(x)            (((x) >> 4) & 0x1)
+#define   C_006540_D1MODE_VLINE_INT_MASK               0xFFFFFFEF
+#define   S_006540_D2MODE_VBLANK_INT_MASK(x)           (((x) & 0x1) << 8)
+#define   G_006540_D2MODE_VBLANK_INT_MASK(x)           (((x) >> 8) & 0x1)
+#define   C_006540_D2MODE_VBLANK_INT_MASK              0xFFFFFEFF
+#define   S_006540_D2MODE_VLINE_INT_MASK(x)            (((x) & 0x1) << 12)
+#define   G_006540_D2MODE_VLINE_INT_MASK(x)            (((x) >> 12) & 0x1)
+#define   C_006540_D2MODE_VLINE_INT_MASK               0xFFFFEFFF
+#define   S_006540_D1MODE_VBLANK_CP_SEL(x)             (((x) & 0x1) << 30)
+#define   G_006540_D1MODE_VBLANK_CP_SEL(x)             (((x) >> 30) & 0x1)
+#define   C_006540_D1MODE_VBLANK_CP_SEL                0xBFFFFFFF
+#define   S_006540_D2MODE_VBLANK_CP_SEL(x)             (((x) & 0x1) << 31)
+#define   G_006540_D2MODE_VBLANK_CP_SEL(x)             (((x) >> 31) & 0x1)
+#define   C_006540_D2MODE_VBLANK_CP_SEL                0x7FFFFFFF
+#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT           0x0068A4
+#define   S_0068A4_D2CRTC_FRAME_COUNT(x)               (((x) & 0xFFFFFF) << 0)
+#define   G_0068A4_D2CRTC_FRAME_COUNT(x)               (((x) >> 0) & 0xFFFFFF)
+#define   C_0068A4_D2CRTC_FRAME_COUNT                  0xFF000000
+#define R_006D34_D2MODE_VBLANK_STATUS                0x006D34
+#define   S_006D34_D2MODE_VBLANK_OCCURRED(x)           (((x) & 0x1) << 0)
+#define   G_006D34_D2MODE_VBLANK_OCCURRED(x)           (((x) >> 0) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_OCCURRED              0xFFFFFFFE
+#define   S_006D34_D2MODE_VBLANK_ACK(x)                (((x) & 0x1) << 4)
+#define   G_006D34_D2MODE_VBLANK_ACK(x)                (((x) >> 4) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_ACK                   0xFFFFFFEF
+#define   S_006D34_D2MODE_VBLANK_STAT(x)               (((x) & 0x1) << 12)
+#define   G_006D34_D2MODE_VBLANK_STAT(x)               (((x) >> 12) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_STAT                  0xFFFFEFFF
+#define   S_006D34_D2MODE_VBLANK_INTERRUPT(x)          (((x) & 0x1) << 16)
+#define   G_006D34_D2MODE_VBLANK_INTERRUPT(x)          (((x) >> 16) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_INTERRUPT             0xFFFEFFFF
+#define R_007EDC_DISP_INTERRUPT_STATUS               0x007EDC
+#define   S_007EDC_LB_D1_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 4)
+#define   G_007EDC_LB_D1_VBLANK_INTERRUPT(x)           (((x) >> 4) & 0x1)
+#define   C_007EDC_LB_D1_VBLANK_INTERRUPT              0xFFFFFFEF
+#define   S_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 5)
+#define   G_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) >> 5) & 0x1)
+#define   C_007EDC_LB_D2_VBLANK_INTERRUPT              0xFFFFFFDF
+
+
+/* MC registers */
+#define R_000000_MC_STATUS                           0x000000
+#define   S_000000_MC_IDLE(x)                          (((x) & 0x1) << 0)
+#define   G_000000_MC_IDLE(x)                          (((x) >> 0) & 0x1)
+#define   C_000000_MC_IDLE                             0xFFFFFFFE
+#define R_000004_MC_FB_LOCATION                      0x000004
+#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000004_MC_FB_START                         0xFFFF0000
+#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000004_MC_FB_TOP                           0x0000FFFF
+#define R_000005_MC_AGP_LOCATION                     0x000005
+#define   S_000005_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000005_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000005_MC_AGP_START                        0xFFFF0000
+#define   S_000005_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000005_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000005_MC_AGP_TOP                          0x0000FFFF
+#define R_000006_AGP_BASE                            0x000006
+#define   S_000006_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000006_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000006_AGP_BASE_ADDR                       0x00000000
+#define R_000007_AGP_BASE_2                          0x000007
+#define   S_000007_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000007_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000007_AGP_BASE_ADDR_2                     0xFFFFFFF0
+#define R_000009_MC_CNTL1                            0x000009
+#define   S_000009_ENABLE_PAGE_TABLES(x)               (((x) & 0x1) << 26)
+#define   G_000009_ENABLE_PAGE_TABLES(x)               (((x) >> 26) & 0x1)
+#define   C_000009_ENABLE_PAGE_TABLES                  0xFBFFFFFF
+/* FIXME: the various field sizes are unknown; need feedback from AMD */
+#define R_000100_MC_PT0_CNTL                         0x000100
+#define   S_000100_ENABLE_PT(x)                        (((x) & 0x1) << 0)
+#define   G_000100_ENABLE_PT(x)                        (((x) >> 0) & 0x1)
+#define   C_000100_ENABLE_PT                           0xFFFFFFFE
+#define   S_000100_EFFECTIVE_L2_CACHE_SIZE(x)          (((x) & 0x7) << 15)
+#define   G_000100_EFFECTIVE_L2_CACHE_SIZE(x)          (((x) >> 15) & 0x7)
+#define   C_000100_EFFECTIVE_L2_CACHE_SIZE             0xFFFC7FFF
+#define   S_000100_EFFECTIVE_L2_QUEUE_SIZE(x)          (((x) & 0x7) << 21)
+#define   G_000100_EFFECTIVE_L2_QUEUE_SIZE(x)          (((x) >> 21) & 0x7)
+#define   C_000100_EFFECTIVE_L2_QUEUE_SIZE             0xFF1FFFFF
+#define   S_000100_INVALIDATE_ALL_L1_TLBS(x)           (((x) & 0x1) << 28)
+#define   G_000100_INVALIDATE_ALL_L1_TLBS(x)           (((x) >> 28) & 0x1)
+#define   C_000100_INVALIDATE_ALL_L1_TLBS              0xEFFFFFFF
+#define   S_000100_INVALIDATE_L2_CACHE(x)              (((x) & 0x1) << 29)
+#define   G_000100_INVALIDATE_L2_CACHE(x)              (((x) >> 29) & 0x1)
+#define   C_000100_INVALIDATE_L2_CACHE                 0xDFFFFFFF
+#define R_000102_MC_PT0_CONTEXT0_CNTL                0x000102
+#define   S_000102_ENABLE_PAGE_TABLE(x)                (((x) & 0x1) << 0)
+#define   G_000102_ENABLE_PAGE_TABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000102_ENABLE_PAGE_TABLE                   0xFFFFFFFE
+#define   S_000102_PAGE_TABLE_DEPTH(x)                 (((x) & 0x3) << 1)
+#define   G_000102_PAGE_TABLE_DEPTH(x)                 (((x) >> 1) & 0x3)
+#define   C_000102_PAGE_TABLE_DEPTH                    0xFFFFFFF9
+#define   V_000102_PAGE_TABLE_FLAT                     0
+/* R600 documentation suggests that this should be a number of pages */
+#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR     0x000112
+#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR    0x000114
+#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR   0x00011C
+#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR      0x00012C
+#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR     0x00013C
+#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR       0x00014C
+#define R_00016C_MC_PT0_CLIENT0_CNTL                 0x00016C
+#define   S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
+#define   G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
+#define   C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE    0xFFFFFFFE
+#define   S_00016C_TRANSLATION_MODE_OVERRIDE(x)        (((x) & 0x1) << 1)
+#define   G_00016C_TRANSLATION_MODE_OVERRIDE(x)        (((x) >> 1) & 0x1)
+#define   C_00016C_TRANSLATION_MODE_OVERRIDE           0xFFFFFFFD
+#define   S_00016C_SYSTEM_ACCESS_MODE_MASK(x)          (((x) & 0x3) << 8)
+#define   G_00016C_SYSTEM_ACCESS_MODE_MASK(x)          (((x) >> 8) & 0x3)
+#define   C_00016C_SYSTEM_ACCESS_MODE_MASK             0xFFFFFCFF
+#define   V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY          0
+#define   V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP      1
+#define   V_00016C_SYSTEM_ACCESS_MODE_IN_SYS           2
+#define   V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS       3
+#define   S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)  (((x) & 0x1) << 10)
+#define   G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)  (((x) >> 10) & 0x1)
+#define   C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS     0xFFFFFBFF
+#define   V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH  0
+#define   V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
+#define   S_00016C_EFFECTIVE_L1_CACHE_SIZE(x)          (((x) & 0x7) << 11)
+#define   G_00016C_EFFECTIVE_L1_CACHE_SIZE(x)          (((x) >> 11) & 0x7)
+#define   C_00016C_EFFECTIVE_L1_CACHE_SIZE             0xFFFFC7FF
+#define   S_00016C_ENABLE_FRAGMENT_PROCESSING(x)       (((x) & 0x1) << 14)
+#define   G_00016C_ENABLE_FRAGMENT_PROCESSING(x)       (((x) >> 14) & 0x1)
+#define   C_00016C_ENABLE_FRAGMENT_PROCESSING          0xFFFFBFFF
+#define   S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)          (((x) & 0x7) << 15)
+#define   G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)          (((x) >> 15) & 0x7)
+#define   C_00016C_EFFECTIVE_L1_QUEUE_SIZE             0xFFFC7FFF
+#define   S_00016C_INVALIDATE_L1_TLB(x)                (((x) & 0x1) << 20)
+#define   G_00016C_INVALIDATE_L1_TLB(x)                (((x) >> 20) & 0x1)
+#define   C_00016C_INVALIDATE_L1_TLB                   0xFFEFFFFF
+
+#endif
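
The S_/G_/C_ macros above follow a single convention: S_ shifts a field value
into place, G_ extracts a field from a register word, and C_ is the AND-mask
that clears the field for a read-modify-write. A minimal, self-contained
sketch of that pattern, reusing the MC_FB_LOCATION field macros defined above
(the rreg()/wreg() helpers and the fake register array are stand-ins for
illustration only, not the driver's accessors):

	#include <stdint.h>
	#include <stdio.h>

	/* Field macros copied from the header above. */
	#define R_000004_MC_FB_LOCATION                      0x000004
	#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
	#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
	#define   C_000004_MC_FB_START                         0xFFFF0000
	#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
	#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
	#define   C_000004_MC_FB_TOP                           0x0000FFFF

	/* Stand-in register file; the register offset is used directly as an
	 * index to keep the sketch short. */
	static uint32_t fake_regs[16];
	static uint32_t rreg(uint32_t reg) { return fake_regs[reg]; }
	static void wreg(uint32_t reg, uint32_t v) { fake_regs[reg] = v; }

	int main(void)
	{
		uint32_t tmp;

		/* Update MC_FB_TOP while leaving MC_FB_START untouched. */
		tmp = rreg(R_000004_MC_FB_LOCATION);
		tmp &= C_000004_MC_FB_TOP;            /* clear the field   */
		tmp |= S_000004_MC_FB_TOP(0x7FFF);    /* write a new value */
		wreg(R_000004_MC_FB_LOCATION, tmp);

		printf("FB_TOP=0x%04X FB_START=0x%04X\n",
		       G_000004_MC_FB_TOP(rreg(R_000004_MC_FB_LOCATION)),
		       G_000004_MC_FB_START(rreg(R_000004_MC_FB_LOCATION)));
		return 0;
	}
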
index 0f585ca8276d88c537c9513e3d64d42fc4c9b378..025e3225346cc6a6902392fddcade5c1cce99134 100644 (file)
  *          Jerome Glisse
  */
 #include "drmP.h"
-#include "radeon_reg.h"
 #include "radeon.h"
-#include "rs690r.h"
 #include "atom.h"
-#include "atom-bits.h"
-
-/* rs690,rs740 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
-void rs400_gart_disable(struct radeon_device *rdev);
-int rs400_gart_enable(struct radeon_device *rdev);
-void rs400_gart_adjust_size(struct radeon_device *rdev);
-void rs600_mc_disable_clients(struct radeon_device *rdev);
-void rs600_disable_vga(struct radeon_device *rdev);
-
-/* This files gather functions specifics to :
- * rs690,rs740
- *
- * Some of these functions might be used by newer ASICs.
- */
-void rs690_gpu_init(struct radeon_device *rdev);
-int rs690_mc_wait_for_idle(struct radeon_device *rdev);
-
+#include "rs690d.h"
 
-/*
- * MC functions.
- */
-int rs690_mc_init(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-       int r;
-
-       if (r100_debugfs_rbbm_init(rdev)) {
-               DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-       }
-
-       rs690_gpu_init(rdev);
-       rs400_gart_disable(rdev);
-
-       /* Setup GPU memory space */
-       rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-       rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
-       rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
-       rdev->mc.vram_location = 0xFFFFFFFFUL;
-       r = radeon_mc_setup(rdev);
-       if (r) {
-               return r;
-       }
-
-       /* Program GPU memory space */
-       rs600_mc_disable_clients(rdev);
-       if (rs690_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-       tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-       tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
-       tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
-       WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
-       /* FIXME: Does this reg exist on RS480,RS740 ? */
-       WREG32(0x310, rdev->mc.vram_location);
-       WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
-       return 0;
-}
-
-void rs690_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Global GPU functions
- */
-int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
 {
        unsigned i;
        uint32_t tmp;
 
        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
-               tmp = RREG32_MC(RS690_MC_STATUS);
-               if (tmp & RS690_MC_STATUS_IDLE) {
+               tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
+               if (G_000090_MC_SYSTEM_IDLE(tmp))
                        return 0;
-               }
-               DRM_UDELAY(1);
+               udelay(1);
        }
        return -1;
 }
 
-void rs690_errata(struct radeon_device *rdev)
-{
-       rdev->pll_errata = 0;
-}
-
-void rs690_gpu_init(struct radeon_device *rdev)
+static void rs690_gpu_init(struct radeon_device *rdev)
 {
        /* FIXME: HDP same place on rs690 ? */
        r100_hdp_reset(rdev);
-       rs600_disable_vga(rdev);
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
        if (rs690_mc_wait_for_idle(rdev)) {
@@ -134,10 +57,6 @@ void rs690_gpu_init(struct radeon_device *rdev)
        }
 }
 
-
-/*
- * VRAM info.
- */
 void rs690_pm_info(struct radeon_device *rdev)
 {
        int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
@@ -251,39 +170,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
        /*
         * Line Buffer Setup
         * There is a single line buffer shared by both display controllers.
-        * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+        * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
         * the display controllers.  The partitioning can either be done
         * manually or via one of four preset allocations specified in bits 1:0:
         *  0 - line buffer is divided in half and shared between crtc
         *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
         *  2 - D1 gets the whole buffer
         *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
-        * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual
+        * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
         * allocation mode. In manual allocation mode, D1 always starts at 0,
         * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
         */
-       tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK;
-       tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE;
+       tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
+       tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
        /* auto */
        if (mode1 && mode2) {
                if (mode1->hdisplay > mode2->hdisplay) {
                        if (mode1->hdisplay > 2560)
-                               tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+                               tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
                        else
-                               tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+                               tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
                } else if (mode2->hdisplay > mode1->hdisplay) {
                        if (mode2->hdisplay > 2560)
-                               tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+                               tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
                        else
-                               tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+                               tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
                } else
-                       tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+                       tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
        } else if (mode1) {
-               tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY;
+               tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
        } else if (mode2) {
-               tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+               tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
        }
-       WREG32(DC_LB_MEMORY_SPLIT, tmp);
+       WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
 }
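
A condensed, standalone sketch of the preset-selection rule described in the
comment above (choose_lb_split() and its width parameters are illustrative
names, not part of the driver; a width of 0 stands for an inactive display
controller):

	#include <stdio.h>

	#define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF    0
	#define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q      1
	#define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY          2
	#define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q      3

	static int choose_lb_split(int width1, int width2)
	{
		if (width1 && width2) {
			if (width1 > width2)
				return width1 > 2560 ?
					V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q :
					V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
			if (width2 > width1)
				return width2 > 2560 ?
					V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q :
					V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
			return V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		}
		if (width1)
			return V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
		if (width2)
			return V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
		return V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	}

	int main(void)
	{
		/* 2560 is not > 2560, so two active heads split the buffer evenly. */
		printf("%d\n", choose_lb_split(2560, 1920)); /* 0: D1HALF_D2HALF */
		printf("%d\n", choose_lb_split(3840, 1024)); /* 1: D1_3Q_D2_1Q   */
		return 0;
	}
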
 
 struct rs690_watermark {
@@ -488,28 +407,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
         * option.
         */
        if (rdev->disp_priority == 2) {
-               tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER);
-               tmp &= ~MC_DISP1R_INIT_LAT_MASK;
-               tmp &= ~MC_DISP0R_INIT_LAT_MASK;
-               if (mode1)
-                       tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
+               tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+               tmp &= C_000104_MC_DISP0R_INIT_LAT;
+               tmp &= C_000104_MC_DISP1R_INIT_LAT;
                if (mode0)
-                       tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
-               WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp);
+                       tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+               if (mode1)
+                       tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+               WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
        }
        rs690_line_buffer_adjust(rdev, mode0, mode1);
 
        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
-               WREG32(DCP_CONTROL, 0);
+               WREG32(R_006C9C_DCP_CONTROL, 0);
        if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
-               WREG32(DCP_CONTROL, 2);
+               WREG32(R_006C9C_DCP_CONTROL, 2);
 
        rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
        rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
 
        tmp = (wm0.lb_request_fifo_depth - 1);
        tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
-       WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+       WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
 
        if (mode0 && mode1) {
                if (rfixed_trunc(wm0.dbpp) > 64)
@@ -562,10 +481,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
-               WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-               WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
-               WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-               WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+               WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
+               WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+               WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
+               WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
        } else if (mode0) {
                if (rfixed_trunc(wm0.dbpp) > 64)
                        a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -592,10 +511,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
-               WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-               WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
-               WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
-               WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
+               WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
+               WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+               WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
+                       S_006D48_D2MODE_PRIORITY_A_OFF(1));
+               WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
+                       S_006D4C_D2MODE_PRIORITY_B_OFF(1));
        } else {
                if (rfixed_trunc(wm1.dbpp) > 64)
                        a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
@@ -622,30 +543,203 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
-               WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
-               WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
-               WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-               WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+               WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
+                       S_006548_D1MODE_PRIORITY_A_OFF(1));
+               WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
+                       S_00654C_D1MODE_PRIORITY_B_OFF(1));
+               WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
+               WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
        }
 }
 
-/*
- * Indirect registers accessor
- */
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
        uint32_t r;
 
-       WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
-       r = RREG32(RS690_MC_DATA);
-       WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+       WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
+       r = RREG32(R_00007C_MC_DATA);
+       WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
        return r;
 }
 
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
-       WREG32(RS690_MC_INDEX,
-              RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
-       WREG32(RS690_MC_DATA, v);
-       WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
+       WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
+               S_000078_MC_IND_WR_EN(1));
+       WREG32(R_00007C_MC_DATA, v);
+       WREG32(R_000078_MC_INDEX, 0x7F);
+}
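+
+A self-contained sketch of the index/data sequence that rs690_mc_rreg() and
+rs690_mc_wreg() above implement: program MC_INDEX with the target MC register
+number (setting WR_EN for writes), then move the word through MC_DATA. The
+RREG32()/WREG32() stand-ins below only log the MMIO traffic; they are
+illustrative, not the driver's accessors:
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	#define R_000078_MC_INDEX            0x000078
+	#define   S_000078_MC_IND_ADDR(x)      (((x) & 0x1FF) << 0)
+	#define   C_000078_MC_IND_ADDR         0xFFFFFE00
+	#define   S_000078_MC_IND_WR_EN(x)     (((x) & 0x1) << 9)
+	#define R_00007C_MC_DATA             0x00007C
+
+	static void WREG32(uint32_t reg, uint32_t v)
+	{
+		printf("MMIO write 0x%06X <- 0x%08X\n", reg, v);
+	}
+
+	static uint32_t RREG32(uint32_t reg)
+	{
+		printf("MMIO read  0x%06X\n", reg);
+		return 0;                     /* a real read returns the data word */
+	}
+
+	static uint32_t mc_rreg(uint32_t reg)
+	{
+		uint32_t r;
+
+		WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
+		r = RREG32(R_00007C_MC_DATA);    /* data of the selected MC reg */
+		WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+		return r;
+	}
+
+	static void mc_wreg(uint32_t reg, uint32_t v)
+	{
+		WREG32(R_000078_MC_INDEX,
+		       S_000078_MC_IND_ADDR(reg) | S_000078_MC_IND_WR_EN(1));
+		WREG32(R_00007C_MC_DATA, v);     /* payload goes through MC_DATA */
+		WREG32(R_000078_MC_INDEX, 0x7F); /* restore index, as the driver does */
+	}
+
+	int main(void)
+	{
+		mc_wreg(0x100, 0x00FF0000);      /* e.g. MCCFG_FB_LOCATION */
+		(void)mc_rreg(0x100);
+		return 0;
+	}
+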
+
+void rs690_mc_program(struct radeon_device *rdev)
+{
+       struct rv515_mc_save save;
+
+       /* Stops all mc clients */
+       rv515_mc_stop(rdev, &save);
+
+       /* Wait for mc idle */
+       if (rs690_mc_wait_for_idle(rdev))
+               dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n");
+       /* Program MC; this should be a 32-bit limited address space */
+       WREG32_MC(R_000100_MCCFG_FB_LOCATION,
+                       S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
+                       S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
+       WREG32(R_000134_HDP_FB_LOCATION,
+               S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+       rv515_mc_resume(rdev, &save);
+}
+
+static int rs690_startup(struct radeon_device *rdev)
+{
+       int r;
+
+       rs690_mc_program(rdev);
+       /* Resume clock */
+       rv515_clock_startup(rdev);
+       /* Initialize GPU configuration (# pipes, ...) */
+       rs690_gpu_init(rdev);
+       /* Initialize GART (initialize after TTM so we can allocate
+        * memory through TTM but finalize after TTM) */
+       r = rs400_gart_enable(rdev);
+       if (r)
+               return r;
+       /* Enable IRQ */
+       rdev->irq.sw_int = true;
+       rs600_irq_set(rdev);
+       /* 1M ring buffer */
+       r = r100_cp_init(rdev, 1024 * 1024);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+               return r;
+       }
+       r = r100_wb_init(rdev);
+       if (r)
+               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
+       r = r100_ib_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               return r;
+       }
+       return 0;
+}
+
+int rs690_resume(struct radeon_device *rdev)
+{
+       /* Make sure GART is not working */
+       rs400_gart_disable(rdev);
+       /* Resume clock before doing reset */
+       rv515_clock_startup(rdev);
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* post */
+       atom_asic_init(rdev->mode_info.atom_context);
+       /* Resume clock after posting */
+       rv515_clock_startup(rdev);
+       return rs690_startup(rdev);
+}
+
+int rs690_suspend(struct radeon_device *rdev)
+{
+       r100_cp_disable(rdev);
+       r100_wb_disable(rdev);
+       rs600_irq_disable(rdev);
+       rs400_gart_disable(rdev);
+       return 0;
+}
+
+void rs690_fini(struct radeon_device *rdev)
+{
+       rs690_suspend(rdev);
+       r100_cp_fini(rdev);
+       r100_wb_fini(rdev);
+       r100_ib_fini(rdev);
+       radeon_gem_fini(rdev);
+       rs400_gart_fini(rdev);
+       radeon_irq_kms_fini(rdev);
+       radeon_fence_driver_fini(rdev);
+       radeon_object_fini(rdev);
+       radeon_atombios_fini(rdev);
+       kfree(rdev->bios);
+       rdev->bios = NULL;
+}
+
+int rs690_init(struct radeon_device *rdev)
+{
+       int r;
+
+       /* Disable VGA */
+       rv515_vga_render_disable(rdev);
+       /* Initialize scratch registers */
+       radeon_scratch_init(rdev);
+       /* Initialize surface registers */
+       radeon_surface_init(rdev);
+       /* TODO: disable VGA need to use VGA request */
+       /* BIOS*/
+       if (!radeon_get_bios(rdev)) {
+               if (ASIC_IS_AVIVO(rdev))
+                       return -EINVAL;
+       }
+       if (rdev->is_atom_bios) {
+               r = radeon_atombios_init(rdev);
+               if (r)
+                       return r;
+       } else {
+               dev_err(rdev->dev, "Expecting atombios for RS690 GPU\n");
+               return -EINVAL;
+       }
+       /* Reset gpu before posting otherwise ATOM will enter infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev,
+                       "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* check if cards are posted or not */
+       if (!radeon_card_posted(rdev) && rdev->bios) {
+               DRM_INFO("GPU not posted. posting now...\n");
+               atom_asic_init(rdev->mode_info.atom_context);
+       }
+       /* Initialize clocks */
+       radeon_get_clock_info(rdev->ddev);
+       /* Get vram information */
+       rs690_vram_info(rdev);
+       /* Initialize memory controller (also test AGP) */
+       r = r420_mc_init(rdev);
+       if (r)
+               return r;
+       rv515_debugfs(rdev);
+       /* Fence driver */
+       r = radeon_fence_driver_init(rdev);
+       if (r)
+               return r;
+       r = radeon_irq_kms_init(rdev);
+       if (r)
+               return r;
+       /* Memory manager */
+       r = radeon_object_init(rdev);
+       if (r)
+               return r;
+       r = rs400_gart_init(rdev);
+       if (r)
+               return r;
+       rs600_set_safe_registers(rdev);
+       rdev->accel_working = true;
+       r = rs690_startup(rdev);
+       if (r) {
+               /* Something went wrong with the accel init; stop accel */
+               dev_err(rdev->dev, "Disabling GPU acceleration\n");
+               rs690_suspend(rdev);
+               r100_cp_fini(rdev);
+               r100_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               rs400_gart_fini(rdev);
+               radeon_irq_kms_fini(rdev);
+               rdev->accel_working = false;
+       }
+       return 0;
 }
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
new file mode 100644 (file)
index 0000000..62d31e7
--- /dev/null
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS690D_H__
+#define __RS690D_H__
+
+/* Registers */
+#define R_000078_MC_INDEX                            0x000078
+#define   S_000078_MC_IND_ADDR(x)                      (((x) & 0x1FF) << 0)
+#define   G_000078_MC_IND_ADDR(x)                      (((x) >> 0) & 0x1FF)
+#define   C_000078_MC_IND_ADDR                         0xFFFFFE00
+#define   S_000078_MC_IND_WR_EN(x)                     (((x) & 0x1) << 9)
+#define   G_000078_MC_IND_WR_EN(x)                     (((x) >> 9) & 0x1)
+#define   C_000078_MC_IND_WR_EN                        0xFFFFFDFF
+#define R_00007C_MC_DATA                             0x00007C
+#define   S_00007C_MC_DATA(x)                          (((x) & 0xFFFFFFFF) << 0)
+#define   G_00007C_MC_DATA(x)                          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00007C_MC_DATA                             0x00000000
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_006520_DC_LB_MEMORY_SPLIT                  0x006520
+#define   S_006520_DC_LB_MEMORY_SPLIT(x)               (((x) & 0x3) << 0)
+#define   G_006520_DC_LB_MEMORY_SPLIT(x)               (((x) >> 0) & 0x3)
+#define   C_006520_DC_LB_MEMORY_SPLIT                  0xFFFFFFFC
+#define   S_006520_DC_LB_MEMORY_SPLIT_MODE(x)          (((x) & 0x1) << 2)
+#define   G_006520_DC_LB_MEMORY_SPLIT_MODE(x)          (((x) >> 2) & 0x1)
+#define   C_006520_DC_LB_MEMORY_SPLIT_MODE             0xFFFFFFFB
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF    0
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q      1
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY          2
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q      3
+#define   S_006520_DC_LB_DISP1_END_ADR(x)              (((x) & 0x7FF) << 4)
+#define   G_006520_DC_LB_DISP1_END_ADR(x)              (((x) >> 4) & 0x7FF)
+#define   C_006520_DC_LB_DISP1_END_ADR                 0xFFFF800F
+#define R_006548_D1MODE_PRIORITY_A_CNT               0x006548
+#define   S_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006548_D1MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT               0x00654C
+#define   S_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_00654C_D1MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006C9C_DCP_CONTROL                         0x006C9C
+#define R_006D48_D2MODE_PRIORITY_A_CNT               0x006D48
+#define   S_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D48_D2MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT               0x006D4C
+#define   S_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D4C_D2MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006D58_LB_MAX_REQ_OUTSTANDING              0x006D58
+#define   S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x)        (((x) & 0xF) << 0)
+#define   G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x)        (((x) >> 0) & 0xF)
+#define   C_006D58_LB_D1_MAX_REQ_OUTSTANDING           0xFFFFFFF0
+#define   S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x)        (((x) & 0xF) << 16)
+#define   G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x)        (((x) >> 16) & 0xF)
+#define   C_006D58_LB_D2_MAX_REQ_OUTSTANDING           0xFFF0FFFF
+
+
+#define R_000090_MC_SYSTEM_STATUS                    0x000090
+#define   S_000090_MC_SYSTEM_IDLE(x)                   (((x) & 0x1) << 0)
+#define   G_000090_MC_SYSTEM_IDLE(x)                   (((x) >> 0) & 0x1)
+#define   C_000090_MC_SYSTEM_IDLE                      0xFFFFFFFE
+#define   S_000090_MC_SEQUENCER_IDLE(x)                (((x) & 0x1) << 1)
+#define   G_000090_MC_SEQUENCER_IDLE(x)                (((x) >> 1) & 0x1)
+#define   C_000090_MC_SEQUENCER_IDLE                   0xFFFFFFFD
+#define   S_000090_MC_ARBITER_IDLE(x)                  (((x) & 0x1) << 2)
+#define   G_000090_MC_ARBITER_IDLE(x)                  (((x) >> 2) & 0x1)
+#define   C_000090_MC_ARBITER_IDLE                     0xFFFFFFFB
+#define   S_000090_MC_SELECT_PM(x)                     (((x) & 0x1) << 3)
+#define   G_000090_MC_SELECT_PM(x)                     (((x) >> 3) & 0x1)
+#define   C_000090_MC_SELECT_PM                        0xFFFFFFF7
+#define   S_000090_RESERVED4(x)                        (((x) & 0xF) << 4)
+#define   G_000090_RESERVED4(x)                        (((x) >> 4) & 0xF)
+#define   C_000090_RESERVED4                           0xFFFFFF0F
+#define   S_000090_RESERVED8(x)                        (((x) & 0xF) << 8)
+#define   G_000090_RESERVED8(x)                        (((x) >> 8) & 0xF)
+#define   C_000090_RESERVED8                           0xFFFFF0FF
+#define   S_000090_RESERVED12(x)                       (((x) & 0xF) << 12)
+#define   G_000090_RESERVED12(x)                       (((x) >> 12) & 0xF)
+#define   C_000090_RESERVED12                          0xFFFF0FFF
+#define   S_000090_MCA_INIT_EXECUTED(x)                (((x) & 0x1) << 16)
+#define   G_000090_MCA_INIT_EXECUTED(x)                (((x) >> 16) & 0x1)
+#define   C_000090_MCA_INIT_EXECUTED                   0xFFFEFFFF
+#define   S_000090_MCA_IDLE(x)                         (((x) & 0x1) << 17)
+#define   G_000090_MCA_IDLE(x)                         (((x) >> 17) & 0x1)
+#define   C_000090_MCA_IDLE                            0xFFFDFFFF
+#define   S_000090_MCA_SEQ_IDLE(x)                     (((x) & 0x1) << 18)
+#define   G_000090_MCA_SEQ_IDLE(x)                     (((x) >> 18) & 0x1)
+#define   C_000090_MCA_SEQ_IDLE                        0xFFFBFFFF
+#define   S_000090_MCA_ARB_IDLE(x)                     (((x) & 0x1) << 19)
+#define   G_000090_MCA_ARB_IDLE(x)                     (((x) >> 19) & 0x1)
+#define   C_000090_MCA_ARB_IDLE                        0xFFF7FFFF
+#define   S_000090_RESERVED20(x)                       (((x) & 0xFFF) << 20)
+#define   G_000090_RESERVED20(x)                       (((x) >> 20) & 0xFFF)
+#define   C_000090_RESERVED20                          0x000FFFFF
+#define R_000100_MCCFG_FB_LOCATION                   0x000100
+#define   S_000100_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000100_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000100_MC_FB_START                         0xFFFF0000
+#define   S_000100_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000100_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000100_MC_FB_TOP                           0x0000FFFF
+#define R_000104_MC_INIT_MISC_LAT_TIMER              0x000104
+#define   S_000104_MC_CPR_INIT_LAT(x)                  (((x) & 0xF) << 0)
+#define   G_000104_MC_CPR_INIT_LAT(x)                  (((x) >> 0) & 0xF)
+#define   C_000104_MC_CPR_INIT_LAT                     0xFFFFFFF0
+#define   S_000104_MC_VF_INIT_LAT(x)                   (((x) & 0xF) << 4)
+#define   G_000104_MC_VF_INIT_LAT(x)                   (((x) >> 4) & 0xF)
+#define   C_000104_MC_VF_INIT_LAT                      0xFFFFFF0F
+#define   S_000104_MC_DISP0R_INIT_LAT(x)               (((x) & 0xF) << 8)
+#define   G_000104_MC_DISP0R_INIT_LAT(x)               (((x) >> 8) & 0xF)
+#define   C_000104_MC_DISP0R_INIT_LAT                  0xFFFFF0FF
+#define   S_000104_MC_DISP1R_INIT_LAT(x)               (((x) & 0xF) << 12)
+#define   G_000104_MC_DISP1R_INIT_LAT(x)               (((x) >> 12) & 0xF)
+#define   C_000104_MC_DISP1R_INIT_LAT                  0xFFFF0FFF
+#define   S_000104_MC_FIXED_INIT_LAT(x)                (((x) & 0xF) << 16)
+#define   G_000104_MC_FIXED_INIT_LAT(x)                (((x) >> 16) & 0xF)
+#define   C_000104_MC_FIXED_INIT_LAT                   0xFFF0FFFF
+#define   S_000104_MC_E2R_INIT_LAT(x)                  (((x) & 0xF) << 20)
+#define   G_000104_MC_E2R_INIT_LAT(x)                  (((x) >> 20) & 0xF)
+#define   C_000104_MC_E2R_INIT_LAT                     0xFF0FFFFF
+#define   S_000104_SAME_PAGE_PRIO(x)                   (((x) & 0xF) << 24)
+#define   G_000104_SAME_PAGE_PRIO(x)                   (((x) >> 24) & 0xF)
+#define   C_000104_SAME_PAGE_PRIO                      0xF0FFFFFF
+#define   S_000104_MC_GLOBW_INIT_LAT(x)                (((x) & 0xF) << 28)
+#define   G_000104_MC_GLOBW_INIT_LAT(x)                (((x) >> 28) & 0xF)
+#define   C_000104_MC_GLOBW_INIT_LAT                   0x0FFFFFFF
+
+#endif
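
The R_/S_/G_/C_ macros in these new headers follow one convention: R_ gives the register offset, S_ shifts a field value into position, G_ extracts it, and C_ is the AND-mask that clears the field. A minimal read-modify-write sketch, assuming only the LB_MAX_REQ_OUTSTANDING definitions above and the driver's usual RREG32()/WREG32() MMIO accessors (example_set_d1_max_req() is a hypothetical helper name, not part of the patch):

/* Sketch: update the D1 line-buffer request limit without touching the
 * rest of LB_MAX_REQ_OUTSTANDING.  C_ clears the field, S_ shifts the
 * new value in, G_ would read it back.
 */
static void example_set_d1_max_req(struct radeon_device *rdev, u32 max_req)
{
	u32 tmp;

	tmp = RREG32(R_006D58_LB_MAX_REQ_OUTSTANDING);
	tmp &= C_006D58_LB_D1_MAX_REQ_OUTSTANDING;
	tmp |= S_006D58_LB_D1_MAX_REQ_OUTSTANDING(max_req);
	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
}
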
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h
deleted file mode 100644 (file)
index c0d9faa..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- *          Alex Deucher
- *          Jerome Glisse
- */
-#ifndef RS690R_H
-#define RS690R_H
-
-/* RS690/RS740 registers */
-#define MC_INDEX                       0x0078
-#      define MC_INDEX_MASK                    0x1FF
-#      define MC_INDEX_WR_EN                   (1 << 9)
-#      define MC_INDEX_WR_ACK                  0x7F
-#define MC_DATA                                0x007C
-#define HDP_FB_LOCATION                        0x0134
-#define DC_LB_MEMORY_SPLIT             0x6520
-#define                DC_LB_MEMORY_SPLIT_MASK                 0x00000003
-#define                DC_LB_MEMORY_SPLIT_SHIFT                0
-#define                DC_LB_MEMORY_SPLIT_D1HALF_D2HALF        0
-#define                DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q          1
-#define                DC_LB_MEMORY_SPLIT_D1_ONLY              2
-#define                DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q          3
-#define                DC_LB_MEMORY_SPLIT_SHIFT_MODE           (1 << 2)
-#define                DC_LB_DISP1_END_ADR_SHIFT               4
-#define                DC_LB_DISP1_END_ADR_MASK                0x00007FF0
-#define D1MODE_PRIORITY_A_CNT          0x6548
-#define                MODE_PRIORITY_MARK_MASK                 0x00007FFF
-#define                MODE_PRIORITY_OFF                       (1 << 16)
-#define                MODE_PRIORITY_ALWAYS_ON                 (1 << 20)
-#define                MODE_PRIORITY_FORCE_MASK                (1 << 24)
-#define D1MODE_PRIORITY_B_CNT          0x654C
-#define LB_MAX_REQ_OUTSTANDING         0x6D58
-#define                LB_D1_MAX_REQ_OUTSTANDING_MASK          0x0000000F
-#define                LB_D1_MAX_REQ_OUTSTANDING_SHIFT         0
-#define                LB_D2_MAX_REQ_OUTSTANDING_MASK          0x000F0000
-#define                LB_D2_MAX_REQ_OUTSTANDING_SHIFT         16
-#define DCP_CONTROL                    0x6C9C
-#define D2MODE_PRIORITY_A_CNT          0x6D48
-#define D2MODE_PRIORITY_B_CNT          0x6D4C
-
-/* MC indirect registers */
-#define MC_STATUS_IDLE                         (1 << 0)
-#define MC_MISC_CNTL                   0x18
-#define                DISABLE_GTW                     (1 << 1)
-#define                GART_INDEX_REG_EN               (1 << 12)
-#define                BLOCK_GFX_D3_EN                 (1 << 14)
-#define GART_FEATURE_ID                        0x2B
-#define                HANG_EN                         (1 << 11)
-#define                TLB_ENABLE                      (1 << 18)
-#define                P2P_ENABLE                      (1 << 19)
-#define                GTW_LAC_EN                      (1 << 25)
-#define                LEVEL2_GART                     (0 << 30)
-#define                LEVEL1_GART                     (1 << 30)
-#define                PDC_EN                          (1 << 31)
-#define GART_BASE                      0x2C
-#define GART_CACHE_CNTRL               0x2E
-#      define GART_CACHE_INVALIDATE            (1 << 0)
-#define MC_STATUS                      0x90
-#define MCCFG_FB_LOCATION              0x100
-#define                MC_FB_START_MASK                0x0000FFFF
-#define                MC_FB_START_SHIFT               0
-#define                MC_FB_TOP_MASK                  0xFFFF0000
-#define                MC_FB_TOP_SHIFT                 16
-#define MCCFG_AGP_LOCATION             0x101
-#define                MC_AGP_START_MASK               0x0000FFFF
-#define                MC_AGP_START_SHIFT              0
-#define                MC_AGP_TOP_MASK                 0xFFFF0000
-#define                MC_AGP_TOP_SHIFT                16
-#define MCCFG_AGP_BASE                 0x102
-#define MCCFG_AGP_BASE_2               0x103
-#define MC_INIT_MISC_LAT_TIMER         0x104
-#define                MC_DISP0R_INIT_LAT_SHIFT        8
-#define                MC_DISP0R_INIT_LAT_MASK         0x00000F00
-#define                MC_DISP1R_INIT_LAT_SHIFT        12
-#define                MC_DISP1R_INIT_LAT_MASK         0x0000F000
-
-#endif
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h
new file mode 100644 (file)
index 0000000..c5b3983
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV200D_H__
+#define __RV200D_H__
+
+#define R_00015C_AGP_BASE_2                          0x00015C
+#define   S_00015C_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_00015C_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_00015C_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h
new file mode 100644 (file)
index 0000000..e5a70b0
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV250D_H__
+#define __RV250D_H__
+
+#define R_00000D_SCLK_CNTL_M6                        0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
+#endif
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h
new file mode 100644 (file)
index 0000000..c75c5ed
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV350D_H__
+#define __RV350D_H__
+
+/* RV350, RV380 registers */
+/* #define R_00000D_SCLK_CNTL                           0x00000D */
+#define   S_00000D_FORCE_VAP(x)                        (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_VAP(x)                        (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_VAP                           0xFFDFFFFF
+#define   S_00000D_FORCE_SR(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_SR(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_SR                            0xFDFFFFFF
+#define   S_00000D_FORCE_PX(x)                         (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_PX(x)                         (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_PX                            0xFBFFFFFF
+#define   S_00000D_FORCE_TX(x)                         (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TX(x)                         (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TX                            0xF7FFFFFF
+#define   S_00000D_FORCE_US(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_US(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_US                            0xEFFFFFFF
+#define   S_00000D_FORCE_SU(x)                         (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SU(x)                         (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SU                            0xBFFFFFFF
+
+#endif
index fd799748e7d894c5165c6d8886df0e4417c9b605..41a34c23e6d8514b39d9f427ce59880a405cce04 100644 (file)
 #include "drmP.h"
 #include "rv515d.h"
 #include "radeon.h"
-
+#include "atom.h"
 #include "rv515_reg_safe.h"
-/* rv515 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_cp_reset(struct radeon_device *rdev);
-int r100_rb2d_reset(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-void r420_pipes_init(struct radeon_device *rdev);
-void rs600_mc_disable_clients(struct radeon_device *rdev);
-void rs600_disable_vga(struct radeon_device *rdev);
-
-/* This files gather functions specifics to:
- * rv515
- *
- * Some of these functions might be used by newer ASICs.
- */
+
+/* This file gathers functions specific to: rv515 */
 int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
 int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
 void rv515_gpu_init(struct radeon_device *rdev);
 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
-
-/*
- * MC
- */
-int rv515_mc_init(struct radeon_device *rdev)
+void rv515_debugfs(struct radeon_device *rdev)
 {
-       uint32_t tmp;
-       int r;
-
        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }
@@ -69,67 +49,8 @@ int rv515_mc_init(struct radeon_device *rdev)
        if (rv515_debugfs_ga_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for pipes !\n");
        }
-
-       rv515_gpu_init(rdev);
-       rv370_pcie_gart_disable(rdev);
-
-       /* Setup GPU memory space */
-       rdev->mc.vram_location = 0xFFFFFFFFUL;
-       rdev->mc.gtt_location = 0xFFFFFFFFUL;
-       if (rdev->flags & RADEON_IS_AGP) {
-               r = radeon_agp_init(rdev);
-               if (r) {
-                       printk(KERN_WARNING "[drm] Disabling AGP\n");
-                       rdev->flags &= ~RADEON_IS_AGP;
-                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-               } else {
-                       rdev->mc.gtt_location = rdev->mc.agp_base;
-               }
-       }
-       r = radeon_mc_setup(rdev);
-       if (r) {
-               return r;
-       }
-
-       /* Program GPU memory space */
-       rs600_mc_disable_clients(rdev);
-       if (rv515_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "Failed to wait MC idle while "
-                      "programming pipes. Bad things might happen.\n");
-       }
-       /* Write VRAM size in case we are limiting it */
-       WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
-       tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
-       WREG32(0x134, tmp);
-       tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-       tmp = REG_SET(MC_FB_TOP, tmp >> 16);
-       tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
-       WREG32_MC(MC_FB_LOCATION, tmp);
-       WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
-       WREG32(0x310, rdev->mc.vram_location);
-       if (rdev->flags & RADEON_IS_AGP) {
-               tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-               tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
-               tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
-               WREG32_MC(MC_AGP_LOCATION, tmp);
-               WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
-               WREG32_MC(MC_AGP_BASE_2, 0);
-       } else {
-               WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
-               WREG32_MC(MC_AGP_BASE, 0);
-               WREG32_MC(MC_AGP_BASE_2, 0);
-       }
-       return 0;
-}
-
-void rv515_mc_fini(struct radeon_device *rdev)
-{
 }
 
-
-/*
- * Global GPU functions
- */
 void rv515_ring_start(struct radeon_device *rdev)
 {
        int r;
@@ -198,11 +119,6 @@ void rv515_ring_start(struct radeon_device *rdev)
        radeon_ring_unlock_commit(rdev);
 }
 
-void rv515_errata(struct radeon_device *rdev)
-{
-       rdev->pll_errata = 0;
-}
-
 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
 {
        unsigned i;
@@ -219,6 +135,12 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
        return -1;
 }
 
+void rv515_vga_render_disable(struct radeon_device *rdev)
+{
+       WREG32(R_000300_VGA_RENDER_CONTROL,
+               RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
 void rv515_gpu_init(struct radeon_device *rdev)
 {
        unsigned pipe_select_current, gb_pipe_select, tmp;
@@ -231,7 +153,7 @@ void rv515_gpu_init(struct radeon_device *rdev)
                       "reseting GPU. Bad things might happen.\n");
        }
 
-       rs600_disable_vga(rdev);
+       rv515_vga_render_disable(rdev);
 
        r420_pipes_init(rdev);
        gb_pipe_select = RREG32(0x402C);
@@ -335,10 +257,6 @@ int rv515_gpu_reset(struct radeon_device *rdev)
        return 0;
 }
 
-
-/*
- * VRAM info
- */
 static void rv515_vram_get_type(struct radeon_device *rdev)
 {
        uint32_t tmp;
@@ -374,10 +292,6 @@ void rv515_vram_info(struct radeon_device *rdev)
        rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
 }
 
-
-/*
- * Indirect registers accessor
- */
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
        uint32_t r;
@@ -395,9 +309,6 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
        WREG32(MC_IND_INDEX, 0);
 }
 
-/*
- * Debugfs info
- */
 #if defined(CONFIG_DEBUG_FS)
 static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
 {
@@ -459,13 +370,257 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 #endif
 }
 
-/*
- * Asic initialization
- */
-int rv515_init(struct radeon_device *rdev)
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+       save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
+       save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
+       save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
+       save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
+       save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
+       save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
+
+       /* Stop all video */
+       WREG32(R_000330_D1VGA_CONTROL, 0);
+       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+       WREG32(R_000300_VGA_RENDER_CONTROL, 0);
+       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
+       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
+       WREG32(R_006080_D1CRTC_CONTROL, 0);
+       WREG32(R_006880_D2CRTC_CONTROL, 0);
+       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
+       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+}
+
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+       WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
+       WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
+       WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
+       WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
+       WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
+       /* Unlock host access */
+       WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
+       mdelay(1);
+       /* Restore video state */
+       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
+       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
+       WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
+       WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
+       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
+       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+       WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
+       WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
+       WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+void rv515_mc_program(struct radeon_device *rdev)
+{
+       struct rv515_mc_save save;
+
+       /* Stops all mc clients */
+       rv515_mc_stop(rdev, &save);
+
+       /* Wait for mc idle */
+       if (rv515_mc_wait_for_idle(rdev))
+               dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+       /* Write VRAM size in case we are limiting it */
+       WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+       /* Program MC, should be a 32-bit limited address space */
+       WREG32_MC(R_000001_MC_FB_LOCATION,
+                       S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
+                       S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
+       WREG32(R_000134_HDP_FB_LOCATION,
+               S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+       if (rdev->flags & RADEON_IS_AGP) {
+               WREG32_MC(R_000002_MC_AGP_LOCATION,
+                       S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+                       S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+               WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+               WREG32_MC(R_000004_MC_AGP_BASE_2,
+                       S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+       } else {
+               WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
+               WREG32_MC(R_000003_MC_AGP_BASE, 0);
+               WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
+       }
+
+       rv515_mc_resume(rdev, &save);
+}
+
+void rv515_clock_startup(struct radeon_device *rdev)
+{
+       if (radeon_dynclks != -1 && radeon_dynclks)
+               radeon_atom_set_clock_gating(rdev, 1);
+       /* We need to force on some of the blocks */
+       WREG32_PLL(R_00000F_CP_DYN_CNTL,
+               RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
+       WREG32_PLL(R_000011_E2_DYN_CNTL,
+               RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
+       WREG32_PLL(R_000013_IDCT_DYN_CNTL,
+               RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
+}
+
+static int rv515_startup(struct radeon_device *rdev)
+{
+       int r;
+
+       rv515_mc_program(rdev);
+       /* Resume clock */
+       rv515_clock_startup(rdev);
+       /* Initialize GPU configuration (# pipes, ...) */
+       rv515_gpu_init(rdev);
+       /* Initialize GART (initialize after TTM so we can allocate
+        * memory through TTM but finalize after TTM) */
+       if (rdev->flags & RADEON_IS_PCIE) {
+               r = rv370_pcie_gart_enable(rdev);
+               if (r)
+                       return r;
+       }
+       /* Enable IRQ */
+       rdev->irq.sw_int = true;
+       rs600_irq_set(rdev);
+       /* 1M ring buffer */
+       r = r100_cp_init(rdev, 1024 * 1024);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+               return r;
+       }
+       r = r100_wb_init(rdev);
+       if (r)
+               dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
+       r = r100_ib_init(rdev);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+               return r;
+       }
+       return 0;
+}
+
+int rv515_resume(struct radeon_device *rdev)
+{
+       /* Make sure the GART is not working */
+       if (rdev->flags & RADEON_IS_PCIE)
+               rv370_pcie_gart_disable(rdev);
+       /* Resume clock before doing reset */
+       rv515_clock_startup(rdev);
+       /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* post */
+       atom_asic_init(rdev->mode_info.atom_context);
+       /* Resume clock after posting */
+       rv515_clock_startup(rdev);
+       return rv515_startup(rdev);
+}
+
+int rv515_suspend(struct radeon_device *rdev)
+{
+       r100_cp_disable(rdev);
+       r100_wb_disable(rdev);
+       rs600_irq_disable(rdev);
+       if (rdev->flags & RADEON_IS_PCIE)
+               rv370_pcie_gart_disable(rdev);
+       return 0;
+}
+
+void rv515_set_safe_registers(struct radeon_device *rdev)
 {
        rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
+}
+
+void rv515_fini(struct radeon_device *rdev)
+{
+       rv515_suspend(rdev);
+       r100_cp_fini(rdev);
+       r100_wb_fini(rdev);
+       r100_ib_fini(rdev);
+       radeon_gem_fini(rdev);
+       rv370_pcie_gart_fini(rdev);
+       radeon_agp_fini(rdev);
+       radeon_irq_kms_fini(rdev);
+       radeon_fence_driver_fini(rdev);
+       radeon_object_fini(rdev);
+       radeon_atombios_fini(rdev);
+       kfree(rdev->bios);
+       rdev->bios = NULL;
+}
+
+int rv515_init(struct radeon_device *rdev)
+{
+       int r;
+
+       /* Initialize scratch registers */
+       radeon_scratch_init(rdev);
+       /* Initialize surface registers */
+       radeon_surface_init(rdev);
+       /* TODO: disabling VGA needs to use a VGA request */
+       /* BIOS*/
+       if (!radeon_get_bios(rdev)) {
+               if (ASIC_IS_AVIVO(rdev))
+                       return -EINVAL;
+       }
+       if (rdev->is_atom_bios) {
+               r = radeon_atombios_init(rdev);
+               if (r)
+                       return r;
+       } else {
+               dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+               return -EINVAL;
+       }
+       /* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+       if (radeon_gpu_reset(rdev)) {
+               dev_warn(rdev->dev,
+                       "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+                       RREG32(R_000E40_RBBM_STATUS),
+                       RREG32(R_0007C0_CP_STAT));
+       }
+       /* check if cards are posted or not */
+       if (!radeon_card_posted(rdev) && rdev->bios) {
+               DRM_INFO("GPU not posted. posting now...\n");
+               atom_asic_init(rdev->mode_info.atom_context);
+       }
+       /* Initialize clocks */
+       radeon_get_clock_info(rdev->ddev);
+       /* Get VRAM information */
+       rv515_vram_info(rdev);
+       /* Initialize memory controller (also test AGP) */
+       r = r420_mc_init(rdev);
+       if (r)
+               return r;
+       rv515_debugfs(rdev);
+       /* Fence driver */
+       r = radeon_fence_driver_init(rdev);
+       if (r)
+               return r;
+       r = radeon_irq_kms_init(rdev);
+       if (r)
+               return r;
+       /* Memory manager */
+       r = radeon_object_init(rdev);
+       if (r)
+               return r;
+       r = rv370_pcie_gart_init(rdev);
+       if (r)
+               return r;
+       rv515_set_safe_registers(rdev);
+       rdev->accel_working = true;
+       r = rv515_startup(rdev);
+       if (r) {
+               /* Something went wrong with the accel init, stop accel */
+               dev_err(rdev->dev, "Disabling GPU acceleration\n");
+               rv515_suspend(rdev);
+               r100_cp_fini(rdev);
+               r100_wb_fini(rdev);
+               r100_ib_fini(rdev);
+               rv370_pcie_gart_fini(rdev);
+               radeon_agp_fini(rdev);
+               radeon_irq_kms_fini(rdev);
+               rdev->accel_working = false;
+       }
        return 0;
 }
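
The MC registers added below in rv515d.h (R_000001_MC_FB_LOCATION and friends) sit behind the indirect MC window rather than plain MMIO, which is why rv515_mc_program() above goes through WREG32_MC()/rv515_mc_wreg(). A minimal read-side sketch under the same assumption (example_mc_fb_start() is a hypothetical helper; RREG32_MC is the read-side counterpart of the WREG32_MC calls used above):

/* Sketch: fetch the frame-buffer start field back out of the indirect
 * MC space; the field holds vram_start >> 16 (64KB units), matching what
 * rv515_mc_program() writes with S_000001_MC_FB_START().
 */
static u32 example_mc_fb_start(struct radeon_device *rdev)
{
	u32 fb_loc = RREG32_MC(R_000001_MC_FB_LOCATION);

	return G_000001_MC_FB_START(fb_loc);
}
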
 
index a65e17ec1c08cd7dd75b512b4e076db9c10b3bcc..fc216e49384d038c94ae7afb05dda83a15a46e2b 100644 (file)
 #define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
 #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
 
-#endif
+/* Registers */
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_000300_VGA_RENDER_CONTROL                  0x000300
+#define   S_000300_VGA_BLINK_RATE(x)                   (((x) & 0x1F) << 0)
+#define   G_000300_VGA_BLINK_RATE(x)                   (((x) >> 0) & 0x1F)
+#define   C_000300_VGA_BLINK_RATE                      0xFFFFFFE0
+#define   S_000300_VGA_BLINK_MODE(x)                   (((x) & 0x3) << 5)
+#define   G_000300_VGA_BLINK_MODE(x)                   (((x) >> 5) & 0x3)
+#define   C_000300_VGA_BLINK_MODE                      0xFFFFFF9F
+#define   S_000300_VGA_CURSOR_BLINK_INVERT(x)          (((x) & 0x1) << 7)
+#define   G_000300_VGA_CURSOR_BLINK_INVERT(x)          (((x) >> 7) & 0x1)
+#define   C_000300_VGA_CURSOR_BLINK_INVERT             0xFFFFFF7F
+#define   S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x)       (((x) & 0x1) << 8)
+#define   G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x)       (((x) >> 8) & 0x1)
+#define   C_000300_VGA_EXTD_ADDR_COUNT_ENABLE          0xFFFFFEFF
+#define   S_000300_VGA_VSTATUS_CNTL(x)                 (((x) & 0x3) << 16)
+#define   G_000300_VGA_VSTATUS_CNTL(x)                 (((x) >> 16) & 0x3)
+#define   C_000300_VGA_VSTATUS_CNTL                    0xFFFCFFFF
+#define   S_000300_VGA_LOCK_8DOT(x)                    (((x) & 0x1) << 24)
+#define   G_000300_VGA_LOCK_8DOT(x)                    (((x) >> 24) & 0x1)
+#define   C_000300_VGA_LOCK_8DOT                       0xFEFFFFFF
+#define   S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25)
+#define   G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1)
+#define   C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL    0xFDFFFFFF
+#define R_000310_VGA_MEMORY_BASE_ADDRESS             0x000310
+#define   S_000310_VGA_MEMORY_BASE_ADDRESS(x)          (((x) & 0xFFFFFFFF) << 0)
+#define   G_000310_VGA_MEMORY_BASE_ADDRESS(x)          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000310_VGA_MEMORY_BASE_ADDRESS             0x00000000
+#define R_000328_VGA_HDP_CONTROL                     0x000328
+#define   S_000328_VGA_MEM_PAGE_SELECT_EN(x)           (((x) & 0x1) << 0)
+#define   G_000328_VGA_MEM_PAGE_SELECT_EN(x)           (((x) >> 0) & 0x1)
+#define   C_000328_VGA_MEM_PAGE_SELECT_EN              0xFFFFFFFE
+#define   S_000328_VGA_RBBM_LOCK_DISABLE(x)            (((x) & 0x1) << 8)
+#define   G_000328_VGA_RBBM_LOCK_DISABLE(x)            (((x) >> 8) & 0x1)
+#define   C_000328_VGA_RBBM_LOCK_DISABLE               0xFFFFFEFF
+#define   S_000328_VGA_SOFT_RESET(x)                   (((x) & 0x1) << 16)
+#define   G_000328_VGA_SOFT_RESET(x)                   (((x) >> 16) & 0x1)
+#define   C_000328_VGA_SOFT_RESET                      0xFFFEFFFF
+#define   S_000328_VGA_TEST_RESET_CONTROL(x)           (((x) & 0x1) << 24)
+#define   G_000328_VGA_TEST_RESET_CONTROL(x)           (((x) >> 24) & 0x1)
+#define   C_000328_VGA_TEST_RESET_CONTROL              0xFEFFFFFF
+#define R_000330_D1VGA_CONTROL                       0x000330
+#define   S_000330_D1VGA_MODE_ENABLE(x)                (((x) & 0x1) << 0)
+#define   G_000330_D1VGA_MODE_ENABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000330_D1VGA_MODE_ENABLE                   0xFFFFFFFE
+#define   S_000330_D1VGA_TIMING_SELECT(x)              (((x) & 0x1) << 8)
+#define   G_000330_D1VGA_TIMING_SELECT(x)              (((x) >> 8) & 0x1)
+#define   C_000330_D1VGA_TIMING_SELECT                 0xFFFFFEFF
+#define   S_000330_D1VGA_SYNC_POLARITY_SELECT(x)       (((x) & 0x1) << 9)
+#define   G_000330_D1VGA_SYNC_POLARITY_SELECT(x)       (((x) >> 9) & 0x1)
+#define   C_000330_D1VGA_SYNC_POLARITY_SELECT          0xFFFFFDFF
+#define   S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x)     (((x) & 0x1) << 10)
+#define   G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x)     (((x) >> 10) & 0x1)
+#define   C_000330_D1VGA_OVERSCAN_TIMING_SELECT        0xFFFFFBFF
+#define   S_000330_D1VGA_OVERSCAN_COLOR_EN(x)          (((x) & 0x1) << 16)
+#define   G_000330_D1VGA_OVERSCAN_COLOR_EN(x)          (((x) >> 16) & 0x1)
+#define   C_000330_D1VGA_OVERSCAN_COLOR_EN             0xFFFEFFFF
+#define   S_000330_D1VGA_ROTATE(x)                     (((x) & 0x3) << 24)
+#define   G_000330_D1VGA_ROTATE(x)                     (((x) >> 24) & 0x3)
+#define   C_000330_D1VGA_ROTATE                        0xFCFFFFFF
+#define R_000338_D2VGA_CONTROL                       0x000338
+#define   S_000338_D2VGA_MODE_ENABLE(x)                (((x) & 0x1) << 0)
+#define   G_000338_D2VGA_MODE_ENABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000338_D2VGA_MODE_ENABLE                   0xFFFFFFFE
+#define   S_000338_D2VGA_TIMING_SELECT(x)              (((x) & 0x1) << 8)
+#define   G_000338_D2VGA_TIMING_SELECT(x)              (((x) >> 8) & 0x1)
+#define   C_000338_D2VGA_TIMING_SELECT                 0xFFFFFEFF
+#define   S_000338_D2VGA_SYNC_POLARITY_SELECT(x)       (((x) & 0x1) << 9)
+#define   G_000338_D2VGA_SYNC_POLARITY_SELECT(x)       (((x) >> 9) & 0x1)
+#define   C_000338_D2VGA_SYNC_POLARITY_SELECT          0xFFFFFDFF
+#define   S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x)     (((x) & 0x1) << 10)
+#define   G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x)     (((x) >> 10) & 0x1)
+#define   C_000338_D2VGA_OVERSCAN_TIMING_SELECT        0xFFFFFBFF
+#define   S_000338_D2VGA_OVERSCAN_COLOR_EN(x)          (((x) & 0x1) << 16)
+#define   G_000338_D2VGA_OVERSCAN_COLOR_EN(x)          (((x) >> 16) & 0x1)
+#define   C_000338_D2VGA_OVERSCAN_COLOR_EN             0xFFFEFFFF
+#define   S_000338_D2VGA_ROTATE(x)                     (((x) & 0x3) << 24)
+#define   G_000338_D2VGA_ROTATE(x)                     (((x) >> 24) & 0x3)
+#define   C_000338_D2VGA_ROTATE                        0xFCFFFFFF
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_RBBM_HIBUSY(x)                      (((x) & 0x1) << 28)
+#define   G_000E40_RBBM_HIBUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_000E40_RBBM_HIBUSY                         0xEFFFFFFF
+#define   S_000E40_SKID_CFBUSY(x)                      (((x) & 0x1) << 29)
+#define   G_000E40_SKID_CFBUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_000E40_SKID_CFBUSY                         0xDFFFFFFF
+#define   S_000E40_VAP_VF_BUSY(x)                      (((x) & 0x1) << 30)
+#define   G_000E40_VAP_VF_BUSY(x)                      (((x) >> 30) & 0x1)
+#define   C_000E40_VAP_VF_BUSY                         0xBFFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_006080_D1CRTC_CONTROL                      0x006080
+#define   S_006080_D1CRTC_MASTER_EN(x)                 (((x) & 0x1) << 0)
+#define   G_006080_D1CRTC_MASTER_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_006080_D1CRTC_MASTER_EN                    0xFFFFFFFE
+#define   S_006080_D1CRTC_SYNC_RESET_SEL(x)            (((x) & 0x1) << 4)
+#define   G_006080_D1CRTC_SYNC_RESET_SEL(x)            (((x) >> 4) & 0x1)
+#define   C_006080_D1CRTC_SYNC_RESET_SEL               0xFFFFFFEF
+#define   S_006080_D1CRTC_DISABLE_POINT_CNTL(x)        (((x) & 0x3) << 8)
+#define   G_006080_D1CRTC_DISABLE_POINT_CNTL(x)        (((x) >> 8) & 0x3)
+#define   C_006080_D1CRTC_DISABLE_POINT_CNTL           0xFFFFFCFF
+#define   S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) & 0x1) << 16)
+#define   G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) >> 16) & 0x1)
+#define   C_006080_D1CRTC_CURRENT_MASTER_EN_STATE      0xFFFEFFFF
+#define   S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define   G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define   C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE    0xFEFFFFFF
+#define R_0060E8_D1CRTC_UPDATE_LOCK                  0x0060E8
+#define   S_0060E8_D1CRTC_UPDATE_LOCK(x)               (((x) & 0x1) << 0)
+#define   G_0060E8_D1CRTC_UPDATE_LOCK(x)               (((x) >> 0) & 0x1)
+#define   C_0060E8_D1CRTC_UPDATE_LOCK                  0xFFFFFFFE
+#define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS      0x006110
+#define   S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) & 0xFFFFFFFF) << 0)
+#define   G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS      0x00000000
+#define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS    0x006118
+#define   S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define   G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS    0x00000000
+#define R_006880_D2CRTC_CONTROL                      0x006880
+#define   S_006880_D2CRTC_MASTER_EN(x)                 (((x) & 0x1) << 0)
+#define   G_006880_D2CRTC_MASTER_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_006880_D2CRTC_MASTER_EN                    0xFFFFFFFE
+#define   S_006880_D2CRTC_SYNC_RESET_SEL(x)            (((x) & 0x1) << 4)
+#define   G_006880_D2CRTC_SYNC_RESET_SEL(x)            (((x) >> 4) & 0x1)
+#define   C_006880_D2CRTC_SYNC_RESET_SEL               0xFFFFFFEF
+#define   S_006880_D2CRTC_DISABLE_POINT_CNTL(x)        (((x) & 0x3) << 8)
+#define   G_006880_D2CRTC_DISABLE_POINT_CNTL(x)        (((x) >> 8) & 0x3)
+#define   C_006880_D2CRTC_DISABLE_POINT_CNTL           0xFFFFFCFF
+#define   S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) & 0x1) << 16)
+#define   G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) >> 16) & 0x1)
+#define   C_006880_D2CRTC_CURRENT_MASTER_EN_STATE      0xFFFEFFFF
+#define   S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define   G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define   C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE    0xFEFFFFFF
+#define R_0068E8_D2CRTC_UPDATE_LOCK                  0x0068E8
+#define   S_0068E8_D2CRTC_UPDATE_LOCK(x)               (((x) & 0x1) << 0)
+#define   G_0068E8_D2CRTC_UPDATE_LOCK(x)               (((x) >> 0) & 0x1)
+#define   C_0068E8_D2CRTC_UPDATE_LOCK                  0xFFFFFFFE
+#define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS      0x006910
+#define   S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) & 0xFFFFFFFF) << 0)
+#define   G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS      0x00000000
+#define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS    0x006918
+#define   S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define   G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS    0x00000000
+
+
+#define R_000001_MC_FB_LOCATION                      0x000001
+#define   S_000001_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000001_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000001_MC_FB_START                         0xFFFF0000
+#define   S_000001_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000001_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000001_MC_FB_TOP                           0x0000FFFF
+#define R_000002_MC_AGP_LOCATION                     0x000002
+#define   S_000002_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000002_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000002_MC_AGP_START                        0xFFFF0000
+#define   S_000002_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000002_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000002_MC_AGP_TOP                          0x0000FFFF
+#define R_000003_MC_AGP_BASE                         0x000003
+#define   S_000003_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000003_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000003_AGP_BASE_ADDR                       0x00000000
+#define R_000004_MC_AGP_BASE_2                       0x000004
+#define   S_000004_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000004_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000004_AGP_BASE_ADDR_2                     0xFFFFFFF0
 
+
+#define R_00000F_CP_DYN_CNTL                         0x00000F
+#define   S_00000F_CP_FORCEON(x)                       (((x) & 0x1) << 0)
+#define   G_00000F_CP_FORCEON(x)                       (((x) >> 0) & 0x1)
+#define   C_00000F_CP_FORCEON                          0xFFFFFFFE
+#define   S_00000F_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 1)
+#define   G_00000F_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 1) & 0x1)
+#define   C_00000F_CP_MAX_DYN_STOP_LAT                 0xFFFFFFFD
+#define   S_00000F_CP_CLOCK_STATUS(x)                  (((x) & 0x1) << 2)
+#define   G_00000F_CP_CLOCK_STATUS(x)                  (((x) >> 2) & 0x1)
+#define   C_00000F_CP_CLOCK_STATUS                     0xFFFFFFFB
+#define   S_00000F_CP_PROG_SHUTOFF(x)                  (((x) & 0x1) << 3)
+#define   G_00000F_CP_PROG_SHUTOFF(x)                  (((x) >> 3) & 0x1)
+#define   C_00000F_CP_PROG_SHUTOFF                     0xFFFFFFF7
+#define   S_00000F_CP_PROG_DELAY_VALUE(x)              (((x) & 0xFF) << 4)
+#define   G_00000F_CP_PROG_DELAY_VALUE(x)              (((x) >> 4) & 0xFF)
+#define   C_00000F_CP_PROG_DELAY_VALUE                 0xFFFFF00F
+#define   S_00000F_CP_LOWER_POWER_IDLE(x)              (((x) & 0xFF) << 12)
+#define   G_00000F_CP_LOWER_POWER_IDLE(x)              (((x) >> 12) & 0xFF)
+#define   C_00000F_CP_LOWER_POWER_IDLE                 0xFFF00FFF
+#define   S_00000F_CP_LOWER_POWER_IGNORE(x)            (((x) & 0x1) << 20)
+#define   G_00000F_CP_LOWER_POWER_IGNORE(x)            (((x) >> 20) & 0x1)
+#define   C_00000F_CP_LOWER_POWER_IGNORE               0xFFEFFFFF
+#define   S_00000F_CP_NORMAL_POWER_IGNORE(x)           (((x) & 0x1) << 21)
+#define   G_00000F_CP_NORMAL_POWER_IGNORE(x)           (((x) >> 21) & 0x1)
+#define   C_00000F_CP_NORMAL_POWER_IGNORE              0xFFDFFFFF
+#define   S_00000F_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_00000F_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_00000F_SPARE                               0xFF3FFFFF
+#define   S_00000F_CP_NORMAL_POWER_BUSY(x)             (((x) & 0xFF) << 24)
+#define   G_00000F_CP_NORMAL_POWER_BUSY(x)             (((x) >> 24) & 0xFF)
+#define   C_00000F_CP_NORMAL_POWER_BUSY                0x00FFFFFF
+#define R_000011_E2_DYN_CNTL                         0x000011
+#define   S_000011_E2_FORCEON(x)                       (((x) & 0x1) << 0)
+#define   G_000011_E2_FORCEON(x)                       (((x) >> 0) & 0x1)
+#define   C_000011_E2_FORCEON                          0xFFFFFFFE
+#define   S_000011_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 1)
+#define   G_000011_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 1) & 0x1)
+#define   C_000011_E2_MAX_DYN_STOP_LAT                 0xFFFFFFFD
+#define   S_000011_E2_CLOCK_STATUS(x)                  (((x) & 0x1) << 2)
+#define   G_000011_E2_CLOCK_STATUS(x)                  (((x) >> 2) & 0x1)
+#define   C_000011_E2_CLOCK_STATUS                     0xFFFFFFFB
+#define   S_000011_E2_PROG_SHUTOFF(x)                  (((x) & 0x1) << 3)
+#define   G_000011_E2_PROG_SHUTOFF(x)                  (((x) >> 3) & 0x1)
+#define   C_000011_E2_PROG_SHUTOFF                     0xFFFFFFF7
+#define   S_000011_E2_PROG_DELAY_VALUE(x)              (((x) & 0xFF) << 4)
+#define   G_000011_E2_PROG_DELAY_VALUE(x)              (((x) >> 4) & 0xFF)
+#define   C_000011_E2_PROG_DELAY_VALUE                 0xFFFFF00F
+#define   S_000011_E2_LOWER_POWER_IDLE(x)              (((x) & 0xFF) << 12)
+#define   G_000011_E2_LOWER_POWER_IDLE(x)              (((x) >> 12) & 0xFF)
+#define   C_000011_E2_LOWER_POWER_IDLE                 0xFFF00FFF
+#define   S_000011_E2_LOWER_POWER_IGNORE(x)            (((x) & 0x1) << 20)
+#define   G_000011_E2_LOWER_POWER_IGNORE(x)            (((x) >> 20) & 0x1)
+#define   C_000011_E2_LOWER_POWER_IGNORE               0xFFEFFFFF
+#define   S_000011_E2_NORMAL_POWER_IGNORE(x)           (((x) & 0x1) << 21)
+#define   G_000011_E2_NORMAL_POWER_IGNORE(x)           (((x) >> 21) & 0x1)
+#define   C_000011_E2_NORMAL_POWER_IGNORE              0xFFDFFFFF
+#define   S_000011_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_000011_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_000011_SPARE                               0xFF3FFFFF
+#define   S_000011_E2_NORMAL_POWER_BUSY(x)             (((x) & 0xFF) << 24)
+#define   G_000011_E2_NORMAL_POWER_BUSY(x)             (((x) >> 24) & 0xFF)
+#define   C_000011_E2_NORMAL_POWER_BUSY                0x00FFFFFF
+#define R_000013_IDCT_DYN_CNTL                       0x000013
+#define   S_000013_IDCT_FORCEON(x)                     (((x) & 0x1) << 0)
+#define   G_000013_IDCT_FORCEON(x)                     (((x) >> 0) & 0x1)
+#define   C_000013_IDCT_FORCEON                        0xFFFFFFFE
+#define   S_000013_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 1)
+#define   G_000013_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 1) & 0x1)
+#define   C_000013_IDCT_MAX_DYN_STOP_LAT               0xFFFFFFFD
+#define   S_000013_IDCT_CLOCK_STATUS(x)                (((x) & 0x1) << 2)
+#define   G_000013_IDCT_CLOCK_STATUS(x)                (((x) >> 2) & 0x1)
+#define   C_000013_IDCT_CLOCK_STATUS                   0xFFFFFFFB
+#define   S_000013_IDCT_PROG_SHUTOFF(x)                (((x) & 0x1) << 3)
+#define   G_000013_IDCT_PROG_SHUTOFF(x)                (((x) >> 3) & 0x1)
+#define   C_000013_IDCT_PROG_SHUTOFF                   0xFFFFFFF7
+#define   S_000013_IDCT_PROG_DELAY_VALUE(x)            (((x) & 0xFF) << 4)
+#define   G_000013_IDCT_PROG_DELAY_VALUE(x)            (((x) >> 4) & 0xFF)
+#define   C_000013_IDCT_PROG_DELAY_VALUE               0xFFFFF00F
+#define   S_000013_IDCT_LOWER_POWER_IDLE(x)            (((x) & 0xFF) << 12)
+#define   G_000013_IDCT_LOWER_POWER_IDLE(x)            (((x) >> 12) & 0xFF)
+#define   C_000013_IDCT_LOWER_POWER_IDLE               0xFFF00FFF
+#define   S_000013_IDCT_LOWER_POWER_IGNORE(x)          (((x) & 0x1) << 20)
+#define   G_000013_IDCT_LOWER_POWER_IGNORE(x)          (((x) >> 20) & 0x1)
+#define   C_000013_IDCT_LOWER_POWER_IGNORE             0xFFEFFFFF
+#define   S_000013_IDCT_NORMAL_POWER_IGNORE(x)         (((x) & 0x1) << 21)
+#define   G_000013_IDCT_NORMAL_POWER_IGNORE(x)         (((x) >> 21) & 0x1)
+#define   C_000013_IDCT_NORMAL_POWER_IGNORE            0xFFDFFFFF
+#define   S_000013_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_000013_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_000013_SPARE                               0xFF3FFFFF
+#define   S_000013_IDCT_NORMAL_POWER_BUSY(x)           (((x) & 0xFF) << 24)
+#define   G_000013_IDCT_NORMAL_POWER_BUSY(x)           (((x) >> 24) & 0xFF)
+#define   C_000013_IDCT_NORMAL_POWER_BUSY              0x00FFFFFF
+
+#endif
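
The register blocks above all follow the same three-macro convention: S_<reg>_<field>(x) shifts a value into the field, G_<reg>_<field>(x) extracts it from a raw register value, and C_<reg>_<field> is the inverted mask used to clear the field first. A minimal sketch of how the three compose into a read-modify-write, using the D1CRTC_CONTROL definitions from this header; reg_read()/reg_write() are stand-in MMIO accessors, not the driver's actual RREG32/WREG32 wrappers:

#include <stdint.h>

uint32_t reg_read(uint32_t offset);               /* placeholder MMIO read  */
void reg_write(uint32_t offset, uint32_t value);  /* placeholder MMIO write */

static void d1crtc_set_master_en(uint32_t enable)
{
        uint32_t tmp = reg_read(R_006080_D1CRTC_CONTROL);

        tmp &= C_006080_D1CRTC_MASTER_EN;           /* clear the field      */
        tmp |= S_006080_D1CRTC_MASTER_EN(enable);   /* shift new value in   */
        reg_write(R_006080_D1CRTC_CONTROL, tmp);

        /* the G_ form pulls a field back out of a raw register value */
        (void)G_006080_D1CRTC_MASTER_EN(tmp);
}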
index b574c73a51092ff16f6878166ea0f55f55de11e5..595ac638039d87a1ee2e423d2304ccdbc833a112 100644 (file)
@@ -31,8 +31,8 @@
 #include "radeon.h"
 #include "radeon_drm.h"
 #include "rv770d.h"
-#include "avivod.h"
 #include "atom.h"
+#include "avivod.h"
 
 #define R700_PFP_UCODE_SIZE 848
 #define R700_PM4_UCODE_SIZE 1360
@@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -126,17 +126,36 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev)
 }
 
 
-/*
- * MC
- */
-static void rv770_mc_resume(struct radeon_device *rdev)
+void rv770_agp_enable(struct radeon_device *rdev)
+{
+       u32 tmp;
+       int i;
+
+       /* Setup L2 cache */
+       WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+                               ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+                               EFFECTIVE_L2_QUEUE_SIZE(7));
+       WREG32(VM_L2_CNTL2, 0);
+       WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+       /* Setup TLB control */
+       tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+               SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+               SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+               EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+       WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+       WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+       WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+       WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+       WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+       WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+       WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+       for (i = 0; i < 7; i++)
+               WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void rv770_mc_program(struct radeon_device *rdev)
 {
-       u32 d1vga_control, d2vga_control;
-       u32 vga_render_control, vga_hdp_control;
-       u32 d1crtc_control, d2crtc_control;
-       u32 new_d1grph_primary, new_d1grph_secondary;
-       u32 new_d2grph_primary, new_d2grph_secondary;
-       u64 old_vram_start;
+       struct rv515_mc_save save;
        u32 tmp;
        int i, j;
 
@@ -150,53 +169,42 @@ static void rv770_mc_resume(struct radeon_device *rdev)
        }
        WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
-       d1vga_control = RREG32(D1VGA_CONTROL);
-       d2vga_control = RREG32(D2VGA_CONTROL);
-       vga_render_control = RREG32(VGA_RENDER_CONTROL);
-       vga_hdp_control = RREG32(VGA_HDP_CONTROL);
-       d1crtc_control = RREG32(D1CRTC_CONTROL);
-       d2crtc_control = RREG32(D2CRTC_CONTROL);
-       old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
-       new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
-       new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
-       new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
-       new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
-       new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
-       new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
-       new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
-       new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
-
-       /* Stop all video */
-       WREG32(D1VGA_CONTROL, 0);
-       WREG32(D2VGA_CONTROL, 0);
-       WREG32(VGA_RENDER_CONTROL, 0);
-       WREG32(D1CRTC_UPDATE_LOCK, 1);
-       WREG32(D2CRTC_UPDATE_LOCK, 1);
-       WREG32(D1CRTC_CONTROL, 0);
-       WREG32(D2CRTC_CONTROL, 0);
-       WREG32(D1CRTC_UPDATE_LOCK, 0);
-       WREG32(D2CRTC_UPDATE_LOCK, 0);
-
-       mdelay(1);
+       rv515_mc_stop(rdev, &save);
        if (r600_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "[drm] MC not idle !\n");
+               dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
-
        /* Lockout access through VGA aperture*/
        WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
-
        /* Update configuration */
-       WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
-       WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
+       if (rdev->flags & RADEON_IS_AGP) {
+               if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+                       /* VRAM before AGP */
+                       WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+                               rdev->mc.vram_start >> 12);
+                       WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+                               rdev->mc.gtt_end >> 12);
+               } else {
+                       /* VRAM after AGP */
+                       WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+                               rdev->mc.gtt_start >> 12);
+                       WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+                               rdev->mc.vram_end >> 12);
+               }
+       } else {
+               WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+                       rdev->mc.vram_start >> 12);
+               WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+                       rdev->mc.vram_end >> 12);
+       }
        WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
-       tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
+       tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
        tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
        WREG32(MC_VM_FB_LOCATION, tmp);
        WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
        WREG32(HDP_NONSURFACE_INFO, (2 << 7));
        WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
        if (rdev->flags & RADEON_IS_AGP) {
-               WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
+               WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
                WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
                WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
        } else {
@@ -204,34 +212,13 @@ static void rv770_mc_resume(struct radeon_device *rdev)
                WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
                WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
        }
-       WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
-       WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
-       WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
-       WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
-       WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-
-       /* Unlock host access */
-       WREG32(VGA_HDP_CONTROL, vga_hdp_control);
-
-       mdelay(1);
        if (r600_mc_wait_for_idle(rdev)) {
-               printk(KERN_WARNING "[drm] MC not idle !\n");
+               dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
        }
-
-       /* Restore video state */
-       WREG32(D1CRTC_UPDATE_LOCK, 1);
-       WREG32(D2CRTC_UPDATE_LOCK, 1);
-       WREG32(D1CRTC_CONTROL, d1crtc_control);
-       WREG32(D2CRTC_CONTROL, d2crtc_control);
-       WREG32(D1CRTC_UPDATE_LOCK, 0);
-       WREG32(D2CRTC_UPDATE_LOCK, 0);
-       WREG32(D1VGA_CONTROL, d1vga_control);
-       WREG32(D2VGA_CONTROL, d2vga_control);
-       WREG32(VGA_RENDER_CONTROL, vga_render_control);
-
+       rv515_mc_resume(rdev, &save);
        /* we need to own VRAM, so turn off the VGA renderer here
         * to stop it overwriting our objects */
-       radeon_avivo_vga_render_disable(rdev);
+       rv515_vga_render_disable(rdev);
 }
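
The AGP branch added above exists because the system aperture has to span both VRAM and GTT when AGP is in use, so the low/high bounds come from whichever range starts first and ends last. A simplified sketch of that selection; the struct is a stand-in for the relevant radeon_mc fields, not the driver's actual type:

#include <stdint.h>

struct mc_layout {
        uint64_t vram_start, vram_end;   /* inclusive end addresses */
        uint64_t gtt_start, gtt_end;
        int is_agp;
};

static void system_aperture_bounds(const struct mc_layout *mc,
                                   uint64_t *low, uint64_t *high)
{
        if (mc->is_agp) {
                if (mc->vram_start < mc->gtt_start) {
                        *low  = mc->vram_start;  /* VRAM before AGP */
                        *high = mc->gtt_end;
                } else {
                        *low  = mc->gtt_start;   /* VRAM after AGP  */
                        *high = mc->vram_end;
                }
        } else {
                *low  = mc->vram_start;          /* VRAM only       */
                *high = mc->vram_end;
        }
}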
 
 
@@ -801,6 +788,13 @@ int rv770_mc_init(struct radeon_device *rdev)
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+
+       if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+               rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+       if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+               rdev->mc.real_vram_size = rdev->mc.aper_size;
+
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r)
@@ -833,9 +827,9 @@ int rv770_mc_init(struct radeon_device *rdev)
                rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        }
        rdev->mc.vram_start = rdev->mc.vram_location;
-       rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+       rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
        rdev->mc.gtt_start = rdev->mc.gtt_location;
-       rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
+       rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
        /* FIXME: we should enforce default clock in case GPU is not in
         * default setup
         */
@@ -854,11 +848,14 @@ static int rv770_startup(struct radeon_device *rdev)
 {
        int r;
 
-       radeon_gpu_reset(rdev);
-       rv770_mc_resume(rdev);
-       r = rv770_pcie_gart_enable(rdev);
-       if (r)
-               return r;
+       rv770_mc_program(rdev);
+       if (rdev->flags & RADEON_IS_AGP) {
+               rv770_agp_enable(rdev);
+       } else {
+               r = rv770_pcie_gart_enable(rdev);
+               if (r)
+                       return r;
+       }
        rv770_gpu_init(rdev);
 
        r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -877,9 +874,8 @@ static int rv770_startup(struct radeon_device *rdev)
        r = r600_cp_resume(rdev);
        if (r)
                return r;
-       r = r600_wb_init(rdev);
-       if (r)
-               return r;
+       /* write back buffers are not vital, so don't worry about failure */
+       r600_wb_enable(rdev);
        return 0;
 }
 
@@ -887,15 +883,12 @@ int rv770_resume(struct radeon_device *rdev)
 {
        int r;
 
-       if (radeon_gpu_reset(rdev)) {
-               /* FIXME: what do we want to do here ? */
-       }
+       /* Do not reset the GPU before posting: on rv770 hardware, unlike
+        * on r500, posting performs the necessary tasks to bring the GPU
+        * back into good shape.
+        */
        /* post card */
-       if (rdev->is_atom_bios) {
-               atom_asic_init(rdev->mode_info.atom_context);
-       } else {
-               radeon_combios_asic_init(rdev->ddev);
-       }
+       atom_asic_init(rdev->mode_info.atom_context);
        /* Initialize clocks */
        r = radeon_clocks_init(rdev);
        if (r) {
@@ -908,7 +901,7 @@ int rv770_resume(struct radeon_device *rdev)
                return r;
        }
 
-       r = radeon_ib_test(rdev);
+       r = r600_ib_test(rdev);
        if (r) {
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                return r;
@@ -922,8 +915,8 @@ int rv770_suspend(struct radeon_device *rdev)
        /* FIXME: we should wait for ring to be empty */
        r700_cp_stop(rdev);
        rdev->cp.ready = false;
+       r600_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
-
        /* unpin shaders bo */
         radeon_object_unpin(rdev->r600_blit.shader_obj);
        return 0;
@@ -939,7 +932,6 @@ int rv770_init(struct radeon_device *rdev)
 {
        int r;
 
-       rdev->new_init_path = true;
        r = radeon_dummy_page_init(rdev);
        if (r)
                return r;
@@ -953,8 +945,10 @@ int rv770_init(struct radeon_device *rdev)
                        return -EINVAL;
        }
        /* Must be an ATOMBIOS */
-       if (!rdev->is_atom_bios)
+       if (!rdev->is_atom_bios) {
+               dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
                return -EINVAL;
+       }
        r = radeon_atombios_init(rdev);
        if (r)
                return r;
@@ -976,15 +970,8 @@ int rv770_init(struct radeon_device *rdev)
        if (r)
                return r;
        r = rv770_mc_init(rdev);
-       if (r) {
-               if (rdev->flags & RADEON_IS_AGP) {
-                       /* Retry with disabling AGP */
-                       rv770_fini(rdev);
-                       rdev->flags &= ~RADEON_IS_AGP;
-                       return rv770_init(rdev);
-               }
+       if (r)
                return r;
-       }
        /* Memory manager */
        r = radeon_object_init(rdev);
        if (r)
@@ -1013,12 +1000,10 @@ int rv770_init(struct radeon_device *rdev)
 
        r = rv770_startup(rdev);
        if (r) {
-               if (rdev->flags & RADEON_IS_AGP) {
-                       /* Retry with disabling AGP */
-                       rv770_fini(rdev);
-                       rdev->flags &= ~RADEON_IS_AGP;
-                       return rv770_init(rdev);
-               }
+               rv770_suspend(rdev);
+               r600_wb_fini(rdev);
+               radeon_ring_fini(rdev);
+               rv770_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
        if (rdev->accel_working) {
@@ -1027,7 +1012,7 @@ int rv770_init(struct radeon_device *rdev)
                        DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
                        rdev->accel_working = false;
                }
-               r = radeon_ib_test(rdev);
+               r = r600_ib_test(rdev);
                if (r) {
                        DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                        rdev->accel_working = false;
@@ -1042,20 +1027,15 @@ void rv770_fini(struct radeon_device *rdev)
 
        r600_blit_fini(rdev);
        radeon_ring_fini(rdev);
+       r600_wb_fini(rdev);
        rv770_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_clocks_fini(rdev);
-#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP)
                radeon_agp_fini(rdev);
-#endif
        radeon_object_fini(rdev);
-       if (rdev->is_atom_bios) {
-               radeon_atombios_fini(rdev);
-       } else {
-               radeon_combios_fini(rdev);
-       }
+       radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
        radeon_dummy_page_fini(rdev);
index 541744d00d3e549b0d6665eb0d9c727522f867bc..b17007178a36e57bb59c82e8f3378fe9f5741d29 100644 (file)
@@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
                if (unlikely(ret != 0))
                        goto out_err;
 
-               ++item->refcount;
        }
+       ++item->refcount;
        ref->object = item->object;
        object = item->object;
        mutex_unlock(&item->mutex);
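
The one-line move above is a reference-counting fix: the count must grow for every caller that takes a reference, not only for the first caller that creates the object, otherwise the second unref would free an object still in use. The corrected pattern in isolation (locking and the TTM-specific types are omitted; init is a simplified stand-in for the object constructor):

struct refcounted_item {
        void *object;
        int refcount;
};

static int item_ref(struct refcounted_item *item,
                    int (*init)(struct refcounted_item *))
{
        if (item->refcount == 0) {      /* first user creates the object */
                int ret = init(item);
                if (ret != 0)
                        return ret;
        }
        ++item->refcount;               /* counted for every user        */
        return 0;
}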
index 0c6639ea03dd1748076dd8742035f1f154603cd9..ba05275e5104f7d30771254b82e3f35a48017bfa 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/major.h>
 #include <linux/hid.h>
 #include <linux/mutex.h>
+#include <linux/sched.h>
 #include <linux/smp_lock.h>
 
 #include <linux/hidraw.h>
index ea955edde87e5127fc4c7f57d7d0c0b51ed3b1e4..2a7a85a6dc360e9615725fc7be5c25b0f845799c 100644 (file)
@@ -915,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
        return ret;
 }
 
-static struct file_operations watchdog_fops = {
+static const struct file_operations watchdog_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .open = watchdog_open,
index ecd739534f6a4a76c674fd5323daa8a30fcaf2a7..82b16808a274c1e0045692fcfc32dcfdb641ec31 100644 (file)
@@ -83,7 +83,8 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
        struct lis3lv02d *lis3 = spi_get_drvdata(spi);
        lis3lv02d_joystick_disable();
        lis3lv02d_poweroff(lis3);
-       return 0;
+
+       return lis3lv02d_remove_fs(&lis3_dev);
 }
 
 #ifdef CONFIG_PM
index 6c9a04136e0aefb9a1b4430146b1453ed9acbe3f..00d975eb5b83dcc86ddd8d5f5b6846c9d370f6e6 100644 (file)
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(ltc4215);
-
 /* Here are names of the chip's registers (a.k.a. commands) */
 enum ltc4215_cmd {
        LTC4215_CONTROL                 = 0x00, /* rw */
@@ -246,9 +241,13 @@ static const struct attribute_group ltc4215_group = {
 static int ltc4215_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
+       struct i2c_adapter *adapter = client->adapter;
        struct ltc4215_data *data;
        int ret;
 
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+               return -ENODEV;
+
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
@@ -294,56 +293,20 @@ static int ltc4215_remove(struct i2c_client *client)
        return 0;
 }
 
-static int ltc4215_detect(struct i2c_client *client,
-                         int kind,
-                         struct i2c_board_info *info)
-{
-       struct i2c_adapter *adapter = client->adapter;
-
-       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-               return -ENODEV;
-
-       if (kind < 0) {         /* probed detection - check the chip type */
-               s32 v;          /* 8 bits from the chip, or -ERRNO */
-
-               /*
-                * Register 0x01 bit b7 is reserved, expect 0
-                * Register 0x03 bit b6 and b7 are reserved, expect 0
-                */
-               v = i2c_smbus_read_byte_data(client, LTC4215_ALERT);
-               if (v < 0 || (v & (1 << 7)) != 0)
-                       return -ENODEV;
-
-               v = i2c_smbus_read_byte_data(client, LTC4215_FAULT);
-               if (v < 0 || (v & ((1 << 6) | (1 << 7))) != 0)
-                               return -ENODEV;
-       }
-
-       strlcpy(info->type, "ltc4215", I2C_NAME_SIZE);
-       dev_info(&adapter->dev, "ltc4215 %s at address 0x%02x\n",
-                       kind < 0 ? "probed" : "forced",
-                       client->addr);
-
-       return 0;
-}
-
 static const struct i2c_device_id ltc4215_id[] = {
-       { "ltc4215", ltc4215 },
+       { "ltc4215", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, ltc4215_id);
 
 /* This is the driver that will be inserted */
 static struct i2c_driver ltc4215_driver = {
-       .class          = I2C_CLASS_HWMON,
        .driver = {
                .name   = "ltc4215",
        },
        .probe          = ltc4215_probe,
        .remove         = ltc4215_remove,
        .id_table       = ltc4215_id,
-       .detect         = ltc4215_detect,
-       .address_data   = &addr_data,
 };
 
 static int __init ltc4215_init(void)
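
With the detect callback, I2C_CLASS_HWMON and the address list removed, the chip is no longer auto-probed; it has to be instantiated explicitly, typically from a board file. A hypothetical sketch of that side of the contract -- the bus number and the 0x44 address are illustrative assumptions, not taken from the patch:

#include <linux/init.h>
#include <linux/i2c.h>

/* board setup code (hypothetical): declare the chip on bus 1 at 0x44 */
static struct i2c_board_info ltc4215_info __initdata = {
        I2C_BOARD_INFO("ltc4215", 0x44),
};

static int __init example_board_i2c_init(void)
{
        /* must run before the adapter for bus 1 is registered */
        return i2c_register_board_info(1, &ltc4215_info, 1);
}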
index e389643336126897d9abd8adf08cfe3464425712..65c232a9d0c5a917b533e2aeac34c2c13aa4fba5 100644 (file)
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 
-/* Valid addresses are 0x20 - 0x3f
- *
- * For now, we do not probe, since some of these addresses
- * are known to be unfriendly to probing */
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(ltc4245);
-
 /* Here are names of the chip's registers (a.k.a. commands) */
 enum ltc4245_cmd {
        LTC4245_STATUS                  = 0x00, /* readonly */
@@ -369,9 +360,13 @@ static const struct attribute_group ltc4245_group = {
 static int ltc4245_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
+       struct i2c_adapter *adapter = client->adapter;
        struct ltc4245_data *data;
        int ret;
 
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+               return -ENODEV;
+
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
@@ -418,136 +413,20 @@ static int ltc4245_remove(struct i2c_client *client)
        return 0;
 }
 
-/* Check that some bits in a control register appear at all possible
- * locations without changing value
- *
- * @client: the i2c client to use
- * @reg: the register to read
- * @bits: the bits to check (0xff checks all bits,
- *                           0x03 checks only the last two bits)
- *
- * return -ERRNO if the register read failed
- * return -ENODEV if the register value doesn't stay constant at all
- * possible addresses
- *
- * return 0 for success
- */
-static int ltc4245_check_control_reg(struct i2c_client *client, u8 reg, u8 bits)
-{
-       int i;
-       s32 v, voff1, voff2;
-
-       /* Read register and check for error */
-       v = i2c_smbus_read_byte_data(client, reg);
-       if (v < 0)
-               return v;
-
-       v &= bits;
-
-       for (i = 0x00; i < 0xff; i += 0x20) {
-
-               voff1 = i2c_smbus_read_byte_data(client, reg + i);
-               if (voff1 < 0)
-                       return voff1;
-
-               voff2 = i2c_smbus_read_byte_data(client, reg + i + 0x08);
-               if (voff2 < 0)
-                       return voff2;
-
-               voff1 &= bits;
-               voff2 &= bits;
-
-               if (v != voff1 || v != voff2)
-                       return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int ltc4245_detect(struct i2c_client *client,
-                         int kind,
-                         struct i2c_board_info *info)
-{
-       struct i2c_adapter *adapter = client->adapter;
-
-       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-               return -ENODEV;
-
-       if (kind < 0) {         /* probed detection - check the chip type */
-               s32 v;          /* 8 bits from the chip, or -ERRNO */
-
-               /* Chip registers 0x00-0x07 are control registers
-                * Chip registers 0x10-0x1f are data registers
-                *
-                * Address bits b7-b5 are ignored. This makes the chip "repeat"
-                * in steps of 0x20. Any control registers should appear with
-                * the same values across all duplicated addresses.
-                *
-                * Register 0x02 bit b2 is reserved, expect 0
-                * Register 0x07 bits b7 to b4 are reserved, expect 0
-                *
-                * Registers 0x01, 0x02 are control registers and should not
-                * change on their own.
-                *
-                * Register 0x06 bits b6 and b7 are control bits, and should
-                * not change on their own.
-                *
-                * Register 0x07 bits b3 to b0 are control bits, and should
-                * not change on their own.
-                */
-
-               /* read register 0x02 reserved bit, expect 0 */
-               v = i2c_smbus_read_byte_data(client, LTC4245_CONTROL);
-               if (v < 0 || (v & 0x04) != 0)
-                       return -ENODEV;
-
-               /* read register 0x07 reserved bits, expect 0 */
-               v = i2c_smbus_read_byte_data(client, LTC4245_ADCADR);
-               if (v < 0 || (v & 0xf0) != 0)
-                       return -ENODEV;
-
-               /* check that the alert register appears at all locations */
-               if (ltc4245_check_control_reg(client, LTC4245_ALERT, 0xff))
-                       return -ENODEV;
-
-               /* check that the control register appears at all locations */
-               if (ltc4245_check_control_reg(client, LTC4245_CONTROL, 0xff))
-                       return -ENODEV;
-
-               /* check that register 0x06 bits b6 and b7 stay constant */
-               if (ltc4245_check_control_reg(client, LTC4245_GPIO, 0xc0))
-                       return -ENODEV;
-
-               /* check that register 0x07 bits b3-b0 stay constant */
-               if (ltc4245_check_control_reg(client, LTC4245_ADCADR, 0x0f))
-                       return -ENODEV;
-       }
-
-       strlcpy(info->type, "ltc4245", I2C_NAME_SIZE);
-       dev_info(&adapter->dev, "ltc4245 %s at address 0x%02x\n",
-                       kind < 0 ? "probed" : "forced",
-                       client->addr);
-
-       return 0;
-}
-
 static const struct i2c_device_id ltc4245_id[] = {
-       { "ltc4245", ltc4245 },
+       { "ltc4245", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, ltc4245_id);
 
 /* This is the driver that will be inserted */
 static struct i2c_driver ltc4245_driver = {
-       .class          = I2C_CLASS_HWMON,
        .driver = {
                .name   = "ltc4245",
        },
        .probe          = ltc4245_probe,
        .remove         = ltc4245_remove,
        .id_table       = ltc4245_id,
-       .detect         = ltc4245_detect,
-       .address_data   = &addr_data,
 };
 
 static int __init ltc4245_init(void)
index f7d6fe9c49baa3724c38463448acbcb5a4384cb4..8f0b90ef8c76fe9e76e1c2101d841004690963e4 100644 (file)
@@ -364,7 +364,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
        error = acpi_check_region(amd756_ioport, SMB_IOSIZE,
                                  amd756_driver.name);
        if (error)
-               return error;
+               return -ENODEV;
 
        if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) {
                dev_err(&pdev->dev, "SMB region 0x%x already in use!\n",
index a7c59908c457cbeb23937904dc6d6a76a05e060c..5b4ad86ca166f2bdcec9e63f8fa02947f2bfd83c 100644 (file)
@@ -376,8 +376,10 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
        smbus->size = pci_resource_len(dev, 0);
 
        error = acpi_check_resource_conflict(&dev->resource[0]);
-       if (error)
+       if (error) {
+               error = -ENODEV;
                goto out_kfree;
+       }
 
        if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) {
                error = -EBUSY;
index 9d2c5adf5d4fbbcf06c8baf68d98cb01c5b93706..55edcfe5b851abe5676d1d3c857eed93c233e458 100644 (file)
@@ -732,8 +732,10 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
        }
 
        err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
-       if (err)
+       if (err) {
+               err = -ENODEV;
                goto exit;
+       }
 
        err = pci_request_region(dev, SMBBAR, i801_driver.name);
        if (err) {
index 9f6b8e0f8632af44c2eb4c49667277e0656bec7b..dba6eb053e2facc6f1393abe41ac5b296b9127aa 100644 (file)
@@ -281,7 +281,7 @@ static int __devinit sch_probe(struct pci_dev *dev,
                return -ENODEV;
        }
        if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name))
-               return -EBUSY;
+               return -ENODEV;
        if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) {
                dev_err(&dev->dev, "SMBus region 0x%x already in use!\n",
                        sch_smba);
index a782c7a08f9e1256d40430bf15d050d30791375b..d26a972aacaad8b0eff3f850b46fa5ed418d0440 100644 (file)
@@ -169,7 +169,7 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
        }
 
        if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
-               return -EBUSY;
+               return -ENODEV;
 
        if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
                dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
@@ -260,7 +260,7 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 
        piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
        if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
-               return -EBUSY;
+               return -ENODEV;
 
        if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
                dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
index 8295885b2fdb9417c18f19becc3a3ce045240d18..1649963b00dcfef12861868dfa5715547f9bfc7a 100644 (file)
@@ -280,7 +280,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev,
 
        retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]);
        if (retval)
-               return retval;
+               return -ENODEV;
 
        /* Everything is happy, let's grab the memory and set things up. */
        if (!request_region(sis96x_smbus_base, SMB_IOSIZE,
index 54d810a4d00f86aa3ff687294e235b47a0a4a7cf..e4b1543015af9cb5eeb44f40a676918c70fb83e8 100644 (file)
@@ -365,7 +365,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
 found:
        error = acpi_check_region(vt596_smba, 8, vt596_driver.name);
        if (error)
-               return error;
+               return -ENODEV;
 
        if (!request_region(vt596_smba, 8, vt596_driver.name)) {
                dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
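
The recurring change across the SMBus drivers above turns an ACPI resource conflict into -ENODEV: if ACPI owns the region, the driver should quietly step aside rather than report a hard failure. The intended shape of the probe path, sketched with a placeholder driver name:

#include <linux/acpi.h>
#include <linux/ioport.h>
#include <linux/errno.h>

static int example_claim_smbus_region(unsigned long base, int size)
{
        if (acpi_check_region(base, size, "example-smbus"))
                return -ENODEV;         /* region owned by ACPI: back off */

        if (!request_region(base, size, "example-smbus"))
                return -EBUSY;          /* genuinely busy: real error     */

        return 0;
}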
index 28d09a5d84500b588ffe862a6cfc0221bb5a3a1e..017c09540c2f70b416af3a39eaf122a5a49287f1 100644 (file)
@@ -273,14 +273,8 @@ static const struct ide_proc_devset ide_generic_settings[] = {
 
 static void proc_ide_settings_warn(void)
 {
-       static int warned;
-
-       if (warned)
-               return;
-
-       printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
+       printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
                            "obsolete, and will be removed soon!\n");
-       warned = 1;
 }
 
 static int ide_settings_proc_show(struct seq_file *m, void *v)
index afca22beaadfc3c279ed53693d31e0a98c48779c..3b88eba04c9c81fd33acd3f2b21196ac8a243a54 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (C) 1999-2000     Andre Hedrick <andre@linux-ide.org>
  * Copyright (C) 2002          Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
  * Copyright (C) 2003          Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (C) 2007          Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2009     Bartlomiej Zolnierkiewicz
  *
  * May be copied or modified under the terms of the GNU General Public License
  *
@@ -281,11 +281,13 @@ static void config_drive_art_rwp(ide_drive_t *drive)
 
        pci_read_config_byte(dev, 0x4b, &reg4bh);
 
+       rw_prefetch = reg4bh & ~(0x11 << drive->dn);
+
        if (drive->media == ide_disk)
-               rw_prefetch = 0x11 << drive->dn;
+               rw_prefetch |= 0x11 << drive->dn;
 
-       if ((reg4bh & (0x11 << drive->dn)) != rw_prefetch)
-               pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch);
+       if (reg4bh != rw_prefetch)
+               pci_write_config_byte(dev, 0x4b, rw_prefetch);
 }
 
 static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
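
The sis5513 hunk above replaces a write that could only ever set the prefetch bits with a proper read-modify-write that can also clear them, while leaving the other drive's bits in the shared byte untouched. The same logic isolated as a pure function (dn selects the per-drive bit pair, as in the driver):

#include <stdbool.h>
#include <stdint.h>

static uint8_t update_rw_prefetch(uint8_t reg4b, unsigned int dn, bool is_disk)
{
        uint8_t val = reg4b & ~(0x11 << dn);    /* drop this drive's bits  */

        if (is_disk)
                val |= 0x11 << dn;              /* prefetch only for disks */

        return val;     /* caller writes back only if val != reg4b */
}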
index 51bd9669cb1f7697c0be4e1b4b0c38dc4ee05499..f504c9b00c1ba631cbe7f27b80ad0859ab9f3a08 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/poll.h>
+#include <linux/sched.h>
 #include <linux/file.h>
 #include <linux/mount.h>
 #include <linux/cdev.h>
index 8c46f2257098cf2d7e01ba472c31a48dc5ffb77a..7de02969ed7d80bcf28b59abe9471253d9a9138f 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/mutex.h>
 #include <linux/kref.h>
 #include <linux/compat.h>
+#include <linux/sched.h>
 #include <linux/semaphore.h>
 
 #include <asm/uaccess.h>
index d3fff9e008a3e01f1f81795101738ae236b02dcb..aec0fbdfe7f016bba4126bbf89d64060783501b9 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/poll.h>
+#include <linux/sched.h>
 #include <linux/file.h>
 #include <linux/mount.h>
 #include <linux/cdev.h>
index 1148140d08a1faf72967dc3068e41c71488054d8..dee6706038aad7ad1519f551bd4d71f02fa6f9c1 100644 (file)
@@ -13,6 +13,7 @@
 #define EVDEV_BUFFER_SIZE      64
 
 #include <linux/poll.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/init.h>
index e828aab7daceada9eb8858fb19fa3fc755baa7ea..c6f88ebb40c766e21c17104cdbbe12563ed075a7 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/random.h>
 #include <linux/major.h>
 #include <linux/proc_fs.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 #include <linux/device.h>
@@ -1273,6 +1274,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
                }                                               \
        } while (0)
 
+#ifdef CONFIG_PM
 static void input_dev_reset(struct input_dev *dev, bool activate)
 {
        if (!dev->event)
@@ -1287,7 +1289,6 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
        }
 }
 
-#ifdef CONFIG_PM
 static int input_dev_suspend(struct device *dev)
 {
        struct input_dev *input_dev = to_input_dev(dev);
index 901b2525993e54f77c219cac9b1d55e3431219e5..b1bd6dd322864d0bd04c2f9a48c9f80b3217dcce 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/input.h>
 #include <linux/kernel.h>
 #include <linux/major.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/miscdevice.h>
index c5a49aba418f9232ff026dbce0a9d30db706bf1b..d3f57245420a008a412afa9b55dd4bd8f015ce8d 100644 (file)
@@ -30,6 +30,7 @@
  *             - first public version
  */
 #include <linux/poll.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/init.h>
index 966b8868f7924f6f73c276411cded8f29b155714..a13d80f7da17481772cc4e00f260b2f564919e13 100644 (file)
@@ -13,6 +13,7 @@
 #define MOUSEDEV_MINORS                32
 #define MOUSEDEV_MIX           31
 
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/smp_lock.h>
 #include <linux/poll.h>
index 2d8352419c0db445c57f03d80336cf9e1458faf5..65bf91e16a42725c7aeb5754fdb5954553fd73af 100644 (file)
@@ -603,7 +603,7 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
 
        if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) {
                u16 info = CAPIMSG_U16(skb->data, 12); // Info field
-               if (info == 0) {
+               if ((info & 0xff00) == 0) {
                        mutex_lock(&cdev->ncci_list_mtx);
                        capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
                        mutex_unlock(&cdev->ncci_list_mtx);
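
The widened check above relies on how the CAPI Info field is laid out: the high byte carries the error class, and only a non-zero class means the CONNECT_B3 request actually failed, so 0x00xx values still allocate the NCCI. As a small illustrative helper (not part of the patch):

#include <stdint.h>

/* CAPI Info word: high byte = error class, low byte = detail */
static inline int capi_info_is_success(uint16_t info)
{
        return (info & 0xff00) == 0;
}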
index 650120261abfbac9816386927fed562bf6ab37d4..3e6d17f42a98309bcdcd11b0296d2ea606cc8e20 100644 (file)
@@ -40,7 +40,7 @@ static int debugmode = 0;
 MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux");
 MODULE_AUTHOR("Carsten Paeth");
 MODULE_LICENSE("GPL");
-module_param(debugmode, uint, 0);
+module_param(debugmode, uint, S_IRUGO|S_IWUSR);
 
 /* -------- type definitions ----------------------------------------- */
 
@@ -671,8 +671,8 @@ static void n0(capidrv_contr * card, capidrv_ncci * ncci)
                                 NULL,  /* Useruserdata */   /* $$$$ */
                                 NULL   /* Facilitydataarray */
        );
-       send_message(card, &cmsg);
        plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ);
+       send_message(card, &cmsg);
 
        cmd.command = ISDN_STAT_BHUP;
        cmd.driver = card->myid;
@@ -924,8 +924,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
                 */
                capi_cmsg_answer(cmsg);
                cmsg->Reject = 1;       /* ignore */
-               send_message(card, cmsg);
                plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
+               send_message(card, cmsg);
                printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n",
                        card->contrnr,
                        cmd.parm.setup.phone,
@@ -974,8 +974,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
        case 2:         /* Call will be rejected. */
                capi_cmsg_answer(cmsg);
                cmsg->Reject = 2;       /* reject call, normal call clearing */
-               send_message(card, cmsg);
                plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
+               send_message(card, cmsg);
                break;
 
        default:
@@ -983,8 +983,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
                capi_cmsg_answer(cmsg);
                cmsg->Reject = 8;       /* reject call,
                                           destination out of order */
-               send_message(card, cmsg);
                plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
+               send_message(card, cmsg);
                break;
        }
        return;
@@ -1020,8 +1020,8 @@ static void handle_plci(_cmsg * cmsg)
                card->bchans[plcip->chan].disconnecting = 1;
                plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND);
                capi_cmsg_answer(cmsg);
-               send_message(card, cmsg);
                plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP);
+               send_message(card, cmsg);
                break;
 
        case CAPI_DISCONNECT_CONF:      /* plci */
@@ -1078,8 +1078,8 @@ static void handle_plci(_cmsg * cmsg)
 
                if (card->bchans[plcip->chan].incoming) {
                        capi_cmsg_answer(cmsg);
-                       send_message(card, cmsg);
                        plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
+                       send_message(card, cmsg);
                } else {
                        capidrv_ncci *nccip;
                        capi_cmsg_answer(cmsg);
@@ -1098,13 +1098,14 @@ static void handle_plci(_cmsg * cmsg)
                                                 NULL   /* NCPI */
                        );
                        nccip->msgid = cmsg->Messagenumber;
+                       plci_change_state(card, plcip,
+                                         EV_PLCI_CONNECT_ACTIVE_IND);
+                       ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
                        send_message(card, cmsg);
                        cmd.command = ISDN_STAT_DCONN;
                        cmd.driver = card->myid;
                        cmd.arg = plcip->chan;
                        card->interface.statcallb(&cmd);
-                       plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
-                       ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
                }
                break;
 
@@ -1193,8 +1194,8 @@ static void handle_ncci(_cmsg * cmsg)
                        goto notfound;
 
                capi_cmsg_answer(cmsg);
-               send_message(card, cmsg);
                ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND);
+               send_message(card, cmsg);
 
                cmd.command = ISDN_STAT_BCONN;
                cmd.driver = card->myid;
@@ -1222,8 +1223,8 @@ static void handle_ncci(_cmsg * cmsg)
                                                          0,    /* Reject */
                                                          NULL  /* NCPI */
                                );
-                               send_message(card, cmsg);
                                ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP);
+                               send_message(card, cmsg);
                                break;
                        }
                        printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
@@ -1299,8 +1300,8 @@ static void handle_ncci(_cmsg * cmsg)
                card->bchans[nccip->chan].disconnecting = 1;
                ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND);
                capi_cmsg_answer(cmsg);
-               send_message(card, cmsg);
                ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP);
+               send_message(card, cmsg);
                break;
 
        case CAPI_DISCONNECT_B3_CONF:   /* ncci */
@@ -2014,8 +2015,8 @@ static void send_listen(capidrv_contr *card)
                             card->cipmask,
                             card->cipmask2,
                             NULL, NULL);
-       send_message(card, &cmdcmsg);
        listen_change_state(card, EV_LISTEN_REQ);
+       send_message(card, &cmdcmsg);
 }
 
 static void listentimerfunc(unsigned long x)
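
Nearly every capidrv hunk above makes the same swap: the state-machine transition now happens before send_message(). The point is ordering, sketched below with hypothetical helper names -- once the message is handed to the controller its confirmation can arrive at any time, so the local state must already reflect the request:

struct card;
struct state;
struct msg;

void change_state(struct card *card, struct state *st, int event);
void send_message(struct card *card, struct msg *m);

static void issue_request(struct card *card, struct state *st,
                          int event, struct msg *m)
{
        change_state(card, st, event);  /* 1: bookkeeping first          */
        send_message(card, m);          /* 2: then let the hardware race */
}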
index 8b256a617c8a4524cec9f503ef7371ebbf30356b..3697c409bec66d5b9c2dbe0b31577f22fef2ef1c 100644 (file)
@@ -16,6 +16,7 @@
 #else
 #include <linux/fs.h>
 #endif
+#include <linux/sched.h>
 #include <linux/isdnif.h>
 #include <net/net_namespace.h>
 #include "isdn_divert.h"
index 234cc5d53312ab90b339edd1c96a005fd10a7942..44a58e6f8f6585f5fd7a5db45eed7d2473f705b5 100644 (file)
@@ -334,7 +334,14 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
        return startbytes - numbytes;
 }
 
-/* process a block of data received from the device
+/**
+ * gigaset_m10x_input() - process a block of data received from the device
+ * @inbuf:     received data and device descriptor structure.
+ *
+ * Called by hardware module {ser,usb}_gigaset with a block of received
+ * bytes. Separates the bytes received over the serial data channel into
+ * user data and command replies (locked/unlocked) according to the
+ * current state of the interface.
  */
 void gigaset_m10x_input(struct inbuf_t *inbuf)
 {
@@ -543,16 +550,17 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
        return iraw_skb;
 }
 
-/* gigaset_send_skb
- * called by common.c to queue an skb for sending
- * and start transmission if necessary
- * parameters:
- *     B Channel control structure
- *     skb
+/**
+ * gigaset_m10x_send_skb() - queue an skb for sending
+ * @bcs:       B channel descriptor structure.
+ * @skb:       data to send.
+ *
+ * Called by i4l.c to encode and queue an skb for sending, and start
+ * transmission if necessary.
+ *
  * Return value:
- *     number of bytes accepted for sending
- *     (skb->len if ok, 0 if out of buffer space)
- *     or error code (< 0, eg. -EINVAL)
+ *     number of bytes accepted for sending (skb->len) if ok,
+ *     error code < 0 (e.g. -ENOMEM) on error
  */
 int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
 {
index 781c4041f7b0d1a856b2be280126d89e0adf55e2..5ed1d99eb9f3125b830b5a83cee806e4cbb9814d 100644 (file)
@@ -134,6 +134,7 @@ struct bas_cardstate {
 #define BS_ATRDPEND    0x040   /* urb_cmd_in in use */
 #define BS_ATWRPEND    0x080   /* urb_cmd_out in use */
 #define BS_SUSPEND     0x100   /* USB port suspended */
+#define BS_RESETTING   0x200   /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
 
 
 static struct gigaset_driver *driver = NULL;
@@ -319,6 +320,21 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
        return -EINVAL;
 }
 
+/* set/clear bits in base connection state, return previous state
+ */
+static inline int update_basstate(struct bas_cardstate *ucs,
+                                 int set, int clear)
+{
+       unsigned long flags;
+       int state;
+
+       spin_lock_irqsave(&ucs->lock, flags);
+       state = ucs->basstate;
+       ucs->basstate = (state & ~clear) | set;
+       spin_unlock_irqrestore(&ucs->lock, flags);
+       return state;
+}
+
 /* error_hangup
  * hang up any existing connection because of an unrecoverable error
  * This function may be called from any context and takes care of scheduling
@@ -350,12 +366,9 @@ static inline void error_hangup(struct bc_state *bcs)
  */
 static inline void error_reset(struct cardstate *cs)
 {
-       /* close AT command channel to recover (ignore errors) */
-       req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
-
-       //FIXME try to recover without bothering the user
-       dev_err(cs->dev,
-           "unrecoverable error - please disconnect Gigaset base to reset\n");
+       /* reset interrupt pipe to recover (ignore errors) */
+       update_basstate(cs->hw.bas, BS_RESETTING, 0);
+       req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT);
 }
 
 /* check_pending
@@ -398,8 +411,13 @@ static void check_pending(struct bas_cardstate *ucs)
        case HD_DEVICE_INIT_ACK:                /* no reply expected */
                ucs->pending = 0;
                break;
-       /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE
-        * are handled separately and should never end up here
+       case HD_RESET_INTERRUPT_PIPE:
+               if (!(ucs->basstate & BS_RESETTING))
+                       ucs->pending = 0;
+               break;
+       /*
+        * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
+        * and should never end up here
         */
        default:
                dev_warn(&ucs->interface->dev,
@@ -449,21 +467,6 @@ static void cmd_in_timeout(unsigned long data)
        error_reset(cs);
 }
 
-/* set/clear bits in base connection state, return previous state
- */
-inline static int update_basstate(struct bas_cardstate *ucs,
-                                 int set, int clear)
-{
-       unsigned long flags;
-       int state;
-
-       spin_lock_irqsave(&ucs->lock, flags);
-       state = ucs->basstate;
-       ucs->basstate = (state & ~clear) | set;
-       spin_unlock_irqrestore(&ucs->lock, flags);
-       return state;
-}
-
 /* read_ctrl_callback
  * USB completion handler for control pipe input
  * called by the USB subsystem in interrupt context
@@ -762,7 +765,8 @@ static void read_int_callback(struct urb *urb)
                break;
 
        case HD_RESET_INTERRUPT_PIPE_ACK:
-               gig_dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK");
+               update_basstate(ucs, 0, BS_RESETTING);
+               dev_notice(cs->dev, "interrupt pipe reset\n");
                break;
 
        case HD_SUSPEND_END:
@@ -1331,28 +1335,24 @@ static void read_iso_tasklet(unsigned long data)
                rcvbuf = urb->transfer_buffer;
                totleft = urb->actual_length;
                for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
-                       if (unlikely(urb->iso_frame_desc[frame].status)) {
+                       numbytes = urb->iso_frame_desc[frame].actual_length;
+                       if (unlikely(urb->iso_frame_desc[frame].status))
                                dev_warn(cs->dev,
-                                        "isochronous read: frame %d: %s\n",
-                                        frame,
+                                        "isochronous read: frame %d[%d]: %s\n",
+                                        frame, numbytes,
                                         get_usb_statmsg(
                                            urb->iso_frame_desc[frame].status));
-                               break;
-                       }
-                       numbytes = urb->iso_frame_desc[frame].actual_length;
-                       if (unlikely(numbytes > BAS_MAXFRAME)) {
+                       if (unlikely(numbytes > BAS_MAXFRAME))
                                dev_warn(cs->dev,
                                         "isochronous read: frame %d: "
                                         "numbytes (%d) > BAS_MAXFRAME\n",
                                         frame, numbytes);
-                               break;
-                       }
                        if (unlikely(numbytes > totleft)) {
                                dev_warn(cs->dev,
                                         "isochronous read: frame %d: "
                                         "numbytes (%d) > totleft (%d)\n",
                                         frame, numbytes, totleft);
-                               break;
+                               numbytes = totleft;
                        }
                        offset = urb->iso_frame_desc[frame].offset;
                        if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
@@ -1361,7 +1361,7 @@ static void read_iso_tasklet(unsigned long data)
                                         "offset (%d) + numbytes (%d) "
                                         "> BAS_INBUFSIZE\n",
                                         frame, offset, numbytes);
-                               break;
+                               numbytes = BAS_INBUFSIZE - offset;
                        }
                        gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
                        totleft -= numbytes;
@@ -1433,6 +1433,7 @@ static void req_timeout(unsigned long data)
 
        case HD_CLOSE_ATCHANNEL:
                dev_err(bcs->cs->dev, "timeout closing AT channel\n");
+               error_reset(bcs->cs);
                break;
 
        case HD_CLOSE_B2CHANNEL:
@@ -1442,6 +1443,13 @@ static void req_timeout(unsigned long data)
                error_reset(bcs->cs);
                break;
 
+       case HD_RESET_INTERRUPT_PIPE:
+               /* error recovery escalation */
+               dev_err(bcs->cs->dev,
+                       "reset interrupt pipe timeout, attempting USB reset\n");
+               usb_queue_reset_device(bcs->cs->hw.bas->interface);
+               break;
+
        default:
                dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n",
                         pending);
@@ -1934,6 +1942,15 @@ static int gigaset_write_cmd(struct cardstate *cs,
                goto notqueued;
        }
 
+       /* translate "+++" escape sequence sent as a single separate command
+        * into "close AT channel" command for error recovery
+        * The next command will reopen the AT channel automatically.
+        */
+       if (len == 3 && !memcmp(buf, "+++", 3)) {
+               rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
+               goto notqueued;
+       }
+
        if (len > IF_WRITEBUF)
                len = IF_WRITEBUF;
        if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
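The reworked isochronous read loop above no longer abandons the whole URB when a single frame looks wrong: it warns, then clamps numbytes so the frame can still be handed to gigaset_isoc_receive(). A minimal sketch of that clamping rule, using a hypothetical helper name (clamp_iso_frame) rather than anything in the driver:

static int clamp_iso_frame(int numbytes, int totleft, int offset, int bufsize)
{
	/* frame claims more data than is left in the URB */
	if (numbytes > totleft)
		numbytes = totleft;
	/* frame would run past the end of the input buffer */
	if (offset + numbytes > bufsize)
		numbytes = bufsize - offset;
	return numbytes;
}

With BAS_INBUFSIZE passed as bufsize this reproduces the two fallback assignments in the loop above.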
index e4141bf8b2f3a705436115491bab35a18e198062..33dcd8d72b7cb72ced39b702a0e486400c4fcd52 100644 (file)
 #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
 #define DRIVER_DESC "Driver for Gigaset 307x"
 
+#ifdef CONFIG_GIGASET_DEBUG
+#define DRIVER_DESC_DEBUG " (debug build)"
+#else
+#define DRIVER_DESC_DEBUG ""
+#endif
+
 /* Module parameters */
 int gigaset_debuglevel = DEBUG_DEFAULT;
 EXPORT_SYMBOL_GPL(gigaset_debuglevel);
@@ -32,6 +38,17 @@ MODULE_PARM_DESC(debug, "debug level");
 #define VALID_MINOR    0x01
 #define VALID_ID       0x02
 
+/**
+ * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging
+ * @level:     debugging level.
+ * @msg:       message prefix.
+ * @len:       number of bytes to dump.
+ * @buf:       data to dump.
+ *
+ * If the current debugging level includes one of the bits set in @level,
+ * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio,
+ * prefixed by the text @msg.
+ */
 void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
                        size_t len, const unsigned char *buf)
 {
@@ -274,6 +291,20 @@ static void clear_events(struct cardstate *cs)
        spin_unlock_irqrestore(&cs->ev_lock, flags);
 }
 
+/**
+ * gigaset_add_event() - add event to device event queue
+ * @cs:                device descriptor structure.
+ * @at_state:  connection state structure.
+ * @type:      event type.
+ * @ptr:       pointer parameter for event.
+ * @parameter: integer parameter for event.
+ * @arg:       pointer parameter for event.
+ *
+ * Allocate an event queue entry from the device's event queue, and set it up
+ * with the parameters given.
+ *
+ * Return value: added event
+ */
 struct event_t *gigaset_add_event(struct cardstate *cs,
                                  struct at_state_t *at_state, int type,
                                  void *ptr, int parameter, void *arg)
@@ -398,6 +429,15 @@ static void make_invalid(struct cardstate *cs, unsigned mask)
        spin_unlock_irqrestore(&drv->lock, flags);
 }
 
+/**
+ * gigaset_freecs() - free all associated resources of a device
+ * @cs:                device descriptor structure.
+ *
+ * Stops all tasklets and timers, unregisters the device from all
+ * subsystems it was registered to, deallocates the device structure
+ * @cs and all structures referenced from it.
+ * Operations on the device should be stopped before calling this.
+ */
 void gigaset_freecs(struct cardstate *cs)
 {
        int i;
@@ -506,7 +546,12 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
        inbuf->inputstate = inputstate;
 }
 
-/* append received bytes to inbuf */
+/**
+ * gigaset_fill_inbuf() - append received data to input buffer
+ * @inbuf:     buffer structure.
+ * @src:       received data.
+ * @numbytes:  number of bytes received.
+ */
 int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
                       unsigned numbytes)
 {
@@ -606,20 +651,22 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
        return NULL;
 }
 
-/* gigaset_initcs
+/**
+ * gigaset_initcs() - initialize device structure
+ * @drv:       hardware driver the device belongs to
+ * @channels:  number of B channels supported by device
+ * @onechannel:        !=0 if B channel data and AT commands share one
+ *                 communication channel (M10x),
+ *             ==0 if B channels have separate communication channels (base)
+ * @ignoreframes:      number of frames to ignore after setting up B channel
+ * @cidmode:   !=0: start in CallID mode
+ * @modulename:        name of driver module for LL registration
+ *
  * Allocate and initialize cardstate structure for Gigaset driver
  * Calls hardware dependent gigaset_initcshw() function
  * Calls B channel initialization function gigaset_initbcs() for each B channel
- * parameters:
- *     drv             hardware driver the device belongs to
- *     channels        number of B channels supported by device
- *     onechannel      !=0: B channel data and AT commands share one
- *                          communication channel
- *                     ==0: B channels have separate communication channels
- *     ignoreframes    number of frames to ignore after setting up B channel
- *     cidmode         !=0: start in CallID mode
- *     modulename      name of driver module (used for I4L registration)
- * return value:
+ *
+ * Return value:
  *     pointer to cardstate structure
  */
 struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
@@ -837,6 +884,17 @@ static void cleanup_cs(struct cardstate *cs)
 }
 
 
+/**
+ * gigaset_start() - start device operations
+ * @cs:                device descriptor structure.
+ *
+ * Prepares the device for use by setting up communication parameters,
+ * scheduling an EV_START event to initiate device initialization, and
+ * waiting for completion of the initialization.
+ *
+ * Return value:
+ *     1 - success, 0 - error
+ */
 int gigaset_start(struct cardstate *cs)
 {
        unsigned long flags;
@@ -879,9 +937,15 @@ error:
 }
 EXPORT_SYMBOL_GPL(gigaset_start);
 
-/* gigaset_shutdown
- * check if a device is associated to the cardstate structure and stop it
- * return value: 0 if ok, -1 if no device was associated
+/**
+ * gigaset_shutdown() - shut down device operations
+ * @cs:                device descriptor structure.
+ *
+ * Deactivates the device by scheduling an EV_SHUTDOWN event and
+ * waiting for completion of the shutdown.
+ *
+ * Return value:
+ *     0 - success, -1 - error (no device associated)
  */
 int gigaset_shutdown(struct cardstate *cs)
 {
@@ -912,6 +976,13 @@ exit:
 }
 EXPORT_SYMBOL_GPL(gigaset_shutdown);
 
+/**
+ * gigaset_stop() - stop device operations
+ * @cs:                device descriptor structure.
+ *
+ * Stops operations on the device by scheduling an EV_STOP event and
+ * waiting for completion of the shutdown.
+ */
 void gigaset_stop(struct cardstate *cs)
 {
        mutex_lock(&cs->mutex);
@@ -1020,6 +1091,14 @@ struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
        return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
 }
 
+/**
+ * gigaset_freedriver() - free all associated resources of a driver
+ * @drv:       driver descriptor structure.
+ *
+ * Unregisters the driver from the system and deallocates the driver
+ * structure @drv and all structures referenced from it.
+ * All devices should be shut down before calling this.
+ */
 void gigaset_freedriver(struct gigaset_driver *drv)
 {
        unsigned long flags;
@@ -1035,14 +1114,16 @@ void gigaset_freedriver(struct gigaset_driver *drv)
 }
 EXPORT_SYMBOL_GPL(gigaset_freedriver);
 
-/* gigaset_initdriver
+/**
+ * gigaset_initdriver() - initialize driver structure
+ * @minor:     First minor number
+ * @minors:    Number of minors this driver can handle
+ * @procname:  Name of the driver
+ * @devname:   Name of the device files (prefix without minor number)
+ *
  * Allocate and initialize gigaset_driver structure. Initialize interface.
- * parameters:
- *     minor           First minor number
- *     minors          Number of minors this driver can handle
- *     procname        Name of the driver
- *     devname         Name of the device files (prefix without minor number)
- * return value:
+ *
+ * Return value:
  *     Pointer to the gigaset_driver structure on success, NULL on failure.
  */
 struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
@@ -1095,6 +1176,13 @@ error:
 }
 EXPORT_SYMBOL_GPL(gigaset_initdriver);
 
+/**
+ * gigaset_blockdriver() - block driver
+ * @drv:       driver descriptor structure.
+ *
+ * Prevents the driver from attaching new devices, in preparation for
+ * deregistration.
+ */
 void gigaset_blockdriver(struct gigaset_driver *drv)
 {
        drv->blocked = 1;
@@ -1110,7 +1198,7 @@ static int __init gigaset_init_module(void)
        if (gigaset_debuglevel == 1)
                gigaset_debuglevel = DEBUG_DEFAULT;
 
-       pr_info(DRIVER_DESC "\n");
+       pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
        return 0;
 }
 
index 2d91049571a4ce20470acb86d7120bee326b911c..cc768caa38f54b86f48ec92310ed93e3b1fcf998 100644 (file)
@@ -207,7 +207,6 @@ struct reply_t gigaset_tab_nocid[] =
        /* leave dle mode */
        {RSP_INIT,      0,  0,SEQ_DLE0,           201, 5, {0},             "^SDLE=0\r"},
        {RSP_OK,      201,201, -1,                202,-1},
-       //{RSP_ZDLE,    202,202,  0,                202, 0, {ACT_ERROR}},//DELETE
        {RSP_ZDLE,    202,202,  0,                  0, 0, {ACT_DLE0}},
        {RSP_NODEV,   200,249, -1,                  0, 0, {ACT_FAKEDLE0}},
        {RSP_ERROR,   200,249, -1,                  0, 0, {ACT_FAILDLE0}},
@@ -265,6 +264,7 @@ struct reply_t gigaset_tab_nocid[] =
        {EV_SHUTDOWN,  -1, -1, -1,                 -1,-1, {ACT_SHUTDOWN}}, //FIXME
 
        /* misc. */
+       {RSP_ERROR,    -1, -1, -1,                 -1, -1, {ACT_ERROR} },
        {RSP_EMPTY,    -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
        {RSP_ZCFGT,    -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
        {RSP_ZCFG,     -1, -1, -1,                 -1,-1, {ACT_DEBUG}}, //FIXME
@@ -328,10 +328,9 @@ struct reply_t gigaset_tab_cid[] =
        {RSP_INIT,     -1, -1,SEQ_HUP,            401, 5, {0},             "+VLS=0\r"}, /* hang up */ //-1,-1?
        {RSP_OK,      401,401, -1,                402, 5},
        {RSP_ZVLS,    402,402,  0,                403, 5},
-       {RSP_ZSAU,    403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */
-       //{RSP_ZSAU,    403,403,ZSAU_NULL,          401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
-       {RSP_ZSAU,    403,403,ZSAU_NULL,            0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
-       {RSP_NODEV,   401,403, -1,                  0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
+       {RSP_ZSAU,    403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
+       {RSP_ZSAU,    403, 403, ZSAU_NULL,            0,  0, {ACT_DISCONNECT} },
+       {RSP_NODEV,   401, 403, -1,                   0,  0, {ACT_FAKEHUP} },
        {RSP_ERROR,   401,401, -1,                  0, 0, {ACT_ABORTHUP}},
        {EV_TIMEOUT,  401,403, -1,                  0, 0, {ACT_ABORTHUP}},
 
@@ -474,8 +473,13 @@ static int cid_of_response(char *s)
        //FIXME is ;<digit>+ at end of non-CID response really impossible?
 }
 
-/* This function will be called via task queue from the callback handler.
- * We received a modem response and have to handle it..
+/**
+ * gigaset_handle_modem_response() - process received modem response
+ * @cs:                device descriptor structure.
+ *
+ * Called by asyncdata/isocdata if a block of data received from the
+ * device must be processed as a modem command response. The data is
+ * already in the cs structure.
  */
 void gigaset_handle_modem_response(struct cardstate *cs)
 {
@@ -707,6 +711,11 @@ static void disconnect(struct at_state_t **at_state_p)
        if (bcs) {
                /* B channel assigned: invoke hardware specific handler */
                cs->ops->close_bchannel(bcs);
+               /* notify LL */
+               if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
+                       bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
+                       gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
+               }
        } else {
                /* no B channel assigned: just deallocate */
                spin_lock_irqsave(&cs->lock, flags);
@@ -1429,11 +1438,12 @@ static void do_action(int action, struct cardstate *cs,
                cs->gotfwver = -1;
                dev_err(cs->dev, "could not read firmware version.\n");
                break;
-#ifdef CONFIG_GIGASET_DEBUG
        case ACT_ERROR:
-               *p_genresp = 1;
-               *p_resp_code = RSP_ERROR;
+               gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
+                       __func__, at_state->ConState);
+               cs->cur_at_seq = SEQ_NONE;
                break;
+#ifdef CONFIG_GIGASET_DEBUG
        case ACT_TEST:
                {
                        static int count = 3; //2; //1;
index 9b22f9cf2f33b2bd4895bf15f5d3cb16e73918df..654489d836cd18b7d3559a45a608e11018b4a1f9 100644 (file)
@@ -51,6 +51,12 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
                return -ENODEV;
        }
        bcs = &cs->bcs[channel];
+
+       /* can only handle linear sk_buffs */
+       if (skb_linearize(skb) < 0) {
+               dev_err(cs->dev, "%s: skb_linearize failed\n", __func__);
+               return -ENOMEM;
+       }
        len = skb->len;
 
        gig_dbg(DEBUG_LLDATA,
@@ -79,6 +85,14 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
        return cs->ops->send_skb(bcs, skb);
 }
 
+/**
+ * gigaset_skb_sent() - acknowledge sending an skb
+ * @bcs:       B channel descriptor structure.
+ * @skb:       sent data.
+ *
+ * Called by hardware module {bas,ser,usb}_gigaset when the data in an
+ * skb has been successfully sent, for signalling completion to the LL.
+ */
 void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
 {
        unsigned len;
@@ -455,6 +469,15 @@ int gigaset_isdn_setup_accept(struct at_state_t *at_state)
        return 0;
 }
 
+/**
+ * gigaset_isdn_icall() - signal incoming call
+ * @at_state:  connection state structure.
+ *
+ * Called by main module to notify the LL that an incoming call has been
+ * received. @at_state contains the parameters of the call.
+ *
+ * Return value: call disposition (ICALL_*)
+ */
 int gigaset_isdn_icall(struct at_state_t *at_state)
 {
        struct cardstate *cs = at_state->cs;
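gigaset_skb_sent() is the hook the hardware modules call from their transmit-completion path. A hedged sketch of such a path (my_send_complete and the way bcs and skb reach it are invented for illustration; the real bas/ser/usb code differs):

static void my_send_complete(struct bc_state *bcs, struct sk_buff *skb,
			     int status)
{
	if (status == 0)
		gigaset_skb_sent(bcs, skb);	/* signal completion to the LL */
	dev_kfree_skb_any(skb);			/* hardware module frees the skb */
}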
index f33ac27de643f4631817e6165c034fa38f1c006d..6a8e1384e7bd09b6441ef6a27eaed5c497f31061 100644 (file)
@@ -616,6 +616,15 @@ void gigaset_if_free(struct cardstate *cs)
        tty_unregister_device(drv->tty, cs->minor_index);
 }
 
+/**
+ * gigaset_if_receive() - pass a received block of data to the tty device
+ * @cs:                device descriptor structure.
+ * @buffer:    received data.
+ * @len:       number of bytes received.
+ *
+ * Called by asyncdata/isocdata if a block of data received from the
+ * device must be sent to userspace through the ttyG* device.
+ */
 void gigaset_if_receive(struct cardstate *cs,
                        unsigned char *buffer, size_t len)
 {
index bed38fcc432b62b8a88335a697dfa93295110a2e..9f3ef7b4248c35d918321376d956d2a0da629a12 100644 (file)
@@ -429,7 +429,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
                return -EAGAIN;
        }
 
-       dump_bytes(DEBUG_STREAM, "snd data", in, count);
+       dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
 
        /* bitstuff and checksum input data */
        fcs = PPP_INITFCS;
@@ -448,7 +448,6 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
        /* put closing flag and repeat byte for flag idle */
        isowbuf_putflag(iwb);
        end = isowbuf_donewrite(iwb);
-       dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1);
        return end;
 }
 
@@ -482,6 +481,8 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
        }
 
        gig_dbg(DEBUG_STREAM, "put %d bytes", count);
+       dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
+
        write = iwb->write;
        do {
                c = bitrev8(*in++);
@@ -583,7 +584,7 @@ static inline void hdlc_done(struct bc_state *bcs)
                procskb->tail -= 2;
                gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)",
                        __func__, procskb->len);
-               dump_bytes(DEBUG_STREAM,
+               dump_bytes(DEBUG_STREAM_DUMP,
                           "rcv data", procskb->data, procskb->len);
                bcs->hw.bas->goodbytes += procskb->len;
                gigaset_rcv_skb(procskb, bcs->cs, bcs);
@@ -878,6 +879,8 @@ static inline void trans_receive(unsigned char *src, unsigned count,
                        dobytes--;
                }
                if (dobytes == 0) {
+                       dump_bytes(DEBUG_STREAM_DUMP,
+                                  "rcv data", skb->data, skb->len);
                        gigaset_rcv_skb(skb, bcs->cs, bcs);
                        bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
                        if (!skb) {
@@ -973,16 +976,17 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
 
 /* == data output ========================================================== */
 
-/* gigaset_send_skb
- * called by common.c to queue an skb for sending
- * and start transmission if necessary
- * parameters:
- *     B Channel control structure
- *     skb
- * return value:
- *     number of bytes accepted for sending
- *     (skb->len if ok, 0 if out of buffer space)
- *     or error code (< 0, eg. -EINVAL)
+/**
+ * gigaset_isoc_send_skb() - queue an skb for sending
+ * @bcs:       B channel descriptor structure.
+ * @skb:       data to send.
+ *
+ * Called by i4l.c to queue an skb for sending, and start transmission if
+ * necessary.
+ *
+ * Return value:
+ *     number of bytes accepted for sending (skb->len) if ok,
+ *     error code < 0 (eg. -ENODEV) on error
  */
 int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
 {
index bde55d7287fae86470b701eedf400a52857275f8..eadc1cd34a20ae2830ed4442807889ac693c8b27 100644 (file)
@@ -78,6 +78,7 @@ config MISDN_NETJET
        depends on PCI
        select MISDN_IPAC
        select ISDN_HDLC
+       select ISDN_I4L
        help
          Enable support for Traverse Technologies NETJet PCI cards.
 
index dd744ffd240bcd34251174902ebcacfb112f0931..07c4e49f9e77f2a8ba65fdd7db5a6271ba6f22cc 100644 (file)
@@ -141,8 +141,7 @@ endmenu
 endif
 
 config ISDN_HDLC
-       tristate 
-       depends on HISAX_ST5481
+       tristate
        select CRC_CCITT
        select BITREVERSE
 
index c36f521374562271c5c4dba4890303a99ffbc069..feb0fa45b664b7f30f843d8097fc47013be8adce 100644 (file)
@@ -415,7 +415,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 }
 
 static int data_sock_setsockopt(struct socket *sock, int level, int optname,
-       char __user *optval, int len)
+       char __user *optval, unsigned int len)
 {
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;
index 708a8017c21d49e207139c9773927fd87c848f7a..adc561eb59d2ed022873bacf35501b9462e19bbc 100644 (file)
@@ -19,9 +19,6 @@
 #include <linux/workqueue.h>
 #include <linux/leds-pca9532.h>
 
-static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
-I2C_CLIENT_INSMOD_1(pca9532);
-
 #define PCA9532_REG_PSC(i) (0x2+(i)*2)
 #define PCA9532_REG_PWM(i) (0x3+(i)*2)
 #define PCA9532_REG_LS0  0x6
index b4d3f7ca554f0d8e81be09a02d824fb4f84cb978..bd1632388e4a37f0f12268e83d645b92e96632ff 100644 (file)
@@ -508,7 +508,7 @@ static int close(struct inode *inode, struct file *file)
  * uses: reading and writing a character device called /dev/lguest.  All the
  * work happens in the read(), write() and close() routines:
  */
-static struct file_operations lguest_fops = {
+static const struct file_operations lguest_fops = {
        .owner   = THIS_MODULE,
        .release = close,
        .write   = write,
index fde377c60cca753eafe9c358f7e562c6d538ad7b..556f0feaa4df33fe3733e0ccf3638bb6bfe1aa44 100644 (file)
@@ -124,6 +124,8 @@ read_reg(struct thermostat* th, int reg)
        return data;
 }
 
+static struct i2c_driver thermostat_driver;
+
 static int
 attach_thermostat(struct i2c_adapter *adapter)
 {
@@ -148,7 +150,7 @@ attach_thermostat(struct i2c_adapter *adapter)
         * Let i2c-core delete that device on driver removal.
         * This is safe because i2c-core holds the core_lock mutex for us.
         */
-       list_add_tail(&client->detected, &client->driver->clients);
+       list_add_tail(&client->detected, &thermostat_driver.clients);
        return 0;
 }
 
index a028598af2d32ae030596bff2e83145420271d65..ea32c7e5a9af116c49e885b1e015fe9742f24758 100644 (file)
@@ -286,6 +286,8 @@ struct fcu_fan_table        fcu_fans[] = {
        },
 };
 
+static struct i2c_driver therm_pm72_driver;
+
 /*
  * Utility function to create an i2c_client structure and
  * attach it to one of u3 adapters
@@ -318,7 +320,7 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name)
         * Let i2c-core delete that device on driver removal.
         * This is safe because i2c-core holds the core_lock mutex for us.
         */
-       list_add_tail(&clt->detected, &clt->driver->clients);
+       list_add_tail(&clt->detected, &therm_pm72_driver.clients);
        return clt;
 }
 
index 529886c7a8263ae4d4d01df89b7b513695d983c3..ed6426a107738b4dff5a1d639140a7f22b7c9b27 100644 (file)
@@ -115,6 +115,8 @@ static int wf_lm75_probe(struct i2c_client *client,
        return rc;
 }
 
+static struct i2c_driver wf_lm75_driver;
+
 static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
                                             u8 addr, int ds1775,
                                             const char *loc)
@@ -157,7 +159,7 @@ static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
         * Let i2c-core delete that device on driver removal.
         * This is safe because i2c-core holds the core_lock mutex for us.
         */
-       list_add_tail(&client->detected, &client->driver->clients);
+       list_add_tail(&client->detected, &wf_lm75_driver.clients);
        return client;
  fail:
        return NULL;
index e2a55ecda2b28aa2c80114dd7d6c7ec0cd519827..a67b349319e9c9be57f19faf3f309ff487e06448 100644 (file)
@@ -88,6 +88,8 @@ static int wf_max6690_probe(struct i2c_client *client,
        return rc;
 }
 
+static struct i2c_driver wf_max6690_driver;
+
 static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
                                            u8 addr, const char *loc)
 {
@@ -119,7 +121,7 @@ static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
         * Let i2c-core delete that device on driver removal.
         * This is safe because i2c-core holds the core_lock mutex for us.
         */
-       list_add_tail(&client->detected, &client->driver->clients);
+       list_add_tail(&client->detected, &wf_max6690_driver.clients);
        return client;
 
  fail:
index 5da729e58f99294ec43cd23ec7bee680da7e70b7..e20330a28959bc539d0691337bf49438e2022efe 100644 (file)
@@ -194,6 +194,8 @@ static struct wf_sensor_ops wf_sat_ops = {
        .owner          = THIS_MODULE,
 };
 
+static struct i2c_driver wf_sat_driver;
+
 static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
 {
        struct i2c_board_info info;
@@ -222,7 +224,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
         * Let i2c-core delete that device on driver removal.
         * This is safe because i2c-core holds the core_lock mutex for us.
         */
-       list_add_tail(&client->detected, &client->driver->clients);
+       list_add_tail(&client->detected, &wf_sat_driver.clients);
 }
 
 static int wf_sat_probe(struct i2c_client *client,
index ba0edad2d048017bd08257f0c7b34eb9f86bf1ea..54abf9e303b7d66c4f41de1ef1ff17197572d292 100644 (file)
@@ -129,11 +129,13 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
  * This is the connector callback that delivers data
  * that was sent from userspace.
  */
-static void cn_ulog_callback(void *data)
+static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
-       struct cn_msg *msg = (struct cn_msg *)data;
        struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
 
+       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+               return;
+
        spin_lock(&receiving_list_lock);
        if (msg->len == 0)
                fill_pkg(msg, NULL);
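The connector callback now receives the sender's netlink_skb_parms, so a permission check can be made before acting on the message. The same two-argument shape applies to any connector user; a minimal hypothetical callback (cn_my_callback is invented):

static void cn_my_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	/* ignore requests from senders without CAP_SYS_ADMIN */
	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
		return;

	/* ... act on msg->len bytes of payload at msg + 1 ... */
}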
index 376f1ab48a245fcdce521bd55a7ceb109f7f8c9e..23e76fe0d35958d604657a559a5954095854bf23 100644 (file)
@@ -130,7 +130,7 @@ struct mapped_device {
        /*
         * A list of ios that arrived while we were suspended.
         */
-       atomic_t pending[2];
+       atomic_t pending;
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
@@ -453,14 +453,13 @@ static void start_io_acct(struct dm_io *io)
 {
        struct mapped_device *md = io->md;
        int cpu;
-       int rw = bio_data_dir(io->bio);
 
        io->start_time = jiffies;
 
        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
-       dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
+       dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
 }
 
 static void end_io_acct(struct dm_io *io)
@@ -480,9 +479,8 @@ static void end_io_acct(struct dm_io *io)
         * After this is decremented the bio must not be touched if it is
         * a barrier.
         */
-       dm_disk(md)->part0.in_flight[rw] = pending =
-               atomic_dec_return(&md->pending[rw]);
-       pending += atomic_read(&md->pending[rw^0x1]);
+       dm_disk(md)->part0.in_flight = pending =
+               atomic_dec_return(&md->pending);
 
        /* nudge anyone waiting on suspend queue */
        if (!pending)
@@ -1787,8 +1785,7 @@ static struct mapped_device *alloc_dev(int minor)
        if (!md->disk)
                goto bad_disk;
 
-       atomic_set(&md->pending[0], 0);
-       atomic_set(&md->pending[1], 0);
+       atomic_set(&md->pending, 0);
        init_waitqueue_head(&md->wait);
        INIT_WORK(&md->work, dm_wq_work);
        init_waitqueue_head(&md->eventq);
@@ -2091,8 +2088,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
                                break;
                        }
                        spin_unlock_irqrestore(q->queue_lock, flags);
-               } else if (!atomic_read(&md->pending[0]) &&
-                                       !atomic_read(&md->pending[1]))
+               } else if (!atomic_read(&md->pending))
                        break;
 
                if (interruptible == TASK_INTERRUPTIBLE &&
index 3750ff48cba1e75097c99daf4697931b2f9b0a3f..c37790ad92d0b501e470d4c0a6de0a1713fc592a 100644 (file)
@@ -20,6 +20,7 @@
  *
  */
 
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -1203,7 +1204,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
        return mask;
 }
 
-static struct file_operations dvb_dvr_fops = {
+static const struct file_operations dvb_dvr_fops = {
        .owner = THIS_MODULE,
        .read = dvb_dvr_read,
        .write = dvb_dvr_write,
index eef6d36166268f77883b2fb454eb6dcd698ca1fa..91c537bca8adaac314f85c7ffc531c05f5d00620 100644 (file)
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
index eeb80d0ea3ff24585295aad880e0a77ecc9078fb..853e04b7cb361d45257f80dcafdcf121cd340c63 100644 (file)
@@ -215,7 +215,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
        return POLLIN;
 }
 
-static struct file_operations fdtv_ca_fops = {
+static const struct file_operations fdtv_ca_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = dvb_generic_ioctl,
        .open           = dvb_generic_open,
index 8b1440136c45b10710079bdbf2e80fba7f166978..482d0f3be5ffe7a26ed931a1c7f44042a384da9c 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/videodev2.h>   /* V4L2 API defs                */
 #include <linux/param.h>
 #include <linux/pnp.h>
+#include <linux/sched.h>
 #include <linux/io.h>          /* outb, outb_p                 */
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
index 43ab0adf3b610a8927299e833c6bb3ab5f3b73ad..2377313c041a8c33976db18f0c1ef7a698c94bd6 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/vmalloc.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/ctype.h>
index 5447da16a1705c49e9e72c57a58e203ceb4ce1eb..613481028272f114c754a4a652987fce90452cae 100644 (file)
@@ -57,8 +57,6 @@
  * The AB3100 is usually assigned address 0x48 (7-bit)
  * The chip is defined in the platform i2c_board_data section.
  */
-static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END };
-I2C_CLIENT_INSMOD_1(ab3100);
 
 u8 ab3100_get_chip_type(struct ab3100 *ab3100)
 {
@@ -966,7 +964,7 @@ static int __exit ab3100_remove(struct i2c_client *client)
 }
 
 static const struct i2c_device_id ab3100_id[] = {
-       { "ab3100", ab3100 },
+       { "ab3100", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, ab3100_id);
index 2afc08006e6d1db891f995445a351c59186165fd..fa294b6d600a9d6af6f8f27d8b853b5aea7d55e3 100644 (file)
@@ -21,6 +21,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/ucb1400.h>
 
 unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
index 3c0c58eed34778a22b6d052dbb665584e8b55147..5a6b2bce8ad5a7a3672c8333a91a8285202b9af2 100644 (file)
 #include <linux/i2c.h>
 #include <linux/mutex.h>
 
-/* Do not scan - the MAX6875 access method will write to some EEPROM chips */
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(max6875);
-
 /* The MAX6875 can only read/write 16 bytes at a time */
 #define SLICE_SIZE                     16
 #define SLICE_BITS                     4
@@ -146,31 +140,21 @@ static struct bin_attribute user_eeprom_attr = {
        .read = max6875_read,
 };
 
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int max6875_detect(struct i2c_client *client, int kind,
-                         struct i2c_board_info *info)
+static int max6875_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
 {
        struct i2c_adapter *adapter = client->adapter;
+       struct max6875_data *data;
+       int err;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
                                     | I2C_FUNC_SMBUS_READ_BYTE))
                return -ENODEV;
 
-       /* Only check even addresses */
+       /* Only bind to even addresses */
        if (client->addr & 1)
                return -ENODEV;
 
-       strlcpy(info->type, "max6875", I2C_NAME_SIZE);
-
-       return 0;
-}
-
-static int max6875_probe(struct i2c_client *client,
-                        const struct i2c_device_id *id)
-{
-       struct max6875_data *data;
-       int err;
-
        if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
                return -ENOMEM;
 
@@ -222,9 +206,6 @@ static struct i2c_driver max6875_driver = {
        .probe          = max6875_probe,
        .remove         = max6875_remove,
        .id_table       = max6875_id,
-
-       .detect         = max6875_detect,
-       .address_data   = &addr_data,
 };
 
 static int __init max6875_init(void)
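With the detect() hook and address_data removed, the MAX6875 is no longer picked up by bus scanning; it has to be declared explicitly so that probe() is called. A hypothetical board-code snippet (bus number and address are placeholders; the address must be even, as checked in probe above):

static struct i2c_board_info my_board_i2c_devs[] __initdata = {
	{ I2C_BOARD_INFO("max6875", 0x50) },	/* even address, see probe() */
};

static int __init my_board_i2c_init(void)
{
	/* register before the i2c adapter for bus 0 is added */
	return i2c_register_board_info(0, my_board_i2c_devs,
				       ARRAY_SIZE(my_board_i2c_devs));
}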
index fa57b67593ae5b884c42a9ae7e7704084c905004..90a95ce8dc344953365cc1ec3d5eafc58e02a490 100644 (file)
@@ -271,7 +271,7 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait)
        return mask;
 }
 
-static struct file_operations phantom_file_ops = {
+static const struct file_operations phantom_file_ops = {
        .open = phantom_open,
        .release = phantom_release,
        .unlocked_ioctl = phantom_ioctl,
index 300e7ba391a0faffcb436ed4974a573e0a2b4618..41c8fe2a928c6a76967890a7467038e7df703c3c 100644 (file)
@@ -53,7 +53,6 @@ struct gru_stats_s gru_stats;
 /* Guaranteed user available resources on each node */
 static int max_user_cbrs, max_user_dsr_bytes;
 
-static struct file_operations gru_fops;
 static struct miscdevice gru_miscdev;
 
 
@@ -426,7 +425,7 @@ static void __exit gru_exit(void)
        gru_proc_exit();
 }
 
-static struct file_operations gru_fops = {
+static const struct file_operations gru_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = gru_file_unlocked_ioctl,
        .mmap           = gru_file_mmap,
index 610dbd1fcc820a85cf4fcc667281063cb1b26983..96d10f40fb23e1ba6db97359c53c4e6a8415660f 100644 (file)
@@ -240,7 +240,7 @@ static int mmc_ext_csd_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static struct file_operations mmc_dbg_ext_csd_fops = {
+static const struct file_operations mmc_dbg_ext_csd_fops = {
        .open           = mmc_ext_csd_open,
        .read           = mmc_ext_csd_read,
        .release        = mmc_ext_csd_release,
index 6636354b48ceac476c3ecbb7a88054dd4047d8d2..f85dcd5365082ab27cc77adefcb84f1364892f80 100644 (file)
@@ -29,6 +29,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
        unsigned i, nr_strings;
        char **buffer, *string;
 
+       /* Find all null-terminated (including zero length) strings in
+          the TPLLV1_INFO field. Trailing garbage is ignored. */
        buf += 2;
        size -= 2;
 
@@ -39,11 +41,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
                if (buf[i] == 0)
                        nr_strings++;
        }
-
-       if (nr_strings < 4) {
-               printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n");
+       if (nr_strings == 0)
                return 0;
-       }
 
        size = i;
 
@@ -98,6 +97,22 @@ static const unsigned char speed_val[16] =
 static const unsigned int speed_unit[8] =
        { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
 
+/* FUNCE tuples with these types get passed to SDIO drivers */
+static const unsigned char funce_type_whitelist[] = {
+       4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */
+};
+
+static int cistpl_funce_whitelisted(unsigned char type)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) {
+               if (funce_type_whitelist[i] == type)
+                       return 1;
+       }
+       return 0;
+}
+
 static int cistpl_funce_common(struct mmc_card *card,
                               const unsigned char *buf, unsigned size)
 {
@@ -120,6 +135,10 @@ static int cistpl_funce_func(struct sdio_func *func,
        unsigned vsn;
        unsigned min_size;
 
+       /* let SDIO drivers take care of whitelisted FUNCE tuples */
+       if (cistpl_funce_whitelisted(buf[0]))
+               return -EILSEQ;
+
        vsn = func->card->cccr.sdio_vsn;
        min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
 
@@ -154,13 +173,12 @@ static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
        else
                ret = cistpl_funce_common(card, buf, size);
 
-       if (ret) {
+       if (ret && ret != -EILSEQ) {
                printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
                       "type %u\n", mmc_hostname(card->host), size, buf[0]);
-               return ret;
        }
 
-       return 0;
+       return ret;
 }
 
 typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
@@ -253,21 +271,12 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
                for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++)
                        if (cis_tpl_list[i].code == tpl_code)
                                break;
-               if (i >= ARRAY_SIZE(cis_tpl_list)) {
-                       /* this tuple is unknown to the core */
-                       this->next = NULL;
-                       this->code = tpl_code;
-                       this->size = tpl_link;
-                       *prev = this;
-                       prev = &this->next;
-                       printk(KERN_DEBUG
-                              "%s: queuing CIS tuple 0x%02x length %u\n",
-                              mmc_hostname(card->host), tpl_code, tpl_link);
-               } else {
+               if (i < ARRAY_SIZE(cis_tpl_list)) {
                        const struct cis_tpl *tpl = cis_tpl_list + i;
                        if (tpl_link < tpl->min_size) {
                                printk(KERN_ERR
-                                      "%s: bad CIS tuple 0x%02x (length = %u, expected >= %u)\n",
+                                      "%s: bad CIS tuple 0x%02x"
+                                      " (length = %u, expected >= %u)\n",
                                       mmc_hostname(card->host),
                                       tpl_code, tpl_link, tpl->min_size);
                                ret = -EINVAL;
@@ -275,7 +284,30 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
                                ret = tpl->parse(card, func,
                                                 this->data, tpl_link);
                        }
-                       kfree(this);
+                       /*
+                        * We don't need the tuple anymore if it was
+                        * successfully parsed by the SDIO core or if it is
+                        * not going to be parsed by SDIO drivers.
+                        */
+                       if (!ret || ret != -EILSEQ)
+                               kfree(this);
+               } else {
+                       /* unknown tuple */
+                       ret = -EILSEQ;
+               }
+
+               if (ret == -EILSEQ) {
+                       /* this tuple is unknown to the core or whitelisted */
+                       this->next = NULL;
+                       this->code = tpl_code;
+                       this->size = tpl_link;
+                       *prev = this;
+                       prev = &this->next;
+                       printk(KERN_DEBUG
+                              "%s: queuing CIS tuple 0x%02x length %u\n",
+                              mmc_hostname(card->host), tpl_code, tpl_link);
+                       /* keep on analyzing tuples */
+                       ret = 0;
                }
 
                ptr += tpl_link;
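Whitelisted FUNCE tuples and tuples unknown to the core are now kept on the function's tuple list instead of being parsed or rejected here, so an SDIO function driver can interpret them itself. A sketch of how such a driver might look one up; the list head name (func->tuples), the struct name and the 0x22 CISTPL_FUNCE code are assumptions not shown in this hunk:

static struct sdio_func_tuple *find_funce(struct sdio_func *func,
					  unsigned char type)
{
	struct sdio_func_tuple *t;

	for (t = func->tuples; t; t = t->next)
		if (t->code == 0x22 /* CISTPL_FUNCE */ &&
		    t->size >= 1 && t->data[0] == type)
			return t;
	return NULL;
}

/* e.g. find_funce(func, 4) for the Broadcom LAN node ID tuple */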
index 7cb057f3f883837e1fc05e459eef0f3b8219201f..432ae8358c86953a39b9584dce1ebdc085fd30f7 100644 (file)
@@ -276,6 +276,47 @@ config MMC_S3C
 
          If unsure, say N.
 
+config MMC_S3C_HW_SDIO_IRQ
+       bool "Hardware support for SDIO IRQ"
+       depends on MMC_S3C
+       help
+         Enable the hardware support for SDIO interrupts instead of using
+         the generic polling code.
+
+choice
+       prompt "Samsung S3C SD/MMC transfer code"
+       depends on MMC_S3C
+
+config MMC_S3C_PIO
+       bool "Use PIO transfers only"
+       help
+         Use PIO to transfer data between memory and the hardware.
+
+         PIO is slower than DMA as it requires CPU instructions to
+         move the data. This has been the traditional default for
+         the S3C MCI driver.
+
+config MMC_S3C_DMA
+       bool "Use DMA transfers only (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       help
+         Use DMA to transfer data between memory and the hardware.
+
+         Currently, the DMA support in this driver seems to not be
+         working properly and needs to be debugged before this
+         option is useful.
+
+config MMC_S3C_PIODMA
+       bool "Support for both PIO and DMA (EXPERIMENTAL)"
+       help
+         Compile both the PIO and DMA transfer routines into the
+         driver and let the platform select at run-time which one
+         is best.
+
+         See notes for the DMA option.
+
+endchoice
+
 config MMC_SDRICOH_CS
        tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
        depends on EXPERIMENTAL && PCI && PCMCIA
index 3d1e5329da12b9f84a141f544c0aaac8e0f8d92c..705a5894a6bbae304a76b37f649ee0be474b89c4 100644 (file)
@@ -678,7 +678,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
        writel(0, host->base + MMCIMASK1);
        writel(0xfff, host->base + MMCICLEAR);
 
-#ifdef CONFIG_GPIOLIB
        if (gpio_is_valid(plat->gpio_cd)) {
                ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
                if (ret == 0)
@@ -697,7 +696,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
                else if (ret != -ENOSYS)
                        goto err_gpio_wp;
        }
-#endif
 
        ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
        if (ret)
index 8c08cd7efa7f1a9073e6a80bba0dc8696632b2ce..99b74a3510203a2e258d83c9f9f4d9aa4f9db816 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/mmc/host.h>
 #include <linux/platform_device.h>
 #include <linux/cpufreq.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 #include <linux/gpio.h>
 #include <linux/irq.h>
 #include <linux/io.h>
@@ -58,8 +60,6 @@ static const int dbgmap_debug = dbg_err | dbg_debug;
                dev_dbg(&host->pdev->dev, args);  \
        } while (0)
 
-#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)
-
 static struct s3c2410_dma_client s3cmci_dma_client = {
        .name           = "s3c-mci",
 };
@@ -164,6 +164,40 @@ static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
 
 #endif /* CONFIG_MMC_DEBUG */
 
+/**
+ * s3cmci_host_usedma - return whether the host is using dma or pio
+ * @host: The host state
+ *
+ * Return true if the host is using DMA to transfer data, else false
+ * to use PIO mode. Will return static data depending on the driver
+ * configuration.
+ */
+static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
+{
+#ifdef CONFIG_MMC_S3C_PIO
+       return false;
+#elif defined(CONFIG_MMC_S3C_DMA)
+       return true;
+#else
+       return host->dodma;
+#endif
+}
+
+/**
+ * s3cmci_host_canpio - return true if host has pio code available
+ *
+ * Return true if the driver has been compiled with the PIO support code
+ * available.
+ */
+static inline bool s3cmci_host_canpio(void)
+{
+#ifdef CONFIG_MMC_S3C_PIO
+       return true;
+#else
+       return false;
+#endif
+}
+
 static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
 {
        u32 newmask;
@@ -190,7 +224,33 @@ static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
 
 static inline void clear_imask(struct s3cmci_host *host)
 {
-       writel(0, host->base + host->sdiimsk);
+       u32 mask = readl(host->base + host->sdiimsk);
+
+       /* preserve the SDIO IRQ mask state */
+       mask &= S3C2410_SDIIMSK_SDIOIRQ;
+       writel(mask, host->base + host->sdiimsk);
+}
+
+/**
+ * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
+ * @host: The host to check.
+ *
+ * Test to see if the SDIO interrupt is being signalled in case the
+ * controller has failed to re-detect a card interrupt. Read GPE8 and
+ * see if it is low and if so, signal an SDIO interrupt.
+ *
+ * This is currently called if a request is finished (we assume that the
+ * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
+ * already being indicated.
+ */
+static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
+{
+       if (host->sdio_irqen) {
+               if (gpio_get_value(S3C2410_GPE(8)) == 0) {
+                       printk(KERN_DEBUG "%s: signalling irq\n", __func__);
+                       mmc_signal_sdio_irq(host->mmc);
+               }
+       }
 }
 
 static inline int get_data_buffer(struct s3cmci_host *host,
@@ -238,6 +298,64 @@ static inline u32 fifo_free(struct s3cmci_host *host)
        return 63 - fifostat;
 }
 
+/**
+ * s3cmci_enable_irq - enable IRQ, after having disabled it.
+ * @host: The device state.
+ * @more: True if more IRQs are expected from transfer.
+ *
+ * Enable the main IRQ if needed after it has been disabled.
+ *
+ * The IRQ can be one of the following states:
+ *     - disabled during IDLE
+ *     - disabled whilst processing data
+ *     - enabled during transfer
+ *     - enabled whilst awaiting SDIO interrupt detection
+ */
+static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
+{
+       unsigned long flags;
+       bool enable = false;
+
+       local_irq_save(flags);
+
+       host->irq_enabled = more;
+       host->irq_disabled = false;
+
+       enable = more | host->sdio_irqen;
+
+       if (host->irq_state != enable) {
+               host->irq_state = enable;
+
+               if (enable)
+                       enable_irq(host->irq);
+               else
+                       disable_irq(host->irq);
+       }
+
+       local_irq_restore(flags);
+}
+
+/**
+ * s3cmci_disable_irq - disable the main IRQ whilst a transfer is processed
+ */
+static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer);
+
+       host->irq_disabled = transfer;
+
+       if (transfer && host->irq_state) {
+               host->irq_state = false;
+               disable_irq(host->irq);
+       }
+
+       local_irq_restore(flags);
+}
+
 static void do_pio_read(struct s3cmci_host *host)
 {
        int res;
@@ -374,8 +492,7 @@ static void pio_tasklet(unsigned long data)
 {
        struct s3cmci_host *host = (struct s3cmci_host *) data;
 
-
-       disable_irq(host->irq);
+       s3cmci_disable_irq(host, true);
 
        if (host->pio_active == XFER_WRITE)
                do_pio_write(host);
@@ -395,9 +512,10 @@ static void pio_tasklet(unsigned long data)
                                host->mrq->data->error = -EINVAL;
                }
 
+               s3cmci_enable_irq(host, false);
                finalize_request(host);
        } else
-               enable_irq(host->irq);
+               s3cmci_enable_irq(host, true);
 }
 
 /*
@@ -432,17 +550,27 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
        struct s3cmci_host *host = dev_id;
        struct mmc_command *cmd;
        u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
-       u32 mci_cclear, mci_dclear;
+       u32 mci_cclear = 0, mci_dclear;
        unsigned long iflags;
 
+       mci_dsta = readl(host->base + S3C2410_SDIDSTA);
+       mci_imsk = readl(host->base + host->sdiimsk);
+
+       if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
+               if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
+                       mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
+                       writel(mci_dclear, host->base + S3C2410_SDIDSTA);
+
+                       mmc_signal_sdio_irq(host->mmc);
+                       return IRQ_HANDLED;
+               }
+       }
+
        spin_lock_irqsave(&host->complete_lock, iflags);
 
        mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
-       mci_dsta = readl(host->base + S3C2410_SDIDSTA);
        mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
        mci_fsta = readl(host->base + S3C2410_SDIFSTA);
-       mci_imsk = readl(host->base + host->sdiimsk);
-       mci_cclear = 0;
        mci_dclear = 0;
 
        if ((host->complete_what == COMPLETION_NONE) ||
@@ -466,7 +594,7 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
                goto irq_out;
        }
 
-       if (!host->dodma) {
+       if (!s3cmci_host_usedma(host)) {
                if ((host->pio_active == XFER_WRITE) &&
                    (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
 
@@ -673,6 +801,7 @@ static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch,
        dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n",
                size, mci_dsta, mci_dcnt);
 
+       host->dma_complete = 1;
        host->complete_what = COMPLETION_FINALIZE;
 
 out:
@@ -683,9 +812,9 @@ out:
 fail_request:
        host->mrq->data->error = -EINVAL;
        host->complete_what = COMPLETION_FINALIZE;
-       writel(0, host->base + host->sdiimsk);
-       goto out;
+       clear_imask(host);
 
+       goto out;
 }
 
 static void finalize_request(struct s3cmci_host *host)
@@ -702,8 +831,9 @@ static void finalize_request(struct s3cmci_host *host)
 
        if (cmd->data && (cmd->error == 0) &&
            (cmd->data->error == 0)) {
-               if (host->dodma && (!host->dma_complete)) {
-                       dbg(host, dbg_dma, "DMA Missing!\n");
+               if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
+                       dbg(host, dbg_dma, "DMA Missing (%d)!\n",
+                           host->dma_complete);
                        return;
                }
        }
@@ -728,7 +858,7 @@ static void finalize_request(struct s3cmci_host *host)
        writel(0, host->base + S3C2410_SDICMDARG);
        writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
        writel(0, host->base + S3C2410_SDICMDCON);
-       writel(0, host->base + host->sdiimsk);
+       clear_imask(host);
 
        if (cmd->data && cmd->error)
                cmd->data->error = cmd->error;
@@ -754,7 +884,7 @@ static void finalize_request(struct s3cmci_host *host)
        /* If we had an error while transfering data we flush the
         * DMA channel and the fifo to clear out any garbage. */
        if (mrq->data->error != 0) {
-               if (host->dodma)
+               if (s3cmci_host_usedma(host))
                        s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
 
                if (host->is2440) {
@@ -776,6 +906,8 @@ static void finalize_request(struct s3cmci_host *host)
 request_done:
        host->complete_what = COMPLETION_NONE;
        host->mrq = NULL;
+
+       s3cmci_check_sdio_irq(host);
        mmc_request_done(host->mmc, mrq);
 }
 
@@ -872,7 +1004,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
 
        dcon  = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
 
-       if (host->dodma)
+       if (s3cmci_host_usedma(host))
                dcon |= S3C2410_SDIDCON_DMAEN;
 
        if (host->bus_width == MMC_BUS_WIDTH_4)
@@ -950,7 +1082,7 @@ static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
 static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
 {
        int dma_len, i;
-       int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
+       int rw = data->flags & MMC_DATA_WRITE;
 
        BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
 
@@ -958,7 +1090,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
        s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
 
        dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                            (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+                            rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        if (dma_len == 0)
                return -ENOMEM;
@@ -969,11 +1101,11 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
        for (i = 0; i < dma_len; i++) {
                int res;
 
-               dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i,
-                       sg_dma_address(&data->sg[i]),
-                       sg_dma_len(&data->sg[i]));
+               dbg(host, dbg_dma, "enqueue %i: %08x@%u\n", i,
+                   sg_dma_address(&data->sg[i]),
+                   sg_dma_len(&data->sg[i]));
 
-               res = s3c2410_dma_enqueue(host->dma, (void *) host,
+               res = s3c2410_dma_enqueue(host->dma, host,
                                          sg_dma_address(&data->sg[i]),
                                          sg_dma_len(&data->sg[i]));
 
@@ -1018,7 +1150,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
                        return;
                }
 
-               if (host->dodma)
+               if (s3cmci_host_usedma(host))
                        res = s3cmci_prepare_dma(host, cmd->data);
                else
                        res = s3cmci_prepare_pio(host, cmd->data);
@@ -1037,7 +1169,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
        s3cmci_send_command(host, cmd);
 
        /* Enable Interrupt */
-       enable_irq(host->irq);
+       s3cmci_enable_irq(host, true);
 }
 
 static int s3cmci_card_present(struct mmc_host *mmc)
@@ -1049,7 +1181,7 @@ static int s3cmci_card_present(struct mmc_host *mmc)
        if (pdata->gpio_detect == 0)
                return -ENOSYS;
 
-       ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1;
+       ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
        return ret ^ pdata->detect_invert;
 }
 
@@ -1104,12 +1236,12 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        switch (ios->power_mode) {
        case MMC_POWER_ON:
        case MMC_POWER_UP:
-               s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK);
-               s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD);
-               s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0);
-               s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1);
-               s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2);
-               s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3);
+               s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK);
+               s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD);
+               s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0);
+               s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
+               s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2);
+               s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3);
 
                if (host->pdata->set_power)
                        host->pdata->set_power(ios->power_mode, ios->vdd);
@@ -1121,8 +1253,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        case MMC_POWER_OFF:
        default:
-               s3c2410_gpio_setpin(S3C2410_GPE5, 0);
-               s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT);
+               gpio_direction_output(S3C2410_GPE(5), 0);
 
                if (host->is2440)
                        mci_con |= S3C2440_SDICON_SDRESET;
@@ -1168,7 +1299,7 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
        struct s3c24xx_mci_pdata *pdata = host->pdata;
        int ret;
 
-       if (pdata->gpio_wprotect == 0)
+       if (pdata->no_wprotect)
                return 0;
 
        ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
@@ -1179,11 +1310,52 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
        return ret;
 }
 
+static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+       struct s3cmci_host *host = mmc_priv(mmc);
+       unsigned long flags;
+       u32 con;
+
+       local_irq_save(flags);
+
+       con = readl(host->base + S3C2410_SDICON);
+       host->sdio_irqen = enable;
+
+       if (enable == host->sdio_irqen)
+               goto same_state;
+
+       if (enable) {
+               con |= S3C2410_SDICON_SDIOIRQ;
+               enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
+
+               if (!host->irq_state && !host->irq_disabled) {
+                       host->irq_state = true;
+                       enable_irq(host->irq);
+               }
+       } else {
+               disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
+               con &= ~S3C2410_SDICON_SDIOIRQ;
+
+               if (!host->irq_enabled && host->irq_state) {
+                       disable_irq_nosync(host->irq);
+                       host->irq_state = false;
+               }
+       }
+
+       writel(con, host->base + S3C2410_SDICON);
+
+ same_state:
+       local_irq_restore(flags);
+
+       s3cmci_check_sdio_irq(host);
+}
+
 static struct mmc_host_ops s3cmci_ops = {
        .request        = s3cmci_request,
        .set_ios        = s3cmci_set_ios,
        .get_ro         = s3cmci_get_ro,
        .get_cd         = s3cmci_card_present,
+       .enable_sdio_irq = s3cmci_enable_sdio_irq,
 };
 
 static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
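
Illustrative sketch (not part of the patch): the new ->enable_sdio_irq callback registered above is only exercised once an SDIO function driver asks the MMC core for card interrupts via sdio_claim_irq(). Roughly how that consumer side looks; the example_* names are hypothetical:

#include <linux/mmc/sdio_func.h>

/* Called by the MMC core when the card raises an SDIO interrupt. */
static void example_sdio_irq(struct sdio_func *func)
{
	/* acknowledge / service the card function here */
}

/* Typically done from an SDIO function driver's probe(). */
static int example_enable_card_irq(struct sdio_func *func)
{
	int ret;

	sdio_claim_host(func);
	/* this is what ultimately makes the core call the host's
	 * ->enable_sdio_irq(host, 1) callback added above */
	ret = sdio_claim_irq(func, example_sdio_irq);
	sdio_release_host(func);

	return ret;
}
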
@@ -1246,11 +1418,140 @@ static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
 }
 #endif
 
-static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
+
+#ifdef CONFIG_DEBUG_FS
+
+static int s3cmci_state_show(struct seq_file *seq, void *v)
+{
+       struct s3cmci_host *host = seq->private;
+
+       seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base);
+       seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
+       seq_printf(seq, "Prescale = %d\n", host->prescaler);
+       seq_printf(seq, "is2440 = %d\n", host->is2440);
+       seq_printf(seq, "IRQ = %d\n", host->irq);
+       seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
+       seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
+       seq_printf(seq, "IRQ state = %d\n", host->irq_state);
+       seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
+       seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
+       seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
+       seq_printf(seq, "SDIDATA at %d\n", host->sdidata);
+
+       return 0;
+}
+
+static int s3cmci_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, s3cmci_state_show, inode->i_private);
+}
+
+static const struct file_operations s3cmci_fops_state = {
+       .owner          = THIS_MODULE,
+       .open           = s3cmci_state_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }
+
+struct s3cmci_reg {
+       unsigned short  addr;
+       unsigned char   *name;
+} debug_regs[] = {
+       DBG_REG(CON),
+       DBG_REG(PRE),
+       DBG_REG(CMDARG),
+       DBG_REG(CMDCON),
+       DBG_REG(CMDSTAT),
+       DBG_REG(RSP0),
+       DBG_REG(RSP1),
+       DBG_REG(RSP2),
+       DBG_REG(RSP3),
+       DBG_REG(TIMER),
+       DBG_REG(BSIZE),
+       DBG_REG(DCON),
+       DBG_REG(DCNT),
+       DBG_REG(DSTA),
+       DBG_REG(FSTA),
+       {}
+};
+
+static int s3cmci_regs_show(struct seq_file *seq, void *v)
+{
+       struct s3cmci_host *host = seq->private;
+       struct s3cmci_reg *rptr = debug_regs;
+
+       for (; rptr->name; rptr++)
+               seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
+                          readl(host->base + rptr->addr));
+
+       seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));
+
+       return 0;
+}
+
+static int s3cmci_regs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, s3cmci_regs_show, inode->i_private);
+}
+
+static const struct file_operations s3cmci_fops_regs = {
+       .owner          = THIS_MODULE,
+       .open           = s3cmci_regs_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static void s3cmci_debugfs_attach(struct s3cmci_host *host)
+{
+       struct device *dev = &host->pdev->dev;
+
+       host->debug_root = debugfs_create_dir(dev_name(dev), NULL);
+       if (IS_ERR(host->debug_root)) {
+               dev_err(dev, "failed to create debugfs root\n");
+               return;
+       }
+
+       host->debug_state = debugfs_create_file("state", 0444,
+                                               host->debug_root, host,
+                                               &s3cmci_fops_state);
+
+       if (IS_ERR(host->debug_state))
+               dev_err(dev, "failed to create debug state file\n");
+
+       host->debug_regs = debugfs_create_file("regs", 0444,
+                                              host->debug_root, host,
+                                              &s3cmci_fops_regs);
+
+       if (IS_ERR(host->debug_regs))
+               dev_err(dev, "failed to create debug regs file\n");
+}
+
+static void s3cmci_debugfs_remove(struct s3cmci_host *host)
+{
+       debugfs_remove(host->debug_regs);
+       debugfs_remove(host->debug_state);
+       debugfs_remove(host->debug_root);
+}
+
+#else
+static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
+static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int __devinit s3cmci_probe(struct platform_device *pdev)
 {
        struct s3cmci_host *host;
        struct mmc_host *mmc;
        int ret;
+       int is2440;
+       int i;
+
+       is2440 = platform_get_device_id(pdev)->driver_data;
 
        mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
        if (!mmc) {
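
Illustrative sketch (not part of the patch): the CONFIG_DEBUG_FS hunk above exposes read-only "state" and "regs" files in a per-device debugfs directory named after dev_name(). A small user-space reader; the mount point and directory name below are assumptions (debugfs is commonly mounted at /sys/kernel/debug, and the directory depends on how the SDI platform device is named on the running system):

#include <stdio.h>

static void dump(const char *path)
{
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	/* illustrative paths, see the note above */
	dump("/sys/kernel/debug/s3c2440-sdi/state");
	dump("/sys/kernel/debug/s3c2440-sdi/regs");
	return 0;
}
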
@@ -1258,6 +1559,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
                goto probe_out;
        }
 
+       for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
+               ret = gpio_request(i, dev_name(&pdev->dev));
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get gpio %d\n", i);
+
+                       for (i--; i >= S3C2410_GPE(5); i--)
+                               gpio_free(i);
+
+                       goto probe_free_host;
+               }
+       }
+
        host = mmc_priv(mmc);
        host->mmc       = mmc;
        host->pdev      = pdev;
@@ -1282,11 +1595,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
                host->clk_div   = 2;
        }
 
-       host->dodma             = 0;
        host->complete_what     = COMPLETION_NONE;
        host->pio_active        = XFER_NONE;
 
-       host->dma               = S3CMCI_DMA;
+#ifdef CONFIG_MMC_S3C_PIODMA
+       host->dodma             = host->pdata->dma;
+#endif
 
        host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!host->mem) {
@@ -1294,19 +1608,19 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
                        "failed to get io memory region resouce.\n");
 
                ret = -ENOENT;
-               goto probe_free_host;
+               goto probe_free_gpio;
        }
 
        host->mem = request_mem_region(host->mem->start,
-                                      RESSIZE(host->mem), pdev->name);
+                                      resource_size(host->mem), pdev->name);
 
        if (!host->mem) {
                dev_err(&pdev->dev, "failed to request io memory region.\n");
                ret = -ENOENT;
-               goto probe_free_host;
+               goto probe_free_gpio;
        }
 
-       host->base = ioremap(host->mem->start, RESSIZE(host->mem));
+       host->base = ioremap(host->mem->start, resource_size(host->mem));
        if (!host->base) {
                dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
                ret = -EINVAL;
@@ -1331,31 +1645,60 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
         * ensure we don't lock the system with un-serviceable requests. */
 
        disable_irq(host->irq);
+       host->irq_state = false;
 
-       host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
-
-       if (host->irq_cd >= 0) {
-               if (request_irq(host->irq_cd, s3cmci_irq_cd,
-                               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                               DRIVER_NAME, host)) {
-                       dev_err(&pdev->dev, "can't get card detect irq.\n");
-                       ret = -ENOENT;
+       if (!host->pdata->no_detect) {
+               ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect");
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get detect gpio\n");
                        goto probe_free_irq;
                }
-       } else {
-               dev_warn(&pdev->dev, "host detect has no irq available\n");
-               s3c2410_gpio_cfgpin(host->pdata->gpio_detect,
-                                   S3C2410_GPIO_INPUT);
+
+               host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
+
+               if (host->irq_cd >= 0) {
+                       if (request_irq(host->irq_cd, s3cmci_irq_cd,
+                                       IRQF_TRIGGER_RISING |
+                                       IRQF_TRIGGER_FALLING,
+                                       DRIVER_NAME, host)) {
+                               dev_err(&pdev->dev,
+                                       "can't get card detect irq.\n");
+                               ret = -ENOENT;
+                               goto probe_free_gpio_cd;
+                       }
+               } else {
+                       dev_warn(&pdev->dev,
+                                "host detect has no irq available\n");
+                       gpio_direction_input(host->pdata->gpio_detect);
+               }
+       } else
+               host->irq_cd = -1;
+
+       if (!host->pdata->no_wprotect) {
+               ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp");
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get writeprotect\n");
+                       goto probe_free_irq_cd;
+               }
+
+               gpio_direction_input(host->pdata->gpio_wprotect);
        }
 
-       if (host->pdata->gpio_wprotect)
-               s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
-                                   S3C2410_GPIO_INPUT);
+       /* depending on the dma state, get a dma channel to use. */
 
-       if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) {
-               dev_err(&pdev->dev, "unable to get DMA channel.\n");
-               ret = -EBUSY;
-               goto probe_free_irq_cd;
+       if (s3cmci_host_usedma(host)) {
+               host->dma = s3c2410_dma_request(DMACH_SDI, &s3cmci_dma_client,
+                                               host);
+               if (host->dma < 0) {
+                       dev_err(&pdev->dev, "cannot get DMA channel.\n");
+                       if (!s3cmci_host_canpio()) {
+                               ret = -EBUSY;
+                               goto probe_free_gpio_wp;
+                       } else {
+                               dev_warn(&pdev->dev, "falling back to PIO.\n");
+                               host->dodma = 0;
+                       }
+               }
        }
 
        host->clk = clk_get(&pdev->dev, "sdi");
@@ -1363,7 +1706,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
                dev_err(&pdev->dev, "failed to find clock source.\n");
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
-               goto probe_free_host;
+               goto probe_free_dma;
        }
 
        ret = clk_enable(host->clk);
@@ -1376,7 +1719,11 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
 
        mmc->ops        = &s3cmci_ops;
        mmc->ocr_avail  = MMC_VDD_32_33 | MMC_VDD_33_34;
+#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
+       mmc->caps       = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+#else
        mmc->caps       = MMC_CAP_4_BIT_DATA;
+#endif
        mmc->f_min      = host->clk_rate / (host->clk_div * 256);
        mmc->f_max      = host->clk_rate / host->clk_div;
 
@@ -1408,8 +1755,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
                goto free_cpufreq;
        }
 
+       s3cmci_debugfs_attach(host);
+
        platform_set_drvdata(pdev, mmc);
-       dev_info(&pdev->dev, "initialisation done.\n");
+       dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
+                s3cmci_host_usedma(host) ? "dma" : "pio",
+                mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");
 
        return 0;
 
@@ -1422,6 +1773,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
  clk_free:
        clk_put(host->clk);
 
+ probe_free_dma:
+       if (s3cmci_host_usedma(host))
+               s3c2410_dma_free(host->dma, &s3cmci_dma_client);
+
+ probe_free_gpio_wp:
+       if (!host->pdata->no_wprotect)
+               gpio_free(host->pdata->gpio_wprotect);
+
+ probe_free_gpio_cd:
+       if (!host->pdata->no_detect)
+               gpio_free(host->pdata->gpio_detect);
+
  probe_free_irq_cd:
        if (host->irq_cd >= 0)
                free_irq(host->irq_cd, host);
@@ -1433,10 +1796,15 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
        iounmap(host->base);
 
  probe_free_mem_region:
-       release_mem_region(host->mem->start, RESSIZE(host->mem));
+       release_mem_region(host->mem->start, resource_size(host->mem));
+
+ probe_free_gpio:
+       for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
+               gpio_free(i);
 
  probe_free_host:
        mmc_free_host(mmc);
+
  probe_out:
        return ret;
 }
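
Illustrative sketch (not part of the patch): the new probe_free_* labels above follow the usual kernel unwinding idiom, where each label releases exactly what was acquired before the failing step, in reverse order. A standalone demonstration with made-up acquire/release helpers:

#include <stdio.h>

/* Made-up resources, stubbed so the sketch builds and runs on its own. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }	/* pretend the last step fails */
static void release_b(void) { puts("undo b"); }
static void release_a(void) { puts("undo a"); }

static int example_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto err_out;

	ret = acquire_b();
	if (ret)
		goto err_free_a;

	ret = acquire_c();
	if (ret)
		goto err_free_b;

	return 0;

 err_free_b:			/* unwind strictly in reverse order */
	release_b();
 err_free_a:
	release_a();
 err_out:
	return ret;
}

int main(void)
{
	return example_probe() ? 1 : 0;
}
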
@@ -1449,6 +1817,7 @@ static void s3cmci_shutdown(struct platform_device *pdev)
        if (host->irq_cd >= 0)
                free_irq(host->irq_cd, host);
 
+       s3cmci_debugfs_remove(host);
        s3cmci_cpufreq_deregister(host);
        mmc_remove_host(mmc);
        clk_disable(host->clk);
@@ -1458,104 +1827,102 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
 {
        struct mmc_host         *mmc  = platform_get_drvdata(pdev);
        struct s3cmci_host      *host = mmc_priv(mmc);
+       struct s3c24xx_mci_pdata *pd = host->pdata;
+       int i;
 
        s3cmci_shutdown(pdev);
 
        clk_put(host->clk);
 
        tasklet_disable(&host->pio_tasklet);
-       s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);
+
+       if (s3cmci_host_usedma(host))
+               s3c2410_dma_free(host->dma, &s3cmci_dma_client);
 
        free_irq(host->irq, host);
 
+       if (!pd->no_wprotect)
+               gpio_free(pd->gpio_wprotect);
+
+       if (!pd->no_detect)
+               gpio_free(pd->gpio_detect);
+
+       for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
+               gpio_free(i);
+
+
        iounmap(host->base);
-       release_mem_region(host->mem->start, RESSIZE(host->mem));
+       release_mem_region(host->mem->start, resource_size(host->mem));
 
        mmc_free_host(mmc);
        return 0;
 }
 
-static int __devinit s3cmci_2410_probe(struct platform_device *dev)
-{
-       return s3cmci_probe(dev, 0);
-}
+static struct platform_device_id s3cmci_driver_ids[] = {
+       {
+               .name   = "s3c2410-sdi",
+               .driver_data    = 0,
+       }, {
+               .name   = "s3c2412-sdi",
+               .driver_data    = 1,
+       }, {
+               .name   = "s3c2440-sdi",
+               .driver_data    = 1,
+       },
+       { }
+};
 
-static int __devinit s3cmci_2412_probe(struct platform_device *dev)
-{
-       return s3cmci_probe(dev, 1);
-}
+MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
 
-static int __devinit s3cmci_2440_probe(struct platform_device *dev)
-{
-       return s3cmci_probe(dev, 1);
-}
 
 #ifdef CONFIG_PM
 
-static int s3cmci_suspend(struct platform_device *dev, pm_message_t state)
+static int s3cmci_suspend(struct device *dev)
 {
-       struct mmc_host *mmc = platform_get_drvdata(dev);
+       struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
+       struct pm_message event = { PM_EVENT_SUSPEND };
 
-       return  mmc_suspend_host(mmc, state);
+       return mmc_suspend_host(mmc, event);
 }
 
-static int s3cmci_resume(struct platform_device *dev)
+static int s3cmci_resume(struct device *dev)
 {
-       struct mmc_host *mmc = platform_get_drvdata(dev);
+       struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
 
        return mmc_resume_host(mmc);
 }
 
-#else /* CONFIG_PM */
-#define s3cmci_suspend NULL
-#define s3cmci_resume NULL
-#endif /* CONFIG_PM */
-
-
-static struct platform_driver s3cmci_2410_driver = {
-       .driver.name    = "s3c2410-sdi",
-       .driver.owner   = THIS_MODULE,
-       .probe          = s3cmci_2410_probe,
-       .remove         = __devexit_p(s3cmci_remove),
-       .shutdown       = s3cmci_shutdown,
+static struct dev_pm_ops s3cmci_pm = {
        .suspend        = s3cmci_suspend,
        .resume         = s3cmci_resume,
 };
 
-static struct platform_driver s3cmci_2412_driver = {
-       .driver.name    = "s3c2412-sdi",
-       .driver.owner   = THIS_MODULE,
-       .probe          = s3cmci_2412_probe,
-       .remove         = __devexit_p(s3cmci_remove),
-       .shutdown       = s3cmci_shutdown,
-       .suspend        = s3cmci_suspend,
-       .resume         = s3cmci_resume,
-};
+#define s3cmci_pm_ops &s3cmci_pm
+#else /* CONFIG_PM */
+#define s3cmci_pm_ops NULL
+#endif /* CONFIG_PM */
 
-static struct platform_driver s3cmci_2440_driver = {
-       .driver.name    = "s3c2440-sdi",
-       .driver.owner   = THIS_MODULE,
-       .probe          = s3cmci_2440_probe,
+
+static struct platform_driver s3cmci_driver = {
+       .driver = {
+               .name   = "s3c-sdi",
+               .owner  = THIS_MODULE,
+               .pm     = s3cmci_pm_ops,
+       },
+       .id_table       = s3cmci_driver_ids,
+       .probe          = s3cmci_probe,
        .remove         = __devexit_p(s3cmci_remove),
        .shutdown       = s3cmci_shutdown,
-       .suspend        = s3cmci_suspend,
-       .resume         = s3cmci_resume,
 };
 
-
 static int __init s3cmci_init(void)
 {
-       platform_driver_register(&s3cmci_2410_driver);
-       platform_driver_register(&s3cmci_2412_driver);
-       platform_driver_register(&s3cmci_2440_driver);
-       return 0;
+       return platform_driver_register(&s3cmci_driver);
 }
 
 static void __exit s3cmci_exit(void)
 {
-       platform_driver_unregister(&s3cmci_2410_driver);
-       platform_driver_unregister(&s3cmci_2412_driver);
-       platform_driver_unregister(&s3cmci_2440_driver);
+       platform_driver_unregister(&s3cmci_driver);
 }
 
 module_init(s3cmci_init);
@@ -1564,6 +1931,3 @@ module_exit(s3cmci_exit);
 MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
-MODULE_ALIAS("platform:s3c2410-sdi");
-MODULE_ALIAS("platform:s3c2412-sdi");
-MODULE_ALIAS("platform:s3c2440-sdi");
index ca1ba3d58cfd545a9c730f56135fb99506391e94..c76b53dbeb6179a3545bbb26b7aa87d50302477e 100644 (file)
@@ -8,9 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-/* FIXME: DMA Resource management ?! */
-#define S3CMCI_DMA 0
-
 enum s3cmci_waitfor {
        COMPLETION_NONE,
        COMPLETION_FINALIZE,
@@ -42,6 +39,11 @@ struct s3cmci_host {
        int                     dodma;
        int                     dmatogo;
 
+       bool                    irq_disabled;
+       bool                    irq_enabled;
+       bool                    irq_state;
+       int                     sdio_irqen;
+
        struct mmc_request      *mrq;
        int                     cmd_is_stop;
 
@@ -68,6 +70,12 @@ struct s3cmci_host {
        unsigned int            ccnt, dcnt;
        struct tasklet_struct   pio_tasklet;
 
+#ifdef CONFIG_DEBUG_FS
+       struct dentry           *debug_root;
+       struct dentry           *debug_state;
+       struct dentry           *debug_regs;
+#endif
+
 #ifdef CONFIG_CPU_FREQ
        struct notifier_block   freq_transition;
 #endif
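
Illustrative sketch (not part of the patch): the new no_detect/no_wprotect handling and the gpiolib conversion in s3cmci.c are driven by fields of struct s3c24xx_mci_pdata supplied by board code. A hypothetical board snippet; the header paths and GPIO numbers are assumptions, not taken from this patch:

#include <mach/gpio.h>		/* assumed location of the S3C2410_GPx() macros */
#include <plat/mci.h>		/* assumed location of struct s3c24xx_mci_pdata */

static struct s3c24xx_mci_pdata example_mci_pdata = {
	.no_detect	= 0,			/* card-detect GPIO is wired up */
	.no_wprotect	= 0,			/* write-protect GPIO is wired up */
	.gpio_detect	= S3C2410_GPG(8),	/* illustrative pins */
	.gpio_wprotect	= S3C2410_GPH(8),
	.detect_invert	= 0,
	.set_power	= NULL,			/* or a board-specific power hook */
};
/* board code would attach this as the SDI platform device's platform_data */
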
index 0acbf4f5be50d31e31ccbce8d5e51db854e57f22..8ca17a3e96eaa0e85251ecf861d23673b9df3927 100644 (file)
@@ -32,14 +32,6 @@ struct mtd_blkcore_priv {
        spinlock_t queue_lock;
 };
 
-static int blktrans_discard_request(struct request_queue *q,
-                                   struct request *req)
-{
-       req->cmd_type = REQ_TYPE_LINUX_BLOCK;
-       req->cmd[0] = REQ_LB_OP_DISCARD;
-       return 0;
-}
-
 static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
@@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 
        buf = req->buffer;
 
-       if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-           req->cmd[0] == REQ_LB_OP_DISCARD)
-               return tr->discard(dev, block, nsect);
-
        if (!blk_fs_request(req))
                return -EIO;
 
@@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
            get_capacity(req->rq_disk))
                return -EIO;
 
+       if (blk_discard_rq(req))
+               return tr->discard(dev, block, nsect);
+
        switch(rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
@@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
        tr->blkcore_priv->rq->queuedata = tr;
        blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
        if (tr->discard)
-               blk_queue_set_discard(tr->blkcore_priv->rq,
-                                     blktrans_discard_request);
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+                                       tr->blkcore_priv->rq);
 
        tr->blkshift = ffs(tr->blksize) - 1;
 
index b9eeadf01b749cb9ae223ee6f63ba63e685db98e..975e25b19ebe8a19502366c0c2763b3f6298058b 100644 (file)
@@ -805,52 +805,54 @@ static void poll_vortex(struct net_device *dev)
 
 #ifdef CONFIG_PM
 
-static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vortex_suspend(struct device *dev)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *ndev = pci_get_drvdata(pdev);
+
+       if (!ndev || !netif_running(ndev))
+               return 0;
+
+       netif_device_detach(ndev);
+       vortex_down(ndev, 1);
 
-       if (dev && netdev_priv(dev)) {
-               if (netif_running(dev)) {
-                       netif_device_detach(dev);
-                       vortex_down(dev, 1);
-                       disable_irq(dev->irq);
-               }
-               pci_save_state(pdev);
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-               pci_disable_device(pdev);
-               pci_set_power_state(pdev, pci_choose_state(pdev, state));
-       }
        return 0;
 }
 
-static int vortex_resume(struct pci_dev *pdev)
+static int vortex_resume(struct device *dev)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct vortex_private *vp = netdev_priv(dev);
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct net_device *ndev = pci_get_drvdata(pdev);
        int err;
 
-       if (dev && vp) {
-               pci_set_power_state(pdev, PCI_D0);
-               pci_restore_state(pdev);
-               err = pci_enable_device(pdev);
-               if (err) {
-                       pr_warning("%s: Could not enable device\n",
-                               dev->name);
-                       return err;
-               }
-               pci_set_master(pdev);
-               if (netif_running(dev)) {
-                       err = vortex_up(dev);
-                       if (err)
-                               return err;
-                       enable_irq(dev->irq);
-                       netif_device_attach(dev);
-               }
-       }
+       if (!ndev || !netif_running(ndev))
+               return 0;
+
+       err = vortex_up(ndev);
+       if (err)
+               return err;
+
+       netif_device_attach(ndev);
+
        return 0;
 }
 
-#endif /* CONFIG_PM */
+static struct dev_pm_ops vortex_pm_ops = {
+       .suspend = vortex_suspend,
+       .resume = vortex_resume,
+       .freeze = vortex_suspend,
+       .thaw = vortex_resume,
+       .poweroff = vortex_suspend,
+       .restore = vortex_resume,
+};
+
+#define VORTEX_PM_OPS (&vortex_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define VORTEX_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
 
 #ifdef CONFIG_EISA
 static struct eisa_device_id vortex_eisa_ids[] = {
@@ -3199,10 +3201,7 @@ static struct pci_driver vortex_driver = {
        .probe          = vortex_init_one,
        .remove         = __devexit_p(vortex_remove_one),
        .id_table       = vortex_pci_tbl,
-#ifdef CONFIG_PM
-       .suspend        = vortex_suspend,
-       .resume         = vortex_resume,
-#endif
+       .driver.pm      = VORTEX_PM_OPS,
 };
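
Illustrative sketch (not part of the patch): the conversion above hangs a struct dev_pm_ops off pci_driver.driver.pm instead of the legacy .suspend/.resume hooks; under the new framework the PCI core is expected to take care of pci_save_state()/pci_set_power_state() itself, which is why those driver-side calls are dropped. The minimal shape, with hypothetical example_* names:

#include <linux/pci.h>

static int example_suspend(struct device *dev)
{
	/* quiesce the device; PCI state handling is left to the core */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}

static struct dev_pm_ops example_pm_ops = {
	.suspend	= example_suspend,
	.resume		= example_resume,
	.freeze		= example_suspend,
	.thaw		= example_resume,
	.poweroff	= example_suspend,
	.restore	= example_resume,
};

static struct pci_driver example_driver = {
	.name		= "example",
	/* .id_table, .probe, .remove as usual */
	.driver.pm	= &example_pm_ops,
};
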
 
 
index 2bea67c134f022456a15fdad71fcd3f089ca52d2..712776089b4600349c7e6cdd6148e63d819b53fa 100644 (file)
@@ -1738,6 +1738,13 @@ config KS8851
        help
          SPI driver for Micrel KS8851 SPI attached network chip.
 
+config KS8851_MLL
+       tristate "Micrel KS8851 MLL"
+       depends on HAS_IOMEM
+       help
+         This platform driver is for Micrel KS8851 Address/data bus
+         multiplexed network chip.
+
 config VIA_RHINE
        tristate "VIA Rhine support"
        depends on NET_PCI && PCI
index ae8cd30f13d6548690acdf90b114e1190f6ab465..d866b8cf65d19b184574dbb3ba3463899e31ba4a 100644 (file)
@@ -89,6 +89,7 @@ obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_KS8842)   += ks8842.o
 obj-$(CONFIG_KS8851)   += ks8851.o
+obj-$(CONFIG_KS8851_MLL)       += ks8851_mll.o
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
 obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
index fdf5937233fc9e91c2a9d6c8e3ac32f2e37dfea4..04f63c77071dfc17c7e75a335c92768e1c85d236 100644 (file)
@@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
                ps->rx_errors++;
                if (status & RX_MISSED_FRAME)
                        ps->rx_missed_errors++;
-               if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
+               if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
                        ps->rx_length_errors++;
                if (status & RX_CRC_ERROR)
                        ps->rx_crc_errors++;
@@ -794,8 +794,6 @@ static int au1000_rx(struct net_device *dev)
                                        printk("rx len error\n");
                                if (status & RX_U_CNTRL_FRAME)
                                        printk("rx u control frame\n");
-                               if (status & RX_MISSED_FRAME)
-                                       printk("rx miss\n");
                        }
                }
                prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
index 09d270913c5041f866537f9e7c4ae6d3a3d976c8..ba29dc319b34860b651696b929b8af908b0b8428 100644 (file)
@@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
                if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
                        break;
                udelay(1);
-       } while (limit-- >= 0);
+       } while (limit-- > 0);
 
        return (limit < 0) ? 1 : 0;
 }
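
Illustrative sketch (not part of the patch): the one-character change above matters because, with "limit-- >= 0", the loop runs one extra pass after limit reaches zero, and a success detected on that pass leaves limit at -1, so the caller's "(limit < 0) ? 1 : 0" test reports a timeout even though the hardware did respond. A standalone demonstration:

#include <stdio.h>

/* Mirrors the old and new loop forms; ready_after simulates on which poll the
 * hardware becomes ready, and *saw_ready records whether the loop observed it. */
static int poll_old(int limit, int ready_after, int *saw_ready)
{
	int polls = 0;

	*saw_ready = 0;
	do {
		if (++polls >= ready_after) {
			*saw_ready = 1;
			break;
		}
	} while (limit-- >= 0);

	return (limit < 0) ? 1 : 0;		/* 1 means "timed out" */
}

static int poll_new(int limit, int ready_after, int *saw_ready)
{
	int polls = 0;

	*saw_ready = 0;
	do {
		if (++polls >= ready_after) {
			*saw_ready = 1;
			break;
		}
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

int main(void)
{
	int ready, timed_out;

	/* ready on the 5th poll with a limit of 3: the old form still runs
	 * that extra pass, sees the device ready, yet reports a timeout */
	timed_out = poll_old(3, 5, &ready);
	printf("old form: saw_ready=%d timed_out=%d\n", ready, timed_out);

	timed_out = poll_new(3, 5, &ready);
	printf("new form: saw_ready=%d timed_out=%d\n", ready, timed_out);

	return 0;
}
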
index 684c6fe24c8d538daa5145c803ac3c66e717fec2..a80da0e14a52b31c22706e641290ef2da1b7c455 100644 (file)
@@ -258,6 +258,7 @@ struct be_adapter {
        bool link_up;
        u32 port_num;
        bool promiscuous;
+       u32 cap;
 };
 
 extern const struct ethtool_ops be_ethtool_ops;
index 3dd76c4170bfc000657f4b65713b662d20c8de04..89876ade5e33c93f584ea84878c72da098643955 100644 (file)
@@ -1068,7 +1068,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
 }
 
 /* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
@@ -1088,6 +1088,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
+               *cap = le32_to_cpu(resp->function_cap);
        }
 
        spin_unlock(&adapter->mbox_lock);
@@ -1128,7 +1129,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
-       req = embedded_payload(wrb);
        sge = nonembedded_sgl(wrb);
 
        be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
index 93e432f3d926f784ecf586375ceb1c0c54c76183..a86f917f85f427bc18ea27d79f7062f406107ad9 100644 (file)
@@ -62,7 +62,7 @@ enum {
        MCC_STATUS_QUEUE_FLUSHING = 0x4,
 /* The command is completing with a DMA error */
        MCC_STATUS_DMA_FAILED = 0x5,
-       MCC_STATUS_NOT_SUPPORTED = 0x66
+       MCC_STATUS_NOT_SUPPORTED = 66
 };
 
 #define CQE_STATUS_COMPL_MASK          0xFFFF
@@ -760,7 +760,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
                        u32 tx_fc, u32 rx_fc);
 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
                        u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num);
+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
+                       u32 *port_num, u32 *cap);
 extern int be_cmd_reset_function(struct be_adapter *adapter);
 extern int be_process_mcc(struct be_adapter *adapter);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
index 11445df3dbc03aa6bdced9515cf4cca22917acce..cda5bf2fc50acf5b07406eff6ff488c532f67dbf 100644 (file)
@@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = {
        .get_rx_csum = be_get_rx_csum,
        .set_rx_csum = be_set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
-       .set_tx_csum = ethtool_op_set_tx_csum,
+       .set_tx_csum = ethtool_op_set_tx_hw_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_tso = ethtool_op_get_tso,
index 409cf05959034737273f6b2cb8371a2347102059..6d5e81f7046f2f769c5f5f78026927f6ad6f2eca 100644 (file)
@@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter)
        /* no space available in linux */
        dev_stats->tx_dropped = 0;
 
-       dev_stats->multicast = port_stats->tx_multicastframes;
+       dev_stats->multicast = port_stats->rx_multicast_frames;
        dev_stats->collisions = 0;
 
        /* detailed tx_errors */
@@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_eth_rx_compl *rxcp)
 {
        struct sk_buff *skb;
-       u32 vtp, vid;
+       u32 vlanf, vid;
+       u8 vtm;
 
-       vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+       vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+       vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+       /* vlanf could be wrongly set in some cards.
+        * ignore if vtm is not set */
+       if ((adapter->cap == 0x400) && !vtm)
+               vlanf = 0;
 
        skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
        if (!skb) {
@@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
        skb->protocol = eth_type_trans(skb, adapter->netdev);
        skb->dev = adapter->netdev;
 
-       if (vtp) {
+       if (vlanf) {
                if (!adapter->vlan_grp || adapter->num_vlans == 0) {
                        kfree_skb(skb);
                        return;
@@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        struct be_eq_obj *eq_obj =  &adapter->rx_eq;
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
+       u8 vtm;
 
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
+       vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+       /* vlanf could be wrongly set in some cards.
+        * ignore if vtm is not set */
+       if ((adapter->cap == 0x400) && !vtm)
+               vlanf = 0;
 
        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
@@ -1885,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev)
        struct be_adapter *adapter = netdev_priv(netdev);
 
        netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
-               NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
-               NETIF_F_IPV6_CSUM | NETIF_F_GRO;
+               NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
+               NETIF_F_GRO;
 
        netdev->flags |= IFF_MULTICAST;
 
@@ -2045,7 +2059,8 @@ static int be_hw_up(struct be_adapter *adapter)
        if (status)
                return status;
 
-       status = be_cmd_query_fw_cfg(adapter, &adapter->port_num);
+       status = be_cmd_query_fw_cfg(adapter,
+                               &adapter->port_num, &adapter->cap);
        return status;
 }
 
index 6044e12ff9fce99f433f514e5eb182bccc5097ee..ff449de6f3c00ab0eb394640cc56b07ce7e968dc 100644 (file)
@@ -1182,6 +1182,7 @@ static ssize_t bonding_store_primary(struct device *d,
                                       ": %s: Setting %s as primary slave.\n",
                                       bond->dev->name, slave->dev->name);
                                bond->primary_slave = slave;
+                               strcpy(bond->params.primary, slave->dev->name);
                                bond_select_active_slave(bond);
                                goto out;
                        }
index 211c8e9182fc51566fffac1fff201da34bc8ee05..46c87ec7960c83c03d10772a4793629c3cd8904d 100644 (file)
@@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
                        cnic_ulp_init(dev);
                else if (event == NETDEV_UNREGISTER)
                        cnic_ulp_exit(dev);
-               else if (event == NETDEV_UP) {
+
+               if (event == NETDEV_UP) {
                        if (cnic_register_netdev(dev) != 0) {
                                cnic_put(dev);
                                goto done;
index a49235739eef9444d1596df5dfb5248a4aba8a1e..d8b09efdcb5261ec62c28c7b965e63be482d317f 100644 (file)
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION    "2.0.0"
-#define CNIC_MODULE_RELDATE    "May 21, 2009"
+#define CNIC_MODULE_VERSION    "2.0.1"
+#define CNIC_MODULE_RELDATE    "Oct 01, 2009"
 
 #define CNIC_ULP_RDMA          0
 #define CNIC_ULP_ISCSI         1
index 1a4f89c66a263e59a174cbd91b1553c0fc1190ec..42e2b7e21c29e3858804eea0db2b413fffb32b30 100644 (file)
@@ -149,7 +149,6 @@ do {                                                                        \
 
 #define AUTO_ALL_MODES            0
 #define E1000_EEPROM_82544_APM    0x0004
-#define E1000_EEPROM_ICH8_APME    0x0004
 #define E1000_EEPROM_APME         0x0400
 
 #ifndef E1000_MASTER_SLAVE
@@ -293,7 +292,6 @@ struct e1000_adapter {
 
        u64 hw_csum_err;
        u64 hw_csum_good;
-       u64 rx_hdr_split;
        u32 alloc_rx_buff_failed;
        u32 rx_int_delay;
        u32 rx_abs_int_delay;
@@ -317,7 +315,6 @@ struct e1000_adapter {
        struct e1000_rx_ring test_rx_ring;
 
        int msg_enable;
-       bool have_msi;
 
        /* to not mess up cache alignment, always add to the bottom */
        bool tso_force;
index 27f996a2010faeecc478b901682e2dec3e282f49..490b2b7cd3abf77f5dc257bbdb27f7b5d08e2a77 100644 (file)
@@ -82,7 +82,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
        { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
        { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
-       { "rx_header_split", E1000_STAT(rx_hdr_split) },
        { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
        { "tx_smbus", E1000_STAT(stats.mgptc) },
        { "rx_smbus", E1000_STAT(stats.mgprc) },
@@ -114,8 +113,6 @@ static int e1000_get_settings(struct net_device *netdev,
                                   SUPPORTED_1000baseT_Full|
                                   SUPPORTED_Autoneg |
                                   SUPPORTED_TP);
-               if (hw->phy_type == e1000_phy_ife)
-                       ecmd->supported &= ~SUPPORTED_1000baseT_Full;
                ecmd->advertising = ADVERTISED_TP;
 
                if (hw->autoneg == 1) {
@@ -178,14 +175,6 @@ static int e1000_set_settings(struct net_device *netdev,
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       /* When SoL/IDER sessions are active, autoneg/speed/duplex
-        * cannot be changed */
-       if (e1000_check_phy_reset_block(hw)) {
-               DPRINTK(DRV, ERR, "Cannot change link characteristics "
-                       "when SoL/IDER is active.\n");
-               return -EINVAL;
-       }
-
        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
                msleep(1);
 
@@ -330,10 +319,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
        else
                netdev->features &= ~NETIF_F_TSO;
 
-       if (data && (adapter->hw.mac_type > e1000_82547_rev_2))
-               netdev->features |= NETIF_F_TSO6;
-       else
-               netdev->features &= ~NETIF_F_TSO6;
+       netdev->features &= ~NETIF_F_TSO6;
 
        DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
        adapter->tso_force = true;
@@ -441,7 +427,6 @@ static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
        regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
        regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
        if (hw->mac_type >= e1000_82540 &&
-           hw->mac_type < e1000_82571 &&
            hw->media_type == e1000_media_type_copper) {
                regs_buff[26] = er32(MANC);
        }
@@ -554,10 +539,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
        ret_val = e1000_write_eeprom(hw, first_word,
                                     last_word - first_word + 1, eeprom_buff);
 
-       /* Update the checksum over the first part of the EEPROM if needed
-        * and flush shadow RAM for 82573 conrollers */
-       if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
-                               (hw->mac_type == e1000_82573)))
+       /* Update the checksum over the first part of the EEPROM if needed */
+       if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG))
                e1000_update_eeprom_checksum(hw);
 
        kfree(eeprom_buff);
@@ -568,31 +551,12 @@ static void e1000_get_drvinfo(struct net_device *netdev,
                              struct ethtool_drvinfo *drvinfo)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
        char firmware_version[32];
-       u16 eeprom_data;
 
        strncpy(drvinfo->driver,  e1000_driver_name, 32);
        strncpy(drvinfo->version, e1000_driver_version, 32);
 
-       /* EEPROM image version # is reported as firmware version # for
-        * 8257{1|2|3} controllers */
-       e1000_read_eeprom(hw, 5, 1, &eeprom_data);
-       switch (hw->mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-       case e1000_82573:
-       case e1000_80003es2lan:
-       case e1000_ich8lan:
-               sprintf(firmware_version, "%d.%d-%d",
-                       (eeprom_data & 0xF000) >> 12,
-                       (eeprom_data & 0x0FF0) >> 4,
-                       eeprom_data & 0x000F);
-               break;
-       default:
-               sprintf(firmware_version, "N/A");
-       }
-
+       sprintf(firmware_version, "N/A");
        strncpy(drvinfo->fw_version, firmware_version, 32);
        strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
        drvinfo->regdump_len = e1000_get_regs_len(netdev);
@@ -781,21 +745,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        /* The status register is Read Only, so a write should fail.
         * Some bits that get toggled are ignored.
         */
-       switch (hw->mac_type) {
+
        /* there are several bits on newer hardware that are r/w */
-       case e1000_82571:
-       case e1000_82572:
-       case e1000_80003es2lan:
-               toggle = 0x7FFFF3FF;
-               break;
-       case e1000_82573:
-       case e1000_ich8lan:
-               toggle = 0x7FFFF033;
-               break;
-       default:
-               toggle = 0xFFFFF833;
-               break;
-       }
+       toggle = 0xFFFFF833;
 
        before = er32(STATUS);
        value = (er32(STATUS) & toggle);
@@ -810,12 +762,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        /* restore previous status */
        ew32(STATUS, before);
 
-       if (hw->mac_type != e1000_ich8lan) {
-               REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
-               REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
-               REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
-               REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
-       }
+       REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+       REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+       REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+       REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
 
        REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
        REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -830,8 +780,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 
        REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
 
-       before = (hw->mac_type == e1000_ich8lan ?
-                 0x06C3B33E : 0x06DFB3FE);
+       before = 0x06DFB3FE;
        REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
        REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
 
@@ -839,12 +788,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 
                REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
                REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
-               if (hw->mac_type != e1000_ich8lan)
-                       REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+               REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
                REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
                REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
-               value = (hw->mac_type == e1000_ich8lan ?
-                        E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+               value = E1000_RAR_ENTRIES;
                for (i = 0; i < value; i++) {
                        REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
                                         0xFFFFFFFF);
@@ -859,8 +806,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 
        }
 
-       value = (hw->mac_type == e1000_ich8lan ?
-                       E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+       value = E1000_MC_TBL_SIZE;
        for (i = 0; i < value; i++)
                REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
 
@@ -933,9 +879,6 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
        /* Test each interrupt */
        for (; i < 10; i++) {
 
-               if (hw->mac_type == e1000_ich8lan && i == 8)
-                       continue;
-
                /* Interrupt to test */
                mask = 1 << i;
 
@@ -1289,35 +1232,20 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
                e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
                /* autoneg off */
                e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
-       } else if (hw->phy_type == e1000_phy_gg82563)
-               e1000_write_phy_reg(hw,
-                                   GG82563_PHY_KMRN_MODE_CTRL,
-                                   0x1CC);
+       }
 
        ctrl_reg = er32(CTRL);
 
-       if (hw->phy_type == e1000_phy_ife) {
-               /* force 100, set loopback */
-               e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
+       /* force 1000, set loopback */
+       e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
 
-               /* Now set up the MAC to the same speed/duplex as the PHY. */
-               ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
-               ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
-                            E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
-                            E1000_CTRL_SPD_100 |/* Force Speed to 100 */
-                            E1000_CTRL_FD);     /* Force Duplex to FULL */
-       } else {
-               /* force 1000, set loopback */
-               e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
-
-               /* Now set up the MAC to the same speed/duplex as the PHY. */
-               ctrl_reg = er32(CTRL);
-               ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
-               ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
-                            E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
-                            E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
-                            E1000_CTRL_FD);     /* Force Duplex to FULL */
-       }
+       /* Now set up the MAC to the same speed/duplex as the PHY. */
+       ctrl_reg = er32(CTRL);
+       ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+       ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+                       E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+                       E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+                       E1000_CTRL_FD);  /* Force Duplex to FULL */
 
        if (hw->media_type == e1000_media_type_copper &&
           hw->phy_type == e1000_phy_m88)
@@ -1373,14 +1301,8 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
        case e1000_82541_rev_2:
        case e1000_82547:
        case e1000_82547_rev_2:
-       case e1000_82571:
-       case e1000_82572:
-       case e1000_82573:
-       case e1000_80003es2lan:
-       case e1000_ich8lan:
                return e1000_integrated_phy_loopback(adapter);
                break;
-
        default:
                /* Default PHY loopback work is to read the MII
                 * control register and assert bit 14 (loopback mode).
@@ -1409,14 +1331,6 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
                case e1000_82546_rev_3:
                        return e1000_set_phy_loopback(adapter);
                        break;
-               case e1000_82571:
-               case e1000_82572:
-#define E1000_SERDES_LB_ON 0x410
-                       e1000_set_phy_loopback(adapter);
-                       ew32(SCTL, E1000_SERDES_LB_ON);
-                       msleep(10);
-                       return 0;
-                       break;
                default:
                        rctl = er32(RCTL);
                        rctl |= E1000_RCTL_LBM_TCVR;
@@ -1440,26 +1354,12 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
        ew32(RCTL, rctl);
 
        switch (hw->mac_type) {
-       case e1000_82571:
-       case e1000_82572:
-               if (hw->media_type == e1000_media_type_fiber ||
-                   hw->media_type == e1000_media_type_internal_serdes) {
-#define E1000_SERDES_LB_OFF 0x400
-                       ew32(SCTL, E1000_SERDES_LB_OFF);
-                       msleep(10);
-                       break;
-               }
-               /* Fall Through */
        case e1000_82545:
        case e1000_82546:
        case e1000_82545_rev_3:
        case e1000_82546_rev_3:
        default:
                hw->autoneg = true;
-               if (hw->phy_type == e1000_phy_gg82563)
-                       e1000_write_phy_reg(hw,
-                                           GG82563_PHY_KMRN_MODE_CTRL,
-                                           0x180);
                e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
                if (phy_reg & MII_CR_LOOPBACK) {
                        phy_reg &= ~MII_CR_LOOPBACK;
@@ -1560,17 +1460,6 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 
 static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
 {
-       struct e1000_hw *hw = &adapter->hw;
-
-       /* PHY loopback cannot be performed if SoL/IDER
-        * sessions are active */
-       if (e1000_check_phy_reset_block(hw)) {
-               DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
-                       "when SoL/IDER is active.\n");
-               *data = 0;
-               goto out;
-       }
-
        *data = e1000_setup_desc_rings(adapter);
        if (*data)
                goto out;
@@ -1592,13 +1481,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
        *data = 0;
        if (hw->media_type == e1000_media_type_internal_serdes) {
                int i = 0;
-               hw->serdes_link_down = true;
+               hw->serdes_has_link = false;
 
                /* On some blade server designs, link establishment
                 * could take as long as 2-3 minutes */
                do {
                        e1000_check_for_link(hw);
-                       if (!hw->serdes_link_down)
+                       if (hw->serdes_has_link)
                                return *data;
                        msleep(20);
                } while (i++ < 3750);
@@ -1716,15 +1605,11 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
        case E1000_DEV_ID_82545EM_COPPER:
        case E1000_DEV_ID_82546GB_QUAD_COPPER:
        case E1000_DEV_ID_82546GB_PCIE:
-       case E1000_DEV_ID_82571EB_SERDES_QUAD:
                /* these don't support WoL at all */
                wol->supported = 0;
                break;
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
-       case E1000_DEV_ID_82571EB_FIBER:
-       case E1000_DEV_ID_82571EB_SERDES:
-       case E1000_DEV_ID_82571EB_COPPER:
                /* Wake events not supported on port B */
                if (er32(STATUS) & E1000_STATUS_FUNC_1) {
                        wol->supported = 0;
@@ -1733,10 +1618,6 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
                /* return success for non excluded adapter ports */
                retval = 0;
                break;
-       case E1000_DEV_ID_82571EB_QUAD_COPPER:
-       case E1000_DEV_ID_82571EB_QUAD_FIBER:
-       case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
-       case E1000_DEV_ID_82571PT_QUAD_COPPER:
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
                /* quad port adapters only support WoL on port A */
                if (!adapter->quad_port_a) {
@@ -1872,30 +1753,15 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
        if (!data)
                data = INT_MAX;
 
-       if (hw->mac_type < e1000_82571) {
-               if (!adapter->blink_timer.function) {
-                       init_timer(&adapter->blink_timer);
-                       adapter->blink_timer.function = e1000_led_blink_callback;
-                       adapter->blink_timer.data = (unsigned long)adapter;
-               }
-               e1000_setup_led(hw);
-               mod_timer(&adapter->blink_timer, jiffies);
-               msleep_interruptible(data * 1000);
-               del_timer_sync(&adapter->blink_timer);
-       } else if (hw->phy_type == e1000_phy_ife) {
-               if (!adapter->blink_timer.function) {
-                       init_timer(&adapter->blink_timer);
-                       adapter->blink_timer.function = e1000_led_blink_callback;
-                       adapter->blink_timer.data = (unsigned long)adapter;
-               }
-               mod_timer(&adapter->blink_timer, jiffies);
-               msleep_interruptible(data * 1000);
-               del_timer_sync(&adapter->blink_timer);
-               e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
-       } else {
-               e1000_blink_led_start(hw);
-               msleep_interruptible(data * 1000);
+       if (!adapter->blink_timer.function) {
+               init_timer(&adapter->blink_timer);
+               adapter->blink_timer.function = e1000_led_blink_callback;
+               adapter->blink_timer.data = (unsigned long)adapter;
        }
+       e1000_setup_led(hw);
+       mod_timer(&adapter->blink_timer, jiffies);
+       msleep_interruptible(data * 1000);
+       del_timer_sync(&adapter->blink_timer);
 
        e1000_led_off(hw);
        clear_bit(E1000_LED_ON, &adapter->led_status);
index 45ac225a7aaa5c241a90e042ccac8738c8643a67..8d7d87f128275e2d7ba1baf8f100933acad6feb3 100644 (file)
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
-*******************************************************************************/
+ */
 
 /* e1000_hw.c
  * Shared functions for accessing and configuring the MAC
  */
 
-
 #include "e1000_hw.h"
 
-static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
-static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
-static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
-static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
-static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
-static void e1000_release_software_semaphore(struct e1000_hw *hw);
-
-static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
 static s32 e1000_check_downshift(struct e1000_hw *hw);
 static s32 e1000_check_polarity(struct e1000_hw *hw,
                                e1000_rev_polarity *polarity);
 static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
 static void e1000_clear_vfta(struct e1000_hw *hw);
-static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
 static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
                                              bool link_up);
 static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
 static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
-static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
 static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
 static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
                                  u16 *max_length);
-static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
 static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static s32 e1000_get_software_flag(struct e1000_hw *hw);
-static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
-static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
 static s32 e1000_id_led_init(struct e1000_hw *hw);
-static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
-                                                u32 cnf_base_addr,
-                                                u32 cnf_size);
-static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
 static void e1000_init_rx_addrs(struct e1000_hw *hw);
-static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
-static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
-static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
-static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
-static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
-                                  u16 offset, u8 *sum);
-static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
-                                     struct e1000_host_mng_command_header
-                                     *hdr);
-static s32 e1000_mng_write_commit(struct e1000_hw *hw);
-static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
-                                 struct e1000_phy_info *phy_info);
 static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
                                  struct e1000_phy_info *phy_info);
-static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
-                                 u16 *data);
-static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
-                                  u16 *data);
-static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
 static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
                                  struct e1000_phy_info *phy_info);
-static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
-static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
-static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
-                                       u8 byte);
-static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
-static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
-static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
-                               u16 *data);
-static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
-                                u16 data);
-static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
-                                 u16 *data);
-static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
-                                  u16 *data);
-static void e1000_release_software_flag(struct e1000_hw *hw);
 static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
-static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
-static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
 static s32 e1000_wait_autoneg(struct e1000_hw *hw);
 static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
 static s32 e1000_set_phy_type(struct e1000_hw *hw);
@@ -117,12 +63,11 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
 static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
 static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
 static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
-static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
-                                    u16 count);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count);
 static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
 static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
 static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
-                                      u16 words, u16 *data);
+                                 u16 words, u16 *data);
 static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
                                        u16 words, u16 *data);
 static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
@@ -131,7 +76,7 @@ static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
 static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
 static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                                  u16 phy_data);
-static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                                 u16 *phy_data);
 static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
 static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
@@ -140,188 +85,164 @@ static void e1000_standby_eeprom(struct e1000_hw *hw);
 static s32 e1000_set_vco_speed(struct e1000_hw *hw);
 static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
 static s32 e1000_set_phy_mode(struct e1000_hw *hw);
-static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
-static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
-static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
-static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
-static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+                               u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+                                u16 *data);
 
 /* IGP cable length table */
 static const
-u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
-    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-      5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
-      25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
-      40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
-      60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
-      90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
-      100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
-      110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
-
-static const
-u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
-    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
-      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
-      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
-      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
-      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
-      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
-      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
-      104, 109, 114, 118, 121, 124};
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
+       5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+       5, 5, 5, 5, 5, 10, 10, 10, 10, 10, 10, 10,
+       20, 20, 20, 20, 20, 25, 25, 25, 25, 25, 25, 25,
+       30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+       40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60,
+       60, 60, 60, 60, 60, 70, 70, 70, 70, 70, 70, 80,
+       80, 80, 80, 80, 80, 90, 90, 90, 90, 90, 90, 90,
+       90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+       100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110,
+       110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120,
+       120, 120, 120, 120, 120, 120, 120, 120
+};
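As an aside, the table above is indexed with a bounds-checked AGC reading; a minimal sketch of that lookup, assuming the IGP01E1000_AGC_LENGTH_SHIFT constant from e1000_hw.h and a hypothetical helper name (the driver's real per-channel work is done in e1000_get_cable_length()):

	/* Hypothetical helper, not part of this patch: clamp an AGC reading
	 * and map it to an approximate cable length in meters. */
	static u16 igp_cable_length_from_agc(u16 agc_value)
	{
		u16 index = agc_value >> IGP01E1000_AGC_LENGTH_SHIFT;

		if (index >= IGP01E1000_AGC_LENGTH_TABLE_SIZE)
			index = IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1;

		return e1000_igp_cable_length_table[index];
	}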
 
 static DEFINE_SPINLOCK(e1000_eeprom_lock);
 
-/******************************************************************************
- * Set the phy type member in the hw struct.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_set_phy_type - Set the phy type member in the hw struct.
+ * @hw: Struct containing variables accessed by shared code
+ */
 static s32 e1000_set_phy_type(struct e1000_hw *hw)
 {
-    DEBUGFUNC("e1000_set_phy_type");
-
-    if (hw->mac_type == e1000_undefined)
-        return -E1000_ERR_PHY_TYPE;
-
-    switch (hw->phy_id) {
-    case M88E1000_E_PHY_ID:
-    case M88E1000_I_PHY_ID:
-    case M88E1011_I_PHY_ID:
-    case M88E1111_I_PHY_ID:
-        hw->phy_type = e1000_phy_m88;
-        break;
-    case IGP01E1000_I_PHY_ID:
-        if (hw->mac_type == e1000_82541 ||
-            hw->mac_type == e1000_82541_rev_2 ||
-            hw->mac_type == e1000_82547 ||
-            hw->mac_type == e1000_82547_rev_2) {
-            hw->phy_type = e1000_phy_igp;
-            break;
-        }
-    case IGP03E1000_E_PHY_ID:
-        hw->phy_type = e1000_phy_igp_3;
-        break;
-    case IFE_E_PHY_ID:
-    case IFE_PLUS_E_PHY_ID:
-    case IFE_C_E_PHY_ID:
-        hw->phy_type = e1000_phy_ife;
-        break;
-    case GG82563_E_PHY_ID:
-        if (hw->mac_type == e1000_80003es2lan) {
-            hw->phy_type = e1000_phy_gg82563;
-            break;
-        }
-        /* Fall Through */
-    default:
-        /* Should never have loaded on this device */
-        hw->phy_type = e1000_phy_undefined;
-        return -E1000_ERR_PHY_TYPE;
-    }
-
-    return E1000_SUCCESS;
-}
-
-/******************************************************************************
- * IGP phy init script - initializes the GbE PHY
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void e1000_phy_init_script(struct e1000_hw *hw)
-{
-    u32 ret_val;
-    u16 phy_saved_data;
-
-    DEBUGFUNC("e1000_phy_init_script");
-
-    if (hw->phy_init_script) {
-        msleep(20);
-
-        /* Save off the current value of register 0x2F5B to be restored at
-         * the end of this routine. */
-        ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
-
-        /* Disabled the PHY transmitter */
-        e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
-
-        msleep(20);
-
-        e1000_write_phy_reg(hw,0x0000,0x0140);
-
-        msleep(5);
-
-        switch (hw->mac_type) {
-        case e1000_82541:
-        case e1000_82547:
-            e1000_write_phy_reg(hw, 0x1F95, 0x0001);
-
-            e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
-
-            e1000_write_phy_reg(hw, 0x1F79, 0x0018);
-
-            e1000_write_phy_reg(hw, 0x1F30, 0x1600);
-
-            e1000_write_phy_reg(hw, 0x1F31, 0x0014);
-
-            e1000_write_phy_reg(hw, 0x1F32, 0x161C);
-
-            e1000_write_phy_reg(hw, 0x1F94, 0x0003);
-
-            e1000_write_phy_reg(hw, 0x1F96, 0x003F);
-
-            e1000_write_phy_reg(hw, 0x2010, 0x0008);
-            break;
+       DEBUGFUNC("e1000_set_phy_type");
 
-        case e1000_82541_rev_2:
-        case e1000_82547_rev_2:
-            e1000_write_phy_reg(hw, 0x1F73, 0x0099);
-            break;
-        default:
-            break;
-        }
+       if (hw->mac_type == e1000_undefined)
+               return -E1000_ERR_PHY_TYPE;
 
-        e1000_write_phy_reg(hw, 0x0000, 0x3300);
-
-        msleep(20);
-
-        /* Now enable the transmitter */
-        e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
-
-        if (hw->mac_type == e1000_82547) {
-            u16 fused, fine, coarse;
-
-            /* Move to analog registers page */
-            e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
-
-            if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
-                e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+       switch (hw->phy_id) {
+       case M88E1000_E_PHY_ID:
+       case M88E1000_I_PHY_ID:
+       case M88E1011_I_PHY_ID:
+       case M88E1111_I_PHY_ID:
+               hw->phy_type = e1000_phy_m88;
+               break;
+       case IGP01E1000_I_PHY_ID:
+               if (hw->mac_type == e1000_82541 ||
+                   hw->mac_type == e1000_82541_rev_2 ||
+                   hw->mac_type == e1000_82547 ||
+                   hw->mac_type == e1000_82547_rev_2) {
+                       hw->phy_type = e1000_phy_igp;
+                       break;
+               }
+       default:
+               /* Should never have loaded on this device */
+               hw->phy_type = e1000_phy_undefined;
+               return -E1000_ERR_PHY_TYPE;
+       }
 
-                fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
-                coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+       return E1000_SUCCESS;
+}
 
-                if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
-                    coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
-                    fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
-                } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
-                    fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+/**
+ * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_phy_init_script(struct e1000_hw *hw)
+{
+       u32 ret_val;
+       u16 phy_saved_data;
+
+       DEBUGFUNC("e1000_phy_init_script");
+
+       if (hw->phy_init_script) {
+               msleep(20);
+
+               /* Save off the current value of register 0x2F5B to be restored at
+                * the end of this routine. */
+               ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+               /* Disable the PHY transmitter */
+               e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+               msleep(20);
+
+               e1000_write_phy_reg(hw, 0x0000, 0x0140);
+               msleep(5);
+
+               switch (hw->mac_type) {
+               case e1000_82541:
+               case e1000_82547:
+                       e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+                       e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+                       e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+                       e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+                       e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+                       e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+                       e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+                       e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+                       e1000_write_phy_reg(hw, 0x2010, 0x0008);
+                       break;
 
-                fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
-                        (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
-                        (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+               case e1000_82541_rev_2:
+               case e1000_82547_rev_2:
+                       e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+                       break;
+               default:
+                       break;
+               }
 
-                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
-                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
-                                    IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
-            }
-        }
-    }
+               e1000_write_phy_reg(hw, 0x0000, 0x3300);
+               msleep(20);
+
+               /* Now enable the transmitter */
+               e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+               if (hw->mac_type == e1000_82547) {
+                       u16 fused, fine, coarse;
+
+                       /* Move to analog registers page */
+                       e1000_read_phy_reg(hw,
+                                          IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+                                          &fused);
+
+                       if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+                               e1000_read_phy_reg(hw,
+                                                  IGP01E1000_ANALOG_FUSE_STATUS,
+                                                  &fused);
+
+                               fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+                               coarse =
+                                   fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+                               if (coarse >
+                                   IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+                                       coarse -=
+                                           IGP01E1000_ANALOG_FUSE_COARSE_10;
+                                       fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+                               } else if (coarse ==
+                                          IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+                                       fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+                               fused =
+                                   (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+                                   (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+                                   (coarse &
+                                    IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+                               e1000_write_phy_reg(hw,
+                                                   IGP01E1000_ANALOG_FUSE_CONTROL,
+                                                   fused);
+                               e1000_write_phy_reg(hw,
+                                                   IGP01E1000_ANALOG_FUSE_BYPASS,
+                                                   IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+                       }
+               }
+       }
 }
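The coarse/fine fuse arithmetic in the 82547 branch above is easier to follow in isolation; a sketch using the same IGP01E1000_ANALOG_FUSE_* masks (the helper name is made up for illustration, not part of the patch):

	/* Illustrative only: recompute the fuse control word the way the
	 * 82547 branch above does - step coarse/fine down around the
	 * threshold, then splice the fields back together. */
	static u16 igp_adjust_analog_fuse(u16 fused)
	{
		u16 fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
		u16 coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;

		if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
			coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
			fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
		} else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
			fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
		}

		return (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
		       (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
		       (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
	}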
 
-/******************************************************************************
- * Set the mac type member in the hw struct.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_set_mac_type - Set the mac type member in the hw struct.
+ * @hw: Struct containing variables accessed by shared code
+ */
 s32 e1000_set_mac_type(struct e1000_hw *hw)
 {
        DEBUGFUNC("e1000_set_mac_type");
@@ -397,61 +318,12 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
        case E1000_DEV_ID_82547GI:
                hw->mac_type = e1000_82547_rev_2;
                break;
-       case E1000_DEV_ID_82571EB_COPPER:
-       case E1000_DEV_ID_82571EB_FIBER:
-       case E1000_DEV_ID_82571EB_SERDES:
-       case E1000_DEV_ID_82571EB_SERDES_DUAL:
-       case E1000_DEV_ID_82571EB_SERDES_QUAD:
-       case E1000_DEV_ID_82571EB_QUAD_COPPER:
-       case E1000_DEV_ID_82571PT_QUAD_COPPER:
-       case E1000_DEV_ID_82571EB_QUAD_FIBER:
-       case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
-               hw->mac_type = e1000_82571;
-               break;
-       case E1000_DEV_ID_82572EI_COPPER:
-       case E1000_DEV_ID_82572EI_FIBER:
-       case E1000_DEV_ID_82572EI_SERDES:
-       case E1000_DEV_ID_82572EI:
-               hw->mac_type = e1000_82572;
-               break;
-       case E1000_DEV_ID_82573E:
-       case E1000_DEV_ID_82573E_IAMT:
-       case E1000_DEV_ID_82573L:
-               hw->mac_type = e1000_82573;
-               break;
-       case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
-       case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
-       case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
-       case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
-               hw->mac_type = e1000_80003es2lan;
-               break;
-       case E1000_DEV_ID_ICH8_IGP_M_AMT:
-       case E1000_DEV_ID_ICH8_IGP_AMT:
-       case E1000_DEV_ID_ICH8_IGP_C:
-       case E1000_DEV_ID_ICH8_IFE:
-       case E1000_DEV_ID_ICH8_IFE_GT:
-       case E1000_DEV_ID_ICH8_IFE_G:
-       case E1000_DEV_ID_ICH8_IGP_M:
-               hw->mac_type = e1000_ich8lan;
-               break;
        default:
                /* Should never have loaded on this device */
                return -E1000_ERR_MAC_TYPE;
        }
 
        switch (hw->mac_type) {
-       case e1000_ich8lan:
-               hw->swfwhw_semaphore_present = true;
-               hw->asf_firmware_present = true;
-               break;
-       case e1000_80003es2lan:
-               hw->swfw_sync_present = true;
-               /* fall through */
-       case e1000_82571:
-       case e1000_82572:
-       case e1000_82573:
-               hw->eeprom_semaphore_present = true;
-               /* fall through */
        case e1000_82541:
        case e1000_82547:
        case e1000_82541_rev_2:
@@ -468,8365 +340,5295 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
        if (hw->mac_type == e1000_82543)
                hw->bad_tx_carr_stats_fd = true;
 
-       /* capable of receiving management packets to the host */
-       if (hw->mac_type >= e1000_82571)
-               hw->has_manc2h = true;
-
-       /* In rare occasions, ESB2 systems would end up started without
-        * the RX unit being turned on.
-        */
-       if (hw->mac_type == e1000_80003es2lan)
-               hw->rx_needs_kicking = true;
-
        if (hw->mac_type > e1000_82544)
                hw->has_smbus = true;
 
        return E1000_SUCCESS;
 }
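With the PCI-E device IDs dropped from the switch above, a failed e1000_set_mac_type() now simply means "not a device this driver claims"; a hedged sketch of a probe-time check built on that (the helper and its error handling are illustrative, not from the patch):

	/* Illustrative: reject parts this driver no longer claims, such as
	 * the PCI-E devices now handled by e1000e. */
	static int e1000_check_mac_supported(struct e1000_hw *hw, u16 device_id)
	{
		hw->device_id = device_id;
		if (e1000_set_mac_type(hw) != E1000_SUCCESS)
			return -EIO;	/* unknown or PCI-E device ID */
		return 0;
	}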
 
-/*****************************************************************************
- * Set media type and TBI compatibility.
- *
- * hw - Struct containing variables accessed by shared code
- * **************************************************************************/
+/**
+ * e1000_set_media_type - Set media type and TBI compatibility.
+ * @hw: Struct containing variables accessed by shared code
+ */
 void e1000_set_media_type(struct e1000_hw *hw)
 {
-    u32 status;
-
-    DEBUGFUNC("e1000_set_media_type");
-
-    if (hw->mac_type != e1000_82543) {
-        /* tbi_compatibility is only valid on 82543 */
-        hw->tbi_compatibility_en = false;
-    }
-
-    switch (hw->device_id) {
-    case E1000_DEV_ID_82545GM_SERDES:
-    case E1000_DEV_ID_82546GB_SERDES:
-    case E1000_DEV_ID_82571EB_SERDES:
-    case E1000_DEV_ID_82571EB_SERDES_DUAL:
-    case E1000_DEV_ID_82571EB_SERDES_QUAD:
-    case E1000_DEV_ID_82572EI_SERDES:
-    case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
-        hw->media_type = e1000_media_type_internal_serdes;
-        break;
-    default:
-        switch (hw->mac_type) {
-        case e1000_82542_rev2_0:
-        case e1000_82542_rev2_1:
-            hw->media_type = e1000_media_type_fiber;
-            break;
-        case e1000_ich8lan:
-        case e1000_82573:
-            /* The STATUS_TBIMODE bit is reserved or reused for the this
-             * device.
-             */
-            hw->media_type = e1000_media_type_copper;
-            break;
-        default:
-            status = er32(STATUS);
-            if (status & E1000_STATUS_TBIMODE) {
-                hw->media_type = e1000_media_type_fiber;
-                /* tbi_compatibility not valid on fiber */
-                hw->tbi_compatibility_en = false;
-            } else {
-                hw->media_type = e1000_media_type_copper;
-            }
-            break;
-        }
-    }
+       u32 status;
+
+       DEBUGFUNC("e1000_set_media_type");
+
+       if (hw->mac_type != e1000_82543) {
+               /* tbi_compatibility is only valid on 82543 */
+               hw->tbi_compatibility_en = false;
+       }
+
+       switch (hw->device_id) {
+       case E1000_DEV_ID_82545GM_SERDES:
+       case E1000_DEV_ID_82546GB_SERDES:
+               hw->media_type = e1000_media_type_internal_serdes;
+               break;
+       default:
+               switch (hw->mac_type) {
+               case e1000_82542_rev2_0:
+               case e1000_82542_rev2_1:
+                       hw->media_type = e1000_media_type_fiber;
+                       break;
+               default:
+                       status = er32(STATUS);
+                       if (status & E1000_STATUS_TBIMODE) {
+                               hw->media_type = e1000_media_type_fiber;
+                               /* tbi_compatibility not valid on fiber */
+                               hw->tbi_compatibility_en = false;
+                       } else {
+                               hw->media_type = e1000_media_type_copper;
+                       }
+                       break;
+               }
+       }
 }
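For context, the media type chosen above is what later steers link bring-up; a minimal sketch of that decision, mirroring the copper-vs-fiber/serdes split made by e1000_setup_link() (the wrapper function itself is hypothetical):

	/* Illustrative wrapper: copper gets the PHY-based setup path,
	 * fiber and internal serdes share the PCS-based path. */
	static s32 e1000_setup_link_by_media(struct e1000_hw *hw)
	{
		if (hw->media_type == e1000_media_type_copper)
			return e1000_setup_copper_link(hw);
		return e1000_setup_fiber_serdes_link(hw);
	}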
 
-/******************************************************************************
- * Reset the transmit and receive units; mask and clear all interrupts.
+/**
+ * e1000_reset_hw - reset the hardware completely
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ */
 s32 e1000_reset_hw(struct e1000_hw *hw)
 {
-    u32 ctrl;
-    u32 ctrl_ext;
-    u32 icr;
-    u32 manc;
-    u32 led_ctrl;
-    u32 timeout;
-    u32 extcnf_ctrl;
-    s32 ret_val;
-
-    DEBUGFUNC("e1000_reset_hw");
-
-    /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
-    if (hw->mac_type == e1000_82542_rev2_0) {
-        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
-        e1000_pci_clear_mwi(hw);
-    }
-
-    if (hw->bus_type == e1000_bus_type_pci_express) {
-        /* Prevent the PCI-E bus from sticking if there is no TLP connection
-         * on the last TLP read/write transaction when MAC is reset.
-         */
-        if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
-            DEBUGOUT("PCI-E Master disable polling has failed.\n");
-        }
-    }
-
-    /* Clear interrupt mask to stop board from generating interrupts */
-    DEBUGOUT("Masking off all interrupts\n");
-    ew32(IMC, 0xffffffff);
-
-    /* Disable the Transmit and Receive units.  Then delay to allow
-     * any pending transactions to complete before we hit the MAC with
-     * the global reset.
-     */
-    ew32(RCTL, 0);
-    ew32(TCTL, E1000_TCTL_PSP);
-    E1000_WRITE_FLUSH();
-
-    /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
-    hw->tbi_compatibility_on = false;
-
-    /* Delay to allow any outstanding PCI transactions to complete before
-     * resetting the device
-     */
-    msleep(10);
-
-    ctrl = er32(CTRL);
-
-    /* Must reset the PHY before resetting the MAC */
-    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
-        ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
-        msleep(5);
-    }
-
-    /* Must acquire the MDIO ownership before MAC reset.
-     * Ownership defaults to firmware after a reset. */
-    if (hw->mac_type == e1000_82573) {
-        timeout = 10;
-
-        extcnf_ctrl = er32(EXTCNF_CTRL);
-        extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
-        do {
-            ew32(EXTCNF_CTRL, extcnf_ctrl);
-            extcnf_ctrl = er32(EXTCNF_CTRL);
-
-            if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
-                break;
-            else
-                extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
-            msleep(2);
-            timeout--;
-        } while (timeout);
-    }
-
-    /* Workaround for ICH8 bit corruption issue in FIFO memory */
-    if (hw->mac_type == e1000_ich8lan) {
-        /* Set Tx and Rx buffer allocation to 8k apiece. */
-        ew32(PBA, E1000_PBA_8K);
-        /* Set Packet Buffer Size to 16k. */
-        ew32(PBS, E1000_PBS_16K);
-    }
-
-    /* Issue a global reset to the MAC.  This will reset the chip's
-     * transmit, receive, DMA, and link units.  It will not effect
-     * the current PCI configuration.  The global reset bit is self-
-     * clearing, and should clear within a microsecond.
-     */
-    DEBUGOUT("Issuing a global reset to MAC\n");
-
-    switch (hw->mac_type) {
-        case e1000_82544:
-        case e1000_82540:
-        case e1000_82545:
-        case e1000_82546:
-        case e1000_82541:
-        case e1000_82541_rev_2:
-            /* These controllers can't ack the 64-bit write when issuing the
-             * reset, so use IO-mapping as a workaround to issue the reset */
-            E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
-            break;
-        case e1000_82545_rev_3:
-        case e1000_82546_rev_3:
-            /* Reset is performed on a shadow of the control register */
-            ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
-            break;
-        case e1000_ich8lan:
-            if (!hw->phy_reset_disable &&
-                e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
-                /* e1000_ich8lan PHY HW reset requires MAC CORE reset
-                 * at the same time to make sure the interface between
-                 * MAC and the external PHY is reset.
-                 */
-                ctrl |= E1000_CTRL_PHY_RST;
-            }
-
-            e1000_get_software_flag(hw);
-            ew32(CTRL, (ctrl | E1000_CTRL_RST));
-            msleep(5);
-            break;
-        default:
-            ew32(CTRL, (ctrl | E1000_CTRL_RST));
-            break;
-    }
-
-    /* After MAC reset, force reload of EEPROM to restore power-on settings to
-     * device.  Later controllers reload the EEPROM automatically, so just wait
-     * for reload to complete.
-     */
-    switch (hw->mac_type) {
-        case e1000_82542_rev2_0:
-        case e1000_82542_rev2_1:
-        case e1000_82543:
-        case e1000_82544:
-            /* Wait for reset to complete */
-            udelay(10);
-            ctrl_ext = er32(CTRL_EXT);
-            ctrl_ext |= E1000_CTRL_EXT_EE_RST;
-            ew32(CTRL_EXT, ctrl_ext);
-            E1000_WRITE_FLUSH();
-            /* Wait for EEPROM reload */
-            msleep(2);
-            break;
-        case e1000_82541:
-        case e1000_82541_rev_2:
-        case e1000_82547:
-        case e1000_82547_rev_2:
-            /* Wait for EEPROM reload */
-            msleep(20);
-            break;
-        case e1000_82573:
-            if (!e1000_is_onboard_nvm_eeprom(hw)) {
-                udelay(10);
-                ctrl_ext = er32(CTRL_EXT);
-                ctrl_ext |= E1000_CTRL_EXT_EE_RST;
-                ew32(CTRL_EXT, ctrl_ext);
-                E1000_WRITE_FLUSH();
-            }
-            /* fall through */
-        default:
-            /* Auto read done will delay 5ms or poll based on mac type */
-            ret_val = e1000_get_auto_rd_done(hw);
-            if (ret_val)
-                return ret_val;
-            break;
-    }
-
-    /* Disable HW ARPs on ASF enabled adapters */
-    if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
-        manc = er32(MANC);
-        manc &= ~(E1000_MANC_ARP_EN);
-        ew32(MANC, manc);
-    }
-
-    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
-        e1000_phy_init_script(hw);
-
-        /* Configure activity LED after PHY reset */
-        led_ctrl = er32(LEDCTL);
-        led_ctrl &= IGP_ACTIVITY_LED_MASK;
-        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
-        ew32(LEDCTL, led_ctrl);
-    }
-
-    /* Clear interrupt mask to stop board from generating interrupts */
-    DEBUGOUT("Masking off all interrupts\n");
-    ew32(IMC, 0xffffffff);
-
-    /* Clear any pending interrupt events. */
-    icr = er32(ICR);
-
-    /* If MWI was previously enabled, reenable it. */
-    if (hw->mac_type == e1000_82542_rev2_0) {
-        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
-            e1000_pci_set_mwi(hw);
-    }
-
-    if (hw->mac_type == e1000_ich8lan) {
-        u32 kab = er32(KABGTXD);
-        kab |= E1000_KABGTXD_BGSQLBIAS;
-        ew32(KABGTXD, kab);
-    }
-
-    return E1000_SUCCESS;
-}
+       u32 ctrl;
+       u32 ctrl_ext;
+       u32 icr;
+       u32 manc;
+       u32 led_ctrl;
+       s32 ret_val;
 
-/******************************************************************************
- *
- * Initialize a number of hardware-dependent bits
- *
- * hw: Struct containing variables accessed by shared code
- *
- * This function contains hardware limitation workarounds for PCI-E adapters
- *
- *****************************************************************************/
-static void e1000_initialize_hardware_bits(struct e1000_hw *hw)
-{
-    if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
-        /* Settings common to all PCI-express silicon */
-        u32 reg_ctrl, reg_ctrl_ext;
-        u32 reg_tarc0, reg_tarc1;
-        u32 reg_tctl;
-        u32 reg_txdctl, reg_txdctl1;
-
-        /* link autonegotiation/sync workarounds */
-        reg_tarc0 = er32(TARC0);
-        reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
-
-        /* Enable not-done TX descriptor counting */
-        reg_txdctl = er32(TXDCTL);
-        reg_txdctl |= E1000_TXDCTL_COUNT_DESC;
-        ew32(TXDCTL, reg_txdctl);
-        reg_txdctl1 = er32(TXDCTL1);
-        reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC;
-        ew32(TXDCTL1, reg_txdctl1);
-
-        switch (hw->mac_type) {
-            case e1000_82571:
-            case e1000_82572:
-                /* Clear PHY TX compatible mode bits */
-                reg_tarc1 = er32(TARC1);
-                reg_tarc1 &= ~((1 << 30)|(1 << 29));
-
-                /* link autonegotiation/sync workarounds */
-                reg_tarc0 |= ((1 << 26)|(1 << 25)|(1 << 24)|(1 << 23));
-
-                /* TX ring control fixes */
-                reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24));
-
-                /* Multiple read bit is reversed polarity */
-                reg_tctl = er32(TCTL);
-                if (reg_tctl & E1000_TCTL_MULR)
-                    reg_tarc1 &= ~(1 << 28);
-                else
-                    reg_tarc1 |= (1 << 28);
-
-                ew32(TARC1, reg_tarc1);
-                break;
-            case e1000_82573:
-                reg_ctrl_ext = er32(CTRL_EXT);
-                reg_ctrl_ext &= ~(1 << 23);
-                reg_ctrl_ext |= (1 << 22);
-
-                /* TX byte count fix */
-                reg_ctrl = er32(CTRL);
-                reg_ctrl &= ~(1 << 29);
-
-                ew32(CTRL_EXT, reg_ctrl_ext);
-                ew32(CTRL, reg_ctrl);
-                break;
-            case e1000_80003es2lan:
-                /* improve small packet performace for fiber/serdes */
-                if ((hw->media_type == e1000_media_type_fiber) ||
-                    (hw->media_type == e1000_media_type_internal_serdes)) {
-                    reg_tarc0 &= ~(1 << 20);
-                }
-
-                /* Multiple read bit is reversed polarity */
-                reg_tctl = er32(TCTL);
-                reg_tarc1 = er32(TARC1);
-                if (reg_tctl & E1000_TCTL_MULR)
-                    reg_tarc1 &= ~(1 << 28);
-                else
-                    reg_tarc1 |= (1 << 28);
-
-                ew32(TARC1, reg_tarc1);
-                break;
-            case e1000_ich8lan:
-                /* Reduce concurrent DMA requests to 3 from 4 */
-                if ((hw->revision_id < 3) ||
-                    ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
-                     (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))
-                    reg_tarc0 |= ((1 << 29)|(1 << 28));
-
-                reg_ctrl_ext = er32(CTRL_EXT);
-                reg_ctrl_ext |= (1 << 22);
-                ew32(CTRL_EXT, reg_ctrl_ext);
-
-                /* workaround TX hang with TSO=on */
-                reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23));
-
-                /* Multiple read bit is reversed polarity */
-                reg_tctl = er32(TCTL);
-                reg_tarc1 = er32(TARC1);
-                if (reg_tctl & E1000_TCTL_MULR)
-                    reg_tarc1 &= ~(1 << 28);
-                else
-                    reg_tarc1 |= (1 << 28);
-
-                /* workaround TX hang with TSO=on */
-                reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24));
-
-                ew32(TARC1, reg_tarc1);
-                break;
-            default:
-                break;
-        }
-
-        ew32(TARC0, reg_tarc0);
-    }
+       DEBUGFUNC("e1000_reset_hw");
+
+       /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+       if (hw->mac_type == e1000_82542_rev2_0) {
+               DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+               e1000_pci_clear_mwi(hw);
+       }
+
+       /* Clear interrupt mask to stop board from generating interrupts */
+       DEBUGOUT("Masking off all interrupts\n");
+       ew32(IMC, 0xffffffff);
+
+       /* Disable the Transmit and Receive units.  Then delay to allow
+        * any pending transactions to complete before we hit the MAC with
+        * the global reset.
+        */
+       ew32(RCTL, 0);
+       ew32(TCTL, E1000_TCTL_PSP);
+       E1000_WRITE_FLUSH();
+
+       /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
+       hw->tbi_compatibility_on = false;
+
+       /* Delay to allow any outstanding PCI transactions to complete before
+        * resetting the device
+        */
+       msleep(10);
+
+       ctrl = er32(CTRL);
+
+       /* Must reset the PHY before resetting the MAC */
+       if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+               ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+               msleep(5);
+       }
+
+       /* Issue a global reset to the MAC.  This will reset the chip's
+        * transmit, receive, DMA, and link units.  It will not affect
+        * the current PCI configuration.  The global reset bit is self-
+        * clearing, and should clear within a microsecond.
+        */
+       DEBUGOUT("Issuing a global reset to MAC\n");
+
+       switch (hw->mac_type) {
+       case e1000_82544:
+       case e1000_82540:
+       case e1000_82545:
+       case e1000_82546:
+       case e1000_82541:
+       case e1000_82541_rev_2:
+               /* These controllers can't ack the 64-bit write when issuing the
+                * reset, so use IO-mapping as a workaround to issue the reset */
+               E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+               break;
+       case e1000_82545_rev_3:
+       case e1000_82546_rev_3:
+               /* Reset is performed on a shadow of the control register */
+               ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
+               break;
+       default:
+               ew32(CTRL, (ctrl | E1000_CTRL_RST));
+               break;
+       }
+
+       /* After MAC reset, force reload of EEPROM to restore power-on settings to
+        * device.  Later controllers reload the EEPROM automatically, so just wait
+        * for reload to complete.
+        */
+       switch (hw->mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+       case e1000_82543:
+       case e1000_82544:
+               /* Wait for reset to complete */
+               udelay(10);
+               ctrl_ext = er32(CTRL_EXT);
+               ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+               ew32(CTRL_EXT, ctrl_ext);
+               E1000_WRITE_FLUSH();
+               /* Wait for EEPROM reload */
+               msleep(2);
+               break;
+       case e1000_82541:
+       case e1000_82541_rev_2:
+       case e1000_82547:
+       case e1000_82547_rev_2:
+               /* Wait for EEPROM reload */
+               msleep(20);
+               break;
+       default:
+               /* Auto read done will delay 5ms or poll based on mac type */
+               ret_val = e1000_get_auto_rd_done(hw);
+               if (ret_val)
+                       return ret_val;
+               break;
+       }
+
+       /* Disable HW ARPs on ASF enabled adapters */
+       if (hw->mac_type >= e1000_82540) {
+               manc = er32(MANC);
+               manc &= ~(E1000_MANC_ARP_EN);
+               ew32(MANC, manc);
+       }
+
+       if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+               e1000_phy_init_script(hw);
+
+               /* Configure activity LED after PHY reset */
+               led_ctrl = er32(LEDCTL);
+               led_ctrl &= IGP_ACTIVITY_LED_MASK;
+               led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+               ew32(LEDCTL, led_ctrl);
+       }
+
+       /* Clear interrupt mask to stop board from generating interrupts */
+       DEBUGOUT("Masking off all interrupts\n");
+       ew32(IMC, 0xffffffff);
+
+       /* Clear any pending interrupt events. */
+       icr = er32(ICR);
+
+       /* If MWI was previously enabled, reenable it. */
+       if (hw->mac_type == e1000_82542_rev2_0) {
+               if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+                       e1000_pci_set_mwi(hw);
+       }
+
+       return E1000_SUCCESS;
 }
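A note on ordering: the reset above leaves the MAC quiesced with all interrupts masked, which is the post-reset state e1000_init_hw() below expects; a sketch of the intended call sequence (illustrative, not lifted from the patch):

	/* Illustrative bring-up order used by the driver's reset path. */
	s32 ret_val;

	ret_val = e1000_reset_hw(hw);
	if (ret_val == E1000_SUCCESS)
		ret_val = e1000_init_hw(hw);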
 
-/******************************************************************************
- * Performs basic configuration of the adapter.
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_init_hw - Performs basic configuration of the adapter.
+ * @hw: Struct containing variables accessed by shared code
  *
  * Assumes that the controller has previously been reset and is in a
  * post-reset uninitialized state. Initializes the receive address registers,
  * multicast table, and VLAN filter table. Calls routines to setup link
  * configuration and flow control settings. Clears all on-chip counters. Leaves
  * the transmit and receive units disabled and uninitialized.
- *****************************************************************************/
+ */
 s32 e1000_init_hw(struct e1000_hw *hw)
 {
-    u32 ctrl;
-    u32 i;
-    s32 ret_val;
-    u32 mta_size;
-    u32 reg_data;
-    u32 ctrl_ext;
-
-    DEBUGFUNC("e1000_init_hw");
-
-    /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
-    if ((hw->mac_type == e1000_ich8lan) &&
-        ((hw->revision_id < 3) ||
-         ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
-          (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) {
-            reg_data = er32(STATUS);
-            reg_data &= ~0x80000000;
-            ew32(STATUS, reg_data);
-    }
-
-    /* Initialize Identification LED */
-    ret_val = e1000_id_led_init(hw);
-    if (ret_val) {
-        DEBUGOUT("Error Initializing Identification LED\n");
-        return ret_val;
-    }
-
-    /* Set the media type and TBI compatibility */
-    e1000_set_media_type(hw);
-
-    /* Must be called after e1000_set_media_type because media_type is used */
-    e1000_initialize_hardware_bits(hw);
-
-    /* Disabling VLAN filtering. */
-    DEBUGOUT("Initializing the IEEE VLAN\n");
-    /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
-    if (hw->mac_type != e1000_ich8lan) {
-        if (hw->mac_type < e1000_82545_rev_3)
-            ew32(VET, 0);
-        e1000_clear_vfta(hw);
-    }
-
-    /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
-    if (hw->mac_type == e1000_82542_rev2_0) {
-        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
-        e1000_pci_clear_mwi(hw);
-        ew32(RCTL, E1000_RCTL_RST);
-        E1000_WRITE_FLUSH();
-        msleep(5);
-    }
-
-    /* Setup the receive address. This involves initializing all of the Receive
-     * Address Registers (RARs 0 - 15).
-     */
-    e1000_init_rx_addrs(hw);
-
-    /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
-    if (hw->mac_type == e1000_82542_rev2_0) {
-        ew32(RCTL, 0);
-        E1000_WRITE_FLUSH();
-        msleep(1);
-        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
-            e1000_pci_set_mwi(hw);
-    }
-
-    /* Zero out the Multicast HASH table */
-    DEBUGOUT("Zeroing the MTA\n");
-    mta_size = E1000_MC_TBL_SIZE;
-    if (hw->mac_type == e1000_ich8lan)
-        mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
-    for (i = 0; i < mta_size; i++) {
-        E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
-        /* use write flush to prevent Memory Write Block (MWB) from
-         * occuring when accessing our register space */
-        E1000_WRITE_FLUSH();
-    }
-
-    /* Set the PCI priority bit correctly in the CTRL register.  This
-     * determines if the adapter gives priority to receives, or if it
-     * gives equal priority to transmits and receives.  Valid only on
-     * 82542 and 82543 silicon.
-     */
-    if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
-        ctrl = er32(CTRL);
-        ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
-    }
-
-    switch (hw->mac_type) {
-    case e1000_82545_rev_3:
-    case e1000_82546_rev_3:
-        break;
-    default:
-        /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
-       if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048)
-               e1000_pcix_set_mmrbc(hw, 2048);
-       break;
-    }
-
-    /* More time needed for PHY to initialize */
-    if (hw->mac_type == e1000_ich8lan)
-        msleep(15);
-
-    /* Call a subroutine to configure the link and setup flow control. */
-    ret_val = e1000_setup_link(hw);
-
-    /* Set the transmit descriptor write-back policy */
-    if (hw->mac_type > e1000_82544) {
-        ctrl = er32(TXDCTL);
-        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
-        ew32(TXDCTL, ctrl);
-    }
-
-    if (hw->mac_type == e1000_82573) {
-        e1000_enable_tx_pkt_filtering(hw);
-    }
-
-    switch (hw->mac_type) {
-    default:
-        break;
-    case e1000_80003es2lan:
-        /* Enable retransmit on late collisions */
-        reg_data = er32(TCTL);
-        reg_data |= E1000_TCTL_RTLC;
-        ew32(TCTL, reg_data);
-
-        /* Configure Gigabit Carry Extend Padding */
-        reg_data = er32(TCTL_EXT);
-        reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
-        reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
-        ew32(TCTL_EXT, reg_data);
-
-        /* Configure Transmit Inter-Packet Gap */
-        reg_data = er32(TIPG);
-        reg_data &= ~E1000_TIPG_IPGT_MASK;
-        reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
-        ew32(TIPG, reg_data);
-
-        reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
-        reg_data &= ~0x00100000;
-        E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
-        /* Fall through */
-    case e1000_82571:
-    case e1000_82572:
-    case e1000_ich8lan:
-        ctrl = er32(TXDCTL1);
-        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
-        ew32(TXDCTL1, ctrl);
-        break;
-    }
-
-
-    if (hw->mac_type == e1000_82573) {
-        u32 gcr = er32(GCR);
-        gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
-        ew32(GCR, gcr);
-    }
-
-    /* Clear all of the statistics registers (clear on read).  It is
-     * important that we do this after we have tried to establish link
-     * because the symbol error count will increment wildly if there
-     * is no link.
-     */
-    e1000_clear_hw_cntrs(hw);
-
-    /* ICH8 No-snoop bits are opposite polarity.
-     * Set to snoop by default after reset. */
-    if (hw->mac_type == e1000_ich8lan)
-        e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
-
-    if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
-        hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
-        ctrl_ext = er32(CTRL_EXT);
-        /* Relaxed ordering must be disabled to avoid a parity
-         * error crash in a PCI slot. */
-        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
-        ew32(CTRL_EXT, ctrl_ext);
-    }
-
-    return ret_val;
-}
+       u32 ctrl;
+       u32 i;
+       s32 ret_val;
+       u32 mta_size;
+       u32 ctrl_ext;
 
-/******************************************************************************
- * Adjust SERDES output amplitude based on EEPROM setting.
- *
- * hw - Struct containing variables accessed by shared code.
- *****************************************************************************/
-static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
-{
-    u16 eeprom_data;
-    s32  ret_val;
-
-    DEBUGFUNC("e1000_adjust_serdes_amplitude");
-
-    if (hw->media_type != e1000_media_type_internal_serdes)
-        return E1000_SUCCESS;
-
-    switch (hw->mac_type) {
-    case e1000_82545_rev_3:
-    case e1000_82546_rev_3:
-        break;
-    default:
-        return E1000_SUCCESS;
-    }
-
-    ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
-    if (ret_val) {
-        return ret_val;
-    }
-
-    if (eeprom_data != EEPROM_RESERVED_WORD) {
-        /* Adjust SERDES output amplitude only. */
-        eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
-        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
-        if (ret_val)
-            return ret_val;
-    }
-
-    return E1000_SUCCESS;
-}
+       DEBUGFUNC("e1000_init_hw");
 
-/******************************************************************************
- * Configures flow control and link settings.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Determines which flow control settings to use. Calls the apropriate media-
- * specific link configuration function. Configures the flow control settings.
- * Assuming the adapter has a valid link partner, a valid link should be
- * established. Assumes the hardware has previously been reset and the
- * transmitter and receiver are not enabled.
- *****************************************************************************/
-s32 e1000_setup_link(struct e1000_hw *hw)
-{
-    u32 ctrl_ext;
-    s32 ret_val;
-    u16 eeprom_data;
-
-    DEBUGFUNC("e1000_setup_link");
-
-    /* In the case of the phy reset being blocked, we already have a link.
-     * We do not have to set it up again. */
-    if (e1000_check_phy_reset_block(hw))
-        return E1000_SUCCESS;
-
-    /* Read and store word 0x0F of the EEPROM. This word contains bits
-     * that determine the hardware's default PAUSE (flow control) mode,
-     * a bit that determines whether the HW defaults to enabling or
-     * disabling auto-negotiation, and the direction of the
-     * SW defined pins. If there is no SW over-ride of the flow
-     * control setting, then the variable hw->fc will
-     * be initialized based on a value in the EEPROM.
-     */
-    if (hw->fc == E1000_FC_DEFAULT) {
-        switch (hw->mac_type) {
-        case e1000_ich8lan:
-        case e1000_82573:
-            hw->fc = E1000_FC_FULL;
-            break;
-        default:
-            ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
-                                        1, &eeprom_data);
-            if (ret_val) {
-                DEBUGOUT("EEPROM Read Error\n");
-                return -E1000_ERR_EEPROM;
-            }
-            if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
-                hw->fc = E1000_FC_NONE;
-            else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
-                    EEPROM_WORD0F_ASM_DIR)
-                hw->fc = E1000_FC_TX_PAUSE;
-            else
-                hw->fc = E1000_FC_FULL;
-            break;
-        }
-    }
-
-    /* We want to save off the original Flow Control configuration just
-     * in case we get disconnected and then reconnected into a different
-     * hub or switch with different Flow Control capabilities.
-     */
-    if (hw->mac_type == e1000_82542_rev2_0)
-        hw->fc &= (~E1000_FC_TX_PAUSE);
-
-    if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
-        hw->fc &= (~E1000_FC_RX_PAUSE);
-
-    hw->original_fc = hw->fc;
-
-    DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
-
-    /* Take the 4 bits from EEPROM word 0x0F that determine the initial
-     * polarity value for the SW controlled pins, and setup the
-     * Extended Device Control reg with that info.
-     * This is needed because one of the SW controlled pins is used for
-     * signal detection.  So this should be done before e1000_setup_pcs_link()
-     * or e1000_phy_setup() is called.
-     */
-    if (hw->mac_type == e1000_82543) {
-        ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
-                                    1, &eeprom_data);
-        if (ret_val) {
-            DEBUGOUT("EEPROM Read Error\n");
-            return -E1000_ERR_EEPROM;
-        }
-        ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
-                    SWDPIO__EXT_SHIFT);
-        ew32(CTRL_EXT, ctrl_ext);
-    }
-
-    /* Call the necessary subroutine to configure the link. */
-    ret_val = (hw->media_type == e1000_media_type_copper) ?
-              e1000_setup_copper_link(hw) :
-              e1000_setup_fiber_serdes_link(hw);
-
-    /* Initialize the flow control address, type, and PAUSE timer
-     * registers to their default values.  This is done even if flow
-     * control is disabled, because it does not hurt anything to
-     * initialize these registers.
-     */
-    DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
-
-    /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
-    if (hw->mac_type != e1000_ich8lan) {
-        ew32(FCT, FLOW_CONTROL_TYPE);
-        ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
-        ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
-    }
-
-    ew32(FCTTV, hw->fc_pause_time);
-
-    /* Set the flow control receive threshold registers.  Normally,
-     * these registers will be set to a default threshold that may be
-     * adjusted later by the driver's runtime code.  However, if the
-     * ability to transmit pause frames in not enabled, then these
-     * registers will be set to 0.
-     */
-    if (!(hw->fc & E1000_FC_TX_PAUSE)) {
-        ew32(FCRTL, 0);
-        ew32(FCRTH, 0);
-    } else {
-        /* We need to set up the Receive Threshold high and low water marks
-         * as well as (optionally) enabling the transmission of XON frames.
-         */
-        if (hw->fc_send_xon) {
-            ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
-            ew32(FCRTH, hw->fc_high_water);
-        } else {
-            ew32(FCRTL, hw->fc_low_water);
-            ew32(FCRTH, hw->fc_high_water);
-        }
-    }
-    return ret_val;
-}
+       /* Initialize Identification LED */
+       ret_val = e1000_id_led_init(hw);
+       if (ret_val) {
+               DEBUGOUT("Error Initializing Identification LED\n");
+               return ret_val;
+       }
 
-/******************************************************************************
- * Sets up link for a fiber based or serdes based adapter
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Manipulates Physical Coding Sublayer functions in order to configure
- * link. Assumes the hardware has been previously reset and the transmitter
- * and receiver are not enabled.
- *****************************************************************************/
-static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
-{
-    u32 ctrl;
-    u32 status;
-    u32 txcw = 0;
-    u32 i;
-    u32 signal = 0;
-    s32 ret_val;
-
-    DEBUGFUNC("e1000_setup_fiber_serdes_link");
-
-    /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
-     * until explicitly turned off or a power cycle is performed.  A read to
-     * the register does not indicate its status.  Therefore, we ensure
-     * loopback mode is disabled during initialization.
-     */
-    if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
-        ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK);
-
-    /* On adapters with a MAC newer than 82544, SWDP 1 will be
-     * set when the optics detect a signal. On older adapters, it will be
-     * cleared when there is a signal.  This applies to fiber media only.
-     * If we're on serdes media, adjust the output amplitude to value
-     * set in the EEPROM.
-     */
-    ctrl = er32(CTRL);
-    if (hw->media_type == e1000_media_type_fiber)
-        signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
-
-    ret_val = e1000_adjust_serdes_amplitude(hw);
-    if (ret_val)
-        return ret_val;
-
-    /* Take the link out of reset */
-    ctrl &= ~(E1000_CTRL_LRST);
-
-    /* Adjust VCO speed to improve BER performance */
-    ret_val = e1000_set_vco_speed(hw);
-    if (ret_val)
-        return ret_val;
-
-    e1000_config_collision_dist(hw);
-
-    /* Check for a software override of the flow control settings, and setup
-     * the device accordingly.  If auto-negotiation is enabled, then software
-     * will have to set the "PAUSE" bits to the correct value in the Tranmsit
-     * Config Word Register (TXCW) and re-start auto-negotiation.  However, if
-     * auto-negotiation is disabled, then software will have to manually
-     * configure the two flow control enable bits in the CTRL register.
-     *
-     * The possible values of the "fc" parameter are:
-     *      0:  Flow control is completely disabled
-     *      1:  Rx flow control is enabled (we can receive pause frames, but
-     *          not send pause frames).
-     *      2:  Tx flow control is enabled (we can send pause frames but we do
-     *          not support receiving pause frames).
-     *      3:  Both Rx and TX flow control (symmetric) are enabled.
-     */
-    switch (hw->fc) {
-    case E1000_FC_NONE:
-        /* Flow control is completely disabled by a software over-ride. */
-        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
-        break;
-    case E1000_FC_RX_PAUSE:
-        /* RX Flow control is enabled and TX Flow control is disabled by a
-         * software over-ride. Since there really isn't a way to advertise
-         * that we are capable of RX Pause ONLY, we will advertise that we
-         * support both symmetric and asymmetric RX PAUSE. Later, we will
-         *  disable the adapter's ability to send PAUSE frames.
-         */
-        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
-        break;
-    case E1000_FC_TX_PAUSE:
-        /* TX Flow control is enabled, and RX Flow control is disabled, by a
-         * software over-ride.
-         */
-        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
-        break;
-    case E1000_FC_FULL:
-        /* Flow control (both RX and TX) is enabled by a software over-ride. */
-        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
-        break;
-    default:
-        DEBUGOUT("Flow control param set incorrectly\n");
-        return -E1000_ERR_CONFIG;
-        break;
-    }
-
-    /* Since auto-negotiation is enabled, take the link out of reset (the link
-     * will be in reset, because we previously reset the chip). This will
-     * restart auto-negotiation.  If auto-neogtiation is successful then the
-     * link-up status bit will be set and the flow control enable bits (RFCE
-     * and TFCE) will be set according to their negotiated value.
-     */
-    DEBUGOUT("Auto-negotiation enabled\n");
-
-    ew32(TXCW, txcw);
-    ew32(CTRL, ctrl);
-    E1000_WRITE_FLUSH();
-
-    hw->txcw = txcw;
-    msleep(1);
-
-    /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
-     * indication in the Device Status Register.  Time-out if a link isn't
-     * seen in 500 milliseconds seconds (Auto-negotiation should complete in
-     * less than 500 milliseconds even if the other end is doing it in SW).
-     * For internal serdes, we just assume a signal is present, then poll.
-     */
-    if (hw->media_type == e1000_media_type_internal_serdes ||
-       (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
-        DEBUGOUT("Looking for Link\n");
-        for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
-            msleep(10);
-            status = er32(STATUS);
-            if (status & E1000_STATUS_LU) break;
-        }
-        if (i == (LINK_UP_TIMEOUT / 10)) {
-            DEBUGOUT("Never got a valid link from auto-neg!!!\n");
-            hw->autoneg_failed = 1;
-            /* AutoNeg failed to achieve a link, so we'll call
-             * e1000_check_for_link. This routine will force the link up if
-             * we detect a signal. This will allow us to communicate with
-             * non-autonegotiating link partners.
-             */
-            ret_val = e1000_check_for_link(hw);
-            if (ret_val) {
-                DEBUGOUT("Error while checking for link\n");
-                return ret_val;
-            }
-            hw->autoneg_failed = 0;
-        } else {
-            hw->autoneg_failed = 0;
-            DEBUGOUT("Valid Link Found\n");
-        }
-    } else {
-        DEBUGOUT("No Signal Detected\n");
-    }
-    return E1000_SUCCESS;
-}
+       /* Set the media type and TBI compatibility */
+       e1000_set_media_type(hw);
+
+       /* Disabling VLAN filtering. */
+       DEBUGOUT("Initializing the IEEE VLAN\n");
+       if (hw->mac_type < e1000_82545_rev_3)
+               ew32(VET, 0);
+       e1000_clear_vfta(hw);
+
+       /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+       if (hw->mac_type == e1000_82542_rev2_0) {
+               DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+               e1000_pci_clear_mwi(hw);
+               ew32(RCTL, E1000_RCTL_RST);
+               E1000_WRITE_FLUSH();
+               msleep(5);
+       }
 
-/******************************************************************************
-* Make sure we have a valid PHY and change PHY mode before link setup.
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
-{
-    u32 ctrl;
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_copper_link_preconfig");
-
-    ctrl = er32(CTRL);
-    /* With 82543, we need to force speed and duplex on the MAC equal to what
-     * the PHY speed and duplex configuration is. In addition, we need to
-     * perform a hardware reset on the PHY to take it out of reset.
-     */
-    if (hw->mac_type > e1000_82543) {
-        ctrl |= E1000_CTRL_SLU;
-        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
-        ew32(CTRL, ctrl);
-    } else {
-        ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
-        ew32(CTRL, ctrl);
-        ret_val = e1000_phy_hw_reset(hw);
-        if (ret_val)
-            return ret_val;
-    }
-
-    /* Make sure we have a valid PHY */
-    ret_val = e1000_detect_gig_phy(hw);
-    if (ret_val) {
-        DEBUGOUT("Error, did not detect valid phy.\n");
-        return ret_val;
-    }
-    DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
-
-    /* Set PHY to class A mode (if necessary) */
-    ret_val = e1000_set_phy_mode(hw);
-    if (ret_val)
-        return ret_val;
-
-    if ((hw->mac_type == e1000_82545_rev_3) ||
-       (hw->mac_type == e1000_82546_rev_3)) {
-        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-        phy_data |= 0x00000008;
-        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-    }
-
-    if (hw->mac_type <= e1000_82543 ||
-        hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
-        hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
-        hw->phy_reset_disable = false;
-
-   return E1000_SUCCESS;
-}
+       /* Setup the receive address. This involves initializing all of the Receive
+        * Address Registers (RARs 0 - 15).
+        */
+       e1000_init_rx_addrs(hw);
+
+       /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+       if (hw->mac_type == e1000_82542_rev2_0) {
+               ew32(RCTL, 0);
+               E1000_WRITE_FLUSH();
+               msleep(1);
+               if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+                       e1000_pci_set_mwi(hw);
+       }
 
+       /* Zero out the Multicast HASH table */
+       DEBUGOUT("Zeroing the MTA\n");
+       mta_size = E1000_MC_TBL_SIZE;
+       for (i = 0; i < mta_size; i++) {
+               E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+               /* use write flush to prevent Memory Write Block (MWB) from
+                * occurring when accessing our register space */
+               E1000_WRITE_FLUSH();
+       }
 
-/********************************************************************
-* Copper link setup for e1000_phy_igp series.
-*
-* hw - Struct containing variables accessed by shared code
-*********************************************************************/
-static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
-{
-    u32 led_ctrl;
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_copper_link_igp_setup");
-
-    if (hw->phy_reset_disable)
-        return E1000_SUCCESS;
-
-    ret_val = e1000_phy_reset(hw);
-    if (ret_val) {
-        DEBUGOUT("Error Resetting the PHY\n");
-        return ret_val;
-    }
-
-    /* Wait 15ms for MAC to configure PHY from eeprom settings */
-    msleep(15);
-    if (hw->mac_type != e1000_ich8lan) {
-    /* Configure activity LED after PHY reset */
-    led_ctrl = er32(LEDCTL);
-    led_ctrl &= IGP_ACTIVITY_LED_MASK;
-    led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
-    ew32(LEDCTL, led_ctrl);
-    }
-
-    /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
-    if (hw->phy_type == e1000_phy_igp) {
-        /* disable lplu d3 during driver init */
-        ret_val = e1000_set_d3_lplu_state(hw, false);
-        if (ret_val) {
-            DEBUGOUT("Error Disabling LPLU D3\n");
-            return ret_val;
-        }
-    }
-
-    /* disable lplu d0 during driver init */
-    ret_val = e1000_set_d0_lplu_state(hw, false);
-    if (ret_val) {
-        DEBUGOUT("Error Disabling LPLU D0\n");
-        return ret_val;
-    }
-    /* Configure mdi-mdix settings */
-    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
-        hw->dsp_config_state = e1000_dsp_config_disabled;
-        /* Force MDI for earlier revs of the IGP PHY */
-        phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
-        hw->mdix = 1;
-
-    } else {
-        hw->dsp_config_state = e1000_dsp_config_enabled;
-        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
-
-        switch (hw->mdix) {
-        case 1:
-            phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
-            break;
-        case 2:
-            phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
-            break;
-        case 0:
-        default:
-            phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
-            break;
-        }
-    }
-    ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
-    if (ret_val)
-        return ret_val;
-
-    /* set auto-master slave resolution settings */
-    if (hw->autoneg) {
-        e1000_ms_type phy_ms_setting = hw->master_slave;
-
-        if (hw->ffe_config_state == e1000_ffe_config_active)
-            hw->ffe_config_state = e1000_ffe_config_enabled;
-
-        if (hw->dsp_config_state == e1000_dsp_config_activated)
-            hw->dsp_config_state = e1000_dsp_config_enabled;
-
-        /* when autonegotiation advertisment is only 1000Mbps then we
-          * should disable SmartSpeed and enable Auto MasterSlave
-          * resolution as hardware default. */
-        if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
-            /* Disable SmartSpeed */
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                         &phy_data);
-            if (ret_val)
-                return ret_val;
-            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                          phy_data);
-            if (ret_val)
-                return ret_val;
-            /* Set auto Master/Slave resolution process */
-            ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
-            if (ret_val)
-                return ret_val;
-            phy_data &= ~CR_1000T_MS_ENABLE;
-            ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
-            if (ret_val)
-                return ret_val;
-        }
-
-        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        /* load defaults for future use */
-        hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
-                                        ((phy_data & CR_1000T_MS_VALUE) ?
-                                         e1000_ms_force_master :
-                                         e1000_ms_force_slave) :
-                                         e1000_ms_auto;
-
-        switch (phy_ms_setting) {
-        case e1000_ms_force_master:
-            phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
-            break;
-        case e1000_ms_force_slave:
-            phy_data |= CR_1000T_MS_ENABLE;
-            phy_data &= ~(CR_1000T_MS_VALUE);
-            break;
-        case e1000_ms_auto:
-            phy_data &= ~CR_1000T_MS_ENABLE;
-            default:
-            break;
-        }
-        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
-        if (ret_val)
-            return ret_val;
-    }
-
-    return E1000_SUCCESS;
-}
+       /* Set the PCI priority bit correctly in the CTRL register.  This
+        * determines if the adapter gives priority to receives, or if it
+        * gives equal priority to transmits and receives.  Valid only on
+        * 82542 and 82543 silicon.
+        */
+       if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+               ctrl = er32(CTRL);
+               ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
+       }
 
-/********************************************************************
-* Copper link setup for e1000_phy_gg82563 series.
-*
-* hw - Struct containing variables accessed by shared code
-*********************************************************************/
-static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 phy_data;
-    u32 reg_data;
-
-    DEBUGFUNC("e1000_copper_link_ggp_setup");
-
-    if (!hw->phy_reset_disable) {
-
-        /* Enable CRS on TX for half-duplex operation. */
-        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
-        /* Use 25MHz for both link down and 1000BASE-T for Tx clock */
-        phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
-
-        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
-                                      phy_data);
-        if (ret_val)
-            return ret_val;
-
-        /* Options:
-         *   MDI/MDI-X = 0 (default)
-         *   0 - Auto for all speeds
-         *   1 - MDI mode
-         *   2 - MDI-X mode
-         *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
-         */
-        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
-
-        switch (hw->mdix) {
-        case 1:
-            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
-            break;
-        case 2:
-            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
-            break;
-        case 0:
-        default:
-            phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
-            break;
-        }
-
-        /* Options:
-         *   disable_polarity_correction = 0 (default)
-         *       Automatic Correction for Reversed Cable Polarity
-         *   0 - Disabled
-         *   1 - Enabled
-         */
-        phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
-        if (hw->disable_polarity_correction == 1)
-            phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
-        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
-
-        if (ret_val)
-            return ret_val;
-
-        /* SW Reset the PHY so all changes take effect */
-        ret_val = e1000_phy_reset(hw);
-        if (ret_val) {
-            DEBUGOUT("Error Resetting the PHY\n");
-            return ret_val;
-        }
-    } /* phy_reset_disable */
-
-    if (hw->mac_type == e1000_80003es2lan) {
-        /* Bypass RX and TX FIFO's */
-        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
-                                       E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
-                                       E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
-        if (ret_val)
-            return ret_val;
-
-        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
-        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
-
-        if (ret_val)
-            return ret_val;
-
-        reg_data = er32(CTRL_EXT);
-        reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
-        ew32(CTRL_EXT, reg_data);
-
-        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
-                                          &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        /* Do not init these registers when the HW is in IAMT mode, since the
-         * firmware will have already initialized them.  We only initialize
-         * them if the HW is not in IAMT mode.
-         */
-        if (!e1000_check_mng_mode(hw)) {
-            /* Enable Electrical Idle on the PHY */
-            phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
-            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
-                                          phy_data);
-            if (ret_val)
-                return ret_val;
-
-            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
-                                         &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
-            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
-                                          phy_data);
-
-            if (ret_val)
-                return ret_val;
-        }
-
-        /* Workaround: Disable padding in Kumeran interface in the MAC
-         * and in the PHY to avoid CRC errors.
-         */
-        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-        phy_data |= GG82563_ICR_DIS_PADDING;
-        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
-                                      phy_data);
-        if (ret_val)
-            return ret_val;
-    }
-
-    return E1000_SUCCESS;
-}
+       switch (hw->mac_type) {
+       case e1000_82545_rev_3:
+       case e1000_82546_rev_3:
+               break;
+       default:
+               /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+               if (hw->bus_type == e1000_bus_type_pcix
+                   && e1000_pcix_get_mmrbc(hw) > 2048)
+                       e1000_pcix_set_mmrbc(hw, 2048);
+               break;
+       }
 
-/********************************************************************
-* Copper link setup for e1000_phy_m88 series.
-*
-* hw - Struct containing variables accessed by shared code
-*********************************************************************/
-static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_copper_link_mgp_setup");
-
-    if (hw->phy_reset_disable)
-        return E1000_SUCCESS;
-
-    /* Enable CRS on TX. This must be set for half-duplex operation. */
-    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
-
-    /* Options:
-     *   MDI/MDI-X = 0 (default)
-     *   0 - Auto for all speeds
-     *   1 - MDI mode
-     *   2 - MDI-X mode
-     *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
-     */
-    phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-
-    switch (hw->mdix) {
-    case 1:
-        phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
-        break;
-    case 2:
-        phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
-        break;
-    case 3:
-        phy_data |= M88E1000_PSCR_AUTO_X_1000T;
-        break;
-    case 0:
-    default:
-        phy_data |= M88E1000_PSCR_AUTO_X_MODE;
-        break;
-    }
-
-    /* Options:
-     *   disable_polarity_correction = 0 (default)
-     *       Automatic Correction for Reversed Cable Polarity
-     *   0 - Disabled
-     *   1 - Enabled
-     */
-    phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
-    if (hw->disable_polarity_correction == 1)
-        phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-    if (ret_val)
-        return ret_val;
-
-    if (hw->phy_revision < M88E1011_I_REV_4) {
-        /* Force TX_CLK in the Extended PHY Specific Control Register
-         * to 25MHz clock.
-         */
-        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data |= M88E1000_EPSCR_TX_CLK_25;
-
-        if ((hw->phy_revision == E1000_REVISION_2) &&
-            (hw->phy_id == M88E1111_I_PHY_ID)) {
-            /* Vidalia Phy, set the downshift counter to 5x */
-            phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
-            phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
-            ret_val = e1000_write_phy_reg(hw,
-                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
-            if (ret_val)
-                return ret_val;
-        } else {
-            /* Configure Master and Slave downshift values */
-            phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
-                              M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
-            phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
-                             M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
-            ret_val = e1000_write_phy_reg(hw,
-                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
-            if (ret_val)
-               return ret_val;
-        }
-    }
-
-    /* SW Reset the PHY so all changes take effect */
-    ret_val = e1000_phy_reset(hw);
-    if (ret_val) {
-        DEBUGOUT("Error Resetting the PHY\n");
-        return ret_val;
-    }
-
-   return E1000_SUCCESS;
-}
+       /* Call a subroutine to configure the link and setup flow control. */
+       ret_val = e1000_setup_link(hw);
 
-/********************************************************************
-* Setup auto-negotiation and flow control advertisements,
-* and then perform auto-negotiation.
-*
-* hw - Struct containing variables accessed by shared code
-*********************************************************************/
-static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_copper_link_autoneg");
-
-    /* Perform some bounds checking on the hw->autoneg_advertised
-     * parameter.  If this variable is zero, then set it to the default.
-     */
-    hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
-
-    /* If autoneg_advertised is zero, we assume it was not defaulted
-     * by the calling code so we set to advertise full capability.
-     */
-    if (hw->autoneg_advertised == 0)
-        hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
-
-    /* IFE phy only supports 10/100 */
-    if (hw->phy_type == e1000_phy_ife)
-        hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
-
-    DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
-    ret_val = e1000_phy_setup_autoneg(hw);
-    if (ret_val) {
-        DEBUGOUT("Error Setting up Auto-Negotiation\n");
-        return ret_val;
-    }
-    DEBUGOUT("Restarting Auto-Neg\n");
-
-    /* Restart auto-negotiation by setting the Auto Neg Enable bit and
-     * the Auto Neg Restart bit in the PHY control register.
-     */
-    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
-    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
-    if (ret_val)
-        return ret_val;
-
-    /* Does the user want to wait for Auto-Neg to complete here, or
-     * check at a later time (for example, callback routine).
-     */
-    if (hw->wait_autoneg_complete) {
-        ret_val = e1000_wait_autoneg(hw);
-        if (ret_val) {
-            DEBUGOUT("Error while waiting for autoneg to complete\n");
-            return ret_val;
-        }
-    }
-
-    hw->get_link_status = true;
-
-    return E1000_SUCCESS;
-}
+       /* Set the transmit descriptor write-back policy */
+       if (hw->mac_type > e1000_82544) {
+               ctrl = er32(TXDCTL);
+               ctrl =
+                   (ctrl & ~E1000_TXDCTL_WTHRESH) |
+                   E1000_TXDCTL_FULL_TX_DESC_WB;
+               ew32(TXDCTL, ctrl);
+       }
 
-/******************************************************************************
-* Config the MAC and the PHY after link is up.
-*   1) Set up the MAC to the current PHY speed/duplex
-*      if we are on 82543.  If we
-*      are on newer silicon, we only need to configure
-*      collision distance in the Transmit Control Register.
-*   2) Set up flow control on the MAC to that established with
-*      the link partner.
-*   3) Config DSP to improve Gigabit link quality for some PHY revisions.
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    DEBUGFUNC("e1000_copper_link_postconfig");
-
-    if (hw->mac_type >= e1000_82544) {
-        e1000_config_collision_dist(hw);
-    } else {
-        ret_val = e1000_config_mac_to_phy(hw);
-        if (ret_val) {
-            DEBUGOUT("Error configuring MAC to PHY settings\n");
-            return ret_val;
-        }
-    }
-    ret_val = e1000_config_fc_after_link_up(hw);
-    if (ret_val) {
-        DEBUGOUT("Error Configuring Flow Control\n");
-        return ret_val;
-    }
-
-    /* Config DSP to improve Giga link quality */
-    if (hw->phy_type == e1000_phy_igp) {
-        ret_val = e1000_config_dsp_after_link_change(hw, true);
-        if (ret_val) {
-            DEBUGOUT("Error Configuring DSP after link up\n");
-            return ret_val;
-        }
-    }
-
-    return E1000_SUCCESS;
-}
+       /* Clear all of the statistics registers (clear on read).  It is
+        * important that we do this after we have tried to establish link
+        * because the symbol error count will increment wildly if there
+        * is no link.
+        */
+       e1000_clear_hw_cntrs(hw);
+
+       if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+           hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+               ctrl_ext = er32(CTRL_EXT);
+               /* Relaxed ordering must be disabled to avoid a parity
+                * error crash in a PCI slot. */
+               ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+               ew32(CTRL_EXT, ctrl_ext);
+       }
 
-/******************************************************************************
-* Detects which PHY is present and setup the speed and duplex
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_setup_copper_link(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 i;
-    u16 phy_data;
-    u16 reg_data = 0;
-
-    DEBUGFUNC("e1000_setup_copper_link");
-
-    switch (hw->mac_type) {
-    case e1000_80003es2lan:
-    case e1000_ich8lan:
-        /* Set the mac to wait the maximum time between each
-         * iteration and increase the max iterations when
-         * polling the phy; this fixes erroneous timeouts at 10Mbps. */
-        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
-        if (ret_val)
-            return ret_val;
-        ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
-        if (ret_val)
-            return ret_val;
-        reg_data |= 0x3F;
-        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
-        if (ret_val)
-            return ret_val;
-    default:
-        break;
-    }
-
-    /* Check if it is a valid PHY and set PHY mode if necessary. */
-    ret_val = e1000_copper_link_preconfig(hw);
-    if (ret_val)
-        return ret_val;
-
-    switch (hw->mac_type) {
-    case e1000_80003es2lan:
-        /* Kumeran registers are written-only */
-        reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
-        reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
-        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
-                                       reg_data);
-        if (ret_val)
-            return ret_val;
-        break;
-    default:
-        break;
-    }
-
-    if (hw->phy_type == e1000_phy_igp ||
-        hw->phy_type == e1000_phy_igp_3 ||
-        hw->phy_type == e1000_phy_igp_2) {
-        ret_val = e1000_copper_link_igp_setup(hw);
-        if (ret_val)
-            return ret_val;
-    } else if (hw->phy_type == e1000_phy_m88) {
-        ret_val = e1000_copper_link_mgp_setup(hw);
-        if (ret_val)
-            return ret_val;
-    } else if (hw->phy_type == e1000_phy_gg82563) {
-        ret_val = e1000_copper_link_ggp_setup(hw);
-        if (ret_val)
-            return ret_val;
-    }
-
-    if (hw->autoneg) {
-        /* Setup autoneg and flow control advertisement
-          * and perform autonegotiation */
-        ret_val = e1000_copper_link_autoneg(hw);
-        if (ret_val)
-            return ret_val;
-    } else {
-        /* PHY will be set to 10H, 10F, 100H,or 100F
-          * depending on value from forced_speed_duplex. */
-        DEBUGOUT("Forcing speed and duplex\n");
-        ret_val = e1000_phy_force_speed_duplex(hw);
-        if (ret_val) {
-            DEBUGOUT("Error Forcing Speed and Duplex\n");
-            return ret_val;
-        }
-    }
-
-    /* Check link status. Wait up to 100 microseconds for link to become
-     * valid.
-     */
-    for (i = 0; i < 10; i++) {
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if (ret_val)
-            return ret_val;
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        if (phy_data & MII_SR_LINK_STATUS) {
-            /* Config the MAC and PHY after link is up */
-            ret_val = e1000_copper_link_postconfig(hw);
-            if (ret_val)
-                return ret_val;
-
-            DEBUGOUT("Valid link established!!!\n");
-            return E1000_SUCCESS;
-        }
-        udelay(10);
-    }
-
-    DEBUGOUT("Unable to establish link!!!\n");
-    return E1000_SUCCESS;
+       return ret_val;
 }
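For reference, the TXDCTL write-back policy and CTRL_EXT relaxed-ordering updates in the hunk above both follow the same read-modify-write idiom on the er32()/ew32() accessors; a minimal sketch of that idiom, using a hypothetical helper name that is not part of this patch:

        /* Sketch only: the read-modify-write idiom used above for TXDCTL.
         * e1000_set_full_txd_wb() is a hypothetical name for illustration. */
        static void e1000_set_full_txd_wb(struct e1000_hw *hw)
        {
                u32 reg = er32(TXDCTL);

                /* Clear the write-back threshold field, then request full
                 * transmit descriptor write-back, as the code above does. */
                reg = (reg & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
                ew32(TXDCTL, reg);
        }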
 
-/******************************************************************************
-* Configure the MAC-to-PHY interface for 10/100Mbps
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
+/**
+ * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting.
+ * @hw: Struct containing variables accessed by shared code.
+ */
+static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
 {
-    s32 ret_val = E1000_SUCCESS;
-    u32 tipg;
-    u16 reg_data;
-
-    DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+       u16 eeprom_data;
+       s32 ret_val;
 
-    reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
-    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
-                                   reg_data);
-    if (ret_val)
-        return ret_val;
+       DEBUGFUNC("e1000_adjust_serdes_amplitude");
 
-    /* Configure Transmit Inter-Packet Gap */
-    tipg = er32(TIPG);
-    tipg &= ~E1000_TIPG_IPGT_MASK;
-    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
-    ew32(TIPG, tipg);
+       if (hw->media_type != e1000_media_type_internal_serdes)
+               return E1000_SUCCESS;
 
-    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
-
-    if (ret_val)
-        return ret_val;
+       switch (hw->mac_type) {
+       case e1000_82545_rev_3:
+       case e1000_82546_rev_3:
+               break;
+       default:
+               return E1000_SUCCESS;
+       }
 
-    if (duplex == HALF_DUPLEX)
-        reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
-    else
-        reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+       ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1,
+                                   &eeprom_data);
+       if (ret_val) {
+               return ret_val;
+       }
 
-    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+       if (eeprom_data != EEPROM_RESERVED_WORD) {
+               /* Adjust SERDES output amplitude only. */
+               eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+               ret_val =
+                   e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+               if (ret_val)
+                       return ret_val;
+       }
 
-    return ret_val;
+       return E1000_SUCCESS;
 }
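The amplitude adjustment above reduces to a guarded EEPROM-to-PHY copy; a minimal sketch of the same read, reserved-word check, mask and write sequence, reusing only the defines visible in the function (illustrative, not part of the patch):

        /* Sketch only: copy the EEPROM amplitude word to the PHY unless the
         * word still holds the reserved (unprogrammed) value. */
        u16 word;
        s32 ret = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &word);

        if (!ret && word != EEPROM_RESERVED_WORD) {
                word &= EEPROM_SERDES_AMPLITUDE_MASK;   /* keep the amplitude bits only */
                ret = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, word);
        }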
 
-static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
+/**
+ * e1000_setup_link - Configures flow control and link settings.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ */
+s32 e1000_setup_link(struct e1000_hw *hw)
 {
-    s32 ret_val = E1000_SUCCESS;
-    u16 reg_data;
-    u32 tipg;
+       u32 ctrl_ext;
+       s32 ret_val;
+       u16 eeprom_data;
 
-    DEBUGFUNC("e1000_configure_kmrn_for_1000");
+       DEBUGFUNC("e1000_setup_link");
 
-    reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
-    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
-                                   reg_data);
-    if (ret_val)
-        return ret_val;
+       /* Read and store word 0x0F of the EEPROM. This word contains bits
+        * that determine the hardware's default PAUSE (flow control) mode,
+        * a bit that determines whether the HW defaults to enabling or
+        * disabling auto-negotiation, and the direction of the
+        * SW defined pins. If there is no SW over-ride of the flow
+        * control setting, then the variable hw->fc will
+        * be initialized based on a value in the EEPROM.
+        */
+       if (hw->fc == E1000_FC_DEFAULT) {
+               ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                           1, &eeprom_data);
+               if (ret_val) {
+                       DEBUGOUT("EEPROM Read Error\n");
+                       return -E1000_ERR_EEPROM;
+               }
+               if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+                       hw->fc = E1000_FC_NONE;
+               else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+                        EEPROM_WORD0F_ASM_DIR)
+                       hw->fc = E1000_FC_TX_PAUSE;
+               else
+                       hw->fc = E1000_FC_FULL;
+       }
 
-    /* Configure Transmit Inter-Packet Gap */
-    tipg = er32(TIPG);
-    tipg &= ~E1000_TIPG_IPGT_MASK;
-    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
-    ew32(TIPG, tipg);
+       /* We want to save off the original Flow Control configuration just
+        * in case we get disconnected and then reconnected into a different
+        * hub or switch with different Flow Control capabilities.
+        */
+       if (hw->mac_type == e1000_82542_rev2_0)
+               hw->fc &= (~E1000_FC_TX_PAUSE);
 
-    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+       if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+               hw->fc &= (~E1000_FC_RX_PAUSE);
 
-    if (ret_val)
-        return ret_val;
+       hw->original_fc = hw->fc;
 
-    reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
-    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+       DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
 
-    return ret_val;
-}
+       /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+        * polarity value for the SW controlled pins, and setup the
+        * Extended Device Control reg with that info.
+        * This is needed because one of the SW controlled pins is used for
+        * signal detection.  So this should be done before e1000_setup_pcs_link()
+        * or e1000_phy_setup() is called.
+        */
+       if (hw->mac_type == e1000_82543) {
+               ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                           1, &eeprom_data);
+               if (ret_val) {
+                       DEBUGOUT("EEPROM Read Error\n");
+                       return -E1000_ERR_EEPROM;
+               }
+               ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+                           SWDPIO__EXT_SHIFT);
+               ew32(CTRL_EXT, ctrl_ext);
+       }
 
-/******************************************************************************
-* Configures PHY autoneg and flow control advertisement settings
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 mii_autoneg_adv_reg;
-    u16 mii_1000t_ctrl_reg;
-
-    DEBUGFUNC("e1000_phy_setup_autoneg");
-
-    /* Read the MII Auto-Neg Advertisement Register (Address 4). */
-    ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
-    if (ret_val)
-        return ret_val;
-
-    if (hw->phy_type != e1000_phy_ife) {
-        /* Read the MII 1000Base-T Control Register (Address 9). */
-        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
-        if (ret_val)
-            return ret_val;
-    } else
-        mii_1000t_ctrl_reg=0;
-
-    /* Need to parse both autoneg_advertised and fc and set up
-     * the appropriate PHY registers.  First we will parse for
-     * autoneg_advertised software override.  Since we can advertise
-     * a plethora of combinations, we need to check each bit
-     * individually.
-     */
-
-    /* First we clear all the 10/100 mb speed bits in the Auto-Neg
-     * Advertisement Register (Address 4) and the 1000 mb speed bits in
-     * the  1000Base-T Control Register (Address 9).
-     */
-    mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
-    mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
-
-    DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
-
-    /* Do we want to advertise 10 Mb Half Duplex? */
-    if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
-        DEBUGOUT("Advertise 10mb Half duplex\n");
-        mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
-    }
-
-    /* Do we want to advertise 10 Mb Full Duplex? */
-    if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
-        DEBUGOUT("Advertise 10mb Full duplex\n");
-        mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
-    }
-
-    /* Do we want to advertise 100 Mb Half Duplex? */
-    if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
-        DEBUGOUT("Advertise 100mb Half duplex\n");
-        mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
-    }
-
-    /* Do we want to advertise 100 Mb Full Duplex? */
-    if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
-        DEBUGOUT("Advertise 100mb Full duplex\n");
-        mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
-    }
-
-    /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
-    if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
-        DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
-    }
-
-    /* Do we want to advertise 1000 Mb Full Duplex? */
-    if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
-        DEBUGOUT("Advertise 1000mb Full duplex\n");
-        mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
-        if (hw->phy_type == e1000_phy_ife) {
-            DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
-        }
-    }
-
-    /* Check for a software override of the flow control settings, and
-     * setup the PHY advertisement registers accordingly.  If
-     * auto-negotiation is enabled, then software will have to set the
-     * "PAUSE" bits to the correct value in the Auto-Negotiation
-     * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
-     *
-     * The possible values of the "fc" parameter are:
-     *      0:  Flow control is completely disabled
-     *      1:  Rx flow control is enabled (we can receive pause frames
-     *          but not send pause frames).
-     *      2:  Tx flow control is enabled (we can send pause frames
-     *          but we do not support receiving pause frames).
-     *      3:  Both Rx and TX flow control (symmetric) are enabled.
-     *  other:  No software override.  The flow control configuration
-     *          in the EEPROM is used.
-     */
-    switch (hw->fc) {
-    case E1000_FC_NONE: /* 0 */
-        /* Flow control (RX & TX) is completely disabled by a
-         * software over-ride.
-         */
-        mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
-        break;
-    case E1000_FC_RX_PAUSE: /* 1 */
-        /* RX Flow control is enabled, and TX Flow control is
-         * disabled, by a software over-ride.
-         */
-        /* Since there really isn't a way to advertise that we are
-         * capable of RX Pause ONLY, we will advertise that we
-         * support both symmetric and asymmetric RX PAUSE.  Later
-         * (in e1000_config_fc_after_link_up) we will disable the
-         *hw's ability to send PAUSE frames.
-         */
-        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
-        break;
-    case E1000_FC_TX_PAUSE: /* 2 */
-        /* TX Flow control is enabled, and RX Flow control is
-         * disabled, by a software over-ride.
-         */
-        mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
-        mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
-        break;
-    case E1000_FC_FULL: /* 3 */
-        /* Flow control (both RX and TX) is enabled by a software
-         * over-ride.
-         */
-        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
-        break;
-    default:
-        DEBUGOUT("Flow control param set incorrectly\n");
-        return -E1000_ERR_CONFIG;
-    }
-
-    ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
-    if (ret_val)
-        return ret_val;
-
-    DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
-
-    if (hw->phy_type != e1000_phy_ife) {
-        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
-        if (ret_val)
-            return ret_val;
-    }
-
-    return E1000_SUCCESS;
-}
+       /* Call the necessary subroutine to configure the link. */
+       ret_val = (hw->media_type == e1000_media_type_copper) ?
+           e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
 
-/******************************************************************************
-* Force PHY speed and duplex settings to hw->forced_speed_duplex
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
-{
-    u32 ctrl;
-    s32 ret_val;
-    u16 mii_ctrl_reg;
-    u16 mii_status_reg;
-    u16 phy_data;
-    u16 i;
-
-    DEBUGFUNC("e1000_phy_force_speed_duplex");
-
-    /* Turn off Flow control if we are forcing speed and duplex. */
-    hw->fc = E1000_FC_NONE;
-
-    DEBUGOUT1("hw->fc = %d\n", hw->fc);
-
-    /* Read the Device Control Register. */
-    ctrl = er32(CTRL);
-
-    /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
-    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
-    ctrl &= ~(DEVICE_SPEED_MASK);
-
-    /* Clear the Auto Speed Detect Enable bit. */
-    ctrl &= ~E1000_CTRL_ASDE;
-
-    /* Read the MII Control Register. */
-    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
-    if (ret_val)
-        return ret_val;
-
-    /* We need to disable autoneg in order to force link and duplex. */
-
-    mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
-
-    /* Are we forcing Full or Half Duplex? */
-    if (hw->forced_speed_duplex == e1000_100_full ||
-        hw->forced_speed_duplex == e1000_10_full) {
-        /* We want to force full duplex so we SET the full duplex bits in the
-         * Device and MII Control Registers.
-         */
-        ctrl |= E1000_CTRL_FD;
-        mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
-        DEBUGOUT("Full Duplex\n");
-    } else {
-        /* We want to force half duplex so we CLEAR the full duplex bits in
-         * the Device and MII Control Registers.
-         */
-        ctrl &= ~E1000_CTRL_FD;
-        mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
-        DEBUGOUT("Half Duplex\n");
-    }
-
-    /* Are we forcing 100Mbps??? */
-    if (hw->forced_speed_duplex == e1000_100_full ||
-       hw->forced_speed_duplex == e1000_100_half) {
-        /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
-        ctrl |= E1000_CTRL_SPD_100;
-        mii_ctrl_reg |= MII_CR_SPEED_100;
-        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
-        DEBUGOUT("Forcing 100mb ");
-    } else {
-        /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
-        ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
-        mii_ctrl_reg |= MII_CR_SPEED_10;
-        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
-        DEBUGOUT("Forcing 10mb ");
-    }
-
-    e1000_config_collision_dist(hw);
-
-    /* Write the configured values back to the Device Control Reg. */
-    ew32(CTRL, ctrl);
-
-    if ((hw->phy_type == e1000_phy_m88) ||
-        (hw->phy_type == e1000_phy_gg82563)) {
-        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
-         * forced whenever speed are duplex are forced.
-         */
-        phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-        if (ret_val)
-            return ret_val;
-
-        DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
-
-        /* Need to reset the PHY or these changes will be ignored */
-        mii_ctrl_reg |= MII_CR_RESET;
-
-    /* Disable MDI-X support for 10/100 */
-    } else if (hw->phy_type == e1000_phy_ife) {
-        ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data &= ~IFE_PMC_AUTO_MDIX;
-        phy_data &= ~IFE_PMC_FORCE_MDIX;
-
-        ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
-        if (ret_val)
-            return ret_val;
-
-    } else {
-        /* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
-         * forced whenever speed or duplex are forced.
-         */
-        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
-        phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
-
-        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
-        if (ret_val)
-            return ret_val;
-    }
-
-    /* Write back the modified PHY MII control register. */
-    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
-    if (ret_val)
-        return ret_val;
-
-    udelay(1);
-
-    /* The wait_autoneg_complete flag may be a little misleading here.
-     * Since we are forcing speed and duplex, Auto-Neg is not enabled.
-     * But we do want to delay for a period while forcing only so we
-     * don't generate false No Link messages.  So we will wait here
-     * only if the user has set wait_autoneg_complete to 1, which is
-     * the default.
-     */
-    if (hw->wait_autoneg_complete) {
-        /* We will wait for autoneg to complete. */
-        DEBUGOUT("Waiting for forced speed/duplex link.\n");
-        mii_status_reg = 0;
-
-        /* We will wait for autoneg to complete or 4.5 seconds to expire. */
-        for (i = PHY_FORCE_TIME; i > 0; i--) {
-            /* Read the MII Status Register and wait for Auto-Neg Complete bit
-             * to be set.
-             */
-            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if (ret_val)
-                return ret_val;
-
-            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if (ret_val)
-                return ret_val;
-
-            if (mii_status_reg & MII_SR_LINK_STATUS) break;
-            msleep(100);
-        }
-        if ((i == 0) &&
-           ((hw->phy_type == e1000_phy_m88) ||
-            (hw->phy_type == e1000_phy_gg82563))) {
-            /* We didn't get link.  Reset the DSP and wait again for link. */
-            ret_val = e1000_phy_reset_dsp(hw);
-            if (ret_val) {
-                DEBUGOUT("Error Resetting PHY DSP\n");
-                return ret_val;
-            }
-        }
-        /* This loop will early-out if the link condition has been met.  */
-        for (i = PHY_FORCE_TIME; i > 0; i--) {
-            if (mii_status_reg & MII_SR_LINK_STATUS) break;
-            msleep(100);
-            /* Read the MII Status Register and wait for Auto-Neg Complete bit
-             * to be set.
-             */
-            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if (ret_val)
-                return ret_val;
-
-            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if (ret_val)
-                return ret_val;
-        }
-    }
-
-    if (hw->phy_type == e1000_phy_m88) {
-        /* Because we reset the PHY above, we need to re-force TX_CLK in the
-         * Extended PHY Specific Control Register to 25MHz clock.  This value
-         * defaults back to a 2.5MHz clock when the PHY is reset.
-         */
-        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data |= M88E1000_EPSCR_TX_CLK_25;
-        ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
-        if (ret_val)
-            return ret_val;
-
-        /* In addition, because of the s/w reset above, we need to enable CRS on
-         * TX.  This must be set for both full and half duplex operation.
-         */
-        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
-        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-        if (ret_val)
-            return ret_val;
-
-        if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
-            (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
-             hw->forced_speed_duplex == e1000_10_half)) {
-            ret_val = e1000_polarity_reversal_workaround(hw);
-            if (ret_val)
-                return ret_val;
-        }
-    } else if (hw->phy_type == e1000_phy_gg82563) {
-        /* The TX_CLK of the Extended PHY Specific Control Register defaults
-         * to 2.5MHz on a reset.  We need to re-force it back to 25MHz, if
-         * we're not in a forced 10/duplex configuration. */
-        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
-        if ((hw->forced_speed_duplex == e1000_10_full) ||
-            (hw->forced_speed_duplex == e1000_10_half))
-            phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
-        else
-            phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
-
-        /* Also due to the reset, we need to enable CRS on Tx. */
-        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
-
-        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
-        if (ret_val)
-            return ret_val;
-    }
-    return E1000_SUCCESS;
+       /* Initialize the flow control address, type, and PAUSE timer
+        * registers to their default values.  This is done even if flow
+        * control is disabled, because it does not hurt anything to
+        * initialize these registers.
+        */
+       DEBUGOUT
+           ("Initializing the Flow Control address, type and timer regs\n");
+
+       ew32(FCT, FLOW_CONTROL_TYPE);
+       ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+       ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+       ew32(FCTTV, hw->fc_pause_time);
+
+       /* Set the flow control receive threshold registers.  Normally,
+        * these registers will be set to a default threshold that may be
+        * adjusted later by the driver's runtime code.  However, if the
+        * ability to transmit pause frames is not enabled, then these
+        * registers will be set to 0.
+        */
+       if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+               ew32(FCRTL, 0);
+               ew32(FCRTH, 0);
+       } else {
+               /* We need to set up the Receive Threshold high and low water marks
+                * as well as (optionally) enable the transmission of XON frames.
+                */
+               if (hw->fc_send_xon) {
+                       ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+                       ew32(FCRTH, hw->fc_high_water);
+               } else {
+                       ew32(FCRTL, hw->fc_low_water);
+                       ew32(FCRTH, hw->fc_high_water);
+               }
+       }
+       return ret_val;
 }
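The core of e1000_setup_link() above is the decode of the two PAUSE bits in EEPROM word 0x0F into a hw->fc mode; the same mapping restated compactly as a sketch (the masks and values are the ones used in the function, the switch form is only illustrative):

        /* Sketch only: EEPROM word 0x0F PAUSE bits -> default flow control mode. */
        switch (eeprom_data & EEPROM_WORD0F_PAUSE_MASK) {
        case 0:
                hw->fc = E1000_FC_NONE;         /* no PAUSE capability in EEPROM */
                break;
        case EEPROM_WORD0F_ASM_DIR:
                hw->fc = E1000_FC_TX_PAUSE;     /* asymmetric: send PAUSE, do not honor it */
                break;
        default:
                hw->fc = E1000_FC_FULL;         /* symmetric flow control */
                break;
        }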
 
-/******************************************************************************
-* Sets the collision distance in the Transmit Control register
-*
-* hw - Struct containing variables accessed by shared code
-*
-* Link should have been established previously. Reads the speed and duplex
-* information from the Device Status register.
-******************************************************************************/
-void e1000_config_collision_dist(struct e1000_hw *hw)
+/**
+ * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ */
+static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 {
-    u32 tctl, coll_dist;
+       u32 ctrl;
+       u32 status;
+       u32 txcw = 0;
+       u32 i;
+       u32 signal = 0;
+       s32 ret_val;
 
-    DEBUGFUNC("e1000_config_collision_dist");
+       DEBUGFUNC("e1000_setup_fiber_serdes_link");
 
-    if (hw->mac_type < e1000_82543)
-        coll_dist = E1000_COLLISION_DISTANCE_82542;
-    else
-        coll_dist = E1000_COLLISION_DISTANCE;
+       /* On adapters with a MAC newer than 82544, SWDP 1 will be
+        * set when the optics detect a signal. On older adapters, it will be
+        * cleared when there is a signal.  This applies to fiber media only.
+        * If we're on serdes media, adjust the output amplitude to value
+        * set in the EEPROM.
+        */
+       ctrl = er32(CTRL);
+       if (hw->media_type == e1000_media_type_fiber)
+               signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+       ret_val = e1000_adjust_serdes_amplitude(hw);
+       if (ret_val)
+               return ret_val;
+
+       /* Take the link out of reset */
+       ctrl &= ~(E1000_CTRL_LRST);
+
+       /* Adjust VCO speed to improve BER performance */
+       ret_val = e1000_set_vco_speed(hw);
+       if (ret_val)
+               return ret_val;
+
+       e1000_config_collision_dist(hw);
+
+       /* Check for a software override of the flow control settings, and setup
+        * the device accordingly.  If auto-negotiation is enabled, then software
+        * will have to set the "PAUSE" bits to the correct value in the Transmit
+        * Config Word Register (TXCW) and re-start auto-negotiation.  However, if
+        * auto-negotiation is disabled, then software will have to manually
+        * configure the two flow control enable bits in the CTRL register.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause frames, but
+        *          not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause frames but we do
+        *          not support receiving pause frames).
+        *      3:  Both Rx and TX flow control (symmetric) are enabled.
+        */
+       switch (hw->fc) {
+       case E1000_FC_NONE:
+               /* Flow control is completely disabled by a software over-ride. */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+               break;
+       case E1000_FC_RX_PAUSE:
+               /* RX Flow control is enabled and TX Flow control is disabled by a
+                * software over-ride. Since there really isn't a way to advertise
+                * that we are capable of RX Pause ONLY, we will advertise that we
+                * support both symmetric and asymmetric RX PAUSE. Later, we will
+                *  disable the adapter's ability to send PAUSE frames.
+                */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+               break;
+       case E1000_FC_TX_PAUSE:
+               /* TX Flow control is enabled, and RX Flow control is disabled, by a
+                * software over-ride.
+                */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+               break;
+       case E1000_FC_FULL:
+               /* Flow control (both RX and TX) is enabled by a software over-ride. */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               return -E1000_ERR_CONFIG;
+               break;
+       }
+
+       /* Since auto-negotiation is enabled, take the link out of reset (the link
+        * will be in reset, because we previously reset the chip). This will
+        * restart auto-negotiation.  If auto-negotiation is successful then the
+        * link-up status bit will be set and the flow control enable bits (RFCE
+        * and TFCE) will be set according to their negotiated value.
+        */
+       DEBUGOUT("Auto-negotiation enabled\n");
 
-    tctl = er32(TCTL);
+       ew32(TXCW, txcw);
+       ew32(CTRL, ctrl);
+       E1000_WRITE_FLUSH();
 
-    tctl &= ~E1000_TCTL_COLD;
-    tctl |= coll_dist << E1000_COLD_SHIFT;
+       hw->txcw = txcw;
+       msleep(1);
 
-    ew32(TCTL, tctl);
-    E1000_WRITE_FLUSH();
+       /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+        * indication in the Device Status Register.  Time-out if a link isn't
+        * seen in 500 milliseconds (Auto-negotiation should complete in
+        * less than 500 milliseconds even if the other end is doing it in SW).
+        * For internal serdes, we just assume a signal is present, then poll.
+        */
+       if (hw->media_type == e1000_media_type_internal_serdes ||
+           (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+               DEBUGOUT("Looking for Link\n");
+               for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+                       msleep(10);
+                       status = er32(STATUS);
+                       if (status & E1000_STATUS_LU)
+                               break;
+               }
+               if (i == (LINK_UP_TIMEOUT / 10)) {
+                       DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+                       hw->autoneg_failed = 1;
+                       /* AutoNeg failed to achieve a link, so we'll call
+                        * e1000_check_for_link. This routine will force the link up if
+                        * we detect a signal. This will allow us to communicate with
+                        * non-autonegotiating link partners.
+                        */
+                       ret_val = e1000_check_for_link(hw);
+                       if (ret_val) {
+                               DEBUGOUT("Error while checking for link\n");
+                               return ret_val;
+                       }
+                       hw->autoneg_failed = 0;
+               } else {
+                       hw->autoneg_failed = 0;
+                       DEBUGOUT("Valid Link Found\n");
+               }
+       } else {
+               DEBUGOUT("No Signal Detected\n");
+       }
+       return E1000_SUCCESS;
 }
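
/*
 * Editor's sketch (not part of this patch): the polling loop above can be
 * read as a small helper that waits up to LINK_UP_TIMEOUT milliseconds for
 * the Link Up bit in the STATUS register.  The helper name is hypothetical;
 * it only restates the loop in e1000_setup_fiber_serdes_link().
 */
static bool e1000_poll_for_link_up(struct e1000_hw *hw)
{
        u32 i;

        for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
                msleep(10);
                if (er32(STATUS) & E1000_STATUS_LU)
                        return true;    /* auto-negotiation brought the link up */
        }
        return false;                   /* ~500 ms elapsed without link */
}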
 
-/******************************************************************************
-* Sets MAC speed and duplex settings to reflect those in the PHY
-*
-* hw - Struct containing variables accessed by shared code
-* mii_reg - data to write to the MII control register
-*
-* The contents of the PHY register containing the needed information need to
-* be passed in.
-******************************************************************************/
-static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
-{
-    u32 ctrl;
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_config_mac_to_phy");
-
-    /* 82544 or newer MAC, Auto Speed Detection takes care of
-    * MAC speed/duplex configuration.*/
-    if (hw->mac_type >= e1000_82544)
-        return E1000_SUCCESS;
-
-    /* Read the Device Control Register and set the bits to Force Speed
-     * and Duplex.
-     */
-    ctrl = er32(CTRL);
-    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
-    ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
-
-    /* Set up duplex in the Device Control and Transmit Control
-     * registers depending on negotiated values.
-     */
-    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    if (phy_data & M88E1000_PSSR_DPLX)
-        ctrl |= E1000_CTRL_FD;
-    else
-        ctrl &= ~E1000_CTRL_FD;
-
-    e1000_config_collision_dist(hw);
-
-    /* Set up speed in the Device Control register depending on
-     * negotiated values.
-     */
-    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
-        ctrl |= E1000_CTRL_SPD_1000;
-    else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
-        ctrl |= E1000_CTRL_SPD_100;
-
-    /* Write the configured values back to the Device Control Reg. */
-    ew32(CTRL, ctrl);
-    return E1000_SUCCESS;
-}
-
-/******************************************************************************
- * Forces the MAC's flow control settings.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Sets the TFCE and RFCE bits in the device control register to reflect
- * the adapter settings. TFCE and RFCE need to be explicitly set by
- * software when a Copper PHY is used because autonegotiation is managed
- * by the PHY rather than the MAC. Software must also configure these
- * bits when link is forced on a fiber connection.
- *****************************************************************************/
-s32 e1000_force_mac_fc(struct e1000_hw *hw)
-{
-    u32 ctrl;
-
-    DEBUGFUNC("e1000_force_mac_fc");
-
-    /* Get the current configuration of the Device Control Register */
-    ctrl = er32(CTRL);
-
-    /* Because we didn't get link via the internal auto-negotiation
-     * mechanism (we either forced link or we got link via PHY
-     * auto-neg), we have to manually enable/disable transmit and
-     * receive flow control.
-     *
-     * The "Case" statement below enables/disable flow control
-     * according to the "hw->fc" parameter.
-     *
-     * The possible values of the "fc" parameter are:
-     *      0:  Flow control is completely disabled
-     *      1:  Rx flow control is enabled (we can receive pause
-     *          frames but not send pause frames).
-     *      2:  Tx flow control is enabled (we can send pause frames
-     *          but we do not receive pause frames).
-     *      3:  Both Rx and TX flow control (symmetric) is enabled.
-     *  other:  No other values should be possible at this point.
-     */
-
-    switch (hw->fc) {
-    case E1000_FC_NONE:
-        ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
-        break;
-    case E1000_FC_RX_PAUSE:
-        ctrl &= (~E1000_CTRL_TFCE);
-        ctrl |= E1000_CTRL_RFCE;
-        break;
-    case E1000_FC_TX_PAUSE:
-        ctrl &= (~E1000_CTRL_RFCE);
-        ctrl |= E1000_CTRL_TFCE;
-        break;
-    case E1000_FC_FULL:
-        ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
-        break;
-    default:
-        DEBUGOUT("Flow control param set incorrectly\n");
-        return -E1000_ERR_CONFIG;
-    }
-
-    /* Disable TX Flow Control for 82542 (rev 2.0) */
-    if (hw->mac_type == e1000_82542_rev2_0)
-        ctrl &= (~E1000_CTRL_TFCE);
-
-    ew32(CTRL, ctrl);
-    return E1000_SUCCESS;
-}
-
-/******************************************************************************
- * Configures flow control settings after link is established
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Should be called immediately after a valid link has been established.
- * Forces MAC flow control settings if link was forced. When in MII/GMII mode
- * and autonegotiation is enabled, the MAC flow control settings will be set
- * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
- * and RFCE bits will be automatically set to the negotiated flow control mode.
- *****************************************************************************/
-static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 mii_status_reg;
-    u16 mii_nway_adv_reg;
-    u16 mii_nway_lp_ability_reg;
-    u16 speed;
-    u16 duplex;
-
-    DEBUGFUNC("e1000_config_fc_after_link_up");
-
-    /* Check for the case where we have fiber media and auto-neg failed
-     * so we had to force link.  In this case, we need to force the
-     * configuration of the MAC to match the "fc" parameter.
-     */
-    if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
-        ((hw->media_type == e1000_media_type_internal_serdes) &&
-         (hw->autoneg_failed)) ||
-        ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
-        ret_val = e1000_force_mac_fc(hw);
-        if (ret_val) {
-            DEBUGOUT("Error forcing flow control settings\n");
-            return ret_val;
-        }
-    }
-
-    /* Check for the case where we have copper media and auto-neg is
-     * enabled.  In this case, we need to check and see if Auto-Neg
-     * has completed, and if so, how the PHY and link partner has
-     * flow control configured.
-     */
-    if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
-        /* Read the MII Status Register and check to see if AutoNeg
-         * has completed.  We read this twice because this reg has
-         * some "sticky" (latched) bits.
-         */
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if (ret_val)
-            return ret_val;
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if (ret_val)
-            return ret_val;
-
-        if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
-            /* The AutoNeg process has completed, so we now need to
-             * read both the Auto Negotiation Advertisement Register
-             * (Address 4) and the Auto_Negotiation Base Page Ability
-             * Register (Address 5) to determine how flow control was
-             * negotiated.
-             */
-            ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
-                                         &mii_nway_adv_reg);
-            if (ret_val)
-                return ret_val;
-            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
-                                         &mii_nway_lp_ability_reg);
-            if (ret_val)
-                return ret_val;
-
-            /* Two bits in the Auto Negotiation Advertisement Register
-             * (Address 4) and two bits in the Auto Negotiation Base
-             * Page Ability Register (Address 5) determine flow control
-             * for both the PHY and the link partner.  The following
-             * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
-             * 1999, describes these PAUSE resolution bits and how flow
-             * control is determined based upon these settings.
-             * NOTE:  DC = Don't Care
-             *
-             *   LOCAL DEVICE  |   LINK PARTNER
-             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
-             *-------|---------|-------|---------|--------------------
-             *   0   |    0    |  DC   |   DC    | E1000_FC_NONE
-             *   0   |    1    |   0   |   DC    | E1000_FC_NONE
-             *   0   |    1    |   1   |    0    | E1000_FC_NONE
-             *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
-             *   1   |    0    |   0   |   DC    | E1000_FC_NONE
-             *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
-             *   1   |    1    |   0   |    0    | E1000_FC_NONE
-             *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
-             *
-             */
-            /* Are both PAUSE bits set to 1?  If so, this implies
-             * Symmetric Flow Control is enabled at both ends.  The
-             * ASM_DIR bits are irrelevant per the spec.
-             *
-             * For Symmetric Flow Control:
-             *
-             *   LOCAL DEVICE  |   LINK PARTNER
-             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-             *-------|---------|-------|---------|--------------------
-             *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
-             *
-             */
-            if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
-                /* Now we need to check if the user selected RX ONLY
-                 * of pause frames.  In this case, we had to advertise
-                 * FULL flow control because we could not advertise RX
-                 * ONLY. Hence, we must now check to see if we need to
-                 * turn OFF  the TRANSMISSION of PAUSE frames.
-                 */
-                if (hw->original_fc == E1000_FC_FULL) {
-                    hw->fc = E1000_FC_FULL;
-                    DEBUGOUT("Flow Control = FULL.\n");
-                } else {
-                    hw->fc = E1000_FC_RX_PAUSE;
-                    DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
-                }
-            }
-            /* For receiving PAUSE frames ONLY.
-             *
-             *   LOCAL DEVICE  |   LINK PARTNER
-             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-             *-------|---------|-------|---------|--------------------
-             *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
-             *
-             */
-            else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
-                     (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
-                hw->fc = E1000_FC_TX_PAUSE;
-                DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
-            }
-            /* For transmitting PAUSE frames ONLY.
-             *
-             *   LOCAL DEVICE  |   LINK PARTNER
-             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
-             *-------|---------|-------|---------|--------------------
-             *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
-             *
-             */
-            else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
-                     !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
-                hw->fc = E1000_FC_RX_PAUSE;
-                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
-            }
-            /* Per the IEEE spec, at this point flow control should be
-             * disabled.  However, we want to consider that we could
-             * be connected to a legacy switch that doesn't advertise
-             * desired flow control, but can be forced on the link
-             * partner.  So if we advertised no flow control, that is
-             * what we will resolve to.  If we advertised some kind of
-             * receive capability (Rx Pause Only or Full Flow Control)
-             * and the link partner advertised none, we will configure
-             * ourselves to enable Rx Flow Control only.  We can do
-             * this safely for two reasons:  If the link partner really
-             * didn't want flow control enabled, and we enable Rx, no
-             * harm done since we won't be receiving any PAUSE frames
-             * anyway.  If the intent on the link partner was to have
-             * flow control enabled, then by us enabling RX only, we
-             * can at least receive pause frames and process them.
-             * This is a good idea because in most cases, since we are
-             * predominantly a server NIC, more times than not we will
-             * be asked to delay transmission of packets than asking
-             * our link partner to pause transmission of frames.
-             */
-            else if ((hw->original_fc == E1000_FC_NONE ||
-                      hw->original_fc == E1000_FC_TX_PAUSE) ||
-                      hw->fc_strict_ieee) {
-                hw->fc = E1000_FC_NONE;
-                DEBUGOUT("Flow Control = NONE.\n");
-            } else {
-                hw->fc = E1000_FC_RX_PAUSE;
-                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
-            }
-
-            /* Now we need to do one last check...  If we auto-
-             * negotiated to HALF DUPLEX, flow control should not be
-             * enabled per IEEE 802.3 spec.
-             */
-            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
-            if (ret_val) {
-                DEBUGOUT("Error getting link speed and duplex\n");
-                return ret_val;
-            }
-
-            if (duplex == HALF_DUPLEX)
-                hw->fc = E1000_FC_NONE;
-
-            /* Now we call a subroutine to actually force the MAC
-             * controller to use the correct flow control settings.
-             */
-            ret_val = e1000_force_mac_fc(hw);
-            if (ret_val) {
-                DEBUGOUT("Error forcing flow control settings\n");
-                return ret_val;
-            }
-        } else {
-            DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
-        }
-    }
-    return E1000_SUCCESS;
-}
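
/*
 * Editor's sketch (not part of this patch): the PAUSE/ASM_DIR resolution
 * table in the removed comment above condenses to the helper below.  The
 * function name and the 'requested_fc' parameter (the driver's original fc
 * setting) are hypothetical; 'local' and 'partner' carry the NWAY_AR_* and
 * NWAY_LPAR_* advertisement words read from the PHY.  The legacy-switch
 * exception handled in the removed code is omitted for brevity.
 */
static u32 e1000_resolve_fc(u16 local, u16 partner, u32 requested_fc)
{
        /* Both sides advertise PAUSE: symmetric, unless we only wanted Rx */
        if ((local & NWAY_AR_PAUSE) && (partner & NWAY_LPAR_PAUSE))
                return (requested_fc == E1000_FC_FULL) ?
                       E1000_FC_FULL : E1000_FC_RX_PAUSE;

        /* We advertise ASM_DIR only, partner advertises PAUSE and ASM_DIR */
        if (!(local & NWAY_AR_PAUSE) && (local & NWAY_AR_ASM_DIR) &&
            (partner & NWAY_LPAR_PAUSE) && (partner & NWAY_LPAR_ASM_DIR))
                return E1000_FC_TX_PAUSE;

        /* We advertise PAUSE and ASM_DIR, partner advertises ASM_DIR only */
        if ((local & NWAY_AR_PAUSE) && (local & NWAY_AR_ASM_DIR) &&
            !(partner & NWAY_LPAR_PAUSE) && (partner & NWAY_LPAR_ASM_DIR))
                return E1000_FC_RX_PAUSE;

        return E1000_FC_NONE;   /* per IEEE 802.3, otherwise no flow control */
}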
-
-/******************************************************************************
- * Checks to see if the link status of the hardware has changed.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Called by any function that needs to check the link status of the adapter.
- *****************************************************************************/
-s32 e1000_check_for_link(struct e1000_hw *hw)
-{
-    u32 rxcw = 0;
-    u32 ctrl;
-    u32 status;
-    u32 rctl;
-    u32 icr;
-    u32 signal = 0;
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_check_for_link");
-
-    ctrl = er32(CTRL);
-    status = er32(STATUS);
-
-    /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be
-     * set when the optics detect a signal. On older adapters, it will be
-     * cleared when there is a signal.  This applies to fiber media only.
-     */
-    if ((hw->media_type == e1000_media_type_fiber) ||
-        (hw->media_type == e1000_media_type_internal_serdes)) {
-        rxcw = er32(RXCW);
-
-        if (hw->media_type == e1000_media_type_fiber) {
-            signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
-            if (status & E1000_STATUS_LU)
-                hw->get_link_status = false;
-        }
-    }
-
-    /* If we have a copper PHY then we only want to go out to the PHY
-     * registers to see if Auto-Neg has completed and/or if our link
-     * status has changed.  The get_link_status flag will be set if we
-     * receive a Link Status Change interrupt or we have Rx Sequence
-     * Errors.
-     */
-    if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
-        /* First we want to see if the MII Status Register reports
-         * link.  If so, then we want to get the current speed/duplex
-         * of the PHY.
-         * Read the register twice since the link bit is sticky.
-         */
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if (ret_val)
-            return ret_val;
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        if (phy_data & MII_SR_LINK_STATUS) {
-            hw->get_link_status = false;
-            /* Check if there was DownShift, must be checked immediately after
-             * link-up */
-            e1000_check_downshift(hw);
-
-            /* If we are on 82544 or 82543 silicon and speed/duplex
-             * are forced to 10H or 10F, then we will implement the polarity
-             * reversal workaround.  We disable interrupts first, and upon
-             * returning, place the devices interrupt state to its previous
-             * value except for the link status change interrupt which will
-             * happen due to the execution of this workaround.
-             */
-
-            if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
-                (!hw->autoneg) &&
-                (hw->forced_speed_duplex == e1000_10_full ||
-                 hw->forced_speed_duplex == e1000_10_half)) {
-                ew32(IMC, 0xffffffff);
-                ret_val = e1000_polarity_reversal_workaround(hw);
-                icr = er32(ICR);
-                ew32(ICS, (icr & ~E1000_ICS_LSC));
-                ew32(IMS, IMS_ENABLE_MASK);
-            }
-
-        } else {
-            /* No link detected */
-            e1000_config_dsp_after_link_change(hw, false);
-            return 0;
-        }
-
-        /* If we are forcing speed/duplex, then we simply return since
-         * we have already determined whether we have link or not.
-         */
-        if (!hw->autoneg) return -E1000_ERR_CONFIG;
-
-        /* optimize the dsp settings for the igp phy */
-        e1000_config_dsp_after_link_change(hw, true);
-
-        /* We have a M88E1000 PHY and Auto-Neg is enabled.  If we
-         * have Si on board that is 82544 or newer, Auto
-         * Speed Detection takes care of MAC speed/duplex
-         * configuration.  So we only need to configure Collision
-         * Distance in the MAC.  Otherwise, we need to force
-         * speed/duplex on the MAC to the current PHY speed/duplex
-         * settings.
-         */
-        if (hw->mac_type >= e1000_82544)
-            e1000_config_collision_dist(hw);
-        else {
-            ret_val = e1000_config_mac_to_phy(hw);
-            if (ret_val) {
-                DEBUGOUT("Error configuring MAC to PHY settings\n");
-                return ret_val;
-            }
-        }
-
-        /* Configure Flow Control now that Auto-Neg has completed. First, we
-         * need to restore the desired flow control settings because we may
-         * have had to re-autoneg with a different link partner.
-         */
-        ret_val = e1000_config_fc_after_link_up(hw);
-        if (ret_val) {
-            DEBUGOUT("Error configuring flow control\n");
-            return ret_val;
-        }
-
-        /* At this point we know that we are on copper and we have
-         * auto-negotiated link.  These are conditions for checking the link
-         * partner capability register.  We use the link speed to determine if
-         * TBI compatibility needs to be turned on or off.  If the link is not
-         * at gigabit speed, then TBI compatibility is not needed.  If we are
-         * at gigabit speed, we turn on TBI compatibility.
-         */
-        if (hw->tbi_compatibility_en) {
-            u16 speed, duplex;
-            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
-            if (ret_val) {
-                DEBUGOUT("Error getting link speed and duplex\n");
-                return ret_val;
-            }
-            if (speed != SPEED_1000) {
-                /* If link speed is not set to gigabit speed, we do not need
-                 * to enable TBI compatibility.
-                 */
-                if (hw->tbi_compatibility_on) {
-                    /* If we previously were in the mode, turn it off. */
-                    rctl = er32(RCTL);
-                    rctl &= ~E1000_RCTL_SBP;
-                    ew32(RCTL, rctl);
-                    hw->tbi_compatibility_on = false;
-                }
-            } else {
-                /* If TBI compatibility was previously off, turn it on. For
-                 * compatibility with a TBI link partner, we will store bad
-                 * packets. Some frames have an additional byte on the end and
-                 * will look like CRC errors to the hardware.
-                 */
-                if (!hw->tbi_compatibility_on) {
-                    hw->tbi_compatibility_on = true;
-                    rctl = er32(RCTL);
-                    rctl |= E1000_RCTL_SBP;
-                    ew32(RCTL, rctl);
-                }
-            }
-        }
-    }
-    /* If we don't have link (auto-negotiation failed or link partner cannot
-     * auto-negotiate), the cable is plugged in (we have signal), and our
-     * link partner is not trying to auto-negotiate with us (we are receiving
-     * idles or data), we need to force link up. We also need to give
-     * auto-negotiation time to complete, in case the cable was just plugged
-     * in. The autoneg_failed flag does this.
-     */
-    else if ((((hw->media_type == e1000_media_type_fiber) &&
-              ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
-              (hw->media_type == e1000_media_type_internal_serdes)) &&
-              (!(status & E1000_STATUS_LU)) &&
-              (!(rxcw & E1000_RXCW_C))) {
-        if (hw->autoneg_failed == 0) {
-            hw->autoneg_failed = 1;
-            return 0;
-        }
-        DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
-
-        /* Disable auto-negotiation in the TXCW register */
-        ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
-
-        /* Force link-up and also force full-duplex. */
-        ctrl = er32(CTRL);
-        ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
-        ew32(CTRL, ctrl);
-
-        /* Configure Flow Control after forcing link up. */
-        ret_val = e1000_config_fc_after_link_up(hw);
-        if (ret_val) {
-            DEBUGOUT("Error configuring flow control\n");
-            return ret_val;
-        }
-    }
-    /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
-     * auto-negotiation in the TXCW register and disable forced link in the
-     * Device Control register in an attempt to auto-negotiate with our link
-     * partner.
-     */
-    else if (((hw->media_type == e1000_media_type_fiber) ||
-              (hw->media_type == e1000_media_type_internal_serdes)) &&
-              (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
-        DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
-        ew32(TXCW, hw->txcw);
-        ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
-
-        hw->serdes_link_down = false;
-    }
-    /* If we force link for non-auto-negotiation switch, check link status
-     * based on MAC synchronization for internal serdes media type.
-     */
-    else if ((hw->media_type == e1000_media_type_internal_serdes) &&
-             !(E1000_TXCW_ANE & er32(TXCW))) {
-        /* SYNCH bit and IV bit are sticky. */
-        udelay(10);
-        if (E1000_RXCW_SYNCH & er32(RXCW)) {
-            if (!(rxcw & E1000_RXCW_IV)) {
-                hw->serdes_link_down = false;
-                DEBUGOUT("SERDES: Link is up.\n");
-            }
-        } else {
-            hw->serdes_link_down = true;
-            DEBUGOUT("SERDES: Link is down.\n");
-        }
-    }
-    if ((hw->media_type == e1000_media_type_internal_serdes) &&
-        (E1000_TXCW_ANE & er32(TXCW))) {
-        hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
-    }
-    return E1000_SUCCESS;
-}
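
/*
 * Editor's sketch (not part of this patch): the TBI-compatibility handling
 * in the removed e1000_check_for_link() above amounts to toggling the Store
 * Bad Packets bit as the negotiated speed crosses 1000 Mb/s.  The helper
 * name is hypothetical.
 */
static void e1000_set_tbi_compatibility(struct e1000_hw *hw, bool enable)
{
        u32 rctl = er32(RCTL);

        if (enable)
                rctl |= E1000_RCTL_SBP;   /* keep frames that look like CRC errors */
        else
                rctl &= ~E1000_RCTL_SBP;

        ew32(RCTL, rctl);
        hw->tbi_compatibility_on = enable;
}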
-
-/******************************************************************************
- * Detects the current speed and duplex settings of the hardware.
+/**
+ * e1000_copper_link_preconfig - early configuration for copper
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- * speed - Speed of the connection
- * duplex - Duplex setting of the connection
- *****************************************************************************/
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+ * Make sure we have a valid PHY and change PHY mode before link setup.
+ */
+static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
 {
-    u32 status;
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_get_speed_and_duplex");
-
-    if (hw->mac_type >= e1000_82543) {
-        status = er32(STATUS);
-        if (status & E1000_STATUS_SPEED_1000) {
-            *speed = SPEED_1000;
-            DEBUGOUT("1000 Mbs, ");
-        } else if (status & E1000_STATUS_SPEED_100) {
-            *speed = SPEED_100;
-            DEBUGOUT("100 Mbs, ");
-        } else {
-            *speed = SPEED_10;
-            DEBUGOUT("10 Mbs, ");
-        }
-
-        if (status & E1000_STATUS_FD) {
-            *duplex = FULL_DUPLEX;
-            DEBUGOUT("Full Duplex\n");
-        } else {
-            *duplex = HALF_DUPLEX;
-            DEBUGOUT(" Half Duplex\n");
-        }
-    } else {
-        DEBUGOUT("1000 Mbs, Full Duplex\n");
-        *speed = SPEED_1000;
-        *duplex = FULL_DUPLEX;
-    }
-
-    /* IGP01 PHY may advertise full duplex operation after speed downgrade even
-     * if it is operating at half duplex.  Here we set the duplex settings to
-     * match the duplex in the link partner's capabilities.
-     */
-    if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
-        ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
-            *duplex = HALF_DUPLEX;
-        else {
-            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
-            if (ret_val)
-                return ret_val;
-            if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
-               (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
-                *duplex = HALF_DUPLEX;
-        }
-    }
-
-    if ((hw->mac_type == e1000_80003es2lan) &&
-        (hw->media_type == e1000_media_type_copper)) {
-        if (*speed == SPEED_1000)
-            ret_val = e1000_configure_kmrn_for_1000(hw);
-        else
-            ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
-        if (ret_val)
-            return ret_val;
-    }
-
-    if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
-        ret_val = e1000_kumeran_lock_loss_workaround(hw);
-        if (ret_val)
-            return ret_val;
-    }
-
-    return E1000_SUCCESS;
-}
+       u32 ctrl;
+       s32 ret_val;
+       u16 phy_data;
 
-/******************************************************************************
-* Blocks until autoneg completes or times out (~4.5 seconds)
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_wait_autoneg(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 i;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_wait_autoneg");
-    DEBUGOUT("Waiting for Auto-Neg to complete.\n");
-
-    /* We will wait for autoneg to complete or 4.5 seconds to expire. */
-    for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
-        /* Read the MII Status Register and wait for Auto-Neg
-         * Complete bit to be set.
-         */
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if (ret_val)
-            return ret_val;
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if (ret_val)
-            return ret_val;
-        if (phy_data & MII_SR_AUTONEG_COMPLETE) {
-            return E1000_SUCCESS;
-        }
-        msleep(100);
-    }
-    return E1000_SUCCESS;
-}
+       DEBUGFUNC("e1000_copper_link_preconfig");
 
-/******************************************************************************
-* Raises the Management Data Clock
-*
-* hw - Struct containing variables accessed by shared code
-* ctrl - Device control register's current value
-******************************************************************************/
-static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
-{
-    /* Raise the clock input to the Management Data Clock (by setting the MDC
-     * bit), and then delay 10 microseconds.
-     */
-    ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
-    E1000_WRITE_FLUSH();
-    udelay(10);
-}
+       ctrl = er32(CTRL);
+       /* With the 82543, we need to force the MAC's speed and duplex to match
+        * the PHY's speed and duplex configuration. In addition, we need to
+        * perform a hardware reset on the PHY to take it out of reset.
+        */
+       if (hw->mac_type > e1000_82543) {
+               ctrl |= E1000_CTRL_SLU;
+               ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+               ew32(CTRL, ctrl);
+       } else {
+               ctrl |=
+                   (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+               ew32(CTRL, ctrl);
+               ret_val = e1000_phy_hw_reset(hw);
+               if (ret_val)
+                       return ret_val;
+       }
 
-/******************************************************************************
-* Lowers the Management Data Clock
-*
-* hw - Struct containing variables accessed by shared code
-* ctrl - Device control register's current value
-******************************************************************************/
-static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
-{
-    /* Lower the clock input to the Management Data Clock (by clearing the MDC
-     * bit), and then delay 10 microseconds.
-     */
-    ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
-    E1000_WRITE_FLUSH();
-    udelay(10);
-}
+       /* Make sure we have a valid PHY */
+       ret_val = e1000_detect_gig_phy(hw);
+       if (ret_val) {
+               DEBUGOUT("Error, did not detect valid phy.\n");
+               return ret_val;
+       }
+       DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+       /* Set PHY to class A mode (if necessary) */
+       ret_val = e1000_set_phy_mode(hw);
+       if (ret_val)
+               return ret_val;
+
+       if ((hw->mac_type == e1000_82545_rev_3) ||
+           (hw->mac_type == e1000_82546_rev_3)) {
+               ret_val =
+                   e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+               phy_data |= 0x00000008;
+               ret_val =
+                   e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+       }
 
-/******************************************************************************
-* Shifts data bits out to the PHY
-*
-* hw - Struct containing variables accessed by shared code
-* data - Data to send out to the PHY
-* count - Number of bits to shift out
-*
-* Bits are shifted out in MSB to LSB order.
-******************************************************************************/
-static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
-{
-    u32 ctrl;
-    u32 mask;
-
-    /* We need to shift "count" number of bits out to the PHY. So, the value
-     * in the "data" parameter will be shifted out to the PHY one bit at a
-     * time. In order to do this, "data" must be broken down into bits.
-     */
-    mask = 0x01;
-    mask <<= (count - 1);
-
-    ctrl = er32(CTRL);
-
-    /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
-    ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
-
-    while (mask) {
-        /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
-         * then raising and lowering the Management Data Clock. A "0" is
-         * shifted out to the PHY by setting the MDIO bit to "0" and then
-         * raising and lowering the clock.
-         */
-        if (data & mask)
-            ctrl |= E1000_CTRL_MDIO;
-        else
-            ctrl &= ~E1000_CTRL_MDIO;
-
-        ew32(CTRL, ctrl);
-        E1000_WRITE_FLUSH();
-
-        udelay(10);
-
-        e1000_raise_mdi_clk(hw, &ctrl);
-        e1000_lower_mdi_clk(hw, &ctrl);
-
-        mask = mask >> 1;
-    }
-}
+       if (hw->mac_type <= e1000_82543 ||
+           hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+           hw->mac_type == e1000_82541_rev_2 ||
+           hw->mac_type == e1000_82547_rev_2)
+               hw->phy_reset_disable = false;
 
-/******************************************************************************
-* Shifts data bits in from the PHY
-*
-* hw - Struct containing variables accessed by shared code
-*
-* Bits are shifted in in MSB to LSB order.
-******************************************************************************/
-static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
-{
-    u32 ctrl;
-    u16 data = 0;
-    u8 i;
-
-    /* In order to read a register from the PHY, we need to shift in a total
-     * of 18 bits from the PHY. The first two bit (turnaround) times are used
-     * to avoid contention on the MDIO pin when a read operation is performed.
-     * These two bits are ignored by us and thrown away. Bits are "shifted in"
-     * by raising the input to the Management Data Clock (setting the MDC bit),
-     * and then reading the value of the MDIO bit.
-     */
-    ctrl = er32(CTRL);
-
-    /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
-    ctrl &= ~E1000_CTRL_MDIO_DIR;
-    ctrl &= ~E1000_CTRL_MDIO;
-
-    ew32(CTRL, ctrl);
-    E1000_WRITE_FLUSH();
-
-    /* Raise and Lower the clock before reading in the data. This accounts for
-     * the turnaround bits. The first clock occurred when we clocked out the
-     * last bit of the Register Address.
-     */
-    e1000_raise_mdi_clk(hw, &ctrl);
-    e1000_lower_mdi_clk(hw, &ctrl);
-
-    for (data = 0, i = 0; i < 16; i++) {
-        data = data << 1;
-        e1000_raise_mdi_clk(hw, &ctrl);
-        ctrl = er32(CTRL);
-        /* Check to see if we shifted in a "1". */
-        if (ctrl & E1000_CTRL_MDIO)
-            data |= 1;
-        e1000_lower_mdi_clk(hw, &ctrl);
-    }
-
-    e1000_raise_mdi_clk(hw, &ctrl);
-    e1000_lower_mdi_clk(hw, &ctrl);
-
-    return data;
+       return E1000_SUCCESS;
 }
 
-static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
+/**
+ * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
 {
-    u32 swfw_sync = 0;
-    u32 swmask = mask;
-    u32 fwmask = mask << 16;
-    s32 timeout = 200;
+       u32 led_ctrl;
+       s32 ret_val;
+       u16 phy_data;
 
-    DEBUGFUNC("e1000_swfw_sync_acquire");
+       DEBUGFUNC("e1000_copper_link_igp_setup");
 
-    if (hw->swfwhw_semaphore_present)
-        return e1000_get_software_flag(hw);
+       if (hw->phy_reset_disable)
+               return E1000_SUCCESS;
 
-    if (!hw->swfw_sync_present)
-        return e1000_get_hw_eeprom_semaphore(hw);
+       ret_val = e1000_phy_reset(hw);
+       if (ret_val) {
+               DEBUGOUT("Error Resetting the PHY\n");
+               return ret_val;
+       }
 
-    while (timeout) {
-            if (e1000_get_hw_eeprom_semaphore(hw))
-                return -E1000_ERR_SWFW_SYNC;
+       /* Wait 15ms for MAC to configure PHY from eeprom settings */
+       msleep(15);
+       /* Configure activity LED after PHY reset */
+       led_ctrl = er32(LEDCTL);
+       led_ctrl &= IGP_ACTIVITY_LED_MASK;
+       led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+       ew32(LEDCTL, led_ctrl);
+
+       /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+       if (hw->phy_type == e1000_phy_igp) {
+               /* disable lplu d3 during driver init */
+               ret_val = e1000_set_d3_lplu_state(hw, false);
+               if (ret_val) {
+                       DEBUGOUT("Error Disabling LPLU D3\n");
+                       return ret_val;
+               }
+       }
 
-            swfw_sync = er32(SW_FW_SYNC);
-            if (!(swfw_sync & (fwmask | swmask))) {
-                break;
-            }
+       /* Configure mdi-mdix settings */
+       ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+       if (ret_val)
+               return ret_val;
+
+       if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+               hw->dsp_config_state = e1000_dsp_config_disabled;
+               /* Force MDI for earlier revs of the IGP PHY */
+               phy_data &=
+                   ~(IGP01E1000_PSCR_AUTO_MDIX |
+                     IGP01E1000_PSCR_FORCE_MDI_MDIX);
+               hw->mdix = 1;
+
+       } else {
+               hw->dsp_config_state = e1000_dsp_config_enabled;
+               phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+               switch (hw->mdix) {
+               case 1:
+                       phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+                       break;
+               case 2:
+                       phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+                       break;
+               case 0:
+               default:
+                       phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+                       break;
+               }
+       }
+       ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+       if (ret_val)
+               return ret_val;
+
+       /* set auto-master slave resolution settings */
+       if (hw->autoneg) {
+               e1000_ms_type phy_ms_setting = hw->master_slave;
+
+               if (hw->ffe_config_state == e1000_ffe_config_active)
+                       hw->ffe_config_state = e1000_ffe_config_enabled;
+
+               if (hw->dsp_config_state == e1000_dsp_config_activated)
+                       hw->dsp_config_state = e1000_dsp_config_enabled;
+
+               /* When the auto-negotiation advertisement is 1000 Mbps only,
+                * disable SmartSpeed and fall back to the hardware default of
+                * automatic master/slave resolution. */
+               if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+                       /* Disable SmartSpeed */
+                       ret_val =
+                           e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                              &phy_data);
+                       if (ret_val)
+                               return ret_val;
+                       phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val =
+                           e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                               phy_data);
+                       if (ret_val)
+                               return ret_val;
+                       /* Set auto Master/Slave resolution process */
+                       ret_val =
+                           e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+                       if (ret_val)
+                               return ret_val;
+                       phy_data &= ~CR_1000T_MS_ENABLE;
+                       ret_val =
+                           e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+                       if (ret_val)
+                               return ret_val;
+               }
 
-            /* firmware currently using resource (fwmask) */
-            /* or other software thread currently using resource (swmask) */
-            e1000_put_hw_eeprom_semaphore(hw);
-            mdelay(5);
-            timeout--;
-    }
+               ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+               if (ret_val)
+                       return ret_val;
 
-    if (!timeout) {
-        DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
-        return -E1000_ERR_SWFW_SYNC;
-    }
+               /* load defaults for future use */
+               hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+                   ((phy_data & CR_1000T_MS_VALUE) ?
+                    e1000_ms_force_master :
+                    e1000_ms_force_slave) : e1000_ms_auto;
 
-    swfw_sync |= swmask;
-    ew32(SW_FW_SYNC, swfw_sync);
+               switch (phy_ms_setting) {
+               case e1000_ms_force_master:
+                       phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+                       break;
+               case e1000_ms_force_slave:
+                       phy_data |= CR_1000T_MS_ENABLE;
+                       phy_data &= ~(CR_1000T_MS_VALUE);
+                       break;
+               case e1000_ms_auto:
+                       phy_data &= ~CR_1000T_MS_ENABLE;
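+                       /* fall through */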
+               default:
+                       break;
+               }
+               ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+               if (ret_val)
+                       return ret_val;
+       }
 
-    e1000_put_hw_eeprom_semaphore(hw);
-    return E1000_SUCCESS;
+       return E1000_SUCCESS;
 }
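
/*
 * Editor's sketch (not part of this patch): the 1000BASE-T master/slave
 * encoding applied above, restated as a hypothetical helper.
 * CR_1000T_MS_ENABLE selects manual resolution and CR_1000T_MS_VALUE then
 * picks master (set) or slave (clear); clearing MS_ENABLE restores automatic
 * resolution.
 */
static u16 e1000_apply_ms_setting(u16 phy_1000t_ctrl, e1000_ms_type setting)
{
        switch (setting) {
        case e1000_ms_force_master:
                phy_1000t_ctrl |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
                break;
        case e1000_ms_force_slave:
                phy_1000t_ctrl |= CR_1000T_MS_ENABLE;
                phy_1000t_ctrl &= ~CR_1000T_MS_VALUE;
                break;
        case e1000_ms_auto:
        default:
                phy_1000t_ctrl &= ~CR_1000T_MS_ENABLE;
                break;
        }
        return phy_1000t_ctrl;
}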
 
-static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
+/**
+ * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 {
-    u32 swfw_sync;
-    u32 swmask = mask;
+       s32 ret_val;
+       u16 phy_data;
 
-    DEBUGFUNC("e1000_swfw_sync_release");
+       DEBUGFUNC("e1000_copper_link_mgp_setup");
 
-    if (hw->swfwhw_semaphore_present) {
-        e1000_release_software_flag(hw);
-        return;
-    }
+       if (hw->phy_reset_disable)
+               return E1000_SUCCESS;
 
-    if (!hw->swfw_sync_present) {
-        e1000_put_hw_eeprom_semaphore(hw);
-        return;
-    }
+       /* Enable CRS on TX. This must be set for half-duplex operation. */
+       ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               return ret_val;
 
-    /* if (e1000_get_hw_eeprom_semaphore(hw))
-     *    return -E1000_ERR_SWFW_SYNC; */
-    while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS);
-        /* empty */
+       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
 
-    swfw_sync = er32(SW_FW_SYNC);
-    swfw_sync &= ~swmask;
-    ew32(SW_FW_SYNC, swfw_sync);
-
-    e1000_put_hw_eeprom_semaphore(hw);
-}
-
-/*****************************************************************************
-* Reads the value from a PHY register, if the value is on a specific non zero
-* page, sets the page first.
-* hw - Struct containing variables accessed by shared code
-* reg_addr - address of the PHY register to read
-******************************************************************************/
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
-{
-    u32 ret_val;
-    u16 swfw;
-
-    DEBUGFUNC("e1000_read_phy_reg");
-
-    if ((hw->mac_type == e1000_80003es2lan) &&
-        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
-        swfw = E1000_SWFW_PHY1_SM;
-    } else {
-        swfw = E1000_SWFW_PHY0_SM;
-    }
-    if (e1000_swfw_sync_acquire(hw, swfw))
-        return -E1000_ERR_SWFW_SYNC;
-
-    if ((hw->phy_type == e1000_phy_igp ||
-        hw->phy_type == e1000_phy_igp_3 ||
-        hw->phy_type == e1000_phy_igp_2) &&
-       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
-        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
-                                         (u16)reg_addr);
-        if (ret_val) {
-            e1000_swfw_sync_release(hw, swfw);
-            return ret_val;
-        }
-    } else if (hw->phy_type == e1000_phy_gg82563) {
-        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
-            (hw->mac_type == e1000_80003es2lan)) {
-            /* Select Configuration Page */
-            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
-                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
-                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
-            } else {
-                /* Use Alternative Page Select register to access
-                 * registers 30 and 31
-                 */
-                ret_val = e1000_write_phy_reg_ex(hw,
-                                                 GG82563_PHY_PAGE_SELECT_ALT,
-                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
-            }
-
-            if (ret_val) {
-                e1000_swfw_sync_release(hw, swfw);
-                return ret_val;
-            }
-        }
-    }
-
-    ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
-                                    phy_data);
-
-    e1000_swfw_sync_release(hw, swfw);
-    return ret_val;
-}
+       /* Options:
+        *   MDI/MDI-X = 0 (default)
+        *   0 - Auto for all speeds
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+        */
+       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
 
-static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
-                                u16 *phy_data)
-{
-    u32 i;
-    u32 mdic = 0;
-    const u32 phy_addr = 1;
-
-    DEBUGFUNC("e1000_read_phy_reg_ex");
-
-    if (reg_addr > MAX_PHY_REG_ADDRESS) {
-        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
-        return -E1000_ERR_PARAM;
-    }
-
-    if (hw->mac_type > e1000_82543) {
-        /* Set up Op-code, Phy Address, and register address in the MDI
-         * Control register.  The MAC will take care of interfacing with the
-         * PHY to retrieve the desired data.
-         */
-        mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
-                (phy_addr << E1000_MDIC_PHY_SHIFT) |
-                (E1000_MDIC_OP_READ));
-
-        ew32(MDIC, mdic);
-
-        /* Poll the ready bit to see if the MDI read completed */
-        for (i = 0; i < 64; i++) {
-            udelay(50);
-            mdic = er32(MDIC);
-            if (mdic & E1000_MDIC_READY) break;
-        }
-        if (!(mdic & E1000_MDIC_READY)) {
-            DEBUGOUT("MDI Read did not complete\n");
-            return -E1000_ERR_PHY;
-        }
-        if (mdic & E1000_MDIC_ERROR) {
-            DEBUGOUT("MDI Error\n");
-            return -E1000_ERR_PHY;
-        }
-        *phy_data = (u16)mdic;
-    } else {
-        /* We must first send a preamble through the MDIO pin to signal the
-         * beginning of an MII instruction.  This is done by sending 32
-         * consecutive "1" bits.
-         */
-        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
-
-        /* Now combine the next few fields that are required for a read
-         * operation.  We use this method instead of calling the
-         * e1000_shift_out_mdi_bits routine five different times. The format of
-         * a MII read instruction consists of a shift out of 14 bits and is
-         * defined as follows:
-         *    <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
-         * followed by a shift in of 18 bits.  The first two bits shifted in
-         * are TurnAround bits used to avoid contention on the MDIO pin when a
-         * READ operation is performed.  These two bits are thrown away
-         * followed by a shift in of 16 bits which contains the desired data.
-         */
-        mdic = ((reg_addr) | (phy_addr << 5) |
-                (PHY_OP_READ << 10) | (PHY_SOF << 12));
-
-        e1000_shift_out_mdi_bits(hw, mdic, 14);
-
-        /* Now that we've shifted out the read command to the MII, we need to
-         * "shift in" the 16-bit value (18 total bits) of the requested PHY
-         * register address.
-         */
-        *phy_data = e1000_shift_in_mdi_bits(hw);
-    }
-    return E1000_SUCCESS;
-}
+       switch (hw->mdix) {
+       case 1:
+               phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+               break;
+       case 2:
+               phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+               break;
+       case 3:
+               phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+               break;
+       case 0:
+       default:
+               phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+               break;
+       }
 
-/******************************************************************************
-* Writes a value to a PHY register
-*
-* hw - Struct containing variables accessed by shared code
-* reg_addr - address of the PHY register to write
-* data - data to write to the PHY
-******************************************************************************/
-s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
-{
-    u32 ret_val;
-    u16 swfw;
-
-    DEBUGFUNC("e1000_write_phy_reg");
-
-    if ((hw->mac_type == e1000_80003es2lan) &&
-        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
-        swfw = E1000_SWFW_PHY1_SM;
-    } else {
-        swfw = E1000_SWFW_PHY0_SM;
-    }
-    if (e1000_swfw_sync_acquire(hw, swfw))
-        return -E1000_ERR_SWFW_SYNC;
-
-    if ((hw->phy_type == e1000_phy_igp ||
-        hw->phy_type == e1000_phy_igp_3 ||
-        hw->phy_type == e1000_phy_igp_2) &&
-       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
-        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
-                                         (u16)reg_addr);
-        if (ret_val) {
-            e1000_swfw_sync_release(hw, swfw);
-            return ret_val;
-        }
-    } else if (hw->phy_type == e1000_phy_gg82563) {
-        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
-            (hw->mac_type == e1000_80003es2lan)) {
-            /* Select Configuration Page */
-            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
-                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
-                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
-            } else {
-                /* Use Alternative Page Select register to access
-                 * registers 30 and 31
-                 */
-                ret_val = e1000_write_phy_reg_ex(hw,
-                                                 GG82563_PHY_PAGE_SELECT_ALT,
-                          (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
-            }
-
-            if (ret_val) {
-                e1000_swfw_sync_release(hw, swfw);
-                return ret_val;
-            }
-        }
-    }
-
-    ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
-                                     phy_data);
-
-    e1000_swfw_sync_release(hw, swfw);
-    return ret_val;
-}
+       /* Options:
+        *   disable_polarity_correction = 0 (default)
+        *       Automatic Correction for Reversed Cable Polarity
+        *   0 - Disabled
+        *   1 - Enabled
+        */
+       phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+       if (hw->disable_polarity_correction == 1)
+               phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               return ret_val;
+
+       if (hw->phy_revision < M88E1011_I_REV_4) {
+               /* Force TX_CLK in the Extended PHY Specific Control Register
+                * to 25MHz clock.
+                */
+               ret_val =
+                   e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+                                      &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+               if ((hw->phy_revision == E1000_REVISION_2) &&
+                   (hw->phy_id == M88E1111_I_PHY_ID)) {
+                       /* Vidalia Phy, set the downshift counter to 5x */
+                       phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+                       phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+                       ret_val = e1000_write_phy_reg(hw,
+                                                     M88E1000_EXT_PHY_SPEC_CTRL,
+                                                     phy_data);
+                       if (ret_val)
+                               return ret_val;
+               } else {
+                       /* Configure Master and Slave downshift values */
+                       phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+                                     M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+                       phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+                       ret_val = e1000_write_phy_reg(hw,
+                                                     M88E1000_EXT_PHY_SPEC_CTRL,
+                                                     phy_data);
+                       if (ret_val)
+                               return ret_val;
+               }
+       }
 
-static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
-                                 u16 phy_data)
-{
-    u32 i;
-    u32 mdic = 0;
-    const u32 phy_addr = 1;
-
-    DEBUGFUNC("e1000_write_phy_reg_ex");
-
-    if (reg_addr > MAX_PHY_REG_ADDRESS) {
-        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
-        return -E1000_ERR_PARAM;
-    }
-
-    if (hw->mac_type > e1000_82543) {
-        /* Set up Op-code, Phy Address, register address, and data intended
-         * for the PHY register in the MDI Control register.  The MAC will take
-         * care of interfacing with the PHY to send the desired data.
-         */
-        mdic = (((u32)phy_data) |
-                (reg_addr << E1000_MDIC_REG_SHIFT) |
-                (phy_addr << E1000_MDIC_PHY_SHIFT) |
-                (E1000_MDIC_OP_WRITE));
-
-        ew32(MDIC, mdic);
-
-        /* Poll the ready bit to see if the MDI read completed */
-        for (i = 0; i < 641; i++) {
-            udelay(5);
-            mdic = er32(MDIC);
-            if (mdic & E1000_MDIC_READY) break;
-        }
-        if (!(mdic & E1000_MDIC_READY)) {
-            DEBUGOUT("MDI Write did not complete\n");
-            return -E1000_ERR_PHY;
-        }
-    } else {
-        /* We'll need to use the SW defined pins to shift the write command
-         * out to the PHY. We first send a preamble to the PHY to signal the
-         * beginning of the MII instruction.  This is done by sending 32
-         * consecutive "1" bits.
-         */
-        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
-
-        /* Now combine the remaining required fields that will indicate a
-         * write operation. We use this method instead of calling the
-         * e1000_shift_out_mdi_bits routine for each field in the command. The
-         * format of a MII write instruction is as follows:
-         * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
-         */
-        mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
-                (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
-        mdic <<= 16;
-        mdic |= (u32)phy_data;
-
-        e1000_shift_out_mdi_bits(hw, mdic, 32);
-    }
-
-    return E1000_SUCCESS;
-}
+       /* SW Reset the PHY so all changes take effect */
+       ret_val = e1000_phy_reset(hw);
+       if (ret_val) {
+               DEBUGOUT("Error Resetting the PHY\n");
+               return ret_val;
+       }
 
-static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data)
-{
-    u32 reg_val;
-    u16 swfw;
-    DEBUGFUNC("e1000_read_kmrn_reg");
-
-    if ((hw->mac_type == e1000_80003es2lan) &&
-        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
-        swfw = E1000_SWFW_PHY1_SM;
-    } else {
-        swfw = E1000_SWFW_PHY0_SM;
-    }
-    if (e1000_swfw_sync_acquire(hw, swfw))
-        return -E1000_ERR_SWFW_SYNC;
-
-    /* Write register address */
-    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
-              E1000_KUMCTRLSTA_OFFSET) |
-              E1000_KUMCTRLSTA_REN;
-    ew32(KUMCTRLSTA, reg_val);
-    udelay(2);
-
-    /* Read the data returned */
-    reg_val = er32(KUMCTRLSTA);
-    *data = (u16)reg_val;
-
-    e1000_swfw_sync_release(hw, swfw);
-    return E1000_SUCCESS;
+       return E1000_SUCCESS;
 }
 
-static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data)
+/**
+ * e1000_copper_link_autoneg - setup auto-neg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Setup auto-negotiation and flow control advertisements,
+ * and then perform auto-negotiation.
+ */
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
 {
-    u32 reg_val;
-    u16 swfw;
-    DEBUGFUNC("e1000_write_kmrn_reg");
-
-    if ((hw->mac_type == e1000_80003es2lan) &&
-        (er32(STATUS) & E1000_STATUS_FUNC_1)) {
-        swfw = E1000_SWFW_PHY1_SM;
-    } else {
-        swfw = E1000_SWFW_PHY0_SM;
-    }
-    if (e1000_swfw_sync_acquire(hw, swfw))
-        return -E1000_ERR_SWFW_SYNC;
-
-    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
-              E1000_KUMCTRLSTA_OFFSET) | data;
-    ew32(KUMCTRLSTA, reg_val);
-    udelay(2);
-
-    e1000_swfw_sync_release(hw, swfw);
-    return E1000_SUCCESS;
-}
+       s32 ret_val;
+       u16 phy_data;
 
-/******************************************************************************
-* Returns the PHY to the power-on reset state
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-s32 e1000_phy_hw_reset(struct e1000_hw *hw)
-{
-    u32 ctrl, ctrl_ext;
-    u32 led_ctrl;
-    s32 ret_val;
-    u16 swfw;
-
-    DEBUGFUNC("e1000_phy_hw_reset");
-
-    /* In the case of the phy reset being blocked, it's not an error, we
-     * simply return success without performing the reset. */
-    ret_val = e1000_check_phy_reset_block(hw);
-    if (ret_val)
-        return E1000_SUCCESS;
-
-    DEBUGOUT("Resetting Phy...\n");
-
-    if (hw->mac_type > e1000_82543) {
-        if ((hw->mac_type == e1000_80003es2lan) &&
-            (er32(STATUS) & E1000_STATUS_FUNC_1)) {
-            swfw = E1000_SWFW_PHY1_SM;
-        } else {
-            swfw = E1000_SWFW_PHY0_SM;
-        }
-        if (e1000_swfw_sync_acquire(hw, swfw)) {
-            DEBUGOUT("Unable to acquire swfw sync\n");
-            return -E1000_ERR_SWFW_SYNC;
-        }
-        /* Read the device control register and assert the E1000_CTRL_PHY_RST
-         * bit. Then, take it out of reset.
-         * For pre-e1000_82571 hardware, we delay for 10ms between the assert
-         * and deassert.  For e1000_82571 hardware and later, we instead delay
-         * for 50us between and 10ms after the deassertion.
-         */
-        ctrl = er32(CTRL);
-        ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
-        E1000_WRITE_FLUSH();
-
-        if (hw->mac_type < e1000_82571)
-            msleep(10);
-        else
-            udelay(100);
-
-        ew32(CTRL, ctrl);
-        E1000_WRITE_FLUSH();
-
-        if (hw->mac_type >= e1000_82571)
-            mdelay(10);
-
-        e1000_swfw_sync_release(hw, swfw);
-    } else {
-        /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
-         * bit to put the PHY into reset. Then, take it out of reset.
-         */
-        ctrl_ext = er32(CTRL_EXT);
-        ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
-        ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
-        ew32(CTRL_EXT, ctrl_ext);
-        E1000_WRITE_FLUSH();
-        msleep(10);
-        ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
-        ew32(CTRL_EXT, ctrl_ext);
-        E1000_WRITE_FLUSH();
-    }
-    udelay(150);
-
-    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
-        /* Configure activity LED after PHY reset */
-        led_ctrl = er32(LEDCTL);
-        led_ctrl &= IGP_ACTIVITY_LED_MASK;
-        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
-        ew32(LEDCTL, led_ctrl);
-    }
-
-    /* Wait for FW to finish PHY configuration. */
-    ret_val = e1000_get_phy_cfg_done(hw);
-    if (ret_val != E1000_SUCCESS)
-        return ret_val;
-    e1000_release_software_semaphore(hw);
-
-    if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
-        ret_val = e1000_init_lcd_from_nvm(hw);
-
-    return ret_val;
-}
+       DEBUGFUNC("e1000_copper_link_autoneg");
 
-/******************************************************************************
-* Resets the PHY
-*
-* hw - Struct containing variables accessed by shared code
-*
-* Sets bit 15 of the MII Control register
-******************************************************************************/
-s32 e1000_phy_reset(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_phy_reset");
-
-    /* In the case of the phy reset being blocked, it's not an error, we
-     * simply return success without performing the reset. */
-    ret_val = e1000_check_phy_reset_block(hw);
-    if (ret_val)
-        return E1000_SUCCESS;
-
-    switch (hw->phy_type) {
-    case e1000_phy_igp:
-    case e1000_phy_igp_2:
-    case e1000_phy_igp_3:
-    case e1000_phy_ife:
-        ret_val = e1000_phy_hw_reset(hw);
-        if (ret_val)
-            return ret_val;
-        break;
-    default:
-        ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data |= MII_CR_RESET;
-        ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
-        if (ret_val)
-            return ret_val;
-
-        udelay(1);
-        break;
-    }
-
-    if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
-        e1000_phy_init_script(hw);
-
-    return E1000_SUCCESS;
-}
+       /* Perform some bounds checking on the hw->autoneg_advertised
+        * parameter.  If this variable is zero, then set it to the default.
+        */
+       hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
-/******************************************************************************
-* Work-around for 82566 power-down: on D3 entry-
-* 1) disable gigabit link
-* 2) write VR power-down enable
-* 3) read it back
-* if successful continue, else issue LCD reset and repeat
-*
-* hw - struct containing variables accessed by shared code
-******************************************************************************/
-void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
-{
-    s32 reg;
-    u16 phy_data;
-    s32 retry = 0;
+       /* If autoneg_advertised is zero, we assume it was not defaulted
+        * by the calling code, so we set it to advertise full capability.
+        */
+       if (hw->autoneg_advertised == 0)
+               hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+       DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+       ret_val = e1000_phy_setup_autoneg(hw);
+       if (ret_val) {
+               DEBUGOUT("Error Setting up Auto-Negotiation\n");
+               return ret_val;
+       }
+       DEBUGOUT("Restarting Auto-Neg\n");
 
-    DEBUGFUNC("e1000_phy_powerdown_workaround");
+       /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+        * the Auto Neg Restart bit in the PHY control register.
+        */
+       ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+       if (ret_val)
+               return ret_val;
 
-    if (hw->phy_type != e1000_phy_igp_3)
-        return;
+       phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+       ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+       if (ret_val)
+               return ret_val;
 
-    do {
-        /* Disable link */
-        reg = er32(PHY_CTRL);
-        ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
-                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+       /* Does the user want to wait for Auto-Neg to complete here, or
+        * check at a later time (for example, from a callback routine)?
+        */
+       if (hw->wait_autoneg_complete) {
+               ret_val = e1000_wait_autoneg(hw);
+               if (ret_val) {
+                       DEBUGOUT
+                           ("Error while waiting for autoneg to complete\n");
+                       return ret_val;
+               }
+       }
 
-        /* Write VR power-down enable - bits 9:8 should be 10b */
-        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
-        phy_data |= (1 << 9);
-        phy_data &= ~(1 << 8);
-        e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
+       hw->get_link_status = true;
 
-        /* Read it back and test */
-        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
-        if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
-            break;
+       return E1000_SUCCESS;
+}
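The restart above is a plain read-modify-write of the standard MII control register. A minimal standalone sketch of that sequence, assuming hypothetical mii_read()/mii_write() MDIO accessors (not driver functions) and the IEEE 802.3 register/bit numbers:

#include <stdint.h>

#define MII_CTRL_REG            0x00            /* standard MII control register */
#define MII_AUTONEG_ENABLE      (1u << 12)      /* auto-negotiation enable */
#define MII_AUTONEG_RESTART     (1u << 9)       /* restart auto-negotiation */

extern uint16_t mii_read(uint8_t reg);            /* assumed MDIO read */
extern void mii_write(uint8_t reg, uint16_t val); /* assumed MDIO write */

static void restart_autoneg(void)
{
        uint16_t ctrl = mii_read(MII_CTRL_REG);

        /* Read-modify-write: keep the other control bits intact */
        ctrl |= MII_AUTONEG_ENABLE | MII_AUTONEG_RESTART;
        mii_write(MII_CTRL_REG, ctrl);
}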
 
-        /* Issue PHY reset and repeat at most one more time */
-        reg = er32(CTRL);
-        ew32(CTRL, reg | E1000_CTRL_PHY_RST);
-        retry++;
-    } while (retry);
+/**
+ * e1000_copper_link_postconfig - post link setup
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Config the MAC and the PHY after link is up.
+ *   1) Set up the MAC to the current PHY speed/duplex
+ *      if we are on 82543.  If we are on newer silicon,
+ *      we only need to configure collision distance in
+ *      the Transmit Control Register.
+ *   2) Set up flow control on the MAC to that established with
+ *      the link partner.
+ *   3) Config DSP to improve Gigabit link quality for some PHY revisions.
+ */
+static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       DEBUGFUNC("e1000_copper_link_postconfig");
 
-    return;
+       if (hw->mac_type >= e1000_82544) {
+               e1000_config_collision_dist(hw);
+       } else {
+               ret_val = e1000_config_mac_to_phy(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error configuring MAC to PHY settings\n");
+                       return ret_val;
+               }
+       }
+       ret_val = e1000_config_fc_after_link_up(hw);
+       if (ret_val) {
+               DEBUGOUT("Error Configuring Flow Control\n");
+               return ret_val;
+       }
 
-}
+       /* Config DSP to improve Giga link quality */
+       if (hw->phy_type == e1000_phy_igp) {
+               ret_val = e1000_config_dsp_after_link_change(hw, true);
+               if (ret_val) {
+                       DEBUGOUT("Error Configuring DSP after link up\n");
+                       return ret_val;
+               }
+       }
 
-/******************************************************************************
-* Work-around for 82566 Kumeran PCS lock loss:
-* On link status change (i.e. PCI reset, speed change) and link is up and
-* speed is gigabit-
-* 0) if workaround is optionally disabled do nothing
-* 1) wait 1ms for Kumeran link to come up
-* 2) check Kumeran Diagnostic register PCS lock loss bit
-* 3) if not set the link is locked (all is good), otherwise...
-* 4) reset the PHY
-* 5) repeat up to 10 times
-* Note: this is only called for IGP3 copper when speed is 1gb.
-*
-* hw - struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    s32 reg;
-    s32 cnt;
-    u16 phy_data;
-
-    if (hw->kmrn_lock_loss_workaround_disabled)
-        return E1000_SUCCESS;
-
-    /* Make sure link is up before proceeding.  If not just return.
-     * Attempting this while link is negotiating fouled up link
-     * stability */
-    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-
-    if (phy_data & MII_SR_LINK_STATUS) {
-        for (cnt = 0; cnt < 10; cnt++) {
-            /* read once to clear */
-            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
-            if (ret_val)
-                return ret_val;
-            /* and again to get new status */
-            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            /* check for PCS lock */
-            if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
-                return E1000_SUCCESS;
-
-            /* Issue PHY reset */
-            e1000_phy_hw_reset(hw);
-            mdelay(5);
-        }
-        /* Disable GigE link negotiation */
-        reg = er32(PHY_CTRL);
-        ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
-                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
-
-        /* unable to acquire PCS lock */
-        return E1000_ERR_PHY;
-    }
-
-    return E1000_SUCCESS;
+       return E1000_SUCCESS;
 }
 
-/******************************************************************************
-* Probes the expected PHY address for known PHY IDs
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+/**
+ * e1000_setup_copper_link - phy/speed/duplex setting
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Detects which PHY is present and sets up the speed and duplex
+ */
+static s32 e1000_setup_copper_link(struct e1000_hw *hw)
 {
-    s32 phy_init_status, ret_val;
-    u16 phy_id_high, phy_id_low;
-    bool match = false;
-
-    DEBUGFUNC("e1000_detect_gig_phy");
-
-    if (hw->phy_id != 0)
-        return E1000_SUCCESS;
-
-    /* The 82571 firmware may still be configuring the PHY.  In this
-     * case, we cannot access the PHY until the configuration is done.  So
-     * we explicitly set the PHY values. */
-    if (hw->mac_type == e1000_82571 ||
-        hw->mac_type == e1000_82572) {
-        hw->phy_id = IGP01E1000_I_PHY_ID;
-        hw->phy_type = e1000_phy_igp_2;
-        return E1000_SUCCESS;
-    }
-
-    /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work-
-     * around that forces PHY page 0 to be set or the reads fail.  The rest of
-     * the code in this routine uses e1000_read_phy_reg to read the PHY ID.
-     * So for ESB-2 we need to have this set so our reads won't fail.  If the
-     * attached PHY is not a e1000_phy_gg82563, the routines below will figure
-     * this out as well. */
-    if (hw->mac_type == e1000_80003es2lan)
-        hw->phy_type = e1000_phy_gg82563;
-
-    /* Read the PHY ID Registers to identify which PHY is onboard. */
-    ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
-    if (ret_val)
-        return ret_val;
-
-    hw->phy_id = (u32)(phy_id_high << 16);
-    udelay(20);
-    ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
-    if (ret_val)
-        return ret_val;
-
-    hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
-    hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
-
-    switch (hw->mac_type) {
-    case e1000_82543:
-        if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
-        break;
-    case e1000_82544:
-        if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
-        break;
-    case e1000_82540:
-    case e1000_82545:
-    case e1000_82545_rev_3:
-    case e1000_82546:
-    case e1000_82546_rev_3:
-        if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
-        break;
-    case e1000_82541:
-    case e1000_82541_rev_2:
-    case e1000_82547:
-    case e1000_82547_rev_2:
-        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
-        break;
-    case e1000_82573:
-        if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
-        break;
-    case e1000_80003es2lan:
-        if (hw->phy_id == GG82563_E_PHY_ID) match = true;
-        break;
-    case e1000_ich8lan:
-        if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
-        if (hw->phy_id == IFE_E_PHY_ID) match = true;
-        if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
-        if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
-        break;
-    default:
-        DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
-        return -E1000_ERR_CONFIG;
-    }
-    phy_init_status = e1000_set_phy_type(hw);
-
-    if ((match) && (phy_init_status == E1000_SUCCESS)) {
-        DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
-        return E1000_SUCCESS;
-    }
-    DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
-    return -E1000_ERR_PHY;
-}
+       s32 ret_val;
+       u16 i;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_setup_copper_link");
+
+       /* Check if it is a valid PHY and set PHY mode if necessary. */
+       ret_val = e1000_copper_link_preconfig(hw);
+       if (ret_val)
+               return ret_val;
+
+       if (hw->phy_type == e1000_phy_igp) {
+               ret_val = e1000_copper_link_igp_setup(hw);
+               if (ret_val)
+                       return ret_val;
+       } else if (hw->phy_type == e1000_phy_m88) {
+               ret_val = e1000_copper_link_mgp_setup(hw);
+               if (ret_val)
+                       return ret_val;
+       }
 
-/******************************************************************************
-* Resets the PHY's DSP
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    DEBUGFUNC("e1000_phy_reset_dsp");
-
-    do {
-        if (hw->phy_type != e1000_phy_gg82563) {
-            ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
-            if (ret_val) break;
-        }
-        ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
-        if (ret_val) break;
-        ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
-        if (ret_val) break;
-        ret_val = E1000_SUCCESS;
-    } while (0);
-
-    return ret_val;
-}
+       if (hw->autoneg) {
+               /* Setup autoneg and flow control advertisement
+                * and perform autonegotiation */
+               ret_val = e1000_copper_link_autoneg(hw);
+               if (ret_val)
+                       return ret_val;
+       } else {
+               /* PHY will be set to 10H, 10F, 100H, or 100F
+                * depending on the value of forced_speed_duplex. */
+               DEBUGOUT("Forcing speed and duplex\n");
+               ret_val = e1000_phy_force_speed_duplex(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error Forcing Speed and Duplex\n");
+                       return ret_val;
+               }
+       }
 
-/******************************************************************************
-* Get PHY information from various PHY registers for igp PHY only.
-*
-* hw - Struct containing variables accessed by shared code
-* phy_info - PHY information structure
-******************************************************************************/
-static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
-                                 struct e1000_phy_info *phy_info)
-{
-    s32 ret_val;
-    u16 phy_data, min_length, max_length, average;
-    e1000_rev_polarity polarity;
-
-    DEBUGFUNC("e1000_phy_igp_get_info");
-
-    /* The downshift status is checked only once, after link is established,
-     * and it stored in the hw->speed_downgraded parameter. */
-    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
-
-    /* IGP01E1000 does not need to support it. */
-    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
-
-    /* IGP01E1000 always correct polarity reversal */
-    phy_info->polarity_correction = e1000_polarity_reversal_enabled;
-
-    /* Check polarity status */
-    ret_val = e1000_check_polarity(hw, &polarity);
-    if (ret_val)
-        return ret_val;
-
-    phy_info->cable_polarity = polarity;
-
-    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & IGP01E1000_PSSR_MDIX) >>
-                          IGP01E1000_PSSR_MDIX_SHIFT);
-
-    if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
-       IGP01E1000_PSSR_SPEED_1000MBPS) {
-        /* Local/Remote Receiver Information are only valid at 1000 Mbps */
-        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
-                             SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
-                             e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
-        phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
-                              SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
-                              e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
-
-        /* Get cable length */
-        ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
-        if (ret_val)
-            return ret_val;
-
-        /* Translate to old method */
-        average = (max_length + min_length) / 2;
-
-        if (average <= e1000_igp_cable_length_50)
-            phy_info->cable_length = e1000_cable_length_50;
-        else if (average <= e1000_igp_cable_length_80)
-            phy_info->cable_length = e1000_cable_length_50_80;
-        else if (average <= e1000_igp_cable_length_110)
-            phy_info->cable_length = e1000_cable_length_80_110;
-        else if (average <= e1000_igp_cable_length_140)
-            phy_info->cable_length = e1000_cable_length_110_140;
-        else
-            phy_info->cable_length = e1000_cable_length_140;
-    }
-
-    return E1000_SUCCESS;
-}
+       /* Check link status. Wait up to 100 microseconds for link to become
+        * valid.
+        */
+       for (i = 0; i < 10; i++) {
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+               if (ret_val)
+                       return ret_val;
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               if (phy_data & MII_SR_LINK_STATUS) {
+                       /* Config the MAC and PHY after link is up */
+                       ret_val = e1000_copper_link_postconfig(hw);
+                       if (ret_val)
+                               return ret_val;
+
+                       DEBUGOUT("Valid link established!!!\n");
+                       return E1000_SUCCESS;
+               }
+               udelay(10);
+       }
 
-/******************************************************************************
-* Get PHY information from various PHY registers for ife PHY only.
-*
-* hw - Struct containing variables accessed by shared code
-* phy_info - PHY information structure
-******************************************************************************/
-static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
-                                 struct e1000_phy_info *phy_info)
-{
-    s32 ret_val;
-    u16 phy_data;
-    e1000_rev_polarity polarity;
-
-    DEBUGFUNC("e1000_phy_ife_get_info");
-
-    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
-    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
-
-    ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
-    if (ret_val)
-        return ret_val;
-    phy_info->polarity_correction =
-                        ((phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
-                        IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT) ?
-                        e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
-
-    if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
-        ret_val = e1000_check_polarity(hw, &polarity);
-        if (ret_val)
-            return ret_val;
-    } else {
-        /* Polarity is forced. */
-        polarity = ((phy_data & IFE_PSC_FORCE_POLARITY) >>
-                     IFE_PSC_FORCE_POLARITY_SHIFT) ?
-                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
-    }
-    phy_info->cable_polarity = polarity;
-
-    ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    phy_info->mdix_mode = (e1000_auto_x_mode)
-                     ((phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
-                     IFE_PMC_MDIX_MODE_SHIFT);
-
-    return E1000_SUCCESS;
+       DEBUGOUT("Unable to establish link!!!\n");
+       return E1000_SUCCESS;
 }
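The back-to-back PHY_STATUS reads in the loop above are intentional: the MII link-status bit is latched low, so the first read returns (and clears) any stale link-down indication and only the second read reflects the current state. A minimal standalone sketch of that pattern, assuming a hypothetical mii_read() accessor:

#include <stdbool.h>
#include <stdint.h>

#define MII_STATUS_REG  0x01            /* standard MII status register */
#define MII_LINK_STATUS (1u << 2)       /* link status bit, latched low */

extern uint16_t mii_read(uint8_t reg);  /* assumed MDIO read */

static bool link_is_up(void)
{
        /* First read clears a latched link-down event... */
        (void)mii_read(MII_STATUS_REG);
        /* ...the second read reports the current link state. */
        return (mii_read(MII_STATUS_REG) & MII_LINK_STATUS) != 0;
}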
 
-/******************************************************************************
-* Get PHY information from various PHY registers fot m88 PHY only.
-*
-* hw - Struct containing variables accessed by shared code
-* phy_info - PHY information structure
-******************************************************************************/
-static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
-                                 struct e1000_phy_info *phy_info)
+/**
+ * e1000_phy_setup_autoneg - phy settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures PHY autoneg and flow control advertisement settings
+ */
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
 {
-    s32 ret_val;
-    u16 phy_data;
-    e1000_rev_polarity polarity;
-
-    DEBUGFUNC("e1000_phy_m88_get_info");
-
-    /* The downshift status is checked only once, after link is established,
-     * and it stored in the hw->speed_downgraded parameter. */
-    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
-
-    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    phy_info->extended_10bt_distance =
-        ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
-        M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
-        e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal;
-
-    phy_info->polarity_correction =
-        ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
-        M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
-        e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
-
-    /* Check polarity status */
-    ret_val = e1000_check_polarity(hw, &polarity);
-    if (ret_val)
-        return ret_val;
-    phy_info->cable_polarity = polarity;
-
-    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & M88E1000_PSSR_MDIX) >>
-                          M88E1000_PSSR_MDIX_SHIFT);
-
-    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
-        /* Cable Length Estimation and Local/Remote Receiver Information
-         * are only valid at 1000 Mbps.
-         */
-        if (hw->phy_type != e1000_phy_gg82563) {
-            phy_info->cable_length = (e1000_cable_length)((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
-                                      M88E1000_PSSR_CABLE_LENGTH_SHIFT);
-        } else {
-            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
-                                         &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            phy_info->cable_length = (e1000_cable_length)(phy_data & GG82563_DSPD_CABLE_LENGTH);
-        }
-
-        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
-                             SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
-                             e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
-        phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
-                              SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
-                              e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
-
-    }
-
-    return E1000_SUCCESS;
-}
+       s32 ret_val;
+       u16 mii_autoneg_adv_reg;
+       u16 mii_1000t_ctrl_reg;
 
-/******************************************************************************
-* Get PHY information from various PHY registers
-*
-* hw - Struct containing variables accessed by shared code
-* phy_info - PHY information structure
-******************************************************************************/
-s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
-{
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_phy_get_info");
-
-    phy_info->cable_length = e1000_cable_length_undefined;
-    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
-    phy_info->cable_polarity = e1000_rev_polarity_undefined;
-    phy_info->downshift = e1000_downshift_undefined;
-    phy_info->polarity_correction = e1000_polarity_reversal_undefined;
-    phy_info->mdix_mode = e1000_auto_x_mode_undefined;
-    phy_info->local_rx = e1000_1000t_rx_status_undefined;
-    phy_info->remote_rx = e1000_1000t_rx_status_undefined;
-
-    if (hw->media_type != e1000_media_type_copper) {
-        DEBUGOUT("PHY info is only valid for copper media\n");
-        return -E1000_ERR_CONFIG;
-    }
-
-    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-    if (ret_val)
-        return ret_val;
-
-    if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
-        DEBUGOUT("PHY info is only valid if link is up\n");
-        return -E1000_ERR_CONFIG;
-    }
-
-    if (hw->phy_type == e1000_phy_igp ||
-        hw->phy_type == e1000_phy_igp_3 ||
-        hw->phy_type == e1000_phy_igp_2)
-        return e1000_phy_igp_get_info(hw, phy_info);
-    else if (hw->phy_type == e1000_phy_ife)
-        return e1000_phy_ife_get_info(hw, phy_info);
-    else
-        return e1000_phy_m88_get_info(hw, phy_info);
-}
+       DEBUGFUNC("e1000_phy_setup_autoneg");
 
-s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
-{
-    DEBUGFUNC("e1000_validate_mdi_settings");
-
-    if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
-        DEBUGOUT("Invalid MDI setting detected\n");
-        hw->mdix = 1;
-        return -E1000_ERR_CONFIG;
-    }
-    return E1000_SUCCESS;
-}
+       /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+       ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+       if (ret_val)
+               return ret_val;
 
+       /* Read the MII 1000Base-T Control Register (Address 9). */
+       ret_val =
+           e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+       if (ret_val)
+               return ret_val;
 
-/******************************************************************************
- * Sets up eeprom variables in the hw struct.  Must be called after mac_type
- * is configured.  Additionally, if this is ICH8, the flash controller GbE
- * registers must be mapped, or this will crash.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-s32 e1000_init_eeprom_params(struct e1000_hw *hw)
-{
-    struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    u32 eecd = er32(EECD);
-    s32 ret_val = E1000_SUCCESS;
-    u16 eeprom_size;
-
-    DEBUGFUNC("e1000_init_eeprom_params");
-
-    switch (hw->mac_type) {
-    case e1000_82542_rev2_0:
-    case e1000_82542_rev2_1:
-    case e1000_82543:
-    case e1000_82544:
-        eeprom->type = e1000_eeprom_microwire;
-        eeprom->word_size = 64;
-        eeprom->opcode_bits = 3;
-        eeprom->address_bits = 6;
-        eeprom->delay_usec = 50;
-        eeprom->use_eerd = false;
-        eeprom->use_eewr = false;
-        break;
-    case e1000_82540:
-    case e1000_82545:
-    case e1000_82545_rev_3:
-    case e1000_82546:
-    case e1000_82546_rev_3:
-        eeprom->type = e1000_eeprom_microwire;
-        eeprom->opcode_bits = 3;
-        eeprom->delay_usec = 50;
-        if (eecd & E1000_EECD_SIZE) {
-            eeprom->word_size = 256;
-            eeprom->address_bits = 8;
-        } else {
-            eeprom->word_size = 64;
-            eeprom->address_bits = 6;
-        }
-        eeprom->use_eerd = false;
-        eeprom->use_eewr = false;
-        break;
-    case e1000_82541:
-    case e1000_82541_rev_2:
-    case e1000_82547:
-    case e1000_82547_rev_2:
-        if (eecd & E1000_EECD_TYPE) {
-            eeprom->type = e1000_eeprom_spi;
-            eeprom->opcode_bits = 8;
-            eeprom->delay_usec = 1;
-            if (eecd & E1000_EECD_ADDR_BITS) {
-                eeprom->page_size = 32;
-                eeprom->address_bits = 16;
-            } else {
-                eeprom->page_size = 8;
-                eeprom->address_bits = 8;
-            }
-        } else {
-            eeprom->type = e1000_eeprom_microwire;
-            eeprom->opcode_bits = 3;
-            eeprom->delay_usec = 50;
-            if (eecd & E1000_EECD_ADDR_BITS) {
-                eeprom->word_size = 256;
-                eeprom->address_bits = 8;
-            } else {
-                eeprom->word_size = 64;
-                eeprom->address_bits = 6;
-            }
-        }
-        eeprom->use_eerd = false;
-        eeprom->use_eewr = false;
-        break;
-    case e1000_82571:
-    case e1000_82572:
-        eeprom->type = e1000_eeprom_spi;
-        eeprom->opcode_bits = 8;
-        eeprom->delay_usec = 1;
-        if (eecd & E1000_EECD_ADDR_BITS) {
-            eeprom->page_size = 32;
-            eeprom->address_bits = 16;
-        } else {
-            eeprom->page_size = 8;
-            eeprom->address_bits = 8;
-        }
-        eeprom->use_eerd = false;
-        eeprom->use_eewr = false;
-        break;
-    case e1000_82573:
-        eeprom->type = e1000_eeprom_spi;
-        eeprom->opcode_bits = 8;
-        eeprom->delay_usec = 1;
-        if (eecd & E1000_EECD_ADDR_BITS) {
-            eeprom->page_size = 32;
-            eeprom->address_bits = 16;
-        } else {
-            eeprom->page_size = 8;
-            eeprom->address_bits = 8;
-        }
-        eeprom->use_eerd = true;
-        eeprom->use_eewr = true;
-        if (!e1000_is_onboard_nvm_eeprom(hw)) {
-            eeprom->type = e1000_eeprom_flash;
-            eeprom->word_size = 2048;
-
-            /* Ensure that the Autonomous FLASH update bit is cleared due to
-             * Flash update issue on parts which use a FLASH for NVM. */
-            eecd &= ~E1000_EECD_AUPDEN;
-            ew32(EECD, eecd);
-        }
-        break;
-    case e1000_80003es2lan:
-        eeprom->type = e1000_eeprom_spi;
-        eeprom->opcode_bits = 8;
-        eeprom->delay_usec = 1;
-        if (eecd & E1000_EECD_ADDR_BITS) {
-            eeprom->page_size = 32;
-            eeprom->address_bits = 16;
-        } else {
-            eeprom->page_size = 8;
-            eeprom->address_bits = 8;
-        }
-        eeprom->use_eerd = true;
-        eeprom->use_eewr = false;
-        break;
-    case e1000_ich8lan:
-        {
-        s32  i = 0;
-        u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
-
-        eeprom->type = e1000_eeprom_ich8;
-        eeprom->use_eerd = false;
-        eeprom->use_eewr = false;
-        eeprom->word_size = E1000_SHADOW_RAM_WORDS;
-
-        /* Zero the shadow RAM structure. But don't load it from NVM
-         * so as to save time for driver init */
-        if (hw->eeprom_shadow_ram != NULL) {
-            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
-                hw->eeprom_shadow_ram[i].modified = false;
-                hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
-            }
-        }
-
-        hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
-                              ICH_FLASH_SECTOR_SIZE;
-
-        hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
-        hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
-
-        hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
-
-        hw->flash_bank_size /= 2 * sizeof(u16);
-
-        break;
-        }
-    default:
-        break;
-    }
-
-    if (eeprom->type == e1000_eeprom_spi) {
-        /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
-         * 32KB (incremented by powers of 2).
-         */
-        if (hw->mac_type <= e1000_82547_rev_2) {
-            /* Set to default value for initial eeprom read. */
-            eeprom->word_size = 64;
-            ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
-            if (ret_val)
-                return ret_val;
-            eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
-            /* 256B eeprom size was not supported in earlier hardware, so we
-             * bump eeprom_size up one to ensure that "1" (which maps to 256B)
-             * is never the result used in the shifting logic below. */
-            if (eeprom_size)
-                eeprom_size++;
-        } else {
-            eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
-                          E1000_EECD_SIZE_EX_SHIFT);
-        }
-
-        eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
-    }
-    return ret_val;
-}
+       /* Need to parse both autoneg_advertised and fc and set up
+        * the appropriate PHY registers.  First we will parse for
+        * autoneg_advertised software override.  Since we can advertise
+        * a plethora of combinations, we need to check each bit
+        * individually.
+        */
 
-/******************************************************************************
- * Raises the EEPROM's clock input.
- *
- * hw - Struct containing variables accessed by shared code
- * eecd - EECD's current value
- *****************************************************************************/
-static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
-{
-    /* Raise the clock input to the EEPROM (by setting the SK bit), and then
-     * wait <delay> microseconds.
-     */
-    *eecd = *eecd | E1000_EECD_SK;
-    ew32(EECD, *eecd);
-    E1000_WRITE_FLUSH();
-    udelay(hw->eeprom.delay_usec);
-}
+       /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+        * Advertisement Register (Address 4) and the 1000 mb speed bits in
+        * the 1000Base-T Control Register (Address 9).
+        */
+       mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+       mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
 
-/******************************************************************************
- * Lowers the EEPROM's clock input.
- *
- * hw - Struct containing variables accessed by shared code
- * eecd - EECD's current value
- *****************************************************************************/
-static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
-{
-    /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
-     * wait 50 microseconds.
-     */
-    *eecd = *eecd & ~E1000_EECD_SK;
-    ew32(EECD, *eecd);
-    E1000_WRITE_FLUSH();
-    udelay(hw->eeprom.delay_usec);
-}
+       DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
 
-/******************************************************************************
- * Shift data bits out to the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * data - data to send to the EEPROM
- * count - number of bits to shift out
- *****************************************************************************/
-static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
-{
-    struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    u32 eecd;
-    u32 mask;
-
-    /* We need to shift "count" bits out to the EEPROM. So, value in the
-     * "data" parameter will be shifted out to the EEPROM one bit at a time.
-     * In order to do this, "data" must be broken down into bits.
-     */
-    mask = 0x01 << (count - 1);
-    eecd = er32(EECD);
-    if (eeprom->type == e1000_eeprom_microwire) {
-        eecd &= ~E1000_EECD_DO;
-    } else if (eeprom->type == e1000_eeprom_spi) {
-        eecd |= E1000_EECD_DO;
-    }
-    do {
-        /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
-         * and then raising and then lowering the clock (the SK bit controls
-         * the clock input to the EEPROM).  A "0" is shifted out to the EEPROM
-         * by setting "DI" to "0" and then raising and then lowering the clock.
-         */
-        eecd &= ~E1000_EECD_DI;
-
-        if (data & mask)
-            eecd |= E1000_EECD_DI;
-
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-
-        udelay(eeprom->delay_usec);
-
-        e1000_raise_ee_clk(hw, &eecd);
-        e1000_lower_ee_clk(hw, &eecd);
-
-        mask = mask >> 1;
-
-    } while (mask);
-
-    /* We leave the "DI" bit set to "0" when we leave this routine. */
-    eecd &= ~E1000_EECD_DI;
-    ew32(EECD, eecd);
-}
+       /* Do we want to advertise 10 Mb Half Duplex? */
+       if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+               DEBUGOUT("Advertise 10mb Half duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+       }
 
-/******************************************************************************
- * Shift data bits in from the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
-{
-    u32 eecd;
-    u32 i;
-    u16 data;
+       /* Do we want to advertise 10 Mb Full Duplex? */
+       if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+               DEBUGOUT("Advertise 10mb Full duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+       }
 
-    /* In order to read a register from the EEPROM, we need to shift 'count'
-     * bits in from the EEPROM. Bits are "shifted in" by raising the clock
-     * input to the EEPROM (setting the SK bit), and then reading the value of
-     * the "DO" bit.  During this "shifting in" process the "DI" bit should
-     * always be clear.
-     */
+       /* Do we want to advertise 100 Mb Half Duplex? */
+       if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+               DEBUGOUT("Advertise 100mb Half duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+       }
 
-    eecd = er32(EECD);
+       /* Do we want to advertise 100 Mb Full Duplex? */
+       if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+               DEBUGOUT("Advertise 100mb Full duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+       }
 
-    eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
-    data = 0;
+       /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+       if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+               DEBUGOUT
+                   ("Advertise 1000mb Half duplex requested, request denied!\n");
+       }
 
-    for (i = 0; i < count; i++) {
-        data = data << 1;
-        e1000_raise_ee_clk(hw, &eecd);
+       /* Do we want to advertise 1000 Mb Full Duplex? */
+       if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+               DEBUGOUT("Advertise 1000mb Full duplex\n");
+               mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+       }
 
-        eecd = er32(EECD);
+       /* Check for a software override of the flow control settings, and
+        * setup the PHY advertisement registers accordingly.  If
+        * auto-negotiation is enabled, then software will have to set the
+        * "PAUSE" bits to the correct value in the Auto-Negotiation
+        * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause frames
+        *          but not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause frames
+        *          but we do not support receiving pause frames).
+        *      3:  Both RX and TX flow control (symmetric) are enabled.
+        *  other:  No software override.  The flow control configuration
+        *          in the EEPROM is used.
+        */
+       switch (hw->fc) {
+       case E1000_FC_NONE:     /* 0 */
+               /* Flow control (RX & TX) is completely disabled by a
+                * software over-ride.
+                */
+               mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       case E1000_FC_RX_PAUSE: /* 1 */
+               /* RX Flow control is enabled, and TX Flow control is
+                * disabled, by a software over-ride.
+                */
+               /* Since there really isn't a way to advertise that we are
+                * capable of RX Pause ONLY, we will advertise that we
+                * support both symmetric and asymmetric RX PAUSE.  Later
+                * (in e1000_config_fc_after_link_up) we will disable the
+                * hw's ability to send PAUSE frames.
+                */
+               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       case E1000_FC_TX_PAUSE: /* 2 */
+               /* TX Flow control is enabled, and RX Flow control is
+                * disabled, by a software over-ride.
+                */
+               mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+               mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+               break;
+       case E1000_FC_FULL:     /* 3 */
+               /* Flow control (both RX and TX) is enabled by a software
+                * over-ride.
+                */
+               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               return -E1000_ERR_CONFIG;
+       }
 
-        eecd &= ~(E1000_EECD_DI);
-        if (eecd & E1000_EECD_DO)
-            data |= 1;
+       ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+       if (ret_val)
+               return ret_val;
 
-        e1000_lower_ee_clk(hw, &eecd);
-    }
+       DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
-    return data;
-}
+       ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+       if (ret_val)
+               return ret_val;
 
-/******************************************************************************
- * Prepares EEPROM for access
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
- * function should be called before issuing a command to the EEPROM.
- *****************************************************************************/
-static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
-{
-    struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    u32 eecd, i=0;
-
-    DEBUGFUNC("e1000_acquire_eeprom");
-
-    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
-        return -E1000_ERR_SWFW_SYNC;
-    eecd = er32(EECD);
-
-    if (hw->mac_type != e1000_82573) {
-        /* Request EEPROM Access */
-        if (hw->mac_type > e1000_82544) {
-            eecd |= E1000_EECD_REQ;
-            ew32(EECD, eecd);
-            eecd = er32(EECD);
-            while ((!(eecd & E1000_EECD_GNT)) &&
-                  (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
-                i++;
-                udelay(5);
-                eecd = er32(EECD);
-            }
-            if (!(eecd & E1000_EECD_GNT)) {
-                eecd &= ~E1000_EECD_REQ;
-                ew32(EECD, eecd);
-                DEBUGOUT("Could not acquire EEPROM grant\n");
-                e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
-                return -E1000_ERR_EEPROM;
-            }
-        }
-    }
-
-    /* Setup EEPROM for Read/Write */
-
-    if (eeprom->type == e1000_eeprom_microwire) {
-        /* Clear SK and DI */
-        eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
-        ew32(EECD, eecd);
-
-        /* Set CS */
-        eecd |= E1000_EECD_CS;
-        ew32(EECD, eecd);
-    } else if (eeprom->type == e1000_eeprom_spi) {
-        /* Clear SK and CS */
-        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
-        ew32(EECD, eecd);
-        udelay(1);
-    }
-
-    return E1000_SUCCESS;
+       return E1000_SUCCESS;
 }
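For reference, the fc-to-advertisement mapping implemented by the switch above boils down to setting or clearing two bits in the auto-negotiation advertisement register. A standalone sketch of just that mapping; the ADV_* constants stand in for the driver's NWAY_AR_PAUSE/NWAY_AR_ASM_DIR masks and the helper is illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

#define ADV_PAUSE   (1u << 10)  /* symmetric PAUSE capability bit */
#define ADV_ASM_DIR (1u << 11)  /* asymmetric PAUSE direction bit */

static uint16_t fc_to_adv_bits(unsigned int fc)
{
        switch (fc) {
        case 0:  return 0;                        /* flow control disabled */
        case 1:  return ADV_PAUSE | ADV_ASM_DIR;  /* rx-only: advertise both */
        case 2:  return ADV_ASM_DIR;              /* tx-only: asymmetric only */
        case 3:  return ADV_PAUSE | ADV_ASM_DIR;  /* symmetric rx/tx */
        default: return 0;                        /* driver treats this as a config error */
        }
}

int main(void)
{
        unsigned int fc;

        for (fc = 0; fc < 4; fc++)
                printf("fc=%u -> advertisement bits 0x%04x\n",
                       fc, fc_to_adv_bits(fc));
        return 0;
}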
 
-/******************************************************************************
- * Returns EEPROM to a "standby" state
+/**
+ * e1000_phy_force_speed_duplex - force link settings
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void e1000_standby_eeprom(struct e1000_hw *hw)
+ * Force PHY speed and duplex settings to hw->forced_speed_duplex
+ */
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 {
-    struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    u32 eecd;
-
-    eecd = er32(EECD);
-
-    if (eeprom->type == e1000_eeprom_microwire) {
-        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-        udelay(eeprom->delay_usec);
-
-        /* Clock high */
-        eecd |= E1000_EECD_SK;
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-        udelay(eeprom->delay_usec);
-
-        /* Select EEPROM */
-        eecd |= E1000_EECD_CS;
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-        udelay(eeprom->delay_usec);
-
-        /* Clock low */
-        eecd &= ~E1000_EECD_SK;
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-        udelay(eeprom->delay_usec);
-    } else if (eeprom->type == e1000_eeprom_spi) {
-        /* Toggle CS to flush commands */
-        eecd |= E1000_EECD_CS;
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-        udelay(eeprom->delay_usec);
-        eecd &= ~E1000_EECD_CS;
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-        udelay(eeprom->delay_usec);
-    }
-}
+       u32 ctrl;
+       s32 ret_val;
+       u16 mii_ctrl_reg;
+       u16 mii_status_reg;
+       u16 phy_data;
+       u16 i;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+       /* Turn off Flow control if we are forcing speed and duplex. */
+       hw->fc = E1000_FC_NONE;
+
+       DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+       /* Read the Device Control Register. */
+       ctrl = er32(CTRL);
+
+       /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+       ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+       ctrl &= ~(DEVICE_SPEED_MASK);
+
+       /* Clear the Auto Speed Detect Enable bit. */
+       ctrl &= ~E1000_CTRL_ASDE;
+
+       /* Read the MII Control Register. */
+       ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+       if (ret_val)
+               return ret_val;
+
+       /* We need to disable autoneg in order to force link and duplex. */
+
+       mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+       /* Are we forcing Full or Half Duplex? */
+       if (hw->forced_speed_duplex == e1000_100_full ||
+           hw->forced_speed_duplex == e1000_10_full) {
+               /* We want to force full duplex so we SET the full duplex bits in the
+                * Device and MII Control Registers.
+                */
+               ctrl |= E1000_CTRL_FD;
+               mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+               DEBUGOUT("Full Duplex\n");
+       } else {
+               /* We want to force half duplex so we CLEAR the full duplex bits in
+                * the Device and MII Control Registers.
+                */
+               ctrl &= ~E1000_CTRL_FD;
+               mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+               DEBUGOUT("Half Duplex\n");
+       }
 
-/******************************************************************************
- * Terminates a command by inverting the EEPROM's chip select pin
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void e1000_release_eeprom(struct e1000_hw *hw)
-{
-    u32 eecd;
+       /* Are we forcing 100Mbps??? */
+       if (hw->forced_speed_duplex == e1000_100_full ||
+           hw->forced_speed_duplex == e1000_100_half) {
+               /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+               ctrl |= E1000_CTRL_SPD_100;
+               mii_ctrl_reg |= MII_CR_SPEED_100;
+               mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+               DEBUGOUT("Forcing 100mb ");
+       } else {
+               /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+               ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+               mii_ctrl_reg |= MII_CR_SPEED_10;
+               mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+               DEBUGOUT("Forcing 10mb ");
+       }
 
-    DEBUGFUNC("e1000_release_eeprom");
+       e1000_config_collision_dist(hw);
+
+       /* Write the configured values back to the Device Control Reg. */
+       ew32(CTRL, ctrl);
+
+       if (hw->phy_type == e1000_phy_m88) {
+               ret_val =
+                   e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+                * forced whenever speed or duplex are forced.
+                */
+               phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+               ret_val =
+                   e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+               /* Need to reset the PHY or these changes will be ignored */
+               mii_ctrl_reg |= MII_CR_RESET;
+
+               /* Disable MDI-X support for 10/100 */
+       } else {
+               /* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+                * forced whenever speed or duplex are forced.
+                */
+               ret_val =
+                   e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+               phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+               ret_val =
+                   e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+               if (ret_val)
+                       return ret_val;
+       }
 
-    eecd = er32(EECD);
+       /* Write back the modified PHY MII control register. */
+       ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+       if (ret_val)
+               return ret_val;
 
-    if (hw->eeprom.type == e1000_eeprom_spi) {
-        eecd |= E1000_EECD_CS;  /* Pull CS high */
-        eecd &= ~E1000_EECD_SK; /* Lower SCK */
+       udelay(1);
 
-        ew32(EECD, eecd);
+       /* The wait_autoneg_complete flag may be a little misleading here.
+        * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+        * But we do want to delay for a period while forcing link so we
+        * don't generate false No Link messages.  So we will wait here
+        * only if the user has set wait_autoneg_complete to 1, which is
+        * the default.
+        */
+       if (hw->wait_autoneg_complete) {
+               /* We will wait for autoneg to complete. */
+               DEBUGOUT("Waiting for forced speed/duplex link.\n");
+               mii_status_reg = 0;
+
+               /* Wait for the link to come up or for PHY_FORCE_TIME polls
+                * (100 ms each) to expire.
+                */
+               for (i = PHY_FORCE_TIME; i > 0; i--) {
+                       /* Read the MII Status Register twice; the Link Status
+                        * bit is latched, so the second read returns the
+                        * current state.
+                        */
+                       ret_val =
+                           e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+                       if (ret_val)
+                               return ret_val;
+
+                       ret_val =
+                           e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+                       if (ret_val)
+                               return ret_val;
+
+                       if (mii_status_reg & MII_SR_LINK_STATUS)
+                               break;
+                       msleep(100);
+               }
+               if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
+                       /* We didn't get link.  Reset the DSP and wait again for link. */
+                       ret_val = e1000_phy_reset_dsp(hw);
+                       if (ret_val) {
+                               DEBUGOUT("Error Resetting PHY DSP\n");
+                               return ret_val;
+                       }
+               }
+               /* This loop will early-out if the link condition has been met.  */
+               for (i = PHY_FORCE_TIME; i > 0; i--) {
+                       if (mii_status_reg & MII_SR_LINK_STATUS)
+                               break;
+                       msleep(100);
+                       /* Read the MII Status Register twice; the Link Status
+                        * bit is latched, so the second read returns the
+                        * current state.
+                        */
+                       ret_val =
+                           e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+                       if (ret_val)
+                               return ret_val;
+
+                       ret_val =
+                           e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+                       if (ret_val)
+                               return ret_val;
+               }
+       }
 
-        udelay(hw->eeprom.delay_usec);
-    } else if (hw->eeprom.type == e1000_eeprom_microwire) {
-        /* cleanup eeprom */
+       if (hw->phy_type == e1000_phy_m88) {
+               /* Because we reset the PHY above, we need to re-force TX_CLK in the
+                * Extended PHY Specific Control Register to 25MHz clock.  This value
+                * defaults back to a 2.5MHz clock when the PHY is reset.
+                */
+               ret_val =
+                   e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+                                      &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               phy_data |= M88E1000_EPSCR_TX_CLK_25;
+               ret_val =
+                   e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+                                       phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               /* In addition, because of the s/w reset above, we need to enable CRS on
+                * TX.  This must be set for both full and half duplex operation.
+                */
+               ret_val =
+                   e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+               ret_val =
+                   e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543)
+                   && (!hw->autoneg)
+                   && (hw->forced_speed_duplex == e1000_10_full
+                       || hw->forced_speed_duplex == e1000_10_half)) {
+                       ret_val = e1000_polarity_reversal_workaround(hw);
+                       if (ret_val)
+                               return ret_val;
+               }
+       }
+       return E1000_SUCCESS;
+}
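
For readers following the forced speed/duplex path above, the register work reduces to clearing the auto-negotiation enable bit and setting the standard MII control (BMCR) speed and duplex bits before writing PHY_CTRL back. The standalone sketch below reproduces only that bit arithmetic outside the driver; the MII_CR_* values mirror the IEEE 802.3 BMCR layout, and build_forced_bmcr() is a hypothetical helper written for this example, not a driver routine.

/* Minimal sketch of the forced speed/duplex BMCR arithmetic (not driver code). */
#include <stdint.h>
#include <stdio.h>

/* Standard MII control register (BMCR) bits, matching the MII_CR_* names above. */
#define MII_CR_SPEED_1000    0x0040  /* speed select MSB: 1000 Mb/s        */
#define MII_CR_FULL_DUPLEX   0x0100  /* 1 = full duplex, 0 = half duplex   */
#define MII_CR_AUTO_NEG_EN   0x1000  /* auto-negotiation enable            */
#define MII_CR_SPEED_100     0x2000  /* speed select LSB: 100 Mb/s         */

/* Hypothetical helper: derive a BMCR value for a forced speed/duplex setting. */
static uint16_t build_forced_bmcr(uint16_t bmcr, int speed_100, int full_duplex)
{
        bmcr &= ~MII_CR_AUTO_NEG_EN;                     /* forcing disables autoneg */
        bmcr &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); /* start from 10 Mb/s       */
        if (speed_100)
                bmcr |= MII_CR_SPEED_100;
        if (full_duplex)
                bmcr |= MII_CR_FULL_DUPLEX;
        else
                bmcr &= ~MII_CR_FULL_DUPLEX;
        return bmcr;
}

int main(void)
{
        printf("forced 100/full: 0x%04x\n", build_forced_bmcr(0x1000, 1, 1));
        printf("forced 10/half:  0x%04x\n", build_forced_bmcr(0x1000, 0, 0));
        return 0;
}
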
 
-        /* CS on Microwire is active-high */
-        eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+/**
+ * e1000_config_collision_dist - set collision distance register
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sets the collision distance in the Transmit Control register.
+ * Link should have been established previously. The collision distance
+ * written depends only on the MAC type.
+ */
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+       u32 tctl, coll_dist;
 
-        ew32(EECD, eecd);
+       DEBUGFUNC("e1000_config_collision_dist");
 
-        /* Rising edge of clock */
-        eecd |= E1000_EECD_SK;
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-        udelay(hw->eeprom.delay_usec);
+       if (hw->mac_type < e1000_82543)
+               coll_dist = E1000_COLLISION_DISTANCE_82542;
+       else
+               coll_dist = E1000_COLLISION_DISTANCE;
 
-        /* Falling edge of clock */
-        eecd &= ~E1000_EECD_SK;
-        ew32(EECD, eecd);
-        E1000_WRITE_FLUSH();
-        udelay(hw->eeprom.delay_usec);
-    }
+       tctl = er32(TCTL);
 
-    /* Stop requesting EEPROM access */
-    if (hw->mac_type > e1000_82544) {
-        eecd &= ~E1000_EECD_REQ;
-        ew32(EECD, eecd);
-    }
+       tctl &= ~E1000_TCTL_COLD;
+       tctl |= coll_dist << E1000_COLD_SHIFT;
 
-    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+       ew32(TCTL, tctl);
+       E1000_WRITE_FLUSH();
 }
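
e1000_config_collision_dist() is a plain read-modify-write of a single multi-bit field in TCTL: clear the field with its mask, then OR in the new value shifted into place. The sketch below shows the same pattern in isolation; the COLD mask and shift values used here are placeholders picked for the example rather than quoted from e1000_hw.h.

/* Sketch: masked-field update, as used for the TCTL collision distance. */
#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout for the example (the real constants live in e1000_hw.h). */
#define TCTL_COLD_MASK   0x003ff000u
#define TCTL_COLD_SHIFT  12

static uint32_t set_collision_dist(uint32_t tctl, uint32_t coll_dist)
{
        tctl &= ~TCTL_COLD_MASK;               /* clear the old collision distance */
        tctl |= coll_dist << TCTL_COLD_SHIFT;  /* insert the new one               */
        return tctl;
}

int main(void)
{
        printf("TCTL = 0x%08x\n", set_collision_dist(0x0004010fu, 0x3f));
        return 0;
}
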
 
-/******************************************************************************
- * Reads a 16 bit word from the EEPROM.
+/**
+ * e1000_config_mac_to_phy - sync phy and mac settings
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+ * Sets the MAC speed and duplex settings to reflect those negotiated by
+ * the PHY. The needed information is read from the PHY specific status
+ * register.
+ */
+static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
 {
-    u16 retry_count = 0;
-    u8 spi_stat_reg;
-
-    DEBUGFUNC("e1000_spi_eeprom_ready");
-
-    /* Read "Status Register" repeatedly until the LSB is cleared.  The
-     * EEPROM will signal that the command has been completed by clearing
-     * bit 0 of the internal status register.  If it's not cleared within
-     * 5 milliseconds, then error out.
-     */
-    retry_count = 0;
-    do {
-        e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
-                                hw->eeprom.opcode_bits);
-        spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
-        if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
-            break;
-
-        udelay(5);
-        retry_count += 5;
-
-        e1000_standby_eeprom(hw);
-    } while (retry_count < EEPROM_MAX_RETRY_SPI);
-
-    /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
-     * only 0-5mSec on 5V devices)
-     */
-    if (retry_count >= EEPROM_MAX_RETRY_SPI) {
-        DEBUGOUT("SPI EEPROM Status error\n");
-        return -E1000_ERR_EEPROM;
-    }
-
-    return E1000_SUCCESS;
-}
+       u32 ctrl;
+       s32 ret_val;
+       u16 phy_data;
 
-/******************************************************************************
- * Reads a 16 bit word from the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of  word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
-    s32 ret;
-    spin_lock(&e1000_eeprom_lock);
-    ret = e1000_do_read_eeprom(hw, offset, words, data);
-    spin_unlock(&e1000_eeprom_lock);
-    return ret;
-}
+       DEBUGFUNC("e1000_config_mac_to_phy");
 
-static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
-    struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    u32 i = 0;
-
-    DEBUGFUNC("e1000_read_eeprom");
-
-    /* If eeprom is not yet detected, do so now */
-    if (eeprom->word_size == 0)
-        e1000_init_eeprom_params(hw);
-
-    /* A check for invalid values:  offset too large, too many words, and not
-     * enough words.
-     */
-    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
-       (words == 0)) {
-        DEBUGOUT2("\"words\" parameter out of bounds. Words = %d, size = %d\n", offset, eeprom->word_size);
-        return -E1000_ERR_EEPROM;
-    }
-
-    /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
-     * directly. In this case, we need to acquire the EEPROM so that
-     * FW or other port software does not interrupt.
-     */
-    if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
-        /* Prepare the EEPROM for bit-bang reading */
-        if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
-            return -E1000_ERR_EEPROM;
-    }
-
-    /* Eerd register EEPROM access requires no eeprom aquire/release */
-    if (eeprom->use_eerd)
-        return e1000_read_eeprom_eerd(hw, offset, words, data);
-
-    /* ICH EEPROM access is done via the ICH flash controller */
-    if (eeprom->type == e1000_eeprom_ich8)
-        return e1000_read_eeprom_ich8(hw, offset, words, data);
-
-    /* Set up the SPI or Microwire EEPROM for bit-bang reading.  We have
-     * acquired the EEPROM at this point, so any returns should relase it */
-    if (eeprom->type == e1000_eeprom_spi) {
-        u16 word_in;
-        u8 read_opcode = EEPROM_READ_OPCODE_SPI;
-
-        if (e1000_spi_eeprom_ready(hw)) {
-            e1000_release_eeprom(hw);
-            return -E1000_ERR_EEPROM;
-        }
-
-        e1000_standby_eeprom(hw);
-
-        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
-        if ((eeprom->address_bits == 8) && (offset >= 128))
-            read_opcode |= EEPROM_A8_OPCODE_SPI;
-
-        /* Send the READ command (opcode + addr)  */
-        e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
-        e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
-
-        /* Read the data.  The address of the eeprom internally increments with
-         * each byte (spi) being read, saving on the overhead of eeprom setup
-         * and tear-down.  The address counter will roll over if reading beyond
-         * the size of the eeprom, thus allowing the entire memory to be read
-         * starting from any offset. */
-        for (i = 0; i < words; i++) {
-            word_in = e1000_shift_in_ee_bits(hw, 16);
-            data[i] = (word_in >> 8) | (word_in << 8);
-        }
-    } else if (eeprom->type == e1000_eeprom_microwire) {
-        for (i = 0; i < words; i++) {
-            /* Send the READ command (opcode + addr)  */
-            e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
-                                    eeprom->opcode_bits);
-            e1000_shift_out_ee_bits(hw, (u16)(offset + i),
-                                    eeprom->address_bits);
-
-            /* Read the data.  For microwire, each word requires the overhead
-             * of eeprom setup and tear-down. */
-            data[i] = e1000_shift_in_ee_bits(hw, 16);
-            e1000_standby_eeprom(hw);
-        }
-    }
-
-    /* End this read operation */
-    e1000_release_eeprom(hw);
-
-    return E1000_SUCCESS;
-}
+       /* On the 82544 and newer MACs, Auto Speed Detection takes care
+        * of the MAC speed/duplex configuration. */
+       if (hw->mac_type >= e1000_82544)
+               return E1000_SUCCESS;
 
-/******************************************************************************
- * Reads a 16 bit word from the EEPROM using the EERD register.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of  word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
-                                 u16 *data)
-{
-    u32 i, eerd = 0;
-    s32 error = 0;
+       /* Read the Device Control Register and set the bits to Force Speed
+        * and Duplex.
+        */
+       ctrl = er32(CTRL);
+       ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+       ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
 
-    for (i = 0; i < words; i++) {
-        eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
-                         E1000_EEPROM_RW_REG_START;
+       /* Set up duplex in the Device Control and Transmit Control
+        * registers depending on negotiated values.
+        */
+       ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+       if (ret_val)
+               return ret_val;
 
-        ew32(EERD, eerd);
-        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
+       if (phy_data & M88E1000_PSSR_DPLX)
+               ctrl |= E1000_CTRL_FD;
+       else
+               ctrl &= ~E1000_CTRL_FD;
 
-        if (error) {
-            break;
-        }
-        data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
+       e1000_config_collision_dist(hw);
 
-    }
+       /* Set up speed in the Device Control register depending on
+        * negotiated values.
+        */
+       if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+               ctrl |= E1000_CTRL_SPD_1000;
+       else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+               ctrl |= E1000_CTRL_SPD_100;
 
-    return error;
+       /* Write the configured values back to the Device Control Reg. */
+       ew32(CTRL, ctrl);
+       return E1000_SUCCESS;
 }
 
-/******************************************************************************
- * Writes a 16 bit word from the EEPROM using the EEWR register.
+/**
+ * e1000_force_mac_fc - force flow control settings
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of  word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
-                                  u16 *data)
+ * Forces the MAC's flow control settings.
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ */
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
 {
-    u32    register_value = 0;
-    u32    i              = 0;
-    s32     error          = 0;
+       u32 ctrl;
+
+       DEBUGFUNC("e1000_force_mac_fc");
+
+       /* Get the current configuration of the Device Control Register */
+       ctrl = er32(CTRL);
+
+       /* Because we didn't get link via the internal auto-negotiation
+        * mechanism (we either forced link or we got link via PHY
+        * auto-neg), we have to manually enable/disable transmit and
+        * receive flow control.
+        *
+        * The switch statement below enables/disables flow control
+        * according to the "hw->fc" parameter.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause
+        *          frames but not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause
+        *          frames but we do not receive pause frames).
+        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+        *  other:  No other values should be possible at this point.
+        */
 
-    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
-        return -E1000_ERR_SWFW_SYNC;
+       switch (hw->fc) {
+       case E1000_FC_NONE:
+               ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+               break;
+       case E1000_FC_RX_PAUSE:
+               ctrl &= (~E1000_CTRL_TFCE);
+               ctrl |= E1000_CTRL_RFCE;
+               break;
+       case E1000_FC_TX_PAUSE:
+               ctrl &= (~E1000_CTRL_RFCE);
+               ctrl |= E1000_CTRL_TFCE;
+               break;
+       case E1000_FC_FULL:
+               ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               return -E1000_ERR_CONFIG;
+       }
 
-    for (i = 0; i < words; i++) {
-        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
-                         ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
-                         E1000_EEPROM_RW_REG_START;
+       /* Disable TX Flow Control for 82542 (rev 2.0) */
+       if (hw->mac_type == e1000_82542_rev2_0)
+               ctrl &= (~E1000_CTRL_TFCE);
 
-        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
-        if (error) {
-            break;
-        }
+       ew32(CTRL, ctrl);
+       return E1000_SUCCESS;
+}
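
The switch in e1000_force_mac_fc() is a straight mapping from the four hw->fc modes to two independent pause directions: RFCE controls whether received PAUSE frames are honoured, TFCE controls whether PAUSE frames may be transmitted. As a sanity check of that mapping, here is a small self-contained sketch; the enum names follow the E1000_FC_* values above, but the code does not touch real registers and is not part of the driver.

/* Sketch: the four flow-control modes and the pause directions they enable. */
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

struct fc_caps { int rx_pause; int tx_pause; };

static struct fc_caps fc_to_caps(enum fc_mode fc)
{
        switch (fc) {
        case FC_RX_PAUSE: return (struct fc_caps){ .rx_pause = 1, .tx_pause = 0 };
        case FC_TX_PAUSE: return (struct fc_caps){ .rx_pause = 0, .tx_pause = 1 };
        case FC_FULL:     return (struct fc_caps){ .rx_pause = 1, .tx_pause = 1 };
        case FC_NONE:
        default:          return (struct fc_caps){ 0 };
        }
}

int main(void)
{
        static const char *names[] = { "none", "rx-pause", "tx-pause", "full" };
        enum fc_mode fc;

        for (fc = FC_NONE; fc <= FC_FULL; fc++) {
                struct fc_caps c = fc_to_caps(fc);
                printf("%-8s  honour pause (RFCE)=%d  send pause (TFCE)=%d\n",
                       names[fc], c.rx_pause, c.tx_pause);
        }
        return 0;
}

As the 82542 rev 2.0 exception above shows, transmitting PAUSE frames is additionally disabled on that part regardless of the requested mode.
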
 
-        ew32(EEWR, register_value);
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures flow control settings after link is established.
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
+static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 mii_status_reg;
+       u16 mii_nway_adv_reg;
+       u16 mii_nway_lp_ability_reg;
+       u16 speed;
+       u16 duplex;
 
-        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+       DEBUGFUNC("e1000_config_fc_after_link_up");
 
-        if (error) {
-            break;
-        }
-    }
+       /* Check for the case where we have fiber media and auto-neg failed
+        * so we had to force link.  In this case, we need to force the
+        * configuration of the MAC to match the "fc" parameter.
+        */
+       if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed))
+           || ((hw->media_type == e1000_media_type_internal_serdes)
+               && (hw->autoneg_failed))
+           || ((hw->media_type == e1000_media_type_copper)
+               && (!hw->autoneg))) {
+               ret_val = e1000_force_mac_fc(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error forcing flow control settings\n");
+                       return ret_val;
+               }
+       }
 
-    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
-    return error;
+       /* Check for the case where we have copper media and auto-neg is
+        * enabled.  In this case, we need to check and see if Auto-Neg
+        * has completed, and if so, how the PHY and link partner has
+        * flow control configured.
+        */
+       if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+               /* Read the MII Status Register and check to see if AutoNeg
+                * has completed.  We read this twice because this reg has
+                * some "sticky" (latched) bits.
+                */
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       return ret_val;
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       return ret_val;
+
+               if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+                       /* The AutoNeg process has completed, so we now need to
+                        * read both the Auto Negotiation Advertisement Register
+                        * (Address 4) and the Auto-Negotiation Base Page Ability
+                        * Register (Address 5) to determine how flow control was
+                        * negotiated.
+                        */
+                       ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+                                                    &mii_nway_adv_reg);
+                       if (ret_val)
+                               return ret_val;
+                       ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+                                                    &mii_nway_lp_ability_reg);
+                       if (ret_val)
+                               return ret_val;
+
+                       /* Two bits in the Auto Negotiation Advertisement Register
+                        * (Address 4) and two bits in the Auto Negotiation Base
+                        * Page Ability Register (Address 5) determine flow control
+                        * for both the PHY and the link partner.  The following
+                        * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+                        * 1999, describes these PAUSE resolution bits and how flow
+                        * control is determined based upon these settings.
+                        * NOTE:  DC = Don't Care
+                        *
+                        *   LOCAL DEVICE  |   LINK PARTNER
+                        * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+                        *-------|---------|-------|---------|--------------------
+                        *   0   |    0    |  DC   |   DC    | E1000_FC_NONE
+                        *   0   |    1    |   0   |   DC    | E1000_FC_NONE
+                        *   0   |    1    |   1   |    0    | E1000_FC_NONE
+                        *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
+                        *   1   |    0    |   0   |   DC    | E1000_FC_NONE
+                        *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
+                        *   1   |    1    |   0   |    0    | E1000_FC_NONE
+                        *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
+                        *
+                        */
+                       /* Are both PAUSE bits set to 1?  If so, this implies
+                        * Symmetric Flow Control is enabled at both ends.  The
+                        * ASM_DIR bits are irrelevant per the spec.
+                        *
+                        * For Symmetric Flow Control:
+                        *
+                        *   LOCAL DEVICE  |   LINK PARTNER
+                        * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                        *-------|---------|-------|---------|--------------------
+                        *   1   |   DC    |   1   |   DC    | E1000_FC_FULL
+                        *
+                        */
+                       if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                           (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+                               /* Now we need to check if the user selected Rx-only
+                                * pause frames.  In this case, we had to advertise
+                                * FULL flow control because we could not advertise Rx
+                                * ONLY. Hence, we must now check to see if we need to
+                                * turn OFF the TRANSMISSION of PAUSE frames.
+                                */
+                               if (hw->original_fc == E1000_FC_FULL) {
+                                       hw->fc = E1000_FC_FULL;
+                                       DEBUGOUT("Flow Control = FULL.\n");
+                               } else {
+                                       hw->fc = E1000_FC_RX_PAUSE;
+                                       DEBUGOUT
+                                           ("Flow Control = RX PAUSE frames only.\n");
+                               }
+                       }
+                       /* For receiving PAUSE frames ONLY.
+                        *
+                        *   LOCAL DEVICE  |   LINK PARTNER
+                        * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                        *-------|---------|-------|---------|--------------------
+                        *   0   |    1    |   1   |    1    | E1000_FC_TX_PAUSE
+                        *
+                        */
+                       else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                                (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                                (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                                (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
+                       {
+                               hw->fc = E1000_FC_TX_PAUSE;
+                               DEBUGOUT
+                                   ("Flow Control = TX PAUSE frames only.\n");
+                       }
+                       /* For transmitting PAUSE frames ONLY.
+                        *
+                        *   LOCAL DEVICE  |   LINK PARTNER
+                        * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                        *-------|---------|-------|---------|--------------------
+                        *   1   |    1    |   0   |    1    | E1000_FC_RX_PAUSE
+                        *
+                        */
+                       else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                                (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                                !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                                (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
+                       {
+                               hw->fc = E1000_FC_RX_PAUSE;
+                               DEBUGOUT
+                                   ("Flow Control = RX PAUSE frames only.\n");
+                       }
+                       /* Per the IEEE spec, at this point flow control should be
+                        * disabled.  However, we want to consider that we could
+                        * be connected to a legacy switch that doesn't advertise
+                        * desired flow control, but can be forced on the link
+                        * partner.  So if we advertised no flow control, that is
+                        * what we will resolve to.  If we advertised some kind of
+                        * receive capability (Rx Pause Only or Full Flow Control)
+                        * and the link partner advertised none, we will configure
+                        * ourselves to enable Rx Flow Control only.  We can do
+                        * this safely for two reasons:  If the link partner really
+                        * didn't want flow control enabled, and we enable Rx, no
+                        * harm done since we won't be receiving any PAUSE frames
+                        * anyway.  If the intent on the link partner was to have
+                        * flow control enabled, then by us enabling RX only, we
+                        * can at least receive pause frames and process them.
+                        * This is a good idea because, since we are predominantly
+                        * a server NIC, more often than not we will be asked to
+                        * delay transmission of packets rather than asking our
+                        * link partner to pause transmission of frames.
+                        */
+                       else if ((hw->original_fc == E1000_FC_NONE ||
+                                 hw->original_fc == E1000_FC_TX_PAUSE) ||
+                                hw->fc_strict_ieee) {
+                               hw->fc = E1000_FC_NONE;
+                               DEBUGOUT("Flow Control = NONE.\n");
+                       } else {
+                               hw->fc = E1000_FC_RX_PAUSE;
+                               DEBUGOUT
+                                   ("Flow Control = RX PAUSE frames only.\n");
+                       }
+
+                       /* Now we need to do one last check...  If we auto-
+                        * negotiated to HALF DUPLEX, flow control should not be
+                        * enabled per IEEE 802.3 spec.
+                        */
+                       ret_val =
+                           e1000_get_speed_and_duplex(hw, &speed, &duplex);
+                       if (ret_val) {
+                               DEBUGOUT
+                                   ("Error getting link speed and duplex\n");
+                               return ret_val;
+                       }
+
+                       if (duplex == HALF_DUPLEX)
+                               hw->fc = E1000_FC_NONE;
+
+                       /* Now we call a subroutine to actually force the MAC
+                        * controller to use the correct flow control settings.
+                        */
+                       ret_val = e1000_force_mac_fc(hw);
+                       if (ret_val) {
+                               DEBUGOUT
+                                   ("Error forcing flow control settings\n");
+                               return ret_val;
+                       }
+               } else {
+                       DEBUGOUT
+                           ("Copper PHY and Auto Neg has not completed.\n");
+               }
+       }
+       return E1000_SUCCESS;
 }
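
The table in the comment block above is the IEEE 802.3 pause resolution rule, and it can be stated compactly as a pure function of the local and link-partner PAUSE/ASM_DIR bits (bits 10 and 11 of the standard auto-negotiation advertisement registers). The sketch below is an illustration of that resolution logic only, not a drop-in replacement for e1000_config_fc_after_link_up(); the driver additionally downgrades the "full" result to Rx-only pause when the user originally asked for Rx-only.

/* Sketch: IEEE 802.3 pause resolution from the advertised PAUSE/ASM_DIR bits. */
#include <stdint.h>
#include <stdio.h>

#define AR_PAUSE    (1u << 10)  /* symmetric pause advertised  */
#define AR_ASM_DIR  (1u << 11)  /* asymmetric pause advertised */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_pause(uint16_t local_adv, uint16_t lp_adv)
{
        if ((local_adv & AR_PAUSE) && (lp_adv & AR_PAUSE))
                return FC_FULL;              /* both ends symmetric           */
        if (!(local_adv & AR_PAUSE) && (local_adv & AR_ASM_DIR) &&
            (lp_adv & AR_PAUSE) && (lp_adv & AR_ASM_DIR))
                return FC_TX_PAUSE;          /* we may send pause, not honour */
        if ((local_adv & AR_PAUSE) && (local_adv & AR_ASM_DIR) &&
            !(lp_adv & AR_PAUSE) && (lp_adv & AR_ASM_DIR))
                return FC_RX_PAUSE;          /* we honour pause, do not send  */
        return FC_NONE;
}

int main(void)
{
        /* 1/DC + 1/DC -> FULL, 0/1 + 1/1 -> TX_PAUSE, per the table above. */
        printf("%d\n", resolve_pause(AR_PAUSE, AR_PAUSE));
        printf("%d\n", resolve_pause(AR_ASM_DIR, AR_PAUSE | AR_ASM_DIR));
        return 0;
}
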
 
-/******************************************************************************
- * Polls the status bit (bit 1) of the EERD to determine when the read is done.
+/**
+ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ * @hw: pointer to the HW structure
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+ * Checks for link up on the hardware.  If link is not up and we have
+ * a signal, then we need to force link up.
+ */
+static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
 {
-    u32 attempts = 100000;
-    u32 i, reg = 0;
-    s32 done = E1000_ERR_EEPROM;
-
-    for (i = 0; i < attempts; i++) {
-        if (eerd == E1000_EEPROM_POLL_READ)
-            reg = er32(EERD);
-        else
-            reg = er32(EEWR);
-
-        if (reg & E1000_EEPROM_RW_REG_DONE) {
-            done = E1000_SUCCESS;
-            break;
-        }
-        udelay(5);
-    }
-
-    return done;
-}
+       u32 rxcw;
+       u32 ctrl;
+       u32 status;
+       s32 ret_val = E1000_SUCCESS;
 
-/***************************************************************************
-* Description:     Determines if the onboard NVM is FLASH or EEPROM.
-*
-* hw - Struct containing variables accessed by shared code
-****************************************************************************/
-static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
-{
-    u32 eecd = 0;
+       DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+       ctrl = er32(CTRL);
+       status = er32(STATUS);
+       rxcw = er32(RXCW);
 
-    DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+       /*
+        * If we don't have link (auto-negotiation failed or link partner
+        * cannot auto-negotiate), and our link partner is not trying to
+        * auto-negotiate with us (we are receiving idles or data),
+        * we need to force link up. We also need to give auto-negotiation
+        * time to complete.
+        */
+       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+       if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+               if (hw->autoneg_failed == 0) {
+                       hw->autoneg_failed = 1;
+                       goto out;
+               }
+               DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
 
-    if (hw->mac_type == e1000_ich8lan)
-        return false;
+               /* Disable auto-negotiation in the TXCW register */
+               ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
 
-    if (hw->mac_type == e1000_82573) {
-        eecd = er32(EECD);
+               /* Force link-up and also force full-duplex. */
+               ctrl = er32(CTRL);
+               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+               ew32(CTRL, ctrl);
+
+               /* Configure Flow Control after forcing link up. */
+               ret_val = e1000_config_fc_after_link_up(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error configuring flow control\n");
+                       goto out;
+               }
+       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+               /*
+                * If we are forcing link and we are receiving /C/ ordered
+                * sets, re-enable auto-negotiation in the TXCW register
+                * and disable forced link in the Device Control register
+                * in an attempt to auto-negotiate with our link partner.
+                */
+               DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+               ew32(TXCW, hw->txcw);
+               ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+               hw->serdes_has_link = true;
+       } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+               /*
+                * If we force link for a non-auto-negotiation switch, check
+                * link status based on MAC synchronization for internal
+                * serdes media type.
+                */
+               /* SYNCH bit and IV bit are sticky. */
+               udelay(10);
+               rxcw = er32(RXCW);
+               if (rxcw & E1000_RXCW_SYNCH) {
+                       if (!(rxcw & E1000_RXCW_IV)) {
+                               hw->serdes_has_link = true;
+                               DEBUGOUT("SERDES: Link up - forced.\n");
+                       }
+               } else {
+                       hw->serdes_has_link = false;
+                       DEBUGOUT("SERDES: Link down - force failed.\n");
+               }
+       }
 
-        /* Isolate bits 15 & 16 */
-        eecd = ((eecd >> 15) & 0x03);
+       if (E1000_TXCW_ANE & er32(TXCW)) {
+               status = er32(STATUS);
+               if (status & E1000_STATUS_LU) {
+                       /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+                       udelay(10);
+                       rxcw = er32(RXCW);
+                       if (rxcw & E1000_RXCW_SYNCH) {
+                               if (!(rxcw & E1000_RXCW_IV)) {
+                                       hw->serdes_has_link = true;
+                                       DEBUGOUT("SERDES: Link up - autoneg "
+                                                "completed successfully.\n");
+                               } else {
+                                       hw->serdes_has_link = false;
+                                       DEBUGOUT("SERDES: Link down - invalid "
+                                                "codewords detected in autoneg.\n");
+                               }
+                       } else {
+                               hw->serdes_has_link = false;
+                               DEBUGOUT("SERDES: Link down - no sync.\n");
+                       }
+               } else {
+                       hw->serdes_has_link = false;
+                       DEBUGOUT("SERDES: Link down - autoneg failed\n");
+               }
+       }
 
-        /* If both bits are set, device is Flash type */
-        if (eecd == 0x03) {
-            return false;
-        }
-    }
-    return true;
+out:
+       return ret_val;
 }
 
-/******************************************************************************
- * Verifies that the EEPROM has a valid checksum
+/**
+ * e1000_check_for_link - check for changes in link status
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- *
- * Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
- * valid.
- *****************************************************************************/
-s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+ * Checks to see if the link status of the hardware has changed.
+ * Called by any function that needs to check the link status of the adapter.
+ */
+s32 e1000_check_for_link(struct e1000_hw *hw)
 {
-    u16 checksum = 0;
-    u16 i, eeprom_data;
-
-    DEBUGFUNC("e1000_validate_eeprom_checksum");
-
-    if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
-        /* Check bit 4 of word 10h.  If it is 0, firmware is done updating
-         * 10h-12h.  Checksum may need to be fixed. */
-        e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
-        if ((eeprom_data & 0x10) == 0) {
-            /* Read 0x23 and check bit 15.  This bit is a 1 when the checksum
-             * has already been fixed.  If the checksum is still wrong and this
-             * bit is a 1, we need to return bad checksum.  Otherwise, we need
-             * to set this bit to a 1 and update the checksum. */
-            e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
-            if ((eeprom_data & 0x8000) == 0) {
-                eeprom_data |= 0x8000;
-                e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
-                e1000_update_eeprom_checksum(hw);
-            }
-        }
-    }
-
-    if (hw->mac_type == e1000_ich8lan) {
-        /* Drivers must allocate the shadow ram structure for the
-         * EEPROM checksum to be updated.  Otherwise, this bit as well
-         * as the checksum must both be set correctly for this
-         * validation to pass.
-         */
-        e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
-        if ((eeprom_data & 0x40) == 0) {
-            eeprom_data |= 0x40;
-            e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
-            e1000_update_eeprom_checksum(hw);
-        }
-    }
-
-    for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
-        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
-            DEBUGOUT("EEPROM Read Error\n");
-            return -E1000_ERR_EEPROM;
-        }
-        checksum += eeprom_data;
-    }
-
-    if (checksum == (u16)EEPROM_SUM)
-        return E1000_SUCCESS;
-    else {
-        DEBUGOUT("EEPROM Checksum Invalid\n");
-        return -E1000_ERR_EEPROM;
-    }
+       u32 rxcw = 0;
+       u32 ctrl;
+       u32 status;
+       u32 rctl;
+       u32 icr;
+       u32 signal = 0;
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_check_for_link");
+
+       ctrl = er32(CTRL);
+       status = er32(STATUS);
+
+       /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+        * set when the optics detect a signal. On older adapters, it will be
+        * cleared when there is a signal.  This applies to fiber media only.
+        */
+       if ((hw->media_type == e1000_media_type_fiber) ||
+           (hw->media_type == e1000_media_type_internal_serdes)) {
+               rxcw = er32(RXCW);
+
+               if (hw->media_type == e1000_media_type_fiber) {
+                       signal = (hw->mac_type > e1000_82544) ?
+                                E1000_CTRL_SWDPIN1 : 0;
+                       if (status & E1000_STATUS_LU)
+                               hw->get_link_status = false;
+               }
+       }
+
+       /* If we have a copper PHY then we only want to go out to the PHY
+        * registers to see if Auto-Neg has completed and/or if our link
+        * status has changed.  The get_link_status flag will be set if we
+        * receive a Link Status Change interrupt or we have Rx Sequence
+        * Errors.
+        */
+       if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+               /* First we want to see if the MII Status Register reports
+                * link.  If so, then we want to get the current speed/duplex
+                * of the PHY.
+                * Read the register twice since the link bit is sticky.
+                */
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+               if (ret_val)
+                       return ret_val;
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               if (phy_data & MII_SR_LINK_STATUS) {
+                       hw->get_link_status = false;
+                       /* Check if there was a downshift; this must be checked
+                        * immediately after link-up. */
+                       e1000_check_downshift(hw);
+
+                       /* If we are on 82544 or 82543 silicon and speed/duplex
+                        * are forced to 10H or 10F, then we will implement the polarity
+                        * reversal workaround.  We disable interrupts first, and upon
+                        * returning, restore the device's interrupt state to its previous
+                        * value, except for the link status change interrupt, which will
+                        * happen due to the execution of this workaround.
+                        */
+
+                       if ((hw->mac_type == e1000_82544
+                            || hw->mac_type == e1000_82543) && (!hw->autoneg)
+                           && (hw->forced_speed_duplex == e1000_10_full
+                               || hw->forced_speed_duplex == e1000_10_half)) {
+                               ew32(IMC, 0xffffffff);
+                               ret_val =
+                                   e1000_polarity_reversal_workaround(hw);
+                               icr = er32(ICR);
+                               ew32(ICS, (icr & ~E1000_ICS_LSC));
+                               ew32(IMS, IMS_ENABLE_MASK);
+                       }
+
+               } else {
+                       /* No link detected */
+                       e1000_config_dsp_after_link_change(hw, false);
+                       return 0;
+               }
+
+               /* If we are forcing speed/duplex, then we simply return since
+                * we have already determined whether we have link or not.
+                */
+               if (!hw->autoneg)
+                       return -E1000_ERR_CONFIG;
+
+               /* optimize the dsp settings for the igp phy */
+               e1000_config_dsp_after_link_change(hw, true);
+
+               /* We have an M88E1000 PHY and Auto-Neg is enabled.  If we
+                * have Si on board that is 82544 or newer, Auto
+                * Speed Detection takes care of MAC speed/duplex
+                * configuration.  So we only need to configure Collision
+                * Distance in the MAC.  Otherwise, we need to force
+                * speed/duplex on the MAC to the current PHY speed/duplex
+                * settings.
+                */
+               if (hw->mac_type >= e1000_82544)
+                       e1000_config_collision_dist(hw);
+               else {
+                       ret_val = e1000_config_mac_to_phy(hw);
+                       if (ret_val) {
+                               DEBUGOUT
+                                   ("Error configuring MAC to PHY settings\n");
+                               return ret_val;
+                       }
+               }
+
+               /* Configure Flow Control now that Auto-Neg has completed. First, we
+                * need to restore the desired flow control settings because we may
+                * have had to re-autoneg with a different link partner.
+                */
+               ret_val = e1000_config_fc_after_link_up(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error configuring flow control\n");
+                       return ret_val;
+               }
+
+               /* At this point we know that we are on copper and we have
+                * auto-negotiated link.  These are conditions for checking the link
+                * partner capability register.  We use the link speed to determine if
+                * TBI compatibility needs to be turned on or off.  If the link is not
+                * at gigabit speed, then TBI compatibility is not needed.  If we are
+                * at gigabit speed, we turn on TBI compatibility.
+                */
+               if (hw->tbi_compatibility_en) {
+                       u16 speed, duplex;
+                       ret_val =
+                           e1000_get_speed_and_duplex(hw, &speed, &duplex);
+                       if (ret_val) {
+                               DEBUGOUT
+                                   ("Error getting link speed and duplex\n");
+                               return ret_val;
+                       }
+                       if (speed != SPEED_1000) {
+                               /* If link speed is not set to gigabit speed, we do not need
+                                * to enable TBI compatibility.
+                                */
+                               if (hw->tbi_compatibility_on) {
+                                       /* If TBI compatibility was previously
+                                        * on, turn it off. */
+                                       rctl = er32(RCTL);
+                                       rctl &= ~E1000_RCTL_SBP;
+                                       ew32(RCTL, rctl);
+                                       hw->tbi_compatibility_on = false;
+                               }
+                       } else {
+                               /* If TBI compatibility was previously off, turn it on. For
+                                * compatibility with a TBI link partner, we will store bad
+                                * packets. Some frames have an additional byte on the end and
+                                * will look like CRC errors to the hardware.
+                                */
+                               if (!hw->tbi_compatibility_on) {
+                                       hw->tbi_compatibility_on = true;
+                                       rctl = er32(RCTL);
+                                       rctl |= E1000_RCTL_SBP;
+                                       ew32(RCTL, rctl);
+                               }
+                       }
+               }
+       }
+
+       if ((hw->media_type == e1000_media_type_fiber) ||
+           (hw->media_type == e1000_media_type_internal_serdes))
+               e1000_check_for_serdes_link_generic(hw);
+
+       return E1000_SUCCESS;
 }
 
-/******************************************************************************
- * Calculates the EEPROM checksum and writes it to the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
- * Writes the difference to word offset 63 of the EEPROM.
- *****************************************************************************/
-s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+/**
+ * e1000_get_speed_and_duplex - get the current speed and duplex
+ * @hw: Struct containing variables accessed by shared code
+ * @speed: Speed of the connection
+ * @duplex: Duplex setting of the connection
+ *
+ * Detects the current speed and duplex settings of the hardware.
+ */
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
 {
-    u32 ctrl_ext;
-    u16 checksum = 0;
-    u16 i, eeprom_data;
-
-    DEBUGFUNC("e1000_update_eeprom_checksum");
-
-    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
-        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
-            DEBUGOUT("EEPROM Read Error\n");
-            return -E1000_ERR_EEPROM;
-        }
-        checksum += eeprom_data;
-    }
-    checksum = (u16)EEPROM_SUM - checksum;
-    if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
-        DEBUGOUT("EEPROM Write Error\n");
-        return -E1000_ERR_EEPROM;
-    } else if (hw->eeprom.type == e1000_eeprom_flash) {
-        e1000_commit_shadow_ram(hw);
-    } else if (hw->eeprom.type == e1000_eeprom_ich8) {
-        e1000_commit_shadow_ram(hw);
-        /* Reload the EEPROM, or else modifications will not appear
-         * until after next adapter reset. */
-        ctrl_ext = er32(CTRL_EXT);
-        ctrl_ext |= E1000_CTRL_EXT_EE_RST;
-        ew32(CTRL_EXT, ctrl_ext);
-        msleep(10);
-    }
-    return E1000_SUCCESS;
+       u32 status;
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_get_speed_and_duplex");
+
+       if (hw->mac_type >= e1000_82543) {
+               status = er32(STATUS);
+               if (status & E1000_STATUS_SPEED_1000) {
+                       *speed = SPEED_1000;
+                       DEBUGOUT("1000 Mbs, ");
+               } else if (status & E1000_STATUS_SPEED_100) {
+                       *speed = SPEED_100;
+                       DEBUGOUT("100 Mbs, ");
+               } else {
+                       *speed = SPEED_10;
+                       DEBUGOUT("10 Mbs, ");
+               }
+
+               if (status & E1000_STATUS_FD) {
+                       *duplex = FULL_DUPLEX;
+                       DEBUGOUT("Full Duplex\n");
+               } else {
+                       *duplex = HALF_DUPLEX;
+                       DEBUGOUT("Half Duplex\n");
+               }
+       } else {
+               DEBUGOUT("1000 Mbs, Full Duplex\n");
+               *speed = SPEED_1000;
+               *duplex = FULL_DUPLEX;
+       }
+
+       /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+        * if it is operating at half duplex.  Here we set the duplex settings to
+        * match the duplex in the link partner's capabilities.
+        */
+       if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+               ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+                       *duplex = HALF_DUPLEX;
+               else {
+                       ret_val =
+                           e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+                       if (ret_val)
+                               return ret_val;
+                       if ((*speed == SPEED_100
+                            && !(phy_data & NWAY_LPAR_100TX_FD_CAPS))
+                           || (*speed == SPEED_10
+                               && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+                               *duplex = HALF_DUPLEX;
+               }
+       }
+
+       return E1000_SUCCESS;
 }
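
On 82543 and newer parts, e1000_get_speed_and_duplex() above is just a decode of two speed bits and one duplex bit from the Device Status register. The sketch below performs the same decode on a plain integer; the STATUS_* masks here are placeholder values chosen for the example and modelled on, but not quoted from, the driver's E1000_STATUS_* definitions.

/* Sketch: decoding speed/duplex from a Device-Status-style register image. */
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout for the example. */
#define STATUS_FD          0x0001u
#define STATUS_SPEED_100   0x0040u
#define STATUS_SPEED_1000  0x0080u

static void decode_status(uint32_t status, unsigned int *speed, int *full_duplex)
{
        if (status & STATUS_SPEED_1000)
                *speed = 1000;
        else if (status & STATUS_SPEED_100)
                *speed = 100;
        else
                *speed = 10;

        *full_duplex = !!(status & STATUS_FD);
}

int main(void)
{
        unsigned int speed;
        int fd;

        decode_status(STATUS_SPEED_1000 | STATUS_FD, &speed, &fd);
        printf("%u Mb/s, %s duplex\n", speed, fd ? "full" : "half");
        return 0;
}
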
 
-/******************************************************************************
- * Parent function for writing words to the different EEPROM types.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset within the EEPROM to be written to
- * words - number of words to write
- * data - 16 bit word to be written to the EEPROM
+/**
+ * e1000_wait_autoneg - wait for autoneg to complete
+ * @hw: Struct containing variables accessed by shared code
  *
- * If e1000_update_eeprom_checksum is not called after this function, the
- * EEPROM will most likely contain an invalid checksum.
- *****************************************************************************/
-s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+ * Blocks until autoneg completes or times out (~4.5 seconds)
+ */
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
 {
-    s32 ret;
-    spin_lock(&e1000_eeprom_lock);
-    ret = e1000_do_write_eeprom(hw, offset, words, data);
-    spin_unlock(&e1000_eeprom_lock);
-    return ret;
+       s32 ret_val;
+       u16 i;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_wait_autoneg");
+       DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+       /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+       for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+               /* Read the MII Status Register and wait for Auto-Neg
+                * Complete bit to be set.
+                */
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+               if (ret_val)
+                       return ret_val;
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+               if (ret_val)
+                       return ret_val;
+               if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+                       return E1000_SUCCESS;
+               }
+               msleep(100);
+       }
+       return E1000_SUCCESS;
 }
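
e1000_wait_autoneg() shows two idioms that recur throughout this file: MII status bits such as "auto-negotiation complete" and "link" are latched, so the register is read twice to obtain the current value, and completion is polled with a bounded retry count rather than an open-ended loop. A stripped-down sketch of that shape, with a stand-in phy_read_status() stub in place of real MDIO accesses (the stub and its behaviour are invented for the example):

/* Sketch: bounded polling of a latched PHY status bit, using a stub PHY read. */
#include <stdint.h>
#include <stdio.h>

#define SR_AUTONEG_COMPLETE  (1u << 5)   /* standard MII status register bit */

/* Stand-in for an MDIO read; pretends autoneg finishes on the third poll. */
static uint16_t phy_read_status(void)
{
        static int calls;
        return (++calls >= 6) ? SR_AUTONEG_COMPLETE : 0;
}

static int wait_autoneg(int max_polls)
{
        int i;

        for (i = 0; i < max_polls; i++) {
                uint16_t status;

                /* Read twice: the latched bit reports the previous state first. */
                (void)phy_read_status();
                status = phy_read_status();
                if (status & SR_AUTONEG_COMPLETE)
                        return 0;
                /* the driver sleeps 100 ms here between polls */
        }
        return -1;
}

int main(void)
{
        printf("autoneg %s\n", wait_autoneg(45) ? "timed out" : "complete");
        return 0;
}
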
 
+/**
+ * e1000_raise_mdi_clk - Raises the Management Data Clock
+ * @hw: Struct containing variables accessed by shared code
+ * @ctrl: Device control register's current value
+ */
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
+{
+       /* Raise the clock input to the Management Data Clock (by setting the MDC
+        * bit), and then delay 10 microseconds.
+        */
+       ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+       E1000_WRITE_FLUSH();
+       udelay(10);
+}
 
-static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+/**
+ * e1000_lower_mdi_clk - Lowers the Management Data Clock
+ * @hw: Struct containing variables accessed by shared code
+ * @ctrl: Device control register's current value
+ */
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
 {
-    struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    s32 status = 0;
-
-    DEBUGFUNC("e1000_write_eeprom");
-
-    /* If eeprom is not yet detected, do so now */
-    if (eeprom->word_size == 0)
-        e1000_init_eeprom_params(hw);
-
-    /* A check for invalid values:  offset too large, too many words, and not
-     * enough words.
-     */
-    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
-       (words == 0)) {
-        DEBUGOUT("\"words\" parameter out of bounds\n");
-        return -E1000_ERR_EEPROM;
-    }
-
-    /* 82573 writes only through eewr */
-    if (eeprom->use_eewr)
-        return e1000_write_eeprom_eewr(hw, offset, words, data);
-
-    if (eeprom->type == e1000_eeprom_ich8)
-        return e1000_write_eeprom_ich8(hw, offset, words, data);
-
-    /* Prepare the EEPROM for writing  */
-    if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
-        return -E1000_ERR_EEPROM;
-
-    if (eeprom->type == e1000_eeprom_microwire) {
-        status = e1000_write_eeprom_microwire(hw, offset, words, data);
-    } else {
-        status = e1000_write_eeprom_spi(hw, offset, words, data);
-        msleep(10);
-    }
-
-    /* Done with writing */
-    e1000_release_eeprom(hw);
-
-    return status;
+       /* Lower the clock input to the Management Data Clock (by clearing the MDC
+        * bit), and then delay 10 microseconds.
+        */
+       ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+       E1000_WRITE_FLUSH();
+       udelay(10);
 }
 
-/******************************************************************************
- * Writes a 16 bit word to a given offset in an SPI EEPROM.
+/**
+ * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY
+ * @hw: Struct containing variables accessed by shared code
+ * @data: Data to send out to the PHY
+ * @count: Number of bits to shift out
  *
- * hw - Struct containing variables accessed by shared code
- * offset - offset within the EEPROM to be written to
- * words - number of words to write
- * data - pointer to array of 8 bit words to be written to the EEPROM
- *
- *****************************************************************************/
-static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
-                                 u16 *data)
+ * Bits are shifted out in MSB to LSB order.
+ */
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
 {
-    struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    u16 widx = 0;
+       u32 ctrl;
+       u32 mask;
 
-    DEBUGFUNC("e1000_write_eeprom_spi");
+       /* We need to shift "count" number of bits out to the PHY. So, the value
+        * in the "data" parameter will be shifted out to the PHY one bit at a
+        * time. In order to do this, "data" must be broken down into bits.
+        */
+       mask = 0x01;
+       mask <<= (count - 1);
 
-    while (widx < words) {
-        u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
+       ctrl = er32(CTRL);
 
-        if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+       /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+       ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
 
-        e1000_standby_eeprom(hw);
+       while (mask) {
+               /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+                * then raising and lowering the Management Data Clock. A "0" is
+                * shifted out to the PHY by setting the MDIO bit to "0" and then
+                * raising and lowering the clock.
+                */
+               if (data & mask)
+                       ctrl |= E1000_CTRL_MDIO;
+               else
+                       ctrl &= ~E1000_CTRL_MDIO;
 
-        /*  Send the WRITE ENABLE command (8 bit opcode )  */
-        e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
-                                    eeprom->opcode_bits);
+               ew32(CTRL, ctrl);
+               E1000_WRITE_FLUSH();
 
-        e1000_standby_eeprom(hw);
+               udelay(10);
 
-        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
-        if ((eeprom->address_bits == 8) && (offset >= 128))
-            write_opcode |= EEPROM_A8_OPCODE_SPI;
+               e1000_raise_mdi_clk(hw, &ctrl);
+               e1000_lower_mdi_clk(hw, &ctrl);
 
-        /* Send the Write command (8-bit opcode + addr) */
-        e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+               mask = mask >> 1;
+       }
+}
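
The shift-out helper above walks a mask from the most significant bit down to bit 0, driving MDIO high or low and pulsing MDC once per bit. A minimal host-side sketch of the same MSB-first bit-banging pattern, with the register writes replaced by a hypothetical set_mdio()/pulse_mdc() pair (not part of the driver), might look like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical pin helpers standing in for ew32(CTRL, ...) + udelay(). */
static void set_mdio(int level)  { printf("MDIO=%d ", level); }
static void pulse_mdc(void)      { printf("MDC\n"); }

/* Shift 'count' bits of 'data' out MSB first, one MDC pulse per bit. */
static void shift_out_bits(uint32_t data, unsigned int count)
{
	uint32_t mask = 1u << (count - 1);

	while (mask) {
		set_mdio((data & mask) ? 1 : 0);
		pulse_mdc();
		mask >>= 1;
	}
}

int main(void)
{
	shift_out_bits(0xFFFFFFFF, 32);	/* e.g. a 32-bit preamble of all ones */
	return 0;
}
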
 
-        e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
-                                eeprom->address_bits);
+/**
+ * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Bits are shifted in in MSB to LSB order.
+ */
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       u16 data = 0;
+       u8 i;
 
-        /* Send the data */
+       /* In order to read a register from the PHY, we need to shift in a total
+        * of 18 bits from the PHY. The first two bit (turnaround) times are used
+        * to avoid contention on the MDIO pin when a read operation is performed.
+        * These two bits are ignored by us and thrown away. Bits are "shifted in"
+        * by raising the input to the Management Data Clock (setting the MDC bit),
+        * and then reading the value of the MDIO bit.
+        */
+       ctrl = er32(CTRL);
 
-        /* Loop to allow for up to whole page write (32 bytes) of eeprom */
-        while (widx < words) {
-            u16 word_out = data[widx];
-            word_out = (word_out >> 8) | (word_out << 8);
-            e1000_shift_out_ee_bits(hw, word_out, 16);
-            widx++;
+       /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+       ctrl &= ~E1000_CTRL_MDIO_DIR;
+       ctrl &= ~E1000_CTRL_MDIO;
 
-            /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
-             * operation, while the smaller eeproms are capable of an 8-byte
-             * PAGE WRITE operation.  Break the inner loop to pass new address
-             */
-            if ((((offset + widx)*2) % eeprom->page_size) == 0) {
-                e1000_standby_eeprom(hw);
-                break;
-            }
-        }
-    }
+       ew32(CTRL, ctrl);
+       E1000_WRITE_FLUSH();
 
-    return E1000_SUCCESS;
-}
+       /* Raise and Lower the clock before reading in the data. This accounts for
+        * the turnaround bits. The first clock occurred when we clocked out the
+        * last bit of the Register Address.
+        */
+       e1000_raise_mdi_clk(hw, &ctrl);
+       e1000_lower_mdi_clk(hw, &ctrl);
+
+       for (data = 0, i = 0; i < 16; i++) {
+               data = data << 1;
+               e1000_raise_mdi_clk(hw, &ctrl);
+               ctrl = er32(CTRL);
+               /* Check to see if we shifted in a "1". */
+               if (ctrl & E1000_CTRL_MDIO)
+                       data |= 1;
+               e1000_lower_mdi_clk(hw, &ctrl);
+       }
 
-/******************************************************************************
- * Writes a 16 bit word to a given offset in a Microwire EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset within the EEPROM to be written to
- * words - number of words to write
- * data - pointer to array of 16 bit words to be written to the EEPROM
- *
- *****************************************************************************/
-static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
-                                       u16 words, u16 *data)
-{
-    struct e1000_eeprom_info *eeprom = &hw->eeprom;
-    u32 eecd;
-    u16 words_written = 0;
-    u16 i = 0;
-
-    DEBUGFUNC("e1000_write_eeprom_microwire");
-
-    /* Send the write enable command to the EEPROM (3-bit opcode plus
-     * 6/8-bit dummy address beginning with 11).  It's less work to include
-     * the 11 of the dummy address as part of the opcode than it is to shift
-     * it over the correct number of bits for the address.  This puts the
-     * EEPROM into write/erase mode.
-     */
-    e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
-                            (u16)(eeprom->opcode_bits + 2));
-
-    e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
-
-    /* Prepare the EEPROM */
-    e1000_standby_eeprom(hw);
-
-    while (words_written < words) {
-        /* Send the Write command (3-bit opcode + addr) */
-        e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
-                                eeprom->opcode_bits);
-
-        e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
-                                eeprom->address_bits);
-
-        /* Send the data */
-        e1000_shift_out_ee_bits(hw, data[words_written], 16);
-
-        /* Toggle the CS line.  This in effect tells the EEPROM to execute
-         * the previous command.
-         */
-        e1000_standby_eeprom(hw);
-
-        /* Read DO repeatedly until it is high (equal to '1').  The EEPROM will
-         * signal that the command has been completed by raising the DO signal.
-         * If DO does not go high in 10 milliseconds, then error out.
-         */
-        for (i = 0; i < 200; i++) {
-            eecd = er32(EECD);
-            if (eecd & E1000_EECD_DO) break;
-            udelay(50);
-        }
-        if (i == 200) {
-            DEBUGOUT("EEPROM Write did not complete\n");
-            return -E1000_ERR_EEPROM;
-        }
-
-        /* Recover from write */
-        e1000_standby_eeprom(hw);
-
-        words_written++;
-    }
-
-    /* Send the write disable command to the EEPROM (3-bit opcode plus
-     * 6/8-bit dummy address beginning with 10).  It's less work to include
-     * the 10 of the dummy address as part of the opcode than it is to shift
-     * it over the correct number of bits for the address.  This takes the
-     * EEPROM out of write/erase mode.
-     */
-    e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
-                            (u16)(eeprom->opcode_bits + 2));
-
-    e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
-
-    return E1000_SUCCESS;
+       e1000_raise_mdi_clk(hw, &ctrl);
+       e1000_lower_mdi_clk(hw, &ctrl);
+
+       return data;
 }
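
Reading back works the same way in reverse: two turnaround clocks are discarded, then 16 data bits are clocked in MSB first by shifting the accumulator left and OR-ing in the sampled MDIO level. A tiny sketch of just that accumulation step, with a hypothetical sample_mdio() in place of reading CTRL (not a driver function), is:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical MDIO sampler; stands in for er32(CTRL) & E1000_CTRL_MDIO. */
static int sample_mdio(void)
{
	static const int bits[16] = { 1,0,1,0, 0,0,0,0, 1,1,1,1, 0,1,1,0 };
	static int i;
	return bits[i++ & 15];
}

/* Clock in 16 bits, MSB first, into a 16-bit result. */
static uint16_t shift_in_16(void)
{
	uint16_t data = 0;
	int i;

	for (i = 0; i < 16; i++) {
		data <<= 1;
		if (sample_mdio())
			data |= 1;
	}
	return data;
}

int main(void)
{
	printf("0x%04x\n", shift_in_16());	/* prints 0xa0f6 for the pattern above */
	return 0;
}
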
 
-/******************************************************************************
- * Flushes the cached eeprom to NVM. This is done by saving the modified values
- * in the eeprom cache and the non modified values in the currently active bank
- * to the new bank.
+
+/**
+ * e1000_read_phy_reg - read a phy register
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to read
  *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of  word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-static s32 e1000_commit_shadow_ram(struct e1000_hw *hw)
+ * Reads the value from a PHY register, if the value is on a specific non zero
+ * page, sets the page first.
+ */
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
 {
-    u32 attempts = 100000;
-    u32 eecd = 0;
-    u32 flop = 0;
-    u32 i = 0;
-    s32 error = E1000_SUCCESS;
-    u32 old_bank_offset = 0;
-    u32 new_bank_offset = 0;
-    u8 low_byte = 0;
-    u8 high_byte = 0;
-    bool sector_write_failed = false;
-
-    if (hw->mac_type == e1000_82573) {
-        /* The flop register will be used to determine if flash type is STM */
-        flop = er32(FLOP);
-        for (i=0; i < attempts; i++) {
-            eecd = er32(EECD);
-            if ((eecd & E1000_EECD_FLUPD) == 0) {
-                break;
-            }
-            udelay(5);
-        }
-
-        if (i == attempts) {
-            return -E1000_ERR_EEPROM;
-        }
-
-        /* If STM opcode located in bits 15:8 of flop, reset firmware */
-        if ((flop & 0xFF00) == E1000_STM_OPCODE) {
-            ew32(HICR, E1000_HICR_FW_RESET);
-        }
-
-        /* Perform the flash update */
-        ew32(EECD, eecd | E1000_EECD_FLUPD);
-
-        for (i=0; i < attempts; i++) {
-            eecd = er32(EECD);
-            if ((eecd & E1000_EECD_FLUPD) == 0) {
-                break;
-            }
-            udelay(5);
-        }
-
-        if (i == attempts) {
-            return -E1000_ERR_EEPROM;
-        }
-    }
-
-    if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
-        /* We're writing to the opposite bank so if we're on bank 1,
-         * write to bank 0 etc.  We also need to erase the segment that
-         * is going to be written */
-        if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
-            new_bank_offset = hw->flash_bank_size * 2;
-            old_bank_offset = 0;
-            e1000_erase_ich8_4k_segment(hw, 1);
-        } else {
-            old_bank_offset = hw->flash_bank_size * 2;
-            new_bank_offset = 0;
-            e1000_erase_ich8_4k_segment(hw, 0);
-        }
-
-        sector_write_failed = false;
-        /* Loop for every byte in the shadow RAM,
-         * which is in units of words. */
-        for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
-            /* Determine whether to write the value stored
-             * in the other NVM bank or a modified value stored
-             * in the shadow RAM */
-            if (hw->eeprom_shadow_ram[i].modified) {
-                low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
-                udelay(100);
-                error = e1000_verify_write_ich8_byte(hw,
-                            (i << 1) + new_bank_offset, low_byte);
-
-                if (error != E1000_SUCCESS)
-                    sector_write_failed = true;
-                else {
-                    high_byte =
-                        (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
-                    udelay(100);
-                }
-            } else {
-                e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
-                                     &low_byte);
-                udelay(100);
-                error = e1000_verify_write_ich8_byte(hw,
-                            (i << 1) + new_bank_offset, low_byte);
-
-                if (error != E1000_SUCCESS)
-                    sector_write_failed = true;
-                else {
-                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
-                                         &high_byte);
-                    udelay(100);
-                }
-            }
-
-            /* If the write of the low byte was successful, go ahead and
-             * write the high byte while checking to make sure that if it
-             * is the signature byte, then it is handled properly */
-            if (!sector_write_failed) {
-                /* If the word is 0x13, then make sure the signature bits
-                 * (15:14) are 11b until the commit has completed.
-                 * This will allow us to write 10b which indicates the
-                 * signature is valid.  We want to do this after the write
-                 * has completed so that we don't mark the segment valid
-                 * while the write is still in progress */
-                if (i == E1000_ICH_NVM_SIG_WORD)
-                    high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
-
-                error = e1000_verify_write_ich8_byte(hw,
-                            (i << 1) + new_bank_offset + 1, high_byte);
-                if (error != E1000_SUCCESS)
-                    sector_write_failed = true;
-
-            } else {
-                /* If the write failed then break from the loop and
-                 * return an error */
-                break;
-            }
-        }
-
-        /* Don't bother writing the segment valid bits if sector
-         * programming failed. */
-        if (!sector_write_failed) {
-            /* Finally validate the new segment by setting bit 15:14
-             * to 10b in word 0x13 , this can be done without an
-             * erase as well since these bits are 11 to start with
-             * and we need to change bit 14 to 0b */
-            e1000_read_ich8_byte(hw,
-                                 E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
-                                 &high_byte);
-            high_byte &= 0xBF;
-            error = e1000_verify_write_ich8_byte(hw,
-                        E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
-            /* And invalidate the previously valid segment by setting
-             * its signature word (0x13) high_byte to 0b. This can be
-             * done without an erase because flash erase sets all bits
-             * to 1's. We can write 1's to 0's without an erase */
-            if (error == E1000_SUCCESS) {
-                error = e1000_verify_write_ich8_byte(hw,
-                            E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
-            }
-
-            /* Clear the now not used entry in the cache */
-            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
-                hw->eeprom_shadow_ram[i].modified = false;
-                hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
-            }
-        }
-    }
-
-    return error;
+       u32 ret_val;
+
+       DEBUGFUNC("e1000_read_phy_reg");
+
+       if ((hw->phy_type == e1000_phy_igp) &&
+           (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+               ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                                (u16) reg_addr);
+               if (ret_val)
+                       return ret_val;
+       }
+
+       ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                       phy_data);
+
+       return ret_val;
 }
 
-/******************************************************************************
- * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
- * second function of dual function devices
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-s32 e1000_read_mac_addr(struct e1000_hw *hw)
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+                                u16 *phy_data)
 {
-    u16 offset;
-    u16 eeprom_data, i;
-
-    DEBUGFUNC("e1000_read_mac_addr");
-
-    for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
-        offset = i >> 1;
-        if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
-            DEBUGOUT("EEPROM Read Error\n");
-            return -E1000_ERR_EEPROM;
-        }
-        hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF);
-        hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8);
-    }
-
-    switch (hw->mac_type) {
-    default:
-        break;
-    case e1000_82546:
-    case e1000_82546_rev_3:
-    case e1000_82571:
-    case e1000_80003es2lan:
-        if (er32(STATUS) & E1000_STATUS_FUNC_1)
-            hw->perm_mac_addr[5] ^= 0x01;
-        break;
-    }
-
-    for (i = 0; i < NODE_ADDRESS_SIZE; i++)
-        hw->mac_addr[i] = hw->perm_mac_addr[i];
-    return E1000_SUCCESS;
+       u32 i;
+       u32 mdic = 0;
+       const u32 phy_addr = 1;
+
+       DEBUGFUNC("e1000_read_phy_reg_ex");
+
+       if (reg_addr > MAX_PHY_REG_ADDRESS) {
+               DEBUGOUT1("PHY register address %d is out of range\n", reg_addr);
+               return -E1000_ERR_PARAM;
+       }
+
+       if (hw->mac_type > e1000_82543) {
+               /* Set up Op-code, Phy Address, and register address in the MDI
+                * Control register.  The MAC will take care of interfacing with the
+                * PHY to retrieve the desired data.
+                */
+               mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+                       (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                       (E1000_MDIC_OP_READ));
+
+               ew32(MDIC, mdic);
+
+               /* Poll the ready bit to see if the MDI read completed */
+               for (i = 0; i < 64; i++) {
+                       udelay(50);
+                       mdic = er32(MDIC);
+                       if (mdic & E1000_MDIC_READY)
+                               break;
+               }
+               if (!(mdic & E1000_MDIC_READY)) {
+                       DEBUGOUT("MDI Read did not complete\n");
+                       return -E1000_ERR_PHY;
+               }
+               if (mdic & E1000_MDIC_ERROR) {
+                       DEBUGOUT("MDI Error\n");
+                       return -E1000_ERR_PHY;
+               }
+               *phy_data = (u16) mdic;
+       } else {
+               /* We must first send a preamble through the MDIO pin to signal the
+                * beginning of an MII instruction.  This is done by sending 32
+                * consecutive "1" bits.
+                */
+               e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+               /* Now combine the next few fields that are required for a read
+                * operation.  We use this method instead of calling the
+                * e1000_shift_out_mdi_bits routine five different times. The format of
+                * a MII read instruction consists of a shift out of 14 bits and is
+                * defined as follows:
+                *    <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+                * followed by a shift in of 18 bits.  The first two bits shifted in
+                * are TurnAround bits used to avoid contention on the MDIO pin when a
+                * READ operation is performed.  These two bits are thrown away,
+                * followed by a shift in of 16 bits which contains the desired data.
+                */
+               mdic = ((reg_addr) | (phy_addr << 5) |
+                       (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+               e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+               /* Now that we've shifted out the read command to the MII, we need to
+                * "shift in" the 16-bit value (18 total bits) of the requested PHY
+                * register address.
+                */
+               *phy_data = e1000_shift_in_mdi_bits(hw);
+       }
+       return E1000_SUCCESS;
 }
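
On the older MACs that take the bit-banged path above, the read command shifted out after the preamble is a 14-bit field packed as <SOF><Op Code><Phy Addr><Reg Addr>, with the register address in bits 4:0, the PHY address in bits 9:5, the opcode in bits 11:10 and the start-of-frame in bits 13:12, exactly as the mdic assignment shows. A small sketch that packs such a frame with locally defined constants (the clause-22 values SOF=01b and read opcode=10b are assumptions for illustration, not copied from e1000_hw.h) is:

#include <stdint.h>
#include <stdio.h>

/* Assumed clause-22 values, defined locally for the sketch. */
#define SKETCH_SOF       0x1	/* start-of-frame "01" */
#define SKETCH_OP_READ   0x2	/* read opcode "10" */

/* Pack a 14-bit MII read command: <SOF><Op><Phy Addr><Reg Addr>. */
static uint32_t pack_mii_read(uint32_t phy_addr, uint32_t reg_addr)
{
	return (reg_addr & 0x1f) |
	       ((phy_addr & 0x1f) << 5) |
	       (SKETCH_OP_READ << 10) |
	       (SKETCH_SOF << 12);
}

int main(void)
{
	uint32_t frame = pack_mii_read(1, 0x01);	/* PHY addr 1, status reg */

	printf("frame = 0x%04x (sof=%u op=%u phy=%u reg=%u)\n", frame,
	       (frame >> 12) & 0x3, (frame >> 10) & 0x3,
	       (frame >> 5) & 0x1f, frame & 0x1f);
	return 0;
}
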
 
-/******************************************************************************
- * Initializes receive address filters.
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_write_phy_reg - write a phy register
  *
- * Places the MAC address in receive address register 0 and clears the rest
- * of the receive addresss registers. Clears the multicast table. Assumes
- * the receiver is in reset when the routine is called.
- *****************************************************************************/
-static void e1000_init_rx_addrs(struct e1000_hw *hw)
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to write
+ * @data: data to write to the PHY
+ *
+ * Writes a value to a PHY register
+ */
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
 {
-    u32 i;
-    u32 rar_num;
-
-    DEBUGFUNC("e1000_init_rx_addrs");
-
-    /* Setup the receive address. */
-    DEBUGOUT("Programming MAC Address into RAR[0]\n");
-
-    e1000_rar_set(hw, hw->mac_addr, 0);
-
-    rar_num = E1000_RAR_ENTRIES;
-
-    /* Reserve a spot for the Locally Administered Address to work around
-     * an 82571 issue in which a reset on one port will reload the MAC on
-     * the other port. */
-    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
-        rar_num -= 1;
-    if (hw->mac_type == e1000_ich8lan)
-        rar_num = E1000_RAR_ENTRIES_ICH8LAN;
-
-    /* Zero out the other 15 receive addresses. */
-    DEBUGOUT("Clearing RAR[1-15]\n");
-    for (i = 1; i < rar_num; i++) {
-        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
-        E1000_WRITE_FLUSH();
-        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
-        E1000_WRITE_FLUSH();
-    }
+       u32 ret_val;
+
+       DEBUGFUNC("e1000_write_phy_reg");
+
+       if ((hw->phy_type == e1000_phy_igp) &&
+           (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+               ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                                (u16) reg_addr);
+               if (ret_val)
+                       return ret_val;
+       }
+
+       ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                        phy_data);
+
+       return ret_val;
 }
 
-/******************************************************************************
- * Hashes an address to determine its location in the multicast table
- *
- * hw - Struct containing variables accessed by shared code
- * mc_addr - the multicast address to hash
- *****************************************************************************/
-u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
+                                 u16 phy_data)
 {
-    u32 hash_value = 0;
-
-    /* The portion of the address that is used for the hash table is
-     * determined by the mc_filter_type setting.
-     */
-    switch (hw->mc_filter_type) {
-    /* [0] [1] [2] [3] [4] [5]
-     * 01  AA  00  12  34  56
-     * LSB                 MSB
-     */
-    case 0:
-        if (hw->mac_type == e1000_ich8lan) {
-            /* [47:38] i.e. 0x158 for above example address */
-            hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2));
-        } else {
-            /* [47:36] i.e. 0x563 for above example address */
-            hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
-        }
-        break;
-    case 1:
-        if (hw->mac_type == e1000_ich8lan) {
-            /* [46:37] i.e. 0x2B1 for above example address */
-            hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3));
-        } else {
-            /* [46:35] i.e. 0xAC6 for above example address */
-            hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
-        }
-        break;
-    case 2:
-        if (hw->mac_type == e1000_ich8lan) {
-            /*[45:36] i.e. 0x163 for above example address */
-            hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
-        } else {
-            /* [45:34] i.e. 0x5D8 for above example address */
-            hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
-        }
-        break;
-    case 3:
-        if (hw->mac_type == e1000_ich8lan) {
-            /* [43:34] i.e. 0x18D for above example address */
-            hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
-        } else {
-            /* [43:32] i.e. 0x634 for above example address */
-            hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
-        }
-        break;
-    }
-
-    hash_value &= 0xFFF;
-    if (hw->mac_type == e1000_ich8lan)
-        hash_value &= 0x3FF;
-
-    return hash_value;
+       u32 i;
+       u32 mdic = 0;
+       const u32 phy_addr = 1;
+
+       DEBUGFUNC("e1000_write_phy_reg_ex");
+
+       if (reg_addr > MAX_PHY_REG_ADDRESS) {
+               DEBUGOUT1("PHY register address %d is out of range\n", reg_addr);
+               return -E1000_ERR_PARAM;
+       }
+
+       if (hw->mac_type > e1000_82543) {
+               /* Set up Op-code, Phy Address, register address, and data intended
+                * for the PHY register in the MDI Control register.  The MAC will take
+                * care of interfacing with the PHY to send the desired data.
+                */
+               mdic = (((u32) phy_data) |
+                       (reg_addr << E1000_MDIC_REG_SHIFT) |
+                       (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                       (E1000_MDIC_OP_WRITE));
+
+               ew32(MDIC, mdic);
+
+               /* Poll the ready bit to see if the MDI write completed */
+               for (i = 0; i < 641; i++) {
+                       udelay(5);
+                       mdic = er32(MDIC);
+                       if (mdic & E1000_MDIC_READY)
+                               break;
+               }
+               if (!(mdic & E1000_MDIC_READY)) {
+                       DEBUGOUT("MDI Write did not complete\n");
+                       return -E1000_ERR_PHY;
+               }
+       } else {
+               /* We'll need to use the SW defined pins to shift the write command
+                * out to the PHY. We first send a preamble to the PHY to signal the
+                * beginning of the MII instruction.  This is done by sending 32
+                * consecutive "1" bits.
+                */
+               e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+               /* Now combine the remaining required fields that will indicate a
+                * write operation. We use this method instead of calling the
+                * e1000_shift_out_mdi_bits routine for each field in the command. The
+                * format of a MII write instruction is as follows:
+                * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+                */
+               mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+                       (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+               mdic <<= 16;
+               mdic |= (u32) phy_data;
+
+               e1000_shift_out_mdi_bits(hw, mdic, 32);
+       }
+
+       return E1000_SUCCESS;
 }
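
The write path packs the whole 32-bit frame at once: the upper half is <SOF><Op Code><Phy Addr><Reg Addr><Turnaround> (start-of-frame in bits 15:14, opcode in 13:12, PHY address in 11:7, register address in 6:2, turnaround in 1:0) and the lower half is the 16-bit data word, matching the mdic <<= 16; mdic |= phy_data sequence above. A companion sketch for the write frame, again with assumed clause-22 constant values (write opcode=01b, turnaround=10b) rather than the driver's definitions, is:

#include <stdint.h>
#include <stdio.h>

/* Assumed clause-22 values, defined locally for the sketch. */
#define SKETCH_SOF          0x1	/* "01" */
#define SKETCH_OP_WRITE     0x1	/* "01" */
#define SKETCH_TURNAROUND   0x2	/* "10" */

/* Pack a full 32-bit MII write frame:
 * <SOF><Op><Phy Addr><Reg Addr><Turnaround> in the top 16 bits,
 * the data word in the bottom 16 bits. */
static uint32_t pack_mii_write(uint32_t phy_addr, uint32_t reg_addr,
			       uint16_t data)
{
	uint32_t hi = SKETCH_TURNAROUND |
		      ((reg_addr & 0x1f) << 2) |
		      ((phy_addr & 0x1f) << 7) |
		      (SKETCH_OP_WRITE << 12) |
		      (SKETCH_SOF << 14);

	return (hi << 16) | data;
}

int main(void)
{
	printf("frame = 0x%08x\n", pack_mii_write(1, 0x00, 0x8000));
	return 0;
}
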
 
-/******************************************************************************
- * Puts an ethernet address into a receive address register.
+/**
+ * e1000_phy_hw_reset - reset the phy, hardware style
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- * addr - Address to put into receive address register
- * index - Receive address register to write
- *****************************************************************************/
-void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+ * Returns the PHY to the power-on reset state
+ */
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
-    u32 rar_low, rar_high;
-
-    /* HW expects these in little endian so we reverse the byte order
-     * from network order (big endian) to little endian
-     */
-    rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
-               ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
-    rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
-
-    /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
-     * unit hang.
-     *
-     * Description:
-     * If there are any Rx frames queued up or otherwise present in the HW
-     * before RSS is enabled, and then we enable RSS, the HW Rx unit will
-     * hang.  To work around this issue, we have to disable receives and
-     * flush out all Rx frames before we enable RSS. To do so, we modify we
-     * redirect all Rx traffic to manageability and then reset the HW.
-     * This flushes away Rx frames, and (since the redirections to
-     * manageability persists across resets) keeps new ones from coming in
-     * while we work.  Then, we clear the Address Valid AV bit for all MAC
-     * addresses and undo the re-direction to manageability.
-     * Now, frames are coming in again, but the MAC won't accept them, so
-     * far so good.  We now proceed to initialize RSS (if necessary) and
-     * configure the Rx unit.  Last, we re-enable the AV bits and continue
-     * on our merry way.
-     */
-    switch (hw->mac_type) {
-    case e1000_82571:
-    case e1000_82572:
-    case e1000_80003es2lan:
-        if (hw->leave_av_bit_off)
-            break;
-    default:
-        /* Indicate to hardware the Address is Valid. */
-        rar_high |= E1000_RAH_AV;
-        break;
-    }
-
-    E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
-    E1000_WRITE_FLUSH();
-    E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
-    E1000_WRITE_FLUSH();
+       u32 ctrl, ctrl_ext;
+       u32 led_ctrl;
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_phy_hw_reset");
+
+       DEBUGOUT("Resetting Phy...\n");
+
+       if (hw->mac_type > e1000_82543) {
+               /* Read the device control register and assert the E1000_CTRL_PHY_RST
+                * bit. Then, take it out of reset.
+                * For e1000 hardware, we delay for 10ms between the assert
+                * and deassert.
+                */
+               ctrl = er32(CTRL);
+               ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+               E1000_WRITE_FLUSH();
+
+               msleep(10);
+
+               ew32(CTRL, ctrl);
+               E1000_WRITE_FLUSH();
+
+       } else {
+               /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+                * bit to put the PHY into reset. Then, take it out of reset.
+                */
+               ctrl_ext = er32(CTRL_EXT);
+               ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+               ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+               ew32(CTRL_EXT, ctrl_ext);
+               E1000_WRITE_FLUSH();
+               msleep(10);
+               ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+               ew32(CTRL_EXT, ctrl_ext);
+               E1000_WRITE_FLUSH();
+       }
+       udelay(150);
+
+       if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+               /* Configure activity LED after PHY reset */
+               led_ctrl = er32(LEDCTL);
+               led_ctrl &= IGP_ACTIVITY_LED_MASK;
+               led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+               ew32(LEDCTL, led_ctrl);
+       }
+
+       /* Wait for FW to finish PHY configuration. */
+       ret_val = e1000_get_phy_cfg_done(hw);
+       if (ret_val != E1000_SUCCESS)
+               return ret_val;
+
+       return ret_val;
 }
 
-/******************************************************************************
- * Writes a value to the specified offset in the VLAN filter table.
+/**
+ * e1000_phy_reset - reset the phy to commit settings
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- * offset - Offset in VLAN filer table to write
- * value - Value to write into VLAN filter table
- *****************************************************************************/
-void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+ * Resets the PHY
+ * Sets bit 15 of the MII Control register
+ */
+s32 e1000_phy_reset(struct e1000_hw *hw)
 {
-    u32 temp;
-
-    if (hw->mac_type == e1000_ich8lan)
-        return;
-
-    if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
-        temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
-        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
-        E1000_WRITE_FLUSH();
-        E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
-        E1000_WRITE_FLUSH();
-    } else {
-        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
-        E1000_WRITE_FLUSH();
-    }
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_phy_reset");
+
+       switch (hw->phy_type) {
+       case e1000_phy_igp:
+               ret_val = e1000_phy_hw_reset(hw);
+               if (ret_val)
+                       return ret_val;
+               break;
+       default:
+               ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               phy_data |= MII_CR_RESET;
+               ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               udelay(1);
+               break;
+       }
+
+       if (hw->phy_type == e1000_phy_igp)
+               e1000_phy_init_script(hw);
+
+       return E1000_SUCCESS;
 }
 
-/******************************************************************************
- * Clears the VLAN filer table
+/**
+ * e1000_detect_gig_phy - check the phy type
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void e1000_clear_vfta(struct e1000_hw *hw)
+ * Probes the expected PHY address for known PHY IDs
+ */
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
 {
-    u32 offset;
-    u32 vfta_value = 0;
-    u32 vfta_offset = 0;
-    u32 vfta_bit_in_reg = 0;
-
-    if (hw->mac_type == e1000_ich8lan)
-        return;
-
-    if (hw->mac_type == e1000_82573) {
-        if (hw->mng_cookie.vlan_id != 0) {
-            /* The VFTA is a 4096b bit-field, each identifying a single VLAN
-             * ID.  The following operations determine which 32b entry
-             * (i.e. offset) into the array we want to set the VLAN ID
-             * (i.e. bit) of the manageability unit. */
-            vfta_offset = (hw->mng_cookie.vlan_id >>
-                           E1000_VFTA_ENTRY_SHIFT) &
-                          E1000_VFTA_ENTRY_MASK;
-            vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
-                                    E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
-        }
-    }
-    for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
-        /* If the offset we want to clear is the same offset of the
-         * manageability VLAN ID, then clear all bits except that of the
-         * manageability unit */
-        vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
-        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
-        E1000_WRITE_FLUSH();
-    }
-}
+       s32 phy_init_status, ret_val;
+       u16 phy_id_high, phy_id_low;
+       bool match = false;
 
-static s32 e1000_id_led_init(struct e1000_hw *hw)
-{
-    u32 ledctl;
-    const u32 ledctl_mask = 0x000000FF;
-    const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
-    const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
-    u16 eeprom_data, i, temp;
-    const u16 led_mask = 0x0F;
-
-    DEBUGFUNC("e1000_id_led_init");
-
-    if (hw->mac_type < e1000_82540) {
-        /* Nothing to do */
-        return E1000_SUCCESS;
-    }
-
-    ledctl = er32(LEDCTL);
-    hw->ledctl_default = ledctl;
-    hw->ledctl_mode1 = hw->ledctl_default;
-    hw->ledctl_mode2 = hw->ledctl_default;
-
-    if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
-        DEBUGOUT("EEPROM Read Error\n");
-        return -E1000_ERR_EEPROM;
-    }
-
-    if ((hw->mac_type == e1000_82573) &&
-        (eeprom_data == ID_LED_RESERVED_82573))
-        eeprom_data = ID_LED_DEFAULT_82573;
-    else if ((eeprom_data == ID_LED_RESERVED_0000) ||
-            (eeprom_data == ID_LED_RESERVED_FFFF)) {
-        if (hw->mac_type == e1000_ich8lan)
-            eeprom_data = ID_LED_DEFAULT_ICH8LAN;
-        else
-            eeprom_data = ID_LED_DEFAULT;
-    }
-
-    for (i = 0; i < 4; i++) {
-        temp = (eeprom_data >> (i << 2)) & led_mask;
-        switch (temp) {
-        case ID_LED_ON1_DEF2:
-        case ID_LED_ON1_ON2:
-        case ID_LED_ON1_OFF2:
-            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
-            hw->ledctl_mode1 |= ledctl_on << (i << 3);
-            break;
-        case ID_LED_OFF1_DEF2:
-        case ID_LED_OFF1_ON2:
-        case ID_LED_OFF1_OFF2:
-            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
-            hw->ledctl_mode1 |= ledctl_off << (i << 3);
-            break;
-        default:
-            /* Do nothing */
-            break;
-        }
-        switch (temp) {
-        case ID_LED_DEF1_ON2:
-        case ID_LED_ON1_ON2:
-        case ID_LED_OFF1_ON2:
-            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
-            hw->ledctl_mode2 |= ledctl_on << (i << 3);
-            break;
-        case ID_LED_DEF1_OFF2:
-        case ID_LED_ON1_OFF2:
-        case ID_LED_OFF1_OFF2:
-            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
-            hw->ledctl_mode2 |= ledctl_off << (i << 3);
-            break;
-        default:
-            /* Do nothing */
-            break;
-        }
-    }
-    return E1000_SUCCESS;
+       DEBUGFUNC("e1000_detect_gig_phy");
+
+       if (hw->phy_id != 0)
+               return E1000_SUCCESS;
+
+       /* Read the PHY ID Registers to identify which PHY is onboard. */
+       ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+       if (ret_val)
+               return ret_val;
+
+       hw->phy_id = (u32) (phy_id_high << 16);
+       udelay(20);
+       ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+       if (ret_val)
+               return ret_val;
+
+       hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
+       hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
+
+       switch (hw->mac_type) {
+       case e1000_82543:
+               if (hw->phy_id == M88E1000_E_PHY_ID)
+                       match = true;
+               break;
+       case e1000_82544:
+               if (hw->phy_id == M88E1000_I_PHY_ID)
+                       match = true;
+               break;
+       case e1000_82540:
+       case e1000_82545:
+       case e1000_82545_rev_3:
+       case e1000_82546:
+       case e1000_82546_rev_3:
+               if (hw->phy_id == M88E1011_I_PHY_ID)
+                       match = true;
+               break;
+       case e1000_82541:
+       case e1000_82541_rev_2:
+       case e1000_82547:
+       case e1000_82547_rev_2:
+               if (hw->phy_id == IGP01E1000_I_PHY_ID)
+                       match = true;
+               break;
+       default:
+               DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+               return -E1000_ERR_CONFIG;
+       }
+       phy_init_status = e1000_set_phy_type(hw);
+
+       if ((match) && (phy_init_status == E1000_SUCCESS)) {
+               DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+               return E1000_SUCCESS;
+       }
+       DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+       return -E1000_ERR_PHY;
 }
 
-/******************************************************************************
- * Prepares SW controlable LED for use and saves the current state of the LED.
+/**
+ * e1000_phy_reset_dsp - reset DSP
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-s32 e1000_setup_led(struct e1000_hw *hw)
+ * Resets the PHY's DSP
+ */
+static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
 {
-    u32 ledctl;
-    s32 ret_val = E1000_SUCCESS;
-
-    DEBUGFUNC("e1000_setup_led");
-
-    switch (hw->mac_type) {
-    case e1000_82542_rev2_0:
-    case e1000_82542_rev2_1:
-    case e1000_82543:
-    case e1000_82544:
-        /* No setup necessary */
-        break;
-    case e1000_82541:
-    case e1000_82547:
-    case e1000_82541_rev_2:
-    case e1000_82547_rev_2:
-        /* Turn off PHY Smart Power Down (if enabled) */
-        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
-                                     &hw->phy_spd_default);
-        if (ret_val)
-            return ret_val;
-        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
-                                      (u16)(hw->phy_spd_default &
-                                      ~IGP01E1000_GMII_SPD));
-        if (ret_val)
-            return ret_val;
-        /* Fall Through */
-    default:
-        if (hw->media_type == e1000_media_type_fiber) {
-            ledctl = er32(LEDCTL);
-            /* Save current LEDCTL settings */
-            hw->ledctl_default = ledctl;
-            /* Turn off LED0 */
-            ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
-                        E1000_LEDCTL_LED0_BLINK |
-                        E1000_LEDCTL_LED0_MODE_MASK);
-            ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
-                       E1000_LEDCTL_LED0_MODE_SHIFT);
-            ew32(LEDCTL, ledctl);
-        } else if (hw->media_type == e1000_media_type_copper)
-            ew32(LEDCTL, hw->ledctl_mode1);
-        break;
-    }
-
-    return E1000_SUCCESS;
-}
+       s32 ret_val;
+       DEBUGFUNC("e1000_phy_reset_dsp");
 
+       do {
+               ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+               if (ret_val)
+                       break;
+               ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+               if (ret_val)
+                       break;
+               ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+               if (ret_val)
+                       break;
+               ret_val = E1000_SUCCESS;
+       } while (0);
 
-/******************************************************************************
- * Used on 82571 and later Si that has LED blink bits.
- * Callers must use their own timer and should have already called
- * e1000_id_led_init()
- * Call e1000_cleanup led() to stop blinking
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-s32 e1000_blink_led_start(struct e1000_hw *hw)
-{
-    s16  i;
-    u32 ledctl_blink = 0;
-
-    DEBUGFUNC("e1000_id_led_blink_on");
-
-    if (hw->mac_type < e1000_82571) {
-        /* Nothing to do */
-        return E1000_SUCCESS;
-    }
-    if (hw->media_type == e1000_media_type_fiber) {
-        /* always blink LED0 for PCI-E fiber */
-        ledctl_blink = E1000_LEDCTL_LED0_BLINK |
-                     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
-    } else {
-        /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
-        ledctl_blink = hw->ledctl_mode2;
-        for (i=0; i < 4; i++)
-            if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
-                E1000_LEDCTL_MODE_LED_ON)
-                ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
-    }
-
-    ew32(LEDCTL, ledctl_blink);
-
-    return E1000_SUCCESS;
+       return ret_val;
 }
 
-/******************************************************************************
- * Restores the saved state of the SW controlable LED.
+/**
+ * e1000_phy_igp_get_info - get igp specific registers
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-s32 e1000_cleanup_led(struct e1000_hw *hw)
+ * Get PHY information from various PHY registers for igp PHY only.
+ */
+static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
+                                 struct e1000_phy_info *phy_info)
 {
-    s32 ret_val = E1000_SUCCESS;
-
-    DEBUGFUNC("e1000_cleanup_led");
-
-    switch (hw->mac_type) {
-    case e1000_82542_rev2_0:
-    case e1000_82542_rev2_1:
-    case e1000_82543:
-    case e1000_82544:
-        /* No cleanup necessary */
-        break;
-    case e1000_82541:
-    case e1000_82547:
-    case e1000_82541_rev_2:
-    case e1000_82547_rev_2:
-        /* Turn on PHY Smart Power Down (if previously enabled) */
-        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
-                                      hw->phy_spd_default);
-        if (ret_val)
-            return ret_val;
-        /* Fall Through */
-    default:
-        if (hw->phy_type == e1000_phy_ife) {
-            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
-            break;
-        }
-        /* Restore LEDCTL settings */
-        ew32(LEDCTL, hw->ledctl_default);
-        break;
-    }
-
-    return E1000_SUCCESS;
+       s32 ret_val;
+       u16 phy_data, min_length, max_length, average;
+       e1000_rev_polarity polarity;
+
+       DEBUGFUNC("e1000_phy_igp_get_info");
+
+       /* The downshift status is checked only once, after link is established,
+        * and it is stored in the hw->speed_downgraded parameter. */
+       phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
+
+       /* IGP01E1000 does not need to support extended 10BT distance. */
+       phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+       /* IGP01E1000 always corrects polarity reversal */
+       phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+       /* Check polarity status */
+       ret_val = e1000_check_polarity(hw, &polarity);
+       if (ret_val)
+               return ret_val;
+
+       phy_info->cable_polarity = polarity;
+
+       ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+       if (ret_val)
+               return ret_val;
+
+       phy_info->mdix_mode =
+           (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
+                                IGP01E1000_PSSR_MDIX_SHIFT);
+
+       if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+               /* Local/Remote Receiver Information is only valid at 1000 Mbps */
+               ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                                     SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+                   e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+               phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                                      SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+                   e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+               /* Get cable length */
+               ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+               if (ret_val)
+                       return ret_val;
+
+               /* Translate to old method */
+               average = (max_length + min_length) / 2;
+
+               if (average <= e1000_igp_cable_length_50)
+                       phy_info->cable_length = e1000_cable_length_50;
+               else if (average <= e1000_igp_cable_length_80)
+                       phy_info->cable_length = e1000_cable_length_50_80;
+               else if (average <= e1000_igp_cable_length_110)
+                       phy_info->cable_length = e1000_cable_length_80_110;
+               else if (average <= e1000_igp_cable_length_140)
+                       phy_info->cable_length = e1000_cable_length_110_140;
+               else
+                       phy_info->cable_length = e1000_cable_length_140;
+       }
+
+       return E1000_SUCCESS;
 }
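
The IGP path converts the measured (min, max) cable length pair into the older coarse buckets by averaging the two and comparing against ascending thresholds. A self-contained sketch of that threshold walk, with hypothetical 50/80/110/140 metre cut-offs standing in for the e1000_igp_cable_length_* constants and local bucket names, is:

#include <stdio.h>

/* Coarse buckets mirroring e1000_cable_length_*; names are local. */
enum sketch_cable_length {
	SKETCH_LEN_50,		/* <= 50 m   */
	SKETCH_LEN_50_80,	/* 50-80 m   */
	SKETCH_LEN_80_110,	/* 80-110 m  */
	SKETCH_LEN_110_140,	/* 110-140 m */
	SKETCH_LEN_140		/* > 140 m   */
};

/* Average the (min, max) estimate and walk the ascending thresholds.
 * The 50/80/110/140 cut-offs are assumed for illustration. */
static enum sketch_cable_length bucket_cable_length(unsigned int min_m,
						    unsigned int max_m)
{
	unsigned int average = (min_m + max_m) / 2;

	if (average <= 50)
		return SKETCH_LEN_50;
	if (average <= 80)
		return SKETCH_LEN_50_80;
	if (average <= 110)
		return SKETCH_LEN_80_110;
	if (average <= 140)
		return SKETCH_LEN_110_140;
	return SKETCH_LEN_140;
}

int main(void)
{
	printf("bucket=%d\n", bucket_cable_length(70, 100));	/* -> 80-110 m */
	return 0;
}
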
 
-/******************************************************************************
- * Turns on the software controllable LED
+/**
+ * e1000_phy_m88_get_info - get m88 specific registers
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-s32 e1000_led_on(struct e1000_hw *hw)
+ * Get PHY information from various PHY registers for m88 PHY only.
+ */
+static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
+                                 struct e1000_phy_info *phy_info)
 {
-    u32 ctrl = er32(CTRL);
-
-    DEBUGFUNC("e1000_led_on");
-
-    switch (hw->mac_type) {
-    case e1000_82542_rev2_0:
-    case e1000_82542_rev2_1:
-    case e1000_82543:
-        /* Set SW Defineable Pin 0 to turn on the LED */
-        ctrl |= E1000_CTRL_SWDPIN0;
-        ctrl |= E1000_CTRL_SWDPIO0;
-        break;
-    case e1000_82544:
-        if (hw->media_type == e1000_media_type_fiber) {
-            /* Set SW Defineable Pin 0 to turn on the LED */
-            ctrl |= E1000_CTRL_SWDPIN0;
-            ctrl |= E1000_CTRL_SWDPIO0;
-        } else {
-            /* Clear SW Defineable Pin 0 to turn on the LED */
-            ctrl &= ~E1000_CTRL_SWDPIN0;
-            ctrl |= E1000_CTRL_SWDPIO0;
-        }
-        break;
-    default:
-        if (hw->media_type == e1000_media_type_fiber) {
-            /* Clear SW Defineable Pin 0 to turn on the LED */
-            ctrl &= ~E1000_CTRL_SWDPIN0;
-            ctrl |= E1000_CTRL_SWDPIO0;
-        } else if (hw->phy_type == e1000_phy_ife) {
-            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
-                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
-        } else if (hw->media_type == e1000_media_type_copper) {
-            ew32(LEDCTL, hw->ledctl_mode2);
-            return E1000_SUCCESS;
-        }
-        break;
-    }
-
-    ew32(CTRL, ctrl);
-
-    return E1000_SUCCESS;
+       s32 ret_val;
+       u16 phy_data;
+       e1000_rev_polarity polarity;
+
+       DEBUGFUNC("e1000_phy_m88_get_info");
+
+       /* The downshift status is checked only once, after link is established,
+        * and it is stored in the hw->speed_downgraded parameter. */
+       phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
+
+       ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               return ret_val;
+
+       phy_info->extended_10bt_distance =
+           ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+            M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+           e1000_10bt_ext_dist_enable_lower :
+           e1000_10bt_ext_dist_enable_normal;
+
+       phy_info->polarity_correction =
+           ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+            M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+           e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+       /* Check polarity status */
+       ret_val = e1000_check_polarity(hw, &polarity);
+       if (ret_val)
+               return ret_val;
+       phy_info->cable_polarity = polarity;
+
+       ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+       if (ret_val)
+               return ret_val;
+
+       phy_info->mdix_mode =
+           (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
+                                M88E1000_PSSR_MDIX_SHIFT);
+
+       if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+               /* Cable Length Estimation and Local/Remote Receiver Information
+                * are only valid at 1000 Mbps.
+                */
+               phy_info->cable_length =
+                   (e1000_cable_length) ((phy_data &
+                                          M88E1000_PSSR_CABLE_LENGTH) >>
+                                         M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+               ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                                     SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+                   e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+               phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                                      SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+                   e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+       }
+
+       return E1000_SUCCESS;
 }
 
-/******************************************************************************
- * Turns off the software controllable LED
+/**
+ * e1000_phy_get_info - request phy info
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
  *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-s32 e1000_led_off(struct e1000_hw *hw)
+ * Get PHY information from various PHY registers
+ */
+s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
 {
-    u32 ctrl = er32(CTRL);
-
-    DEBUGFUNC("e1000_led_off");
-
-    switch (hw->mac_type) {
-    case e1000_82542_rev2_0:
-    case e1000_82542_rev2_1:
-    case e1000_82543:
-        /* Clear SW Defineable Pin 0 to turn off the LED */
-        ctrl &= ~E1000_CTRL_SWDPIN0;
-        ctrl |= E1000_CTRL_SWDPIO0;
-        break;
-    case e1000_82544:
-        if (hw->media_type == e1000_media_type_fiber) {
-            /* Clear SW Defineable Pin 0 to turn off the LED */
-            ctrl &= ~E1000_CTRL_SWDPIN0;
-            ctrl |= E1000_CTRL_SWDPIO0;
-        } else {
-            /* Set SW Defineable Pin 0 to turn off the LED */
-            ctrl |= E1000_CTRL_SWDPIN0;
-            ctrl |= E1000_CTRL_SWDPIO0;
-        }
-        break;
-    default:
-        if (hw->media_type == e1000_media_type_fiber) {
-            /* Set SW Defineable Pin 0 to turn off the LED */
-            ctrl |= E1000_CTRL_SWDPIN0;
-            ctrl |= E1000_CTRL_SWDPIO0;
-        } else if (hw->phy_type == e1000_phy_ife) {
-            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
-                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
-        } else if (hw->media_type == e1000_media_type_copper) {
-            ew32(LEDCTL, hw->ledctl_mode1);
-            return E1000_SUCCESS;
-        }
-        break;
-    }
-
-    ew32(CTRL, ctrl);
-
-    return E1000_SUCCESS;
-}
+       s32 ret_val;
+       u16 phy_data;
 
-/******************************************************************************
- * Clears all hardware statistics counters.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+       DEBUGFUNC("e1000_phy_get_info");
+
+       phy_info->cable_length = e1000_cable_length_undefined;
+       phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+       phy_info->cable_polarity = e1000_rev_polarity_undefined;
+       phy_info->downshift = e1000_downshift_undefined;
+       phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+       phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+       phy_info->local_rx = e1000_1000t_rx_status_undefined;
+       phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+       if (hw->media_type != e1000_media_type_copper) {
+               DEBUGOUT("PHY info is only valid for copper media\n");
+               return -E1000_ERR_CONFIG;
+       }
+
+       ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+       if (ret_val)
+               return ret_val;
+
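+       /* The link status bit in PHY_STATUS is latched, so the register is
+        * read twice: the second read reflects the current link state.
+        */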
+       ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+       if (ret_val)
+               return ret_val;
+
+       if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+               DEBUGOUT("PHY info is only valid if link is up\n");
+               return -E1000_ERR_CONFIG;
+       }
+
+       if (hw->phy_type == e1000_phy_igp)
+               return e1000_phy_igp_get_info(hw, phy_info);
+       else
+               return e1000_phy_m88_get_info(hw, phy_info);
+}
+
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
 {
-    volatile u32 temp;
-
-    temp = er32(CRCERRS);
-    temp = er32(SYMERRS);
-    temp = er32(MPC);
-    temp = er32(SCC);
-    temp = er32(ECOL);
-    temp = er32(MCC);
-    temp = er32(LATECOL);
-    temp = er32(COLC);
-    temp = er32(DC);
-    temp = er32(SEC);
-    temp = er32(RLEC);
-    temp = er32(XONRXC);
-    temp = er32(XONTXC);
-    temp = er32(XOFFRXC);
-    temp = er32(XOFFTXC);
-    temp = er32(FCRUC);
-
-    if (hw->mac_type != e1000_ich8lan) {
-    temp = er32(PRC64);
-    temp = er32(PRC127);
-    temp = er32(PRC255);
-    temp = er32(PRC511);
-    temp = er32(PRC1023);
-    temp = er32(PRC1522);
-    }
-
-    temp = er32(GPRC);
-    temp = er32(BPRC);
-    temp = er32(MPRC);
-    temp = er32(GPTC);
-    temp = er32(GORCL);
-    temp = er32(GORCH);
-    temp = er32(GOTCL);
-    temp = er32(GOTCH);
-    temp = er32(RNBC);
-    temp = er32(RUC);
-    temp = er32(RFC);
-    temp = er32(ROC);
-    temp = er32(RJC);
-    temp = er32(TORL);
-    temp = er32(TORH);
-    temp = er32(TOTL);
-    temp = er32(TOTH);
-    temp = er32(TPR);
-    temp = er32(TPT);
-
-    if (hw->mac_type != e1000_ich8lan) {
-    temp = er32(PTC64);
-    temp = er32(PTC127);
-    temp = er32(PTC255);
-    temp = er32(PTC511);
-    temp = er32(PTC1023);
-    temp = er32(PTC1522);
-    }
-
-    temp = er32(MPTC);
-    temp = er32(BPTC);
-
-    if (hw->mac_type < e1000_82543) return;
-
-    temp = er32(ALGNERRC);
-    temp = er32(RXERRC);
-    temp = er32(TNCRS);
-    temp = er32(CEXTERR);
-    temp = er32(TSCTC);
-    temp = er32(TSCTFC);
-
-    if (hw->mac_type <= e1000_82544) return;
-
-    temp = er32(MGTPRC);
-    temp = er32(MGTPDC);
-    temp = er32(MGTPTC);
-
-    if (hw->mac_type <= e1000_82547_rev_2) return;
-
-    temp = er32(IAC);
-    temp = er32(ICRXOC);
-
-    if (hw->mac_type == e1000_ich8lan) return;
-
-    temp = er32(ICRXPTC);
-    temp = er32(ICRXATC);
-    temp = er32(ICTXPTC);
-    temp = er32(ICTXATC);
-    temp = er32(ICTXQEC);
-    temp = er32(ICTXQMTC);
-    temp = er32(ICRXDMTC);
+       DEBUGFUNC("e1000_validate_mdi_settings");
+
+       if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+               DEBUGOUT("Invalid MDI setting detected\n");
+               hw->mdix = 1;
+               return -E1000_ERR_CONFIG;
+       }
+       return E1000_SUCCESS;
 }
 
-/******************************************************************************
- * Resets Adaptive IFS to its default state.
+/**
+ * e1000_init_eeprom_params - initialize sw eeprom vars
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- *
- * Call this after e1000_init_hw. You may override the IFS defaults by setting
- * hw->ifs_params_forced to true. However, you must initialize hw->
- * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
- * before calling this function.
- *****************************************************************************/
-void e1000_reset_adaptive(struct e1000_hw *hw)
+ * Sets up eeprom variables in the hw struct.  Must be called after mac_type
+ * is configured.
+ */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw)
 {
-    DEBUGFUNC("e1000_reset_adaptive");
-
-    if (hw->adaptive_ifs) {
-        if (!hw->ifs_params_forced) {
-            hw->current_ifs_val = 0;
-            hw->ifs_min_val = IFS_MIN;
-            hw->ifs_max_val = IFS_MAX;
-            hw->ifs_step_size = IFS_STEP;
-            hw->ifs_ratio = IFS_RATIO;
-        }
-        hw->in_ifs_mode = false;
-        ew32(AIT, 0);
-    } else {
-        DEBUGOUT("Not in Adaptive IFS mode!\n");
-    }
+       struct e1000_eeprom_info *eeprom = &hw->eeprom;
+       u32 eecd = er32(EECD);
+       s32 ret_val = E1000_SUCCESS;
+       u16 eeprom_size;
+
+       DEBUGFUNC("e1000_init_eeprom_params");
+
+       switch (hw->mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+       case e1000_82543:
+       case e1000_82544:
+               eeprom->type = e1000_eeprom_microwire;
+               eeprom->word_size = 64;
+               eeprom->opcode_bits = 3;
+               eeprom->address_bits = 6;
+               eeprom->delay_usec = 50;
+               break;
+       case e1000_82540:
+       case e1000_82545:
+       case e1000_82545_rev_3:
+       case e1000_82546:
+       case e1000_82546_rev_3:
+               eeprom->type = e1000_eeprom_microwire;
+               eeprom->opcode_bits = 3;
+               eeprom->delay_usec = 50;
+               if (eecd & E1000_EECD_SIZE) {
+                       eeprom->word_size = 256;
+                       eeprom->address_bits = 8;
+               } else {
+                       eeprom->word_size = 64;
+                       eeprom->address_bits = 6;
+               }
+               break;
+       case e1000_82541:
+       case e1000_82541_rev_2:
+       case e1000_82547:
+       case e1000_82547_rev_2:
+               if (eecd & E1000_EECD_TYPE) {
+                       eeprom->type = e1000_eeprom_spi;
+                       eeprom->opcode_bits = 8;
+                       eeprom->delay_usec = 1;
+                       if (eecd & E1000_EECD_ADDR_BITS) {
+                               eeprom->page_size = 32;
+                               eeprom->address_bits = 16;
+                       } else {
+                               eeprom->page_size = 8;
+                               eeprom->address_bits = 8;
+                       }
+               } else {
+                       eeprom->type = e1000_eeprom_microwire;
+                       eeprom->opcode_bits = 3;
+                       eeprom->delay_usec = 50;
+                       if (eecd & E1000_EECD_ADDR_BITS) {
+                               eeprom->word_size = 256;
+                               eeprom->address_bits = 8;
+                       } else {
+                               eeprom->word_size = 64;
+                               eeprom->address_bits = 6;
+                       }
+               }
+               break;
+       default:
+               break;
+       }
+
+       if (eeprom->type == e1000_eeprom_spi) {
+               /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+                * 32KB (incremented by powers of 2).
+                */
+               /* Set to default value for initial eeprom read. */
+               eeprom->word_size = 64;
+               ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+               if (ret_val)
+                       return ret_val;
+               eeprom_size =
+                   (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+               /* 256B eeprom size was not supported in earlier hardware, so we
+                * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+                * is never the result used in the shifting logic below. */
+               if (eeprom_size)
+                       eeprom_size++;
+
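+               /* word_size is in 16-bit words; with the 128B..32KB mapping
+                * noted above, e.g. a size code of 2 becomes 256 words (512
+                * bytes) after the shift.
+                */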
+               eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+       }
+       return ret_val;
 }
 
-/******************************************************************************
- * Called during the callback/watchdog routine to update IFS value based on
- * the ratio of transmits to collisions.
- *
- * hw - Struct containing variables accessed by shared code
- * tx_packets - Number of transmits since last callback
- * total_collisions - Number of collisions since last callback
- *****************************************************************************/
-void e1000_update_adaptive(struct e1000_hw *hw)
+/**
+ * e1000_raise_ee_clk - Raises the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
+static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
 {
-    DEBUGFUNC("e1000_update_adaptive");
-
-    if (hw->adaptive_ifs) {
-        if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
-            if (hw->tx_packet_delta > MIN_NUM_XMITS) {
-                hw->in_ifs_mode = true;
-                if (hw->current_ifs_val < hw->ifs_max_val) {
-                    if (hw->current_ifs_val == 0)
-                        hw->current_ifs_val = hw->ifs_min_val;
-                    else
-                        hw->current_ifs_val += hw->ifs_step_size;
-                    ew32(AIT, hw->current_ifs_val);
-                }
-            }
-        } else {
-            if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
-                hw->current_ifs_val = 0;
-                hw->in_ifs_mode = false;
-                ew32(AIT, 0);
-            }
-        }
-    } else {
-        DEBUGOUT("Not in Adaptive IFS mode!\n");
-    }
+       /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+        * wait <delay> microseconds.
+        */
+       *eecd = *eecd | E1000_EECD_SK;
+       ew32(EECD, *eecd);
+       E1000_WRITE_FLUSH();
+       udelay(hw->eeprom.delay_usec);
 }
 
-/******************************************************************************
- * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
- *
- * hw - Struct containing variables accessed by shared code
- * frame_len - The length of the frame in question
- * mac_addr - The Ethernet destination address of the frame in question
- *****************************************************************************/
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
-                           u32 frame_len, u8 *mac_addr)
+/**
+ * e1000_lower_ee_clk - Lowers the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
+static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
 {
-    u64 carry_bit;
-
-    /* First adjust the frame length. */
-    frame_len--;
-    /* We need to adjust the statistics counters, since the hardware
-     * counters overcount this packet as a CRC error and undercount
-     * the packet as a good packet
-     */
-    /* This packet should not be counted as a CRC error.    */
-    stats->crcerrs--;
-    /* This packet does count as a Good Packet Received.    */
-    stats->gprc++;
-
-    /* Adjust the Good Octets received counters             */
-    carry_bit = 0x80000000 & stats->gorcl;
-    stats->gorcl += frame_len;
-    /* If the high bit of Gorcl (the low 32 bits of the Good Octets
-     * Received Count) was one before the addition,
-     * AND it is zero after, then we lost the carry out,
-     * need to add one to Gorch (Good Octets Received Count High).
-     * This could be simplified if all environments supported
-     * 64-bit integers.
-     */
-    if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
-        stats->gorch++;
-    /* Is this a broadcast or multicast?  Check broadcast first,
-     * since the test for a multicast frame will test positive on
-     * a broadcast frame.
-     */
-    if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff))
-        /* Broadcast packet */
-        stats->bprc++;
-    else if (*mac_addr & 0x01)
-        /* Multicast packet */
-        stats->mprc++;
-
-    if (frame_len == hw->max_frame_size) {
-        /* In this case, the hardware has overcounted the number of
-         * oversize frames.
-         */
-        if (stats->roc > 0)
-            stats->roc--;
-    }
-
-    /* Adjust the bin counters when the extra byte put the frame in the
-     * wrong bin. Remember that the frame_len was adjusted above.
-     */
-    if (frame_len == 64) {
-        stats->prc64++;
-        stats->prc127--;
-    } else if (frame_len == 127) {
-        stats->prc127++;
-        stats->prc255--;
-    } else if (frame_len == 255) {
-        stats->prc255++;
-        stats->prc511--;
-    } else if (frame_len == 511) {
-        stats->prc511++;
-        stats->prc1023--;
-    } else if (frame_len == 1023) {
-        stats->prc1023++;
-        stats->prc1522--;
-    } else if (frame_len == 1522) {
-        stats->prc1522++;
-    }
+       /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+        * wait <delay> microseconds.
+        */
+       *eecd = *eecd & ~E1000_EECD_SK;
+       ew32(EECD, *eecd);
+       E1000_WRITE_FLUSH();
+       udelay(hw->eeprom.delay_usec);
 }
 
-/******************************************************************************
- * Gets the current PCI bus type, speed, and width of the hardware
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-void e1000_get_bus_info(struct e1000_hw *hw)
+/**
+ * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ */
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
 {
-    s32 ret_val;
-    u16 pci_ex_link_status;
-    u32 status;
-
-    switch (hw->mac_type) {
-    case e1000_82542_rev2_0:
-    case e1000_82542_rev2_1:
-        hw->bus_type = e1000_bus_type_pci;
-        hw->bus_speed = e1000_bus_speed_unknown;
-        hw->bus_width = e1000_bus_width_unknown;
-        break;
-    case e1000_82571:
-    case e1000_82572:
-    case e1000_82573:
-    case e1000_80003es2lan:
-        hw->bus_type = e1000_bus_type_pci_express;
-        hw->bus_speed = e1000_bus_speed_2500;
-        ret_val = e1000_read_pcie_cap_reg(hw,
-                                      PCI_EX_LINK_STATUS,
-                                      &pci_ex_link_status);
-        if (ret_val)
-            hw->bus_width = e1000_bus_width_unknown;
-        else
-            hw->bus_width = (pci_ex_link_status & PCI_EX_LINK_WIDTH_MASK) >>
-                          PCI_EX_LINK_WIDTH_SHIFT;
-        break;
-    case e1000_ich8lan:
-        hw->bus_type = e1000_bus_type_pci_express;
-        hw->bus_speed = e1000_bus_speed_2500;
-        hw->bus_width = e1000_bus_width_pciex_1;
-        break;
-    default:
-        status = er32(STATUS);
-        hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
-                       e1000_bus_type_pcix : e1000_bus_type_pci;
-
-        if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
-            hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
-                            e1000_bus_speed_66 : e1000_bus_speed_120;
-        } else if (hw->bus_type == e1000_bus_type_pci) {
-            hw->bus_speed = (status & E1000_STATUS_PCI66) ?
-                            e1000_bus_speed_66 : e1000_bus_speed_33;
-        } else {
-            switch (status & E1000_STATUS_PCIX_SPEED) {
-            case E1000_STATUS_PCIX_SPEED_66:
-                hw->bus_speed = e1000_bus_speed_66;
-                break;
-            case E1000_STATUS_PCIX_SPEED_100:
-                hw->bus_speed = e1000_bus_speed_100;
-                break;
-            case E1000_STATUS_PCIX_SPEED_133:
-                hw->bus_speed = e1000_bus_speed_133;
-                break;
-            default:
-                hw->bus_speed = e1000_bus_speed_reserved;
-                break;
-            }
-        }
-        hw->bus_width = (status & E1000_STATUS_BUS64) ?
-                        e1000_bus_width_64 : e1000_bus_width_32;
-        break;
-    }
+       struct e1000_eeprom_info *eeprom = &hw->eeprom;
+       u32 eecd;
+       u32 mask;
+
+       /* We need to shift "count" bits out to the EEPROM. So, value in the
+        * "data" parameter will be shifted out to the EEPROM one bit at a time.
+        * In order to do this, "data" must be broken down into bits.
+        */
+       mask = 0x01 << (count - 1);
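+       /* Bits are clocked out MSB first: the mask starts at the highest
+        * requested bit and is shifted down to bit 0.
+        */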
+       eecd = er32(EECD);
+       if (eeprom->type == e1000_eeprom_microwire) {
+               eecd &= ~E1000_EECD_DO;
+       } else if (eeprom->type == e1000_eeprom_spi) {
+               eecd |= E1000_EECD_DO;
+       }
+       do {
+               /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+                * and then raising and then lowering the clock (the SK bit controls
+                * the clock input to the EEPROM).  A "0" is shifted out to the EEPROM
+                * by setting "DI" to "0" and then raising and then lowering the clock.
+                */
+               eecd &= ~E1000_EECD_DI;
+
+               if (data & mask)
+                       eecd |= E1000_EECD_DI;
+
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+
+               udelay(eeprom->delay_usec);
+
+               e1000_raise_ee_clk(hw, &eecd);
+               e1000_lower_ee_clk(hw, &eecd);
+
+               mask = mask >> 1;
+
+       } while (mask);
+
+       /* We leave the "DI" bit set to "0" when we leave this routine. */
+       eecd &= ~E1000_EECD_DI;
+       ew32(EECD, eecd);
 }
 
-/******************************************************************************
- * Writes a value to one of the devices registers using port I/O (as opposed to
- * memory mapped I/O). Only 82544 and newer devices support port I/O.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset to write to
- * value - value to write
- *****************************************************************************/
-static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
+/**
+ * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM
+ * @hw: Struct containing variables accessed by shared code
+ * @count: number of bits to shift in
+ */
+static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
 {
-    unsigned long io_addr = hw->io_base;
-    unsigned long io_data = hw->io_base + 4;
+       u32 eecd;
+       u32 i;
+       u16 data;
+
+       /* In order to read a register from the EEPROM, we need to shift 'count'
+        * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+        * input to the EEPROM (setting the SK bit), and then reading the value of
+        * the "DO" bit.  During this "shifting in" process the "DI" bit should
+        * always be clear.
+        */
+
+       eecd = er32(EECD);
+
+       eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+       data = 0;
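+       /* Each raise/lower of SK clocks one bit in on DO; the result is
+        * assembled MSB first.
+        */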
 
-    e1000_io_write(hw, io_addr, offset);
-    e1000_io_write(hw, io_data, value);
+       for (i = 0; i < count; i++) {
+               data = data << 1;
+               e1000_raise_ee_clk(hw, &eecd);
+
+               eecd = er32(EECD);
+
+               eecd &= ~(E1000_EECD_DI);
+               if (eecd & E1000_EECD_DO)
+                       data |= 1;
+
+               e1000_lower_ee_clk(hw, &eecd);
+       }
+
+       return data;
 }
 
-/******************************************************************************
- * Estimates the cable length.
- *
- * hw - Struct containing variables accessed by shared code
- * min_length - The estimated minimum length
- * max_length - The estimated maximum length
- *
- * returns: - E1000_ERR_XXX
- *            E1000_SUCCESS
+/**
+ * e1000_acquire_eeprom - Prepares EEPROM for access
+ * @hw: Struct containing variables accessed by shared code
  *
- * This function always returns a ranged length (minimum & maximum).
- * So for M88 phy's, this function interprets the one value returned from the
- * register to the minimum and maximum range.
- * For IGP phy's, the function calculates the range by the AGC registers.
- *****************************************************************************/
-static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
-                                 u16 *max_length)
+ * Lowers EEPROM clock. Clears input pin. Asserts the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ */
+static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
 {
-    s32 ret_val;
-    u16 agc_value = 0;
-    u16 i, phy_data;
-    u16 cable_length;
-
-    DEBUGFUNC("e1000_get_cable_length");
-
-    *min_length = *max_length = 0;
-
-    /* Use old method for Phy older than IGP */
-    if (hw->phy_type == e1000_phy_m88) {
-
-        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-        cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
-                       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-
-        /* Convert the enum value to ranged values */
-        switch (cable_length) {
-        case e1000_cable_length_50:
-            *min_length = 0;
-            *max_length = e1000_igp_cable_length_50;
-            break;
-        case e1000_cable_length_50_80:
-            *min_length = e1000_igp_cable_length_50;
-            *max_length = e1000_igp_cable_length_80;
-            break;
-        case e1000_cable_length_80_110:
-            *min_length = e1000_igp_cable_length_80;
-            *max_length = e1000_igp_cable_length_110;
-            break;
-        case e1000_cable_length_110_140:
-            *min_length = e1000_igp_cable_length_110;
-            *max_length = e1000_igp_cable_length_140;
-            break;
-        case e1000_cable_length_140:
-            *min_length = e1000_igp_cable_length_140;
-            *max_length = e1000_igp_cable_length_170;
-            break;
-        default:
-            return -E1000_ERR_PHY;
-            break;
-        }
-    } else if (hw->phy_type == e1000_phy_gg82563) {
-        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-        cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
-
-        switch (cable_length) {
-        case e1000_gg_cable_length_60:
-            *min_length = 0;
-            *max_length = e1000_igp_cable_length_60;
-            break;
-        case e1000_gg_cable_length_60_115:
-            *min_length = e1000_igp_cable_length_60;
-            *max_length = e1000_igp_cable_length_115;
-            break;
-        case e1000_gg_cable_length_115_150:
-            *min_length = e1000_igp_cable_length_115;
-            *max_length = e1000_igp_cable_length_150;
-            break;
-        case e1000_gg_cable_length_150:
-            *min_length = e1000_igp_cable_length_150;
-            *max_length = e1000_igp_cable_length_180;
-            break;
-        default:
-            return -E1000_ERR_PHY;
-            break;
-        }
-    } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
-        u16 cur_agc_value;
-        u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
-        u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
-                                                         {IGP01E1000_PHY_AGC_A,
-                                                          IGP01E1000_PHY_AGC_B,
-                                                          IGP01E1000_PHY_AGC_C,
-                                                          IGP01E1000_PHY_AGC_D};
-        /* Read the AGC registers for all channels */
-        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
-
-            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
-
-            /* Value bound check. */
-            if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
-                (cur_agc_value == 0))
-                return -E1000_ERR_PHY;
-
-            agc_value += cur_agc_value;
-
-            /* Update minimal AGC value. */
-            if (min_agc_value > cur_agc_value)
-                min_agc_value = cur_agc_value;
-        }
-
-        /* Remove the minimal AGC result for length < 50m */
-        if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
-            agc_value -= min_agc_value;
-
-            /* Get the average length of the remaining 3 channels */
-            agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
-        } else {
-            /* Get the average length of all the 4 channels. */
-            agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
-        }
-
-        /* Set the range of the calculated length. */
-        *min_length = ((e1000_igp_cable_length_table[agc_value] -
-                       IGP01E1000_AGC_RANGE) > 0) ?
-                       (e1000_igp_cable_length_table[agc_value] -
-                       IGP01E1000_AGC_RANGE) : 0;
-        *max_length = e1000_igp_cable_length_table[agc_value] +
-                      IGP01E1000_AGC_RANGE;
-    } else if (hw->phy_type == e1000_phy_igp_2 ||
-               hw->phy_type == e1000_phy_igp_3) {
-        u16 cur_agc_index, max_agc_index = 0;
-        u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
-        u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
-                                                         {IGP02E1000_PHY_AGC_A,
-                                                          IGP02E1000_PHY_AGC_B,
-                                                          IGP02E1000_PHY_AGC_C,
-                                                          IGP02E1000_PHY_AGC_D};
-        /* Read the AGC registers for all channels */
-        for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
-            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            /* Getting bits 15:9, which represent the combination of course and
-             * fine gain values.  The result is a number that can be put into
-             * the lookup table to obtain the approximate cable length. */
-            cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
-                            IGP02E1000_AGC_LENGTH_MASK;
-
-            /* Array index bound check. */
-            if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
-                (cur_agc_index == 0))
-                return -E1000_ERR_PHY;
-
-            /* Remove min & max AGC values from calculation. */
-            if (e1000_igp_2_cable_length_table[min_agc_index] >
-                e1000_igp_2_cable_length_table[cur_agc_index])
-                min_agc_index = cur_agc_index;
-            if (e1000_igp_2_cable_length_table[max_agc_index] <
-                e1000_igp_2_cable_length_table[cur_agc_index])
-                max_agc_index = cur_agc_index;
-
-            agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
-        }
-
-        agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
-                      e1000_igp_2_cable_length_table[max_agc_index]);
-        agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
-
-        /* Calculate cable length with the error range of +/- 10 meters. */
-        *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
-                       (agc_value - IGP02E1000_AGC_RANGE) : 0;
-        *max_length = agc_value + IGP02E1000_AGC_RANGE;
-    }
-
-    return E1000_SUCCESS;
+       struct e1000_eeprom_info *eeprom = &hw->eeprom;
+       u32 eecd, i = 0;
+
+       DEBUGFUNC("e1000_acquire_eeprom");
+
+       eecd = er32(EECD);
+
+       /* Request EEPROM Access */
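+       /* (82544 and older have no REQ/GNT arbitration, so it is skipped there.) */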
+       if (hw->mac_type > e1000_82544) {
+               eecd |= E1000_EECD_REQ;
+               ew32(EECD, eecd);
+               eecd = er32(EECD);
+               while ((!(eecd & E1000_EECD_GNT)) &&
+                      (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+                       i++;
+                       udelay(5);
+                       eecd = er32(EECD);
+               }
+               if (!(eecd & E1000_EECD_GNT)) {
+                       eecd &= ~E1000_EECD_REQ;
+                       ew32(EECD, eecd);
+                       DEBUGOUT("Could not acquire EEPROM grant\n");
+                       return -E1000_ERR_EEPROM;
+               }
+       }
+
+       /* Setup EEPROM for Read/Write */
+
+       if (eeprom->type == e1000_eeprom_microwire) {
+               /* Clear SK and DI */
+               eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+               ew32(EECD, eecd);
+
+               /* Set CS */
+               eecd |= E1000_EECD_CS;
+               ew32(EECD, eecd);
+       } else if (eeprom->type == e1000_eeprom_spi) {
+               /* Clear SK and CS */
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+               ew32(EECD, eecd);
+               udelay(1);
+       }
+
+       return E1000_SUCCESS;
 }
 
-/******************************************************************************
- * Check the cable polarity
- *
- * hw - Struct containing variables accessed by shared code
- * polarity - output parameter : 0 - Polarity is not reversed
- *                               1 - Polarity is reversed.
- *
- * returns: - E1000_ERR_XXX
- *            E1000_SUCCESS
- *
- * For phy's older than IGP, this function simply reads the polarity bit in the
- * Phy Status register.  For IGP phy's, this bit is valid only if link speed is
- * 10 Mbps.  If the link speed is 100 Mbps there is no polarity so this bit will
- * return 0.  If the link speed is 1000 Mbps the polarity status is in the
- * IGP01E1000_PHY_PCS_INIT_REG.
- *****************************************************************************/
-static s32 e1000_check_polarity(struct e1000_hw *hw,
-                               e1000_rev_polarity *polarity)
+/**
+ * e1000_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_standby_eeprom(struct e1000_hw *hw)
 {
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_check_polarity");
-
-    if ((hw->phy_type == e1000_phy_m88) ||
-        (hw->phy_type == e1000_phy_gg82563)) {
-        /* return the Polarity bit in the Status register. */
-        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-        *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
-                     M88E1000_PSSR_REV_POLARITY_SHIFT) ?
-                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
-
-    } else if (hw->phy_type == e1000_phy_igp ||
-              hw->phy_type == e1000_phy_igp_3 ||
-              hw->phy_type == e1000_phy_igp_2) {
-        /* Read the Status register to check the speed */
-        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
-         * find the polarity status */
-        if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
-           IGP01E1000_PSSR_SPEED_1000MBPS) {
-
-            /* Read the GIG initialization PCS register (0x00B4) */
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
-                                         &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            /* Check the polarity bits */
-            *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
-                         e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
-        } else {
-            /* For 10 Mbps, read the polarity bit in the status register. (for
-             * 100 Mbps this bit is always 0) */
-            *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
-                         e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
-        }
-    } else if (hw->phy_type == e1000_phy_ife) {
-        ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-        *polarity = ((phy_data & IFE_PESC_POLARITY_REVERSED) >>
-                     IFE_PESC_POLARITY_REVERSED_SHIFT) ?
-                     e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
-    }
-    return E1000_SUCCESS;
+       struct e1000_eeprom_info *eeprom = &hw->eeprom;
+       u32 eecd;
+
+       eecd = er32(EECD);
+
+       if (eeprom->type == e1000_eeprom_microwire) {
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+               udelay(eeprom->delay_usec);
+
+               /* Clock high */
+               eecd |= E1000_EECD_SK;
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+               udelay(eeprom->delay_usec);
+
+               /* Select EEPROM */
+               eecd |= E1000_EECD_CS;
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+               udelay(eeprom->delay_usec);
+
+               /* Clock low */
+               eecd &= ~E1000_EECD_SK;
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+               udelay(eeprom->delay_usec);
+       } else if (eeprom->type == e1000_eeprom_spi) {
+               /* Toggle CS to flush commands */
+               eecd |= E1000_EECD_CS;
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+               udelay(eeprom->delay_usec);
+               eecd &= ~E1000_EECD_CS;
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+               udelay(eeprom->delay_usec);
+       }
 }
 
-/******************************************************************************
- * Check if Downshift occured
+/**
+ * e1000_release_eeprom - drop chip select
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - Struct containing variables accessed by shared code
- * downshift - output parameter : 0 - No Downshift ocured.
- *                                1 - Downshift ocured.
- *
- * returns: - E1000_ERR_XXX
- *            E1000_SUCCESS
- *
- * For phy's older than IGP, this function reads the Downshift bit in the Phy
- * Specific Status register.  For IGP phy's, it reads the Downgrade bit in the
- * Link Health register.  In IGP this bit is latched high, so the driver must
- * read it immediately after link is established.
- *****************************************************************************/
-static s32 e1000_check_downshift(struct e1000_hw *hw)
+ * Terminates a command by deasserting the EEPROM's chip select pin
+ */
+static void e1000_release_eeprom(struct e1000_hw *hw)
 {
-    s32 ret_val;
-    u16 phy_data;
-
-    DEBUGFUNC("e1000_check_downshift");
-
-    if (hw->phy_type == e1000_phy_igp ||
-        hw->phy_type == e1000_phy_igp_3 ||
-        hw->phy_type == e1000_phy_igp_2) {
-        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
-    } else if ((hw->phy_type == e1000_phy_m88) ||
-               (hw->phy_type == e1000_phy_gg82563)) {
-        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
-                                     &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
-                               M88E1000_PSSR_DOWNSHIFT_SHIFT;
-    } else if (hw->phy_type == e1000_phy_ife) {
-        /* e1000_phy_ife supports 10/100 speed only */
-        hw->speed_downgraded = false;
-    }
-
-    return E1000_SUCCESS;
+       u32 eecd;
+
+       DEBUGFUNC("e1000_release_eeprom");
+
+       eecd = er32(EECD);
+
+       if (hw->eeprom.type == e1000_eeprom_spi) {
+               eecd |= E1000_EECD_CS;  /* Pull CS high */
+               eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+               ew32(EECD, eecd);
+
+               udelay(hw->eeprom.delay_usec);
+       } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+               /* cleanup eeprom */
+
+               /* CS on Microwire is active-high */
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+               ew32(EECD, eecd);
+
+               /* Rising edge of clock */
+               eecd |= E1000_EECD_SK;
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+               udelay(hw->eeprom.delay_usec);
+
+               /* Falling edge of clock */
+               eecd &= ~E1000_EECD_SK;
+               ew32(EECD, eecd);
+               E1000_WRITE_FLUSH();
+               udelay(hw->eeprom.delay_usec);
+       }
+
+       /* Stop requesting EEPROM access */
+       if (hw->mac_type > e1000_82544) {
+               eecd &= ~E1000_EECD_REQ;
+               ew32(EECD, eecd);
+       }
 }
 
-/*****************************************************************************
- *
- * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
- * gigabit link is achieved to improve link quality.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - E1000_ERR_PHY if fail to read/write the PHY
- *            E1000_SUCCESS at any other case.
- *
- ****************************************************************************/
+/**
+ * e1000_spi_eeprom_ready - Waits for the SPI EEPROM to be ready for commands
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+       u16 retry_count = 0;
+       u8 spi_stat_reg;
 
-static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
+       DEBUGFUNC("e1000_spi_eeprom_ready");
+
+       /* Read "Status Register" repeatedly until the LSB is cleared.  The
+        * EEPROM will signal that the command has been completed by clearing
+        * bit 0 of the internal status register.  If it's not cleared within
+        * 5 milliseconds, then error out.
+        */
+       retry_count = 0;
+       do {
+               e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+                                       hw->eeprom.opcode_bits);
+               spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8);
+               if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+                       break;
+
+               udelay(5);
+               retry_count += 5;
+
+               e1000_standby_eeprom(hw);
+       } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+       /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+        * only 0-5mSec on 5V devices)
+        */
+       if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+               DEBUGOUT("SPI EEPROM Status error\n");
+               return -E1000_ERR_EEPROM;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_eeprom - Reads 16 bit words from the EEPROM
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset of the first word in the EEPROM to read
+ * @data: buffer that receives the words read from the EEPROM
+ * @words: number of words to read
+ */
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
-    s32 ret_val;
-    u16 phy_data, phy_saved_data, speed, duplex, i;
-    u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
-                                        {IGP01E1000_PHY_AGC_PARAM_A,
-                                        IGP01E1000_PHY_AGC_PARAM_B,
-                                        IGP01E1000_PHY_AGC_PARAM_C,
-                                        IGP01E1000_PHY_AGC_PARAM_D};
-    u16 min_length, max_length;
-
-    DEBUGFUNC("e1000_config_dsp_after_link_change");
-
-    if (hw->phy_type != e1000_phy_igp)
-        return E1000_SUCCESS;
-
-    if (link_up) {
-        ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
-        if (ret_val) {
-            DEBUGOUT("Error getting link speed and duplex\n");
-            return ret_val;
-        }
-
-        if (speed == SPEED_1000) {
-
-            ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
-            if (ret_val)
-                return ret_val;
-
-            if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
-                min_length >= e1000_igp_cable_length_50) {
-
-                for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
-                    ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
-                                                 &phy_data);
-                    if (ret_val)
-                        return ret_val;
-
-                    phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
-
-                    ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
-                                                  phy_data);
-                    if (ret_val)
-                        return ret_val;
-                }
-                hw->dsp_config_state = e1000_dsp_config_activated;
-            }
-
-            if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
-               (min_length < e1000_igp_cable_length_50)) {
-
-                u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
-                u32 idle_errs = 0;
-
-                /* clear previous idle error counts */
-                ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
-                                             &phy_data);
-                if (ret_val)
-                    return ret_val;
-
-                for (i = 0; i < ffe_idle_err_timeout; i++) {
-                    udelay(1000);
-                    ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
-                                                 &phy_data);
-                    if (ret_val)
-                        return ret_val;
-
-                    idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
-                    if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
-                        hw->ffe_config_state = e1000_ffe_config_active;
-
-                        ret_val = e1000_write_phy_reg(hw,
-                                    IGP01E1000_PHY_DSP_FFE,
-                                    IGP01E1000_PHY_DSP_FFE_CM_CP);
-                        if (ret_val)
-                            return ret_val;
-                        break;
-                    }
-
-                    if (idle_errs)
-                        ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
-                }
-            }
-        }
-    } else {
-        if (hw->dsp_config_state == e1000_dsp_config_activated) {
-            /* Save off the current value of register 0x2F5B to be restored at
-             * the end of the routines. */
-            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
-
-            if (ret_val)
-                return ret_val;
-
-            /* Disable the PHY transmitter */
-            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
-
-            if (ret_val)
-                return ret_val;
-
-            mdelay(20);
-
-            ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                          IGP01E1000_IEEE_FORCE_GIGA);
-            if (ret_val)
-                return ret_val;
-            for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
-                ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
-                if (ret_val)
-                    return ret_val;
-
-                phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
-                phy_data |=  IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
-
-                ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
-                if (ret_val)
-                    return ret_val;
-            }
-
-            ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                          IGP01E1000_IEEE_RESTART_AUTONEG);
-            if (ret_val)
-                return ret_val;
-
-            mdelay(20);
-
-            /* Now enable the transmitter */
-            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
-
-            if (ret_val)
-                return ret_val;
-
-            hw->dsp_config_state = e1000_dsp_config_enabled;
-        }
-
-        if (hw->ffe_config_state == e1000_ffe_config_active) {
-            /* Save off the current value of register 0x2F5B to be restored at
-             * the end of the routines. */
-            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
-
-            if (ret_val)
-                return ret_val;
-
-            /* Disable the PHY transmitter */
-            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
-
-            if (ret_val)
-                return ret_val;
-
-            mdelay(20);
-
-            ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                          IGP01E1000_IEEE_FORCE_GIGA);
-            if (ret_val)
-                return ret_val;
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
-                                          IGP01E1000_PHY_DSP_FFE_DEFAULT);
-            if (ret_val)
-                return ret_val;
-
-            ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                          IGP01E1000_IEEE_RESTART_AUTONEG);
-            if (ret_val)
-                return ret_val;
-
-            mdelay(20);
-
-            /* Now enable the transmitter */
-            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
-
-            if (ret_val)
-                return ret_val;
-
-            hw->ffe_config_state = e1000_ffe_config_enabled;
-        }
-    }
-    return E1000_SUCCESS;
+       s32 ret;
+       spin_lock(&e1000_eeprom_lock);
+       ret = e1000_do_read_eeprom(hw, offset, words, data);
+       spin_unlock(&e1000_eeprom_lock);
+       return ret;
 }
 
-/*****************************************************************************
- * Set PHY to class A mode
- * Assumes the following operations will follow to enable the new class mode.
- *  1. Do a PHY soft reset
- *  2. Restart auto-negotiation or force link.
- *
- * hw - Struct containing variables accessed by shared code
- ****************************************************************************/
-static s32 e1000_set_phy_mode(struct e1000_hw *hw)
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+                               u16 *data)
 {
-    s32 ret_val;
-    u16 eeprom_data;
-
-    DEBUGFUNC("e1000_set_phy_mode");
-
-    if ((hw->mac_type == e1000_82545_rev_3) &&
-        (hw->media_type == e1000_media_type_copper)) {
-        ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
-        if (ret_val) {
-            return ret_val;
-        }
-
-        if ((eeprom_data != EEPROM_RESERVED_WORD) &&
-            (eeprom_data & EEPROM_PHY_CLASS_A)) {
-            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
-            if (ret_val)
-                return ret_val;
-            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
-            if (ret_val)
-                return ret_val;
-
-            hw->phy_reset_disable = false;
-        }
-    }
-
-    return E1000_SUCCESS;
+       struct e1000_eeprom_info *eeprom = &hw->eeprom;
+       u32 i = 0;
+
+       DEBUGFUNC("e1000_read_eeprom");
+
+       /* If eeprom is not yet detected, do so now */
+       if (eeprom->word_size == 0)
+               e1000_init_eeprom_params(hw);
+
+       /* A check for invalid values:  offset too large, too many words, and not
+        * enough words.
+        */
+       if ((offset >= eeprom->word_size)
+           || (words > eeprom->word_size - offset) || (words == 0)) {
+               DEBUGOUT2
+                   ("\"words\" parameter out of bounds. Words = %d, size = %d\n",
+                    offset, eeprom->word_size);
+               return -E1000_ERR_EEPROM;
+       }
+
+       /* EEPROMs that don't use EERD to read require us to bit-bang the
+        * serial interface directly. In this case, we need to acquire the
+        * EEPROM so that FW or other port software does not interrupt.
+        */
+       /* Prepare the EEPROM for bit-bang reading */
+       if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+               return -E1000_ERR_EEPROM;
+
+       /* Set up the SPI or Microwire EEPROM for bit-bang reading.  We have
+        * acquired the EEPROM at this point, so any returns should release it */
+       if (eeprom->type == e1000_eeprom_spi) {
+               u16 word_in;
+               u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+               if (e1000_spi_eeprom_ready(hw)) {
+                       e1000_release_eeprom(hw);
+                       return -E1000_ERR_EEPROM;
+               }
+
+               e1000_standby_eeprom(hw);
+
+               /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+               if ((eeprom->address_bits == 8) && (offset >= 128))
+                       read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+               /* Send the READ command (opcode + addr)  */
+               e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+               e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
+                                       eeprom->address_bits);
+
+               /* Read the data.  The address of the eeprom internally increments with
+                * each byte (spi) being read, saving on the overhead of eeprom setup
+                * and tear-down.  The address counter will roll over if reading beyond
+                * the size of the eeprom, thus allowing the entire memory to be read
+                * starting from any offset. */
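+               /* e1000_shift_in_ee_bits() places the first byte received in
+                * the upper half of word_in, so swap bytes to restore the
+                * EEPROM's 16-bit word layout.
+                */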
+               for (i = 0; i < words; i++) {
+                       word_in = e1000_shift_in_ee_bits(hw, 16);
+                       data[i] = (word_in >> 8) | (word_in << 8);
+               }
+       } else if (eeprom->type == e1000_eeprom_microwire) {
+               for (i = 0; i < words; i++) {
+                       /* Send the READ command (opcode + addr)  */
+                       e1000_shift_out_ee_bits(hw,
+                                               EEPROM_READ_OPCODE_MICROWIRE,
+                                               eeprom->opcode_bits);
+                       e1000_shift_out_ee_bits(hw, (u16) (offset + i),
+                                               eeprom->address_bits);
+
+                       /* Read the data.  For microwire, each word requires the overhead
+                        * of eeprom setup and tear-down. */
+                       data[i] = e1000_shift_in_ee_bits(hw, 16);
+                       e1000_standby_eeprom(hw);
+               }
+       }
+
+       /* End this read operation */
+       e1000_release_eeprom(hw);
+
+       return E1000_SUCCESS;
 }
 
-/*****************************************************************************
+/**
+ * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum
+ * @hw: Struct containing variables accessed by shared code
  *
- * This function sets the lplu state according to the active flag.  When
- * activating lplu this function also disables smart speed and vise versa.
- * lplu will not be activated unless the device autonegotiation advertisment
- * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
- * hw: Struct containing variables accessed by shared code
- * active - true to enable lplu false to disable lplu.
- *
- * returns: - E1000_ERR_PHY if fail to read/write the PHY
- *            E1000_SUCCESS at any other case.
- *
- ****************************************************************************/
-
-static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ */
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
 {
-    u32 phy_ctrl = 0;
-    s32 ret_val;
-    u16 phy_data;
-    DEBUGFUNC("e1000_set_d3_lplu_state");
-
-    if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
-        && hw->phy_type != e1000_phy_igp_3)
-        return E1000_SUCCESS;
-
-    /* During driver activity LPLU should not be used or it will attain link
-     * from the lowest speeds starting from 10Mbps. The capability is used for
-     * Dx transitions and states */
-    if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
-        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
-        if (ret_val)
-            return ret_val;
-    } else if (hw->mac_type == e1000_ich8lan) {
-        /* MAC writes into PHY register based on the state transition
-         * and start auto-negotiation. SW driver can overwrite the settings
-         * in CSR PHY power control E1000_PHY_CTRL register. */
-        phy_ctrl = er32(PHY_CTRL);
-    } else {
-        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
-        if (ret_val)
-            return ret_val;
-    }
-
-    if (!active) {
-        if (hw->mac_type == e1000_82541_rev_2 ||
-            hw->mac_type == e1000_82547_rev_2) {
-            phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
-            if (ret_val)
-                return ret_val;
-        } else {
-            if (hw->mac_type == e1000_ich8lan) {
-                phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
-                ew32(PHY_CTRL, phy_ctrl);
-            } else {
-                phy_data &= ~IGP02E1000_PM_D3_LPLU;
-                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-                                              phy_data);
-                if (ret_val)
-                    return ret_val;
-            }
-        }
-
-        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
-         * Dx states where the power conservation is most important.  During
-         * driver activity we should enable SmartSpeed, so performance is
-         * maintained. */
-        if (hw->smart_speed == e1000_smart_speed_on) {
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                         &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                          phy_data);
-            if (ret_val)
-                return ret_val;
-        } else if (hw->smart_speed == e1000_smart_speed_off) {
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                         &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                          phy_data);
-            if (ret_val)
-                return ret_val;
-        }
-
-    } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
-               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
-               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
-
-        if (hw->mac_type == e1000_82541_rev_2 ||
-            hw->mac_type == e1000_82547_rev_2) {
-            phy_data |= IGP01E1000_GMII_FLEX_SPD;
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
-            if (ret_val)
-                return ret_val;
-        } else {
-            if (hw->mac_type == e1000_ich8lan) {
-                phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
-                ew32(PHY_CTRL, phy_ctrl);
-            } else {
-                phy_data |= IGP02E1000_PM_D3_LPLU;
-                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-                                              phy_data);
-                if (ret_val)
-                    return ret_val;
-            }
-        }
-
-        /* When LPLU is enabled we should disable SmartSpeed */
-        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
-        if (ret_val)
-            return ret_val;
-
-    }
-    return E1000_SUCCESS;
+       u16 checksum = 0;
+       u16 i, eeprom_data;
+
+       DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+       for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+               if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+                       DEBUGOUT("EEPROM Read Error\n");
+                       return -E1000_ERR_EEPROM;
+               }
+               checksum += eeprom_data;
+       }
+
+       if (checksum == (u16) EEPROM_SUM)
+               return E1000_SUCCESS;
+       else {
+               DEBUGOUT("EEPROM Checksum Invalid\n");
+               return -E1000_ERR_EEPROM;
+       }
 }
 
-/*****************************************************************************
- *
- * This function sets the lplu d0 state according to the active flag.  When
- * activating lplu this function also disables smart speed and vice versa.
- * lplu will not be activated unless the device autonegotiation advertisement
- * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
- * hw: Struct containing variables accessed by shared code
- * active - true to enable lplu false to disable lplu.
+/**
+ * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum
+ * @hw: Struct containing variables accessed by shared code
  *
- * returns: - E1000_ERR_PHY if fail to read/write the PHY
- *            E1000_SUCCESS at any other case.
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
+ */
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+       u16 checksum = 0;
+       u16 i, eeprom_data;
+
+       DEBUGFUNC("e1000_update_eeprom_checksum");
+
+       for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+               if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+                       DEBUGOUT("EEPROM Read Error\n");
+                       return -E1000_ERR_EEPROM;
+               }
+               checksum += eeprom_data;
+       }
+       checksum = (u16) EEPROM_SUM - checksum;
+       if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+               DEBUGOUT("EEPROM Write Error\n");
+               return -E1000_ERR_EEPROM;
+       }
+       return E1000_SUCCESS;
+}
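/*
 * Illustrative sketch of the checksum rule used above: the first 63 words
 * plus the checksum word must sum (with 16-bit wraparound) to 0xBABA
 * (EEPROM_SUM).  The helper names and EXAMPLE_* constants are examples only
 * and are not part of the driver.
 */
#include <stdint.h>
#include <stdbool.h>

#define EXAMPLE_EEPROM_SUM          0xBABA
#define EXAMPLE_EEPROM_CHECKSUM_REG 63

static bool example_eeprom_checksum_ok(const uint16_t words[64])
{
        uint16_t sum = 0;
        int i;

        for (i = 0; i <= EXAMPLE_EEPROM_CHECKSUM_REG; i++)
                sum += words[i];        /* 16-bit wraparound is intended */

        return sum == EXAMPLE_EEPROM_SUM;
}

/* Value that e1000_update_eeprom_checksum would write to word 63 */
static uint16_t example_eeprom_new_checksum(const uint16_t words[64])
{
        uint16_t sum = 0;
        int i;

        for (i = 0; i < EXAMPLE_EEPROM_CHECKSUM_REG; i++)
                sum += words[i];

        return (uint16_t)(EXAMPLE_EEPROM_SUM - sum);
}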
+
+/**
+ * e1000_write_eeprom - write words to the different EEPROM types.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
  *
- ****************************************************************************/
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ */
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       s32 ret;
+       spin_lock(&e1000_eeprom_lock);
+       ret = e1000_do_write_eeprom(hw, offset, words, data);
+       spin_unlock(&e1000_eeprom_lock);
+       return ret;
+}
 
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+                                u16 *data)
 {
-    u32 phy_ctrl = 0;
-    s32 ret_val;
-    u16 phy_data;
-    DEBUGFUNC("e1000_set_d0_lplu_state");
-
-    if (hw->mac_type <= e1000_82547_rev_2)
-        return E1000_SUCCESS;
-
-    if (hw->mac_type == e1000_ich8lan) {
-        phy_ctrl = er32(PHY_CTRL);
-    } else {
-        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
-        if (ret_val)
-            return ret_val;
-    }
-
-    if (!active) {
-        if (hw->mac_type == e1000_ich8lan) {
-            phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
-            ew32(PHY_CTRL, phy_ctrl);
-        } else {
-            phy_data &= ~IGP02E1000_PM_D0_LPLU;
-            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
-            if (ret_val)
-                return ret_val;
-        }
-
-        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
-         * Dx states where the power conservation is most important.  During
-         * driver activity we should enable SmartSpeed, so performance is
-         * maintained. */
-        if (hw->smart_speed == e1000_smart_speed_on) {
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                         &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                          phy_data);
-            if (ret_val)
-                return ret_val;
-        } else if (hw->smart_speed == e1000_smart_speed_off) {
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                         &phy_data);
-            if (ret_val)
-                return ret_val;
-
-            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                          phy_data);
-            if (ret_val)
-                return ret_val;
-        }
-
-
-    } else {
-
-        if (hw->mac_type == e1000_ich8lan) {
-            phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
-            ew32(PHY_CTRL, phy_ctrl);
-        } else {
-            phy_data |= IGP02E1000_PM_D0_LPLU;
-            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
-            if (ret_val)
-                return ret_val;
-        }
-
-        /* When LPLU is enabled we should disable SmartSpeed */
-        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
-        if (ret_val)
-            return ret_val;
-
-        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
-        if (ret_val)
-            return ret_val;
-
-    }
-    return E1000_SUCCESS;
+       struct e1000_eeprom_info *eeprom = &hw->eeprom;
+       s32 status = 0;
+
+       DEBUGFUNC("e1000_write_eeprom");
+
+       /* If eeprom is not yet detected, do so now */
+       if (eeprom->word_size == 0)
+               e1000_init_eeprom_params(hw);
+
+       /* A check for invalid values: offset too large, too many words, and
+        * zero words requested.
+        */
+       if ((offset >= eeprom->word_size)
+           || (words > eeprom->word_size - offset) || (words == 0)) {
+               DEBUGOUT("\"words\" parameter out of bounds\n");
+               return -E1000_ERR_EEPROM;
+       }
+
+       /* Prepare the EEPROM for writing  */
+       if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+               return -E1000_ERR_EEPROM;
+
+       if (eeprom->type == e1000_eeprom_microwire) {
+               status = e1000_write_eeprom_microwire(hw, offset, words, data);
+       } else {
+               status = e1000_write_eeprom_spi(hw, offset, words, data);
+               msleep(10);
+       }
+
+       /* Done with writing */
+       e1000_release_eeprom(hw);
+
+       return status;
 }
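/*
 * Sketch of the bounds check performed above before any EEPROM access: the
 * requested range must start inside the part, fit within it, and be
 * non-empty.  The helper name is an example, not a driver function.
 */
#include <stdint.h>
#include <stdbool.h>

static bool example_eeprom_range_ok(uint16_t word_size, uint16_t offset,
                                    uint16_t words)
{
        return offset < word_size &&
               words != 0 &&
               words <= (uint16_t)(word_size - offset);
}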
 
-/******************************************************************************
- * Change VCO speed register to improve Bit Error Rate performance of SERDES.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+/**
+ * e1000_write_eeprom_spi - Writes 16 bit words to a given offset in an SPI EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
+static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
+                                 u16 *data)
 {
-    s32  ret_val;
-    u16 default_page = 0;
-    u16 phy_data;
+       struct e1000_eeprom_info *eeprom = &hw->eeprom;
+       u16 widx = 0;
 
-    DEBUGFUNC("e1000_set_vco_speed");
+       DEBUGFUNC("e1000_write_eeprom_spi");
 
-    switch (hw->mac_type) {
-    case e1000_82545_rev_3:
-    case e1000_82546_rev_3:
-       break;
-    default:
-        return E1000_SUCCESS;
-    }
+       while (widx < words) {
+               u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
 
-    /* Set PHY register 30, page 5, bit 8 to 0 */
+               if (e1000_spi_eeprom_ready(hw))
+                       return -E1000_ERR_EEPROM;
 
-    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
-    if (ret_val)
-        return ret_val;
+               e1000_standby_eeprom(hw);
 
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
-    if (ret_val)
-        return ret_val;
+               /* Send the WRITE ENABLE command (8 bit opcode) */
+               e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+                                       eeprom->opcode_bits);
 
-    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
-    if (ret_val)
-        return ret_val;
+               e1000_standby_eeprom(hw);
 
-    phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
-    if (ret_val)
-        return ret_val;
+               /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+               if ((eeprom->address_bits == 8) && (offset >= 128))
+                       write_opcode |= EEPROM_A8_OPCODE_SPI;
 
-    /* Set PHY register 30, page 4, bit 11 to 1 */
+               /* Send the Write command (8-bit opcode + addr) */
+               e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
 
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
-    if (ret_val)
-        return ret_val;
+               e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2),
+                                       eeprom->address_bits);
 
-    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
-    if (ret_val)
-        return ret_val;
+               /* Send the data */
 
-    phy_data |= M88E1000_PHY_VCO_REG_BIT11;
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
-    if (ret_val)
-        return ret_val;
+               /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+               while (widx < words) {
+                       u16 word_out = data[widx];
+                       word_out = (word_out >> 8) | (word_out << 8);
+                       e1000_shift_out_ee_bits(hw, word_out, 16);
+                       widx++;
 
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
-    if (ret_val)
-        return ret_val;
+                       /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+                        * operation, while the smaller eeproms are capable of an 8-byte
+                        * PAGE WRITE operation.  Break the inner loop to pass new address
+                        */
+                       if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
+                               e1000_standby_eeprom(hw);
+                               break;
+                       }
+               }
+       }
 
-    return E1000_SUCCESS;
+       return E1000_SUCCESS;
 }
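/*
 * Sketch of the page-boundary rule in the SPI write loop above: one WRITE
 * command may burst at most one EEPROM page, so the inner loop stops when
 * the byte address of the next word lands on a page boundary.  page_size is
 * assumed to be 8 or 32 bytes, as described in the comment above; the helper
 * name is an example only.
 */
#include <stdint.h>
#include <stdbool.h>

static bool example_spi_page_boundary(uint16_t next_word_offset,
                                      uint16_t page_size)
{
        /* word offsets address 16-bit words, so the byte address is doubled */
        return (((uint32_t)next_word_offset * 2) % page_size) == 0;
}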
 
+/**
+ * e1000_write_eeprom_microwire - Writes 16 bit words to a given offset in a Microwire EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
+static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
+                                       u16 words, u16 *data)
+{
+       struct e1000_eeprom_info *eeprom = &hw->eeprom;
+       u32 eecd;
+       u16 words_written = 0;
+       u16 i = 0;
+
+       DEBUGFUNC("e1000_write_eeprom_microwire");
+
+       /* Send the write enable command to the EEPROM (3-bit opcode plus
+        * 6/8-bit dummy address beginning with 11).  It's less work to include
+        * the 11 of the dummy address as part of the opcode than it is to shift
+        * it over the correct number of bits for the address.  This puts the
+        * EEPROM into write/erase mode.
+        */
+       e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+                               (u16) (eeprom->opcode_bits + 2));
+
+       e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+       /* Prepare the EEPROM */
+       e1000_standby_eeprom(hw);
+
+       while (words_written < words) {
+               /* Send the Write command (3-bit opcode + addr) */
+               e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+                                       eeprom->opcode_bits);
+
+               e1000_shift_out_ee_bits(hw, (u16) (offset + words_written),
+                                       eeprom->address_bits);
+
+               /* Send the data */
+               e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+               /* Toggle the CS line.  This in effect tells the EEPROM to execute
+                * the previous command.
+                */
+               e1000_standby_eeprom(hw);
+
+               /* Read DO repeatedly until it is high (equal to '1').  The EEPROM will
+                * signal that the command has been completed by raising the DO signal.
+                * If DO does not go high in 10 milliseconds, then error out.
+                */
+               for (i = 0; i < 200; i++) {
+                       eecd = er32(EECD);
+                       if (eecd & E1000_EECD_DO)
+                               break;
+                       udelay(50);
+               }
+               if (i == 200) {
+                       DEBUGOUT("EEPROM Write did not complete\n");
+                       return -E1000_ERR_EEPROM;
+               }
+
+               /* Recover from write */
+               e1000_standby_eeprom(hw);
 
-/*****************************************************************************
- * This function reads the cookie from ARC ram.
+               words_written++;
+       }
+
+       /* Send the write disable command to the EEPROM (3-bit opcode plus
+        * 6/8-bit dummy address beginning with 10).  It's less work to include
+        * the 10 of the dummy address as part of the opcode than it is to shift
+        * it over the correct number of bits for the address.  This takes the
+        * EEPROM out of write/erase mode.
+        */
+       e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+                               (u16) (eeprom->opcode_bits + 2));
+
+       e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+       return E1000_SUCCESS;
+}
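/*
 * Sketch of the EWEN/EWDS framing used above: the command is the 3-bit
 * opcode followed by a dummy address whose two leading bits are "11" (write
 * enable) or "10" (write disable).  Folding those two bits into the opcode
 * shift leaves only (address_bits - 2) zero bits to clock out afterwards.
 * The frame structure below is an example, not the driver's representation.
 */
#include <stdint.h>

struct example_mw_frame {
        uint16_t bits;   /* value shifted out, most significant bit first */
        uint16_t nbits;  /* number of bits to shift */
};

static void example_mw_enable_frames(uint16_t ewen_opcode, uint16_t opcode_bits,
                                     uint16_t address_bits,
                                     struct example_mw_frame out[2])
{
        out[0].bits = ewen_opcode;                   /* opcode plus "11" prefix */
        out[0].nbits = (uint16_t)(opcode_bits + 2);
        out[1].bits = 0;                             /* rest of the dummy address */
        out[1].nbits = (uint16_t)(address_bits - 2);
}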
+
+/**
+ * e1000_read_mac_addr - read the adapter's MAC address from the EEPROM
+ * @hw: Struct containing variables accessed by shared code
  *
- * returns: - E1000_SUCCESS .
- ****************************************************************************/
-static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer)
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ */
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
 {
-    u8 i;
-    u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
-    u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
-
-    length = (length >> 2);
-    offset = (offset >> 2);
-
-    for (i = 0; i < length; i++) {
-        *((u32 *)buffer + i) =
-            E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
-    }
-    return E1000_SUCCESS;
-}
+       u16 offset;
+       u16 eeprom_data, i;
 
+       DEBUGFUNC("e1000_read_mac_addr");
 
-/*****************************************************************************
- * This function checks whether the HOST IF is enabled for command operation
- * and also checks whether the previous command is completed.
- * It busy waits in case of previous command is not completed.
+       for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+               offset = i >> 1;
+               if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+                       DEBUGOUT("EEPROM Read Error\n");
+                       return -E1000_ERR_EEPROM;
+               }
+               hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
+               hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8);
+       }
+
+       switch (hw->mac_type) {
+       default:
+               break;
+       case e1000_82546:
+       case e1000_82546_rev_3:
+               if (er32(STATUS) & E1000_STATUS_FUNC_1)
+                       hw->perm_mac_addr[5] ^= 0x01;
+               break;
+       }
+
+       for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+               hw->mac_addr[i] = hw->perm_mac_addr[i];
+       return E1000_SUCCESS;
+}
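/*
 * Sketch of the byte unpacking in e1000_read_mac_addr above: each EEPROM
 * word holds two MAC bytes, low byte first, and on the second port of a
 * dual-port 82546 the least significant bit of the last byte is inverted.
 * The helper name and the is_second_port flag are examples only.
 */
#include <stdint.h>
#include <stdbool.h>

static void example_unpack_mac(const uint16_t eeprom_words[3],
                               bool is_second_port, uint8_t mac[6])
{
        int i;

        for (i = 0; i < 6; i += 2) {
                mac[i] = (uint8_t)(eeprom_words[i >> 1] & 0x00FF);
                mac[i + 1] = (uint8_t)(eeprom_words[i >> 1] >> 8);
        }

        if (is_second_port)
                mac[5] ^= 0x01;        /* invert the LSB for the second function */
}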
+
+/**
+ * e1000_init_rx_addrs - Initializes receive address filters.
+ * @hw: Struct containing variables accessed by shared code
  *
- * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
- *            timeout
- *          - E1000_SUCCESS for success.
- ****************************************************************************/
-static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ */
+static void e1000_init_rx_addrs(struct e1000_hw *hw)
 {
-    u32 hicr;
-    u8 i;
-
-    /* Check that the host interface is enabled. */
-    hicr = er32(HICR);
-    if ((hicr & E1000_HICR_EN) == 0) {
-        DEBUGOUT("E1000_HOST_EN bit disabled.\n");
-        return -E1000_ERR_HOST_INTERFACE_COMMAND;
-    }
-    /* check the previous command is completed */
-    for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
-        hicr = er32(HICR);
-        if (!(hicr & E1000_HICR_C))
-            break;
-        mdelay(1);
-    }
-
-    if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
-        DEBUGOUT("Previous command timeout failed .\n");
-        return -E1000_ERR_HOST_INTERFACE_COMMAND;
-    }
-    return E1000_SUCCESS;
+       u32 i;
+       u32 rar_num;
+
+       DEBUGFUNC("e1000_init_rx_addrs");
+
+       /* Setup the receive address. */
+       DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+       e1000_rar_set(hw, hw->mac_addr, 0);
+
+       rar_num = E1000_RAR_ENTRIES;
+
+       /* Zero out the other 15 receive addresses. */
+       DEBUGOUT("Clearing RAR[1-15]\n");
+       for (i = 1; i < rar_num; i++) {
+               E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+               E1000_WRITE_FLUSH();
+               E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+               E1000_WRITE_FLUSH();
+       }
 }
 
-/*****************************************************************************
- * This function writes the buffer content at the offset given on the host if.
- * It also does alignment considerations to do the writes in most efficient way.
- * Also fills up the sum of the buffer in *buffer parameter.
- *
- * returns  - E1000_SUCCESS for success.
- ****************************************************************************/
-static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
-                                  u16 offset, u8 *sum)
+/**
+ * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table
+ * @hw: Struct containing variables accessed by shared code
+ * @mc_addr: the multicast address to hash
+ */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
 {
-    u8 *tmp;
-    u8 *bufptr = buffer;
-    u32 data = 0;
-    u16 remaining, i, j, prev_bytes;
-
-    /* sum = only sum of the data and it is not checksum */
-
-    if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
-        return -E1000_ERR_PARAM;
-    }
-
-    tmp = (u8 *)&data;
-    prev_bytes = offset & 0x3;
-    offset &= 0xFFFC;
-    offset >>= 2;
-
-    if (prev_bytes) {
-        data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
-        for (j = prev_bytes; j < sizeof(u32); j++) {
-            *(tmp + j) = *bufptr++;
-            *sum += *(tmp + j);
-        }
-        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
-        length -= j - prev_bytes;
-        offset++;
-    }
-
-    remaining = length & 0x3;
-    length -= remaining;
-
-    /* Calculate length in DWORDs */
-    length >>= 2;
-
-    /* The device driver writes the relevant command block into the
-     * ram area. */
-    for (i = 0; i < length; i++) {
-        for (j = 0; j < sizeof(u32); j++) {
-            *(tmp + j) = *bufptr++;
-            *sum += *(tmp + j);
-        }
-
-        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
-    }
-    if (remaining) {
-        for (j = 0; j < sizeof(u32); j++) {
-            if (j < remaining)
-                *(tmp + j) = *bufptr++;
-            else
-                *(tmp + j) = 0;
-
-            *sum += *(tmp + j);
-        }
-        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
-    }
-
-    return E1000_SUCCESS;
+       u32 hash_value = 0;
+
+       /* The portion of the address that is used for the hash table is
+        * determined by the mc_filter_type setting.
+        */
+       switch (hw->mc_filter_type) {
+               /* [0] [1] [2] [3] [4] [5]
+                * 01  AA  00  12  34  56
+                * LSB                 MSB
+                */
+       case 0:
+               /* [47:36] i.e. 0x563 for above example address */
+               hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+               break;
+       case 1:
+               /* [46:35] i.e. 0xAC6 for above example address */
+               hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
+               break;
+       case 2:
+               /* [45:34] i.e. 0x5D8 for above example address */
+               hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+               break;
+       case 3:
+               /* [43:32] i.e. 0x634 for above example address */
+               hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
+               break;
+       }
+
+       hash_value &= 0xFFF;
+       return hash_value;
 }
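/*
 * Worked example of the filter-type-0 hash above, using the address from
 * the comment (01:AA:00:12:34:56).  The standalone helper is an example;
 * the driver selects the bit range from hw->mc_filter_type instead.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t example_hash_type0(const uint8_t mc_addr[6])
{
        /* bits [47:36]: top 4 bits of byte 4 plus all 8 bits of byte 5 */
        return (((uint32_t)mc_addr[4] >> 4) | ((uint32_t)mc_addr[5] << 4)) & 0xFFF;
}

static void example_hash_check(void)
{
        const uint8_t addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

        assert(example_hash_type0(addr) == 0x563);   /* matches the comment above */
}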
 
+/**
+ * e1000_rar_set - Puts an ethernet address into a receive address register.
+ * @hw: Struct containing variables accessed by shared code
+ * @addr: Address to put into receive address register
+ * @index: Receive address register to write
+ */
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+       u32 rar_low, rar_high;
 
-/*****************************************************************************
- * This function writes the command header after does the checksum calculation.
- *
- * returns  - E1000_SUCCESS for success.
- ****************************************************************************/
-static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
-                                     struct e1000_host_mng_command_header *hdr)
+       /* HW expects these in little endian so we reverse the byte order
+        * from network order (big endian) to little endian
+        */
+       rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+                  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+       /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+        * unit hang.
+        *
+        * Description:
+        * If there are any Rx frames queued up or otherwise present in the HW
+        * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+        * hang.  To work around this issue, we have to disable receives and
+        * flush out all Rx frames before we enable RSS. To do so, we redirect
+        * all Rx traffic to manageability and then reset the HW.
+        * This flushes away Rx frames, and (since the redirection to
+        * manageability persists across resets) keeps new ones from coming in
+        * while we work.  Then, we clear the Address Valid (AV) bit for all MAC
+        * addresses and undo the re-direction to manageability.
+        * Now, frames are coming in again, but the MAC won't accept them, so
+        * far so good.  We now proceed to initialize RSS (if necessary) and
+        * configure the Rx unit.  Last, we re-enable the AV bits and continue
+        * on our merry way.
+        */
+       switch (hw->mac_type) {
+       default:
+               /* Indicate to hardware the Address is Valid. */
+               rar_high |= E1000_RAH_AV;
+               break;
+       }
+
+       E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+       E1000_WRITE_FLUSH();
+       E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+       E1000_WRITE_FLUSH();
+}
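/*
 * Sketch of the RAR packing in e1000_rar_set above: the six MAC bytes arrive
 * in network (big-endian) order and are packed little-endian into the
 * low/high register words, with the Address Valid bit set in the high word.
 * EXAMPLE_RAH_AV is a placeholder for the real E1000_RAH_AV definition.
 */
#include <stdint.h>

#define EXAMPLE_RAH_AV 0x80000000u

static void example_pack_rar(const uint8_t addr[6], uint32_t *rar_low,
                             uint32_t *rar_high)
{
        *rar_low = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
        *rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8) | EXAMPLE_RAH_AV;
}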
+
+/**
+ * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: Offset in VLAN filter table to write
+ * @value: Value to write into VLAN filter table
+ */
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
 {
-    u16 i;
-    u8 sum;
-    u8 *buffer;
+       u32 temp;
+
+       if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+               temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+               E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+               E1000_WRITE_FLUSH();
+               E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+               E1000_WRITE_FLUSH();
+       } else {
+               E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+               E1000_WRITE_FLUSH();
+       }
+}
 
-    /* Write the whole command header structure which includes sum of
-     * the buffer */
+/**
+ * e1000_clear_vfta - Clears the VLAN filter table
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_vfta(struct e1000_hw *hw)
+{
+       u32 offset;
+       u32 vfta_value = 0;
+       u32 vfta_offset = 0;
+       u32 vfta_bit_in_reg = 0;
+
+       for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+               /* If the offset we want to clear is the same offset of the
+                * manageability VLAN ID, then clear all bits except that of the
+                * manageability unit */
+               vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+               E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+               E1000_WRITE_FLUSH();
+       }
+}
 
-    u16 length = sizeof(struct e1000_host_mng_command_header);
+static s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+       u32 ledctl;
+       const u32 ledctl_mask = 0x000000FF;
+       const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+       const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+       u16 eeprom_data, i, temp;
+       const u16 led_mask = 0x0F;
 
-    sum = hdr->checksum;
-    hdr->checksum = 0;
+       DEBUGFUNC("e1000_id_led_init");
 
-    buffer = (u8 *)hdr;
-    i = length;
-    while (i--)
-        sum += buffer[i];
+       if (hw->mac_type < e1000_82540) {
+               /* Nothing to do */
+               return E1000_SUCCESS;
+       }
 
-    hdr->checksum = 0 - sum;
+       ledctl = er32(LEDCTL);
+       hw->ledctl_default = ledctl;
+       hw->ledctl_mode1 = hw->ledctl_default;
+       hw->ledctl_mode2 = hw->ledctl_default;
 
-    length >>= 2;
-    /* The device driver writes the relevant command block into the ram area. */
-    for (i = 0; i < length; i++) {
-        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i));
-        E1000_WRITE_FLUSH();
-    }
+       if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+               DEBUGOUT("EEPROM Read Error\n");
+               return -E1000_ERR_EEPROM;
+       }
 
-    return E1000_SUCCESS;
-}
+       if ((eeprom_data == ID_LED_RESERVED_0000) ||
+           (eeprom_data == ID_LED_RESERVED_FFFF)) {
+               eeprom_data = ID_LED_DEFAULT;
+       }
 
+       for (i = 0; i < 4; i++) {
+               temp = (eeprom_data >> (i << 2)) & led_mask;
+               switch (temp) {
+               case ID_LED_ON1_DEF2:
+               case ID_LED_ON1_ON2:
+               case ID_LED_ON1_OFF2:
+                       hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+                       hw->ledctl_mode1 |= ledctl_on << (i << 3);
+                       break;
+               case ID_LED_OFF1_DEF2:
+               case ID_LED_OFF1_ON2:
+               case ID_LED_OFF1_OFF2:
+                       hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+                       hw->ledctl_mode1 |= ledctl_off << (i << 3);
+                       break;
+               default:
+                       /* Do nothing */
+                       break;
+               }
+               switch (temp) {
+               case ID_LED_DEF1_ON2:
+               case ID_LED_ON1_ON2:
+               case ID_LED_OFF1_ON2:
+                       hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+                       hw->ledctl_mode2 |= ledctl_on << (i << 3);
+                       break;
+               case ID_LED_DEF1_OFF2:
+               case ID_LED_ON1_OFF2:
+               case ID_LED_OFF1_OFF2:
+                       hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+                       hw->ledctl_mode2 |= ledctl_off << (i << 3);
+                       break;
+               default:
+                       /* Do nothing */
+                       break;
+               }
+       }
+       return E1000_SUCCESS;
+}
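/*
 * Sketch of the nibble decode in e1000_id_led_init above: the ID LED EEPROM
 * word carries one 4-bit selector per LED, and each selector chooses an
 * 8-bit mode field inside LEDCTL.  Only the shift arithmetic is shown; the
 * ID_LED_* case analysis stays in the function above.
 */
#include <stdint.h>

/* 4-bit selector for LED "led_index" (0..3) out of the EEPROM word */
static uint16_t example_led_field(uint16_t eeprom_data, int led_index)
{
        return (uint16_t)((eeprom_data >> (led_index << 2)) & 0x0F);
}

/* Replace the 8-bit LEDCTL mode field belonging to LED "led_index" */
static uint32_t example_set_led_mode(uint32_t ledctl, int led_index, uint32_t mode)
{
        int shift = led_index << 3;

        ledctl &= ~(0x000000FFu << shift);
        ledctl |= mode << shift;
        return ledctl;
}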
 
-/*****************************************************************************
- * This function indicates to ARC that a new command is pending which completes
- * one write operation by the driver.
+/**
+ * e1000_setup_led
+ * @hw: Struct containing variables accessed by shared code
  *
- * returns  - E1000_SUCCESS for success.
- ****************************************************************************/
-static s32 e1000_mng_write_commit(struct e1000_hw *hw)
+ * Prepares the SW controllable LED for use and saves the current state of the LED.
+ */
+s32 e1000_setup_led(struct e1000_hw *hw)
 {
-    u32 hicr;
+       u32 ledctl;
+       s32 ret_val = E1000_SUCCESS;
 
-    hicr = er32(HICR);
-    /* Setting this bit tells the ARC that a new command is pending. */
-    ew32(HICR, hicr | E1000_HICR_C);
+       DEBUGFUNC("e1000_setup_led");
 
-    return E1000_SUCCESS;
+       switch (hw->mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+       case e1000_82543:
+       case e1000_82544:
+               /* No setup necessary */
+               break;
+       case e1000_82541:
+       case e1000_82547:
+       case e1000_82541_rev_2:
+       case e1000_82547_rev_2:
+               /* Turn off PHY Smart Power Down (if enabled) */
+               ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                            &hw->phy_spd_default);
+               if (ret_val)
+                       return ret_val;
+               ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                             (u16) (hw->phy_spd_default &
+                                                    ~IGP01E1000_GMII_SPD));
+               if (ret_val)
+                       return ret_val;
+               /* Fall Through */
+       default:
+               if (hw->media_type == e1000_media_type_fiber) {
+                       ledctl = er32(LEDCTL);
+                       /* Save current LEDCTL settings */
+                       hw->ledctl_default = ledctl;
+                       /* Turn off LED0 */
+                       ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+                                   E1000_LEDCTL_LED0_BLINK |
+                                   E1000_LEDCTL_LED0_MODE_MASK);
+                       ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+                                  E1000_LEDCTL_LED0_MODE_SHIFT);
+                       ew32(LEDCTL, ledctl);
+               } else if (hw->media_type == e1000_media_type_copper)
+                       ew32(LEDCTL, hw->ledctl_mode1);
+               break;
+       }
+
+       return E1000_SUCCESS;
 }
 
+/**
+ * e1000_cleanup_led - Restores the saved state of the SW controllable LED.
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_cleanup_led");
 
-/*****************************************************************************
- * This function checks the mode of the firmware.
+       switch (hw->mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+       case e1000_82543:
+       case e1000_82544:
+               /* No cleanup necessary */
+               break;
+       case e1000_82541:
+       case e1000_82547:
+       case e1000_82541_rev_2:
+       case e1000_82547_rev_2:
+               /* Turn on PHY Smart Power Down (if previously enabled) */
+               ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                             hw->phy_spd_default);
+               if (ret_val)
+                       return ret_val;
+               /* Fall Through */
+       default:
+               /* Restore LEDCTL settings */
+               ew32(LEDCTL, hw->ledctl_default);
+               break;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on - Turns on the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+       u32 ctrl = er32(CTRL);
+
+       DEBUGFUNC("e1000_led_on");
+
+       switch (hw->mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+       case e1000_82543:
+               /* Set SW Definable Pin 0 to turn on the LED */
+               ctrl |= E1000_CTRL_SWDPIN0;
+               ctrl |= E1000_CTRL_SWDPIO0;
+               break;
+       case e1000_82544:
+               if (hw->media_type == e1000_media_type_fiber) {
+                       /* Set SW Definable Pin 0 to turn on the LED */
+                       ctrl |= E1000_CTRL_SWDPIN0;
+                       ctrl |= E1000_CTRL_SWDPIO0;
+               } else {
+                       /* Clear SW Definable Pin 0 to turn on the LED */
+                       ctrl &= ~E1000_CTRL_SWDPIN0;
+                       ctrl |= E1000_CTRL_SWDPIO0;
+               }
+               break;
+       default:
+               if (hw->media_type == e1000_media_type_fiber) {
+                       /* Clear SW Definable Pin 0 to turn on the LED */
+                       ctrl &= ~E1000_CTRL_SWDPIN0;
+                       ctrl |= E1000_CTRL_SWDPIO0;
+               } else if (hw->media_type == e1000_media_type_copper) {
+                       ew32(LEDCTL, hw->ledctl_mode2);
+                       return E1000_SUCCESS;
+               }
+               break;
+       }
+
+       ew32(CTRL, ctrl);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off - Turns off the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+       u32 ctrl = er32(CTRL);
+
+       DEBUGFUNC("e1000_led_off");
+
+       switch (hw->mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+       case e1000_82543:
+               /* Clear SW Definable Pin 0 to turn off the LED */
+               ctrl &= ~E1000_CTRL_SWDPIN0;
+               ctrl |= E1000_CTRL_SWDPIO0;
+               break;
+       case e1000_82544:
+               if (hw->media_type == e1000_media_type_fiber) {
+                       /* Clear SW Definable Pin 0 to turn off the LED */
+                       ctrl &= ~E1000_CTRL_SWDPIN0;
+                       ctrl |= E1000_CTRL_SWDPIO0;
+               } else {
+                       /* Set SW Definable Pin 0 to turn off the LED */
+                       ctrl |= E1000_CTRL_SWDPIN0;
+                       ctrl |= E1000_CTRL_SWDPIO0;
+               }
+               break;
+       default:
+               if (hw->media_type == e1000_media_type_fiber) {
+                       /* Set SW Definable Pin 0 to turn off the LED */
+                       ctrl |= E1000_CTRL_SWDPIN0;
+                       ctrl |= E1000_CTRL_SWDPIO0;
+               } else if (hw->media_type == e1000_media_type_copper) {
+                       ew32(LEDCTL, hw->ledctl_mode1);
+                       return E1000_SUCCESS;
+               }
+               break;
+       }
+
+       ew32(CTRL, ctrl);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_hw_cntrs - Clears all hardware statistics counters.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+       volatile u32 temp;
+
+       temp = er32(CRCERRS);
+       temp = er32(SYMERRS);
+       temp = er32(MPC);
+       temp = er32(SCC);
+       temp = er32(ECOL);
+       temp = er32(MCC);
+       temp = er32(LATECOL);
+       temp = er32(COLC);
+       temp = er32(DC);
+       temp = er32(SEC);
+       temp = er32(RLEC);
+       temp = er32(XONRXC);
+       temp = er32(XONTXC);
+       temp = er32(XOFFRXC);
+       temp = er32(XOFFTXC);
+       temp = er32(FCRUC);
+
+       temp = er32(PRC64);
+       temp = er32(PRC127);
+       temp = er32(PRC255);
+       temp = er32(PRC511);
+       temp = er32(PRC1023);
+       temp = er32(PRC1522);
+
+       temp = er32(GPRC);
+       temp = er32(BPRC);
+       temp = er32(MPRC);
+       temp = er32(GPTC);
+       temp = er32(GORCL);
+       temp = er32(GORCH);
+       temp = er32(GOTCL);
+       temp = er32(GOTCH);
+       temp = er32(RNBC);
+       temp = er32(RUC);
+       temp = er32(RFC);
+       temp = er32(ROC);
+       temp = er32(RJC);
+       temp = er32(TORL);
+       temp = er32(TORH);
+       temp = er32(TOTL);
+       temp = er32(TOTH);
+       temp = er32(TPR);
+       temp = er32(TPT);
+
+       temp = er32(PTC64);
+       temp = er32(PTC127);
+       temp = er32(PTC255);
+       temp = er32(PTC511);
+       temp = er32(PTC1023);
+       temp = er32(PTC1522);
+
+       temp = er32(MPTC);
+       temp = er32(BPTC);
+
+       if (hw->mac_type < e1000_82543)
+               return;
+
+       temp = er32(ALGNERRC);
+       temp = er32(RXERRC);
+       temp = er32(TNCRS);
+       temp = er32(CEXTERR);
+       temp = er32(TSCTC);
+       temp = er32(TSCTFC);
+
+       if (hw->mac_type <= e1000_82544)
+               return;
+
+       temp = er32(MGTPRC);
+       temp = er32(MGTPDC);
+       temp = er32(MGTPTC);
+}
+
+/**
+ * e1000_reset_adaptive - Resets Adaptive IFS to its default state.
+ * @hw: Struct containing variables accessed by shared code
  *
- * returns  - true when the mode is IAMT or false.
- ****************************************************************************/
-bool e1000_check_mng_mode(struct e1000_hw *hw)
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to true. However, you must initialize
+ * hw->current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and
+ * ifs_ratio before calling this function.
+ */
+void e1000_reset_adaptive(struct e1000_hw *hw)
 {
-    u32 fwsm;
-
-    fwsm = er32(FWSM);
-
-    if (hw->mac_type == e1000_ich8lan) {
-        if ((fwsm & E1000_FWSM_MODE_MASK) ==
-            (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
-            return true;
-    } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
-               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
-        return true;
+       DEBUGFUNC("e1000_reset_adaptive");
 
-    return false;
+       if (hw->adaptive_ifs) {
+               if (!hw->ifs_params_forced) {
+                       hw->current_ifs_val = 0;
+                       hw->ifs_min_val = IFS_MIN;
+                       hw->ifs_max_val = IFS_MAX;
+                       hw->ifs_step_size = IFS_STEP;
+                       hw->ifs_ratio = IFS_RATIO;
+               }
+               hw->in_ifs_mode = false;
+               ew32(AIT, 0);
+       } else {
+               DEBUGOUT("Not in Adaptive IFS mode!\n");
+       }
 }
 
-
-/*****************************************************************************
- * This function writes the dhcp info .
- ****************************************************************************/
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+/**
+ * e1000_update_adaptive - update adaptive IFS
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Called during the callback/watchdog routine to update the IFS value based
+ * on the ratio of transmits (hw->tx_packet_delta) to collisions
+ * (hw->collision_delta) seen since the last callback.
+ */
+void e1000_update_adaptive(struct e1000_hw *hw)
 {
-    s32 ret_val;
-    struct e1000_host_mng_command_header hdr;
-
-    hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
-    hdr.command_length = length;
-    hdr.reserved1 = 0;
-    hdr.reserved2 = 0;
-    hdr.checksum = 0;
-
-    ret_val = e1000_mng_enable_host_if(hw);
-    if (ret_val == E1000_SUCCESS) {
-        ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
-                                          &(hdr.checksum));
-        if (ret_val == E1000_SUCCESS) {
-            ret_val = e1000_mng_write_cmd_header(hw, &hdr);
-            if (ret_val == E1000_SUCCESS)
-                ret_val = e1000_mng_write_commit(hw);
-        }
-    }
-    return ret_val;
+       DEBUGFUNC("e1000_update_adaptive");
+
+       if (hw->adaptive_ifs) {
+               if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
+                       if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+                               hw->in_ifs_mode = true;
+                               if (hw->current_ifs_val < hw->ifs_max_val) {
+                                       if (hw->current_ifs_val == 0)
+                                               hw->current_ifs_val =
+                                                   hw->ifs_min_val;
+                                       else
+                                               hw->current_ifs_val +=
+                                                   hw->ifs_step_size;
+                                       ew32(AIT, hw->current_ifs_val);
+                               }
+                       }
+               } else {
+                       if (hw->in_ifs_mode
+                           && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+                               hw->current_ifs_val = 0;
+                               hw->in_ifs_mode = false;
+                               ew32(AIT, 0);
+                       }
+               }
+       } else {
+               DEBUGOUT("Not in Adaptive IFS mode!\n");
+       }
 }
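/*
 * Sketch of the decision in e1000_update_adaptive above: when collisions
 * times ifs_ratio exceed the transmits on a busy link, the inter-frame gap
 * steps up toward ifs_max_val; once traffic quiets down it drops back to 0.
 * The structure and example_write_ait() stand in for the e1000_hw fields and
 * ew32(AIT, ...) and are examples only.
 */
#include <stdint.h>
#include <stdbool.h>

struct example_ifs_state {
        bool in_ifs_mode;
        uint16_t current, min, max, step, ratio;
};

static void example_write_ait(uint16_t val)
{
        (void)val;        /* would program the adaptive IFS throttle register */
}

static void example_update_ifs(struct example_ifs_state *s, uint32_t tx_packets,
                               uint32_t collisions, uint32_t min_xmits)
{
        if (collisions * s->ratio > tx_packets) {
                if (tx_packets > min_xmits) {
                        s->in_ifs_mode = true;
                        if (s->current < s->max) {
                                s->current = s->current ? s->current + s->step
                                                        : s->min;
                                example_write_ait(s->current);
                        }
                }
        } else if (s->in_ifs_mode && tx_packets <= min_xmits) {
                s->current = 0;
                s->in_ifs_mode = false;
                example_write_ait(0);
        }
}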
 
-
-/*****************************************************************************
- * This function calculates the checksum.
+/**
+ * e1000_tbi_adjust_stats
+ * @hw: Struct containing variables accessed by shared code
+ * @stats: Struct containing the statistics counters to adjust
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
  *
- * returns  - checksum of buffer contents.
- ****************************************************************************/
-static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ */
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+                           u32 frame_len, u8 *mac_addr)
 {
-    u8 sum = 0;
-    u32 i;
-
-    if (!buffer)
-        return 0;
+       u64 carry_bit;
 
-    for (i=0; i < length; i++)
-        sum += buffer[i];
+       /* First adjust the frame length. */
+       frame_len--;
+       /* We need to adjust the statistics counters, since the hardware
+        * counters overcount this packet as a CRC error and undercount
+        * the packet as a good packet
+        */
+       /* This packet should not be counted as a CRC error.    */
+       stats->crcerrs--;
+       /* This packet does count as a Good Packet Received.    */
+       stats->gprc++;
+
+       /* Adjust the Good Octets received counters             */
+       carry_bit = 0x80000000 & stats->gorcl;
+       stats->gorcl += frame_len;
+       /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+        * Received Count) was one before the addition,
+        * AND it is zero after, then we lost the carry out,
+        * need to add one to Gorch (Good Octets Received Count High).
+        * This could be simplified if all environments supported
+        * 64-bit integers.
+        */
+       if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+               stats->gorch++;
+       /* Is this a broadcast or multicast?  Check broadcast first,
+        * since the test for a multicast frame will test positive on
+        * a broadcast frame.
+        */
+       if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+               /* Broadcast packet */
+               stats->bprc++;
+       else if (*mac_addr & 0x01)
+               /* Multicast packet */
+               stats->mprc++;
+
+       if (frame_len == hw->max_frame_size) {
+               /* In this case, the hardware has overcounted the number of
+                * oversize frames.
+                */
+               if (stats->roc > 0)
+                       stats->roc--;
+       }
 
-    return (u8)(0 - sum);
+       /* Adjust the bin counters when the extra byte put the frame in the
+        * wrong bin. Remember that the frame_len was adjusted above.
+        */
+       if (frame_len == 64) {
+               stats->prc64++;
+               stats->prc127--;
+       } else if (frame_len == 127) {
+               stats->prc127++;
+               stats->prc255--;
+       } else if (frame_len == 255) {
+               stats->prc255++;
+               stats->prc511--;
+       } else if (frame_len == 511) {
+               stats->prc511++;
+               stats->prc1023--;
+       } else if (frame_len == 1023) {
+               stats->prc1023++;
+               stats->prc1522--;
+       } else if (frame_len == 1522) {
+               stats->prc1522++;
+       }
 }
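/*
 * Sketch of the 32-bit carry handling in e1000_tbi_adjust_stats above: GORCL
 * and GORCH form one 64-bit good-octets counter, so when adding the frame
 * length clears a previously set top bit of the low word, the high word must
 * be bumped by hand.  The structure is an example stand-in for the
 * gorcl/gorch pair in struct e1000_hw_stats.
 */
#include <stdint.h>

struct example_octet_counter {
        uint32_t lo;        /* GORCL */
        uint32_t hi;        /* GORCH */
};

static void example_add_good_octets(struct example_octet_counter *c,
                                    uint32_t frame_len)
{
        uint32_t carry_bit = c->lo & 0x80000000u;

        c->lo += frame_len;
        /* top bit set before and clear after: the addition carried out */
        if (carry_bit && (c->lo & 0x80000000u) == 0)
                c->hi++;
}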
 
-/*****************************************************************************
- * This function checks whether tx pkt filtering needs to be enabled or not.
+/**
+ * e1000_get_bus_info
+ * @hw: Struct containing variables accessed by shared code
  *
- * returns  - true for packet filtering or false.
- ****************************************************************************/
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+ * Gets the current PCI bus type, speed, and width of the hardware
+ */
+void e1000_get_bus_info(struct e1000_hw *hw)
 {
-    /* called in init as well as watchdog timer functions */
-
-    s32 ret_val, checksum;
-    bool tx_filter = false;
-    struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
-    u8 *buffer = (u8 *) &(hw->mng_cookie);
-
-    if (e1000_check_mng_mode(hw)) {
-        ret_val = e1000_mng_enable_host_if(hw);
-        if (ret_val == E1000_SUCCESS) {
-            ret_val = e1000_host_if_read_cookie(hw, buffer);
-            if (ret_val == E1000_SUCCESS) {
-                checksum = hdr->checksum;
-                hdr->checksum = 0;
-                if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
-                    checksum == e1000_calculate_mng_checksum((char *)buffer,
-                                               E1000_MNG_DHCP_COOKIE_LENGTH)) {
-                    if (hdr->status &
-                        E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
-                        tx_filter = true;
-                } else
-                    tx_filter = true;
-            } else
-                tx_filter = true;
-        }
-    }
-
-    hw->tx_pkt_filtering = tx_filter;
-    return tx_filter;
+       u32 status;
+
+       switch (hw->mac_type) {
+       case e1000_82542_rev2_0:
+       case e1000_82542_rev2_1:
+               hw->bus_type = e1000_bus_type_pci;
+               hw->bus_speed = e1000_bus_speed_unknown;
+               hw->bus_width = e1000_bus_width_unknown;
+               break;
+       default:
+               status = er32(STATUS);
+               hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+                   e1000_bus_type_pcix : e1000_bus_type_pci;
+
+               if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+                       hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+                           e1000_bus_speed_66 : e1000_bus_speed_120;
+               } else if (hw->bus_type == e1000_bus_type_pci) {
+                       hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+                           e1000_bus_speed_66 : e1000_bus_speed_33;
+               } else {
+                       switch (status & E1000_STATUS_PCIX_SPEED) {
+                       case E1000_STATUS_PCIX_SPEED_66:
+                               hw->bus_speed = e1000_bus_speed_66;
+                               break;
+                       case E1000_STATUS_PCIX_SPEED_100:
+                               hw->bus_speed = e1000_bus_speed_100;
+                               break;
+                       case E1000_STATUS_PCIX_SPEED_133:
+                               hw->bus_speed = e1000_bus_speed_133;
+                               break;
+                       default:
+                               hw->bus_speed = e1000_bus_speed_reserved;
+                               break;
+                       }
+               }
+               hw->bus_width = (status & E1000_STATUS_BUS64) ?
+                   e1000_bus_width_64 : e1000_bus_width_32;
+               break;
+       }
 }
 
-/******************************************************************************
- * Verifies the hardware needs to allow ARPs to be processed by the host
- *
- * hw - Struct containing variables accessed by shared code
- *
- * returns: - true/false
+/**
+ * e1000_write_reg_io
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset to write to
+ * @value: value to write
  *
- *****************************************************************************/
-u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+ * Writes a value to one of the device's registers using port I/O (as opposed to
+ * memory mapped I/O). Only 82544 and newer devices support port I/O.
+ */
+static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
 {
-    u32 manc;
-    u32 fwsm, factps;
-
-    if (hw->asf_firmware_present) {
-        manc = er32(MANC);
-
-        if (!(manc & E1000_MANC_RCV_TCO_EN) ||
-            !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
-            return false;
-        if (e1000_arc_subsystem_valid(hw)) {
-            fwsm = er32(FWSM);
-            factps = er32(FACTPS);
-
-            if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
-                   e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
-                return true;
-        } else
-            if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
-                return true;
-    }
-    return false;
-}
+       unsigned long io_addr = hw->io_base;
+       unsigned long io_data = hw->io_base + 4;
 
-static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
-{
-    s32 ret_val;
-    u16 mii_status_reg;
-    u16 i;
-
-    /* Polarity reversal workaround for forced 10F/10H links. */
-
-    /* Disable the transmitter on the PHY */
-
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
-    if (ret_val)
-        return ret_val;
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
-    if (ret_val)
-        return ret_val;
-
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
-    if (ret_val)
-        return ret_val;
-
-    /* This loop will early-out if the NO link condition has been met. */
-    for (i = PHY_FORCE_TIME; i > 0; i--) {
-        /* Read the MII Status Register and wait for Link Status bit
-         * to be clear.
-         */
-
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if (ret_val)
-            return ret_val;
-
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if (ret_val)
-            return ret_val;
-
-        if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
-        mdelay(100);
-    }
-
-    /* Recommended delay time after link has been lost */
-    mdelay(1000);
-
-    /* Now we will re-enable the transmitter on the PHY */
-
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
-    if (ret_val)
-        return ret_val;
-    mdelay(50);
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
-    if (ret_val)
-        return ret_val;
-    mdelay(50);
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
-    if (ret_val)
-        return ret_val;
-    mdelay(50);
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
-    if (ret_val)
-        return ret_val;
-
-    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
-    if (ret_val)
-        return ret_val;
-
-    /* This loop will early-out if the link condition has been met. */
-    for (i = PHY_FORCE_TIME; i > 0; i--) {
-        /* Read the MII Status Register and wait for Link Status bit
-         * to be set.
-         */
-
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if (ret_val)
-            return ret_val;
-
-        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if (ret_val)
-            return ret_val;
-
-        if (mii_status_reg & MII_SR_LINK_STATUS) break;
-        mdelay(100);
-    }
-    return E1000_SUCCESS;
+       e1000_io_write(hw, io_addr, offset);
+       e1000_io_write(hw, io_data, value);
 }
 
-/***************************************************************************
+/**
+ * e1000_get_cable_length - Estimates the cable length.
+ * @hw: Struct containing variables accessed by shared code
+ * @min_length: The estimated minimum length
+ * @max_length: The estimated maximum length
  *
- * Disables PCI-Express master access.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - none.
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
  *
- ***************************************************************************/
-static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 PHYs, it maps the single value returned from the PHY register onto
+ * a minimum/maximum range.
+ * For IGP PHYs, it derives the range from the AGC registers.
+ */
+static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
+                                 u16 *max_length)
 {
-    u32 ctrl;
-
-    DEBUGFUNC("e1000_set_pci_express_master_disable");
+       s32 ret_val;
+       u16 agc_value = 0;
+       u16 i, phy_data;
+       u16 cable_length;
 
-    if (hw->bus_type != e1000_bus_type_pci_express)
-        return;
+       DEBUGFUNC("e1000_get_cable_length");
 
-    ctrl = er32(CTRL);
-    ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
-    ew32(CTRL, ctrl);
-}
+       *min_length = *max_length = 0;
 
-/*******************************************************************************
- *
- * Disables PCI-Express master access and verifies there are no pending requests
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
- *            caused the master requests to be disabled.
- *            E1000_SUCCESS master requests disabled.
- *
- ******************************************************************************/
-s32 e1000_disable_pciex_master(struct e1000_hw *hw)
-{
-    s32 timeout = MASTER_DISABLE_TIMEOUT;   /* 80ms */
+       /* Use old method for Phy older than IGP */
+       if (hw->phy_type == e1000_phy_m88) {
 
-    DEBUGFUNC("e1000_disable_pciex_master");
+               ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                            &phy_data);
+               if (ret_val)
+                       return ret_val;
+               cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                   M88E1000_PSSR_CABLE_LENGTH_SHIFT;
 
-    if (hw->bus_type != e1000_bus_type_pci_express)
-        return E1000_SUCCESS;
+               /* Convert the enum value to ranged values */
+               switch (cable_length) {
+               case e1000_cable_length_50:
+                       *min_length = 0;
+                       *max_length = e1000_igp_cable_length_50;
+                       break;
+               case e1000_cable_length_50_80:
+                       *min_length = e1000_igp_cable_length_50;
+                       *max_length = e1000_igp_cable_length_80;
+                       break;
+               case e1000_cable_length_80_110:
+                       *min_length = e1000_igp_cable_length_80;
+                       *max_length = e1000_igp_cable_length_110;
+                       break;
+               case e1000_cable_length_110_140:
+                       *min_length = e1000_igp_cable_length_110;
+                       *max_length = e1000_igp_cable_length_140;
+                       break;
+               case e1000_cable_length_140:
+                       *min_length = e1000_igp_cable_length_140;
+                       *max_length = e1000_igp_cable_length_170;
+                       break;
+               default:
+                       return -E1000_ERR_PHY;
+                       break;
+               }
+       } else if (hw->phy_type == e1000_phy_igp) {     /* For IGP PHY */
+               u16 cur_agc_value;
+               u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+               u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                   { IGP01E1000_PHY_AGC_A,
+                       IGP01E1000_PHY_AGC_B,
+                       IGP01E1000_PHY_AGC_C,
+                       IGP01E1000_PHY_AGC_D
+               };
+               /* Read the AGC registers for all channels */
+               for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+                       ret_val =
+                           e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+                       if (ret_val)
+                               return ret_val;
+
+                       cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+                       /* Value bound check. */
+                       if ((cur_agc_value >=
+                            IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1)
+                           || (cur_agc_value == 0))
+                               return -E1000_ERR_PHY;
+
+                       agc_value += cur_agc_value;
+
+                       /* Update minimal AGC value. */
+                       if (min_agc_value > cur_agc_value)
+                               min_agc_value = cur_agc_value;
+               }
 
-    e1000_set_pci_express_master_disable(hw);
+               /* Remove the minimal AGC result for length < 50m */
+               if (agc_value <
+                   IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+                       agc_value -= min_agc_value;
 
-    while (timeout) {
-        if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
-            break;
-        else
-            udelay(100);
-        timeout--;
-    }
+                       /* Get the average length of the remaining 3 channels */
+                       agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+               } else {
+                       /* Get the average length of all the 4 channels. */
+                       agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+               }
 
-    if (!timeout) {
-        DEBUGOUT("Master requests are pending.\n");
-        return -E1000_ERR_MASTER_REQUESTS_PENDING;
-    }
+               /* Set the range of the calculated length. */
+               *min_length = ((e1000_igp_cable_length_table[agc_value] -
+                               IGP01E1000_AGC_RANGE) > 0) ?
+                   (e1000_igp_cable_length_table[agc_value] -
+                    IGP01E1000_AGC_RANGE) : 0;
+               *max_length = e1000_igp_cable_length_table[agc_value] +
+                   IGP01E1000_AGC_RANGE;
+       }
 
-    return E1000_SUCCESS;
+       return E1000_SUCCESS;
 }
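
Illustrative aside (not part of the patch): a minimal userspace sketch of the IGP AGC averaging above. The table contents, channel readings and AGC_RANGE below are made-up placeholders, not the driver's e1000_igp_cable_length_table values.

/* Standalone sketch of the IGP cable-length averaging (illustrative only). */
#include <stdio.h>

#define CHANNELS   4
#define AGC_RANGE 10            /* assumed +/- tolerance, in meters */

static void igp_length_range(const unsigned int agc[CHANNELS],
                             const unsigned int *table,
                             unsigned int fifty_m_index,
                             unsigned int *min_len, unsigned int *max_len)
{
        unsigned int sum = 0, min_agc = agc[0];
        int i;

        for (i = 0; i < CHANNELS; i++) {
                sum += agc[i];
                if (agc[i] < min_agc)
                        min_agc = agc[i];
        }

        /* For short cables, drop the weakest channel before averaging. */
        if (sum < CHANNELS * fifty_m_index)
                sum = (sum - min_agc) / (CHANNELS - 1);
        else
                sum /= CHANNELS;

        *min_len = table[sum] > AGC_RANGE ? table[sum] - AGC_RANGE : 0;
        *max_len = table[sum] + AGC_RANGE;
}

int main(void)
{
        /* Placeholder table: index approximates meters directly. */
        unsigned int table[128], agc[CHANNELS] = { 30, 28, 31, 12 }, lo, hi;
        unsigned int i;

        for (i = 0; i < 128; i++)
                table[i] = i;

        igp_length_range(agc, table, 50, &lo, &hi);
        printf("estimated cable length: %u-%u m\n", lo, hi);
        return 0;
}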
 
-/*******************************************************************************
- *
- * Check for EEPROM Auto Read bit done.
- *
- * hw: Struct containing variables accessed by shared code
+/**
+ * e1000_check_polarity - Check the cable polarity
+ * @hw: Struct containing variables accessed by shared code
+ * @polarity: output parameter : 0 - Polarity is not reversed
+ *                               1 - Polarity is reversed.
  *
- * returns: - E1000_ERR_RESET if fail to reset MAC
- *            E1000_SUCCESS at any other case.
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
  *
- ******************************************************************************/
-static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
+ * For phy's older than IGP, this function simply reads the polarity bit in the
+ * Phy Status register.  For IGP phy's, this bit is valid only if link speed is
+ * 10 Mbps.  If the link speed is 100 Mbps there is no polarity, so this bit
+ * always reads 0.  If the link speed is 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ */
+static s32 e1000_check_polarity(struct e1000_hw *hw,
+                               e1000_rev_polarity *polarity)
 {
-    s32 timeout = AUTO_READ_DONE_TIMEOUT;
-
-    DEBUGFUNC("e1000_get_auto_rd_done");
-
-    switch (hw->mac_type) {
-    default:
-        msleep(5);
-        break;
-    case e1000_82571:
-    case e1000_82572:
-    case e1000_82573:
-    case e1000_80003es2lan:
-    case e1000_ich8lan:
-        while (timeout) {
-            if (er32(EECD) & E1000_EECD_AUTO_RD)
-                break;
-            else msleep(1);
-            timeout--;
-        }
-
-        if (!timeout) {
-            DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
-            return -E1000_ERR_RESET;
-        }
-        break;
-    }
-
-    /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
-     * Need to wait for PHY configuration completion before accessing NVM
-     * and PHY. */
-    if (hw->mac_type == e1000_82573)
-        msleep(25);
-
-    return E1000_SUCCESS;
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_check_polarity");
+
+       if (hw->phy_type == e1000_phy_m88) {
+               /* return the Polarity bit in the Status register. */
+               ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                            &phy_data);
+               if (ret_val)
+                       return ret_val;
+               *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+                            M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+                   e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+       } else if (hw->phy_type == e1000_phy_igp) {
+               /* Read the Status register to check the speed */
+               ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+                                            &phy_data);
+               if (ret_val)
+                       return ret_val;
+
+               /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+                * find the polarity status */
+               if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+                   IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+                       /* Read the GIG initialization PCS register (0x00B4) */
+                       ret_val =
+                           e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+                                              &phy_data);
+                       if (ret_val)
+                               return ret_val;
+
+                       /* Check the polarity bits */
+                       *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+                           e1000_rev_polarity_reversed :
+                           e1000_rev_polarity_normal;
+               } else {
+                       /* For 10 Mbps, read the polarity bit in the status register. (for
+                        * 100 Mbps this bit is always 0) */
+                       *polarity =
+                           (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+                           e1000_rev_polarity_reversed :
+                           e1000_rev_polarity_normal;
+               }
+       }
+       return E1000_SUCCESS;
 }
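
Illustrative aside (not part of the patch): the speed-dependent choice of polarity source for IGP phy's, condensed into a small decision helper; the bit masks are placeholders, not the real IGP01E1000_* definitions.

/* Sketch of the IGP polarity-source decision (placeholder masks). */
#include <stdbool.h>
#include <stdio.h>

#define SPEED_MASK      0xC000  /* assumed speed field in port status */
#define SPEED_1000      0xC000
#define STATUS_POL_BIT  0x0002  /* assumed polarity bit at 10 Mbps */
#define PCS_POL_MASK    0x0078  /* assumed polarity bits in PCS init reg */

static bool igp_polarity_reversed(unsigned int port_status,
                                  unsigned int pcs_init_reg)
{
        if ((port_status & SPEED_MASK) == SPEED_1000)
                return pcs_init_reg & PCS_POL_MASK;     /* 1000 Mbps path */
        return port_status & STATUS_POL_BIT;    /* 10 Mbps path (always 0 at 100) */
}

int main(void)
{
        printf("%d\n", igp_polarity_reversed(0xC000, 0x0008)); /* reversed */
        printf("%d\n", igp_polarity_reversed(0x4000, 0x0000)); /* normal */
        return 0;
}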
 
-/***************************************************************************
- * Checks if the PHY configuration is done
- *
- * hw: Struct containing variables accessed by shared code
+/**
+ * e1000_check_downshift - Check if Downshift occurred
+ * @hw: Struct containing variables accessed by shared code
+ * @downshift: output parameter : 0 - No Downshift occurred.
+ *                                1 - Downshift occurred.
  *
- * returns: - E1000_ERR_RESET if fail to reset MAC
- *            E1000_SUCCESS at any other case.
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
  *
- ***************************************************************************/
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+ * For phy's older than IGP, this function reads the Downshift bit in the Phy
+ * Specific Status register.  For IGP phy's, it reads the Downgrade bit in the
+ * Link Health register.  In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ */
+static s32 e1000_check_downshift(struct e1000_hw *hw)
 {
-    s32 timeout = PHY_CFG_TIMEOUT;
-    u32 cfg_mask = E1000_EEPROM_CFG_DONE;
-
-    DEBUGFUNC("e1000_get_phy_cfg_done");
-
-    switch (hw->mac_type) {
-    default:
-        mdelay(10);
-        break;
-    case e1000_80003es2lan:
-        /* Separate *_CFG_DONE_* bit for each port */
-        if (er32(STATUS) & E1000_STATUS_FUNC_1)
-            cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
-        /* Fall Through */
-    case e1000_82571:
-    case e1000_82572:
-        while (timeout) {
-            if (er32(EEMNGCTL) & cfg_mask)
-                break;
-            else
-                msleep(1);
-            timeout--;
-        }
-        if (!timeout) {
-            DEBUGOUT("MNG configuration cycle has not completed.\n");
-            return -E1000_ERR_RESET;
-        }
-        break;
-    }
-
-    return E1000_SUCCESS;
-}
+       s32 ret_val;
+       u16 phy_data;
 
-/***************************************************************************
- *
- * Using the combination of SMBI and SWESMBI semaphore bits when resetting
- * adapter or Eeprom access.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - E1000_ERR_EEPROM if fail to access EEPROM.
- *            E1000_SUCCESS at any other case.
- *
- ***************************************************************************/
-static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
-{
-    s32 timeout;
-    u32 swsm;
-
-    DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
-
-    if (!hw->eeprom_semaphore_present)
-        return E1000_SUCCESS;
-
-    if (hw->mac_type == e1000_80003es2lan) {
-        /* Get the SW semaphore. */
-        if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
-            return -E1000_ERR_EEPROM;
-    }
-
-    /* Get the FW semaphore. */
-    timeout = hw->eeprom.word_size + 1;
-    while (timeout) {
-        swsm = er32(SWSM);
-        swsm |= E1000_SWSM_SWESMBI;
-        ew32(SWSM, swsm);
-        /* if we managed to set the bit we got the semaphore. */
-        swsm = er32(SWSM);
-        if (swsm & E1000_SWSM_SWESMBI)
-            break;
-
-        udelay(50);
-        timeout--;
-    }
-
-    if (!timeout) {
-        /* Release semaphores */
-        e1000_put_hw_eeprom_semaphore(hw);
-        DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
-        return -E1000_ERR_EEPROM;
-    }
-
-    return E1000_SUCCESS;
-}
+       DEBUGFUNC("e1000_check_downshift");
 
-/***************************************************************************
- * This function clears HW semaphore bits.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - None.
- *
- ***************************************************************************/
-static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
-{
-    u32 swsm;
+       if (hw->phy_type == e1000_phy_igp) {
+               ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+                                            &phy_data);
+               if (ret_val)
+                       return ret_val;
 
-    DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
+               hw->speed_downgraded =
+                   (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+       } else if (hw->phy_type == e1000_phy_m88) {
+               ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                            &phy_data);
+               if (ret_val)
+                       return ret_val;
 
-    if (!hw->eeprom_semaphore_present)
-        return;
+               hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+                   M88E1000_PSSR_DOWNSHIFT_SHIFT;
+       }
 
-    swsm = er32(SWSM);
-    if (hw->mac_type == e1000_80003es2lan) {
-        /* Release both semaphores. */
-        swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
-    } else
-        swsm &= ~(E1000_SWSM_SWESMBI);
-    ew32(SWSM, swsm);
+       return E1000_SUCCESS;
 }
 
-/***************************************************************************
- *
- * Obtaining software semaphore bit (SMBI) before resetting PHY.
+/**
+ * e1000_config_dsp_after_link_change - configure the DSP after a link change
+ * @hw: Struct containing variables accessed by shared code
+ * @link_up: was link up at the time this was called
  *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - E1000_ERR_RESET if fail to obtain semaphore.
+ * returns: - E1000_ERR_PHY if fail to read/write the PHY
  *            E1000_SUCCESS at any other case.
  *
- ***************************************************************************/
-static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
-{
-    s32 timeout = hw->eeprom.word_size + 1;
-    u32 swsm;
-
-    DEBUGFUNC("e1000_get_software_semaphore");
-
-    if (hw->mac_type != e1000_80003es2lan) {
-        return E1000_SUCCESS;
-    }
-
-    while (timeout) {
-        swsm = er32(SWSM);
-        /* If SMBI bit cleared, it is now set and we hold the semaphore */
-        if (!(swsm & E1000_SWSM_SMBI))
-            break;
-        mdelay(1);
-        timeout--;
-    }
-
-    if (!timeout) {
-        DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
-        return -E1000_ERR_RESET;
-    }
-
-    return E1000_SUCCESS;
-}
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ */
 
-/***************************************************************************
- *
- * Release semaphore bit (SMBI).
- *
- * hw: Struct containing variables accessed by shared code
- *
- ***************************************************************************/
-static void e1000_release_software_semaphore(struct e1000_hw *hw)
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
 {
-    u32 swsm;
-
-    DEBUGFUNC("e1000_release_software_semaphore");
-
-    if (hw->mac_type != e1000_80003es2lan) {
-        return;
-    }
+       s32 ret_val;
+       u16 phy_data, phy_saved_data, speed, duplex, i;
+       u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+           { IGP01E1000_PHY_AGC_PARAM_A,
+               IGP01E1000_PHY_AGC_PARAM_B,
+               IGP01E1000_PHY_AGC_PARAM_C,
+               IGP01E1000_PHY_AGC_PARAM_D
+       };
+       u16 min_length, max_length;
+
+       DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+       if (hw->phy_type != e1000_phy_igp)
+               return E1000_SUCCESS;
+
+       if (link_up) {
+               ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+               if (ret_val) {
+                       DEBUGOUT("Error getting link speed and duplex\n");
+                       return ret_val;
+               }
 
-    swsm = er32(SWSM);
-    /* Release the SW semaphores.*/
-    swsm &= ~E1000_SWSM_SMBI;
-    ew32(SWSM, swsm);
-}
+               if (speed == SPEED_1000) {
+
+                       ret_val =
+                           e1000_get_cable_length(hw, &min_length,
+                                                  &max_length);
+                       if (ret_val)
+                               return ret_val;
+
+                       if ((hw->dsp_config_state == e1000_dsp_config_enabled)
+                           && min_length >= e1000_igp_cable_length_50) {
+
+                               for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                                       ret_val =
+                                           e1000_read_phy_reg(hw,
+                                                              dsp_reg_array[i],
+                                                              &phy_data);
+                                       if (ret_val)
+                                               return ret_val;
+
+                                       phy_data &=
+                                           ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+                                       ret_val =
+                                           e1000_write_phy_reg(hw,
+                                                               dsp_reg_array
+                                                               [i], phy_data);
+                                       if (ret_val)
+                                               return ret_val;
+                               }
+                               hw->dsp_config_state =
+                                   e1000_dsp_config_activated;
+                       }
+
+                       if ((hw->ffe_config_state == e1000_ffe_config_enabled)
+                           && (min_length < e1000_igp_cable_length_50)) {
+
+                               u16 ffe_idle_err_timeout =
+                                   FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+                               u32 idle_errs = 0;
+
+                               /* clear previous idle error counts */
+                               ret_val =
+                                   e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                                      &phy_data);
+                               if (ret_val)
+                                       return ret_val;
+
+                               for (i = 0; i < ffe_idle_err_timeout; i++) {
+                                       udelay(1000);
+                                       ret_val =
+                                           e1000_read_phy_reg(hw,
+                                                              PHY_1000T_STATUS,
+                                                              &phy_data);
+                                       if (ret_val)
+                                               return ret_val;
+
+                                       idle_errs +=
+                                           (phy_data &
+                                            SR_1000T_IDLE_ERROR_CNT);
+                                       if (idle_errs >
+                                           SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT)
+                                       {
+                                               hw->ffe_config_state =
+                                                   e1000_ffe_config_active;
+
+                                               ret_val =
+                                                   e1000_write_phy_reg(hw,
+                                                                       IGP01E1000_PHY_DSP_FFE,
+                                                                       IGP01E1000_PHY_DSP_FFE_CM_CP);
+                                               if (ret_val)
+                                                       return ret_val;
+                                               break;
+                                       }
+
+                                       if (idle_errs)
+                                               ffe_idle_err_timeout =
+                                                   FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+                               }
+                       }
+               }
+       } else {
+               if (hw->dsp_config_state == e1000_dsp_config_activated) {
+                       /* Save off the current value of register 0x2F5B to be restored at
+                        * the end of the routines. */
+                       ret_val =
+                           e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+                       if (ret_val)
+                               return ret_val;
+
+                       /* Disable the PHY transmitter */
+                       ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+                       if (ret_val)
+                               return ret_val;
+
+                       mdelay(20);
+
+                       ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                                     IGP01E1000_IEEE_FORCE_GIGA);
+                       if (ret_val)
+                               return ret_val;
+                       for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                               ret_val =
+                                   e1000_read_phy_reg(hw, dsp_reg_array[i],
+                                                      &phy_data);
+                               if (ret_val)
+                                       return ret_val;
+
+                               phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+                               phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+                               ret_val =
+                                   e1000_write_phy_reg(hw, dsp_reg_array[i],
+                                                       phy_data);
+                               if (ret_val)
+                                       return ret_val;
+                       }
+
+                       ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                                     IGP01E1000_IEEE_RESTART_AUTONEG);
+                       if (ret_val)
+                               return ret_val;
+
+                       mdelay(20);
+
+                       /* Now enable the transmitter */
+                       ret_val =
+                           e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+                       if (ret_val)
+                               return ret_val;
+
+                       hw->dsp_config_state = e1000_dsp_config_enabled;
+               }
 
-/******************************************************************************
- * Checks if PHY reset is blocked due to SOL/IDER session, for example.
- * Returning E1000_BLK_PHY_RESET isn't necessarily an error.  But it's up to
- * the caller to figure out how to deal with it.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * returns: - E1000_BLK_PHY_RESET
- *            E1000_SUCCESS
- *
- *****************************************************************************/
-s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
-{
-    u32 manc = 0;
-    u32 fwsm = 0;
-
-    if (hw->mac_type == e1000_ich8lan) {
-        fwsm = er32(FWSM);
-        return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
-                                            : E1000_BLK_PHY_RESET;
-    }
-
-    if (hw->mac_type > e1000_82547_rev_2)
-        manc = er32(MANC);
-    return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
-        E1000_BLK_PHY_RESET : E1000_SUCCESS;
-}
+               if (hw->ffe_config_state == e1000_ffe_config_active) {
+                       /* Save off the current value of register 0x2F5B to be restored at
+                        * the end of the routines. */
+                       ret_val =
+                           e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
 
-static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
-{
-    u32 fwsm;
-
-    /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
-     * may not be provided a DMA clock when no manageability features are
-     * enabled.  We do not want to perform any reads/writes to these registers
-     * if this is the case.  We read FWSM to determine the manageability mode.
-     */
-    switch (hw->mac_type) {
-    case e1000_82571:
-    case e1000_82572:
-    case e1000_82573:
-    case e1000_80003es2lan:
-        fwsm = er32(FWSM);
-        if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
-            return true;
-        break;
-    case e1000_ich8lan:
-        return true;
-    default:
-        break;
-    }
-    return false;
-}
+                       if (ret_val)
+                               return ret_val;
 
+                       /* Disable the PHY transmitter */
+                       ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
 
-/******************************************************************************
- * Configure PCI-Ex no-snoop
- *
- * hw - Struct containing variables accessed by shared code.
- * no_snoop - Bitmap of no-snoop events.
- *
- * returns: E1000_SUCCESS
- *
- *****************************************************************************/
-static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
-{
-    u32 gcr_reg = 0;
+                       if (ret_val)
+                               return ret_val;
 
-    DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+                       mdelay(20);
 
-    if (hw->bus_type == e1000_bus_type_unknown)
-        e1000_get_bus_info(hw);
+                       ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                                     IGP01E1000_IEEE_FORCE_GIGA);
+                       if (ret_val)
+                               return ret_val;
+                       ret_val =
+                           e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+                                               IGP01E1000_PHY_DSP_FFE_DEFAULT);
+                       if (ret_val)
+                               return ret_val;
 
-    if (hw->bus_type != e1000_bus_type_pci_express)
-        return E1000_SUCCESS;
+                       ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                                     IGP01E1000_IEEE_RESTART_AUTONEG);
+                       if (ret_val)
+                               return ret_val;
 
-    if (no_snoop) {
-        gcr_reg = er32(GCR);
-        gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
-        gcr_reg |= no_snoop;
-        ew32(GCR, gcr_reg);
-    }
-    if (hw->mac_type == e1000_ich8lan) {
-        u32 ctrl_ext;
+                       mdelay(20);
 
-        ew32(GCR, PCI_EX_82566_SNOOP_ALL);
+                       /* Now enable the transmitter */
+                       ret_val =
+                           e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
 
-        ctrl_ext = er32(CTRL_EXT);
-        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
-        ew32(CTRL_EXT, ctrl_ext);
-    }
+                       if (ret_val)
+                               return ret_val;
 
-    return E1000_SUCCESS;
+                       hw->ffe_config_state = e1000_ffe_config_enabled;
+               }
+       }
+       return E1000_SUCCESS;
 }
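
Illustrative aside (not part of the patch): the adaptive idle-error poll that drives the FFE decision above, reduced to a standalone loop; the counter read is stubbed with synthetic data and the thresholds are placeholders.

/* Standalone sketch of the adaptive idle-error poll (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

#define TIMEOUT_SHORT   20      /* placeholder for FFE_IDLE_ERR_COUNT_TIMEOUT_20 */
#define TIMEOUT_LONG   100      /* placeholder for ..._TIMEOUT_100 */
#define ERR_LIMIT       50      /* placeholder excessive-error threshold */

/* Stand-in for reading SR_1000T_IDLE_ERROR_CNT from PHY_1000T_STATUS. */
static unsigned int read_idle_err_count(unsigned int iteration)
{
        return iteration > 5 ? 3 : 0;   /* synthetic data for the demo */
}

static bool excessive_idle_errors(void)
{
        unsigned int timeout = TIMEOUT_SHORT, idle_errs = 0, i;

        for (i = 0; i < timeout; i++) {
                idle_errs += read_idle_err_count(i);
                if (idle_errs > ERR_LIMIT)
                        return true;    /* degrade the FFE configuration */
                /* First sign of trouble: keep watching for longer. */
                if (idle_errs)
                        timeout = TIMEOUT_LONG;
        }
        return false;
}

int main(void)
{
        printf("excessive idle errors: %s\n",
               excessive_idle_errors() ? "yes" : "no");
        return 0;
}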
 
-/***************************************************************************
+/**
+ * e1000_set_phy_mode - Set PHY to class A mode
+ * @hw: Struct containing variables accessed by shared code
  *
- * Get software semaphore FLAG bit (SWFLAG).
- * SWFLAG is used to synchronize the access to all shared resource between
- * SW, FW and HW.
- *
- * hw: Struct containing variables accessed by shared code
- *
- ***************************************************************************/
-static s32 e1000_get_software_flag(struct e1000_hw *hw)
+ * Assumes the following operations will follow to enable the new class mode.
+ *  1. Do a PHY soft reset
+ *  2. Restart auto-negotiation or force link.
+ */
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
 {
-    s32 timeout = PHY_CFG_TIMEOUT;
-    u32 extcnf_ctrl;
-
-    DEBUGFUNC("e1000_get_software_flag");
-
-    if (hw->mac_type == e1000_ich8lan) {
-        while (timeout) {
-            extcnf_ctrl = er32(EXTCNF_CTRL);
-            extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
-            ew32(EXTCNF_CTRL, extcnf_ctrl);
-
-            extcnf_ctrl = er32(EXTCNF_CTRL);
-            if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
-                break;
-            mdelay(1);
-            timeout--;
-        }
-
-        if (!timeout) {
-            DEBUGOUT("FW or HW locks the resource too long.\n");
-            return -E1000_ERR_CONFIG;
-        }
-    }
-
-    return E1000_SUCCESS;
+       s32 ret_val;
+       u16 eeprom_data;
+
+       DEBUGFUNC("e1000_set_phy_mode");
+
+       if ((hw->mac_type == e1000_82545_rev_3) &&
+           (hw->media_type == e1000_media_type_copper)) {
+               ret_val =
+                   e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1,
+                                     &eeprom_data);
+               if (ret_val) {
+                       return ret_val;
+               }
+
+               if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+                   (eeprom_data & EEPROM_PHY_CLASS_A)) {
+                       ret_val =
+                           e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+                                               0x000B);
+                       if (ret_val)
+                               return ret_val;
+                       ret_val =
+                           e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL,
+                                               0x8104);
+                       if (ret_val)
+                               return ret_val;
+
+                       hw->phy_reset_disable = false;
+               }
+       }
+
+       return E1000_SUCCESS;
 }
 
-/***************************************************************************
- *
- * Release software semaphore FLAG bit (SWFLAG).
- * SWFLAG is used to synchronize the access to all shared resource between
- * SW, FW and HW.
+/**
+ * e1000_set_d3_lplu_state - set d3 link power state
+ * @hw: Struct containing variables accessed by shared code
+ * @active: true to enable lplu, false to disable lplu.
  *
- * hw: Struct containing variables accessed by shared code
+ * This function sets the lplu state according to the active flag.  When
+ * activating lplu this function also disables smart speed and vice versa.
+ * lplu will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
  *
- ***************************************************************************/
-static void e1000_release_software_flag(struct e1000_hw *hw)
+ * returns: - E1000_ERR_PHY if fail to read/write the PHY
+ *            E1000_SUCCESS at any other case.
+ */
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
 {
-    u32 extcnf_ctrl;
+       s32 ret_val;
+       u16 phy_data;
+       DEBUGFUNC("e1000_set_d3_lplu_state");
+
+       if (hw->phy_type != e1000_phy_igp)
+               return E1000_SUCCESS;
+
+       /* During driver activity LPLU should not be used or it will attain link
+        * from the lowest speeds starting from 10Mbps. The capability is used for
+        * Dx transitions and states */
+       if (hw->mac_type == e1000_82541_rev_2
+           || hw->mac_type == e1000_82547_rev_2) {
+               ret_val =
+                   e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+               if (ret_val)
+                       return ret_val;
+       }
 
-    DEBUGFUNC("e1000_release_software_flag");
+       if (!active) {
+               if (hw->mac_type == e1000_82541_rev_2 ||
+                   hw->mac_type == e1000_82547_rev_2) {
+                       phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+                       ret_val =
+                           e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                               phy_data);
+                       if (ret_val)
+                               return ret_val;
+               }
 
-    if (hw->mac_type == e1000_ich8lan) {
-        extcnf_ctrl= er32(EXTCNF_CTRL);
-        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
-        ew32(EXTCNF_CTRL, extcnf_ctrl);
-    }
+               /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+                * Dx states where the power conservation is most important.  During
+                * driver activity we should enable SmartSpeed, so performance is
+                * maintained. */
+               if (hw->smart_speed == e1000_smart_speed_on) {
+                       ret_val =
+                           e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                              &phy_data);
+                       if (ret_val)
+                               return ret_val;
+
+                       phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val =
+                           e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                               phy_data);
+                       if (ret_val)
+                               return ret_val;
+               } else if (hw->smart_speed == e1000_smart_speed_off) {
+                       ret_val =
+                           e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                              &phy_data);
+                       if (ret_val)
+                               return ret_val;
+
+                       phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val =
+                           e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                               phy_data);
+                       if (ret_val)
+                               return ret_val;
+               }
+       } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT)
+                  || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL)
+                  || (hw->autoneg_advertised ==
+                      AUTONEG_ADVERTISE_10_100_ALL)) {
+
+               if (hw->mac_type == e1000_82541_rev_2 ||
+                   hw->mac_type == e1000_82547_rev_2) {
+                       phy_data |= IGP01E1000_GMII_FLEX_SPD;
+                       ret_val =
+                           e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                               phy_data);
+                       if (ret_val)
+                               return ret_val;
+               }
 
-    return;
-}
+               /* When LPLU is enabled we should disable SmartSpeed */
+               ret_val =
+                   e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                      &phy_data);
+               if (ret_val)
+                       return ret_val;
 
-/******************************************************************************
- * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
- * register.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
-                                 u16 *data)
-{
-    s32  error = E1000_SUCCESS;
-    u32 flash_bank = 0;
-    u32 act_offset = 0;
-    u32 bank_offset = 0;
-    u16 word = 0;
-    u16 i = 0;
-
-    /* We need to know which is the valid flash bank.  In the event
-     * that we didn't allocate eeprom_shadow_ram, we may not be
-     * managing flash_bank.  So it cannot be trusted and needs
-     * to be updated with each read.
-     */
-    /* Value of bit 22 corresponds to the flash bank we're on. */
-    flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
-
-    /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
-    bank_offset = flash_bank * (hw->flash_bank_size * 2);
-
-    error = e1000_get_software_flag(hw);
-    if (error != E1000_SUCCESS)
-        return error;
-
-    for (i = 0; i < words; i++) {
-        if (hw->eeprom_shadow_ram != NULL &&
-            hw->eeprom_shadow_ram[offset+i].modified) {
-            data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
-        } else {
-            /* The NVM part needs a byte offset, hence * 2 */
-            act_offset = bank_offset + ((offset + i) * 2);
-            error = e1000_read_ich8_word(hw, act_offset, &word);
-            if (error != E1000_SUCCESS)
-                break;
-            data[i] = word;
-        }
-    }
-
-    e1000_release_software_flag(hw);
-
-    return error;
-}
+               phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+               ret_val =
+                   e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                       phy_data);
+               if (ret_val)
+                       return ret_val;
 
-/******************************************************************************
- * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
- * register.  Actually, writes are written to the shadow ram cache in the hw
- * structure hw->e1000_shadow_ram.  e1000_commit_shadow_ram flushes this to
- * the NVM, which occurs when the NVM checksum is updated.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of word in the EEPROM to write
- * words - number of words to write
- * data - words to write to the EEPROM
- *****************************************************************************/
-static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
-                                  u16 *data)
-{
-    u32 i = 0;
-    s32 error = E1000_SUCCESS;
-
-    error = e1000_get_software_flag(hw);
-    if (error != E1000_SUCCESS)
-        return error;
-
-    /* A driver can write to the NVM only if it has eeprom_shadow_ram
-     * allocated.  Subsequent reads to the modified words are read from
-     * this cached structure as well.  Writes will only go into this
-     * cached structure unless it's followed by a call to
-     * e1000_update_eeprom_checksum() where it will commit the changes
-     * and clear the "modified" field.
-     */
-    if (hw->eeprom_shadow_ram != NULL) {
-        for (i = 0; i < words; i++) {
-            if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
-                hw->eeprom_shadow_ram[offset+i].modified = true;
-                hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
-            } else {
-                error = -E1000_ERR_EEPROM;
-                break;
-            }
-        }
-    } else {
-        /* Drivers have the option to not allocate eeprom_shadow_ram as long
-         * as they don't perform any NVM writes.  An attempt in doing so
-         * will result in this error.
-         */
-        error = -E1000_ERR_EEPROM;
-    }
-
-    e1000_release_software_flag(hw);
-
-    return error;
+       }
+       return E1000_SUCCESS;
 }
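
Illustrative aside (not part of the patch): the LPLU / SmartSpeed mutual exclusion above condensed into a small policy helper; the enum and the low-speed advertisement test are simplified placeholders.

/* Sketch of the D3 LPLU / SmartSpeed policy (illustrative only). */
#include <stdbool.h>
#include <stdio.h>

enum smart_speed { SMART_SPEED_DEFAULT, SMART_SPEED_ON, SMART_SPEED_OFF };

struct phy_policy {
        bool lplu;              /* low-power link-up for D3 */
        bool smart_speed;       /* downshift aid during normal operation */
};

/* 'low_speed_only' stands in for the 10/100-only advertisement check. */
static struct phy_policy pick_d3_policy(bool want_lplu, bool low_speed_only,
                                        enum smart_speed user_pref)
{
        struct phy_policy p = { false, false };

        if (want_lplu && low_speed_only)
                p.lplu = true;          /* LPLU on: SmartSpeed stays off */
        else if (!want_lplu)
                p.smart_speed = (user_pref == SMART_SPEED_ON);
        return p;
}

int main(void)
{
        struct phy_policy p = pick_d3_policy(true, true, SMART_SPEED_ON);

        printf("lplu=%d smart_speed=%d\n", p.lplu, p.smart_speed);
        return 0;
}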
 
-/******************************************************************************
- * This function does initial flash setup so that a new read/write/erase cycle
- * can be started.
+/**
+ * e1000_set_vco_speed - change the VCO speed of the PHY
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - The pointer to the hw structure
- ****************************************************************************/
-static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ */
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
 {
-    union ich8_hws_flash_status hsfsts;
-    s32 error = E1000_ERR_EEPROM;
-    s32 i     = 0;
-
-    DEBUGFUNC("e1000_ich8_cycle_init");
-
-    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-
-    /* May be check the Flash Des Valid bit in Hw status */
-    if (hsfsts.hsf_status.fldesvalid == 0) {
-        DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.");
-        return error;
-    }
-
-    /* Clear FCERR in Hw status by writing 1 */
-    /* Clear DAEL in Hw status by writing a 1 */
-    hsfsts.hsf_status.flcerr = 1;
-    hsfsts.hsf_status.dael = 1;
-
-    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
-
-    /* Either we should have a hardware SPI cycle in progress bit to check
-     * against, in order to start a new cycle or FDONE bit should be changed
-     * in the hardware so that it is 1 after harware reset, which can then be
-     * used as an indication whether a cycle is in progress or has been
-     * completed .. we should also have some software semaphore mechanism to
-     * guard FDONE or the cycle in progress bit so that two threads access to
-     * those bits can be sequentiallized or a way so that 2 threads dont
-     * start the cycle at the same time */
-
-    if (hsfsts.hsf_status.flcinprog == 0) {
-        /* There is no cycle running at present, so we can start a cycle */
-        /* Begin by setting Flash Cycle Done. */
-        hsfsts.hsf_status.flcdone = 1;
-        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
-        error = E1000_SUCCESS;
-    } else {
-        /* otherwise poll for sometime so the current cycle has a chance
-         * to end before giving up. */
-        for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
-            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-            if (hsfsts.hsf_status.flcinprog == 0) {
-                error = E1000_SUCCESS;
-                break;
-            }
-            udelay(1);
-        }
-        if (error == E1000_SUCCESS) {
-            /* Successful in waiting for previous cycle to timeout,
-             * now set the Flash Cycle Done. */
-            hsfsts.hsf_status.flcdone = 1;
-            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
-        } else {
-            DEBUGOUT("Flash controller busy, cannot get access");
-        }
-    }
-    return error;
-}
+       s32 ret_val;
+       u16 default_page = 0;
+       u16 phy_data;
 
-/******************************************************************************
- * This function starts a flash cycle and waits for its completion
- *
- * hw - The pointer to the hw structure
- ****************************************************************************/
-static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
-{
-    union ich8_hws_flash_ctrl hsflctl;
-    union ich8_hws_flash_status hsfsts;
-    s32 error = E1000_ERR_EEPROM;
-    u32 i = 0;
-
-    /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
-    hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
-    hsflctl.hsf_ctrl.flcgo = 1;
-    E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
-    /* wait till FDONE bit is set to 1 */
-    do {
-        hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-        if (hsfsts.hsf_status.flcdone == 1)
-            break;
-        udelay(1);
-        i++;
-    } while (i < timeout);
-    if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
-        error = E1000_SUCCESS;
-    }
-    return error;
-}
+       DEBUGFUNC("e1000_set_vco_speed");
 
-/******************************************************************************
- * Reads a byte or word from the NVM using the ICH8 flash access registers.
- *
- * hw - The pointer to the hw structure
- * index - The index of the byte or word to read.
- * size - Size of data to read, 1=byte 2=word
- * data - Pointer to the word to store the value read.
- *****************************************************************************/
-static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
-                               u16 *data)
-{
-    union ich8_hws_flash_status hsfsts;
-    union ich8_hws_flash_ctrl hsflctl;
-    u32 flash_linear_address;
-    u32 flash_data = 0;
-    s32 error = -E1000_ERR_EEPROM;
-    s32 count = 0;
-
-    DEBUGFUNC("e1000_read_ich8_data");
-
-    if (size < 1  || size > 2 || data == NULL ||
-        index > ICH_FLASH_LINEAR_ADDR_MASK)
-        return error;
-
-    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
-                           hw->flash_base_addr;
-
-    do {
-        udelay(1);
-        /* Steps */
-        error = e1000_ich8_cycle_init(hw);
-        if (error != E1000_SUCCESS)
-            break;
-
-        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
-        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
-        hsflctl.hsf_ctrl.fldbcount = size - 1;
-        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
-        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
-        /* Write the last 24 bits of index into Flash Linear address field in
-         * Flash Address */
-        /* TODO: TBD maybe check the index against the size of flash */
-
-        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
-
-        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
-
-        /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
-         * sequence a few more times, else read in (shift in) the Flash Data0,
-         * the order is least significant byte first msb to lsb */
-        if (error == E1000_SUCCESS) {
-            flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
-            if (size == 1) {
-                *data = (u8)(flash_data & 0x000000FF);
-            } else if (size == 2) {
-                *data = (u16)(flash_data & 0x0000FFFF);
-            }
-            break;
-        } else {
-            /* If we've gotten here, then things are probably completely hosed,
-             * but if the error condition is detected, it won't hurt to give
-             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
-             */
-            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-            if (hsfsts.hsf_status.flcerr == 1) {
-                /* Repeat for some time before giving up. */
-                continue;
-            } else if (hsfsts.hsf_status.flcdone == 0) {
-                DEBUGOUT("Timeout error - flash cycle did not complete.");
-                break;
-            }
-        }
-    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
-
-    return error;
-}
+       switch (hw->mac_type) {
+       case e1000_82545_rev_3:
+       case e1000_82546_rev_3:
+               break;
+       default:
+               return E1000_SUCCESS;
+       }
 
-/******************************************************************************
- * Writes One /two bytes to the NVM using the ICH8 flash access registers.
- *
- * hw - The pointer to the hw structure
- * index - The index of the byte/word to read.
- * size - Size of data to read, 1=byte 2=word
- * data - The byte(s) to write to the NVM.
- *****************************************************************************/
-static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
-                                u16 data)
-{
-    union ich8_hws_flash_status hsfsts;
-    union ich8_hws_flash_ctrl hsflctl;
-    u32 flash_linear_address;
-    u32 flash_data = 0;
-    s32 error = -E1000_ERR_EEPROM;
-    s32 count = 0;
-
-    DEBUGFUNC("e1000_write_ich8_data");
-
-    if (size < 1  || size > 2 || data > size * 0xff ||
-        index > ICH_FLASH_LINEAR_ADDR_MASK)
-        return error;
-
-    flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
-                           hw->flash_base_addr;
-
-    do {
-        udelay(1);
-        /* Steps */
-        error = e1000_ich8_cycle_init(hw);
-        if (error != E1000_SUCCESS)
-            break;
-
-        hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
-        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
-        hsflctl.hsf_ctrl.fldbcount = size -1;
-        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
-        E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
-        /* Write the last 24 bits of index into Flash Linear address field in
-         * Flash Address */
-        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
-
-        if (size == 1)
-            flash_data = (u32)data & 0x00FF;
-        else
-            flash_data = (u32)data;
-
-        E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
-
-        /* check if FCERR is set to 1 , if set to 1, clear it and try the whole
-         * sequence a few more times else done */
-        error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
-        if (error == E1000_SUCCESS) {
-            break;
-        } else {
-            /* If we're here, then things are most likely completely hosed,
-             * but if the error condition is detected, it won't hurt to give
-             * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
-             */
-            hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-            if (hsfsts.hsf_status.flcerr == 1) {
-                /* Repeat for some time before giving up. */
-                continue;
-            } else if (hsfsts.hsf_status.flcdone == 0) {
-                DEBUGOUT("Timeout error - flash cycle did not complete.");
-                break;
-            }
-        }
-    } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
-
-    return error;
-}
+       /* Set PHY register 30, page 5, bit 8 to 0 */
 
-/******************************************************************************
- * Reads a single byte from the NVM using the ICH8 flash access registers.
- *
- * hw - pointer to e1000_hw structure
- * index - The index of the byte to read.
- * data - Pointer to a byte to store the value read.
- *****************************************************************************/
-static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
-{
-    s32 status = E1000_SUCCESS;
-    u16 word = 0;
+       ret_val =
+           e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+       if (ret_val)
+               return ret_val;
 
-    status = e1000_read_ich8_data(hw, index, 1, &word);
-    if (status == E1000_SUCCESS) {
-        *data = (u8)word;
-    }
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+       if (ret_val)
+               return ret_val;
 
-    return status;
-}
+       ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+       if (ret_val)
+               return ret_val;
 
-/******************************************************************************
- * Writes a single byte to the NVM using the ICH8 flash access registers.
- * Performs verification by reading back the value and then going through
- * a retry algorithm before giving up.
- *
- * hw - pointer to e1000_hw structure
- * index - The index of the byte to write.
- * byte - The byte to write to the NVM.
- *****************************************************************************/
-static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
-{
-    s32 error = E1000_SUCCESS;
-    s32 program_retries = 0;
+       phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+       if (ret_val)
+               return ret_val;
+
+       /* Set PHY register 30, page 4, bit 11 to 1 */
 
-    DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+       if (ret_val)
+               return ret_val;
 
-    error = e1000_write_ich8_byte(hw, index, byte);
+       ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+       if (ret_val)
+               return ret_val;
 
-    if (error != E1000_SUCCESS) {
-        for (program_retries = 0; program_retries < 100; program_retries++) {
-            DEBUGOUT2("Retrying \t Byte := %2.2X Offset := %d\n", byte, index);
-            error = e1000_write_ich8_byte(hw, index, byte);
-            udelay(100);
-            if (error == E1000_SUCCESS)
-                break;
-        }
-    }
+       phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+       if (ret_val)
+               return ret_val;
 
-    if (program_retries == 100)
-        error = E1000_ERR_EEPROM;
+       ret_val =
+           e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+       if (ret_val)
+               return ret_val;
 
-    return error;
+       return E1000_SUCCESS;
 }
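
Illustrative aside (not part of the patch): the page-select, read-modify-write, restore-page sequence used in e1000_set_vco_speed, sketched against a flat array that stands in for the PHY register file; the register numbers are placeholders and paging itself is not modelled.

/* Sketch of the paged read-modify-write pattern (illustrative only). */
#include <stdio.h>

#define PAGE_SELECT 29          /* placeholder for M88E1000_PHY_PAGE_SELECT */
#define GEN_CONTROL 30          /* placeholder for M88E1000_PHY_GEN_CONTROL */

static unsigned int fake_phy[32];       /* stand-in for the PHY register file */

static int phy_read(unsigned int reg, unsigned int *val)
{
        *val = fake_phy[reg];
        return 0;
}

static int phy_write(unsigned int reg, unsigned int val)
{
        fake_phy[reg] = val;
        return 0;
}

static int rmw_paged_bits(unsigned int page, unsigned int set, unsigned int clr)
{
        unsigned int saved_page, val;
        int err;

        if ((err = phy_read(PAGE_SELECT, &saved_page))) /* remember page */
                return err;
        if ((err = phy_write(PAGE_SELECT, page)))       /* switch page */
                return err;
        if ((err = phy_read(GEN_CONTROL, &val)))        /* read-modify-write */
                return err;
        if ((err = phy_write(GEN_CONTROL, (val | set) & ~clr)))
                return err;
        return phy_write(PAGE_SELECT, saved_page);      /* restore page */
}

int main(void)
{
        rmw_paged_bits(0x0005, 0, 0x0100);      /* e.g. clear bit 8 on page 5 */
        printf("GEN_CONTROL now 0x%04x\n", fake_phy[GEN_CONTROL]);
        return 0;
}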
 
-/******************************************************************************
- * Writes a single byte to the NVM using the ICH8 flash access registers.
+
+/**
+ * e1000_enable_mng_pass_thru - check for bmc pass through
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw - pointer to e1000_hw structure
- * index - The index of the byte to read.
- * data - The byte to write to the NVM.
- *****************************************************************************/
-static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
+ * Verifies whether the hardware needs to allow ARPs to be processed by the host
+ * returns: - true/false
+ */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
 {
-    s32 status = E1000_SUCCESS;
-    u16 word = (u16)data;
+       u32 manc;
 
-    status = e1000_write_ich8_data(hw, index, 1, word);
+       if (hw->asf_firmware_present) {
+               manc = er32(MANC);
 
-    return status;
+               if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+                   !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+                       return false;
+               if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+                       return true;
+       }
+       return false;
 }
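
Illustrative aside (not part of the patch): the manageability pass-through test above extracted into a standalone predicate; the MANC_* bit values are placeholders, not the real E1000_MANC_* definitions.

/* Sketch of the pass-through decision (placeholder bit values). */
#include <stdbool.h>
#include <stdio.h>

#define MANC_RCV_TCO_EN         0x00020000      /* placeholder */
#define MANC_EN_MAC_ADDR_FILTER 0x00100000      /* placeholder */
#define MANC_SMBUS_EN           0x00000001      /* placeholder */
#define MANC_ASF_EN             0x00000002      /* placeholder */

static bool mng_pass_thru(unsigned int manc, bool asf_firmware_present)
{
        if (!asf_firmware_present)
                return false;
        /* TCO receive and MAC filtering must both be on for pass-through. */
        if (!(manc & MANC_RCV_TCO_EN) || !(manc & MANC_EN_MAC_ADDR_FILTER))
                return false;
        /* SMBus-managed (non-ASF) firmware needs the host to see ARPs. */
        return (manc & MANC_SMBUS_EN) && !(manc & MANC_ASF_EN);
}

int main(void)
{
        unsigned int manc = MANC_RCV_TCO_EN | MANC_EN_MAC_ADDR_FILTER |
                            MANC_SMBUS_EN;

        printf("pass-through: %s\n", mng_pass_thru(manc, true) ? "yes" : "no");
        return 0;
}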
 
-/******************************************************************************
- * Reads a word from the NVM using the ICH8 flash access registers.
- *
- * hw - pointer to e1000_hw structure
- * index - The starting byte index of the word to read.
- * data - Pointer to a word to store the value read.
- *****************************************************************************/
-static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
 {
-    s32 status = E1000_SUCCESS;
-    status = e1000_read_ich8_data(hw, index, 2, data);
-    return status;
-}
+       s32 ret_val;
+       u16 mii_status_reg;
+       u16 i;
 
-/******************************************************************************
- * Erases the bank specified. Each bank may be a 4, 8 or 64k block. Banks are 0
- * based.
- *
- * hw - pointer to e1000_hw structure
- * bank - 0 for first bank, 1 for second bank
- *
- * Note that this function may actually erase as much as 8 or 64 KBytes.  The
- * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
- * bank size may be 4, 8 or 64 KBytes
- *****************************************************************************/
-static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
-{
-    union ich8_hws_flash_status hsfsts;
-    union ich8_hws_flash_ctrl hsflctl;
-    u32 flash_linear_address;
-    s32  count = 0;
-    s32  error = E1000_ERR_EEPROM;
-    s32  iteration;
-    s32  sub_sector_size = 0;
-    s32  bank_size;
-    s32  j = 0;
-    s32  error_flag = 0;
-
-    hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-
-    /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
-    /* 00: The Hw sector is 256 bytes, hence we need to erase 16
-     *     consecutive sectors.  The start index for the nth Hw sector can be
-     *     calculated as bank * 4096 + n * 256
-     * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
-     *     The start index for the nth Hw sector can be calculated
-     *     as bank * 4096
-     * 10: The HW sector is 8K bytes
-     * 11: The Hw sector size is 64K bytes */
-    if (hsfsts.hsf_status.berasesz == 0x0) {
-        /* Hw sector size 256 */
-        sub_sector_size = ICH_FLASH_SEG_SIZE_256;
-        bank_size = ICH_FLASH_SECTOR_SIZE;
-        iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
-    } else if (hsfsts.hsf_status.berasesz == 0x1) {
-        bank_size = ICH_FLASH_SEG_SIZE_4K;
-        iteration = 1;
-    } else if (hsfsts.hsf_status.berasesz == 0x3) {
-        bank_size = ICH_FLASH_SEG_SIZE_64K;
-        iteration = 1;
-    } else {
-        return error;
-    }
-
-    for (j = 0; j < iteration ; j++) {
-        do {
-            count++;
-            /* Steps */
-            error = e1000_ich8_cycle_init(hw);
-            if (error != E1000_SUCCESS) {
-                error_flag = 1;
-                break;
-            }
-
-            /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
-             * Control */
-            hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
-            hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
-            E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
-            /* Write the last 24 bits of an index within the block into Flash
-             * Linear address field in Flash Address.  This probably needs to
-             * be calculated here based off the on-chip erase sector size and
-             * the software bank size (4, 8 or 64 KBytes) */
-            flash_linear_address = bank * bank_size + j * sub_sector_size;
-            flash_linear_address += hw->flash_base_addr;
-            flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
-
-            E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
-
-            error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
-            /* Check if FCERR is set to 1.  If 1, clear it and try the whole
-             * sequence a few more times else Done */
-            if (error == E1000_SUCCESS) {
-                break;
-            } else {
-                hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-                if (hsfsts.hsf_status.flcerr == 1) {
-                    /* repeat for some time before giving up */
-                    continue;
-                } else if (hsfsts.hsf_status.flcdone == 0) {
-                    error_flag = 1;
-                    break;
-                }
-            }
-        } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
-        if (error_flag == 1)
-            break;
-    }
-    if (error_flag != 1)
-        error = E1000_SUCCESS;
-    return error;
-}
+       /* Polarity reversal workaround for forced 10F/10H links. */
 
-static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
-                                                u32 cnf_base_addr,
-                                                u32 cnf_size)
-{
-    u32 ret_val = E1000_SUCCESS;
-    u16 word_addr, reg_data, reg_addr;
-    u16 i;
+       /* Disable the transmitter on the PHY */
 
-    /* cnf_base_addr is in DWORD */
-    word_addr = (u16)(cnf_base_addr << 1);
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+       if (ret_val)
+               return ret_val;
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+       if (ret_val)
+               return ret_val;
 
-    /* cnf_size is returned in size of dwords */
-    for (i = 0; i < cnf_size; i++) {
-        ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
-        if (ret_val)
-            return ret_val;
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+       if (ret_val)
+               return ret_val;
 
-        ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
-        if (ret_val)
-            return ret_val;
+       /* This loop will early-out if the NO link condition has been met. */
+       for (i = PHY_FORCE_TIME; i > 0; i--) {
+               /* Read the MII Status Register and wait for Link Status bit
+                * to be clear.
+                */
 
-        ret_val = e1000_get_software_flag(hw);
-        if (ret_val != E1000_SUCCESS)
-            return ret_val;
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       return ret_val;
 
-        ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       return ret_val;
 
-        e1000_release_software_flag(hw);
-    }
+               if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+                       break;
+               mdelay(100);
+       }
 
-    return ret_val;
+       /* Recommended delay time after link has been lost */
+       mdelay(1000);
+
+       /* Now we will re-enable the transmitter on the PHY */
+
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+       if (ret_val)
+               return ret_val;
+       mdelay(50);
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+       if (ret_val)
+               return ret_val;
+       mdelay(50);
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+       if (ret_val)
+               return ret_val;
+       mdelay(50);
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+       if (ret_val)
+               return ret_val;
+
+       ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+       if (ret_val)
+               return ret_val;
+
+       /* This loop will early-out if the link condition has been met. */
+       for (i = PHY_FORCE_TIME; i > 0; i--) {
+               /* Read the MII Status Register and wait for Link Status bit
+                * to be set.
+                */
+
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       return ret_val;
+
+               ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       return ret_val;
+
+               if (mii_status_reg & MII_SR_LINK_STATUS)
+                       break;
+               mdelay(100);
+       }
+       return E1000_SUCCESS;
 }
 
-
-/******************************************************************************
- * This function initializes the PHY from the NVM on ICH8 platforms. This
- * is needed due to an issue where the NVM configuration is not properly
- * autoloaded after power transitions. Therefore, after each PHY reset, we
- * will load the configuration data out of the NVM manually.
+/**
+ * e1000_get_auto_rd_done
+ * @hw: Struct containing variables accessed by shared code
  *
- * hw: Struct containing variables accessed by shared code
- *****************************************************************************/
-static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+ * Check for EEPROM Auto Read bit done.
+ * returns: - E1000_ERR_RESET if it fails to reset the MAC
+ *            E1000_SUCCESS in any other case.
+ */
+static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
 {
-    u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
-
-    if (hw->phy_type != e1000_phy_igp_3)
-          return E1000_SUCCESS;
-
-    /* Check if SW needs configure the PHY */
-    reg_data = er32(FEXTNVM);
-    if (!(reg_data & FEXTNVM_SW_CONFIG))
-        return E1000_SUCCESS;
-
-    /* Wait for basic configuration completes before proceeding*/
-    loop = 0;
-    do {
-        reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
-        udelay(100);
-        loop++;
-    } while ((!reg_data) && (loop < 50));
-
-    /* Clear the Init Done bit for the next init event */
-    reg_data = er32(STATUS);
-    reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
-    ew32(STATUS, reg_data);
-
-    /* Make sure HW does not configure LCD from PHY extended configuration
-       before SW configuration */
-    reg_data = er32(EXTCNF_CTRL);
-    if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
-        reg_data = er32(EXTCNF_SIZE);
-        cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
-        cnf_size >>= 16;
-        if (cnf_size) {
-            reg_data = er32(EXTCNF_CTRL);
-            cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
-            /* cnf_base_addr is in DWORD */
-            cnf_base_addr >>= 16;
-
-            /* Configure LCD from extended configuration region. */
-            ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
-                                                            cnf_size);
-            if (ret_val)
-                return ret_val;
-        }
-    }
-
-    return E1000_SUCCESS;
+       DEBUGFUNC("e1000_get_auto_rd_done");
+       msleep(5);
+       return E1000_SUCCESS;
 }
 
+/**
+ * e1000_get_phy_cfg_done
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Checks if the PHY configuration is done
+ * returns: - E1000_ERR_RESET if it fails to reset the MAC
+ *            E1000_SUCCESS in any other case.
+ */
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_get_phy_cfg_done");
+       mdelay(10);
+       return E1000_SUCCESS;
+}
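Illustrative aside (not part of the commit): a minimal sketch of how a caller might consume e1000_enable_mng_pass_thru() above when deciding whether the receive path has to stay enabled for BMC/ARP pass-through. The wrapper name is hypothetical; only the e1000 symbols visible in this diff are assumed.

/* Hypothetical wrapper, for illustration only (assumes <linux/types.h> for
 * bool plus the struct e1000_hw and e1000_enable_mng_pass_thru() above). */
static bool e1000_keep_rx_for_manageability(struct e1000_hw *hw)
{
	/* Non-zero only when ASF firmware is present and MANC selects
	 * SMBus pass-through rather than ASF (see the checks above). */
	return e1000_enable_mng_pass_thru(hw) != 0;
}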
index a8866bdbb671b6ffbc3dd1ecd07dafe0550f5419..9acfddb0dafb2638983e782722bf0bf0ecb40577 100644 (file)
@@ -35,7 +35,6 @@
 
 #include "e1000_osdep.h"
 
-
 /* Forward declarations of structures used by the shared code */
 struct e1000_hw;
 struct e1000_hw_stats;
@@ -43,252 +42,231 @@ struct e1000_hw_stats;
 /* Enumerated types specific to the e1000 hardware */
 /* Media Access Controllers */
 typedef enum {
-    e1000_undefined = 0,
-    e1000_82542_rev2_0,
-    e1000_82542_rev2_1,
-    e1000_82543,
-    e1000_82544,
-    e1000_82540,
-    e1000_82545,
-    e1000_82545_rev_3,
-    e1000_82546,
-    e1000_82546_rev_3,
-    e1000_82541,
-    e1000_82541_rev_2,
-    e1000_82547,
-    e1000_82547_rev_2,
-    e1000_82571,
-    e1000_82572,
-    e1000_82573,
-    e1000_80003es2lan,
-    e1000_ich8lan,
-    e1000_num_macs
+       e1000_undefined = 0,
+       e1000_82542_rev2_0,
+       e1000_82542_rev2_1,
+       e1000_82543,
+       e1000_82544,
+       e1000_82540,
+       e1000_82545,
+       e1000_82545_rev_3,
+       e1000_82546,
+       e1000_82546_rev_3,
+       e1000_82541,
+       e1000_82541_rev_2,
+       e1000_82547,
+       e1000_82547_rev_2,
+       e1000_num_macs
 } e1000_mac_type;
 
 typedef enum {
-    e1000_eeprom_uninitialized = 0,
-    e1000_eeprom_spi,
-    e1000_eeprom_microwire,
-    e1000_eeprom_flash,
-    e1000_eeprom_ich8,
-    e1000_eeprom_none, /* No NVM support */
-    e1000_num_eeprom_types
+       e1000_eeprom_uninitialized = 0,
+       e1000_eeprom_spi,
+       e1000_eeprom_microwire,
+       e1000_eeprom_flash,
+       e1000_eeprom_none,      /* No NVM support */
+       e1000_num_eeprom_types
 } e1000_eeprom_type;
 
 /* Media Types */
 typedef enum {
-    e1000_media_type_copper = 0,
-    e1000_media_type_fiber = 1,
-    e1000_media_type_internal_serdes = 2,
-    e1000_num_media_types
+       e1000_media_type_copper = 0,
+       e1000_media_type_fiber = 1,
+       e1000_media_type_internal_serdes = 2,
+       e1000_num_media_types
 } e1000_media_type;
 
 typedef enum {
-    e1000_10_half = 0,
-    e1000_10_full = 1,
-    e1000_100_half = 2,
-    e1000_100_full = 3
+       e1000_10_half = 0,
+       e1000_10_full = 1,
+       e1000_100_half = 2,
+       e1000_100_full = 3
 } e1000_speed_duplex_type;
 
 /* Flow Control Settings */
 typedef enum {
-    E1000_FC_NONE = 0,
-    E1000_FC_RX_PAUSE = 1,
-    E1000_FC_TX_PAUSE = 2,
-    E1000_FC_FULL = 3,
-    E1000_FC_DEFAULT = 0xFF
+       E1000_FC_NONE = 0,
+       E1000_FC_RX_PAUSE = 1,
+       E1000_FC_TX_PAUSE = 2,
+       E1000_FC_FULL = 3,
+       E1000_FC_DEFAULT = 0xFF
 } e1000_fc_type;
 
 struct e1000_shadow_ram {
-    u16 eeprom_word;
-    bool modified;
+       u16 eeprom_word;
+       bool modified;
 };
 
 /* PCI bus types */
 typedef enum {
-    e1000_bus_type_unknown = 0,
-    e1000_bus_type_pci,
-    e1000_bus_type_pcix,
-    e1000_bus_type_pci_express,
-    e1000_bus_type_reserved
+       e1000_bus_type_unknown = 0,
+       e1000_bus_type_pci,
+       e1000_bus_type_pcix,
+       e1000_bus_type_reserved
 } e1000_bus_type;
 
 /* PCI bus speeds */
 typedef enum {
-    e1000_bus_speed_unknown = 0,
-    e1000_bus_speed_33,
-    e1000_bus_speed_66,
-    e1000_bus_speed_100,
-    e1000_bus_speed_120,
-    e1000_bus_speed_133,
-    e1000_bus_speed_2500,
-    e1000_bus_speed_reserved
+       e1000_bus_speed_unknown = 0,
+       e1000_bus_speed_33,
+       e1000_bus_speed_66,
+       e1000_bus_speed_100,
+       e1000_bus_speed_120,
+       e1000_bus_speed_133,
+       e1000_bus_speed_reserved
 } e1000_bus_speed;
 
 /* PCI bus widths */
 typedef enum {
-    e1000_bus_width_unknown = 0,
-    /* These PCIe values should literally match the possible return values
-     * from config space */
-    e1000_bus_width_pciex_1 = 1,
-    e1000_bus_width_pciex_2 = 2,
-    e1000_bus_width_pciex_4 = 4,
-    e1000_bus_width_32,
-    e1000_bus_width_64,
-    e1000_bus_width_reserved
+       e1000_bus_width_unknown = 0,
+       e1000_bus_width_32,
+       e1000_bus_width_64,
+       e1000_bus_width_reserved
 } e1000_bus_width;
 
 /* PHY status info structure and supporting enums */
 typedef enum {
-    e1000_cable_length_50 = 0,
-    e1000_cable_length_50_80,
-    e1000_cable_length_80_110,
-    e1000_cable_length_110_140,
-    e1000_cable_length_140,
-    e1000_cable_length_undefined = 0xFF
+       e1000_cable_length_50 = 0,
+       e1000_cable_length_50_80,
+       e1000_cable_length_80_110,
+       e1000_cable_length_110_140,
+       e1000_cable_length_140,
+       e1000_cable_length_undefined = 0xFF
 } e1000_cable_length;
 
 typedef enum {
-    e1000_gg_cable_length_60 = 0,
-    e1000_gg_cable_length_60_115 = 1,
-    e1000_gg_cable_length_115_150 = 2,
-    e1000_gg_cable_length_150 = 4
+       e1000_gg_cable_length_60 = 0,
+       e1000_gg_cable_length_60_115 = 1,
+       e1000_gg_cable_length_115_150 = 2,
+       e1000_gg_cable_length_150 = 4
 } e1000_gg_cable_length;
 
 typedef enum {
-    e1000_igp_cable_length_10  = 10,
-    e1000_igp_cable_length_20  = 20,
-    e1000_igp_cable_length_30  = 30,
-    e1000_igp_cable_length_40  = 40,
-    e1000_igp_cable_length_50  = 50,
-    e1000_igp_cable_length_60  = 60,
-    e1000_igp_cable_length_70  = 70,
-    e1000_igp_cable_length_80  = 80,
-    e1000_igp_cable_length_90  = 90,
-    e1000_igp_cable_length_100 = 100,
-    e1000_igp_cable_length_110 = 110,
-    e1000_igp_cable_length_115 = 115,
-    e1000_igp_cable_length_120 = 120,
-    e1000_igp_cable_length_130 = 130,
-    e1000_igp_cable_length_140 = 140,
-    e1000_igp_cable_length_150 = 150,
-    e1000_igp_cable_length_160 = 160,
-    e1000_igp_cable_length_170 = 170,
-    e1000_igp_cable_length_180 = 180
+       e1000_igp_cable_length_10 = 10,
+       e1000_igp_cable_length_20 = 20,
+       e1000_igp_cable_length_30 = 30,
+       e1000_igp_cable_length_40 = 40,
+       e1000_igp_cable_length_50 = 50,
+       e1000_igp_cable_length_60 = 60,
+       e1000_igp_cable_length_70 = 70,
+       e1000_igp_cable_length_80 = 80,
+       e1000_igp_cable_length_90 = 90,
+       e1000_igp_cable_length_100 = 100,
+       e1000_igp_cable_length_110 = 110,
+       e1000_igp_cable_length_115 = 115,
+       e1000_igp_cable_length_120 = 120,
+       e1000_igp_cable_length_130 = 130,
+       e1000_igp_cable_length_140 = 140,
+       e1000_igp_cable_length_150 = 150,
+       e1000_igp_cable_length_160 = 160,
+       e1000_igp_cable_length_170 = 170,
+       e1000_igp_cable_length_180 = 180
 } e1000_igp_cable_length;
 
 typedef enum {
-    e1000_10bt_ext_dist_enable_normal = 0,
-    e1000_10bt_ext_dist_enable_lower,
-    e1000_10bt_ext_dist_enable_undefined = 0xFF
+       e1000_10bt_ext_dist_enable_normal = 0,
+       e1000_10bt_ext_dist_enable_lower,
+       e1000_10bt_ext_dist_enable_undefined = 0xFF
 } e1000_10bt_ext_dist_enable;
 
 typedef enum {
-    e1000_rev_polarity_normal = 0,
-    e1000_rev_polarity_reversed,
-    e1000_rev_polarity_undefined = 0xFF
+       e1000_rev_polarity_normal = 0,
+       e1000_rev_polarity_reversed,
+       e1000_rev_polarity_undefined = 0xFF
 } e1000_rev_polarity;
 
 typedef enum {
-    e1000_downshift_normal = 0,
-    e1000_downshift_activated,
-    e1000_downshift_undefined = 0xFF
+       e1000_downshift_normal = 0,
+       e1000_downshift_activated,
+       e1000_downshift_undefined = 0xFF
 } e1000_downshift;
 
 typedef enum {
-    e1000_smart_speed_default = 0,
-    e1000_smart_speed_on,
-    e1000_smart_speed_off
+       e1000_smart_speed_default = 0,
+       e1000_smart_speed_on,
+       e1000_smart_speed_off
 } e1000_smart_speed;
 
 typedef enum {
-    e1000_polarity_reversal_enabled = 0,
-    e1000_polarity_reversal_disabled,
-    e1000_polarity_reversal_undefined = 0xFF
+       e1000_polarity_reversal_enabled = 0,
+       e1000_polarity_reversal_disabled,
+       e1000_polarity_reversal_undefined = 0xFF
 } e1000_polarity_reversal;
 
 typedef enum {
-    e1000_auto_x_mode_manual_mdi = 0,
-    e1000_auto_x_mode_manual_mdix,
-    e1000_auto_x_mode_auto1,
-    e1000_auto_x_mode_auto2,
-    e1000_auto_x_mode_undefined = 0xFF
+       e1000_auto_x_mode_manual_mdi = 0,
+       e1000_auto_x_mode_manual_mdix,
+       e1000_auto_x_mode_auto1,
+       e1000_auto_x_mode_auto2,
+       e1000_auto_x_mode_undefined = 0xFF
 } e1000_auto_x_mode;
 
 typedef enum {
-    e1000_1000t_rx_status_not_ok = 0,
-    e1000_1000t_rx_status_ok,
-    e1000_1000t_rx_status_undefined = 0xFF
+       e1000_1000t_rx_status_not_ok = 0,
+       e1000_1000t_rx_status_ok,
+       e1000_1000t_rx_status_undefined = 0xFF
 } e1000_1000t_rx_status;
 
 typedef enum {
     e1000_phy_m88 = 0,
     e1000_phy_igp,
-    e1000_phy_igp_2,
-    e1000_phy_gg82563,
-    e1000_phy_igp_3,
-    e1000_phy_ife,
     e1000_phy_undefined = 0xFF
 } e1000_phy_type;
 
 typedef enum {
-    e1000_ms_hw_default = 0,
-    e1000_ms_force_master,
-    e1000_ms_force_slave,
-    e1000_ms_auto
+       e1000_ms_hw_default = 0,
+       e1000_ms_force_master,
+       e1000_ms_force_slave,
+       e1000_ms_auto
 } e1000_ms_type;
 
 typedef enum {
-    e1000_ffe_config_enabled = 0,
-    e1000_ffe_config_active,
-    e1000_ffe_config_blocked
+       e1000_ffe_config_enabled = 0,
+       e1000_ffe_config_active,
+       e1000_ffe_config_blocked
 } e1000_ffe_config;
 
 typedef enum {
-    e1000_dsp_config_disabled = 0,
-    e1000_dsp_config_enabled,
-    e1000_dsp_config_activated,
-    e1000_dsp_config_undefined = 0xFF
+       e1000_dsp_config_disabled = 0,
+       e1000_dsp_config_enabled,
+       e1000_dsp_config_activated,
+       e1000_dsp_config_undefined = 0xFF
 } e1000_dsp_config;
 
 struct e1000_phy_info {
-    e1000_cable_length cable_length;
-    e1000_10bt_ext_dist_enable extended_10bt_distance;
-    e1000_rev_polarity cable_polarity;
-    e1000_downshift downshift;
-    e1000_polarity_reversal polarity_correction;
-    e1000_auto_x_mode mdix_mode;
-    e1000_1000t_rx_status local_rx;
-    e1000_1000t_rx_status remote_rx;
+       e1000_cable_length cable_length;
+       e1000_10bt_ext_dist_enable extended_10bt_distance;
+       e1000_rev_polarity cable_polarity;
+       e1000_downshift downshift;
+       e1000_polarity_reversal polarity_correction;
+       e1000_auto_x_mode mdix_mode;
+       e1000_1000t_rx_status local_rx;
+       e1000_1000t_rx_status remote_rx;
 };
 
 struct e1000_phy_stats {
-    u32 idle_errors;
-    u32 receive_errors;
+       u32 idle_errors;
+       u32 receive_errors;
 };
 
 struct e1000_eeprom_info {
-    e1000_eeprom_type type;
-    u16 word_size;
-    u16 opcode_bits;
-    u16 address_bits;
-    u16 delay_usec;
-    u16 page_size;
-    bool use_eerd;
-    bool use_eewr;
+       e1000_eeprom_type type;
+       u16 word_size;
+       u16 opcode_bits;
+       u16 address_bits;
+       u16 delay_usec;
+       u16 page_size;
 };
 
 /* Flex ASF Information */
 #define E1000_HOST_IF_MAX_SIZE  2048
 
 typedef enum {
-    e1000_byte_align = 0,
-    e1000_word_align = 1,
-    e1000_dword_align = 2
+       e1000_byte_align = 0,
+       e1000_word_align = 1,
+       e1000_dword_align = 2
 } e1000_align_type;
 
-
-
 /* Error Codes */
 #define E1000_SUCCESS      0
 #define E1000_ERR_EEPROM   1
@@ -301,7 +279,6 @@ typedef enum {
 #define E1000_ERR_MASTER_REQUESTS_PENDING 10
 #define E1000_ERR_HOST_INTERFACE_COMMAND 11
 #define E1000_BLK_PHY_RESET   12
-#define E1000_ERR_SWFW_SYNC 13
 
 #define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
                                      (((_value) & 0xff00) >> 8))
@@ -318,19 +295,17 @@ s32 e1000_setup_link(struct e1000_hw *hw);
 s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
 void e1000_config_collision_dist(struct e1000_hw *hw);
 s32 e1000_check_for_link(struct e1000_hw *hw);
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex);
 s32 e1000_force_mac_fc(struct e1000_hw *hw);
 
 /* PHY */
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data);
 s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
 s32 e1000_phy_hw_reset(struct e1000_hw *hw);
 s32 e1000_phy_reset(struct e1000_hw *hw);
 s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
 s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
 
-void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
-
 /* EEPROM Functions */
 s32 e1000_init_eeprom_params(struct e1000_hw *hw);
 
@@ -338,66 +313,63 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw);
 u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
 
 #define E1000_MNG_DHCP_TX_PAYLOAD_CMD   64
-#define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8   /* Host Interface data length */
+#define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8  /* Host Interface data length */
 
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT  10      /* Time in ms to process MNG command */
-#define E1000_MNG_DHCP_COOKIE_OFFSET    0x6F0   /* Cookie offset */
-#define E1000_MNG_DHCP_COOKIE_LENGTH    0x10    /* Cookie length */
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT  10     /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET    0x6F0  /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH    0x10   /* Cookie length */
 #define E1000_MNG_IAMT_MODE             0x3
 #define E1000_MNG_ICH_IAMT_MODE         0x2
-#define E1000_IAMT_SIGNATURE            0x544D4149 /* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE            0x544D4149     /* Intel(R) Active Management Technology signature */
 
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT    0x2 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1       /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT    0x2       /* DHCP parsing enabled */
 #define E1000_VFTA_ENTRY_SHIFT                       0x5
 #define E1000_VFTA_ENTRY_MASK                        0x7F
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK              0x1F
 
 struct e1000_host_mng_command_header {
-    u8 command_id;
-    u8 checksum;
-    u16 reserved1;
-    u16 reserved2;
-    u16 command_length;
+       u8 command_id;
+       u8 checksum;
+       u16 reserved1;
+       u16 reserved2;
+       u16 command_length;
 };
 
 struct e1000_host_mng_command_info {
-    struct e1000_host_mng_command_header command_header;  /* Command Head/Command Result Head has 4 bytes */
-    u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];   /* Command data can length 0..0x658*/
+       struct e1000_host_mng_command_header command_header;    /* Command Head/Command Result Head has 4 bytes */
+       u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];  /* Command data can be 0..0x658 bytes long */
 };
 #ifdef __BIG_ENDIAN
-struct e1000_host_mng_dhcp_cookie{
-    u32 signature;
-    u16 vlan_id;
-    u8 reserved0;
-    u8 status;
-    u32 reserved1;
-    u8 checksum;
-    u8 reserved3;
-    u16 reserved2;
+struct e1000_host_mng_dhcp_cookie {
+       u32 signature;
+       u16 vlan_id;
+       u8 reserved0;
+       u8 status;
+       u32 reserved1;
+       u8 checksum;
+       u8 reserved3;
+       u16 reserved2;
 };
 #else
-struct e1000_host_mng_dhcp_cookie{
-    u32 signature;
-    u8 status;
-    u8 reserved0;
-    u16 vlan_id;
-    u32 reserved1;
-    u16 reserved2;
-    u8 reserved3;
-    u8 checksum;
+struct e1000_host_mng_dhcp_cookie {
+       u32 signature;
+       u8 status;
+       u8 reserved0;
+       u16 vlan_id;
+       u32 reserved1;
+       u16 reserved2;
+       u8 reserved3;
+       u8 checksum;
 };
 #endif
 
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
-                                  u16 length);
 bool e1000_check_mng_mode(struct e1000_hw *hw);
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
 s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
 s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
-s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
-s32 e1000_read_mac_addr(struct e1000_hw * hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
 
 /* Filters (multicast, vlan, receive) */
 u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
@@ -417,18 +389,15 @@ s32 e1000_blink_led_start(struct e1000_hw *hw);
 /* Everything else */
 void e1000_reset_adaptive(struct e1000_hw *hw);
 void e1000_update_adaptive(struct e1000_hw *hw);
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+                           u32 frame_len, u8 * mac_addr);
 void e1000_get_bus_info(struct e1000_hw *hw);
 void e1000_pci_set_mwi(struct e1000_hw *hw);
 void e1000_pci_clear_mwi(struct e1000_hw *hw);
-s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
 int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
 /* Port I/O is only supported on 82544 and newer */
 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
-s32 e1000_disable_pciex_master(struct e1000_hw *hw);
-s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
-
 
 #define E1000_READ_REG_IO(a, reg) \
     e1000_read_reg_io((a), E1000_##reg)
@@ -471,36 +440,7 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
 #define E1000_DEV_ID_82547EI             0x1019
 #define E1000_DEV_ID_82547EI_MOBILE      0x101A
-#define E1000_DEV_ID_82571EB_COPPER      0x105E
-#define E1000_DEV_ID_82571EB_FIBER       0x105F
-#define E1000_DEV_ID_82571EB_SERDES      0x1060
-#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
-#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
-#define E1000_DEV_ID_82571EB_QUAD_FIBER  0x10A5
-#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE  0x10BC
-#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
-#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
-#define E1000_DEV_ID_82572EI_COPPER      0x107D
-#define E1000_DEV_ID_82572EI_FIBER       0x107E
-#define E1000_DEV_ID_82572EI_SERDES      0x107F
-#define E1000_DEV_ID_82572EI             0x10B9
-#define E1000_DEV_ID_82573E              0x108B
-#define E1000_DEV_ID_82573E_IAMT         0x108C
-#define E1000_DEV_ID_82573L              0x109A
 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
-#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT     0x1096
-#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT     0x1098
-#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT     0x10BA
-#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT     0x10BB
-
-#define E1000_DEV_ID_ICH8_IGP_M_AMT      0x1049
-#define E1000_DEV_ID_ICH8_IGP_AMT        0x104A
-#define E1000_DEV_ID_ICH8_IGP_C          0x104B
-#define E1000_DEV_ID_ICH8_IFE            0x104C
-#define E1000_DEV_ID_ICH8_IFE_GT         0x10C4
-#define E1000_DEV_ID_ICH8_IFE_G          0x10C5
-#define E1000_DEV_ID_ICH8_IGP_M          0x104D
-
 
 #define NODE_ADDRESS_SIZE 6
 #define ETH_LENGTH_OF_ADDRESS 6
@@ -523,21 +463,20 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
 
 /* The sizes (in bytes) of a ethernet packet */
 #define ENET_HEADER_SIZE             14
-#define MINIMUM_ETHERNET_FRAME_SIZE  64   /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE  64        /* With FCS */
 #define ETHERNET_FCS_SIZE            4
 #define MINIMUM_ETHERNET_PACKET_SIZE \
     (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
 #define CRC_LENGTH                   ETHERNET_FCS_SIZE
 #define MAX_JUMBO_FRAME_SIZE         0x3F00
 
-
 /* 802.1q VLAN Packet Sizes */
-#define VLAN_TAG_SIZE  4     /* 802.3ac tag (not DMAed) */
+#define VLAN_TAG_SIZE  4       /* 802.3ac tag (not DMAed) */
 
 /* Ethertype field values */
-#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
-#define ETHERNET_IP_TYPE        0x0800  /* IP packets */
-#define ETHERNET_ARP_TYPE       0x0806  /* Address Resolution Protocol (ARP) */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+#define ETHERNET_IP_TYPE        0x0800 /* IP packets */
+#define ETHERNET_ARP_TYPE       0x0806 /* Address Resolution Protocol (ARP) */
 
 /* Packet Header defines */
 #define IP_PROTOCOL_TCP    6
@@ -567,15 +506,6 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
     E1000_IMS_RXSEQ  |    \
     E1000_IMS_LSC)
 
-/* Additional interrupts need to be handled for e1000_ich8lan:
-    DSW = The FW changed the status of the DISSW bit in FWSM
-    PHYINT = The LAN connected device generates an interrupt
-    EPRST = Manageability reset event */
-#define IMS_ICH8LAN_ENABLE_MASK (\
-    E1000_IMS_DSW   | \
-    E1000_IMS_PHYINT | \
-    E1000_IMS_EPRST)
-
 /* Number of high/low register pairs in the RAR. The RAR (Receive Address
  * Registers) holds the directed and multicast addresses that we monitor. We
  * reserve one of these spots for our directed address, allowing us room for
@@ -583,100 +513,98 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
  */
 #define E1000_RAR_ENTRIES 15
 
-#define E1000_RAR_ENTRIES_ICH8LAN  6
-
 #define MIN_NUMBER_OF_DESCRIPTORS  8
 #define MAX_NUMBER_OF_DESCRIPTORS  0xFFF8
 
 /* Receive Descriptor */
 struct e1000_rx_desc {
-    __le64 buffer_addr; /* Address of the descriptor's data buffer */
-    __le16 length;     /* Length of data DMAed into data buffer */
-    __le16 csum;       /* Packet checksum */
-    u8 status;      /* Descriptor status */
-    u8 errors;      /* Descriptor Errors */
-    __le16 special;
+       __le64 buffer_addr;     /* Address of the descriptor's data buffer */
+       __le16 length;          /* Length of data DMAed into data buffer */
+       __le16 csum;            /* Packet checksum */
+       u8 status;              /* Descriptor status */
+       u8 errors;              /* Descriptor Errors */
+       __le16 special;
 };
 
 /* Receive Descriptor - Extended */
 union e1000_rx_desc_extended {
-    struct {
-        __le64 buffer_addr;
-        __le64 reserved;
-    } read;
-    struct {
-        struct {
-            __le32 mrq;              /* Multiple Rx Queues */
-            union {
-                __le32 rss;          /* RSS Hash */
-                struct {
-                    __le16 ip_id;    /* IP id */
-                    __le16 csum;     /* Packet Checksum */
-                } csum_ip;
-            } hi_dword;
-        } lower;
-        struct {
-            __le32 status_error;     /* ext status/error */
-            __le16 length;
-            __le16 vlan;             /* VLAN tag */
-        } upper;
-    } wb;  /* writeback */
+       struct {
+               __le64 buffer_addr;
+               __le64 reserved;
+       } read;
+       struct {
+               struct {
+                       __le32 mrq;     /* Multiple Rx Queues */
+                       union {
+                               __le32 rss;     /* RSS Hash */
+                               struct {
+                                       __le16 ip_id;   /* IP id */
+                                       __le16 csum;    /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error;    /* ext status/error */
+                       __le16 length;
+                       __le16 vlan;    /* VLAN tag */
+               } upper;
+       } wb;                   /* writeback */
 };
 
 #define MAX_PS_BUFFERS 4
 /* Receive Descriptor - Packet Split */
 union e1000_rx_desc_packet_split {
-    struct {
-        /* one buffer for protocol header(s), three data buffers */
-        __le64 buffer_addr[MAX_PS_BUFFERS];
-    } read;
-    struct {
-        struct {
-            __le32 mrq;              /* Multiple Rx Queues */
-            union {
-                __le32 rss;          /* RSS Hash */
-                struct {
-                    __le16 ip_id;    /* IP id */
-                    __le16 csum;     /* Packet Checksum */
-                } csum_ip;
-            } hi_dword;
-        } lower;
-        struct {
-            __le32 status_error;     /* ext status/error */
-            __le16 length0;          /* length of buffer 0 */
-            __le16 vlan;             /* VLAN tag */
-        } middle;
-        struct {
-            __le16 header_status;
-            __le16 length[3];        /* length of buffers 1-3 */
-        } upper;
-        __le64 reserved;
-    } wb; /* writeback */
+       struct {
+               /* one buffer for protocol header(s), three data buffers */
+               __le64 buffer_addr[MAX_PS_BUFFERS];
+       } read;
+       struct {
+               struct {
+                       __le32 mrq;     /* Multiple Rx Queues */
+                       union {
+                               __le32 rss;     /* RSS Hash */
+                               struct {
+                                       __le16 ip_id;   /* IP id */
+                                       __le16 csum;    /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error;    /* ext status/error */
+                       __le16 length0; /* length of buffer 0 */
+                       __le16 vlan;    /* VLAN tag */
+               } middle;
+               struct {
+                       __le16 header_status;
+                       __le16 length[3];       /* length of buffers 1-3 */
+               } upper;
+               __le64 reserved;
+       } wb;                   /* writeback */
 };
 
-/* Receive Decriptor bit definitions */
-#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
-#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
-#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
-#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum caculated */
-#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
-#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
-#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
-#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
-#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
-#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
-#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
-#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
-#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
-#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
-#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
-#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
-#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
-#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01   /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02   /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04   /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08   /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10   /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20   /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40   /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80   /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV    0x200  /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400  /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK      0x8000 /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01   /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02   /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04   /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10   /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20   /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40   /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80   /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000 /* Priority is in upper 3 bits */
 #define E1000_RXD_SPC_PRI_SHIFT 13
-#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_MASK  0x1000 /* CFI is bit 12 */
 #define E1000_RXD_SPC_CFI_SHIFT 12
 
 #define E1000_RXDEXT_STATERR_CE    0x01000000
@@ -698,7 +626,6 @@ union e1000_rx_desc_packet_split {
     E1000_RXD_ERR_CXE |                \
     E1000_RXD_ERR_RXE)
 
-
 /* Same mask, but for extended and packet split descriptors */
 #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
     E1000_RXDEXT_STATERR_CE  |            \
@@ -707,152 +634,145 @@ union e1000_rx_desc_packet_split {
     E1000_RXDEXT_STATERR_CXE |            \
     E1000_RXDEXT_STATERR_RXE)
 
-
 /* Transmit Descriptor */
 struct e1000_tx_desc {
-    __le64 buffer_addr;       /* Address of the descriptor's data buffer */
-    union {
-        __le32 data;
-        struct {
-            __le16 length;    /* Data buffer length */
-            u8 cso;        /* Checksum offset */
-            u8 cmd;        /* Descriptor control */
-        } flags;
-    } lower;
-    union {
-        __le32 data;
-        struct {
-            u8 status;     /* Descriptor status */
-            u8 css;        /* Checksum start */
-            __le16 special;
-        } fields;
-    } upper;
+       __le64 buffer_addr;     /* Address of the descriptor's data buffer */
+       union {
+               __le32 data;
+               struct {
+                       __le16 length;  /* Data buffer length */
+                       u8 cso; /* Checksum offset */
+                       u8 cmd; /* Descriptor control */
+               } flags;
+       } lower;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;      /* Descriptor status */
+                       u8 css; /* Checksum start */
+                       __le16 special;
+               } fields;
+       } upper;
 };
 
 /* Transmit Descriptor bit definitions */
-#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
-#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
-#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
-#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+#define E1000_TXD_DTYP_D     0x00100000        /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000        /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01      /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02      /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000        /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000        /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000        /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000        /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000        /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000        /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000        /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000        /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001        /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002        /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004        /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008        /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000        /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000        /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000        /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004        /* Tx Underrun */
 
 /* Offload Context Descriptor */
 struct e1000_context_desc {
-    union {
-        __le32 ip_config;
-        struct {
-            u8 ipcss;      /* IP checksum start */
-            u8 ipcso;      /* IP checksum offset */
-            __le16 ipcse;     /* IP checksum end */
-        } ip_fields;
-    } lower_setup;
-    union {
-        __le32 tcp_config;
-        struct {
-            u8 tucss;      /* TCP checksum start */
-            u8 tucso;      /* TCP checksum offset */
-            __le16 tucse;     /* TCP checksum end */
-        } tcp_fields;
-    } upper_setup;
-    __le32 cmd_and_length;    /* */
-    union {
-        __le32 data;
-        struct {
-            u8 status;     /* Descriptor status */
-            u8 hdr_len;    /* Header length */
-            __le16 mss;       /* Maximum segment size */
-        } fields;
-    } tcp_seg_setup;
+       union {
+               __le32 ip_config;
+               struct {
+                       u8 ipcss;       /* IP checksum start */
+                       u8 ipcso;       /* IP checksum offset */
+                       __le16 ipcse;   /* IP checksum end */
+               } ip_fields;
+       } lower_setup;
+       union {
+               __le32 tcp_config;
+               struct {
+                       u8 tucss;       /* TCP checksum start */
+                       u8 tucso;       /* TCP checksum offset */
+                       __le16 tucse;   /* TCP checksum end */
+               } tcp_fields;
+       } upper_setup;
+       __le32 cmd_and_length;  /* */
+       union {
+               __le32 data;
+               struct {
+                       u8 status;      /* Descriptor status */
+                       u8 hdr_len;     /* Header length */
+                       __le16 mss;     /* Maximum segment size */
+               } fields;
+       } tcp_seg_setup;
 };
 
 /* Offload data descriptor */
 struct e1000_data_desc {
-    __le64 buffer_addr;       /* Address of the descriptor's buffer address */
-    union {
-        __le32 data;
-        struct {
-            __le16 length;    /* Data buffer length */
-            u8 typ_len_ext;        /* */
-            u8 cmd;        /* */
-        } flags;
-    } lower;
-    union {
-        __le32 data;
-        struct {
-            u8 status;     /* Descriptor status */
-            u8 popts;      /* Packet Options */
-            __le16 special;   /* */
-        } fields;
-    } upper;
+       __le64 buffer_addr;     /* Address of the descriptor's buffer address */
+       union {
+               __le32 data;
+               struct {
+                       __le16 length;  /* Data buffer length */
+                       u8 typ_len_ext; /* */
+                       u8 cmd; /* */
+               } flags;
+       } lower;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;      /* Descriptor status */
+                       u8 popts;       /* Packet Options */
+                       __le16 special; /* */
+               } fields;
+       } upper;
 };
 
 /* Filters */
-#define E1000_NUM_UNICAST          16   /* Unicast filter entries */
-#define E1000_MC_TBL_SIZE          128  /* Multicast Filter Table (4096 bits) */
-#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
-
-#define E1000_NUM_UNICAST_ICH8LAN  7
-#define E1000_MC_TBL_SIZE_ICH8LAN  32
-
+#define E1000_NUM_UNICAST          16  /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE          128 /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
 
 /* Receive Address Register */
 struct e1000_rar {
-    volatile __le32 low;      /* receive address low */
-    volatile __le32 high;     /* receive address high */
+       volatile __le32 low;    /* receive address low */
+       volatile __le32 high;   /* receive address high */
 };
 
 /* Number of entries in the Multicast Table Array (MTA). */
 #define E1000_NUM_MTA_REGISTERS 128
-#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
 
 /* IPv4 Address Table Entry */
 struct e1000_ipv4_at_entry {
-    volatile u32 ipv4_addr;        /* IP Address (RW) */
-    volatile u32 reserved;
+       volatile u32 ipv4_addr; /* IP Address (RW) */
+       volatile u32 reserved;
 };
 
 /* Four wakeup IP addresses are supported */
 #define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
 #define E1000_IP4AT_SIZE                  E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
-#define E1000_IP4AT_SIZE_ICH8LAN          3
 #define E1000_IP6AT_SIZE                  1
 
 /* IPv6 Address Table Entry */
 struct e1000_ipv6_at_entry {
-    volatile u8 ipv6_addr[16];
+       volatile u8 ipv6_addr[16];
 };
 
 /* Flexible Filter Length Table Entry */
 struct e1000_fflt_entry {
-    volatile u32 length;   /* Flexible Filter Length (RW) */
-    volatile u32 reserved;
+       volatile u32 length;    /* Flexible Filter Length (RW) */
+       volatile u32 reserved;
 };
 
 /* Flexible Filter Mask Table Entry */
 struct e1000_ffmt_entry {
-    volatile u32 mask;     /* Flexible Filter Mask (RW) */
-    volatile u32 reserved;
+       volatile u32 mask;      /* Flexible Filter Mask (RW) */
+       volatile u32 reserved;
 };
 
 /* Flexible Filter Value Table Entry */
 struct e1000_ffvt_entry {
-    volatile u32 value;    /* Flexible Filter Value (RW) */
-    volatile u32 reserved;
+       volatile u32 value;     /* Flexible Filter Value (RW) */
+       volatile u32 reserved;
 };
 
 /* Four Flexible Filters are supported */
@@ -879,211 +799,211 @@ struct e1000_ffvt_entry {
  * R/clr - register is read only and is cleared when read
  * A - register array
  */
-#define E1000_CTRL     0x00000  /* Device Control - RW */
-#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
-#define E1000_STATUS   0x00008  /* Device Status - RO */
-#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
-#define E1000_EERD     0x00014  /* EEPROM Read - RW */
-#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
-#define E1000_FLA      0x0001C  /* Flash Access - RW */
-#define E1000_MDIC     0x00020  /* MDI Control - RW */
-#define E1000_SCTL     0x00024  /* SerDes Control - RW */
-#define E1000_FEXTNVM  0x00028  /* Future Extended NVM register */
-#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
-#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
-#define E1000_FCT      0x00030  /* Flow Control Type - RW */
-#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
-#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
-#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
-#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
-#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
-#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
-#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
-#define E1000_RCTL     0x00100  /* RX Control - RW */
-#define E1000_RDTR1    0x02820  /* RX Delay Timer (1) - RW */
-#define E1000_RDBAL1   0x02900  /* RX Descriptor Base Address Low (1) - RW */
-#define E1000_RDBAH1   0x02904  /* RX Descriptor Base Address High (1) - RW */
-#define E1000_RDLEN1   0x02908  /* RX Descriptor Length (1) - RW */
-#define E1000_RDH1     0x02910  /* RX Descriptor Head (1) - RW */
-#define E1000_RDT1     0x02918  /* RX Descriptor Tail (1) - RW */
-#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
-#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
-#define E1000_RXCW     0x00180  /* RX Configuration Word - RO */
-#define E1000_TCTL     0x00400  /* TX Control - RW */
-#define E1000_TCTL_EXT 0x00404  /* Extended TX Control - RW */
-#define E1000_TIPG     0x00410  /* TX Inter-packet gap -RW */
-#define E1000_TBT      0x00448  /* TX Burst Timer - RW */
-#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
-#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
-#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
-#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
-#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define E1000_CTRL     0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008 /* Device Status - RO */
+#define E1000_EECD     0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C /* Flash Access - RW */
+#define E1000_MDIC     0x00020 /* MDI Control - RW */
+#define E1000_SCTL     0x00024 /* SerDes Control - RW */
+#define E1000_FEXTNVM  0x00028 /* Future Extended NVM register */
+#define E1000_FCAL     0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT      0x00030 /* Flow Control Type - RW */
+#define E1000_VET      0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100 /* RX Control - RW */
+#define E1000_RDTR1    0x02820 /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1   0x02900 /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1   0x02904 /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1   0x02908 /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1     0x02910 /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1     0x02918 /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV    0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178 /* TX Configuration Word - RW */
+#define E1000_RXCW     0x00180 /* RX Configuration Word - RO */
+#define E1000_TCTL     0x00400 /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
+#define E1000_TIPG     0x00410 /* TX Inter-packet gap -RW */
+#define E1000_TBT      0x00448 /* TX Burst Timer - RW */
+#define E1000_AIT      0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00     /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08     /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10     /* PHY Control Register in CSR */
 #define FEXTNVM_SW_CONFIG  0x0001
-#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
-#define E1000_PBS      0x01008  /* Packet Buffer Size */
-#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_PBA      0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008 /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
 #define E1000_FLASH_UPDATES 1000
-#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
-#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
-#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
-#define E1000_FLSWCTL  0x01030  /* FLASH control register */
-#define E1000_FLSWDATA 0x01034  /* FLASH data register */
-#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
-#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
-#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
-#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
-#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
-#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
-#define E1000_RDBAL    0x02800  /* RX Descriptor Base Address Low - RW */
-#define E1000_RDBAH    0x02804  /* RX Descriptor Base Address High - RW */
-#define E1000_RDLEN    0x02808  /* RX Descriptor Length - RW */
-#define E1000_RDH      0x02810  /* RX Descriptor Head - RW */
-#define E1000_RDT      0x02818  /* RX Descriptor Tail - RW */
-#define E1000_RDTR     0x02820  /* RX Delay Timer - RW */
-#define E1000_RDBAL0   E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
-#define E1000_RDBAH0   E1000_RDBAH /* RX Desc Base Address High (0) - RW */
-#define E1000_RDLEN0   E1000_RDLEN /* RX Desc Length (0) - RW */
-#define E1000_RDH0     E1000_RDH   /* RX Desc Head (0) - RW */
-#define E1000_RDT0     E1000_RDT   /* RX Desc Tail (0) - RW */
-#define E1000_RDTR0    E1000_RDTR  /* RX Delay Timer (0) - RW */
-#define E1000_RXDCTL   0x02828  /* RX Descriptor Control queue 0 - RW */
-#define E1000_RXDCTL1  0x02928  /* RX Descriptor Control queue 1 - RW */
-#define E1000_RADV     0x0282C  /* RX Interrupt Absolute Delay Timer - RW */
-#define E1000_RSRPD    0x02C00  /* RX Small Packet Detect - RW */
-#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
-#define E1000_TXDMAC   0x03000  /* TX DMA Control - RW */
-#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
-#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
-#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
-#define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
-#define E1000_TDFTS    0x03428  /* TX Data FIFO Tail Saved - RW */
-#define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
-#define E1000_TDBAL    0x03800  /* TX Descriptor Base Address Low - RW */
-#define E1000_TDBAH    0x03804  /* TX Descriptor Base Address High - RW */
-#define E1000_TDLEN    0x03808  /* TX Descriptor Length - RW */
-#define E1000_TDH      0x03810  /* TX Descriptor Head - RW */
-#define E1000_TDT      0x03818  /* TX Descripotr Tail - RW */
-#define E1000_TIDV     0x03820  /* TX Interrupt Delay Value - RW */
-#define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
-#define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
-#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
-#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
-#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
-#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
-#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
-#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
-#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
-#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
-#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
-#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
-#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
-#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
-#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
-#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
-#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
-#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
-#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
-#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
-#define E1000_COLC     0x04028  /* Collision Count - R/clr */
-#define E1000_DC       0x04030  /* Defer Count - R/clr */
-#define E1000_TNCRS    0x04034  /* TX-No CRS - R/clr */
-#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
-#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
-#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
-#define E1000_XONRXC   0x04048  /* XON RX Count - R/clr */
-#define E1000_XONTXC   0x0404C  /* XON TX Count - R/clr */
-#define E1000_XOFFRXC  0x04050  /* XOFF RX Count - R/clr */
-#define E1000_XOFFTXC  0x04054  /* XOFF TX Count - R/clr */
-#define E1000_FCRUC    0x04058  /* Flow Control RX Unsupported Count- R/clr */
-#define E1000_PRC64    0x0405C  /* Packets RX (64 bytes) - R/clr */
-#define E1000_PRC127   0x04060  /* Packets RX (65-127 bytes) - R/clr */
-#define E1000_PRC255   0x04064  /* Packets RX (128-255 bytes) - R/clr */
-#define E1000_PRC511   0x04068  /* Packets RX (255-511 bytes) - R/clr */
-#define E1000_PRC1023  0x0406C  /* Packets RX (512-1023 bytes) - R/clr */
-#define E1000_PRC1522  0x04070  /* Packets RX (1024-1522 bytes) - R/clr */
-#define E1000_GPRC     0x04074  /* Good Packets RX Count - R/clr */
-#define E1000_BPRC     0x04078  /* Broadcast Packets RX Count - R/clr */
-#define E1000_MPRC     0x0407C  /* Multicast Packets RX Count - R/clr */
-#define E1000_GPTC     0x04080  /* Good Packets TX Count - R/clr */
-#define E1000_GORCL    0x04088  /* Good Octets RX Count Low - R/clr */
-#define E1000_GORCH    0x0408C  /* Good Octets RX Count High - R/clr */
-#define E1000_GOTCL    0x04090  /* Good Octets TX Count Low - R/clr */
-#define E1000_GOTCH    0x04094  /* Good Octets TX Count High - R/clr */
-#define E1000_RNBC     0x040A0  /* RX No Buffers Count - R/clr */
-#define E1000_RUC      0x040A4  /* RX Undersize Count - R/clr */
-#define E1000_RFC      0x040A8  /* RX Fragment Count - R/clr */
-#define E1000_ROC      0x040AC  /* RX Oversize Count - R/clr */
-#define E1000_RJC      0x040B0  /* RX Jabber Count - R/clr */
-#define E1000_MGTPRC   0x040B4  /* Management Packets RX Count - R/clr */
-#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
-#define E1000_MGTPTC   0x040BC  /* Management Packets TX Count - R/clr */
-#define E1000_TORL     0x040C0  /* Total Octets RX Low - R/clr */
-#define E1000_TORH     0x040C4  /* Total Octets RX High - R/clr */
-#define E1000_TOTL     0x040C8  /* Total Octets TX Low - R/clr */
-#define E1000_TOTH     0x040CC  /* Total Octets TX High - R/clr */
-#define E1000_TPR      0x040D0  /* Total Packets RX - R/clr */
-#define E1000_TPT      0x040D4  /* Total Packets TX - R/clr */
-#define E1000_PTC64    0x040D8  /* Packets TX (64 bytes) - R/clr */
-#define E1000_PTC127   0x040DC  /* Packets TX (65-127 bytes) - R/clr */
-#define E1000_PTC255   0x040E0  /* Packets TX (128-255 bytes) - R/clr */
-#define E1000_PTC511   0x040E4  /* Packets TX (256-511 bytes) - R/clr */
-#define E1000_PTC1023  0x040E8  /* Packets TX (512-1023 bytes) - R/clr */
-#define E1000_PTC1522  0x040EC  /* Packets TX (1024-1522 Bytes) - R/clr */
-#define E1000_MPTC     0x040F0  /* Multicast Packets TX Count - R/clr */
-#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
-#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
-#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
-#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
-#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
-#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
-#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
-#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
-#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
-#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
-#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
-#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
-#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
-#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
-#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
-#define E1000_RA       0x05400  /* Receive Address - RW Array */
-#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
-#define E1000_WUC      0x05800  /* Wakeup Control - RW */
-#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
-#define E1000_WUS      0x05810  /* Wakeup Status - RO */
-#define E1000_MANC     0x05820  /* Management Control - RW */
-#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
-#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
-#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
-#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
-#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
-#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
-#define E1000_HOST_IF  0x08800  /* Host Interface */
-#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
-#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
-
-#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
-#define E1000_MDPHYA     0x0003C  /* PHY address - RW */
-#define E1000_MANC2H     0x05860  /* Managment Control To Host - RW */
-#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
-
-#define E1000_GCR       0x05B00 /* PCI-Ex Control */
-#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
-#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
-#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
-#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
-#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
-#define E1000_SWSM      0x05B50 /* SW Semaphore */
-#define E1000_FWSM      0x05B54 /* FW Semaphore */
-#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
-#define E1000_HICR      0x08F00 /* Host Inteface Control */
+#define E1000_EEARBC   0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028 /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030 /* FLASH control register */
+#define E1000_FLSWDATA 0x01034 /* FLASH data register */
+#define E1000_FLSWCNT  0x01038 /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C /* FLASH Opcode Register */
+#define E1000_ERT      0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDBAL    0x02800 /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH    0x02804 /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN    0x02808 /* RX Descriptor Length - RW */
+#define E1000_RDH      0x02810 /* RX Descriptor Head - RW */
+#define E1000_RDT      0x02818 /* RX Descriptor Tail - RW */
+#define E1000_RDTR     0x02820 /* RX Delay Timer - RW */
+#define E1000_RDBAL0   E1000_RDBAL     /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0   E1000_RDBAH     /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0   E1000_RDLEN     /* RX Desc Length (0) - RW */
+#define E1000_RDH0     E1000_RDH       /* RX Desc Head (0) - RW */
+#define E1000_RDT0     E1000_RDT       /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0    E1000_RDTR      /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL   0x02828 /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1  0x02928 /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV     0x0282C /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD    0x02C00 /* RX Small Packet Detect - RW */
+#define E1000_RAID     0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000 /* TX DMA Control - RW */
+#define E1000_KABGTXD  0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH     0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFT     0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428 /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL    0x03800 /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH    0x03804 /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN    0x03808 /* TX Descriptor Length - RW */
+#define E1000_TDH      0x03810 /* TX Descriptor Head - RW */
+#define E1000_TDT      0x03818 /* TX Descriptor Tail - RW */
+#define E1000_TIDV     0x03820 /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL   0x03828 /* TX Descriptor Control - RW */
+#define E1000_TADV     0x0382C /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0    0x03840 /* TX Arbitration Count (0) */
+#define E1000_TDBAL1   0x03900 /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1   0x03904 /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1   0x03908 /* TX Desc Length (1) - RW */
+#define E1000_TDH1     0x03910 /* TX Desc Head (1) - RW */
+#define E1000_TDT1     0x03918 /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1  0x03928 /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1    0x03940 /* TX Arbitration Count (1) */
+#define E1000_CRCERRS  0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028 /* Collision Count - R/clr */
+#define E1000_DC       0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC      0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC   0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC  0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC  0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC    0x04058 /* Flow Control RX Unsupported Count - R/clr */
+#define E1000_PRC64    0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068 /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC     0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC     0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC     0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL    0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH    0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL    0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH    0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC     0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC      0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC      0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC      0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC   0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL     0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH     0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL     0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH     0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR      0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT      0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64    0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC     0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC    0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC   0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC      0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM   0x05000 /* RX Checksum Control - RW */
+#define E1000_RFCTL    0x05008 /* Receive Filter Control */
+#define E1000_MTA      0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA     0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC      0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810 /* Wakeup Status - RO */
+#define E1000_MANC     0x05820 /* Management Control - RW */
+#define E1000_IPAV     0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT     0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800 /* Host Interface */
+#define E1000_FFMT     0x09000 /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800 /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034       /* MAC-PHY interface - RW */
+#define E1000_MDPHYA     0x0003C       /* PHY address - RW */
+#define E1000_MANC2H     0x05860       /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C       /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR       0x05B00        /* PCI-Ex Control */
+#define E1000_GSCL_1    0x05B10        /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14        /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18        /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C        /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30        /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50        /* SW Semaphore */
+#define E1000_FWSM      0x05B54        /* FW Semaphore */
+#define E1000_FFLT_DBG  0x05F04        /* Debug Register */
+#define E1000_HICR      0x08F00        /* Host Interface Control */
 
 /* RSS registers */
-#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
-#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
-#define E1000_RETA      0x05C00 /* Redirection Table - RW Array */
-#define E1000_RSSRK     0x05C80 /* RSS Random Key - RW Array */
-#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
-#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+#define E1000_CPUVEC    0x02C10        /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818        /* Multiple Receive Control - RW */
+#define E1000_RETA      0x05C00        /* Redirection Table - RW Array */
+#define E1000_RSSRK     0x05C80        /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864        /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868        /* RSS Interrupt Request */
 /* Register Set (82542)
  *
  * Some of the 82542 registers are located at different offsets than they are
@@ -1123,19 +1043,19 @@ struct e1000_ffvt_entry {
 #define E1000_82542_RDLEN0   E1000_82542_RDLEN
 #define E1000_82542_RDH0     E1000_82542_RDH
 #define E1000_82542_RDT0     E1000_82542_RDT
-#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
-                                                       * RX Control - RW */
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8))  /* Split and Replication
+                                                        * RX Control - RW */
 #define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
-#define E1000_82542_RDBAH3   0x02B04 /* RX Desc Base High Queue 3 - RW */
-#define E1000_82542_RDBAL3   0x02B00 /* RX Desc Low Queue 3 - RW */
-#define E1000_82542_RDLEN3   0x02B08 /* RX Desc Length Queue 3 - RW */
-#define E1000_82542_RDH3     0x02B10 /* RX Desc Head Queue 3 - RW */
-#define E1000_82542_RDT3     0x02B18 /* RX Desc Tail Queue 3 - RW */
-#define E1000_82542_RDBAL2   0x02A00 /* RX Desc Base Low Queue 2 - RW */
-#define E1000_82542_RDBAH2   0x02A04 /* RX Desc Base High Queue 2 - RW */
-#define E1000_82542_RDLEN2   0x02A08 /* RX Desc Length Queue 2 - RW */
-#define E1000_82542_RDH2     0x02A10 /* RX Desc Head Queue 2 - RW */
-#define E1000_82542_RDT2     0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDBAH3   0x02B04   /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3   0x02B00   /* RX Desc Low Queue 3 - RW */
+#define E1000_82542_RDLEN3   0x02B08   /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3     0x02B10   /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3     0x02B18   /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2   0x02A00   /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2   0x02A04   /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2   0x02A08   /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2     0x02A10   /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2     0x02A18   /* RX Desc Tail Queue 2 - RW */
 #define E1000_82542_RDTR1    0x00130
 #define E1000_82542_RDBAL1   0x00138
 #define E1000_82542_RDBAH1   0x0013C
@@ -1302,288 +1222,281 @@ struct e1000_ffvt_entry {
 #define E1000_82542_RSSIR       E1000_RSSIR
 #define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
 #define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
-#define E1000_82542_MANC2H      E1000_MANC2H
 
 /* Statistics counters collected by the MAC */
 struct e1000_hw_stats {
-       u64             crcerrs;
-       u64             algnerrc;
-       u64             symerrs;
-       u64             rxerrc;
-       u64             txerrc;
-       u64             mpc;
-       u64             scc;
-       u64             ecol;
-       u64             mcc;
-       u64             latecol;
-       u64             colc;
-       u64             dc;
-       u64             tncrs;
-       u64             sec;
-       u64             cexterr;
-       u64             rlec;
-       u64             xonrxc;
-       u64             xontxc;
-       u64             xoffrxc;
-       u64             xofftxc;
-       u64             fcruc;
-       u64             prc64;
-       u64             prc127;
-       u64             prc255;
-       u64             prc511;
-       u64             prc1023;
-       u64             prc1522;
-       u64             gprc;
-       u64             bprc;
-       u64             mprc;
-       u64             gptc;
-       u64             gorcl;
-       u64             gorch;
-       u64             gotcl;
-       u64             gotch;
-       u64             rnbc;
-       u64             ruc;
-       u64             rfc;
-       u64             roc;
-       u64             rlerrc;
-       u64             rjc;
-       u64             mgprc;
-       u64             mgpdc;
-       u64             mgptc;
-       u64             torl;
-       u64             torh;
-       u64             totl;
-       u64             toth;
-       u64             tpr;
-       u64             tpt;
-       u64             ptc64;
-       u64             ptc127;
-       u64             ptc255;
-       u64             ptc511;
-       u64             ptc1023;
-       u64             ptc1522;
-       u64             mptc;
-       u64             bptc;
-       u64             tsctc;
-       u64             tsctfc;
-       u64             iac;
-       u64             icrxptc;
-       u64             icrxatc;
-       u64             ictxptc;
-       u64             ictxatc;
-       u64             ictxqec;
-       u64             ictxqmtc;
-       u64             icrxdmtc;
-       u64             icrxoc;
+       u64 crcerrs;
+       u64 algnerrc;
+       u64 symerrs;
+       u64 rxerrc;
+       u64 txerrc;
+       u64 mpc;
+       u64 scc;
+       u64 ecol;
+       u64 mcc;
+       u64 latecol;
+       u64 colc;
+       u64 dc;
+       u64 tncrs;
+       u64 sec;
+       u64 cexterr;
+       u64 rlec;
+       u64 xonrxc;
+       u64 xontxc;
+       u64 xoffrxc;
+       u64 xofftxc;
+       u64 fcruc;
+       u64 prc64;
+       u64 prc127;
+       u64 prc255;
+       u64 prc511;
+       u64 prc1023;
+       u64 prc1522;
+       u64 gprc;
+       u64 bprc;
+       u64 mprc;
+       u64 gptc;
+       u64 gorcl;
+       u64 gorch;
+       u64 gotcl;
+       u64 gotch;
+       u64 rnbc;
+       u64 ruc;
+       u64 rfc;
+       u64 roc;
+       u64 rlerrc;
+       u64 rjc;
+       u64 mgprc;
+       u64 mgpdc;
+       u64 mgptc;
+       u64 torl;
+       u64 torh;
+       u64 totl;
+       u64 toth;
+       u64 tpr;
+       u64 tpt;
+       u64 ptc64;
+       u64 ptc127;
+       u64 ptc255;
+       u64 ptc511;
+       u64 ptc1023;
+       u64 ptc1522;
+       u64 mptc;
+       u64 bptc;
+       u64 tsctc;
+       u64 tsctfc;
+       u64 iac;
+       u64 icrxptc;
+       u64 icrxatc;
+       u64 ictxptc;
+       u64 ictxatc;
+       u64 ictxqec;
+       u64 ictxqmtc;
+       u64 icrxdmtc;
+       u64 icrxoc;
 };
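
The counters collected here all come from the read-to-clear ("R/clr") registers defined above, so software has to fold each hardware read into the running 64-bit totals rather than overwrite them. A minimal sketch of that pattern is shown below; it assumes the kernel's readl() MMIO accessor and the hw_addr mapping from struct e1000_hw (defined just after this), and the helper name is illustrative rather than the driver's actual statistics routine.

/* Illustrative only: accumulate a few of the R/clr counters.  Each
 * readl() also clears the hardware register, so the value is added to
 * the 64-bit software totals instead of replacing them.
 */
#include <linux/io.h>

static void e1000_fold_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats)
{
	stats->crcerrs += readl(hw->hw_addr + E1000_CRCERRS);
	stats->mpc     += readl(hw->hw_addr + E1000_MPC);
	stats->gprc    += readl(hw->hw_addr + E1000_GPRC);
	stats->gptc    += readl(hw->hw_addr + E1000_GPTC);
	stats->tpr     += readl(hw->hw_addr + E1000_TPR);
	stats->tpt     += readl(hw->hw_addr + E1000_TPT);
}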
 
 /* Structure containing variables used by the shared code (e1000_hw.c) */
 struct e1000_hw {
-       u8 __iomem              *hw_addr;
-       u8 __iomem              *flash_address;
-       e1000_mac_type          mac_type;
-       e1000_phy_type          phy_type;
-       u32             phy_init_script;
-       e1000_media_type        media_type;
-       void                    *back;
-       struct e1000_shadow_ram *eeprom_shadow_ram;
-       u32             flash_bank_size;
-       u32             flash_base_addr;
-       e1000_fc_type           fc;
-       e1000_bus_speed         bus_speed;
-       e1000_bus_width         bus_width;
-       e1000_bus_type          bus_type;
+       u8 __iomem *hw_addr;
+       u8 __iomem *flash_address;
+       e1000_mac_type mac_type;
+       e1000_phy_type phy_type;
+       u32 phy_init_script;
+       e1000_media_type media_type;
+       void *back;
+       struct e1000_shadow_ram *eeprom_shadow_ram;
+       u32 flash_bank_size;
+       u32 flash_base_addr;
+       e1000_fc_type fc;
+       e1000_bus_speed bus_speed;
+       e1000_bus_width bus_width;
+       e1000_bus_type bus_type;
        struct e1000_eeprom_info eeprom;
-       e1000_ms_type           master_slave;
-       e1000_ms_type           original_master_slave;
-       e1000_ffe_config        ffe_config_state;
-       u32             asf_firmware_present;
-       u32             eeprom_semaphore_present;
-       u32             swfw_sync_present;
-       u32             swfwhw_semaphore_present;
-       unsigned long           io_base;
-       u32             phy_id;
-       u32             phy_revision;
-       u32             phy_addr;
-       u32             original_fc;
-       u32             txcw;
-       u32             autoneg_failed;
-       u32             max_frame_size;
-       u32             min_frame_size;
-       u32             mc_filter_type;
-       u32             num_mc_addrs;
-       u32             collision_delta;
-       u32             tx_packet_delta;
-       u32             ledctl_default;
-       u32             ledctl_mode1;
-       u32             ledctl_mode2;
-       bool                    tx_pkt_filtering;
+       e1000_ms_type master_slave;
+       e1000_ms_type original_master_slave;
+       e1000_ffe_config ffe_config_state;
+       u32 asf_firmware_present;
+       u32 eeprom_semaphore_present;
+       unsigned long io_base;
+       u32 phy_id;
+       u32 phy_revision;
+       u32 phy_addr;
+       u32 original_fc;
+       u32 txcw;
+       u32 autoneg_failed;
+       u32 max_frame_size;
+       u32 min_frame_size;
+       u32 mc_filter_type;
+       u32 num_mc_addrs;
+       u32 collision_delta;
+       u32 tx_packet_delta;
+       u32 ledctl_default;
+       u32 ledctl_mode1;
+       u32 ledctl_mode2;
+       bool tx_pkt_filtering;
        struct e1000_host_mng_dhcp_cookie mng_cookie;
-       u16             phy_spd_default;
-       u16             autoneg_advertised;
-       u16             pci_cmd_word;
-       u16             fc_high_water;
-       u16             fc_low_water;
-       u16             fc_pause_time;
-       u16             current_ifs_val;
-       u16             ifs_min_val;
-       u16             ifs_max_val;
-       u16             ifs_step_size;
-       u16             ifs_ratio;
-       u16             device_id;
-       u16             vendor_id;
-       u16             subsystem_id;
-       u16             subsystem_vendor_id;
-       u8                      revision_id;
-       u8                      autoneg;
-       u8                      mdix;
-       u8                      forced_speed_duplex;
-       u8                      wait_autoneg_complete;
-       u8                      dma_fairness;
-       u8                      mac_addr[NODE_ADDRESS_SIZE];
-       u8                      perm_mac_addr[NODE_ADDRESS_SIZE];
-       bool                    disable_polarity_correction;
-       bool                    speed_downgraded;
-       e1000_smart_speed       smart_speed;
-       e1000_dsp_config        dsp_config_state;
-       bool                    get_link_status;
-       bool                    serdes_link_down;
-       bool                    tbi_compatibility_en;
-       bool                    tbi_compatibility_on;
-       bool                    laa_is_present;
-       bool                    phy_reset_disable;
-       bool                    initialize_hw_bits_disable;
-       bool                    fc_send_xon;
-       bool                    fc_strict_ieee;
-       bool                    report_tx_early;
-       bool                    adaptive_ifs;
-       bool                    ifs_params_forced;
-       bool                    in_ifs_mode;
-       bool                    mng_reg_access_disabled;
-       bool                    leave_av_bit_off;
-       bool                    kmrn_lock_loss_workaround_disabled;
-       bool                    bad_tx_carr_stats_fd;
-       bool                    has_manc2h;
-       bool                    rx_needs_kicking;
-       bool                    has_smbus;
+       u16 phy_spd_default;
+       u16 autoneg_advertised;
+       u16 pci_cmd_word;
+       u16 fc_high_water;
+       u16 fc_low_water;
+       u16 fc_pause_time;
+       u16 current_ifs_val;
+       u16 ifs_min_val;
+       u16 ifs_max_val;
+       u16 ifs_step_size;
+       u16 ifs_ratio;
+       u16 device_id;
+       u16 vendor_id;
+       u16 subsystem_id;
+       u16 subsystem_vendor_id;
+       u8 revision_id;
+       u8 autoneg;
+       u8 mdix;
+       u8 forced_speed_duplex;
+       u8 wait_autoneg_complete;
+       u8 dma_fairness;
+       u8 mac_addr[NODE_ADDRESS_SIZE];
+       u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+       bool disable_polarity_correction;
+       bool speed_downgraded;
+       e1000_smart_speed smart_speed;
+       e1000_dsp_config dsp_config_state;
+       bool get_link_status;
+       bool serdes_has_link;
+       bool tbi_compatibility_en;
+       bool tbi_compatibility_on;
+       bool laa_is_present;
+       bool phy_reset_disable;
+       bool initialize_hw_bits_disable;
+       bool fc_send_xon;
+       bool fc_strict_ieee;
+       bool report_tx_early;
+       bool adaptive_ifs;
+       bool ifs_params_forced;
+       bool in_ifs_mode;
+       bool mng_reg_access_disabled;
+       bool leave_av_bit_off;
+       bool bad_tx_carr_stats_fd;
+       bool has_smbus;
 };
 
-
-#define E1000_EEPROM_SWDPIN0   0x0001   /* SWDPIN 0 EEPROM Value */
-#define E1000_EEPROM_LED_LOGIC 0x0020   /* Led Logic Word */
-#define E1000_EEPROM_RW_REG_DATA   16   /* Offset to data in EEPROM read/write registers */
-#define E1000_EEPROM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
-#define E1000_EEPROM_RW_REG_START  1    /* First bit for telling part to start operation */
-#define E1000_EEPROM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
-#define E1000_EEPROM_POLL_WRITE    1    /* Flag for polling for write complete */
-#define E1000_EEPROM_POLL_READ     0    /* Flag for polling for read complete */
+#define E1000_EEPROM_SWDPIN0   0x0001  /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020  /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA   16  /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE   2   /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START  1   /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2   /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE    1   /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ     0   /* Flag for polling for read complete */
 /* Register Bit Masks */
 /* Device Control */
-#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
-#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
-#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
-#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
-#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
-#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
-#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
-#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
-#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
-#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
-#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
-#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
-#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
-#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
-#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
-#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
-#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
-#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
-#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
-#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
-#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
-#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
-#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
-#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
-#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
-#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
-#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
-#define E1000_CTRL_RST      0x04000000  /* Global reset */
-#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
-#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
-#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
-#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
-#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
-#define E1000_CTRL_SW2FW_INT 0x02000000  /* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_FD       0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002 /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR    0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004       /*Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010 /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020 /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400 /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000 /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000 /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000    /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000  /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000      /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3  0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000 /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000 /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000 /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000        /* Initiate an interrupt to manageability engine */
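
When software forces the link instead of relying on autonegotiation, the bits above are typically combined: SLU forces the link up, FRCSPD/FRCDPX make the MAC honour the speed and duplex fields, and the SPD_SEL field selects the rate. A minimal read-modify-write sketch under the same assumptions as the statistics example (readl()/writel(), hw->hw_addr; E1000_CTRL itself is defined earlier in this header, and the function name is made up):

/* Illustrative only: force 100 Mb/s full duplex via the Device Control
 * register rather than autonegotiation.
 */
static void e1000_force_100_full(struct e1000_hw *hw)
{
	u32 ctrl = readl(hw->hw_addr + E1000_CTRL);

	ctrl |= E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX;
	ctrl &= ~E1000_CTRL_SPD_SEL;	/* clear the speed select field */
	ctrl |= E1000_CTRL_SPD_100;	/* then force 100 Mb/s */
	ctrl |= E1000_CTRL_FD;		/* full duplex */
	writel(ctrl, hw->hw_addr + E1000_CTRL);
}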
 
 /* Device Status */
-#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
-#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
-#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FD         0x00000001     /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002     /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C     /* PCI Function Mask */
 #define E1000_STATUS_FUNC_SHIFT 2
-#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
-#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
-#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
-#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_FUNC_0     0x00000000     /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004     /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010     /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020     /* TBI mode */
 #define E1000_STATUS_SPEED_MASK 0x000000C0
-#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
-#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
-#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion
-                                                   by EEPROM/Flash */
-#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
-#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state. Clear on write '0'. */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
-#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
-#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
-#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
-#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
-#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
-#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
-#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
-#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
-#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
-#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution disabled */
-#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_SPEED_10   0x00000000     /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040     /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080     /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200  /* Lan Init Completion
+                                                  by EEPROM/Flash */
+#define E1000_STATUS_ASDV       0x00000300     /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI    0x00000800     /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000      /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK    0x00000400     /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800     /* In 66 MHz slot */
+#define E1000_STATUS_BUS64      0x00001000     /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000     /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000     /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000     /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000     /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000     /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000     /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000     /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000   /* RGMII disabled */
 #define E1000_STATUS_FUSE_8       0x04000000
 #define E1000_STATUS_FUSE_9       0x08000000
-#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
-#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+#define E1000_STATUS_SERDES0_DIS  0x10000000   /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000   /* SERDES disabled on port 1 */
 
-/* Constants used to intrepret the masked PCI-X bus speed. */
-#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
-#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
-#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
 
 /* EEPROM/Flash Control */
-#define E1000_EECD_SK        0x00000001 /* EEPROM Clock */
-#define E1000_EECD_CS        0x00000002 /* EEPROM Chip Select */
-#define E1000_EECD_DI        0x00000004 /* EEPROM Data In */
-#define E1000_EECD_DO        0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_SK        0x00000001        /* EEPROM Clock */
+#define E1000_EECD_CS        0x00000002        /* EEPROM Chip Select */
+#define E1000_EECD_DI        0x00000004        /* EEPROM Data In */
+#define E1000_EECD_DO        0x00000008        /* EEPROM Data Out */
 #define E1000_EECD_FWE_MASK  0x00000030
-#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
-#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_DIS   0x00000010        /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020        /* Enable FLASH writes */
 #define E1000_EECD_FWE_SHIFT 4
-#define E1000_EECD_REQ       0x00000040 /* EEPROM Access Request */
-#define E1000_EECD_GNT       0x00000080 /* EEPROM Access Grant */
-#define E1000_EECD_PRES      0x00000100 /* EEPROM Present */
-#define E1000_EECD_SIZE      0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
-#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
-                                         * (0-small, 1-large) */
-#define E1000_EECD_TYPE      0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#define E1000_EECD_REQ       0x00000040        /* EEPROM Access Request */
+#define E1000_EECD_GNT       0x00000080        /* EEPROM Access Grant */
+#define E1000_EECD_PRES      0x00000100        /* EEPROM Present */
+#define E1000_EECD_SIZE      0x00000200        /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400        /* EEPROM Addressing bits based on type
+                                        * (0-small, 1-large) */
+#define E1000_EECD_TYPE      0x00002000        /* EEPROM Type (1-SPI, 0-Microwire) */
 #ifndef E1000_EEPROM_GRANT_ATTEMPTS
-#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000       /* EEPROM # attempts to gain grant */
 #endif
-#define E1000_EECD_AUTO_RD          0x00000200  /* EEPROM Auto Read done */
-#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* EEprom Size */
+#define E1000_EECD_AUTO_RD          0x00000200 /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800 /* EEPROM Size */
 #define E1000_EECD_SIZE_EX_SHIFT    11
-#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
-#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
-#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
-#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
-#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
-#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
-#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_NVADDS    0x00018000        /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000        /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000        /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000        /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000        /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000        /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000        /* Sector One Valid */
 #define E1000_EECD_SECVAL_SHIFT      22
 #define E1000_STM_OPCODE     0xDB00
 #define E1000_HICR_FW_RESET  0xC0
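
The REQ/GNT pair defined above is a simple handshake that software performs before it may drive the EEPROM interface: request access, poll for the grant, and give up after E1000_EEPROM_GRANT_ATTEMPTS tries. A minimal sketch of that handshake under the same assumptions as above (E1000_EECD itself is defined earlier in this header; udelay() comes from <linux/delay.h>, and the helper name is illustrative):

/* Illustrative only: ask the hardware for EEPROM access and wait for
 * the grant bit before touching the EEPROM pins.
 */
#include <linux/delay.h>

static int e1000_request_eeprom(struct e1000_hw *hw)
{
	u32 eecd = readl(hw->hw_addr + E1000_EECD);
	int i;

	writel(eecd | E1000_EECD_REQ, hw->hw_addr + E1000_EECD);

	for (i = 0; i < E1000_EEPROM_GRANT_ATTEMPTS; i++) {
		eecd = readl(hw->hw_addr + E1000_EECD);
		if (eecd & E1000_EECD_GNT)
			return 0;		/* access granted */
		udelay(5);
	}

	/* timed out: withdraw the request again */
	writel(eecd & ~E1000_EECD_REQ, hw->hw_addr + E1000_EECD);
	return -1;
}
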
@@ -1593,12 +1506,12 @@ struct e1000_hw {
 #define E1000_ICH_NVM_SIG_MASK     0xC0
 
 /* EEPROM Read */
-#define E1000_EERD_START      0x00000001 /* Start Read */
-#define E1000_EERD_DONE       0x00000010 /* Read Done */
+#define E1000_EERD_START      0x00000001       /* Start Read */
+#define E1000_EERD_DONE       0x00000010       /* Read Done */
 #define E1000_EERD_ADDR_SHIFT 8
-#define E1000_EERD_ADDR_MASK  0x0000FF00 /* Read Address */
+#define E1000_EERD_ADDR_MASK  0x0000FF00       /* Read Address */
 #define E1000_EERD_DATA_SHIFT 16
-#define E1000_EERD_DATA_MASK  0xFFFF0000 /* Read Data */
+#define E1000_EERD_DATA_MASK  0xFFFF0000       /* Read Data */
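
Taken together, these bits describe the register-based EEPROM read protocol: write the word address shifted by E1000_EERD_ADDR_SHIFT together with the START bit, poll for DONE, then extract the 16-bit result from the data field. A minimal sketch under the same assumptions as above (E1000_EERD itself is defined earlier in this header; the poll budget and helper name are arbitrary choices for the illustration):

/* Illustrative only: read one EEPROM word through the EERD register. */
static int e1000_read_eeprom_word(struct e1000_hw *hw, u32 offset, u16 *data)
{
	u32 eerd;
	int i;

	writel((offset << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START,
	       hw->hw_addr + E1000_EERD);

	for (i = 0; i < 1000; i++) {		/* arbitrary poll budget */
		eerd = readl(hw->hw_addr + E1000_EERD);
		if (eerd & E1000_EERD_DONE) {
			*data = (eerd & E1000_EERD_DATA_MASK) >>
				E1000_EERD_DATA_SHIFT;
			return 0;
		}
		udelay(5);
	}
	return -1;				/* read never completed */
}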
 
 /* SPI EEPROM Status Register */
 #define EEPROM_STATUS_RDY_SPI  0x01
@@ -1608,25 +1521,25 @@ struct e1000_hw {
 #define EEPROM_STATUS_WPEN_SPI 0x80
 
 /* Extended Device Control */
-#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
-#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001    /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002    /* Maps SDP5 to GPI1 */
 #define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
-#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
-#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
-#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */
-#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004    /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008    /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010    /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020    /* Value of SW Definable Pin 5 */
 #define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
-#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */
-#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
-#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
-#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
-#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
-#define E1000_CTRL_EXT_SDP7_DIR  0x00000800 /* Direction of SDP7 0=in 1=out */
-#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
-#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
-#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
-#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
-#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040    /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080    /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100    /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200    /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400    /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR  0x00000800    /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000    /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000    /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000    /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000    /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000    /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
 #define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
@@ -1638,11 +1551,11 @@ struct e1000_hw {
 #define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
 #define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
 #define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
-#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
-#define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
-#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
-#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error detection enabled */
-#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000       /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME           0x08000000       /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000       /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000       /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000       /* descriptor FIFO parity error detection enable */
 #define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
 
 /* MDI Control */
@@ -1742,167 +1655,167 @@ struct e1000_hw {
 #define E1000_LEDCTL_MODE_LED_OFF       0xF
 
 /* Receive Address */
-#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
+#define E1000_RAH_AV  0x80000000       /* Receive descriptor valid */
 
 /* Interrupt Cause Read */
-#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
-#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
-#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
-#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
-#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
-#define E1000_ICR_RXO           0x00000040 /* rx overrun */
-#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
-#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
-#define E1000_ICR_RXCFG         0x00000400 /* RX /c/ ordered set */
-#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
-#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
-#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
-#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXDW          0x00000001     /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002     /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004     /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008     /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010     /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040     /* rx overrun */
+#define E1000_ICR_RXT0          0x00000080     /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200     /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400     /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800     /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000     /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000     /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000     /* GP Int 3 */
 #define E1000_ICR_TXD_LOW       0x00008000
 #define E1000_ICR_SRPD          0x00010000
-#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
-#define E1000_ICR_MNG           0x00040000 /* Manageability event */
-#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
-#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
-#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_ICR_HOST_ARB_PAR  0x00400000 /* host arb read buffer parity error */
-#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
-#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
-#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
-#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW bit in the FWSM */
-#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates an interrupt */
-#define E1000_ICR_EPRST         0x00100000 /* ME handware reset occurs */
+#define E1000_ICR_ACK           0x00020000     /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000     /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000     /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED  0x80000000     /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000     /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000     /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR  0x00400000     /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR        0x00800000     /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000     /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000     /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000     /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020     /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000     /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST         0x00100000     /* ME hardware reset occurs */
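
The INT_ASSERTED comment above spells out the shared-interrupt contract: the handler reads ICR once (which also acknowledges the pending causes on this hardware), returns immediately if the asserted bit is clear because the interrupt belongs to another device, and otherwise dispatches on the individual cause bits. A minimal sketch under the same assumptions as above (E1000_ICR is defined earlier in this header; the handler below is purely illustrative, not the driver's real ISR):

/* Illustrative only: claim the interrupt and note a link status change. */
#include <linux/interrupt.h>

static irqreturn_t e1000_isr_sketch(int irq, void *dev_id)
{
	struct e1000_hw *hw = dev_id;
	u32 icr = readl(hw->hw_addr + E1000_ICR);	/* read also acks */

	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;		/* not our interrupt */

	if (icr & E1000_ICR_LSC)
		hw->get_link_status = true;	/* re-check link later */

	/* rx/tx causes (E1000_ICR_RXT0, E1000_ICR_TXDW, ...) would be
	 * handled here in a real handler.
	 */
	return IRQ_HANDLED;
}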
 
 /* Interrupt Cause Set */
-#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
-#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
-#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
-#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
-#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
-#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
-#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
-#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
-#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
-#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
-#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
-#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
-#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW     /* Transmit desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE     /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC      /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ    /* rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0   /* rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO      /* rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0     /* rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC     /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG    /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0  /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1  /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2  /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3  /* GP Int 3 */
 #define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
 #define E1000_ICS_SRPD      E1000_ICR_SRPD
-#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
-#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
-#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
-#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
-#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
-#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_ACK       E1000_ICR_ACK      /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG      /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK     /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0        /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0        /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR       /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1        /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1        /* queue 1 Tx descriptor FIFO parity error */
 #define E1000_ICS_DSW       E1000_ICR_DSW
 #define E1000_ICS_PHYINT    E1000_ICR_PHYINT
 #define E1000_ICS_EPRST     E1000_ICR_EPRST
 
 /* Interrupt Mask Set */
-#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
-#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
-#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
-#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
-#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
-#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
-#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
-#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
-#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
-#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
-#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
-#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
-#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW     /* Transmit desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE     /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC      /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ    /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0   /* rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO      /* rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0     /* rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC     /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG    /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0  /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1  /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2  /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3  /* GP Int 3 */
 #define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
 #define E1000_IMS_SRPD      E1000_ICR_SRPD
-#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
-#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
-#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
-#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
-#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
-#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_ACK       E1000_ICR_ACK      /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG      /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK     /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0        /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0        /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR       /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1        /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1        /* queue 1 Tx descriptor FIFO parity error */
 #define E1000_IMS_DSW       E1000_ICR_DSW
 #define E1000_IMS_PHYINT    E1000_ICR_PHYINT
 #define E1000_IMS_EPRST     E1000_ICR_EPRST
 
 /* Interrupt Mask Clear */
-#define E1000_IMC_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
-#define E1000_IMC_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
-#define E1000_IMC_LSC       E1000_ICR_LSC       /* Link Status Change */
-#define E1000_IMC_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
-#define E1000_IMC_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
-#define E1000_IMC_RXO       E1000_ICR_RXO       /* rx overrun */
-#define E1000_IMC_RXT0      E1000_ICR_RXT0      /* rx timer intr */
-#define E1000_IMC_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
-#define E1000_IMC_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
-#define E1000_IMC_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
-#define E1000_IMC_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
-#define E1000_IMC_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
-#define E1000_IMC_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMC_TXDW      E1000_ICR_TXDW     /* Transmit desc written back */
+#define E1000_IMC_TXQE      E1000_ICR_TXQE     /* Transmit Queue empty */
+#define E1000_IMC_LSC       E1000_ICR_LSC      /* Link Status Change */
+#define E1000_IMC_RXSEQ     E1000_ICR_RXSEQ    /* rx sequence error */
+#define E1000_IMC_RXDMT0    E1000_ICR_RXDMT0   /* rx desc min. threshold */
+#define E1000_IMC_RXO       E1000_ICR_RXO      /* rx overrun */
+#define E1000_IMC_RXT0      E1000_ICR_RXT0     /* rx timer intr */
+#define E1000_IMC_MDAC      E1000_ICR_MDAC     /* MDIO access complete */
+#define E1000_IMC_RXCFG     E1000_ICR_RXCFG    /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0   E1000_ICR_GPI_EN0  /* GP Int 0 */
+#define E1000_IMC_GPI_EN1   E1000_ICR_GPI_EN1  /* GP Int 1 */
+#define E1000_IMC_GPI_EN2   E1000_ICR_GPI_EN2  /* GP Int 2 */
+#define E1000_IMC_GPI_EN3   E1000_ICR_GPI_EN3  /* GP Int 3 */
 #define E1000_IMC_TXD_LOW   E1000_ICR_TXD_LOW
 #define E1000_IMC_SRPD      E1000_ICR_SRPD
-#define E1000_IMC_ACK       E1000_ICR_ACK       /* Receive Ack frame */
-#define E1000_IMC_MNG       E1000_ICR_MNG       /* Manageability event */
-#define E1000_IMC_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
-#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_IMC_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
-#define E1000_IMC_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
-#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_ACK       E1000_ICR_ACK      /* Receive Ack frame */
+#define E1000_IMC_MNG       E1000_ICR_MNG      /* Manageability event */
+#define E1000_IMC_DOCK      E1000_ICR_DOCK     /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0        /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0        /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR        E1000_ICR_PB_PAR       /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1        /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1        /* queue 1 Tx descriptor FIFO parity error */
 #define E1000_IMC_DSW       E1000_ICR_DSW
 #define E1000_IMC_PHYINT    E1000_ICR_PHYINT
 #define E1000_IMC_EPRST     E1000_ICR_EPRST
 
 /* Receive Control */
-#define E1000_RCTL_RST            0x00000001    /* Software reset */
-#define E1000_RCTL_EN             0x00000002    /* enable */
-#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
-#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
-#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enab */
-#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
-#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
-#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
-#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
-#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
-#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
-#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
-#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
-#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min threshold size */
-#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min threshold size */
-#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
-#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
-#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
-#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
-#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
-#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
-#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+#define E1000_RCTL_RST            0x00000001   /* Software reset */
+#define E1000_RCTL_EN             0x00000002   /* enable */
+#define E1000_RCTL_SBP            0x00000004   /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008   /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010   /* multicast promiscuous enab */
+#define E1000_RCTL_LPE            0x00000020   /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000   /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040   /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080   /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0   /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00   /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400   /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000   /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100   /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200   /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12   /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000   /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000   /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000   /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000   /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000   /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000   /* broadcast enable */
 /* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
-#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
-#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
-#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
-#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+#define E1000_RCTL_SZ_2048        0x00000000   /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000   /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000   /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000   /* rx buffer size 256 */
 /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
-#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
-#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
-#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
-#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
-#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
-#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
-#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
-#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
-#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
-#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
-#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
-#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
+#define E1000_RCTL_SZ_16384       0x00010000   /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000   /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000   /* rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000   /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000   /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000   /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000   /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000   /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000   /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000   /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000   /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27   /* Flexible buffer shift */
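+
+/* Illustrative sketch, not part of the original header: the size encodings
+ * above share one bit field and are disambiguated by E1000_RCTL_BSEX, so a
+ * 4096-byte receive buffer needs both the 0x00030000 encoding and BSEX set.
+ * The helper name is hypothetical.
+ */
+static inline u32 e1000_rctl_select_4k_buffers(u32 rctl)
+{
+	/* Clear any previous size selection (the field mask is 0x00030000). */
+	rctl &= ~(E1000_RCTL_SZ_256 | E1000_RCTL_BSEX);
+	/* With BSEX set, 0x00030000 selects 4096-byte buffers (table above). */
+	rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+	return rctl;
+}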
 
 /* Use byte values for the following shift parameters
  * Usage:
@@ -1925,10 +1838,10 @@ struct e1000_hw {
 #define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
 #define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
 
-#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
-#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
-#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
-#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
+#define E1000_PSRCTL_BSIZE0_SHIFT  7   /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2   /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6   /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14   /* Shift _left_ 14 */
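+
+/* Illustrative sketch (helper name is hypothetical): PSRCTL takes byte
+ * counts, with BSIZE0 kept in 128-byte units in the low field and
+ * BSIZE1..3 in 1-KB units in the higher fields, which is why byte values
+ * shift right into the low fields and left into the high ones above.
+ */
+static inline u32 e1000_pack_psrctl(u32 bsize0_bytes, u32 bsize_bytes)
+{
+	u32 psrctl = 0;
+
+	psrctl |= bsize0_bytes >> E1000_PSRCTL_BSIZE0_SHIFT; /* 128-byte units */
+	psrctl |= bsize_bytes  >> E1000_PSRCTL_BSIZE1_SHIFT; /* 1-KB units */
+	psrctl |= bsize_bytes  << E1000_PSRCTL_BSIZE2_SHIFT;
+	psrctl |= bsize_bytes  << E1000_PSRCTL_BSIZE3_SHIFT;
+	return psrctl;
+}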
 
 /* SW_W_SYNC definitions */
 #define E1000_SWFW_EEP_SM     0x0001
@@ -1937,17 +1850,17 @@ struct e1000_hw {
 #define E1000_SWFW_MAC_CSR_SM 0x0008
 
 /* Receive Descriptor */
-#define E1000_RDT_DELAY 0x0000ffff      /* Delay timer (1=1024us) */
-#define E1000_RDT_FPDB  0x80000000      /* Flush descriptor block */
-#define E1000_RDLEN_LEN 0x0007ff80      /* descriptor length */
-#define E1000_RDH_RDH   0x0000ffff      /* receive descriptor head */
-#define E1000_RDT_RDT   0x0000ffff      /* receive descriptor tail */
+#define E1000_RDT_DELAY 0x0000ffff     /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB  0x80000000     /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80     /* descriptor length */
+#define E1000_RDH_RDH   0x0000ffff     /* receive descriptor head */
+#define E1000_RDT_RDT   0x0000ffff     /* receive descriptor tail */
 
 /* Flow Control */
-#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
-#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
-#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
-#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+#define E1000_FCRTH_RTH  0x0000FFF8    /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000    /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8    /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000    /* Enable XON frame transmission */
 
 /* Header split receive */
 #define E1000_RFCTL_ISCSI_DIS           0x00000001
@@ -1967,66 +1880,64 @@ struct e1000_hw {
 #define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
 
 /* Receive Descriptor Control */
-#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
-#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
-#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
-#define E1000_RXDCTL_GRAN    0x01000000 /* RXDCTL Granularity */
+#define E1000_RXDCTL_PTHRESH 0x0000003F        /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00        /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000        /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN    0x01000000        /* RXDCTL Granularity */
 
 /* Transmit Descriptor Control */
-#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
-#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
-#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
-#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
-#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
-#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
-#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
-                                              still to be processed. */
+#define E1000_TXDCTL_PTHRESH 0x0000003F        /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00        /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000        /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000        /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000       /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000        /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000     /* Enable the counting of desc.
+                                                  still to be processed. */
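+
+/* Illustrative sketch (helper name is hypothetical): FULL_TX_DESC_WB above
+ * is just the GRAN bit plus a write-back threshold of one descriptor in
+ * the WTHRESH field (bits 21:16), i.e. GRAN | (1 << 16) == 0x01010000.
+ */
+static inline u32 e1000_txdctl_set_wthresh(u32 txdctl, u32 wthresh)
+{
+	txdctl &= ~E1000_TXDCTL_WTHRESH;
+	txdctl |= (wthresh << 16) & E1000_TXDCTL_WTHRESH; /* shift from the mask above */
+	txdctl |= E1000_TXDCTL_GRAN; /* granularity select, as in FULL_TX_DESC_WB */
+	return txdctl;
+}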
 /* Transmit Configuration Word */
-#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
-#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
-#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
-#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
-#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
-#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
-#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
-#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
-#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
-#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+#define E1000_TXCW_FD         0x00000020       /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040       /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080       /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100       /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180       /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000       /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000       /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff       /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000       /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000       /* Auto-neg enable */
 
 /* Receive Configuration Word */
-#define E1000_RXCW_CW    0x0000ffff     /* RxConfigWord mask */
-#define E1000_RXCW_NC    0x04000000     /* Receive config no carrier */
-#define E1000_RXCW_IV    0x08000000     /* Receive config invalid */
-#define E1000_RXCW_CC    0x10000000     /* Receive config change */
-#define E1000_RXCW_C     0x20000000     /* Receive config */
-#define E1000_RXCW_SYNCH 0x40000000     /* Receive config synch */
-#define E1000_RXCW_ANC   0x80000000     /* Auto-neg complete */
+#define E1000_RXCW_CW    0x0000ffff    /* RxConfigWord mask */
+#define E1000_RXCW_NC    0x04000000    /* Receive config no carrier */
+#define E1000_RXCW_IV    0x08000000    /* Receive config invalid */
+#define E1000_RXCW_CC    0x10000000    /* Receive config change */
+#define E1000_RXCW_C     0x20000000    /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000    /* Receive config synch */
+#define E1000_RXCW_ANC   0x80000000    /* Auto-neg complete */
 
 /* Transmit Control */
-#define E1000_TCTL_RST    0x00000001    /* software reset */
-#define E1000_TCTL_EN     0x00000002    /* enable tx */
-#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
-#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
-#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
-#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
-#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
-#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
-#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
-#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
-#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+#define E1000_TCTL_RST    0x00000001   /* software reset */
+#define E1000_TCTL_EN     0x00000002   /* enable tx */
+#define E1000_TCTL_BCE    0x00000004   /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008   /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0   /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000   /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000   /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000   /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000   /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000   /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000   /* Multiple request support */
 /* Extended Transmit Control */
-#define E1000_TCTL_EXT_BST_MASK  0x000003FF /* Backoff Slot Time */
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-
-#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX   0x00010000
+#define E1000_TCTL_EXT_BST_MASK  0x000003FF    /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00    /* Gigabit Carry Extend Padding */
 
 /* Receive Checksum Control */
-#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
-#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
-#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
-#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
-#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF      /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100      /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200      /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400      /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE    0x00001000      /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000      /* packet checksum disabled */
 
 /* Multiple Receive Queue Control */
 #define E1000_MRQC_ENABLE_MASK              0x00000003
@@ -2042,141 +1953,141 @@ struct e1000_hw {
 
 /* Definitions for power management and wakeup registers */
 /* Wake Up Control */
-#define E1000_WUC_APME       0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
-#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
-#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+#define E1000_WUC_APME       0x00000001        /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002        /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004        /* PME Status */
+#define E1000_WUC_APMPME     0x00000008        /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM        0x80000000        /* Enable SPM */
 
 /* Wake Up Filter Control */
-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
-#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
-#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
-#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
-#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
-#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define E1000_WUFC_IGNORE_TCO      0x00008000 /* Ignore WakeOn TCO packets */
-#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
-#define E1000_WUFC_FLX_OFFSET 16       /* Offset to the Flexible Filters bits */
-#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+#define E1000_WUFC_LNKC 0x00000001     /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002     /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004     /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008     /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010     /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020     /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040     /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080     /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO      0x00008000  /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000     /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000     /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000     /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000     /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF      /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16       /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000      /* Mask for the 4 flexible filters */
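+
+/* Illustrative sketch (helper name is hypothetical): the four flexible
+ * filter enables occupy consecutive bits starting at FLX_OFFSET, so
+ * filter n (0..3) is enabled by shifting a single bit into place; the
+ * mask keeps the result inside E1000_WUFC_FLX_FILTERS.
+ */
+static inline u32 e1000_wufc_enable_flex_filter(u32 wufc, unsigned int n)
+{
+	return wufc | ((1 << (E1000_WUFC_FLX_OFFSET + n)) & E1000_WUFC_FLX_FILTERS);
+}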
 
 /* Wake Up Status */
-#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
-#define E1000_WUS_MAG  0x00000002 /* Magic Packet Received */
-#define E1000_WUS_EX   0x00000004 /* Directed Exact Received */
-#define E1000_WUS_MC   0x00000008 /* Directed Multicast Received */
-#define E1000_WUS_BC   0x00000010 /* Broadcast Received */
-#define E1000_WUS_ARP  0x00000020 /* ARP Request Packet Received */
-#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
-#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
-#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
-#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
-#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
-#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
-#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+#define E1000_WUS_LNKC 0x00000001      /* Link Status Changed */
+#define E1000_WUS_MAG  0x00000002      /* Magic Packet Received */
+#define E1000_WUS_EX   0x00000004      /* Directed Exact Received */
+#define E1000_WUS_MC   0x00000008      /* Directed Multicast Received */
+#define E1000_WUS_BC   0x00000010      /* Broadcast Received */
+#define E1000_WUS_ARP  0x00000020      /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040      /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080      /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000      /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000      /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000      /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000      /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000       /* Mask for the 4 flexible filters */
 
 /* Management Control */
-#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
-#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
-#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RCMP 026Fh Filtering */
-#define E1000_MANC_0298_EN       0x00000200 /* Enable RCMP 0298h Filtering */
-#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
-#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
-#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
-#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
-#define E1000_MANC_NEIGHBOR_EN   0x00004000 /* Enable Neighbor Discovery
-                                             * Filtering */
-#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
-#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
-#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
-#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
-#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
-#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
-#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000 /* Enable MAC address
-                                                    * filtering */
-#define E1000_MANC_EN_MNG2HOST   0x00200000 /* Enable MNG packets to host
-                                             * memory */
-#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000 /* Enable IP address
-                                                    * filtering */
-#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
-#define E1000_MANC_BR_EN         0x01000000 /* Enable broadcast filtering */
-#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
-#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
-#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
-#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
-#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
-#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
-
-#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
-#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+#define E1000_MANC_SMBUS_EN      0x00000001    /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002    /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004    /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100    /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200    /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400    /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800    /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000    /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000    /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000    /* Enable Neighbor Discovery
+                                                * Filtering */
+#define E1000_MANC_ARP_RES_EN    0x00008000    /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000    /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000    /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000    /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000    /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000     /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000     /* Enable MAC address
+                                                        * filtering */
+#define E1000_MANC_EN_MNG2HOST   0x00200000    /* Enable MNG packets to host
+                                                * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000     /* Enable IP address
+                                                        * filtering */
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN         0x01000000    /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000    /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000    /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000    /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000    /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000    /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000    /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28      /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29      /* SMBus Clock Out Shift */
 
 /* SW Semaphore Register */
-#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
-#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
-#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
-#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+#define E1000_SWSM_SMBI         0x00000001     /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002     /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004     /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008     /* Driver Loaded Bit */
 
 /* FW Semaphore Register */
-#define E1000_FWSM_MODE_MASK    0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_MASK    0x0000000E     /* FW mode */
 #define E1000_FWSM_MODE_SHIFT            1
-#define E1000_FWSM_FW_VALID     0x00008000 /* FW established a valid mode */
+#define E1000_FWSM_FW_VALID     0x00008000     /* FW established a valid mode */
 
-#define E1000_FWSM_RSPCIPHY        0x00000040 /* Reset PHY on PCI reset */
-#define E1000_FWSM_DISSW           0x10000000 /* FW disable SW Write Access */
-#define E1000_FWSM_SKUSEL_MASK     0x60000000 /* LAN SKU select */
+#define E1000_FWSM_RSPCIPHY        0x00000040  /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW           0x10000000  /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK     0x60000000  /* LAN SKU select */
 #define E1000_FWSM_SKUEL_SHIFT     29
-#define E1000_FWSM_SKUSEL_EMB      0x0 /* Embedded SKU */
-#define E1000_FWSM_SKUSEL_CONS     0x1 /* Consumer SKU */
-#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
-#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */
+#define E1000_FWSM_SKUSEL_EMB      0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS     0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
 
 /* FFLT Debug Register */
-#define E1000_FFLT_DBG_INVC     0x00100000 /* Invalid /C/ code handling */
+#define E1000_FFLT_DBG_INVC     0x00100000     /* Invalid /C/ code handling */
 
 typedef enum {
-    e1000_mng_mode_none     = 0,
-    e1000_mng_mode_asf,
-    e1000_mng_mode_pt,
-    e1000_mng_mode_ipmi,
-    e1000_mng_mode_host_interface_only
+       e1000_mng_mode_none = 0,
+       e1000_mng_mode_asf,
+       e1000_mng_mode_pt,
+       e1000_mng_mode_ipmi,
+       e1000_mng_mode_host_interface_only
 } e1000_mng_mode;
 
-/* Host Inteface Control Register */
-#define E1000_HICR_EN           0x00000001  /* Enable Bit - RO */
-#define E1000_HICR_C            0x00000002  /* Driver sets this bit when done
-                                             * to put command in RAM */
-#define E1000_HICR_SV           0x00000004  /* Status Validity */
-#define E1000_HICR_FWR          0x00000080  /* FW reset. Set by the Host */
+/* Host Interface Control Register */
+#define E1000_HICR_EN           0x00000001     /* Enable Bit - RO */
+#define E1000_HICR_C            0x00000002     /* Driver sets this bit when done
+                                                * to put command in RAM */
+#define E1000_HICR_SV           0x00000004     /* Status Validity */
+#define E1000_HICR_FWR          0x00000080     /* FW reset. Set by the Host */
 
 /* Host Interface Command Interface - Address range 0x8800-0x8EFF */
-#define E1000_HI_MAX_DATA_LENGTH         252 /* Host Interface data length */
-#define E1000_HI_MAX_BLOCK_BYTE_LENGTH  1792 /* Number of bytes in range */
-#define E1000_HI_MAX_BLOCK_DWORD_LENGTH  448 /* Number of dwords in range */
-#define E1000_HI_COMMAND_TIMEOUT         500 /* Time in ms to process HI command */
+#define E1000_HI_MAX_DATA_LENGTH         252   /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH  1792   /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH  448   /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT         500   /* Time in ms to process HI command */
 
 struct e1000_host_command_header {
-    u8 command_id;
-    u8 command_length;
-    u8 command_options;   /* I/F bits for command, status for return */
-    u8 checksum;
+       u8 command_id;
+       u8 command_length;
+       u8 command_options;     /* I/F bits for command, status for return */
+       u8 checksum;
 };
 struct e1000_host_command_info {
-    struct e1000_host_command_header command_header;  /* Command Head/Command Result Head has 4 bytes */
-    u8 command_data[E1000_HI_MAX_DATA_LENGTH];   /* Command data can length 0..252 */
+       struct e1000_host_command_header command_header;        /* Command/result header, 4 bytes */
+       u8 command_data[E1000_HI_MAX_DATA_LENGTH];      /* Command data, 0..252 bytes */
 };
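+
+/* Illustrative sketch (helper name, the command id argument and the zeroed
+ * checksum are assumptions; only the layout and E1000_HI_MAX_DATA_LENGTH
+ * come from the definitions above): a host interface command is the 4-byte
+ * header followed by up to 252 bytes of payload.  Assumes the usual kernel
+ * memcpy().
+ */
+static inline void e1000_fill_host_command(struct e1000_host_command_info *cmd,
+					   u8 id, const u8 *data, u8 len)
+{
+	if (len > E1000_HI_MAX_DATA_LENGTH)
+		len = E1000_HI_MAX_DATA_LENGTH;
+	cmd->command_header.command_id = id;
+	cmd->command_header.command_length = len;
+	cmd->command_header.command_options = 0;
+	cmd->command_header.checksum = 0; /* drivers fill this over the whole block */
+	memcpy(cmd->command_data, data, len);
+}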
 
 /* Host SMB register #0 */
-#define E1000_HSMC0R_CLKIN      0x00000001  /* SMB Clock in */
-#define E1000_HSMC0R_DATAIN     0x00000002  /* SMB Data in */
-#define E1000_HSMC0R_DATAOUT    0x00000004  /* SMB Data out */
-#define E1000_HSMC0R_CLKOUT     0x00000008  /* SMB Clock out */
+#define E1000_HSMC0R_CLKIN      0x00000001     /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN     0x00000002     /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT    0x00000004     /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT     0x00000008     /* SMB Clock out */
 
 /* Host SMB register #1 */
 #define E1000_HSMC1R_CLKIN      E1000_HSMC0R_CLKIN
@@ -2185,10 +2096,10 @@ struct e1000_host_command_info {
 #define E1000_HSMC1R_CLKOUT     E1000_HSMC0R_CLKOUT
 
 /* FW Status Register */
-#define E1000_FWSTS_FWS_MASK    0x000000FF  /* FW Status */
+#define E1000_FWSTS_FWS_MASK    0x000000FF     /* FW Status */
 
 /* Wake Up Packet Length */
-#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF  /* Only the lower 12 bits are valid */
 
 #define E1000_MDALIGN          4096
 
@@ -2242,24 +2153,24 @@ struct e1000_host_command_info {
 #define PCI_EX_LINK_WIDTH_SHIFT      4
 
 /* EEPROM Commands - Microwire */
-#define EEPROM_READ_OPCODE_MICROWIRE  0x6  /* EEPROM read opcode */
-#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5  /* EEPROM write opcode */
-#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7  /* EEPROM erase opcode */
-#define EEPROM_EWEN_OPCODE_MICROWIRE  0x13 /* EEPROM erase/write enable */
-#define EEPROM_EWDS_OPCODE_MICROWIRE  0x10 /* EEPROM erast/write disable */
+#define EEPROM_READ_OPCODE_MICROWIRE  0x6      /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5      /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7      /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE  0x13     /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE  0x10     /* EEPROM erase/write disable */
 
 /* EEPROM Commands - SPI */
-#define EEPROM_MAX_RETRY_SPI        5000 /* Max wait of 5ms, for RDY signal */
-#define EEPROM_READ_OPCODE_SPI      0x03  /* EEPROM read opcode */
-#define EEPROM_WRITE_OPCODE_SPI     0x02  /* EEPROM write opcode */
-#define EEPROM_A8_OPCODE_SPI        0x08  /* opcode bit-3 = address bit-8 */
-#define EEPROM_WREN_OPCODE_SPI      0x06  /* EEPROM set Write Enable latch */
-#define EEPROM_WRDI_OPCODE_SPI      0x04  /* EEPROM reset Write Enable latch */
-#define EEPROM_RDSR_OPCODE_SPI      0x05  /* EEPROM read Status register */
-#define EEPROM_WRSR_OPCODE_SPI      0x01  /* EEPROM write Status register */
-#define EEPROM_ERASE4K_OPCODE_SPI   0x20  /* EEPROM ERASE 4KB */
-#define EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */
-#define EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */
+#define EEPROM_MAX_RETRY_SPI        5000       /* Max wait of 5 ms for RDY signal */
+#define EEPROM_READ_OPCODE_SPI      0x03       /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI     0x02       /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI        0x08       /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI      0x06       /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI      0x04       /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI      0x05       /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI      0x01       /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI   0x20       /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI  0xD8       /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI  0xDB       /* EEPROM ERASE 256B */
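+
+/* Illustrative sketch (helper name is hypothetical): on SPI parts with an
+ * 8-bit address phase, byte-address bit 8 travels in bit 3 of the opcode,
+ * which is exactly what EEPROM_A8_OPCODE_SPI encodes.
+ */
+static inline u8 e1000_spi_read_opcode(u16 byte_offset)
+{
+	u8 opcode = EEPROM_READ_OPCODE_SPI;
+
+	if (byte_offset & 0x0100) /* address bit 8 -> opcode bit 3 */
+		opcode |= EEPROM_A8_OPCODE_SPI;
+	return opcode;
+}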
 
 /* EEPROM Size definitions */
 #define EEPROM_WORD_SIZE_SHIFT  6
@@ -2270,7 +2181,7 @@ struct e1000_host_command_info {
 #define EEPROM_COMPAT                 0x0003
 #define EEPROM_ID_LED_SETTINGS        0x0004
 #define EEPROM_VERSION                0x0005
-#define EEPROM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_SERDES_AMPLITUDE       0x0006   /* For SERDES output amplitude adjustment. */
 #define EEPROM_PHY_CLASS_WORD         0x0007
 #define EEPROM_INIT_CONTROL1_REG      0x000A
 #define EEPROM_INIT_CONTROL2_REG      0x000F
@@ -2283,22 +2194,16 @@ struct e1000_host_command_info {
 #define EEPROM_FLASH_VERSION          0x0032
 #define EEPROM_CHECKSUM_REG           0x003F
 
-#define E1000_EEPROM_CFG_DONE         0x00040000   /* MNG config cycle done */
-#define E1000_EEPROM_CFG_DONE_PORT_1  0x00080000   /* ...for second port */
+#define E1000_EEPROM_CFG_DONE         0x00040000       /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1  0x00080000       /* ...for second port */
 
 /* Word definitions for ID LED Settings */
 #define ID_LED_RESERVED_0000 0x0000
 #define ID_LED_RESERVED_FFFF 0xFFFF
-#define ID_LED_RESERVED_82573  0xF746
-#define ID_LED_DEFAULT_82573   0x1811
 #define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2 << 12) | \
                               (ID_LED_OFF1_OFF2 << 8) | \
                               (ID_LED_DEF1_DEF2 << 4) | \
                               (ID_LED_DEF1_DEF2))
-#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
-                                 (ID_LED_DEF1_OFF2 <<  8) | \
-                                 (ID_LED_DEF1_ON2  <<  4) | \
-                                 (ID_LED_DEF1_DEF2))
 #define ID_LED_DEF1_DEF2     0x1
 #define ID_LED_DEF1_ON2      0x2
 #define ID_LED_DEF1_OFF2     0x3
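+
+/* Illustrative sketch (helper name is hypothetical): the ID LED word packs
+ * one 4-bit behaviour code per LED, LED 0 in the low nibble, which is how
+ * ID_LED_DEFAULT above is assembled from the ID_LED_* codes.
+ */
+static inline u8 e1000_id_led_code(u16 id_led_word, unsigned int led)
+{
+	return (id_led_word >> (led * 4)) & 0xF;
+}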
@@ -2313,7 +2218,6 @@ struct e1000_host_command_info {
 #define IGP_ACTIVITY_LED_ENABLE 0x0300
 #define IGP_LED3_MODE           0x07000000
 
-
 /* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
 #define EEPROM_SERDES_AMPLITUDE_MASK  0x000F
 
@@ -2384,11 +2288,8 @@ struct e1000_host_command_info {
 
 #define DEFAULT_82542_TIPG_IPGR2 10
 #define DEFAULT_82543_TIPG_IPGR2 6
-#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
 #define E1000_TIPG_IPGR2_SHIFT  20
 
-#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
-#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000   0x00000008
 #define E1000_TXDMAC_DPP 0x00000001
 
 /* Adaptive IFS defines */
@@ -2421,9 +2322,9 @@ struct e1000_host_command_info {
 #define E1000_EXTCNF_CTRL_SWFLAG            0x00000020
 
 /* PBA constants */
-#define E1000_PBA_8K 0x0008    /* 8KB, default Rx allocation */
-#define E1000_PBA_12K 0x000C    /* 12KB, default Rx allocation */
-#define E1000_PBA_16K 0x0010    /* 16KB, default TX allocation */
+#define E1000_PBA_8K 0x0008    /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C   /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010   /* 16KB, default TX allocation */
 #define E1000_PBA_20K 0x0014
 #define E1000_PBA_22K 0x0016
 #define E1000_PBA_24K 0x0018
@@ -2432,7 +2333,7 @@ struct e1000_host_command_info {
 #define E1000_PBA_34K 0x0022
 #define E1000_PBA_38K 0x0026
 #define E1000_PBA_40K 0x0028
-#define E1000_PBA_48K 0x0030    /* 48KB, default RX allocation */
+#define E1000_PBA_48K 0x0030   /* 48KB, default RX allocation */
 
 #define E1000_PBS_16K E1000_PBA_16K
 
@@ -2442,9 +2343,9 @@ struct e1000_host_command_info {
 #define FLOW_CONTROL_TYPE         0x8808
 
 /* The historical defaults for the flow control values are given below. */
-#define FC_DEFAULT_HI_THRESH        (0x8000)    /* 32KB */
-#define FC_DEFAULT_LO_THRESH        (0x4000)    /* 16KB */
-#define FC_DEFAULT_TX_TIMER         (0x100)     /* ~130 us */
+#define FC_DEFAULT_HI_THRESH        (0x8000)   /* 32KB */
+#define FC_DEFAULT_LO_THRESH        (0x4000)   /* 16KB */
+#define FC_DEFAULT_TX_TIMER         (0x100)    /* ~130 us */
 
 /* PCIX Config space */
 #define PCIX_COMMAND_REGISTER    0xE6
@@ -2458,7 +2359,6 @@ struct e1000_host_command_info {
 #define PCIX_STATUS_HI_MMRBC_4K      0x3
 #define PCIX_STATUS_HI_MMRBC_2K      0x2
 
-
 /* Number of bits required to shift right the "pause" bits from the
  * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
  */
@@ -2479,14 +2379,11 @@ struct e1000_host_command_info {
  */
 #define ILOS_SHIFT  3
 
-
 #define RECEIVE_BUFFER_ALIGN_SIZE  (256)
 
 /* Number of milliseconds we wait for auto-negotiation to complete */
 #define LINK_UP_TIMEOUT             500
 
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define MASTER_DISABLE_TIMEOUT      800
 /* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
 #define AUTO_READ_DONE_TIMEOUT      10
 /* Number of milliseconds we wait for PHY configuration done after MAC reset */
@@ -2534,7 +2431,6 @@ struct e1000_host_command_info {
           (((length) > (adapter)->min_frame_size) && \
            ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
 
-
 /* Structures, enums, and macros for the PHY */
 
 /* Bit definitions for the Management Data IO (MDIO) and Management Data
@@ -2551,49 +2447,49 @@ struct e1000_host_command_info {
 
 /* PHY 1000 MII Register/Bit Definitions */
 /* PHY Registers defined by IEEE */
-#define PHY_CTRL         0x00 /* Control Register */
-#define PHY_STATUS       0x01 /* Status Regiser */
-#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
-#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
-#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
-#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
-
-#define MAX_PHY_REG_ADDRESS        0x1F  /* 5 bit address bus (0-0x1F) */
-#define MAX_PHY_MULTI_PAGE_REG     0xF   /* Registers equal on all pages */
+#define PHY_CTRL         0x00  /* Control Register */
+#define PHY_STATUS       0x01  /* Status Register */
+#define PHY_ID1          0x02  /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03  /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04  /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05  /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06  /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07  /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08  /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09  /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A  /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F  /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS        0x1F        /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG     0xF /* Registers equal on all pages */
 
 /* M88E1000 Specific Registers */
-#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
-#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
-#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
-#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
-#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
-#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
-
-#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
-#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
-#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
-#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
-#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+#define M88E1000_PHY_SPEC_CTRL     0x10        /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11        /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12        /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13        /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14        /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15        /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A        /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D        /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E        /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100       /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800       /* improved BER performance */
 
 #define IGP01E1000_IEEE_REGS_PAGE  0x0000
 #define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
 #define IGP01E1000_IEEE_FORCE_GIGA      0x0140
 
 /* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
-#define IGP01E1000_PHY_PORT_CTRL   0x12 /* PHY Specific Control Register */
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
-#define IGP01E1000_GMII_FIFO       0x14 /* GMII FIFO Register */
-#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10        /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11        /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL   0x12        /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13        /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO       0x14        /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15    /* PHY Channel Quality Register */
 #define IGP02E1000_PHY_POWER_MGMT      0x19
-#define IGP01E1000_PHY_PAGE_SELECT     0x1F /* PHY Page Select Core Register */
+#define IGP01E1000_PHY_PAGE_SELECT     0x1F    /* PHY Page Select Core Register */
 
 /* IGP01E1000 AGC Registers - stores the cable length values */
 #define IGP01E1000_PHY_AGC_A        0x1172
@@ -2636,192 +2532,119 @@ struct e1000_host_command_info {
 
 #define IGP01E1000_ANALOG_REGS_PAGE  0x20C0
 
-/* Bits...
- * 15-5: page
- * 4-0: register offset
- */
-#define GG82563_PAGE_SHIFT        5
-#define GG82563_REG(page, reg)    \
-        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
-#define GG82563_MIN_ALT_REG       30
-
-/* GG82563 Specific Registers */
-#define GG82563_PHY_SPEC_CTRL           \
-        GG82563_REG(0, 16) /* PHY Specific Control */
-#define GG82563_PHY_SPEC_STATUS         \
-        GG82563_REG(0, 17) /* PHY Specific Status */
-#define GG82563_PHY_INT_ENABLE          \
-        GG82563_REG(0, 18) /* Interrupt Enable */
-#define GG82563_PHY_SPEC_STATUS_2       \
-        GG82563_REG(0, 19) /* PHY Specific Status 2 */
-#define GG82563_PHY_RX_ERR_CNTR         \
-        GG82563_REG(0, 21) /* Receive Error Counter */
-#define GG82563_PHY_PAGE_SELECT         \
-        GG82563_REG(0, 22) /* Page Select */
-#define GG82563_PHY_SPEC_CTRL_2         \
-        GG82563_REG(0, 26) /* PHY Specific Control 2 */
-#define GG82563_PHY_PAGE_SELECT_ALT     \
-        GG82563_REG(0, 29) /* Alternate Page Select */
-#define GG82563_PHY_TEST_CLK_CTRL       \
-        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
-
-#define GG82563_PHY_MAC_SPEC_CTRL       \
-        GG82563_REG(2, 21) /* MAC Specific Control Register */
-#define GG82563_PHY_MAC_SPEC_CTRL_2     \
-        GG82563_REG(2, 26) /* MAC Specific Control 2 */
-
-#define GG82563_PHY_DSP_DISTANCE    \
-        GG82563_REG(5, 26) /* DSP Distance */
-
-/* Page 193 - Port Control Registers */
-#define GG82563_PHY_KMRN_MODE_CTRL   \
-        GG82563_REG(193, 16) /* Kumeran Mode Control */
-#define GG82563_PHY_PORT_RESET          \
-        GG82563_REG(193, 17) /* Port Reset */
-#define GG82563_PHY_REVISION_ID         \
-        GG82563_REG(193, 18) /* Revision ID */
-#define GG82563_PHY_DEVICE_ID           \
-        GG82563_REG(193, 19) /* Device ID */
-#define GG82563_PHY_PWR_MGMT_CTRL       \
-        GG82563_REG(193, 20) /* Power Management Control */
-#define GG82563_PHY_RATE_ADAPT_CTRL     \
-        GG82563_REG(193, 25) /* Rate Adaptation Control */
-
-/* Page 194 - KMRN Registers */
-#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
-        GG82563_REG(194, 16) /* FIFO's Control/Status */
-#define GG82563_PHY_KMRN_CTRL           \
-        GG82563_REG(194, 17) /* Control */
-#define GG82563_PHY_INBAND_CTRL         \
-        GG82563_REG(194, 18) /* Inband Control */
-#define GG82563_PHY_KMRN_DIAGNOSTIC     \
-        GG82563_REG(194, 19) /* Diagnostic */
-#define GG82563_PHY_ACK_TIMEOUTS        \
-        GG82563_REG(194, 20) /* Acknowledge Timeouts */
-#define GG82563_PHY_ADV_ABILITY         \
-        GG82563_REG(194, 21) /* Advertised Ability */
-#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
-        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
-#define GG82563_PHY_ADV_NEXT_PAGE       \
-        GG82563_REG(194, 24) /* Advertised Next Page */
-#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
-        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
-#define GG82563_PHY_KMRN_MISC           \
-        GG82563_REG(194, 26) /* Misc. */
-
 /* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
-#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
-#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN       0x0800  /* Power down */
-#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000 /* 0 = normal, 1 = PHY reset */
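The two speed-select bits above split one value across bits 6 and 13 of the control register. A minimal sketch, not part of this patch (the helper name is hypothetical), of how the forced-speed field could be composed from these defines:

	static u16 mii_cr_speed_bits(int speed_mbps)
	{
		switch (speed_mbps) {
		case 1000:
			return MII_CR_SPEED_SELECT_MSB;	/* MSB/LSB = 10 */
		case 100:
			return MII_CR_SPEED_SELECT_LSB;	/* MSB/LSB = 01 */
		default:
			return 0;			/* MSB/LSB = 00 -> 10 Mb/s */
		}
	}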
 
 /* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
-#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+#define MII_SR_EXTENDED_CAPS     0x0001        /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002        /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004        /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008        /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010        /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020        /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040        /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100        /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200        /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400        /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800        /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000        /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000        /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000        /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000        /* 100T4 Capable */
 
 /* Autoneg Advertisement Register */
-#define NWAY_AR_SELECTOR_FIELD 0x0001   /* indicates IEEE 802.3 CSMA/CD */
-#define NWAY_AR_10T_HD_CAPS    0x0020   /* 10T   Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS    0x0040   /* 10T   Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS  0x0080   /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS  0x0100   /* 100TX Full Duplex Capable */
-#define NWAY_AR_100T4_CAPS     0x0200   /* 100T4 Capable */
-#define NWAY_AR_PAUSE          0x0400   /* Pause operation desired */
-#define NWAY_AR_ASM_DIR        0x0800   /* Asymmetric Pause Direction bit */
-#define NWAY_AR_REMOTE_FAULT   0x2000   /* Remote Fault detected */
-#define NWAY_AR_NEXT_PAGE      0x8000   /* Next Page ability supported */
+#define NWAY_AR_SELECTOR_FIELD 0x0001  /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS    0x0020  /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS    0x0040  /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS  0x0080  /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS  0x0100  /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS     0x0200  /* 100T4 Capable */
+#define NWAY_AR_PAUSE          0x0400  /* Pause operation desired */
+#define NWAY_AR_ASM_DIR        0x0800  /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT   0x2000  /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE      0x8000  /* Next Page ability supported */
 
 /* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
-#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
-#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
-#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
-#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
-#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
-#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
-#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
-#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
-#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000        /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020        /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040        /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080        /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100        /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200        /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400        /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800        /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000        /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000        /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000        /* Next Page ability supported */
 
 /* Autoneg Expansion Register */
-#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
-#define NWAY_ER_PAGE_RXD          0x0002 /* LP is 10T   Half Duplex Capable */
-#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* LP is 10T   Full Duplex Capable */
-#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */
-#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* LP is 100TX Full Duplex Capable */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001       /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002       /* LP is 10T   Half Duplex Capable */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004       /* LP is 10T   Full Duplex Capable */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008       /* LP is 100TX Half Duplex Capable */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010       /* LP is 100TX Full Duplex Capable */
 
 /* Next Page TX Register */
-#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
-#define NPTX_TOGGLE         0x0800 /* Toggles between exchanges
-                                    * of different NP
-                                    */
-#define NPTX_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
-                                    * 0 = cannot comply with msg
-                                    */
-#define NPTX_MSG_PAGE       0x2000 /* formatted(1)/unformatted(0) pg */
-#define NPTX_NEXT_PAGE      0x8000 /* 1 = addition NP will follow
-                                    * 0 = sending last NP
-                                    */
+#define NPTX_MSG_CODE_FIELD 0x0001     /* NP msg code or unformatted data */
+#define NPTX_TOGGLE         0x0800     /* Toggles between exchanges
+                                        * of different NP
+                                        */
+#define NPTX_ACKNOWLDGE2    0x1000     /* 1 = will comply with msg
+                                        * 0 = cannot comply with msg
+                                        */
+#define NPTX_MSG_PAGE       0x2000     /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE      0x8000     /* 1 = addition NP will follow
+                                        * 0 = sending last NP
+                                        */
 
 /* Link Partner Next Page Register */
-#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
-#define LP_RNPR_TOGGLE         0x0800 /* Toggles between exchanges
-                                       * of different NP
-                                       */
-#define LP_RNPR_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
-                                       * 0 = cannot comply with msg
-                                       */
-#define LP_RNPR_MSG_PAGE       0x2000  /* formatted(1)/unformatted(0) pg */
-#define LP_RNPR_ACKNOWLDGE     0x4000  /* 1 = ACK / 0 = NO ACK */
-#define LP_RNPR_NEXT_PAGE      0x8000  /* 1 = addition NP will follow
-                                        * 0 = sending last NP
-                                        */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001  /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE         0x0800  /* Toggles between exchanges
+                                        * of different NP
+                                        */
+#define LP_RNPR_ACKNOWLDGE2    0x1000  /* 1 = will comply with msg
+                                        * 0 = cannot comply with msg
+                                        */
+#define LP_RNPR_MSG_PAGE       0x2000  /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE     0x4000  /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE      0x8000  /* 1 = addition NP will follow
+                                        * 0 = sending last NP
+                                        */
 
 /* 1000BASE-T Control Register */
-#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
-#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
-#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
-                                        /* 0=DTE device */
-#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
-                                        /* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
-                                        /* 0=Automatic Master/Slave config */
-#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
-#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
-#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
-#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
-#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+#define CR_1000T_ASYM_PAUSE      0x0080        /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100        /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200        /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400        /* 1=Repeater/switch device port */
+                                       /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800        /* 1=Configure PHY as Master */
+                                       /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000        /* 1=Master/Slave manual config value */
+                                       /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000       /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000        /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000        /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000        /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000        /* Transmitter Distortion test */
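As the comments above spell out, manual master/slave selection takes two bits: one to switch off automatic resolution and one to pick the role. A minimal sketch, not taken from this patch (gig_ctrl is a placeholder for the 1000BASE-T control register value):

	u16 gig_ctrl = 0;	/* placeholder: 1000BASE-T control register value */
	/* force this PHY to be master: enable manual config and set the value bit */
	gig_ctrl |= CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE;
	/* ...or clear the enable bit to return to automatic master/slave resolution */
	gig_ctrl &= ~CR_1000T_MS_ENABLE;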
 
 /* 1000BASE-T Status Register */
-#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
-#define SR_1000T_ASYM_PAUSE_DIR   0x0100 /* LP asymmetric pause direction bit */
-#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
-#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
-#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local TX is Master, 0=Slave */
-#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF       /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR   0x0100       /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400       /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800       /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000       /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000       /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000       /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000       /* Master/Slave config fault */
 #define SR_1000T_REMOTE_RX_STATUS_SHIFT          12
 #define SR_1000T_LOCAL_RX_STATUS_SHIFT           13
 #define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT    5
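Sketch only, not from this patch (gig_status is a placeholder for the 1000BASE-T status register value); the two *_SHIFT constants match the bit positions of the masks defined above:

	u16 gig_status   = 0;	/* placeholder: 1000BASE-T status register value */
	int idle_errs    = gig_status & SR_1000T_IDLE_ERROR_CNT;		/* low byte */
	int remote_rx_ok = (gig_status >> SR_1000T_REMOTE_RX_STATUS_SHIFT) & 1;	/* bit 12 */
	int local_rx_ok  = (gig_status >> SR_1000T_LOCAL_RX_STATUS_SHIFT) & 1;	/* bit 13 */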
@@ -2829,64 +2652,64 @@ struct e1000_host_command_info {
 #define FFE_IDLE_ERR_COUNT_TIMEOUT_100           100
 
 /* Extended Status Register */
-#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
-#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
-#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
-#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000  /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000  /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000  /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000  /* 1000X FD capable */
 
-#define PHY_TX_POLARITY_MASK   0x0100 /* register 10h bit 8 (polarity bit) */
-#define PHY_TX_NORMAL_POLARITY 0      /* register 10h bit 8 (normal polarity) */
+#define PHY_TX_POLARITY_MASK   0x0100  /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0       /* register 10h bit 8 (normal polarity) */
 
-#define AUTO_POLARITY_DISABLE  0x0010 /* register 11h bit 4 */
-                                      /* (0=enable, 1=disable) */
+#define AUTO_POLARITY_DISABLE  0x0010  /* register 11h bit 4 */
+                                     /* (0=enable, 1=disable) */
 
 /* M88E1000 PHY Specific Control Register */
-#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
-#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
-#define M88E1000_PSCR_CLK125_DISABLE    0x0010 /* 1=CLK125 low,
-                                                * 0=CLK125 toggling
-                                                */
-#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
-                                               /* Manual MDI configuration */
-#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
-#define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* 1000BASE-T: Auto crossover,
-                                                *  100BASE-TX/10BASE-T:
-                                                *  MDI Mode
-                                                */
-#define M88E1000_PSCR_AUTO_X_MODE      0x0060  /* Auto crossover enabled
-                                                * all speeds.
-                                                */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010 /* 1=CLK125 low,
+                                                * 0=CLK125 toggling
+                                                */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+                                              /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* 1000BASE-T: Auto crossover,
+                                                *  100BASE-TX/10BASE-T:
+                                                *  MDI Mode
+                                                */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060  /* Auto crossover enabled
+                                                * all speeds.
+                                                */
 #define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
-                                        /* 1=Enable Extended 10BASE-T distance
-                                         * (Lower 10BASE-T RX Threshold)
-                                         * 0=Normal 10BASE-T RX Threshold */
+                                       /* 1=Enable Extended 10BASE-T distance
+                                        * (Lower 10BASE-T RX Threshold)
+                                        * 0=Normal 10BASE-T RX Threshold */
 #define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
-                                        /* 1=5-Bit interface in 100BASE-TX
-                                         * 0=MII interface in 100BASE-TX */
-#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
-#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
-#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+                                       /* 1=5-Bit interface in 100BASE-TX
+                                        * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200      /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400      /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800      /* 1=Assert CRS on Transmit */
 
 #define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT    1
 #define M88E1000_PSCR_AUTO_X_MODE_SHIFT          5
 #define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
 
 /* M88E1000 PHY Specific Status Register */
-#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
-#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
-#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
-#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
-#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* 0=<50M;1=50-80M;2=80-110M;
-                                            * 3=110-140M;4=>140M */
-#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
-#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
-#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
-#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
-#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
-#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
-#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
-#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+#define M88E1000_PSSR_JABBER             0x0001        /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002        /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020        /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040        /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380        /* 0=<50M;1=50-80M;2=80-110M;
+                                                * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK               0x0400        /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800        /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000        /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000        /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000        /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000        /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000        /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000        /* 10=1000Mbs */
 
 #define M88E1000_PSSR_REV_POLARITY_SHIFT 1
 #define M88E1000_PSSR_DOWNSHIFT_SHIFT    5
@@ -2894,12 +2717,12 @@ struct e1000_host_command_info {
 #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
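A sketch of how the masks and shift above decode, not taken from this patch (phy_data is a placeholder for a read of the M88E1000 PHY specific status register):

	u16 phy_data = 0;	/* placeholder: M88E1000 PHY specific status word */
	int mdix     = !!(phy_data & M88E1000_PSSR_MDIX);		/* 1 = MDI-X */
	int bucket   = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
		       M88E1000_PSSR_CABLE_LENGTH_SHIFT;		/* 0 = <50M ... 4 = >140M */
	int gigabit  = (phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS;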
 
 /* M88E1000 Extended PHY Specific Control Register */
-#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
-#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000 /* 1=Lost lock detect enabled.
-                                              * Will assert lost lock and bring
-                                              * link down if idle not seen
-                                              * within 1ms in 1000BASE-T
-                                              */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000   /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000   /* 1=Lost lock detect enabled.
+                                                * Will assert lost lock and bring
+                                                * link down if idle not seen
+                                                * within 1ms in 1000BASE-T
+                                                */
 /* Number of times we will attempt to autonegotiate before downshifting if we
  * are the master */
 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
@@ -2914,9 +2737,9 @@ struct e1000_host_command_info {
 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
-#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
-#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
-#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_2_5     0x0060   /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25      0x0070   /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0       0x0000   /* NO  TX_CLK */
 
 /* M88EC018 Rev 2 specific DownShift settings */
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
@@ -2938,18 +2761,18 @@ struct e1000_host_command_info {
 #define IGP01E1000_PSCFR_DISABLE_TRANSMIT      0x2000
 
 /* IGP01E1000 Specific Port Status Register - R/O */
-#define IGP01E1000_PSSR_AUTONEG_FAILED         0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_AUTONEG_FAILED         0x0001  /* RO LH SC */
 #define IGP01E1000_PSSR_POLARITY_REVERSED      0x0002
 #define IGP01E1000_PSSR_CABLE_LENGTH           0x007C
 #define IGP01E1000_PSSR_FULL_DUPLEX            0x0200
 #define IGP01E1000_PSSR_LINK_UP                0x0400
 #define IGP01E1000_PSSR_MDIX                   0x0800
-#define IGP01E1000_PSSR_SPEED_MASK             0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_MASK             0xC000  /* speed bits mask */
 #define IGP01E1000_PSSR_SPEED_10MBPS           0x4000
 #define IGP01E1000_PSSR_SPEED_100MBPS          0x8000
 #define IGP01E1000_PSSR_SPEED_1000MBPS         0xC000
-#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT     0x0002 /* shift right 2 */
-#define IGP01E1000_PSSR_MDIX_SHIFT             0x000B /* shift right 11 */
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT     0x0002  /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT             0x000B  /* shift right 11 */
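Sketch only, not from this patch (status is a placeholder for a read of IGP01E1000_PHY_PORT_STATUS, defined earlier in this file). Note the two *_SHIFT constants are plain shift counts (2 and 11) despite being written in hex:

	u16 status = 0;	/* placeholder: IGP01E1000_PHY_PORT_STATUS value */
	u16 cable  = (status & IGP01E1000_PSSR_CABLE_LENGTH) >>
		     IGP01E1000_PSSR_CABLE_LENGTH_SHIFT;	/* shift right 2 */
	u16 mdix   = (status & IGP01E1000_PSSR_MDIX) >>
		     IGP01E1000_PSSR_MDIX_SHIFT;		/* shift right 11 */
	int gig    = (status & IGP01E1000_PSSR_SPEED_MASK) ==
		     IGP01E1000_PSSR_SPEED_1000MBPS;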
 
 /* IGP01E1000 Specific Port Control Register - R/W */
 #define IGP01E1000_PSCR_TP_LOOPBACK            0x0010
@@ -2957,16 +2780,16 @@ struct e1000_host_command_info {
 #define IGP01E1000_PSCR_TEN_CRS_SELECT         0x0400
 #define IGP01E1000_PSCR_FLIP_CHIP              0x0800
 #define IGP01E1000_PSCR_AUTO_MDIX              0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX         0x2000 /* 0-MDI, 1-MDIX */
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX         0x2000  /* 0-MDI, 1-MDIX */
 
 /* IGP01E1000 Specific Port Link Health Register */
 #define IGP01E1000_PLHR_SS_DOWNGRADE           0x8000
 #define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR    0x4000
 #define IGP01E1000_PLHR_MASTER_FAULT           0x2000
 #define IGP01E1000_PLHR_MASTER_RESOLUTION      0x1000
-#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK       0x0800 /* LH */
-#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW   0x0400 /* LH */
-#define IGP01E1000_PLHR_DATA_ERR_1             0x0200 /* LH */
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK       0x0800  /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW   0x0400  /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1             0x0200  /* LH */
 #define IGP01E1000_PLHR_DATA_ERR_0             0x0100
 #define IGP01E1000_PLHR_AUTONEG_FAULT          0x0040
 #define IGP01E1000_PLHR_AUTONEG_ACTIVE         0x0010
@@ -2981,9 +2804,9 @@ struct e1000_host_command_info {
 #define IGP01E1000_MSE_CHANNEL_B        0x0F00
 #define IGP01E1000_MSE_CHANNEL_A        0xF000
 
-#define IGP02E1000_PM_SPD                         0x0001  /* Smart Power Down */
-#define IGP02E1000_PM_D3_LPLU                     0x0004  /* Enable LPLU in non-D0a modes */
-#define IGP02E1000_PM_D0_LPLU                     0x0002  /* Enable LPLU in D0a mode */
+#define IGP02E1000_PM_SPD                         0x0001       /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU                     0x0004       /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU                     0x0002       /* Enable LPLU in D0a mode */
 
 /* IGP01E1000 DSP reset macros */
 #define DSP_RESET_ENABLE     0x0
@@ -2992,8 +2815,8 @@ struct e1000_host_command_info {
 
 /* IGP01E1000 & IGP02E1000 AGC Registers */
 
-#define IGP01E1000_AGC_LENGTH_SHIFT 7         /* Coarse - 13:11, Fine - 10:7 */
-#define IGP02E1000_AGC_LENGTH_SHIFT 9         /* Coarse - 15:13, Fine - 12:9 */
+#define IGP01E1000_AGC_LENGTH_SHIFT 7  /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9  /* Coarse - 15:13, Fine - 12:9 */
 
 /* IGP02E1000 AGC Register Length 9-bit mask */
 #define IGP02E1000_AGC_LENGTH_MASK  0x7F
@@ -3011,9 +2834,9 @@ struct e1000_host_command_info {
 #define IGP01E1000_PHY_POLARITY_MASK    0x0078
 
 /* IGP01E1000 GMII FIFO Register */
-#define IGP01E1000_GMII_FLEX_SPD               0x10 /* Enable flexible speed
-                                                     * on Link-Up */
-#define IGP01E1000_GMII_SPD                    0x20 /* Enable SPD */
+#define IGP01E1000_GMII_FLEX_SPD               0x10    /* Enable flexible speed
+                                                        * on Link-Up */
+#define IGP01E1000_GMII_SPD                    0x20    /* Enable SPD */
 
 /* IGP01E1000 Analog Register */
 #define IGP01E1000_ANALOG_SPARE_FUSE_STATUS       0x20D1
@@ -3032,114 +2855,6 @@ struct e1000_host_command_info {
 #define IGP01E1000_ANALOG_FUSE_FINE_1               0x0080
 #define IGP01E1000_ANALOG_FUSE_FINE_10              0x0500
 
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
-#define GG82563_PSCR_DISABLE_JABBER             0x0001 /* 1=Disable Jabber */
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE  0x0002 /* 1=Polarity Reversal Disabled */
-#define GG82563_PSCR_POWER_DOWN                 0x0004 /* 1=Power Down */
-#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE  0x0008 /* 1=Transmitter Disabled */
-#define GG82563_PSCR_CROSSOVER_MODE_MASK        0x0060
-#define GG82563_PSCR_CROSSOVER_MODE_MDI         0x0000 /* 00=Manual MDI configuration */
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX        0x0020 /* 01=Manual MDIX configuration */
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO        0x0060 /* 11=Automatic crossover */
-#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE   0x0080 /* 1=Enable Extended Distance */
-#define GG82563_PSCR_ENERGY_DETECT_MASK         0x0300
-#define GG82563_PSCR_ENERGY_DETECT_OFF          0x0000 /* 00,01=Off */
-#define GG82563_PSCR_ENERGY_DETECT_RX           0x0200 /* 10=Sense on Rx only (Energy Detect) */
-#define GG82563_PSCR_ENERGY_DETECT_RX_TM        0x0300 /* 11=Sense and Tx NLP */
-#define GG82563_PSCR_FORCE_LINK_GOOD            0x0400 /* 1=Force Link Good */
-#define GG82563_PSCR_DOWNSHIFT_ENABLE           0x0800 /* 1=Enable Downshift */
-#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK     0x7000
-#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT    12
-
-/* PHY Specific Status Register (Page 0, Register 17) */
-#define GG82563_PSSR_JABBER                0x0001 /* 1=Jabber */
-#define GG82563_PSSR_POLARITY              0x0002 /* 1=Polarity Reversed */
-#define GG82563_PSSR_LINK                  0x0008 /* 1=Link is Up */
-#define GG82563_PSSR_ENERGY_DETECT         0x0010 /* 1=Sleep, 0=Active */
-#define GG82563_PSSR_DOWNSHIFT             0x0020 /* 1=Downshift */
-#define GG82563_PSSR_CROSSOVER_STATUS      0x0040 /* 1=MDIX, 0=MDI */
-#define GG82563_PSSR_RX_PAUSE_ENABLED      0x0100 /* 1=Receive Pause Enabled */
-#define GG82563_PSSR_TX_PAUSE_ENABLED      0x0200 /* 1=Transmit Pause Enabled */
-#define GG82563_PSSR_LINK_UP               0x0400 /* 1=Link Up */
-#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
-#define GG82563_PSSR_PAGE_RECEIVED         0x1000 /* 1=Page Received */
-#define GG82563_PSSR_DUPLEX                0x2000 /* 1-Full-Duplex */
-#define GG82563_PSSR_SPEED_MASK            0xC000
-#define GG82563_PSSR_SPEED_10MBPS          0x0000 /* 00=10Mbps */
-#define GG82563_PSSR_SPEED_100MBPS         0x4000 /* 01=100Mbps */
-#define GG82563_PSSR_SPEED_1000MBPS        0x8000 /* 10=1000Mbps */
-
-/* PHY Specific Status Register 2 (Page 0, Register 19) */
-#define GG82563_PSSR2_JABBER                0x0001 /* 1=Jabber */
-#define GG82563_PSSR2_POLARITY_CHANGED      0x0002 /* 1=Polarity Changed */
-#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
-#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT   0x0020 /* 1=Downshift Detected */
-#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE  0x0040 /* 1=Crossover Changed */
-#define GG82563_PSSR2_FALSE_CARRIER         0x0100 /* 1=False Carrier */
-#define GG82563_PSSR2_SYMBOL_ERROR          0x0200 /* 1=Symbol Error */
-#define GG82563_PSSR2_LINK_STATUS_CHANGED   0x0400 /* 1=Link Status Changed */
-#define GG82563_PSSR2_AUTO_NEG_COMPLETED    0x0800 /* 1=Auto-Neg Completed */
-#define GG82563_PSSR2_PAGE_RECEIVED         0x1000 /* 1=Page Received */
-#define GG82563_PSSR2_DUPLEX_CHANGED        0x2000 /* 1=Duplex Changed */
-#define GG82563_PSSR2_SPEED_CHANGED         0x4000 /* 1=Speed Changed */
-#define GG82563_PSSR2_AUTO_NEG_ERROR        0x8000 /* 1=Auto-Neg Error */
-
-/* PHY Specific Control Register 2 (Page 0, Register 26) */
-#define GG82563_PSCR2_10BT_POLARITY_FORCE           0x0002 /* 1=Force Negative Polarity */
-#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK       0x000C
-#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL     0x0000 /* 00,01=Normal Operation */
-#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS      0x0008 /* 10=Select 112ns Sequence */
-#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS       0x000C /* 11=Select 16ns Sequence */
-#define GG82563_PSCR2_REVERSE_AUTO_NEG              0x2000 /* 1=Reverse Auto-Negotiation */
-#define GG82563_PSCR2_1000BT_DISABLE                0x4000 /* 1=Disable 1000BASE-T */
-#define GG82563_PSCR2_TRANSMITER_TYPE_MASK          0x8000
-#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B      0x0000 /* 0=Class B */
-#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A      0x8000 /* 1=Class A */
-
-/* MAC Specific Control Register (Page 2, Register 21) */
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
-#define GG82563_MSCR_TX_CLK_MASK                    0x0007
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ           0x0004
-#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ           0x0005
-#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ         0x0006
-#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ          0x0007
-
-#define GG82563_MSCR_ASSERT_CRS_ON_TX               0x0010 /* 1=Assert */
-
-/* DSP Distance Register (Page 5, Register 26) */
-#define GG82563_DSPD_CABLE_LENGTH               0x0007 /* 0 = <50M;
-                                                          1 = 50-80M;
-                                                          2 = 80-110M;
-                                                          3 = 110-140M;
-                                                          4 = >140M */
-
-/* Kumeran Mode Control Register (Page 193, Register 16) */
-#define GG82563_KMCR_PHY_LEDS_EN                    0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
-#define GG82563_KMCR_FORCE_LINK_UP                  0x0040 /* 1=Force Link Up */
-#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT         0x0080
-#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK     0x0400
-#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT          0x0400 /* 1=6.25MHz, 0=0.8MHz */
-#define GG82563_KMCR_PASS_FALSE_CARRIER             0x0800
-
-/* Power Management Control Register (Page 193, Register 20) */
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE         0x0001 /* 1=Enalbe SERDES Electrical Idle */
-#define GG82563_PMCR_DISABLE_PORT                   0x0002 /* 1=Disable Port */
-#define GG82563_PMCR_DISABLE_SERDES                 0x0004 /* 1=Disable SERDES */
-#define GG82563_PMCR_REVERSE_AUTO_NEG               0x0008 /* 1=Enable Reverse Auto-Negotiation */
-#define GG82563_PMCR_DISABLE_1000_NON_D0            0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
-#define GG82563_PMCR_DISABLE_1000                   0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
-#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A           0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
-#define GG82563_PMCR_FORCE_POWER_STATE              0x0080 /* 1=Force Power State */
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK    0x0300
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR      0x0000 /* 00=Dr */
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U     0x0100 /* 01=D0u */
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A     0x0200 /* 10=D0a */
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3      0x0300 /* 11=D3 */
-
-/* In-Band Control Register (Page 194, Register 18) */
-#define GG82563_ICR_DIS_PADDING                     0x0010 /* Disable Padding Use */
-
-
 /* Bit definitions for valid PHY IDs. */
 /* I = Integrated
  * E = External
@@ -3154,8 +2869,6 @@ struct e1000_host_command_info {
 #define M88E1011_I_REV_4   0x04
 #define M88E1111_I_PHY_ID  0x01410CC0
 #define L1LXT971A_PHY_ID   0x001378E0
-#define GG82563_E_PHY_ID   0x01410CA0
-
 
 /* Bits...
  * 15-5: page
@@ -3166,41 +2879,41 @@ struct e1000_host_command_info {
         (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
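Per the 15-5/4-0 split described above, the page number is shifted left by 5 before being OR-ed with the register offset. As a worked expansion (illustrative only): PHY_REG(769, 17) = (769 << 5) | 17 = 0x6020 | 0x11 = 0x6031, which is how IGP3_PHY_PORT_CTRL just below resolves to a single paged address.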
 
 #define IGP3_PHY_PORT_CTRL           \
-        PHY_REG(769, 17) /* Port General Configuration */
+        PHY_REG(769, 17)       /* Port General Configuration */
 #define IGP3_PHY_RATE_ADAPT_CTRL \
-        PHY_REG(769, 25) /* Rate Adapter Control Register */
+        PHY_REG(769, 25)       /* Rate Adapter Control Register */
 
 #define IGP3_KMRN_FIFO_CTRL_STATS \
-        PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+        PHY_REG(770, 16)       /* KMRN FIFO's control/status register */
 #define IGP3_KMRN_POWER_MNG_CTRL \
-        PHY_REG(770, 17) /* KMRN Power Management Control Register */
+        PHY_REG(770, 17)       /* KMRN Power Management Control Register */
 #define IGP3_KMRN_INBAND_CTRL \
-        PHY_REG(770, 18) /* KMRN Inband Control Register */
+        PHY_REG(770, 18)       /* KMRN Inband Control Register */
 #define IGP3_KMRN_DIAG \
-        PHY_REG(770, 19) /* KMRN Diagnostic register */
-#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+        PHY_REG(770, 19)       /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002    /* RX PCS is not synced */
 #define IGP3_KMRN_ACK_TIMEOUT \
-        PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+        PHY_REG(770, 20)       /* KMRN Acknowledge Timeouts register */
 
 #define IGP3_VR_CTRL \
-        PHY_REG(776, 18) /* Voltage regulator control register */
-#define IGP3_VR_CTRL_MODE_SHUT       0x0200 /* Enter powerdown, shutdown VRs */
-#define IGP3_VR_CTRL_MODE_MASK       0x0300 /* Shutdown VR Mask */
+        PHY_REG(776, 18)       /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT       0x0200    /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK       0x0300    /* Shutdown VR Mask */
 
 #define IGP3_CAPABILITY \
-        PHY_REG(776, 19) /* IGP3 Capability Register */
+        PHY_REG(776, 19)       /* IGP3 Capability Register */
 
 /* Capabilities for SKU Control  */
-#define IGP3_CAP_INITIATE_TEAM       0x0001 /* Able to initiate a team */
-#define IGP3_CAP_WFM                 0x0002 /* Support WoL and PXE */
-#define IGP3_CAP_ASF                 0x0004 /* Support ASF */
-#define IGP3_CAP_LPLU                0x0008 /* Support Low Power Link Up */
-#define IGP3_CAP_DC_AUTO_SPEED       0x0010 /* Support AC/DC Auto Link Speed */
-#define IGP3_CAP_SPD                 0x0020 /* Support Smart Power Down */
-#define IGP3_CAP_MULT_QUEUE          0x0040 /* Support 2 tx & 2 rx queues */
-#define IGP3_CAP_RSS                 0x0080 /* Support RSS */
-#define IGP3_CAP_8021PQ              0x0100 /* Support 802.1Q & 802.1p */
-#define IGP3_CAP_AMT_CB              0x0200 /* Support active manageability and circuit breaker */
+#define IGP3_CAP_INITIATE_TEAM       0x0001    /* Able to initiate a team */
+#define IGP3_CAP_WFM                 0x0002    /* Support WoL and PXE */
+#define IGP3_CAP_ASF                 0x0004    /* Support ASF */
+#define IGP3_CAP_LPLU                0x0008    /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED       0x0010    /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD                 0x0020    /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE          0x0040    /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS                 0x0080    /* Support RSS */
+#define IGP3_CAP_8021PQ              0x0100    /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB              0x0200    /* Support active manageability and circuit breaker */
 
 #define IGP3_PPC_JORDAN_EN           0x0001
 #define IGP3_PPC_JORDAN_GIGA_SPEED   0x0002
@@ -3210,69 +2923,69 @@ struct e1000_host_command_info {
 #define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA        0x0020
 #define IGP3_KMRN_PMC_K0S_MODE1_EN_100         0x0040
 
-#define IGP3E1000_PHY_MISC_CTRL                0x1B   /* Misc. Ctrl register */
-#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET        0x1000 /* Duplex Manual Set */
+#define IGP3E1000_PHY_MISC_CTRL                0x1B    /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET        0x1000  /* Duplex Manual Set */
 
 #define IGP3_KMRN_EXT_CTRL  PHY_REG(770, 18)
 #define IGP3_KMRN_EC_DIS_INBAND    0x0080
 
 #define IGP03E1000_E_PHY_ID  0x02A80390
-#define IFE_E_PHY_ID         0x02A80330 /* 10/100 PHY */
+#define IFE_E_PHY_ID         0x02A80330        /* 10/100 PHY */
 #define IFE_PLUS_E_PHY_ID    0x02A80320
 #define IFE_C_E_PHY_ID       0x02A80310
 
-#define IFE_PHY_EXTENDED_STATUS_CONTROL   0x10  /* 100BaseTx Extended Status, Control and Address */
-#define IFE_PHY_SPECIAL_CONTROL           0x11  /* 100BaseTx PHY special control register */
-#define IFE_PHY_RCV_FALSE_CARRIER         0x13  /* 100BaseTx Receive False Carrier Counter */
-#define IFE_PHY_RCV_DISCONNECT            0x14  /* 100BaseTx Receive Disconnet Counter */
-#define IFE_PHY_RCV_ERROT_FRAME           0x15  /* 100BaseTx Receive Error Frame Counter */
-#define IFE_PHY_RCV_SYMBOL_ERR            0x16  /* Receive Symbol Error Counter */
-#define IFE_PHY_PREM_EOF_ERR              0x17  /* 100BaseTx Receive Premature End Of Frame Error Counter */
-#define IFE_PHY_RCV_EOF_ERR               0x18  /* 10BaseT Receive End Of Frame Error Counter */
-#define IFE_PHY_TX_JABBER_DETECT          0x19  /* 10BaseT Transmit Jabber Detect Counter */
-#define IFE_PHY_EQUALIZER                 0x1A  /* PHY Equalizer Control and Status */
-#define IFE_PHY_SPECIAL_CONTROL_LED       0x1B  /* PHY special control and LED configuration */
-#define IFE_PHY_MDIX_CONTROL              0x1C  /* MDI/MDI-X Control register */
-#define IFE_PHY_HWI_CONTROL               0x1D  /* Hardware Integrity Control (HWI) */
-
-#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE  0x2000  /* Defaut 1 = Disable auto reduced power down */
-#define IFE_PESC_100BTX_POWER_DOWN           0x0400  /* Indicates the power state of 100BASE-TX */
-#define IFE_PESC_10BTX_POWER_DOWN            0x0200  /* Indicates the power state of 10BASE-T */
-#define IFE_PESC_POLARITY_REVERSED           0x0100  /* Indicates 10BASE-T polarity */
-#define IFE_PESC_PHY_ADDR_MASK               0x007C  /* Bit 6:2 for sampled PHY address */
-#define IFE_PESC_SPEED                       0x0002  /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
-#define IFE_PESC_DUPLEX                      0x0001  /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PHY_EXTENDED_STATUS_CONTROL   0x10 /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL           0x11 /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER         0x13 /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT            0x14 /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME           0x15 /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR            0x16 /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR              0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR               0x18 /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT          0x19 /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER                 0x1A /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED       0x1B /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL              0x1C /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL               0x1D /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE  0x2000    /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN           0x0400    /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN            0x0200    /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED           0x0100    /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK               0x007C    /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED                       0x0002    /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
+#define IFE_PESC_DUPLEX                      0x0001    /* Auto-negotiation duplex result 1=Full, 0=Half */
 #define IFE_PESC_POLARITY_REVERSED_SHIFT     8
 
-#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN   0x0100  /* 1 = Dyanmic Power Down disabled */
-#define IFE_PSC_FORCE_POLARITY               0x0020  /* 1=Reversed Polarity, 0=Normal */
-#define IFE_PSC_AUTO_POLARITY_DISABLE        0x0010  /* 1=Auto Polarity Disabled, 0=Enabled */
-#define IFE_PSC_JABBER_FUNC_DISABLE          0x0001  /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN   0x0100    /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY               0x0020    /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE        0x0010    /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE          0x0001    /* 1=Jabber Disabled, 0=Normal Jabber Operation */
 #define IFE_PSC_FORCE_POLARITY_SHIFT         5
 #define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT  4
 
-#define IFE_PMC_AUTO_MDIX                    0x0080  /* 1=enable MDI/MDI-X feature, default 0=disabled */
-#define IFE_PMC_FORCE_MDIX                   0x0040  /* 1=force MDIX-X, 0=force MDI */
-#define IFE_PMC_MDIX_STATUS                  0x0020  /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_AUTO_MDIX_COMPLETE           0x0010  /* Resolution algorithm is completed */
+#define IFE_PMC_AUTO_MDIX                    0x0080    /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX                   0x0040    /* 1=force MDIX-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS                  0x0020    /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE           0x0010    /* Resolution algorithm is completed */
 #define IFE_PMC_MDIX_MODE_SHIFT              6
-#define IFE_PHC_MDIX_RESET_ALL_MASK          0x0000  /* Disable auto MDI-X */
-
-#define IFE_PHC_HWI_ENABLE                   0x8000  /* Enable the HWI feature */
-#define IFE_PHC_ABILITY_CHECK                0x4000  /* 1= Test Passed, 0=failed */
-#define IFE_PHC_TEST_EXEC                    0x2000  /* PHY launch test pulses on the wire */
-#define IFE_PHC_HIGHZ                        0x0200  /* 1 = Open Circuit */
-#define IFE_PHC_LOWZ                         0x0400  /* 1 = Short Circuit */
-#define IFE_PHC_LOW_HIGH_Z_MASK              0x0600  /* Mask for indication type of problem on the line */
-#define IFE_PHC_DISTANCE_MASK                0x01FF  /* Mask for distance to the cable problem, in 80cm granularity */
-#define IFE_PHC_RESET_ALL_MASK               0x0000  /* Disable HWI */
-#define IFE_PSCL_PROBE_MODE                  0x0020  /* LED Probe mode */
-#define IFE_PSCL_PROBE_LEDS_OFF              0x0006  /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON               0x0007  /* Force LEDs 0 and 2 on */
-
-#define ICH_FLASH_COMMAND_TIMEOUT            5000    /* 5000 uSecs - adjusted */
-#define ICH_FLASH_ERASE_TIMEOUT              3000000 /* Up to 3 seconds - worst case */
-#define ICH_FLASH_CYCLE_REPEAT_COUNT         10      /* 10 cycles */
+#define IFE_PHC_MDIX_RESET_ALL_MASK          0x0000    /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE                   0x8000    /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK                0x4000    /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC                    0x2000    /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ                        0x0200    /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ                         0x0400    /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK              0x0600    /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK                0x01FF    /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK               0x0000    /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE                  0x0020    /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF              0x0006    /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON               0x0007    /* Force LEDs 0 and 2 on */
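Sketch only, not from this patch (hwi is a placeholder for a read of IFE_PHY_HWI_CONTROL, defined above); the distance field counts 80 cm steps to the reported fault:

	u16 hwi       = 0;				/* placeholder: IFE_PHY_HWI_CONTROL value */
	int open_ckt  = !!(hwi & IFE_PHC_HIGHZ);	/* 1 = open circuit */
	int short_ckt = !!(hwi & IFE_PHC_LOWZ);		/* 1 = short circuit */
	int dist_cm   = (hwi & IFE_PHC_DISTANCE_MASK) * 80;	/* 80 cm granularity */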
+
+#define ICH_FLASH_COMMAND_TIMEOUT            5000      /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT              3000000   /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT         10        /* 10 cycles */
 #define ICH_FLASH_SEG_SIZE_256               256
 #define ICH_FLASH_SEG_SIZE_4K                4096
 #define ICH_FLASH_SEG_SIZE_64K               65536
@@ -3305,74 +3018,6 @@ struct e1000_host_command_info {
 #define ICH_GFPREG_BASE_MASK       0x1FFF
 #define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
 
-/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
-/* Offset 04h HSFSTS */
-union ich8_hws_flash_status {
-    struct ich8_hsfsts {
-#ifdef __BIG_ENDIAN
-        u16 reserved2      :6;
-        u16 fldesvalid     :1;
-        u16 flockdn        :1;
-        u16 flcdone        :1;
-        u16 flcerr         :1;
-        u16 dael           :1;
-        u16 berasesz       :2;
-        u16 flcinprog      :1;
-        u16 reserved1      :2;
-#else
-        u16 flcdone        :1;   /* bit 0 Flash Cycle Done */
-        u16 flcerr         :1;   /* bit 1 Flash Cycle Error */
-        u16 dael           :1;   /* bit 2 Direct Access error Log */
-        u16 berasesz       :2;   /* bit 4:3 Block/Sector Erase Size */
-        u16 flcinprog      :1;   /* bit 5 flash SPI cycle in Progress */
-        u16 reserved1      :2;   /* bit 13:6 Reserved */
-        u16 reserved2      :6;   /* bit 13:6 Reserved */
-        u16 fldesvalid     :1;   /* bit 14 Flash Descriptor Valid */
-        u16 flockdn        :1;   /* bit 15 Flash Configuration Lock-Down */
-#endif
-    } hsf_status;
-    u16 regval;
-};
-
-/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
-/* Offset 06h FLCTL */
-union ich8_hws_flash_ctrl {
-    struct ich8_hsflctl {
-#ifdef __BIG_ENDIAN
-        u16 fldbcount      :2;
-        u16 flockdn        :6;
-        u16 flcgo          :1;
-        u16 flcycle        :2;
-        u16 reserved       :5;
-#else
-        u16 flcgo          :1;   /* 0 Flash Cycle Go */
-        u16 flcycle        :2;   /* 2:1 Flash Cycle */
-        u16 reserved       :5;   /* 7:3 Reserved  */
-        u16 fldbcount      :2;   /* 9:8 Flash Data Byte Count */
-        u16 flockdn        :6;   /* 15:10 Reserved */
-#endif
-    } hsf_ctrl;
-    u16 regval;
-};
-
-/* ICH8 Flash Region Access Permissions */
-union ich8_hws_flash_regacc {
-    struct ich8_flracc {
-#ifdef __BIG_ENDIAN
-        u32 gmwag          :8;
-        u32 gmrag          :8;
-        u32 grwa           :8;
-        u32 grra           :8;
-#else
-        u32 grra           :8;   /* 0:7 GbE region Read Access */
-        u32 grwa           :8;   /* 8:15 GbE region Write Access */
-        u32 gmrag          :8;   /* 23:16 GbE Master Read Access Grant  */
-        u32 gmwag          :8;   /* 31:24 GbE Master Write Access Grant */
-#endif
-    } hsf_flregacc;
-    u16 regval;
-};
-
 /* Miscellaneous PHY bit definitions. */
 #define PHY_PREAMBLE        0xFFFFFFFF
 #define PHY_SOF             0x01
@@ -3384,10 +3029,10 @@ union ich8_hws_flash_regacc {
 #define MII_CR_SPEED_100    0x2000
 #define MII_CR_SPEED_10     0x0000
 #define E1000_PHY_ADDRESS   0x01
-#define PHY_AUTO_NEG_TIME   45  /* 4.5 Seconds */
-#define PHY_FORCE_TIME      20  /* 2.0 Seconds */
+#define PHY_AUTO_NEG_TIME   45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME      20 /* 2.0 Seconds */
 #define PHY_REVISION_MASK   0xFFFFFFF0
-#define DEVICE_SPEED_MASK   0x00000300  /* Device Ctrl Reg Speed Mask */
+#define DEVICE_SPEED_MASK   0x00000300 /* Device Ctrl Reg Speed Mask */
 #define REG4_SPEED_MASK     0x01E0
 #define REG9_SPEED_MASK     0x0300
 #define ADVERTISE_10_HALF   0x0001
@@ -3396,8 +3041,8 @@ union ich8_hws_flash_regacc {
 #define ADVERTISE_100_FULL  0x0008
 #define ADVERTISE_1000_HALF 0x0010
 #define ADVERTISE_1000_FULL 0x0020
-#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F  /* Everything but 1000-Half */
-#define AUTONEG_ADVERTISE_10_100_ALL    0x000F /* All 10/100 speeds*/
-#define AUTONEG_ADVERTISE_10_ALL        0x0003 /* 10Mbps Full & Half speeds*/
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL    0x000F /* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL        0x0003 /* 10Mbps Full & Half speeds */
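Worked check (illustrative only): AUTONEG_ADVERTISE_SPEED_DEFAULT is every ADVERTISE_* speed except 1000-Half, i.e. 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0020 = 0x002F, and dropping the gigabit bit leaves AUTONEG_ADVERTISE_10_100_ALL = 0x000F.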
 
 #endif /* _E1000_HW_H_ */
index c66dd4f9437cddfaa3f7684a7adc0d5aff4d777d..bcd192ca47b0e2c56a9183c3d71e7b070204bf1d 100644 (file)
@@ -31,7 +31,7 @@
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k3-NAPI"
+#define DRV_VERSION "7.3.21-k5-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -131,7 +131,6 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
-static irqreturn_t e1000_intr_msi(int irq, void *data);
 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                               struct e1000_tx_ring *tx_ring);
 static int e1000_clean(struct napi_struct *napi, int budget);
@@ -258,25 +257,14 @@ module_exit(e1000_exit_module);
 
 static int e1000_request_irq(struct e1000_adapter *adapter)
 {
-       struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        irq_handler_t handler = e1000_intr;
        int irq_flags = IRQF_SHARED;
        int err;
 
-       if (hw->mac_type >= e1000_82571) {
-               adapter->have_msi = !pci_enable_msi(adapter->pdev);
-               if (adapter->have_msi) {
-                       handler = e1000_intr_msi;
-                       irq_flags = 0;
-               }
-       }
-
        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
                          netdev);
        if (err) {
-               if (adapter->have_msi)
-                       pci_disable_msi(adapter->pdev);
                DPRINTK(PROBE, ERR,
                        "Unable to allocate interrupt Error: %d\n", err);
        }
@@ -289,9 +277,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        free_irq(adapter->pdev->irq, netdev);
-
-       if (adapter->have_msi)
-               pci_disable_msi(adapter->pdev);
 }
 
 /**
@@ -345,76 +330,6 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
        }
 }
 
-/**
- * e1000_release_hw_control - release control of the h/w to f/w
- * @adapter: address of board private structure
- *
- * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the network i/f is closed.
- *
- **/
-
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
-{
-       u32 ctrl_ext;
-       u32 swsm;
-       struct e1000_hw *hw = &adapter->hw;
-
-       /* Let firmware taken over control of h/w */
-       switch (hw->mac_type) {
-       case e1000_82573:
-               swsm = er32(SWSM);
-               ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
-               break;
-       case e1000_82571:
-       case e1000_82572:
-       case e1000_80003es2lan:
-       case e1000_ich8lan:
-               ctrl_ext = er32(CTRL_EXT);
-               ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       default:
-               break;
-       }
-}
-
-/**
- * e1000_get_hw_control - get control of the h/w from f/w
- * @adapter: address of board private structure
- *
- * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the network i/f is open.
- *
- **/
-
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
-{
-       u32 ctrl_ext;
-       u32 swsm;
-       struct e1000_hw *hw = &adapter->hw;
-
-       /* Let firmware know the driver has taken over */
-       switch (hw->mac_type) {
-       case e1000_82573:
-               swsm = er32(SWSM);
-               ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
-               break;
-       case e1000_82571:
-       case e1000_82572:
-       case e1000_80003es2lan:
-       case e1000_ich8lan:
-               ctrl_ext = er32(CTRL_EXT);
-               ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-               break;
-       default:
-               break;
-       }
-}
-
 static void e1000_init_manageability(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
@@ -425,20 +340,6 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
                /* disable hardware interception of ARP */
                manc &= ~(E1000_MANC_ARP_EN);
 
-               /* enable receiving management packets to the host */
-               /* this will probably generate destination unreachable messages
-                * from the host OS, but the packets will be handled on SMBUS */
-               if (hw->has_manc2h) {
-                       u32 manc2h = er32(MANC2H);
-
-                       manc |= E1000_MANC_EN_MNG2HOST;
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
-                       manc2h |= E1000_MNG2HOST_PORT_623;
-                       manc2h |= E1000_MNG2HOST_PORT_664;
-                       ew32(MANC2H, manc2h);
-               }
-
                ew32(MANC, manc);
        }
 }
@@ -453,12 +354,6 @@ static void e1000_release_manageability(struct e1000_adapter *adapter)
                /* re-enable hardware interception of ARP */
                manc |= E1000_MANC_ARP_EN;
 
-               if (hw->has_manc2h)
-                       manc &= ~E1000_MANC_EN_MNG2HOST;
-
-               /* don't explicitly have to mess with MANC2H since
-                * MANC has an enable disable that gates MANC2H */
-
                ew32(MANC, manc);
        }
 }
@@ -563,15 +458,6 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
                        if (er32(MANC) & E1000_MANC_SMBUS_EN)
                                goto out;
                        break;
-               case e1000_82571:
-               case e1000_82572:
-               case e1000_82573:
-               case e1000_80003es2lan:
-               case e1000_ich8lan:
-                       if (e1000_check_mng_mode(hw) ||
-                           e1000_check_phy_reset_block(hw))
-                               goto out;
-                       break;
                default:
                        goto out;
                }
@@ -599,8 +485,7 @@ void e1000_down(struct e1000_adapter *adapter)
        ew32(RCTL, rctl & ~E1000_RCTL_EN);
        /* flush and sleep below */
 
-       /* can be netif_tx_disable when NETIF_F_LLTX is removed */
-       netif_stop_queue(netdev);
+       netif_tx_disable(netdev);
 
        /* disable transmits in the hardware */
        tctl = er32(TCTL);
@@ -671,16 +556,6 @@ void e1000_reset(struct e1000_adapter *adapter)
                legacy_pba_adjust = true;
                pba = E1000_PBA_30K;
                break;
-       case e1000_82571:
-       case e1000_82572:
-       case e1000_80003es2lan:
-               pba = E1000_PBA_38K;
-               break;
-       case e1000_82573:
-               pba = E1000_PBA_20K;
-               break;
-       case e1000_ich8lan:
-               pba = E1000_PBA_8K;
        case e1000_undefined:
        case e1000_num_macs:
                break;
@@ -744,16 +619,8 @@ void e1000_reset(struct e1000_adapter *adapter)
 
                        /* if short on rx space, rx wins and must trump tx
                         * adjustment or use Early Receive if available */
-                       if (pba < min_rx_space) {
-                               switch (hw->mac_type) {
-                               case e1000_82573:
-                                       /* ERT enabled in e1000_configure_rx */
-                                       break;
-                               default:
-                                       pba = min_rx_space;
-                                       break;
-                               }
-                       }
+                       if (pba < min_rx_space)
+                               pba = min_rx_space;
                }
        }
 
@@ -789,7 +656,6 @@ void e1000_reset(struct e1000_adapter *adapter)
 
        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
        if (hw->mac_type >= e1000_82544 &&
-           hw->mac_type <= e1000_82547_rev_2 &&
            hw->autoneg == 1 &&
            hw->autoneg_advertised == ADVERTISE_1000_FULL) {
                u32 ctrl = er32(CTRL);
@@ -806,20 +672,6 @@ void e1000_reset(struct e1000_adapter *adapter)
        e1000_reset_adaptive(hw);
        e1000_phy_get_info(hw, &adapter->phy_info);
 
-       if (!adapter->smart_power_down &&
-           (hw->mac_type == e1000_82571 ||
-            hw->mac_type == e1000_82572)) {
-               u16 phy_data = 0;
-               /* speed up time to link by disabling smart power down, ignore
-                * the return value of this function because there is nothing
-                * different we would do if it failed */
-               e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-                                  &phy_data);
-               phy_data &= ~IGP02E1000_PM_SPD;
-               e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-                                   phy_data);
-       }
-
        e1000_release_manageability(adapter);
 }
 
@@ -1046,17 +898,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                goto err_sw_init;
 
        err = -EIO;
-       /* Flash BAR mapping must happen after e1000_sw_init
-        * because it depends on mac_type */
-       if ((hw->mac_type == e1000_ich8lan) &&
-          (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
-               hw->flash_address = pci_ioremap_bar(pdev, 1);
-               if (!hw->flash_address)
-                       goto err_flashmap;
-       }
-
-       if (e1000_check_phy_reset_block(hw))
-               DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
        if (hw->mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
@@ -1064,21 +905,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                                   NETIF_F_HW_VLAN_TX |
                                   NETIF_F_HW_VLAN_RX |
                                   NETIF_F_HW_VLAN_FILTER;
-               if (hw->mac_type == e1000_ich8lan)
-                       netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
        }
 
        if ((hw->mac_type >= e1000_82544) &&
           (hw->mac_type != e1000_82547))
                netdev->features |= NETIF_F_TSO;
 
-       if (hw->mac_type > e1000_82547_rev_2)
-               netdev->features |= NETIF_F_TSO6;
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
        netdev->vlan_features |= NETIF_F_TSO;
-       netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_HW_CSUM;
        netdev->vlan_features |= NETIF_F_SG;
 
@@ -1153,15 +989,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
                eeprom_apme_mask = E1000_EEPROM_82544_APM;
                break;
-       case e1000_ich8lan:
-               e1000_read_eeprom(hw,
-                       EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
-               eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
-               break;
        case e1000_82546:
        case e1000_82546_rev_3:
-       case e1000_82571:
-       case e1000_80003es2lan:
                if (er32(STATUS) & E1000_STATUS_FUNC_1){
                        e1000_read_eeprom(hw,
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -1185,17 +1014,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                break;
        case E1000_DEV_ID_82546EB_FIBER:
        case E1000_DEV_ID_82546GB_FIBER:
-       case E1000_DEV_ID_82571EB_FIBER:
                /* Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting */
                if (er32(STATUS) & E1000_STATUS_FUNC_1)
                        adapter->eeprom_wol = 0;
                break;
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
-       case E1000_DEV_ID_82571EB_QUAD_COPPER:
-       case E1000_DEV_ID_82571EB_QUAD_FIBER:
-       case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
-       case E1000_DEV_ID_82571PT_QUAD_COPPER:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
@@ -1213,39 +1037,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        /* print bus type/speed/width info */
        DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
-               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
-                (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
-               ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
-                (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
+               ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
                 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
                 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
                 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
-               ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
-                (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
-                (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
-                "32-bit"));
+               ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
 
        printk("%pM\n", netdev->dev_addr);
 
-       if (hw->bus_type == e1000_bus_type_pci_express) {
-               DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
-                       "longer be supported by this driver in the future.\n",
-                       pdev->vendor, pdev->device);
-               DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
-                       "driver instead.\n");
-       }
-
        /* reset the hardware with the new settings */
        e1000_reset(adapter);
 
-       /* If the controller is 82573 and f/w is AMT, do not set
-        * DRV_LOAD until the interface is up.  For all other cases,
-        * let the f/w know that the h/w is now under the control
-        * of the driver. */
-       if (hw->mac_type != e1000_82573 ||
-           !e1000_check_mng_mode(hw))
-               e1000_get_hw_control(adapter);
-
        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
@@ -1260,14 +1063,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        return 0;
 
 err_register:
-       e1000_release_hw_control(adapter);
 err_eeprom:
-       if (!e1000_check_phy_reset_block(hw))
-               e1000_phy_hw_reset(hw);
+       e1000_phy_hw_reset(hw);
 
        if (hw->flash_address)
                iounmap(hw->flash_address);
-err_flashmap:
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
 err_sw_init:
@@ -1298,18 +1098,18 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
+       set_bit(__E1000_DOWN, &adapter->flags);
+       del_timer_sync(&adapter->tx_fifo_stall_timer);
+       del_timer_sync(&adapter->watchdog_timer);
+       del_timer_sync(&adapter->phy_info_timer);
+
        cancel_work_sync(&adapter->reset_task);
 
        e1000_release_manageability(adapter);
 
-       /* Release control of h/w to f/w.  If f/w is AMT enabled, this
-        * would have already happened in close and is redundant. */
-       e1000_release_hw_control(adapter);
-
        unregister_netdev(netdev);
 
-       if (!e1000_check_phy_reset_block(hw))
-               e1000_phy_hw_reset(hw);
+       e1000_phy_hw_reset(hw);
 
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
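
The remove path above now tears things down in a fixed order: mark the adapter down first so the timer and watchdog callbacks stop rearming themselves, then del_timer_sync() each timer, then cancel_work_sync() the reset task, and only then unregister the netdev and free the rings. A minimal, hedged sketch of that ordering follows; the struct and the sketch_* / SKETCH_DOWN names are illustrative only, not driver API.

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

struct sketch_adapter {
	unsigned long flags;              /* bit 0 used as a "down" flag */
	struct timer_list watchdog_timer;
	struct work_struct reset_task;
};
#define SKETCH_DOWN 0

static void sketch_teardown(struct sketch_adapter *a)
{
	/* 1. callbacks test this bit and refuse to rearm once it is set */
	set_bit(SKETCH_DOWN, &a->flags);
	/* 2. wait for any timer handler that is already running */
	del_timer_sync(&a->watchdog_timer);
	/* 3. wait for queued or running work before freeing anything */
	cancel_work_sync(&a->reset_task);
}

Setting the flag before the synchronous cancels is the point of the hunk: without it, a running handler could call mod_timer() again and outlive the teardown.
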
@@ -1472,12 +1272,6 @@ static int e1000_open(struct net_device *netdev)
                e1000_update_mng_vlan(adapter);
        }
 
-       /* If AMT is enabled, let the firmware know that the network
-        * interface is now open */
-       if (hw->mac_type == e1000_82573 &&
-           e1000_check_mng_mode(hw))
-               e1000_get_hw_control(adapter);
-
        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
         * as soon as we call pci_request_irq, so we have to setup our
@@ -1503,7 +1297,6 @@ static int e1000_open(struct net_device *netdev)
        return E1000_SUCCESS;
 
 err_req_irq:
-       e1000_release_hw_control(adapter);
        e1000_power_down_phy(adapter);
        e1000_free_all_rx_resources(adapter);
 err_setup_rx:
@@ -1548,12 +1341,6 @@ static int e1000_close(struct net_device *netdev)
                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
        }
 
-       /* If AMT is enabled, let the firmware know that the network
-        * interface is now closed */
-       if (hw->mac_type == e1000_82573 &&
-           e1000_check_mng_mode(hw))
-               e1000_release_hw_control(adapter);
-
        return 0;
 }
 
@@ -1692,7 +1479,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 {
        u64 tdba;
        struct e1000_hw *hw = &adapter->hw;
-       u32 tdlen, tctl, tipg, tarc;
+       u32 tdlen, tctl, tipg;
        u32 ipgr1, ipgr2;
 
        /* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1714,8 +1501,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        }
 
        /* Set the default values for the Tx Inter Packet Gap timer */
-       if (hw->mac_type <= e1000_82547_rev_2 &&
-           (hw->media_type == e1000_media_type_fiber ||
+       if ((hw->media_type == e1000_media_type_fiber ||
             hw->media_type == e1000_media_type_internal_serdes))
                tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
        else
@@ -1728,10 +1514,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
                ipgr1 = DEFAULT_82542_TIPG_IPGR1;
                ipgr2 = DEFAULT_82542_TIPG_IPGR2;
                break;
-       case e1000_80003es2lan:
-               ipgr1 = DEFAULT_82543_TIPG_IPGR1;
-               ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
-               break;
        default:
                ipgr1 = DEFAULT_82543_TIPG_IPGR1;
                ipgr2 = DEFAULT_82543_TIPG_IPGR2;
@@ -1754,21 +1536,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
-       if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
-               tarc = er32(TARC0);
-               /* set the speed mode bit, we'll clear it if we're not at
-                * gigabit link later */
-               tarc |= (1 << 21);
-               ew32(TARC0, tarc);
-       } else if (hw->mac_type == e1000_80003es2lan) {
-               tarc = er32(TARC0);
-               tarc |= 1;
-               ew32(TARC0, tarc);
-               tarc = er32(TARC1);
-               tarc |= 1;
-               ew32(TARC1, tarc);
-       }
-
        e1000_config_collision_dist(hw);
 
        /* Setup Transmit Descriptor Settings for eop descriptor */
@@ -1804,7 +1571,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rxdr)
 {
-       struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
        int size, desc_len;
 
@@ -1817,10 +1583,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
        }
        memset(rxdr->buffer_info, 0, size);
 
-       if (hw->mac_type <= e1000_82547_rev_2)
-               desc_len = sizeof(struct e1000_rx_desc);
-       else
-               desc_len = sizeof(union e1000_rx_desc_packet_split);
+       desc_len = sizeof(struct e1000_rx_desc);
 
        /* Round up to nearest 4K */
 
@@ -1977,7 +1740,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 {
        u64 rdba;
        struct e1000_hw *hw = &adapter->hw;
-       u32 rdlen, rctl, rxcsum, ctrl_ext;
+       u32 rdlen, rctl, rxcsum;
 
        if (adapter->netdev->mtu > ETH_DATA_LEN) {
                rdlen = adapter->rx_ring[0].count *
@@ -2004,17 +1767,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
                        ew32(ITR, 1000000000 / (adapter->itr * 256));
        }
 
-       if (hw->mac_type >= e1000_82571) {
-               ctrl_ext = er32(CTRL_EXT);
-               /* Reset delay timers after every interrupt */
-               ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
-               /* Auto-Mask interrupts upon ICR access */
-               ctrl_ext |= E1000_CTRL_EXT_IAME;
-               ew32(IAM, 0xffffffff);
-               ew32(CTRL_EXT, ctrl_ext);
-               E1000_WRITE_FLUSH();
-       }
-
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        switch (adapter->num_rx_queues) {
@@ -2329,22 +2081,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 
        e1000_rar_set(hw, hw->mac_addr, 0);
 
-       /* With 82571 controllers, LAA may be overwritten (with the default)
-        * due to controller reset from the other port. */
-       if (hw->mac_type == e1000_82571) {
-               /* activate the work around */
-               hw->laa_is_present = 1;
-
-               /* Hold a copy of the LAA in RAR[14] This is done so that
-                * between the time RAR[0] gets clobbered  and the time it
-                * gets fixed (in e1000_watchdog), the actual LAA is in one
-                * of the RARs and no incoming packets directed to this port
-                * are dropped. Eventually the LAA will be in RAR[0] and
-                * RAR[14] */
-               e1000_rar_set(hw, hw->mac_addr,
-                                       E1000_RAR_ENTRIES - 1);
-       }
-
        if (hw->mac_type == e1000_82542_rev2_0)
                e1000_leave_82542_rst(adapter);
 
@@ -2371,9 +2107,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
        u32 rctl;
        u32 hash_value;
        int i, rar_entries = E1000_RAR_ENTRIES;
-       int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
-                               E1000_NUM_MTA_REGISTERS_ICH8LAN :
-                               E1000_NUM_MTA_REGISTERS;
+       int mta_reg_count = E1000_NUM_MTA_REGISTERS;
        u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
 
        if (!mcarray) {
@@ -2381,13 +2115,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
                return;
        }
 
-       if (hw->mac_type == e1000_ich8lan)
-               rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
-
-       /* reserve RAR[14] for LAA over-write work-around */
-       if (hw->mac_type == e1000_82571)
-               rar_entries--;
-
        /* Check for Promiscuous and All Multicast modes */
 
        rctl = er32(RCTL);
@@ -2396,15 +2123,13 @@ static void e1000_set_rx_mode(struct net_device *netdev)
                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
                rctl &= ~E1000_RCTL_VFE;
        } else {
-               if (netdev->flags & IFF_ALLMULTI) {
+               if (netdev->flags & IFF_ALLMULTI)
                        rctl |= E1000_RCTL_MPE;
-               } else {
+               else
                        rctl &= ~E1000_RCTL_MPE;
-               }
-               if (adapter->hw.mac_type != e1000_ich8lan)
-                       /* Enable VLAN filter if there is a VLAN */
-                       if (adapter->vlgrp)
-                               rctl |= E1000_RCTL_VFE;
+               /* Enable VLAN filter if there is a VLAN */
+               if (adapter->vlgrp)
+                       rctl |= E1000_RCTL_VFE;
        }
 
        if (netdev->uc.count > rar_entries - 1) {
@@ -2427,7 +2152,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
         *
         * RAR 0 is used for the station MAC address
         * if there are not 14 addresses, go ahead and clear the filters
-        * -- with 82571 controllers only 0-13 entries are filled here
         */
        i = 1;
        if (use_uc)
@@ -2521,12 +2245,46 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
                        adapter->tx_fifo_head = 0;
                        atomic_set(&adapter->tx_fifo_stall, 0);
                        netif_wake_queue(netdev);
-               } else {
+               } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
                        mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
                }
        }
 }
 
+static bool e1000_has_link(struct e1000_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       bool link_active = false;
+
+       /* get_link_status is set on LSC (link status) interrupt or
+        * rx sequence error interrupt.  get_link_status will stay
+        * false until the e1000_check_for_link establishes link
+        * for copper adapters ONLY
+        */
+       switch (hw->media_type) {
+       case e1000_media_type_copper:
+               if (hw->get_link_status) {
+                       e1000_check_for_link(hw);
+                       link_active = !hw->get_link_status;
+               } else {
+                       link_active = true;
+               }
+               break;
+       case e1000_media_type_fiber:
+               e1000_check_for_link(hw);
+               link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+               break;
+       case e1000_media_type_internal_serdes:
+               e1000_check_for_link(hw);
+               link_active = hw->serdes_has_link;
+               break;
+       default:
+               break;
+       }
+
+       return link_active;
+}
+
 /**
  * e1000_watchdog - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
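
Taken together with the watchdog hunks below, the new e1000_has_link() helper turns the link check into a single early-exit pattern: if the carrier is already up and the link is still up, jump straight to the stats refresh. A condensed restatement of the resulting control flow, simplified and relying on the driver's own structures and helpers; it is a sketch of the shape of the code, not the literal driver function.

static void watchdog_flow_sketch(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	bool link = e1000_has_link(adapter);

	/* carrier already up and link still up: nothing to renegotiate */
	if (netif_carrier_ok(netdev) && link)
		goto link_up;

	if (link) {
		/* link came up: read speed/duplex, enable transmits,
		 * netif_carrier_on(), schedule phy_info_timer */
	} else if (netif_carrier_ok(netdev)) {
		/* link went down: netif_carrier_off(), schedule phy_info_timer */
	}

link_up:
	e1000_update_stats(adapter);
	/* rearm only if the adapter is not being taken down */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + 2 * HZ));
}
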
@@ -2538,33 +2296,16 @@ static void e1000_watchdog(unsigned long data)
        struct net_device *netdev = adapter->netdev;
        struct e1000_tx_ring *txdr = adapter->tx_ring;
        u32 link, tctl;
-       s32 ret_val;
-
-       ret_val = e1000_check_for_link(hw);
-       if ((ret_val == E1000_ERR_PHY) &&
-           (hw->phy_type == e1000_phy_igp_3) &&
-           (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
-               /* See e1000_kumeran_lock_loss_workaround() */
-               DPRINTK(LINK, INFO,
-                       "Gigabit has been disabled, downgrading speed\n");
-       }
 
-       if (hw->mac_type == e1000_82573) {
-               e1000_enable_tx_pkt_filtering(hw);
-               if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
-                       e1000_update_mng_vlan(adapter);
-       }
-
-       if ((hw->media_type == e1000_media_type_internal_serdes) &&
-          !(er32(TXCW) & E1000_TXCW_ANE))
-               link = !hw->serdes_link_down;
-       else
-               link = er32(STATUS) & E1000_STATUS_LU;
+       link = e1000_has_link(adapter);
+       if ((netif_carrier_ok(netdev)) && link)
+               goto link_up;
 
        if (link) {
                if (!netif_carrier_ok(netdev)) {
                        u32 ctrl;
                        bool txb2b = true;
+                       /* update snapshot of PHY registers on LSC */
                        e1000_get_speed_and_duplex(hw,
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);
@@ -2589,7 +2330,7 @@ static void e1000_watchdog(unsigned long data)
                        case SPEED_10:
                                txb2b = false;
                                netdev->tx_queue_len = 10;
-                               adapter->tx_timeout_factor = 8;
+                               adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
                                txb2b = false;
@@ -2598,52 +2339,16 @@ static void e1000_watchdog(unsigned long data)
                                break;
                        }
 
-                       if ((hw->mac_type == e1000_82571 ||
-                            hw->mac_type == e1000_82572) &&
-                           !txb2b) {
-                               u32 tarc0;
-                               tarc0 = er32(TARC0);
-                               tarc0 &= ~(1 << 21);
-                               ew32(TARC0, tarc0);
-                       }
-
-                       /* disable TSO for pcie and 10/100 speeds, to avoid
-                        * some hardware issues */
-                       if (!adapter->tso_force &&
-                           hw->bus_type == e1000_bus_type_pci_express){
-                               switch (adapter->link_speed) {
-                               case SPEED_10:
-                               case SPEED_100:
-                                       DPRINTK(PROBE,INFO,
-                                       "10/100 speed: disabling TSO\n");
-                                       netdev->features &= ~NETIF_F_TSO;
-                                       netdev->features &= ~NETIF_F_TSO6;
-                                       break;
-                               case SPEED_1000:
-                                       netdev->features |= NETIF_F_TSO;
-                                       netdev->features |= NETIF_F_TSO6;
-                                       break;
-                               default:
-                                       /* oops */
-                                       break;
-                               }
-                       }
-
-                       /* enable transmits in the hardware, need to do this
-                        * after setting TARC0 */
+                       /* enable transmits in the hardware */
                        tctl = er32(TCTL);
                        tctl |= E1000_TCTL_EN;
                        ew32(TCTL, tctl);
 
                        netif_carrier_on(netdev);
-                       mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+                       if (!test_bit(__E1000_DOWN, &adapter->flags))
+                               mod_timer(&adapter->phy_info_timer,
+                                         round_jiffies(jiffies + 2 * HZ));
                        adapter->smartspeed = 0;
-               } else {
-                       /* make sure the receive unit is started */
-                       if (hw->rx_needs_kicking) {
-                               u32 rctl = er32(RCTL);
-                               ew32(RCTL, rctl | E1000_RCTL_EN);
-                       }
                }
        } else {
                if (netif_carrier_ok(netdev)) {
@@ -2652,21 +2357,16 @@ static void e1000_watchdog(unsigned long data)
                        printk(KERN_INFO "e1000: %s NIC Link is Down\n",
                               netdev->name);
                        netif_carrier_off(netdev);
-                       mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
-
-                       /* 80003ES2LAN workaround--
-                        * For packet buffer work-around on link down event;
-                        * disable receives in the ISR and
-                        * reset device here in the watchdog
-                        */
-                       if (hw->mac_type == e1000_80003es2lan)
-                               /* reset device */
-                               schedule_work(&adapter->reset_task);
+
+                       if (!test_bit(__E1000_DOWN, &adapter->flags))
+                               mod_timer(&adapter->phy_info_timer,
+                                         round_jiffies(jiffies + 2 * HZ));
                }
 
                e1000_smartspeed(adapter);
        }
 
+link_up:
        e1000_update_stats(adapter);
 
        hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
@@ -2700,13 +2400,10 @@ static void e1000_watchdog(unsigned long data)
        /* Force detection of hung controller every watchdog period */
        adapter->detect_tx_hung = true;
 
-       /* With 82571 controllers, LAA may be overwritten due to controller
-        * reset from the other port. Set the appropriate LAA in RAR[0] */
-       if (hw->mac_type == e1000_82571 && hw->laa_is_present)
-               e1000_rar_set(hw, hw->mac_addr, 0);
-
        /* Reset the timer */
-       mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+       if (!test_bit(__E1000_DOWN, &adapter->flags))
+               mod_timer(&adapter->watchdog_timer,
+                         round_jiffies(jiffies + 2 * HZ));
 }
 
 enum latency_range {
@@ -2718,6 +2415,11 @@ enum latency_range {
 
 /**
  * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
  *      Stores a new ITR value based on packets and byte
  *      counts during the last interrupt.  The advantage of per interrupt
  *      computation is faster updates and more accurate ITR for the current
@@ -2727,10 +2429,6 @@ enum latency_range {
  *      while increasing bulk throughput.
  *      this functionality is controlled by the InterruptThrottleRate module
  *      parameter (see e1000_param.c)
- * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
  **/
 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
                                     u16 itr_setting, int packets, int bytes)
@@ -3035,8 +2733,9 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                        size -= 4;
 
                buffer_info->length = size;
-               buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+               /* set time_stamp *before* dma to help avoid a possible race */
                buffer_info->time_stamp = jiffies;
+               buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
                buffer_info->next_to_watch = i;
 
                len -= size;
@@ -3071,13 +2770,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                         * Avoid terminating buffers within evenly-aligned
                         * dwords. */
                        if (unlikely(adapter->pcix_82544 &&
-                          !((unsigned long)(frag->page+offset+size-1) & 4) &&
-                          size > 4))
+                           !((unsigned long)(page_to_phys(frag->page) + offset
+                                             + size - 1) & 4) &&
+                           size > 4))
                                size -= 4;
 
                        buffer_info->length = size;
-                       buffer_info->dma = map[f] + offset;
                        buffer_info->time_stamp = jiffies;
+                       buffer_info->dma = map[f] + offset;
                        buffer_info->next_to_watch = i;
 
                        len -= size;
@@ -3186,41 +2886,6 @@ no_fifo_stall_required:
        return 0;
 }
 
-#define MINIMUM_DHCP_PACKET_SIZE 282
-static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
-                                   struct sk_buff *skb)
-{
-       struct e1000_hw *hw =  &adapter->hw;
-       u16 length, offset;
-       if (vlan_tx_tag_present(skb)) {
-               if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
-                       ( hw->mng_cookie.status &
-                         E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
-                       return 0;
-       }
-       if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
-               struct ethhdr *eth = (struct ethhdr *)skb->data;
-               if ((htons(ETH_P_IP) == eth->h_proto)) {
-                       const struct iphdr *ip =
-                               (struct iphdr *)((u8 *)skb->data+14);
-                       if (IPPROTO_UDP == ip->protocol) {
-                               struct udphdr *udp =
-                                       (struct udphdr *)((u8 *)ip +
-                                               (ip->ihl << 2));
-                               if (ntohs(udp->dest) == 67) {
-                                       offset = (u8 *)udp + 8 - skb->data;
-                                       length = skb->len - offset;
-
-                                       return e1000_mng_write_dhcp_info(hw,
-                                                       (u8 *)udp + 8,
-                                                       length);
-                               }
-                       }
-               }
-       }
-       return 0;
-}
-
 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3279,11 +2944,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       /* 82571 and newer doesn't need the workaround that limited descriptor
-        * length to 4kB */
-       if (hw->mac_type >= e1000_82571)
-               max_per_txd = 8192;
-
        mss = skb_shinfo(skb)->gso_size;
        /* The controller does a simple calculation to
         * make sure there is enough room in the FIFO before
@@ -3296,9 +2956,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                max_per_txd = min(mss << 2, max_per_txd);
                max_txd_pwr = fls(max_per_txd) - 1;
 
-               /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
-               * points to just header, pull a few bytes of payload from
-               * frags into skb->data */
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                if (skb->data_len && hdr_len == len) {
                        switch (hw->mac_type) {
@@ -3313,10 +2970,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
                                        break;
                                /* fall through */
-                       case e1000_82571:
-                       case e1000_82572:
-                       case e1000_82573:
-                       case e1000_ich8lan:
                                pull_size = min((unsigned int)4, skb->data_len);
                                if (!__pskb_pull_tail(skb, pull_size)) {
                                        DPRINTK(DRV, ERR,
@@ -3361,11 +3014,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        if (adapter->pcix_82544)
                count += nr_frags;
 
-
-       if (hw->tx_pkt_filtering &&
-           (hw->mac_type == e1000_82573))
-               e1000_transfer_dhcp_info(adapter, skb);
-
        /* need: count + 2 desc gap to keep tail from touching
         * head, otherwise try next time */
        if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
@@ -3374,7 +3022,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        if (unlikely(hw->mac_type == e1000_82547)) {
                if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
                        netif_stop_queue(netdev);
-                       mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+                       if (!test_bit(__E1000_DOWN, &adapter->flags))
+                               mod_timer(&adapter->tx_fifo_stall_timer,
+                                         jiffies + 1);
                        return NETDEV_TX_BUSY;
                }
        }
@@ -3393,14 +3043,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        }
 
        if (likely(tso)) {
-               tx_ring->last_tx_tso = 1;
+               if (likely(hw->mac_type != e1000_82544))
+                       tx_ring->last_tx_tso = 1;
                tx_flags |= E1000_TX_FLAGS_TSO;
        } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
                tx_flags |= E1000_TX_FLAGS_CSUM;
 
-       /* Old method was to assume IPv4 packet by default if TSO was enabled.
-        * 82571 hardware supports TSO capabilities for IPv6 as well...
-        * no longer assume, we must. */
        if (likely(skb->protocol == htons(ETH_P_IP)))
                tx_flags |= E1000_TX_FLAGS_IPV4;
 
@@ -3472,7 +3120,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
-       u16 eeprom_data = 0;
 
        if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3483,44 +3130,23 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        /* Adapter-specific max frame size limits. */
        switch (hw->mac_type) {
        case e1000_undefined ... e1000_82542_rev2_1:
-       case e1000_ich8lan:
                if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
                        DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
                        return -EINVAL;
                }
                break;
-       case e1000_82573:
-               /* Jumbo Frames not supported if:
-                * - this is not an 82573L device
-                * - ASPM is enabled in any way (0x1A bits 3:2) */
-               e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
-                                 &eeprom_data);
-               if ((hw->device_id != E1000_DEV_ID_82573L) ||
-                   (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
-                       if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
-                               DPRINTK(PROBE, ERR,
-                                       "Jumbo Frames not supported.\n");
-                               return -EINVAL;
-                       }
-                       break;
-               }
-               /* ERT will be enabled later to enable wire speed receives */
-
-               /* fall through to get support */
-       case e1000_82571:
-       case e1000_82572:
-       case e1000_80003es2lan:
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
-               if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-                       DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
-                       return -EINVAL;
-               }
-               break;
        default:
                /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
                break;
        }
 
+       while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+               msleep(1);
+       /* e1000_down has a dependency on max_frame_size */
+       hw->max_frame_size = max_frame;
+       if (netif_running(netdev))
+               e1000_down(adapter);
+
        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
         * larger slab size.
@@ -3549,11 +3175,16 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
                adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 
+       printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
+              netdev->name, netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
-       hw->max_frame_size = max_frame;
 
        if (netif_running(netdev))
-               e1000_reinit_locked(adapter);
+               e1000_up(adapter);
+       else
+               e1000_reset(adapter);
+
+       clear_bit(__E1000_RESETTING, &adapter->flags);
 
        return 0;
 }
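
The MTU path above replaces e1000_reinit_locked() with an explicit guard: spin on test_and_set_bit(__E1000_RESETTING, ...) so only one reconfiguration runs at a time, bring the interface down, apply the new frame sizing, bring it back up (or just reset if it was not running), then clear the bit. A generic, hedged sketch of that idiom; the helper name and callback here are illustrative, not part of the driver.

#include <linux/bitops.h>
#include <linux/delay.h>

/* Serialize a reconfiguration step against concurrent resets by using a
 * flag bit as a sleeping "lock". */
static void sketch_exclusive_reconfig(unsigned long *flags, int resetting_bit,
				      void (*reconfig)(void *arg), void *arg)
{
	while (test_and_set_bit(resetting_bit, flags))
		msleep(1);      /* someone else is resetting; wait politely */

	reconfig(arg);          /* e.g. down -> resize buffers -> up */

	clear_bit(resetting_bit, flags);
}

Note that hw->max_frame_size is written before e1000_down() in the hunk above because, as the new comment says, the down path depends on it.
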
@@ -3596,14 +3227,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
        adapter->stats.mprc += er32(MPRC);
        adapter->stats.roc += er32(ROC);
 
-       if (hw->mac_type != e1000_ich8lan) {
-               adapter->stats.prc64 += er32(PRC64);
-               adapter->stats.prc127 += er32(PRC127);
-               adapter->stats.prc255 += er32(PRC255);
-               adapter->stats.prc511 += er32(PRC511);
-               adapter->stats.prc1023 += er32(PRC1023);
-               adapter->stats.prc1522 += er32(PRC1522);
-       }
+       adapter->stats.prc64 += er32(PRC64);
+       adapter->stats.prc127 += er32(PRC127);
+       adapter->stats.prc255 += er32(PRC255);
+       adapter->stats.prc511 += er32(PRC511);
+       adapter->stats.prc1023 += er32(PRC1023);
+       adapter->stats.prc1522 += er32(PRC1522);
 
        adapter->stats.symerrs += er32(SYMERRS);
        adapter->stats.mpc += er32(MPC);
@@ -3632,14 +3261,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
        adapter->stats.toth += er32(TOTH);
        adapter->stats.tpr += er32(TPR);
 
-       if (hw->mac_type != e1000_ich8lan) {
-               adapter->stats.ptc64 += er32(PTC64);
-               adapter->stats.ptc127 += er32(PTC127);
-               adapter->stats.ptc255 += er32(PTC255);
-               adapter->stats.ptc511 += er32(PTC511);
-               adapter->stats.ptc1023 += er32(PTC1023);
-               adapter->stats.ptc1522 += er32(PTC1522);
-       }
+       adapter->stats.ptc64 += er32(PTC64);
+       adapter->stats.ptc127 += er32(PTC127);
+       adapter->stats.ptc255 += er32(PTC255);
+       adapter->stats.ptc511 += er32(PTC511);
+       adapter->stats.ptc1023 += er32(PTC1023);
+       adapter->stats.ptc1522 += er32(PTC1522);
 
        adapter->stats.mptc += er32(MPTC);
        adapter->stats.bptc += er32(BPTC);
@@ -3659,20 +3286,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
                adapter->stats.tsctc += er32(TSCTC);
                adapter->stats.tsctfc += er32(TSCTFC);
        }
-       if (hw->mac_type > e1000_82547_rev_2) {
-               adapter->stats.iac += er32(IAC);
-               adapter->stats.icrxoc += er32(ICRXOC);
-
-               if (hw->mac_type != e1000_ich8lan) {
-                       adapter->stats.icrxptc += er32(ICRXPTC);
-                       adapter->stats.icrxatc += er32(ICRXATC);
-                       adapter->stats.ictxptc += er32(ICTXPTC);
-                       adapter->stats.ictxatc += er32(ICTXATC);
-                       adapter->stats.ictxqec += er32(ICTXQEC);
-                       adapter->stats.ictxqmtc += er32(ICTXQMTC);
-                       adapter->stats.icrxdmtc += er32(ICRXDMTC);
-               }
-       }
 
        /* Fill out the OS statistics structure */
        adapter->net_stats.multicast = adapter->stats.mprc;
@@ -3730,49 +3343,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
        spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
-/**
- * e1000_intr_msi - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-
-static irqreturn_t e1000_intr_msi(int irq, void *data)
-{
-       struct net_device *netdev = data;
-       struct e1000_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
-       u32 icr = er32(ICR);
-
-       /* in NAPI mode read ICR disables interrupts using IAM */
-
-       if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
-               hw->get_link_status = 1;
-               /* 80003ES2LAN workaround-- For packet buffer work-around on
-                * link down event; disable receives here in the ISR and reset
-                * adapter in watchdog */
-               if (netif_carrier_ok(netdev) &&
-                   (hw->mac_type == e1000_80003es2lan)) {
-                       /* disable receives */
-                       u32 rctl = er32(RCTL);
-                       ew32(RCTL, rctl & ~E1000_RCTL_EN);
-               }
-               /* guard against interrupt when we're going down */
-               if (!test_bit(__E1000_DOWN, &adapter->flags))
-                       mod_timer(&adapter->watchdog_timer, jiffies + 1);
-       }
-
-       if (likely(napi_schedule_prep(&adapter->napi))) {
-               adapter->total_tx_bytes = 0;
-               adapter->total_tx_packets = 0;
-               adapter->total_rx_bytes = 0;
-               adapter->total_rx_packets = 0;
-               __napi_schedule(&adapter->napi);
-       } else
-               e1000_irq_enable(adapter);
-
-       return IRQ_HANDLED;
-}
-
 /**
  * e1000_intr - Interrupt Handler
  * @irq: interrupt number
@@ -3784,43 +3354,22 @@ static irqreturn_t e1000_intr(int irq, void *data)
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       u32 rctl, icr = er32(ICR);
+       u32 icr = er32(ICR);
 
        if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
                return IRQ_NONE;  /* Not our interrupt */
 
-       /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
-        * not set, then the adapter didn't send an interrupt */
-       if (unlikely(hw->mac_type >= e1000_82571 &&
-                    !(icr & E1000_ICR_INT_ASSERTED)))
-               return IRQ_NONE;
-
-       /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
-        * need for the IMC write */
-
        if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                hw->get_link_status = 1;
-               /* 80003ES2LAN workaround--
-                * For packet buffer work-around on link down event;
-                * disable receives here in the ISR and
-                * reset adapter in watchdog
-                */
-               if (netif_carrier_ok(netdev) &&
-                   (hw->mac_type == e1000_80003es2lan)) {
-                       /* disable receives */
-                       rctl = er32(RCTL);
-                       ew32(RCTL, rctl & ~E1000_RCTL_EN);
-               }
                /* guard against interrupt when we're going down */
                if (!test_bit(__E1000_DOWN, &adapter->flags))
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-       if (unlikely(hw->mac_type < e1000_82571)) {
-               /* disable interrupts, without the synchronize_irq bit */
-               ew32(IMC, ~0);
-               E1000_WRITE_FLUSH();
-       }
+       /* disable interrupts, without the synchronize_irq bit */
+       ew32(IMC, ~0);
+       E1000_WRITE_FLUSH();
+
        if (likely(napi_schedule_prep(&adapter->napi))) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
@@ -3844,17 +3393,13 @@ static irqreturn_t e1000_intr(int irq, void *data)
 static int e1000_clean(struct napi_struct *napi, int budget)
 {
        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
-       struct net_device *poll_dev = adapter->netdev;
-       int tx_cleaned = 0, work_done = 0;
-
-       adapter = netdev_priv(poll_dev);
+       int tx_clean_complete = 0, work_done = 0;
 
-       tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
+       tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
 
-       adapter->clean_rx(adapter, &adapter->rx_ring[0],
-                         &work_done, budget);
+       adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
 
-       if (!tx_cleaned)
+       if (!tx_clean_complete)
                work_done = budget;
 
        /* If budget not fully consumed, exit the polling mode */
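
With the PCIe-only auto-mask logic gone, the interrupt handler above always masks via IMC and hands the rest to NAPI, and e1000_clean() no longer needs the redundant netdev_priv() round-trip. A condensed restatement of that handoff, simplified and reusing the driver's er32()/ew32() register helpers; statistics bookkeeping is omitted and the function name is illustrative.

static irqreturn_t intr_sketch(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);               /* reading ICR acknowledges the causes */

	if (!icr || test_bit(__E1000_DOWN, &adapter->flags))
		return IRQ_NONE;           /* shared line: not ours, or going down */

	ew32(IMC, ~0);                     /* mask all interrupts until the poll runs */
	E1000_WRITE_FLUSH();

	if (napi_schedule_prep(&adapter->napi))
		__napi_schedule(&adapter->napi);  /* e1000_clean() re-enables IRQs when done */
	else
		e1000_irq_enable(adapter);

	return IRQ_HANDLED;
}
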
@@ -3925,7 +3470,9 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                 * sees the new next_to_clean.
                 */
                smp_mb();
-               if (netif_queue_stopped(netdev)) {
+
+               if (netif_queue_stopped(netdev) &&
+                   !(test_bit(__E1000_DOWN, &adapter->flags))) {
                        netif_wake_queue(netdev);
                        ++adapter->restart_queue;
                }
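
The wake-up above pairs with the stop side of the transmit path: the cleanup handler frees descriptors, issues a memory barrier so the stop side sees the new next_to_clean before the queue state is examined, and only then wakes the queue, now also refusing to wake it while the adapter is going down. A hedged sketch of that consumer-side pattern; the descriptor-count condition (cleaned, E1000_DESC_UNUSED, TX_WAKE_THRESHOLD) is paraphrased from the surrounding function rather than shown in this hunk.

	/* after freeing a batch of Tx descriptors in the cleanup handler */
	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* pair with the stop side: publish next_to_clean before
		 * looking at the queue state */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !test_bit(__E1000_DOWN, &adapter->flags)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}
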
@@ -3935,8 +3482,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                /* Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i */
                adapter->detect_tx_hung = false;
-               if (tx_ring->buffer_info[i].time_stamp &&
-                   time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
+               if (tx_ring->buffer_info[eop].time_stamp &&
+                   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
                               (adapter->tx_timeout_factor * HZ))
                    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 
@@ -3958,7 +3505,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                readl(hw->hw_addr + tx_ring->tdt),
                                tx_ring->next_to_use,
                                tx_ring->next_to_clean,
-                               tx_ring->buffer_info[i].time_stamp,
+                               tx_ring->buffer_info[eop].time_stamp,
                                eop,
                                jiffies,
                                eop_desc->upper.fields.status);
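
The hang check above now keys off the descriptor the cleanup loop is actually waiting on: it compares the timestamp recorded for the eop (end-of-packet) buffer rather than whatever index i happens to hold when the check fires. In condensed form, simplified from the hunk above (the real code also dumps TDH/TDT and descriptor state before stopping the queue):

	if (adapter->detect_tx_hung) {
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
				        adapter->tx_timeout_factor * HZ) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			netif_stop_queue(netdev);   /* transmit unit looks hung */
	}
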
@@ -3999,25 +3546,13 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                return;
        }
        /* TCP/UDP Checksum has not been calculated */
-       if (hw->mac_type <= e1000_82547_rev_2) {
-               if (!(status & E1000_RXD_STAT_TCPCS))
-                       return;
-       } else {
-               if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
-                       return;
-       }
+       if (!(status & E1000_RXD_STAT_TCPCS))
+               return;
+
        /* It must be a TCP or UDP packet with a valid checksum */
        if (likely(status & E1000_RXD_STAT_TCPCS)) {
                /* TCP checksum is good */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else if (hw->mac_type > e1000_82547_rev_2) {
-               /* IP fragment with UDP payload */
-               /* Hardware complements the payload checksum, so we undo it
-                * and then put the value in host order for further stack use.
-                */
-               __sum16 sum = (__force __sum16)htons(csum);
-               skb->csum = csum_unfold(~sum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
        }
        adapter->hw_csum_good++;
 }
@@ -4814,20 +4349,6 @@ void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
        pcix_set_mmrbc(adapter->pdev, mmrbc);
 }
 
-s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
-    struct e1000_adapter *adapter = hw->back;
-    u16 cap_offset;
-
-    cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
-    if (!cap_offset)
-        return -E1000_ERR_CONFIG;
-
-    pci_read_config_word(adapter->pdev, cap_offset + reg, value);
-
-    return E1000_SUCCESS;
-}
-
 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
 {
        outl(value, port);
@@ -4850,33 +4371,27 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
                ctrl |= E1000_CTRL_VME;
                ew32(CTRL, ctrl);
 
-               if (adapter->hw.mac_type != e1000_ich8lan) {
-                       /* enable VLAN receive filtering */
-                       rctl = er32(RCTL);
-                       rctl &= ~E1000_RCTL_CFIEN;
-                       if (!(netdev->flags & IFF_PROMISC))
-                               rctl |= E1000_RCTL_VFE;
-                       ew32(RCTL, rctl);
-                       e1000_update_mng_vlan(adapter);
-               }
+               /* enable VLAN receive filtering */
+               rctl = er32(RCTL);
+               rctl &= ~E1000_RCTL_CFIEN;
+               if (!(netdev->flags & IFF_PROMISC))
+                       rctl |= E1000_RCTL_VFE;
+               ew32(RCTL, rctl);
+               e1000_update_mng_vlan(adapter);
        } else {
                /* disable VLAN tag insert/strip */
                ctrl = er32(CTRL);
                ctrl &= ~E1000_CTRL_VME;
                ew32(CTRL, ctrl);
 
-               if (adapter->hw.mac_type != e1000_ich8lan) {
-                       /* disable VLAN receive filtering */
-                       rctl = er32(RCTL);
-                       rctl &= ~E1000_RCTL_VFE;
-                       ew32(RCTL, rctl);
+               /* disable VLAN receive filtering */
+               rctl = er32(RCTL);
+               rctl &= ~E1000_RCTL_VFE;
+               ew32(RCTL, rctl);
 
-                       if (adapter->mng_vlan_id !=
-                           (u16)E1000_MNG_VLAN_NONE) {
-                               e1000_vlan_rx_kill_vid(netdev,
-                                                      adapter->mng_vlan_id);
-                               adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-                       }
+               if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+                       e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+                       adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
                }
        }
 
@@ -4913,14 +4428,6 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (!test_bit(__E1000_DOWN, &adapter->flags))
                e1000_irq_enable(adapter);
 
-       if ((hw->mng_cookie.status &
-            E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-           (vid == adapter->mng_vlan_id)) {
-               /* release control to f/w */
-               e1000_release_hw_control(adapter);
-               return;
-       }
-
        /* remove VID from filter table */
        index = (vid >> 5) & 0x7F;
        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
@@ -5031,16 +4538,13 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
                }
 
                if (hw->media_type == e1000_media_type_fiber ||
-                  hw->media_type == e1000_media_type_internal_serdes) {
+                   hw->media_type == e1000_media_type_internal_serdes) {
                        /* keep the laser running in D3 */
                        ctrl_ext = er32(CTRL_EXT);
                        ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
                        ew32(CTRL_EXT, ctrl_ext);
                }
 
-               /* Allow time for pending master requests to run */
-               e1000_disable_pciex_master(hw);
-
                ew32(WUC, E1000_WUC_PME_EN);
                ew32(WUFC, wufc);
        } else {
@@ -5056,16 +4560,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
        if (adapter->en_mng_pt)
                *enable_wake = true;
 
-       if (hw->phy_type == e1000_phy_igp_3)
-               e1000_phy_powerdown_workaround(hw);
-
        if (netif_running(netdev))
                e1000_free_irq(adapter);
 
-       /* Release control of h/w to f/w.  If f/w is AMT enabled, this
-        * would have already happened in close and is redundant. */
-       e1000_release_hw_control(adapter);
-
        pci_disable_device(pdev);
 
        return 0;
@@ -5131,14 +4628,6 @@ static int e1000_resume(struct pci_dev *pdev)
 
        netif_device_attach(netdev);
 
-       /* If the controller is 82573 and f/w is AMT, do not set
-        * DRV_LOAD until the interface is up.  For all other cases,
-        * let the f/w know that the h/w is now under the control
-        * of the driver. */
-       if (hw->mac_type != e1000_82573 ||
-           !e1000_check_mng_mode(hw))
-               e1000_get_hw_control(adapter);
-
        return 0;
 }
 #endif
@@ -5174,7 +4663,7 @@ static void e1000_netpoll(struct net_device *netdev)
 /**
  * e1000_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
- * @state: The current pci conneection state
+ * @state: The current pci connection state
  *
  * This function is called after a PCI bus error affecting
  * this device has been detected.
@@ -5243,7 +4732,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
 
        e1000_init_manageability(adapter);
 
@@ -5255,15 +4743,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
        }
 
        netif_device_attach(netdev);
-
-       /* If the controller is 82573 and f/w is AMT, do not set
-        * DRV_LOAD until the interface is up.  For all other cases,
-        * let the f/w know that the h/w is now under the control
-        * of the driver. */
-       if (hw->mac_type != e1000_82573 ||
-           !e1000_check_mng_mode(hw))
-               e1000_get_hw_control(adapter);
-
 }
 
 /* e1000_main.c */
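
The e1000_vlan_rx_kill_vid() hunk above derives a filter-table register index from the VID with (vid >> 5) & 0x7F. A minimal standalone sketch of that mapping, assuming the usual e1000 layout of 128 32-bit VFTA registers where the low five VID bits select the bit inside the register (the bit computation is not shown in this hunk):

    #include <stdio.h>

    /* Map a VLAN ID to its slot in a 128 x 32-bit VLAN filter table
     * (assumed layout; mirrors the (vid >> 5) & 0x7F math in the hunk). */
    static void vfta_slot(unsigned int vid, unsigned int *index, unsigned int *bit)
    {
            *index = (vid >> 5) & 0x7F;  /* which 32-bit VFTA register */
            *bit   = vid & 0x1F;         /* which bit inside that register */
    }

    int main(void)
    {
            unsigned int index, bit;

            vfta_slot(100, &index, &bit);
            printf("VID 100 -> VFTA[%u], bit %u\n", index, bit); /* VFTA[3], bit 4 */
            return 0;
    }
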
index 213437d131547ff0e632e94bb75094831f6f967d..38d2741ccae9982834f3811825805d503be0c1b6 100644 (file)
@@ -518,22 +518,6 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
                        adapter->smart_power_down = opt.def;
                }
        }
-       { /* Kumeran Lock Loss Workaround */
-               opt = (struct e1000_option) {
-                       .type = enable_option,
-                       .name = "Kumeran Lock Loss Workaround",
-                       .err  = "defaulting to Enabled",
-                       .def  = OPTION_ENABLED
-               };
-
-               if (num_KumeranLockLoss > bd) {
-                       unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
-                       e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
-                       adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
-               } else {
-                       adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
-               }
-       }
 
        switch (adapter->hw.media_type) {
        case e1000_media_type_fiber:
@@ -626,12 +610,6 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
                                         .p = dplx_list }}
                };
 
-               if (e1000_check_phy_reset_block(&adapter->hw)) {
-                       DPRINTK(PROBE, INFO,
-                               "Link active due to SoL/IDER Session. "
-                               "Speed/Duplex/AutoNeg parameter ignored.\n");
-                       return;
-               }
                if (num_Duplex > bd) {
                        dplx = Duplex[bd];
                        e1000_validate_option(&dplx, &opt, adapter);
index b53b40ba88a806b545a2981d54e49f7ed44de6dc..d1e0563a67dffdb8ad988659bf90b221b2635cc6 100644 (file)
@@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = {
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_CTRLEXT_ON_LOAD,
        .pba                    = 20,
-       .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
+       .max_hw_frame_size      = DEFAULT_JUMBO,
        .get_variants           = e1000_get_variants_82571,
        .mac_ops                = &e82571_mac_ops,
        .phy_ops                = &e82_phy_ops_bm,
@@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = {
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_CTRLEXT_ON_LOAD,
        .pba                    = 20,
-       .max_hw_frame_size      = DEFAULT_JUMBO,
+       .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_82571,
        .mac_ops                = &e82571_mac_ops,
        .phy_ops                = &e82_phy_ops_bm,
index 16c193a6c95c1e21e8fdc15a69874cad1e4fd910..0687c6aa4e46e406c0dd67d33169b6574be75f7f 100644 (file)
@@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                goto err_pci_reg;
 
        /* AER (Advanced Error Reporting) hooks */
-       err = pci_enable_pcie_error_reporting(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-                       "0x%x\n", err);
-               /* non-fatal, continue */
-       }
+       pci_enable_pcie_error_reporting(pdev);
 
        pci_set_master(pdev);
        /* PCI config space info */
@@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       int err;
 
        /*
         * flush_scheduled work may reschedule our watchdog task, so
@@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
        free_netdev(netdev);
 
        /* AER disable */
-       err = pci_disable_pcie_error_reporting(pdev);
-       if (err)
-               dev_err(&pdev->dev,
-                       "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+       pci_disable_pcie_error_reporting(pdev);
 
        pci_disable_device(pdev);
 }
index b7311bc00258569e210949fcf2c17ceaaa62b46b..34d0c69e67f7bb53593cbe32b422db8d6979286d 100644 (file)
 #include <linux/platform_device.h>
 #include <net/ethoc.h>
 
+static int buffer_size = 0x8000; /* 32 KBytes */
+module_param(buffer_size, int, 0);
+MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
+
 /* register offsets */
 #define        MODER           0x00
 #define        INT_SOURCE      0x04
  * struct ethoc - driver-private device structure
  * @iobase:    pointer to I/O memory region
  * @membase:   pointer to buffer memory region
+ * @dma_alloc: dma allocated buffer size
  * @num_tx:    number of send buffers
  * @cur_tx:    last send buffer written
  * @dty_tx:    last buffer actually sent
 struct ethoc {
        void __iomem *iobase;
        void __iomem *membase;
+       int dma_alloc;
 
        unsigned int num_tx;
        unsigned int cur_tx;
@@ -284,7 +290,7 @@ static int ethoc_init_ring(struct ethoc *dev)
        dev->cur_rx = 0;
 
        /* setup transmission buffers */
-       bd.addr = 0;
+       bd.addr = virt_to_phys(dev->membase);
        bd.stat = TX_BD_IRQ | TX_BD_CRC;
 
        for (i = 0; i < dev->num_tx; i++) {
@@ -295,7 +301,6 @@ static int ethoc_init_ring(struct ethoc *dev)
                bd.addr += ETHOC_BUFSIZ;
        }
 
-       bd.addr = dev->num_tx * ETHOC_BUFSIZ;
        bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
 
        for (i = 0; i < dev->num_rx; i++) {
@@ -400,8 +405,12 @@ static int ethoc_rx(struct net_device *dev, int limit)
                if (ethoc_update_rx_stats(priv, &bd) == 0) {
                        int size = bd.stat >> 16;
                        struct sk_buff *skb = netdev_alloc_skb(dev, size);
+
+                       size -= 4; /* strip the CRC */
+                       skb_reserve(skb, 2); /* align TCP/IP header */
+
                        if (likely(skb)) {
-                               void *src = priv->membase + bd.addr;
+                               void *src = phys_to_virt(bd.addr);
                                memcpy_fromio(skb_put(skb, size), src, size);
                                skb->protocol = eth_type_trans(skb, dev);
                                priv->stats.rx_packets++;
@@ -653,9 +662,9 @@ static int ethoc_open(struct net_device *dev)
        if (ret)
                return ret;
 
-       /* calculate the number of TX/RX buffers */
-       num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ;
-       priv->num_tx = min(min_tx, num_bd / 4);
+       /* calculate the number of TX/RX buffers, maximum 128 supported */
+       num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
+       priv->num_tx = max(min_tx, num_bd / 4);
        priv->num_rx = num_bd - priv->num_tx;
        ethoc_write(priv, TX_BD_NUM, priv->num_tx);
 
@@ -823,7 +832,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
        else
                bd.stat &= ~TX_BD_PAD;
 
-       dest = priv->membase + bd.addr;
+       dest = phys_to_virt(bd.addr);
        memcpy_toio(dest, skb->data, skb->len);
 
        bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -903,22 +912,19 @@ static int ethoc_probe(struct platform_device *pdev)
 
        /* obtain buffer memory space */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res) {
-               dev_err(&pdev->dev, "cannot obtain memory space\n");
-               ret = -ENXIO;
-               goto free;
-       }
-
-       mem = devm_request_mem_region(&pdev->dev, res->start,
+       if (res) {
+               mem = devm_request_mem_region(&pdev->dev, res->start,
                        res->end - res->start + 1, res->name);
-       if (!mem) {
-               dev_err(&pdev->dev, "cannot request memory space\n");
-               ret = -ENXIO;
-               goto free;
+               if (!mem) {
+                       dev_err(&pdev->dev, "cannot request memory space\n");
+                       ret = -ENXIO;
+                       goto free;
+               }
+
+               netdev->mem_start = mem->start;
+               netdev->mem_end   = mem->end;
        }
 
-       netdev->mem_start = mem->start;
-       netdev->mem_end   = mem->end;
 
        /* obtain device IRQ number */
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -933,6 +939,7 @@ static int ethoc_probe(struct platform_device *pdev)
        /* setup driver-private data */
        priv = netdev_priv(netdev);
        priv->netdev = netdev;
+       priv->dma_alloc = 0;
 
        priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
                        mmio->end - mmio->start + 1);
@@ -942,12 +949,27 @@ static int ethoc_probe(struct platform_device *pdev)
                goto error;
        }
 
-       priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start,
-                       mem->end - mem->start + 1);
-       if (!priv->membase) {
-               dev_err(&pdev->dev, "cannot remap memory space\n");
-               ret = -ENXIO;
-               goto error;
+       if (netdev->mem_end) {
+               priv->membase = devm_ioremap_nocache(&pdev->dev,
+                       netdev->mem_start, mem->end - mem->start + 1);
+               if (!priv->membase) {
+                       dev_err(&pdev->dev, "cannot remap memory space\n");
+                       ret = -ENXIO;
+                       goto error;
+               }
+       } else {
+               /* Allocate buffer memory */
+               priv->membase = dma_alloc_coherent(NULL,
+                       buffer_size, (void *)&netdev->mem_start,
+                       GFP_KERNEL);
+               if (!priv->membase) {
+                       dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
+                               buffer_size);
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               netdev->mem_end = netdev->mem_start + buffer_size;
+               priv->dma_alloc = buffer_size;
        }
 
        /* Allow the platform setup code to pass in a MAC address. */
@@ -1034,6 +1056,9 @@ free_mdio:
        kfree(priv->mdio->irq);
        mdiobus_free(priv->mdio);
 free:
+       if (priv->dma_alloc)
+               dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+                       netdev->mem_start);
        free_netdev(netdev);
 out:
        return ret;
@@ -1059,7 +1084,9 @@ static int ethoc_remove(struct platform_device *pdev)
                        kfree(priv->mdio->irq);
                        mdiobus_free(priv->mdio);
                }
-
+               if (priv->dma_alloc)
+                       dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+                               netdev->mem_start);
                unregister_netdev(netdev);
                free_netdev(netdev);
        }
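
Earlier in the ethoc changes, ethoc_open() now caps the descriptor count at 128 and keeps at least min_tx transmit buffers, roughly a quarter of the total. A small standalone sketch of that arithmetic; the ETHOC_BUFSIZ and min_tx values below are assumptions for illustration only, since neither value appears in these hunks:

    #include <stdio.h>

    #define ETHOC_BUFSIZ    1536    /* assumed per-buffer size, illustration only */

    int main(void)
    {
            unsigned long mem_start = 0, mem_end = 0x7fff;  /* 32 KiB buffer region */
            unsigned int min_tx = 2;                        /* assumed lower bound */
            unsigned int num_bd, num_tx, num_rx;

            /* at most 128 buffer descriptors are supported */
            num_bd = (mem_end - mem_start + 1) / ETHOC_BUFSIZ;
            if (num_bd > 128)
                    num_bd = 128;

            /* at least min_tx TX buffers, roughly a quarter of the total */
            num_tx = num_bd / 4;
            if (num_tx < min_tx)
                    num_tx = min_tx;
            num_rx = num_bd - num_tx;

            printf("num_bd=%u num_tx=%u num_rx=%u\n", num_bd, num_tx, num_rx);
            return 0;
    }
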
index 33b55f729742efbfd0c370b762463fb8be46ff92..db4b7f1603f662992d551ef6f2baca986baac697 100644 (file)
@@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax)
                        }
                        if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
                                printk(KERN_INFO
-                                      "mkiss: %s: Switchting to crc-smack\n",
+                                      "mkiss: %s: Switching to crc-smack\n",
                                       ax->dev->name);
                                ax->crcmode = CRC_MODE_SMACK;
                        }
@@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax)
                        }
                        if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
                                printk(KERN_INFO
-                                      "mkiss: %s: Switchting to crc-flexnet\n",
+                                      "mkiss: %s: Switching to crc-flexnet\n",
                                       ax->dev->name);
                                ax->crcmode = CRC_MODE_FLEX;
                        }
index 5d6c1530a8c0458e02ae42305e3accf893f9cdd1..714c3a4a44eff5a1d43b538a6c6cf380bfea445c 100644 (file)
@@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        if (err)
                goto err_pci_reg;
 
-       err = pci_enable_pcie_error_reporting(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-                       "0x%x\n", err);
-               /* non-fatal, continue */
-       }
+       pci_enable_pcie_error_reporting(pdev);
 
        pci_set_master(pdev);
        pci_save_state(pdev);
@@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       int err;
 
        /* flush_scheduled work may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled  */
@@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
        free_netdev(netdev);
 
-       err = pci_disable_pcie_error_reporting(pdev);
-       if (err)
-               dev_err(&pdev->dev,
-                       "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+       pci_disable_pcie_error_reporting(pdev);
 
        pci_disable_device(pdev);
 }
index e36e951cbc65ba93bc6acc295a5109ff17beda7a..aa7286bc4364a79bf4fffd9b4b762948621779a6 100644 (file)
@@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
                           cnx->remote_lp);
        } else {
                memcpy(&cnx->cap_ack_event, event,
-                      sizeof(&cnx->cap_ack_event));
+                      sizeof(cnx->cap_ack_event));
                cnx->state |= VETH_STATE_GOTCAPACK;
                veth_kick_statemachine(cnx);
        }
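
The one-character iseries_veth fix above matters because sizeof(&cnx->cap_ack_event) is the size of a pointer, not of the event structure, so the memcpy() copied only 4 or 8 bytes. A standalone illustration of the difference (the struct here is invented for the demo):

    #include <stdio.h>
    #include <string.h>

    struct event {
            int type;
            char payload[56];
    };

    int main(void)
    {
            struct event src = { 1, "hello" }, dst;

            /* buggy form: copies only sizeof(struct event *) bytes */
            memcpy(&dst, &src, sizeof(&src));
            printf("pointer-sized copy: %zu bytes\n", sizeof(&src));

            /* fixed form: copies the whole structure */
            memcpy(&dst, &src, sizeof(src));
            printf("struct-sized copy:  %zu bytes\n", sizeof(src));
            return 0;
    }
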
index 56b12f3192f1d5b18d72b77bb03aab9807708b5f..e2d5343f127553d7ad7c5d4b032bd087f66db990 100644 (file)
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = -IXGBE_ERR_CONFIG;
+               ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
        }
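
This sign change (and the matching ones later in ixgbe_common.c) rests on the IXGBE_ERR_* constants already being defined as negative values in ixgbe_type.h, so negating them again produced a positive return that no longer compares equal to the error constant. A tiny sketch of the effect; the constant's value below is an assumption for illustration:

    #include <stdio.h>

    #define IXGBE_ERR_CONFIG        (-4)    /* assumed value, illustration only */

    int main(void)
    {
            int before = -IXGBE_ERR_CONFIG; /* old code: double negation -> +4 */
            int after  = IXGBE_ERR_CONFIG;  /* fixed code: stays -4 */

            printf("old return: %d (== IXGBE_ERR_CONFIG? %s)\n",
                   before, before == IXGBE_ERR_CONFIG ? "yes" : "no");
            printf("new return: %d (== IXGBE_ERR_CONFIG? %s)\n",
                   after,  after  == IXGBE_ERR_CONFIG ? "yes" : "no");
            return 0;
    }
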
index 2ec58dcdb82bf6fba6013599c7695eb76d0804bc..34b04924c8a1f6412f7231306833c3ceebafbe63 100644 (file)
@@ -330,6 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82599_KX4:
+       case IXGBE_DEV_ID_82599_KX4_MEZZ:
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
        case IXGBE_DEV_ID_82599_XAUI_LOM:
                /* Default device ID is mezzanine card KX/KX4 */
                media_type = ixgbe_media_type_backplane;
index 6621e172df3d79533e391777bb2cb0e78050e75a..40ff120a9ad4374e5755f4fa3b3d7010238b8917 100644 (file)
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
 /**
  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
  *  @hw: pointer to hardware structure
- *  @addr_list: the list of new addresses
- *  @addr_count: number of addresses
- *  @next: iterator function to walk the address list
+ *  @uc_list: the list of new addresses
  *
  *  The given list replaces any existing list.  Clears the secondary addrs from
  *  receive address registers.  Uses unused receive address registers for the
@@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = -IXGBE_ERR_CONFIG;
+               ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
        }
@@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
        s32 ret_val = 0;
        ixgbe_link_speed speed;
        u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+       u32 links2, anlp1_reg, autoc_reg, links;
        bool link_up;
 
        /*
         * AN should have completed when the cable was plugged in.
         * Look for reasons to bail out.  Bail out if:
         * - FC autoneg is disabled, or if
-        * - we don't have multispeed fiber, or if
-        * - we're not running at 1G, or if
-        * - link is not up, or if
-        * - link is up but AN did not complete, or if
-        * - link is up and AN completed but timed out
+        * - link is not up.
         *
-        * Since we're being called from an LSC, link is already know to be up.
+        * Since we're being called from an LSC, link is already known to be up.
         * So use link_up_wait_to_complete=false.
         */
        hw->mac.ops.check_link(hw, &speed, &link_up, false);
-       linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-
-       if (hw->fc.disable_fc_autoneg ||
-           !hw->phy.multispeed_fiber ||
-           (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
-           !link_up ||
-           ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-           ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+
+       if (hw->fc.disable_fc_autoneg || (!link_up)) {
                hw->fc.fc_was_autonegged = false;
                hw->fc.current_mode = hw->fc.requested_mode;
-               hw_dbg(hw, "Autoneg FC was skipped.\n");
                goto out;
        }
 
+       /*
+        * On backplane, bail out if
+        * - backplane autoneg was not completed, or if
+        * - link partner is not AN enabled
+        */
+       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+               links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+               links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+               if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
+                   ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
+                       hw->fc.fc_was_autonegged = false;
+                       hw->fc.current_mode = hw->fc.requested_mode;
+                       goto out;
+               }
+       }
+
+       /*
+        * On multispeed fiber at 1g, bail out if
+        * - link is up but AN did not complete, or if
+        * - link is up and AN completed but timed out
+        */
+       if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
+               linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+               if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+                   ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+                       hw->fc.fc_was_autonegged = false;
+                       hw->fc.current_mode = hw->fc.requested_mode;
+                       goto out;
+               }
+       }
+
        /*
         * Read the AN advertisement and LP ability registers and resolve
         * local flow control settings accordingly
         */
-       pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-       pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-       if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-               (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+       if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+           (hw->phy.media_type != ixgbe_media_type_backplane)) {
+               pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+               pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+               if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+                   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+                       /*
+                        * Now we need to check if the user selected Rx ONLY
+                        * of pause frames.  In this case, we had to advertise
+                        * FULL flow control because we could not advertise RX
+                        * ONLY. Hence, we must now check to see if we need to
+                        * turn OFF the TRANSMISSION of PAUSE frames.
+                        */
+                       if (hw->fc.requested_mode == ixgbe_fc_full) {
+                               hw->fc.current_mode = ixgbe_fc_full;
+                               hw_dbg(hw, "Flow Control = FULL.\n");
+                       } else {
+                               hw->fc.current_mode = ixgbe_fc_rx_pause;
+                               hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+                       }
+               } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+                          (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+                          (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+                          (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+                       hw->fc.current_mode = ixgbe_fc_tx_pause;
+                       hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+               } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+                          (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+                          !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+                          (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+                       hw->fc.current_mode = ixgbe_fc_rx_pause;
+                       hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+               } else {
+                       hw->fc.current_mode = ixgbe_fc_none;
+                       hw_dbg(hw, "Flow Control = NONE.\n");
+               }
+       }
+
+       if (hw->phy.media_type == ixgbe_media_type_backplane) {
                /*
-                * Now we need to check if the user selected Rx ONLY
-                * of pause frames.  In this case, we had to advertise
-                * FULL flow control because we could not advertise RX
-                * ONLY. Hence, we must now check to see if we need to
-                * turn OFF the TRANSMISSION of PAUSE frames.
+                * Read the 10g AN autoc and LP ability registers and resolve
+                * local flow control settings accordingly
                 */
-               if (hw->fc.requested_mode == ixgbe_fc_full) {
-                       hw->fc.current_mode = ixgbe_fc_full;
-                       hw_dbg(hw, "Flow Control = FULL.\n");
-               } else {
+               autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+               if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+                   (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
+                       /*
+                        * Now we need to check if the user selected Rx ONLY
+                        * of pause frames.  In this case, we had to advertise
+                        * FULL flow control because we could not advertise RX
+                        * ONLY. Hence, we must now check to see if we need to
+                        * turn OFF the TRANSMISSION of PAUSE frames.
+                        */
+                       if (hw->fc.requested_mode == ixgbe_fc_full) {
+                               hw->fc.current_mode = ixgbe_fc_full;
+                               hw_dbg(hw, "Flow Control = FULL.\n");
+                       } else {
+                               hw->fc.current_mode = ixgbe_fc_rx_pause;
+                               hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+                       }
+               } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+                          (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+                          (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+                          (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
+                       hw->fc.current_mode = ixgbe_fc_tx_pause;
+                       hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+               } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+                          (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+                          !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+                          (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
                        hw->fc.current_mode = ixgbe_fc_rx_pause;
                        hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+               } else {
+                       hw->fc.current_mode = ixgbe_fc_none;
+                       hw_dbg(hw, "Flow Control = NONE.\n");
                }
-       } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                  (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                  (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                  (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-               hw->fc.current_mode = ixgbe_fc_tx_pause;
-               hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-       } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                  (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-                  !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-                  (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-               hw->fc.current_mode = ixgbe_fc_rx_pause;
-               hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-       } else {
-               hw->fc.current_mode = ixgbe_fc_none;
-               hw_dbg(hw, "Flow Control = NONE.\n");
        }
-
        /* Record that current_mode is the result of a successful autoneg */
        hw->fc.fc_was_autonegged = true;
 
@@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = -IXGBE_ERR_CONFIG;
+               ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
        }
@@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
        IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
        reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
 
-       /* Enable and restart autoneg to inform the link partner */
-       reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
-
        /* Disable AN timeout */
        if (hw->fc.strict_ieee)
                reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
@@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
        IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
        hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
 
+       /*
+        * Set up the 10G flow control advertisement registers so the HW
+        * can do fc autoneg once the cable is plugged in.  If we end up
+        * using 1g instead, this is harmless.
+        */
+       reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+       /*
+        * The possible values of fc.requested_mode are:
+        * 0: Flow control is completely disabled
+        * 1: Rx flow control is enabled (we can receive pause frames,
+        *    but not send pause frames).
+        * 2: Tx flow control is enabled (we can send pause frames but
+        *    we do not support receiving pause frames).
+        * 3: Both Rx and Tx flow control (symmetric) are enabled.
+        * other: Invalid.
+        */
+       switch (hw->fc.requested_mode) {
+       case ixgbe_fc_none:
+               /* Flow control completely disabled by software override. */
+               reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+               break;
+       case ixgbe_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is
+                * disabled by software override. Since there really
+                * isn't a way to advertise that we are capable of RX
+                * Pause ONLY, we will advertise that we support both
+                * symmetric and asymmetric Rx PAUSE.  Later, we will
+                * disable the adapter's ability to send PAUSE frames.
+                */
+               reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+               break;
+       case ixgbe_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled by software override.
+                */
+               reg |= (IXGBE_AUTOC_ASM_PAUSE);
+               reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
+               break;
+       case ixgbe_fc_full:
+               /* Flow control (both Rx and Tx) is enabled by SW override. */
+               reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+               break;
+#ifdef CONFIG_DCB
+       case ixgbe_fc_pfc:
+               goto out;
+               break;
+#endif /* CONFIG_DCB */
+       default:
+               hw_dbg(hw, "Flow control param set incorrectly\n");
+               ret_val = IXGBE_ERR_CONFIG;
+               goto out;
+               break;
+       }
+       /*
+        * AUTOC restart handles negotiation of 1G and 10G. There is
+        * no need to set the PCS1GCTL register.
+        */
+       reg |= IXGBE_AUTOC_AN_RESTART;
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
+       hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+
 out:
        return ret_val;
 }
@@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 
        while (timeout) {
                if (ixgbe_get_eeprom_semaphore(hw))
-                       return -IXGBE_ERR_SWFW_SYNC;
+                       return IXGBE_ERR_SWFW_SYNC;
 
                gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
                if (!(gssr & (fwmask | swmask)))
@@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 
        if (!timeout) {
                hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
-               return -IXGBE_ERR_SWFW_SYNC;
+               return IXGBE_ERR_SWFW_SYNC;
        }
 
        gssr |= swmask;
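
The 1G and backplane branches added to ixgbe_fc_autoneg() above resolve flow control from the same four-way truth table over the local and link-partner symmetric/asymmetric pause bits. A compact standalone sketch of that resolution; the function and enum names here are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdbool.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Resolve flow control from advertised pause bits, mirroring the
     * pause-resolution cases in the hunks above. */
    static enum fc_mode resolve_fc(bool loc_sym, bool loc_asm,
                                   bool lp_sym, bool lp_asm,
                                   enum fc_mode requested)
    {
            if (loc_sym && lp_sym)
                    return requested == FC_FULL ? FC_FULL : FC_RX_PAUSE;
            if (!loc_sym && loc_asm && lp_sym && lp_asm)
                    return FC_TX_PAUSE;
            if (loc_sym && loc_asm && !lp_sym && lp_asm)
                    return FC_RX_PAUSE;
            return FC_NONE;
    }

    int main(void)
    {
            /* we advertise sym+asym (an RX-only request), partner is asym-only */
            printf("resolved mode: %d\n",
                   resolve_fc(true, true, false, true, FC_RX_PAUSE)); /* FC_RX_PAUSE */
            return 0;
    }
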
index 53b0a6680254fdbd816692f0a88e1616f312a7aa..fa314cb005a4dcf15ce8884c0da5e9987f1e2604 100644 (file)
@@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
        {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
        {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
        {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
+       {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
+       {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
+       {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
+       {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
        {"lsc_int", IXGBE_STAT(lsc_int)},
        {"tx_busy", IXGBE_STAT(tx_busy)},
        {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
index c407bd9de0dd87644556efcb5b717a835c4657d6..cbb143ca1eb88386d03209cb6b92d5b2936450d4 100644 (file)
@@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                               "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.37-k2"
+#define DRV_VERSION "2.0.44-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
 
@@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
+        board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
         board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
+        board_82599 },
 
        /* required last entry */
        {0, }
@@ -1885,12 +1889,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
                adapter->tx_ring[i].head = IXGBE_TDH(j);
                adapter->tx_ring[i].tail = IXGBE_TDT(j);
-               /* Disable Tx Head Writeback RO bit, since this hoses
+               /*
+                * Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
                 */
-               txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+                       break;
+               case ixgbe_mac_82599EB:
+               default:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
+                       break;
+               }
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+                       break;
+               case ixgbe_mac_82599EB:
+               default:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
+                       break;
+               }
        }
        if (hw->mac.type == ixgbe_mac_82599EB) {
                /* We enable 8 traffic classes, DCB only */
@@ -4432,10 +4453,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 
        /* 82598 hardware only has a 32 bit counter in the high register */
        if (hw->mac.type == ixgbe_mac_82599EB) {
+               u64 tmp;
                adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-               IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+               tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
+               adapter->stats.gorc += (tmp << 32);
                adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-               IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+               tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
+               adapter->stats.gotc += (tmp << 32);
                adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
                IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
@@ -5071,7 +5095,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
        /* Right now, we support IPv4 only */
        struct ixgbe_atr_input atr_input;
        struct tcphdr *th;
-       struct udphdr *uh;
        struct iphdr *iph = ip_hdr(skb);
        struct ethhdr *eth = (struct ethhdr *)skb->data;
        u16 vlan_id, src_port, dst_port, flex_bytes;
@@ -5085,12 +5108,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
                dst_port = th->dest;
                l4type |= IXGBE_ATR_L4TYPE_TCP;
                /* l4type IPv4 type is 0, no need to assign */
-       } else if(iph->protocol == IPPROTO_UDP) {
-               uh = udp_hdr(skb);
-               src_port = uh->source;
-               dst_port = uh->dest;
-               l4type |= IXGBE_ATR_L4TYPE_UDP;
-               /* l4type IPv4 type is 0, no need to assign */
        } else {
                /* Unsupported L4 header, just bail here */
                return;
@@ -5494,12 +5511,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_pci_reg;
        }
 
-       err = pci_enable_pcie_error_reporting(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-                                   "0x%x\n", err);
-               /* non-fatal, continue */
-       }
+       pci_enable_pcie_error_reporting(pdev);
 
        pci_set_master(pdev);
        pci_save_state(pdev);
@@ -5808,7 +5820,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int err;
 
        set_bit(__IXGBE_DOWN, &adapter->state);
        /* clear the module not found bit to make sure the worker won't
@@ -5859,10 +5870,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
        free_netdev(netdev);
 
-       err = pci_disable_pcie_error_reporting(pdev);
-       if (err)
-               dev_err(&pdev->dev,
-                       "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+       pci_disable_pcie_error_reporting(pdev);
 
        pci_disable_device(pdev);
 }
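
The ixgbe_update_stats() change earlier in this file stops discarding GORCH/GOTCH on 82599 and instead folds their low four bits in as bits 35:32 of the byte counters. A standalone sketch of that assembly, with the register values hard-coded for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* pretend reads of the low/high counter registers */
            uint32_t gorcl = 0x89ABCDEFu;   /* low 32 bits of the octet count */
            uint32_t gorch = 0x00000007u;   /* only bits 3:0 carry counter data */

            uint64_t gorc = 0;
            gorc += gorcl;
            gorc += ((uint64_t)(gorch & 0xF)) << 32;    /* 4 high bits of GORC */

            printf("good octets received: 0x%llx\n", (unsigned long long)gorc);
            return 0;
    }
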
index 8761d7899f7d0704440d4bb3766bc671e4043750..ef4bdd58e0162e96c1bfad79980d2fd3dc3a73bb 100644 (file)
 #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM      0x10E1
 #define IXGBE_DEV_ID_82598EB_XF_LR       0x10F4
 #define IXGBE_DEV_ID_82599_KX4           0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ      0x1514
 #define IXGBE_DEV_ID_82599_CX4           0x10F9
 #define IXGBE_DEV_ID_82599_SFP           0x10FB
 #define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
 
 /* General Registers */
 #define IXGBE_CTRL      0x00000
 #define IXGBE_AUTOC_KX4_SUPP    0x80000000
 #define IXGBE_AUTOC_KX_SUPP     0x40000000
 #define IXGBE_AUTOC_PAUSE       0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE   0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE   0x10000000
 #define IXGBE_AUTOC_RF          0x08000000
 #define IXGBE_AUTOC_PD_TMR      0x06000000
 #define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
 #define IXGBE_LINK_UP_TIME      90 /* 9.0 Seconds */
 #define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds */
 
+#define IXGBE_LINKS2_AN_SUPPORTED   0x00000040
+
 /* PCS1GLSTA Bit Masks */
 #define IXGBE_PCS1GLSTA_LINK_OK         1
 #define IXGBE_PCS1GLSTA_SYNK_OK         0x10
 #define IXGBE_PCS1GLCTL_AN_ENABLE       0x10000
 #define IXGBE_PCS1GLCTL_AN_RESTART      0x20000
 
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE               0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE           0x0400
+#define IXGBE_ANLP1_ASM_PAUSE           0x0800
+
 /* SW Semaphore Register bitmasks */
 #define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
 #define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
new file mode 100644 (file)
index 0000000..0be14d7
--- /dev/null
@@ -0,0 +1,1697 @@
+/**
+ * drivers/net/ks8851_mll.c
+ * Copyright (c) 2009 Micrel Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/**
+ * Supports:
+ * KS8851 16bit MLL chip from Micrel Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define        DRV_NAME        "ks8851_mll"
+
+static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
+#define MAX_RECV_FRAMES                        32
+#define MAX_BUF_SIZE                   2048
+#define TX_BUF_SIZE                    2000
+#define RX_BUF_SIZE                    2000
+
+#define KS_CCR                         0x08
+#define CCR_EEPROM                     (1 << 9)
+#define CCR_SPI                                (1 << 8)
+#define CCR_8BIT                       (1 << 7)
+#define CCR_16BIT                      (1 << 6)
+#define CCR_32BIT                      (1 << 5)
+#define CCR_SHARED                     (1 << 4)
+#define CCR_32PIN                      (1 << 0)
+
+/* MAC address registers */
+#define KS_MARL                                0x10
+#define KS_MARM                                0x12
+#define KS_MARH                                0x14
+
+#define KS_OBCR                                0x20
+#define OBCR_ODS_16MA                  (1 << 6)
+
+#define KS_EEPCR                       0x22
+#define EEPCR_EESA                     (1 << 4)
+#define EEPCR_EESB                     (1 << 3)
+#define EEPCR_EEDO                     (1 << 2)
+#define EEPCR_EESCK                    (1 << 1)
+#define EEPCR_EECS                     (1 << 0)
+
+#define KS_MBIR                                0x24
+#define MBIR_TXMBF                     (1 << 12)
+#define MBIR_TXMBFA                    (1 << 11)
+#define MBIR_RXMBF                     (1 << 4)
+#define MBIR_RXMBFA                    (1 << 3)
+
+#define KS_GRR                         0x26
+#define GRR_QMU                                (1 << 1)
+#define GRR_GSR                                (1 << 0)
+
+#define KS_WFCR                                0x2A
+#define WFCR_MPRXE                     (1 << 7)
+#define WFCR_WF3E                      (1 << 3)
+#define WFCR_WF2E                      (1 << 2)
+#define WFCR_WF1E                      (1 << 1)
+#define WFCR_WF0E                      (1 << 0)
+
+#define KS_WF0CRC0                     0x30
+#define KS_WF0CRC1                     0x32
+#define KS_WF0BM0                      0x34
+#define KS_WF0BM1                      0x36
+#define KS_WF0BM2                      0x38
+#define KS_WF0BM3                      0x3A
+
+#define KS_WF1CRC0                     0x40
+#define KS_WF1CRC1                     0x42
+#define KS_WF1BM0                      0x44
+#define KS_WF1BM1                      0x46
+#define KS_WF1BM2                      0x48
+#define KS_WF1BM3                      0x4A
+
+#define KS_WF2CRC0                     0x50
+#define KS_WF2CRC1                     0x52
+#define KS_WF2BM0                      0x54
+#define KS_WF2BM1                      0x56
+#define KS_WF2BM2                      0x58
+#define KS_WF2BM3                      0x5A
+
+#define KS_WF3CRC0                     0x60
+#define KS_WF3CRC1                     0x62
+#define KS_WF3BM0                      0x64
+#define KS_WF3BM1                      0x66
+#define KS_WF3BM2                      0x68
+#define KS_WF3BM3                      0x6A
+
+#define KS_TXCR                                0x70
+#define TXCR_TCGICMP                   (1 << 8)
+#define TXCR_TCGUDP                    (1 << 7)
+#define TXCR_TCGTCP                    (1 << 6)
+#define TXCR_TCGIP                     (1 << 5)
+#define TXCR_FTXQ                      (1 << 4)
+#define TXCR_TXFCE                     (1 << 3)
+#define TXCR_TXPE                      (1 << 2)
+#define TXCR_TXCRC                     (1 << 1)
+#define TXCR_TXE                       (1 << 0)
+
+#define KS_TXSR                                0x72
+#define TXSR_TXLC                      (1 << 13)
+#define TXSR_TXMC                      (1 << 12)
+#define TXSR_TXFID_MASK                        (0x3f << 0)
+#define TXSR_TXFID_SHIFT               (0)
+#define TXSR_TXFID_GET(_v)             (((_v) >> 0) & 0x3f)
+
+
+#define KS_RXCR1                       0x74
+#define RXCR1_FRXQ                     (1 << 15)
+#define RXCR1_RXUDPFCC                 (1 << 14)
+#define RXCR1_RXTCPFCC                 (1 << 13)
+#define RXCR1_RXIPFCC                  (1 << 12)
+#define RXCR1_RXPAFMA                  (1 << 11)
+#define RXCR1_RXFCE                    (1 << 10)
+#define RXCR1_RXEFE                    (1 << 9)
+#define RXCR1_RXMAFMA                  (1 << 8)
+#define RXCR1_RXBE                     (1 << 7)
+#define RXCR1_RXME                     (1 << 6)
+#define RXCR1_RXUE                     (1 << 5)
+#define RXCR1_RXAE                     (1 << 4)
+#define RXCR1_RXINVF                   (1 << 1)
+#define RXCR1_RXE                      (1 << 0)
+#define RXCR1_FILTER_MASK              (RXCR1_RXINVF | RXCR1_RXAE | \
+                                        RXCR1_RXMAFMA | RXCR1_RXPAFMA)
+
+#define KS_RXCR2                       0x76
+#define RXCR2_SRDBL_MASK               (0x7 << 5)
+#define RXCR2_SRDBL_SHIFT              (5)
+#define RXCR2_SRDBL_4B                 (0x0 << 5)
+#define RXCR2_SRDBL_8B                 (0x1 << 5)
+#define RXCR2_SRDBL_16B                        (0x2 << 5)
+#define RXCR2_SRDBL_32B                        (0x3 << 5)
+/* #define RXCR2_SRDBL_FRAME           (0x4 << 5) */
+#define RXCR2_IUFFP                    (1 << 4)
+#define RXCR2_RXIUFCEZ                 (1 << 3)
+#define RXCR2_UDPLFE                   (1 << 2)
+#define RXCR2_RXICMPFCC                        (1 << 1)
+#define RXCR2_RXSAF                    (1 << 0)
+
+#define KS_TXMIR                       0x78
+
+#define KS_RXFHSR                      0x7C
+#define RXFSHR_RXFV                    (1 << 15)
+#define RXFSHR_RXICMPFCS               (1 << 13)
+#define RXFSHR_RXIPFCS                 (1 << 12)
+#define RXFSHR_RXTCPFCS                        (1 << 11)
+#define RXFSHR_RXUDPFCS                        (1 << 10)
+#define RXFSHR_RXBF                    (1 << 7)
+#define RXFSHR_RXMF                    (1 << 6)
+#define RXFSHR_RXUF                    (1 << 5)
+#define RXFSHR_RXMR                    (1 << 4)
+#define RXFSHR_RXFT                    (1 << 3)
+#define RXFSHR_RXFTL                   (1 << 2)
+#define RXFSHR_RXRF                    (1 << 1)
+#define RXFSHR_RXCE                    (1 << 0)
+#define        RXFSHR_ERR                      (RXFSHR_RXCE | RXFSHR_RXRF |\
+                                       RXFSHR_RXFTL | RXFSHR_RXMR |\
+                                       RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
+                                       RXFSHR_RXTCPFCS)
+#define KS_RXFHBCR                     0x7E
+#define RXFHBCR_CNT_MASK               0x0FFF
+
+#define KS_TXQCR                       0x80
+#define TXQCR_AETFE                    (1 << 2)
+#define TXQCR_TXQMAM                   (1 << 1)
+#define TXQCR_METFE                    (1 << 0)
+
+#define KS_RXQCR                       0x82
+#define RXQCR_RXDTTS                   (1 << 12)
+#define RXQCR_RXDBCTS                  (1 << 11)
+#define RXQCR_RXFCTS                   (1 << 10)
+#define RXQCR_RXIPHTOE                 (1 << 9)
+#define RXQCR_RXDTTE                   (1 << 7)
+#define RXQCR_RXDBCTE                  (1 << 6)
+#define RXQCR_RXFCTE                   (1 << 5)
+#define RXQCR_ADRFE                    (1 << 4)
+#define RXQCR_SDA                      (1 << 3)
+#define RXQCR_RRXEF                    (1 << 0)
+#define RXQCR_CMD_CNTL                 (RXQCR_RXFCTE|RXQCR_ADRFE)
+
+#define KS_TXFDPR                      0x84
+#define TXFDPR_TXFPAI                  (1 << 14)
+#define TXFDPR_TXFP_MASK               (0x7ff << 0)
+#define TXFDPR_TXFP_SHIFT              (0)
+
+#define KS_RXFDPR                      0x86
+#define RXFDPR_RXFPAI                  (1 << 14)
+
+#define KS_RXDTTR                      0x8C
+#define KS_RXDBCTR                     0x8E
+
+#define KS_IER                         0x90
+#define KS_ISR                         0x92
+#define IRQ_LCI                                (1 << 15)
+#define IRQ_TXI                                (1 << 14)
+#define IRQ_RXI                                (1 << 13)
+#define IRQ_RXOI                       (1 << 11)
+#define IRQ_TXPSI                      (1 << 9)
+#define IRQ_RXPSI                      (1 << 8)
+#define IRQ_TXSAI                      (1 << 6)
+#define IRQ_RXWFDI                     (1 << 5)
+#define IRQ_RXMPDI                     (1 << 4)
+#define IRQ_LDI                                (1 << 3)
+#define IRQ_EDI                                (1 << 2)
+#define IRQ_SPIBEI                     (1 << 1)
+#define IRQ_DEDI                       (1 << 0)
+
+#define KS_RXFCTR                      0x9C
+#define RXFCTR_THRESHOLD_MASK          0x00FF
+
+#define KS_RXFC                                0x9D
+#define RXFCTR_RXFC_MASK               (0xff << 8)
+#define RXFCTR_RXFC_SHIFT              (8)
+#define RXFCTR_RXFC_GET(_v)            (((_v) >> 8) & 0xff)
+#define RXFCTR_RXFCT_MASK              (0xff << 0)
+#define RXFCTR_RXFCT_SHIFT             (0)
+
+#define KS_TXNTFSR                     0x9E
+
+#define KS_MAHTR0                      0xA0
+#define KS_MAHTR1                      0xA2
+#define KS_MAHTR2                      0xA4
+#define KS_MAHTR3                      0xA6
+
+#define KS_FCLWR                       0xB0
+#define KS_FCHWR                       0xB2
+#define KS_FCOWR                       0xB4
+
+#define KS_CIDER                       0xC0
+#define CIDER_ID                       0x8870
+#define CIDER_REV_MASK                 (0x7 << 1)
+#define CIDER_REV_SHIFT                        (1)
+#define CIDER_REV_GET(_v)              (((_v) >> 1) & 0x7)
+
+#define KS_CGCR                                0xC6
+#define KS_IACR                                0xC8
+#define IACR_RDEN                      (1 << 12)
+#define IACR_TSEL_MASK                 (0x3 << 10)
+#define IACR_TSEL_SHIFT                        (10)
+#define IACR_TSEL_MIB                  (0x3 << 10)
+#define IACR_ADDR_MASK                 (0x1f << 0)
+#define IACR_ADDR_SHIFT                        (0)
+
+#define KS_IADLR                       0xD0
+#define KS_IAHDR                       0xD2
+
+#define KS_PMECR                       0xD4
+#define PMECR_PME_DELAY                        (1 << 14)
+#define PMECR_PME_POL                  (1 << 12)
+#define PMECR_WOL_WAKEUP               (1 << 11)
+#define PMECR_WOL_MAGICPKT             (1 << 10)
+#define PMECR_WOL_LINKUP               (1 << 9)
+#define PMECR_WOL_ENERGY               (1 << 8)
+#define PMECR_AUTO_WAKE_EN             (1 << 7)
+#define PMECR_WAKEUP_NORMAL            (1 << 6)
+#define PMECR_WKEVT_MASK               (0xf << 2)
+#define PMECR_WKEVT_SHIFT              (2)
+#define PMECR_WKEVT_GET(_v)            (((_v) >> 2) & 0xf)
+#define PMECR_WKEVT_ENERGY             (0x1 << 2)
+#define PMECR_WKEVT_LINK               (0x2 << 2)
+#define PMECR_WKEVT_MAGICPKT           (0x4 << 2)
+#define PMECR_WKEVT_FRAME              (0x8 << 2)
+#define PMECR_PM_MASK                  (0x3 << 0)
+#define PMECR_PM_SHIFT                 (0)
+#define PMECR_PM_NORMAL                        (0x0 << 0)
+#define PMECR_PM_ENERGY                        (0x1 << 0)
+#define PMECR_PM_SOFTDOWN              (0x2 << 0)
+#define PMECR_PM_POWERSAVE             (0x3 << 0)
+
+/* Standard MII PHY data */
+#define KS_P1MBCR                      0xE4
+#define P1MBCR_FORCE_FDX               (1 << 8)
+
+#define KS_P1MBSR                      0xE6
+#define P1MBSR_AN_COMPLETE             (1 << 5)
+#define P1MBSR_AN_CAPABLE              (1 << 3)
+#define P1MBSR_LINK_UP                 (1 << 2)
+
+#define KS_PHY1ILR                     0xE8
+#define KS_PHY1IHR                     0xEA
+#define KS_P1ANAR                      0xEC
+#define KS_P1ANLPR                     0xEE
+
+#define KS_P1SCLMD                     0xF4
+#define P1SCLMD_LEDOFF                 (1 << 15)
+#define P1SCLMD_TXIDS                  (1 << 14)
+#define P1SCLMD_RESTARTAN              (1 << 13)
+#define P1SCLMD_DISAUTOMDIX            (1 << 10)
+#define P1SCLMD_FORCEMDIX              (1 << 9)
+#define P1SCLMD_AUTONEGEN              (1 << 7)
+#define P1SCLMD_FORCE100               (1 << 6)
+#define P1SCLMD_FORCEFDX               (1 << 5)
+#define P1SCLMD_ADV_FLOW               (1 << 4)
+#define P1SCLMD_ADV_100BT_FDX          (1 << 3)
+#define P1SCLMD_ADV_100BT_HDX          (1 << 2)
+#define P1SCLMD_ADV_10BT_FDX           (1 << 1)
+#define P1SCLMD_ADV_10BT_HDX           (1 << 0)
+
+#define KS_P1CR                                0xF6
+#define P1CR_HP_MDIX                   (1 << 15)
+#define P1CR_REV_POL                   (1 << 13)
+#define P1CR_OP_100M                   (1 << 10)
+#define P1CR_OP_FDX                    (1 << 9)
+#define P1CR_OP_MDI                    (1 << 7)
+#define P1CR_AN_DONE                   (1 << 6)
+#define P1CR_LINK_GOOD                 (1 << 5)
+#define P1CR_PNTR_FLOW                 (1 << 4)
+#define P1CR_PNTR_100BT_FDX            (1 << 3)
+#define P1CR_PNTR_100BT_HDX            (1 << 2)
+#define P1CR_PNTR_10BT_FDX             (1 << 1)
+#define P1CR_PNTR_10BT_HDX             (1 << 0)
+
+/* TX Frame control */
+
+#define TXFR_TXIC                      (1 << 15)
+#define TXFR_TXFID_MASK                        (0x3f << 0)
+#define TXFR_TXFID_SHIFT               (0)
+
+#define KS_P1SR                                0xF8
+#define P1SR_HP_MDIX                   (1 << 15)
+#define P1SR_REV_POL                   (1 << 13)
+#define P1SR_OP_100M                   (1 << 10)
+#define P1SR_OP_FDX                    (1 << 9)
+#define P1SR_OP_MDI                    (1 << 7)
+#define P1SR_AN_DONE                   (1 << 6)
+#define P1SR_LINK_GOOD                 (1 << 5)
+#define P1SR_PNTR_FLOW                 (1 << 4)
+#define P1SR_PNTR_100BT_FDX            (1 << 3)
+#define P1SR_PNTR_100BT_HDX            (1 << 2)
+#define P1SR_PNTR_10BT_FDX             (1 << 1)
+#define P1SR_PNTR_10BT_HDX             (1 << 0)
+
+#define        ENUM_BUS_NONE                   0
+#define        ENUM_BUS_8BIT                   1
+#define        ENUM_BUS_16BIT                  2
+#define        ENUM_BUS_32BIT                  3
+
+#define MAX_MCAST_LST                  32
+#define HW_MCAST_SIZE                  8
+#define MAC_ADDR_LEN                   6
+
+/**
+ * union ks_tx_hdr - tx header data
+ * @txb: The header as bytes
+ * @txw: The header as 16bit, little-endian words
+ *
+ * A dual representation of the tx header data to allow
+ * access to individual bytes, and to allow 16bit accesses
+ * with 16bit alignment.
+ */
+union ks_tx_hdr {
+       u8      txb[4];
+       __le16  txw[2];
+};
+
+/**
+ * struct ks_net - KS8851 driver private data
+ * @net_device         : The network device we're bound to
+ * @hw_addr    : start address of data register.
+ * @hw_addr_cmd        : start address of command register.
+ * @txh        : temporary buffer to save status/length.
+ * @lock       : Lock to ensure that the device is not accessed when busy.
+ * @pdev       : Pointer to platform device.
+ * @mii                : The MII state information for the mii calls.
+ * @frame_head_info    : frame header information for multi-pkt rx.
+ * @statelock  : Lock on this structure for tx list.
+ * @msg_enable : The message flags controlling driver output (see ethtool).
+ * @frame_cnt          : number of frames received.
+ * @bus_width          : i/o bus width.
+ * @irq        : irq number assigned to this device.
+ * @rc_rxqcr   : Cached copy of KS_RXQCR.
+ * @rc_txcr    : Cached copy of KS_TXCR.
+ * @rc_ier     : Cached copy of KS_IER.
+ * @sharedbus          : Multiplexed (addr and data bus) mode indicator.
+ * @cmd_reg_cache      : Cached copy of the command register.
+ * @cmd_reg_cache_int  : Cached copy of the command register, used in the irq handler.
+ * @promiscuous        : promiscuous mode indicator.
+ * @all_mcast          : all-multicast mode indicator.
+ * @mcast_lst_size     : size of multicast list.
+ * @mcast_lst          : multicast list.
+ * @mcast_bits         : multicast hash filter bits.
+ * @mac_addr                   : MAC address assigned to this device.
+ * @fid                : frame id.
+ * @extra_byte         : number of extra bytes prepended to the rx packet.
+ * @enabled                    : indicates whether the device is enabled.
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished and
+ * the DMA has been de-asserted.
+ *
+ * The @statelock is used to protect information in the structure which may
+ * need to be accessed via several sources, such as the network driver layer
+ * or one of the work queues.
+ *
+ */
+
+/* Receive multiplex framer header info */
+struct type_frame_head {
+       u16     sts;         /* Frame status */
+       u16     len;         /* Byte count */
+};
+
+struct ks_net {
+       struct net_device       *netdev;
+       void __iomem            *hw_addr;
+       void __iomem            *hw_addr_cmd;
+       union ks_tx_hdr         txh ____cacheline_aligned;
+       struct mutex            lock; /* mutex to ensure exclusive chip access */
+       struct platform_device *pdev;
+       struct mii_if_info      mii;
+       struct type_frame_head  *frame_head_info;
+       spinlock_t              statelock;
+       u32                     msg_enable;
+       u32                     frame_cnt;
+       int                     bus_width;
+       int                     irq;
+
+       u16                     rc_rxqcr;
+       u16                     rc_txcr;
+       u16                     rc_ier;
+       u16                     sharedbus;
+       u16                     cmd_reg_cache;
+       u16                     cmd_reg_cache_int;
+       u16                     promiscuous;
+       u16                     all_mcast;
+       u16                     mcast_lst_size;
+       u8                      mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
+       u8                      mcast_bits[HW_MCAST_SIZE];
+       u8                      mac_addr[6];
+       u8                      fid;
+       u8                      extra_byte;
+       u8                      enabled;
+};
+
+static int msg_enable;
+
+#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
+#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
+#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
+#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
+
+#define BE3             0x8000      /* Byte Enable 3 */
+#define BE2             0x4000      /* Byte Enable 2 */
+#define BE1             0x2000      /* Byte Enable 1 */
+#define BE0             0x1000      /* Byte Enable 0 */
+
+/**
+ * register read/write calls.
+ *
+ * All these calls issue transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+/**
+ * ks_rdreg8 - read 8 bit register from device
+ * @ks   : The chip information
+ * @offset: The register address
+ *
+ * Read an 8-bit register from the chip, returning the result.
+ */
+static u8 ks_rdreg8(struct ks_net *ks, int offset)
+{
+       u16 data;
+       u8 shift_bit = offset & 0x03;
+       u8 shift_data = (offset & 1) << 3;
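+       /* BE0 shifted by the low two address bits selects which of the four
+        * byte lanes is enabled; shift_data then picks the matching byte out
+        * of the 16-bit word read back.
+        */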
+       ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
+       iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+       data  = ioread16(ks->hw_addr);
+       return (u8)(data >> shift_data);
+}
+
+/**
+ * ks_rdreg16 - read 16 bit register from device
+ * @ks   : The chip information
+ * @offset: The register address
+ *
+ * Read a 16-bit register from the chip, returning the result.
+ */
+
+static u16 ks_rdreg16(struct ks_net *ks, int offset)
+{
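+       /* (BE1 | BE0) shifted by bit 1 of the offset enables either the low
+        * or the high 16-bit half of the data bus for this access.
+        */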
+       ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
+       iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+       return ioread16(ks->hw_addr);
+}
+
+/**
+ * ks_wrreg8 - write 8bit register value to chip
+ * @ks: The chip information
+ * @offset: The register address
+ * @value: The value to write
+ *
+ */
+static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
+{
+       u8  shift_bit = (offset & 0x03);
+       u16 value_write = (u16)(value << ((offset & 1) << 3));
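+       /* place the byte on the correct half of the 16-bit word and enable
+        * only the matching byte lane (BE0 << shift_bit).
+        */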
+       ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
+       iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+       iowrite16(value_write, ks->hw_addr);
+}
+
+/**
+ * ks_wrreg16 - write 16bit register value to chip
+ * @ks: The chip information
+ * @offset: The register address
+ * @value: The value to write
+ *
+ */
+
+static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
+{
+       ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
+       iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+       iowrite16(value, ks->hw_addr);
+}
+
+/**
+ * ks_inblk - read a block of data from the QMU. This is called after pseudo-DMA mode is enabled.
+ * @ks: The chip state
+ * @wptr: buffer address to save data
+ * @len: length in bytes to read
+ *
+ */
+static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
+{
+       len >>= 1;
+       while (len--)
+               *wptr++ = (u16)ioread16(ks->hw_addr);
+}
+
+/**
+ * ks_outblk - write data to the QMU. This is called after pseudo-DMA mode is enabled.
+ * @ks: The chip information
+ * @wptr: buffer address
+ * @len: length in bytes to write
+ *
+ */
+static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
+{
+       len >>= 1;
+       while (len--)
+               iowrite16(*wptr++, ks->hw_addr);
+}
+
+/**
+ * ks_tx_fifo_space - return the available hardware buffer size.
+ * @ks: The chip information
+ *
+ */
+static inline u16 ks_tx_fifo_space(struct ks_net *ks)
+{
+       return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
+}
+
+/**
+ * ks_save_cmd_reg - save the command register from the cache.
+ * @ks: The chip information
+ *
+ */
+static inline void ks_save_cmd_reg(struct ks_net *ks)
+{
+       /* The KS8851 MLL has a bug reading back the command register,
+        * so rely on software to save its contents.
+        */
+       ks->cmd_reg_cache_int = ks->cmd_reg_cache;
+}
+
+/**
+ * ks_restore_cmd_reg - restore the command register from the cache and
+ *     write to hardware register.
+ * @ks: The chip information
+ *
+ */
+static inline void ks_restore_cmd_reg(struct ks_net *ks)
+{
+       ks->cmd_reg_cache = ks->cmd_reg_cache_int;
+       iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+}
+
+/**
+ * ks_set_powermode - set power mode of the device
+ * @ks: The chip information
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
+{
+       unsigned pmecr;
+
+       if (netif_msg_hw(ks))
+               ks_dbg(ks, "setting power mode %d\n", pwrmode);
+
+       ks_rdreg16(ks, KS_GRR);
+       pmecr = ks_rdreg16(ks, KS_PMECR);
+       pmecr &= ~PMECR_PM_MASK;
+       pmecr |= pwrmode;
+
+       ks_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
+ * ks_read_config - read chip configuration of bus width.
+ * @ks: The chip information
+ *
+ */
+static void ks_read_config(struct ks_net *ks)
+{
+       u16 reg_data = 0;
+
+       /* Regardless of bus width, an 8-bit read should always work. */
+       reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
+       reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
+
+       /* addr/data bus are multiplexed */
+       ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
+
+       /* Garbage data are prepended when reading from the QMU; the
+        * amount depends on the bus width.
+        */
+
+       if (reg_data & CCR_8BIT) {
+               ks->bus_width = ENUM_BUS_8BIT;
+               ks->extra_byte = 1;
+       } else if (reg_data & CCR_16BIT) {
+               ks->bus_width = ENUM_BUS_16BIT;
+               ks->extra_byte = 2;
+       } else {
+               ks->bus_width = ENUM_BUS_32BIT;
+               ks->extra_byte = 4;
+       }
+}
+
+/**
+ * ks_soft_reset - issue a soft reset to the device
+ * @ks: The device state.
+ * @op: The bit(s) to set in the GRR
+ *
+ * Issue the relevant soft-reset command to the device's GRR register
+ * specified by @op.
+ *
+ * Note, the delays are in there as a caution to ensure that the reset
+ * has time to take effect and then complete. Since the datasheet does
+ * not currently specify the exact sequence, we have chosen something
+ * that seems to work with our device.
+ */
+static void ks_soft_reset(struct ks_net *ks, unsigned op)
+{
+       /* Disable interrupt first */
+       ks_wrreg16(ks, KS_IER, 0x0000);
+       ks_wrreg16(ks, KS_GRR, op);
+       mdelay(10);     /* wait a short time to effect reset */
+       ks_wrreg16(ks, KS_GRR, 0);
+       mdelay(1);      /* wait for condition to clear */
+}
+
+
+/**
+ * ks_read_qmu - read 1 pkt data from the QMU.
+ * @ks: The chip information
+ * @buf: buffer address to save 1 pkt
+ * @len: Pkt length
+ * Here is the sequence to read 1 pkt:
+ *     1. set pseudo-DMA mode
+ *     2. read prepend data
+ *     3. read pkt data
+ *     4. reset pseudo-DMA mode
+ */
+static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
+{
+       u32 r =  ks->extra_byte & 0x1 ;
+       u32 w = ks->extra_byte - r;
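+       /* r is the single dummy byte that only the 8-bit bus prepends;
+        * w is whatever dummy bytes remain, readable as 16-bit words.
+        */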
+
+       /* 1. set pseudo-DMA mode */
+       ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+       ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+
+       /* 2. read prepend data */
+       /*
+        * read the prepended dummy bytes plus the 4-byte status/length
+        * header (2 for status, 2 for len) and discard them.
+        */
+
+       /* the 8-bit bus prepends one dummy byte; use likely(r) there for performance */
+       if (unlikely(r))
+               ioread8(ks->hw_addr);
+       ks_inblk(ks, buf, w + 2 + 2);
+
+       /* 3. read pkt data */
+       ks_inblk(ks, buf, ALIGN(len, 4));
+
+       /* 4. reset pseudo-DMA mode */
+       ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+}
+
+/**
+ * ks_rcv - read multiple pkts data from the QMU.
+ * @ks: The chip information
+ * @netdev: The network device to pass received pkts to.
+ *
+ * Read all of the header information before reading pkt content.
+ * All pkts in the QMU must be drained after issuing the interrupt
+ * ack; leaving only part of them behind is not allowed.
+ */
+static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
+{
+       u32     i;
+       struct type_frame_head *frame_hdr = ks->frame_head_info;
+       struct sk_buff *skb;
+
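+       /* the upper byte of RXFCTR holds the count of frames pending in the RXQ */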
+       ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
+
+       /* read all header information */
+       for (i = 0; i < ks->frame_cnt; i++) {
+               /* Checking Received packet status */
+               frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
+               /* Get packet len from hardware */
+               frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
+               frame_hdr++;
+       }
+
+       frame_hdr = ks->frame_head_info;
+       while (ks->frame_cnt--) {
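+               /* allocate a little extra so skb_reserve(skb, 2) can align the
+                * IP header and the trailing CRC/read alignment still fits.
+                */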
+               skb = dev_alloc_skb(frame_hdr->len + 16);
+               if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
+                       (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
+                       skb_reserve(skb, 2);
+                       /* read data block including CRC 4 bytes */
+                       ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
+                       skb_put(skb, frame_hdr->len);
+                       skb->dev = netdev;
+                       skb->protocol = eth_type_trans(skb, netdev);
+                       netif_rx(skb);
+               } else {
+                       printk(KERN_ERR "%s: err:skb alloc\n", __func__);
+                       ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
+                       if (skb)
+                               dev_kfree_skb_irq(skb);
+               }
+               frame_hdr++;
+       }
+}
+
+/**
+ * ks_update_link_status - link status update.
+ * @netdev: The network device.
+ * @ks: The chip information
+ *
+ */
+
+static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
+{
+       /* check the status of the link */
+       u32 link_up_status;
+       if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
+               netif_carrier_on(netdev);
+               link_up_status = true;
+       } else {
+               netif_carrier_off(netdev);
+               link_up_status = false;
+       }
+       if (netif_msg_link(ks))
+               ks_dbg(ks, "%s: %s\n",
+                       __func__, link_up_status ? "UP" : "DOWN");
+}
+
+/**
+ * ks_irq - device interrupt handler
+ * @irq: Interrupt number passed from the IRQ handler.
+ * @pw: The private word passed to request_irq(), our struct ks_net.
+ *
+ * This is the handler invoked to find out what happened
+ *
+ * Read the interrupt status, work out what needs to be done and then clear
+ * any of the interrupts that are not needed.
+ */
+
+static irqreturn_t ks_irq(int irq, void *pw)
+{
+       struct ks_net *ks = pw;
+       struct net_device *netdev = ks->netdev;
+       u16 status;
+
+       /*this should be the first in IRQ handler */
+       ks_save_cmd_reg(ks);
+
+       status = ks_rdreg16(ks, KS_ISR);
+       if (unlikely(!status)) {
+               ks_restore_cmd_reg(ks);
+               return IRQ_NONE;
+       }
+
+       ks_wrreg16(ks, KS_ISR, status);
+
+       if (likely(status & IRQ_RXI))
+               ks_rcv(ks, netdev);
+
+       if (unlikely(status & IRQ_LCI))
+               ks_update_link_status(netdev, ks);
+
+       if (unlikely(status & IRQ_TXI))
+               netif_wake_queue(netdev);
+
+       if (unlikely(status & IRQ_LDI)) {
+
+               u16 pmecr = ks_rdreg16(ks, KS_PMECR);
+               pmecr &= ~PMECR_WKEVT_MASK;
+               ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+       }
+
+       /* this should be the last in IRQ handler*/
+       ks_restore_cmd_reg(ks);
+       return IRQ_HANDLED;
+}
+
+
+/**
+ * ks_net_open - open network device
+ * @netdev: The network device being opened.
+ *
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device.
+ */
+static int ks_net_open(struct net_device *netdev)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       int err;
+
+#define        KS_INT_FLAGS    (IRQF_DISABLED|IRQF_TRIGGER_LOW)
+       /* lock the card, even if we may not actually do anything
+        * else at the moment.
+        */
+
+       if (netif_msg_ifup(ks))
+               ks_dbg(ks, "%s - entry\n", __func__);
+
+       /* request the IRQ for the device */
+       err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);
+
+       if (err) {
+               printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
+                       ks->irq, err);
+               return err;
+       }
+
+       if (netif_msg_ifup(ks))
+               ks_dbg(ks, "network device %s up\n", netdev->name);
+
+       return 0;
+}
+
+/**
+ * ks_net_stop - close network device
+ * @netdev: The device being closed.
+ *
+ * Called to close down a network device which has been active. Cancel any
+ * work, shut down the RX and TX process and then place the chip into a low
+ * power state whilst it is not being used.
+ */
+static int ks_net_stop(struct net_device *netdev)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+
+       if (netif_msg_ifdown(ks))
+               ks_info(ks, "%s: shutting down\n", netdev->name);
+
+       netif_stop_queue(netdev);
+
+       kfree(ks->frame_head_info);
+
+       mutex_lock(&ks->lock);
+
+       /* turn off the IRQs and ack any outstanding */
+       ks_wrreg16(ks, KS_IER, 0x0000);
+       ks_wrreg16(ks, KS_ISR, 0xffff);
+
+       /* shutdown RX process */
+       ks_wrreg16(ks, KS_RXCR1, 0x0000);
+
+       /* shutdown TX process */
+       ks_wrreg16(ks, KS_TXCR, 0x0000);
+
+       /* set powermode to soft power down to save power */
+       ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
+       free_irq(ks->irq, netdev);
+       mutex_unlock(&ks->lock);
+       return 0;
+}
+
+
+/**
+ * ks_write_qmu - write 1 pkt data to the QMU.
+ * @ks: The chip information
+ * @pdata: buffer address of the pkt to write
+ * @len: Pkt length in bytes
+ * Here is the sequence to write 1 pkt:
+ *     1. set pseudo-DMA mode
+ *     2. write status/length
+ *     3. write pkt data
+ *     4. reset pseudo-DMA mode
+ *     5. enqueue the pkt into the TXQ
+ *     6. wait until the pkt is out
+ */
+static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
+{
+       unsigned fid = ks->fid;
+
+       ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
+
+       /* reduce the tx interrupt occurrences. */
+       if (!fid)
+               fid |= TXFR_TXIC;       /* irq on completion */
+
+       /* start header at txb[0] to align txw entries */
+       ks->txh.txw[0] = cpu_to_le16(fid);
+       ks->txh.txw[1] = cpu_to_le16(len);
+
+       /* 1. set pseudo-DMA mode */
+       ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+       /* 2. write status/length info */
+       ks_outblk(ks, ks->txh.txw, 4);
+       /* 3. write pkt data */
+       ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
+       /* 4. reset pseudo-DMA mode */
+       ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+       /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
+       ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+       /* 6. wait until TXQCR_METFE is auto-cleared */
+       while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
+               ;
+}
+
+static void ks_disable_int(struct ks_net *ks)
+{
+       ks_wrreg16(ks, KS_IER, 0x0000);
+}  /* ks_disable_int */
+
+static void ks_enable_int(struct ks_net *ks)
+{
+       ks_wrreg16(ks, KS_IER, ks->rc_ier);
+}  /* ks_enable_int */
+
+/**
+ * ks_start_xmit - transmit packet
+ * @skb                : The buffer to transmit
+ * @netdev     : The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb.
+ * Disabling the IRQ and taking @statelock is required because tx and rx
+ * should be mutually exclusive, so while tx is in progress the RX
+ * interrupt is prevented from happening.
+ */
+static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       int retv = NETDEV_TX_OK;
+       struct ks_net *ks = netdev_priv(netdev);
+
+       disable_irq(netdev->irq);
+       ks_disable_int(ks);
+       spin_lock(&ks->statelock);
+
+       /* Extra space is required:
+        * 4 bytes for alignment, 4 for status/length, 4 for CRC
+        */
+
+       if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
+               ks_write_qmu(ks, skb->data, skb->len);
+               dev_kfree_skb(skb);
+       } else
+               retv = NETDEV_TX_BUSY;
+       spin_unlock(&ks->statelock);
+       ks_enable_int(ks);
+       enable_irq(netdev->irq);
+       return retv;
+}
+
+/**
+ * ks_start_rx - start receiving pkts
+ * @ks         : The chip information
+ *
+ */
+static void ks_start_rx(struct ks_net *ks)
+{
+       u16 cntl;
+
+       /* Enables QMU Receive (RXCR1). */
+       cntl = ks_rdreg16(ks, KS_RXCR1);
+       cntl |= RXCR1_RXE ;
+       ks_wrreg16(ks, KS_RXCR1, cntl);
+}  /* ks_start_rx */
+
+/**
+ * ks_stop_rx - stop receiving pkts
+ * @ks         : The chip information
+ *
+ */
+static void ks_stop_rx(struct ks_net *ks)
+{
+       u16 cntl;
+
+       /* Disables QMU Receive (RXCR1). */
+       cntl = ks_rdreg16(ks, KS_RXCR1);
+       cntl &= ~RXCR1_RXE ;
+       ks_wrreg16(ks, KS_RXCR1, cntl);
+
+}  /* ks_stop_rx */
+
+static unsigned long const ethernet_polynomial = 0x04c11db7U;
+
+static unsigned long ether_gen_crc(int length, u8 *data)
+{
+       long crc = -1;
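+       /* bit-at-a-time CRC-32 of the address, seeded with all ones and
+        * using the standard Ethernet polynomial.
+        */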
+       while (--length >= 0) {
+               u8 current_octet = *data++;
+               int bit;
+
+               for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+                       crc = (crc << 1) ^
+                               ((crc < 0) ^ (current_octet & 1) ?
+                       ethernet_polynomial : 0);
+               }
+       }
+       return (unsigned long)crc;
+}  /* ether_gen_crc */
+
+/**
+ * ks_set_grpaddr - set multicast information
+ * @ks : The chip information
+ */
+
+static void ks_set_grpaddr(struct ks_net *ks)
+{
+       u8      i;
+       u32     index, position, value;
+
+       memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
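+       /* the top 6 bits of each address CRC pick one of 64 hash-table bits;
+        * index/value locate that bit within mcast_bits[].
+        */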
+
+       for (i = 0; i < ks->mcast_lst_size; i++) {
+               position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
+               index = position >> 3;
+               value = 1 << (position & 7);
+               ks->mcast_bits[index] |= (u8)value;
+       }
+
+       for (i  = 0; i < HW_MCAST_SIZE; i++) {
+               if (i & 1) {
+                       ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
+                               (ks->mcast_bits[i] << 8) |
+                               ks->mcast_bits[i - 1]);
+               }
+       }
+}  /* ks_set_grpaddr */
+
+/*
+ * ks_clear_mcast - clear multicast information
+ * @ks : The chip information
+ *
+ * This routine removes all mcast addresses set in the hardware.
+ */
+
+static void ks_clear_mcast(struct ks_net *ks)
+{
+       u16     i, mcast_size;
+       for (i = 0; i < HW_MCAST_SIZE; i++)
+               ks->mcast_bits[i] = 0;
+
+       mcast_size = HW_MCAST_SIZE >> 2;
+       for (i = 0; i < mcast_size; i++)
+               ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
+}
+
+static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
+{
+       u16             cntl;
+       ks->promiscuous = promiscuous_mode;
+       ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
+       cntl = ks_rdreg16(ks, KS_RXCR1);
+
+       cntl &= ~RXCR1_FILTER_MASK;
+       if (promiscuous_mode)
+               /* Enable Promiscuous mode */
+               cntl |= RXCR1_RXAE | RXCR1_RXINVF;
+       else
+               /* Disable Promiscuous mode (default normal mode) */
+               cntl |= RXCR1_RXPAFMA;
+
+       ks_wrreg16(ks, KS_RXCR1, cntl);
+
+       if (ks->enabled)
+               ks_start_rx(ks);
+
+}  /* ks_set_promis */
+
+static void ks_set_mcast(struct ks_net *ks, u16 mcast)
+{
+       u16     cntl;
+
+       ks->all_mcast = mcast;
+       ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
+       cntl = ks_rdreg16(ks, KS_RXCR1);
+       cntl &= ~RXCR1_FILTER_MASK;
+       if (mcast)
+               /* Enable "Perfect with Multicast address passed mode" */
+               cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
+       else
+               /**
+                * Disable "Perfect with Multicast address passed
+                * mode" (normal mode).
+                */
+               cntl |= RXCR1_RXPAFMA;
+
+       ks_wrreg16(ks, KS_RXCR1, cntl);
+
+       if (ks->enabled)
+               ks_start_rx(ks);
+}  /* ks_set_mcast */
+
+static void ks_set_rx_mode(struct net_device *netdev)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       struct dev_mc_list *ptr;
+
+       /* Turn on/off promiscuous mode. */
+       if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
+               ks_set_promis(ks,
+                       (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
+       /* Turn on/off all mcast mode. */
+       else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
+               ks_set_mcast(ks,
+                       (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
+       else
+               ks_set_promis(ks, false);
+
+       if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
+               if (netdev->mc_count <= MAX_MCAST_LST) {
+                       int i = 0;
+                       for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
+                               if (!(*ptr->dmi_addr & 1))
+                                       continue;
+                               if (i >= MAX_MCAST_LST)
+                                       break;
+                               memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
+                               MAC_ADDR_LEN);
+                       }
+                       ks->mcast_lst_size = (u8)i;
+                       ks_set_grpaddr(ks);
+               } else {
+                       /**
+                        * List too big to support so
+                        * turn on all mcast mode.
+                        */
+                       ks->mcast_lst_size = MAX_MCAST_LST;
+                       ks_set_mcast(ks, true);
+               }
+       } else {
+               ks->mcast_lst_size = 0;
+               ks_clear_mcast(ks);
+       }
+} /* ks_set_rx_mode */
+
+static void ks_set_mac(struct ks_net *ks, u8 *data)
+{
+       u16 *pw = (u16 *)data;
+       u16 w, u;
+
+       ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
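+       /* swap each 16-bit pair of the address before writing it to the
+        * MARH/MARM/MARL registers.
+        */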
+
+       u = *pw++;
+       w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+       ks_wrreg16(ks, KS_MARH, w);
+
+       u = *pw++;
+       w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+       ks_wrreg16(ks, KS_MARM, w);
+
+       u = *pw;
+       w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+       ks_wrreg16(ks, KS_MARL, w);
+
+       memcpy(ks->mac_addr, data, 6);
+
+       if (ks->enabled)
+               ks_start_rx(ks);
+}
+
+static int ks_set_mac_address(struct net_device *netdev, void *paddr)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       struct sockaddr *addr = paddr;
+       u8 *da;
+
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+       da = (u8 *)netdev->dev_addr;
+
+       ks_set_mac(ks, da);
+       return 0;
+}
+
+static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+
+       if (!netif_running(netdev))
+               return -EINVAL;
+
+       return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
+}
+
+static const struct net_device_ops ks_netdev_ops = {
+       .ndo_open               = ks_net_open,
+       .ndo_stop               = ks_net_stop,
+       .ndo_do_ioctl           = ks_net_ioctl,
+       .ndo_start_xmit         = ks_start_xmit,
+       .ndo_set_mac_address    = ks_set_mac_address,
+       .ndo_set_rx_mode        = ks_set_rx_mode,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+/* ethtool support */
+
+static void ks_get_drvinfo(struct net_device *netdev,
+                              struct ethtool_drvinfo *di)
+{
+       strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
+       strlcpy(di->version, "1.00", sizeof(di->version));
+       strlcpy(di->bus_info, dev_name(netdev->dev.parent),
+               sizeof(di->bus_info));
+}
+
+static u32 ks_get_msglevel(struct net_device *netdev)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       return ks->msg_enable;
+}
+
+static void ks_set_msglevel(struct net_device *netdev, u32 to)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       ks->msg_enable = to;
+}
+
+static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       return mii_ethtool_gset(&ks->mii, cmd);
+}
+
+static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       return mii_ethtool_sset(&ks->mii, cmd);
+}
+
+static u32 ks_get_link(struct net_device *netdev)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       return mii_link_ok(&ks->mii);
+}
+
+static int ks_nway_reset(struct net_device *netdev)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       return mii_nway_restart(&ks->mii);
+}
+
+static const struct ethtool_ops ks_ethtool_ops = {
+       .get_drvinfo    = ks_get_drvinfo,
+       .get_msglevel   = ks_get_msglevel,
+       .set_msglevel   = ks_set_msglevel,
+       .get_settings   = ks_get_settings,
+       .set_settings   = ks_set_settings,
+       .get_link       = ks_get_link,
+       .nway_reset     = ks_nway_reset,
+};
+
+/* MII interface controls */
+
+/**
+ * ks_phy_reg - convert MII register into a KS8851 register
+ * @reg: MII register number.
+ *
+ * Return the KS8851 register number for the corresponding MII PHY register
+ * if possible. Return zero if the MII register has no direct mapping to the
+ * KS8851 register set.
+ */
+static int ks_phy_reg(int reg)
+{
+       switch (reg) {
+       case MII_BMCR:
+               return KS_P1MBCR;
+       case MII_BMSR:
+               return KS_P1MBSR;
+       case MII_PHYSID1:
+               return KS_PHY1ILR;
+       case MII_PHYSID2:
+               return KS_PHY1IHR;
+       case MII_ADVERTISE:
+               return KS_P1ANAR;
+       case MII_LPA:
+               return KS_P1ANLPR;
+       }
+
+       return 0x0;
+}
+
+/**
+ * ks_phy_read - MII interface PHY register read.
+ * @netdev: The network device the PHY is on.
+ * @phy_addr: Address of PHY (ignored as we only have one)
+ * @reg: The register to read.
+ *
+ * This call reads data from the PHY register specified in @reg. Since the
+ * device does not support all the MII registers, the non-existent values
+ * are always returned as zero.
+ *
+ * We return zero for unsupported registers as the MII code does not check
+ * the value returned for any error status, and simply returns it to the
+ * caller. The mii-tool that the driver was tested with takes any negative error
+ * as real PHY capabilities, thus displaying incorrect data to the user.
+ */
+static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       int ksreg;
+       int result;
+
+       ksreg = ks_phy_reg(reg);
+       if (!ksreg)
+               return 0x0;     /* no error return allowed, so use zero */
+
+       mutex_lock(&ks->lock);
+       result = ks_rdreg16(ks, ksreg);
+       mutex_unlock(&ks->lock);
+
+       return result;
+}
+
+static void ks_phy_write(struct net_device *netdev,
+                            int phy, int reg, int value)
+{
+       struct ks_net *ks = netdev_priv(netdev);
+       int ksreg;
+
+       ksreg = ks_phy_reg(reg);
+       if (ksreg) {
+               mutex_lock(&ks->lock);
+               ks_wrreg16(ks, ksreg, value);
+               mutex_unlock(&ks->lock);
+       }
+}
+
+/**
+ * ks_read_selftest - read the selftest memory info.
+ * @ks: The device state
+ *
+ * Read and check the TX/RX memory selftest information.
+ */
+static int ks_read_selftest(struct ks_net *ks)
+{
+       unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
+       int ret = 0;
+       unsigned rd;
+
+       rd = ks_rdreg16(ks, KS_MBIR);
+
+       if ((rd & both_done) != both_done) {
+               ks_warn(ks, "Memory selftest not finished\n");
+               return 0;
+       }
+
+       if (rd & MBIR_TXMBFA) {
+               ks_err(ks, "TX memory selftest fails\n");
+               ret |= 1;
+       }
+
+       if (rd & MBIR_RXMBFA) {
+               ks_err(ks, "RX memory selftest fails\n");
+               ret |= 2;
+       }
+
+       ks_info(ks, "the selftest passes\n");
+       return ret;
+}
+
+static void ks_disable(struct ks_net *ks)
+{
+       u16     w;
+
+       w = ks_rdreg16(ks, KS_TXCR);
+
+       /* Disables QMU Transmit (TXCR). */
+       w  &= ~TXCR_TXE;
+       ks_wrreg16(ks, KS_TXCR, w);
+
+       /* Disables QMU Receive (RXCR1). */
+       w = ks_rdreg16(ks, KS_RXCR1);
+       w &= ~RXCR1_RXE ;
+       ks_wrreg16(ks, KS_RXCR1, w);
+
+       ks->enabled = false;
+
+}  /* ks_disable */
+
+static void ks_setup(struct ks_net *ks)
+{
+       u16     w;
+
+       /**
+        * Configure QMU Transmit
+        */
+
+       /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
+       ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
+
+       /* Setup Receive Frame Data Pointer Auto-Increment */
+       ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+
+       /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
+       ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
+
+       /* Setup RxQ Command Control (RXQCR) */
+       ks->rc_rxqcr = RXQCR_CMD_CNTL;
+       ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+
+       /*
+        * force half-duplex mode (the default is full duplex) because
+        * if auto-negotiation fails, most switches fall back to
+        * half-duplex.
+        */
+
+       w = ks_rdreg16(ks, KS_P1MBCR);
+       w &= ~P1MBCR_FORCE_FDX;
+       ks_wrreg16(ks, KS_P1MBCR, w);
+
+       w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
+       ks_wrreg16(ks, KS_TXCR, w);
+
+       w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;
+
+       if (ks->promiscuous)         /* bPromiscuous */
+               w |= (RXCR1_RXAE | RXCR1_RXINVF);
+       else if (ks->all_mcast) /* Multicast address passed mode */
+               w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
+       else                                   /* Normal mode */
+               w |= RXCR1_RXPAFMA;
+
+       ks_wrreg16(ks, KS_RXCR1, w);
+}  /*ks_setup */
+
+
+static void ks_setup_int(struct ks_net *ks)
+{
+       ks->rc_ier = 0x00;
+       /* Clear the interrupts status of the hardware. */
+       ks_wrreg16(ks, KS_ISR, 0xffff);
+
+       /* Enables the interrupts of the hardware. */
+       ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
+}  /* ks_setup_int */
+
+void ks_enable(struct ks_net *ks)
+{
+       u16 w;
+
+       w = ks_rdreg16(ks, KS_TXCR);
+       /* Enables QMU Transmit (TXCR). */
+       ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
+
+       /*
+        * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
+        * Enable
+        */
+
+       w = ks_rdreg16(ks, KS_RXQCR);
+       ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
+
+       /* Enables QMU Receive (RXCR1). */
+       w = ks_rdreg16(ks, KS_RXCR1);
+       ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
+       ks->enabled = true;
+}  /* ks_enable */
+
+static int ks_hw_init(struct ks_net *ks)
+{
+#define        MHEADER_SIZE    (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
+       ks->promiscuous = 0;
+       ks->all_mcast = 0;
+       ks->mcast_lst_size = 0;
+
+       ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
+       if (!ks->frame_head_info) {
+               printk(KERN_ERR "Error: Fail to allocate frame memory\n");
+               return false;
+       }
+
+       ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
+       return true;
+}
+
+
+static int __devinit ks8851_probe(struct platform_device *pdev)
+{
+       int err = -ENOMEM;
+       struct resource *io_d, *io_c;
+       struct net_device *netdev;
+       struct ks_net *ks;
+       u16 id, data;
+
+       io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+       if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
+               goto err_mem_region;
+
+       if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
+               goto err_mem_region1;
+
+       netdev = alloc_etherdev(sizeof(struct ks_net));
+       if (!netdev)
+               goto err_alloc_etherdev;
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+
+       ks = netdev_priv(netdev);
+       ks->netdev = netdev;
+       ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
+
+       if (!ks->hw_addr)
+               goto err_ioremap;
+
+       ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
+       if (!ks->hw_addr_cmd)
+               goto err_ioremap1;
+
+       ks->irq = platform_get_irq(pdev, 0);
+
+       if (ks->irq < 0) {
+               err = ks->irq;
+               goto err_get_irq;
+       }
+
+       ks->pdev = pdev;
+
+       mutex_init(&ks->lock);
+       spin_lock_init(&ks->statelock);
+
+       netdev->netdev_ops = &ks_netdev_ops;
+       netdev->ethtool_ops = &ks_ethtool_ops;
+
+       /* setup mii state */
+       ks->mii.dev             = netdev;
+       ks->mii.phy_id          = 1;
+       ks->mii.phy_id_mask     = 1;
+       ks->mii.reg_num_mask    = 0xf;
+       ks->mii.mdio_read       = ks_phy_read;
+       ks->mii.mdio_write      = ks_phy_write;
+
+       ks_info(ks, "message enable is %d\n", msg_enable);
+       /* set the default message enable */
+       ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
+                                                    NETIF_MSG_PROBE |
+                                                    NETIF_MSG_LINK));
+       ks_read_config(ks);
+
+       /* simple check for a valid chip being connected to the bus */
+       if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
+               ks_err(ks, "failed to read device ID\n");
+               err = -ENODEV;
+               goto err_register;
+       }
+
+       if (ks_read_selftest(ks)) {
+               ks_err(ks, "failed to read device ID\n");
+               err = -ENODEV;
+               goto err_register;
+       }
+
+       err = register_netdev(netdev);
+       if (err)
+               goto err_register;
+
+       platform_set_drvdata(pdev, netdev);
+
+       ks_soft_reset(ks, GRR_GSR);
+       ks_hw_init(ks);
+       ks_disable(ks);
+       ks_setup(ks);
+       ks_setup_int(ks);
+       ks_enable_int(ks);
+       ks_enable(ks);
+       memcpy(netdev->dev_addr, ks->mac_addr, 6);
+
+       data = ks_rdreg16(ks, KS_OBCR);
+       ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
+
+       /*
+        * If you want to use the default MAC addr,
+        * comment out the two calls below.
+        */
+
+       random_ether_addr(netdev->dev_addr);
+       ks_set_mac(ks, netdev->dev_addr);
+
+       id = ks_rdreg16(ks, KS_CIDER);
+
+       printk(KERN_INFO DRV_NAME
+               " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+               (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+       return 0;
+
+err_register:
+err_get_irq:
+       iounmap(ks->hw_addr_cmd);
+err_ioremap1:
+       iounmap(ks->hw_addr);
+err_ioremap:
+       free_netdev(netdev);
+err_alloc_etherdev:
+       release_mem_region(io_c->start, resource_size(io_c));
+err_mem_region1:
+       release_mem_region(io_d->start, resource_size(io_d));
+err_mem_region:
+       return err;
+}
+
+static int __devexit ks8851_remove(struct platform_device *pdev)
+{
+       struct net_device *netdev = platform_get_drvdata(pdev);
+       struct ks_net *ks = netdev_priv(netdev);
+       struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       unregister_netdev(netdev);
+       iounmap(ks->hw_addr);
+       free_netdev(netdev);
+       release_mem_region(iomem->start, resource_size(iomem));
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+
+}
+
+static struct platform_driver ks8851_platform_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+       },
+       .probe = ks8851_probe,
+       .remove = __devexit_p(ks8851_remove),
+};
+
+static int __init ks8851_init(void)
+{
+       return platform_driver_register(&ks8851_platform_driver);
+}
+
+static void __exit ks8851_exit(void)
+{
+       platform_driver_unregister(&ks8851_platform_driver);
+}
+
+module_init(ks8851_init);
+module_exit(ks8851_exit);
+
+MODULE_DESCRIPTION("KS8851 MLL Network driver");
+MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
+MODULE_LICENSE("GPL");
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+
index 92ceb689b4d49c854a669cda872cff1da7822d18..2af81735386b86066c30d7ebbf9d99da59e2236e 100644 (file)
@@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev)
 
 static struct platform_driver meth_driver = {
        .probe  = meth_probe,
-       .remove = __devexit_p(meth_remove),
+       .remove = __exit_p(meth_remove),
        .driver = {
                .name   = "meth",
                .owner  = THIS_MODULE,
index b5aa974827e53bc5b69b209797b332028eec6a42..9b9eab107704bd4fa3946c62d9369fb7e74365c5 100644 (file)
@@ -1714,7 +1714,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        /* 4 fragments per cmd des */
        no_of_desc = (frag_count + 3) >> 2;
 
-       if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) {
+       if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
                netif_stop_queue(netdev);
                return NETDEV_TX_BUSY;
        }
index 064a4fe1dd90efcee86a805ed6e94e1037385c2b..28a86224879db42187fae1adffcca056236b79b0 100644 (file)
@@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
        struct pasemi_mac *mac = netdev_priv(netdev);
        struct phy_device *phydev = mac->phydev;
 
+       if (!phydev)
+               return -EOPNOTSUPP;
+
        return phy_ethtool_gset(phydev, cmd);
 }
 
index 474876c879cba3579f8745e5927c1a1042af8334..bd3447f04902ea805e40e4017d5dd610611f914d 100644 (file)
@@ -1754,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = {
        PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
        PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
        PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
-       PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
-       PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
-       PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"),
+       PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
+       PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
+       PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
        PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
        PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
-       PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
+       PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
        PCMCIA_DEVICE_CIS_PROD_ID12("PMX   ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
-       PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"),
+       PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
        PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
        PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
                0xb4be14e3, 0x43ac239b, 0x0877b627),
index cc394d073755413c1bae92367c727f172b6d11e3..5910df60c93eee6a16705ef350546aef6593107a 100644 (file)
@@ -2179,7 +2179,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
  * session or the special tunnel type.
  */
 static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
-                              char __user *optval, int optlen)
+                              char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct pppol2tp_session *session = sk->sk_user_data;
index a9845a2f243f03b68d297539fb6a7dd7b645c0f5..3ec6e85587a2280bdda0bb3f095b32e292a209db 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
 
 /*
  * General definitions...
@@ -135,9 +136,9 @@ enum {
        RST_FO_TFO = (1 << 0),
        RST_FO_RR_MASK = 0x00060000,
        RST_FO_RR_CQ_CAM = 0x00000000,
-       RST_FO_RR_DROP = 0x00000001,
-       RST_FO_RR_DQ = 0x00000002,
-       RST_FO_RR_RCV_FUNC_CQ = 0x00000003,
+       RST_FO_RR_DROP = 0x00000002,
+       RST_FO_RR_DQ = 0x00000004,
+       RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
        RST_FO_FRB = (1 << 12),
        RST_FO_MOP = (1 << 13),
        RST_FO_REG = (1 << 14),
@@ -1381,15 +1382,15 @@ struct intr_context {
 
 /* adapter flags definitions. */
 enum {
-       QL_ADAPTER_UP = (1 << 0),       /* Adapter has been brought up. */
-       QL_LEGACY_ENABLED = (1 << 3),
-       QL_MSI_ENABLED = (1 << 3),
-       QL_MSIX_ENABLED = (1 << 4),
-       QL_DMA64 = (1 << 5),
-       QL_PROMISCUOUS = (1 << 6),
-       QL_ALLMULTI = (1 << 7),
-       QL_PORT_CFG = (1 << 8),
-       QL_CAM_RT_SET = (1 << 9),
+       QL_ADAPTER_UP = 0,      /* Adapter has been brought up. */
+       QL_LEGACY_ENABLED = 1,
+       QL_MSI_ENABLED = 2,
+       QL_MSIX_ENABLED = 3,
+       QL_DMA64 = 4,
+       QL_PROMISCUOUS = 5,
+       QL_ALLMULTI = 6,
+       QL_PORT_CFG = 7,
+       QL_CAM_RT_SET = 8,
 };
 
 /* link_status bit definitions */
@@ -1477,7 +1478,6 @@ struct ql_adapter {
        u32 mailbox_in;
        u32 mailbox_out;
        struct mbox_params idc_mbc;
-       struct mutex    mpi_mutex;
 
        int tx_ring_size;
        int rx_ring_size;
index 68f9bd280f86b028843b02911f9fd3a63ad861f7..52073946bce35cec6da556f032f1ae4c8b2cc746 100644 (file)
@@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
        if (!netif_running(qdev->ndev))
                return status;
 
-       spin_lock(&qdev->hw_lock);
        /* Skip the default queue, and update the outbound handler
         * queues if they changed.
         */
@@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
                }
        }
 exit:
-       spin_unlock(&qdev->hw_lock);
        return status;
 }
 
index 7783c5db81dcf12174e2dc95217336987c6ff10e..61680715cde0364f45a5cab49388b336d0957369 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
@@ -1926,12 +1925,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return;
-       spin_lock(&qdev->hw_lock);
        if (ql_set_mac_addr_reg
            (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
                QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
        }
-       spin_unlock(&qdev->hw_lock);
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 }
 
@@ -1945,12 +1942,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
        if (status)
                return;
 
-       spin_lock(&qdev->hw_lock);
        if (ql_set_mac_addr_reg
            (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
                QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
        }
-       spin_unlock(&qdev->hw_lock);
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 
 }
@@ -2001,15 +1996,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
        /*
         * Check MPI processor activity.
         */
-       if (var & STS_PI) {
+       if ((var & STS_PI) &&
+               (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
                /*
                 * We've got an async event or mailbox completion.
                 * Handle it and clear the source of the interrupt.
                 */
                QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
                ql_disable_completion_interrupt(qdev, intr_context->intr);
-               queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
-                                     &qdev->mpi_work, 0);
+               ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+               queue_delayed_work_on(smp_processor_id(),
+                               qdev->workqueue, &qdev->mpi_work, 0);
                work_done++;
        }
 
@@ -3142,14 +3139,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
 {
        int status = 0;
 
-       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+       /* Clear all the entries in the routing table. */
+       status = ql_clear_routing_entries(qdev);
        if (status)
                return status;
 
-       /* Clear all the entries in the routing table. */
-       status = ql_clear_routing_entries(qdev);
+       status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
-               goto exit;
+               return status;
 
        status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
        if (status) {
@@ -3380,12 +3377,10 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 
        ql_free_rx_buffers(qdev);
 
-       spin_lock(&qdev->hw_lock);
        status = ql_adapter_reset(qdev);
        if (status)
                QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
                        qdev->func);
-       spin_unlock(&qdev->hw_lock);
        return status;
 }
 
@@ -3587,7 +3582,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
        status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
        if (status)
                return;
-       spin_lock(&qdev->hw_lock);
        /*
         * Set or clear promiscuous mode if a
         * transition is taking place.
@@ -3664,7 +3658,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
                }
        }
 exit:
-       spin_unlock(&qdev->hw_lock);
        ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 }
 
@@ -3684,10 +3677,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
                return status;
-       spin_lock(&qdev->hw_lock);
        status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
                        MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
-       spin_unlock(&qdev->hw_lock);
        if (status)
                QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -3705,7 +3696,7 @@ static void ql_asic_reset_work(struct work_struct *work)
        struct ql_adapter *qdev =
            container_of(work, struct ql_adapter, asic_reset_work.work);
        int status;
-
+       rtnl_lock();
        status = ql_adapter_down(qdev);
        if (status)
                goto error;
@@ -3713,12 +3704,12 @@ static void ql_asic_reset_work(struct work_struct *work)
        status = ql_adapter_up(qdev);
        if (status)
                goto error;
-
+       rtnl_unlock();
        return;
 error:
        QPRINTK(qdev, IFUP, ALERT,
                "Driver up/down cycle failed, closing device\n");
-       rtnl_lock();
+
        set_bit(QL_ADAPTER_UP, &qdev->flags);
        dev_close(qdev->ndev);
        rtnl_unlock();
@@ -3834,11 +3825,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
                return err;
        }
 
+       qdev->ndev = ndev;
+       qdev->pdev = pdev;
+       pci_set_drvdata(pdev, ndev);
        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (pos <= 0) {
                dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
                        "aborting.\n");
-               goto err_out;
+               return pos;
        } else {
                pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
                val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
@@ -3851,7 +3845,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "PCI region request failed.\n");
-               goto err_out;
+               return err;
        }
 
        pci_set_master(pdev);
@@ -3869,7 +3863,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
                goto err_out;
        }
 
-       pci_set_drvdata(pdev, ndev);
        qdev->reg_base =
            ioremap_nocache(pci_resource_start(pdev, 1),
                            pci_resource_len(pdev, 1));
@@ -3889,8 +3882,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
                goto err_out;
        }
 
-       qdev->ndev = ndev;
-       qdev->pdev = pdev;
        err = ql_get_board_info(qdev);
        if (err) {
                dev_err(&pdev->dev, "Register access failed.\n");
@@ -3930,7 +3921,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
        INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
        INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
-       mutex_init(&qdev->mpi_mutex);
        init_completion(&qdev->ide_completion);
 
        if (!cards_found) {
index 6685bd97da91f211452743dd7cfd9c5ae77996a3..c2e43073047eabfa58b40945de95e7d5c7c944cb 100644 (file)
@@ -472,7 +472,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
 {
        int status, count;
 
-       mutex_lock(&qdev->mpi_mutex);
 
        /* Begin polled mode for MPI */
        ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -541,7 +540,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
                status = -EIO;
        }
 end:
-       mutex_unlock(&qdev->mpi_mutex);
        /* End polled mode for MPI */
        ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
        return status;
@@ -776,7 +774,9 @@ static int ql_idc_wait(struct ql_adapter *qdev)
 static int ql_set_port_cfg(struct ql_adapter *qdev)
 {
        int status;
+       rtnl_lock();
        status = ql_mb_set_port_cfg(qdev);
+       rtnl_unlock();
        if (status)
                return status;
        status = ql_idc_wait(qdev);
@@ -797,7 +797,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
            container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
        int status;
 
+       rtnl_lock();
        status = ql_mb_get_port_cfg(qdev);
+       rtnl_unlock();
        if (status) {
                QPRINTK(qdev, DRV, ERR,
                        "Bug: Failed to get port config data.\n");
@@ -855,7 +857,9 @@ void ql_mpi_idc_work(struct work_struct *work)
                 * needs to be set.
                 * */
                set_bit(QL_CAM_RT_SET, &qdev->flags);
+               rtnl_lock();
                status = ql_mb_idc_ack(qdev);
+               rtnl_unlock();
                if (status) {
                        QPRINTK(qdev, DRV, ERR,
                        "Bug: No pending IDC!\n");
@@ -871,7 +875,7 @@ void ql_mpi_work(struct work_struct *work)
        struct mbox_params *mbcp = &mbc;
        int err = 0;
 
-       mutex_lock(&qdev->mpi_mutex);
+       rtnl_lock();
 
        while (ql_read32(qdev, STS) & STS_PI) {
                memset(mbcp, 0, sizeof(struct mbox_params));
@@ -884,7 +888,7 @@ void ql_mpi_work(struct work_struct *work)
                        break;
        }
 
-       mutex_unlock(&qdev->mpi_mutex);
+       rtnl_unlock();
        ql_enable_completion_interrupt(qdev, 0);
 }
 
index ecf3279fbef516857cca35a56cd0562805e92d52..f4dfd1f679a9d925081a364cd500727734326b6e 100644 (file)
@@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
 
 static struct platform_driver sgiseeq_driver = {
        .probe  = sgiseeq_probe,
-       .remove = __devexit_p(sgiseeq_remove),
+       .remove = __exit_p(sgiseeq_remove),
        .driver = {
                .name   = "sgiseeq",
                .owner  = THIS_MODULE,
index 55bad408196604d454b4e4f8e241cb2b98365e83..01f6811f13249af0aabf7114f32d1f413fdee775 100644 (file)
@@ -3935,11 +3935,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 #endif
 
        err = -ENOMEM;
-       hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+       /* space for skge@pci:0000:04:00.0 */
+       hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
+                    + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
        if (!hw) {
                dev_err(&pdev->dev, "cannot allocate hardware struct\n");
                goto err_out_free_regions;
        }
+       sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
 
        hw->pdev = pdev;
        spin_lock_init(&hw->hw_lock);
@@ -3974,7 +3977,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                goto err_out_free_netdev;
        }
 
-       err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+       err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
        if (err) {
                dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
                       dev->name, pdev->irq);
@@ -3982,14 +3985,17 @@ static int __devinit skge_probe(struct pci_dev *pdev,
        }
        skge_show_addr(dev);
 
-       if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
-               if (register_netdev(dev1) == 0)
+       if (hw->ports > 1) {
+               dev1 = skge_devinit(hw, 1, using_dac);
+               if (dev1 && register_netdev(dev1) == 0)
                        skge_show_addr(dev1);
                else {
                        /* Failure to register second port need not be fatal */
                        dev_warn(&pdev->dev, "register of second port failed\n");
                        hw->dev[1] = NULL;
-                       free_netdev(dev1);
+                       hw->ports = 1;
+                       if (dev1)
+                               free_netdev(dev1);
                }
        }
        pci_set_drvdata(pdev, hw);
index 17caccbb768577f44ea259f672b53cfe7a73407d..831de1b6e96e49284973b2eb013ef490f3e0eb3c 100644 (file)
@@ -2423,6 +2423,8 @@ struct skge_hw {
        u16                  phy_addr;
        spinlock_t           phy_lock;
        struct tasklet_struct phy_task;
+
+       char                 irq_name[0]; /* skge@pci:000:04:00.0 */
 };
 
 enum pause_control {
index ef1165718dd77328da13bf06308d59baae82bc59..2ab5c39f33cabfa4c40a3e2a5fe71e67d7b5778c 100644 (file)
@@ -4487,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
        wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
 
        err = -ENOMEM;
-       hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+
+       hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+                    + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
        if (!hw) {
                dev_err(&pdev->dev, "cannot allocate hardware struct\n");
                goto err_out_free_regions;
        }
 
        hw->pdev = pdev;
+       sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
 
        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
        if (!hw->regs) {
@@ -4539,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 
        err = request_irq(pdev->irq, sky2_intr,
                          (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
-                         dev->name, hw);
+                         hw->irq_name, hw);
        if (err) {
                dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
                goto err_out_unregister;
index e0f23a101043c6ee977a52bb961678261226815d..ed54129698b4098306bcba5424bc285342e0a88c 100644 (file)
@@ -2085,6 +2085,8 @@ struct sky2_hw {
        struct timer_list    watchdog_timer;
        struct work_struct   restart_work;
        wait_queue_head_t    msi_wait;
+
+       char                 irq_name[0];
 };
 
 static inline int sky2_is_copper(const struct sky2_hw *hw)
index f09bc5dfe8b2c7d652f8d3848ee204d5c2630fba..ba5d3fe753b694d58b93bedf2fc050f001995fef 100644 (file)
@@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
        struct tg3 *tp = bp->priv;
        u32 val;
 
-       if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
-               return -EAGAIN;
+       spin_lock_bh(&tp->lock);
 
        if (tg3_readphy(tp, reg, &val))
-               return -EIO;
+               val = -EIO;
+
+       spin_unlock_bh(&tp->lock);
 
        return val;
 }
@@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
 {
        struct tg3 *tp = bp->priv;
+       u32 ret = 0;
 
-       if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
-               return -EAGAIN;
+       spin_lock_bh(&tp->lock);
 
        if (tg3_writephy(tp, reg, val))
-               return -EIO;
+               ret = -EIO;
 
-       return 0;
+       spin_unlock_bh(&tp->lock);
+
+       return ret;
 }
 
 static int tg3_mdio_reset(struct mii_bus *bp)
@@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
 
 static void tg3_mdio_start(struct tg3 *tp)
 {
-       if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
-               mutex_lock(&tp->mdio_bus->mdio_lock);
-               tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
-               mutex_unlock(&tp->mdio_bus->mdio_lock);
-       }
-
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);
@@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp)
                tg3_mdio_config_5785(tp);
 }
 
-static void tg3_mdio_stop(struct tg3 *tp)
-{
-       if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
-               mutex_lock(&tp->mdio_bus->mdio_lock);
-               tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
-               mutex_unlock(&tp->mdio_bus->mdio_lock);
-       }
-}
-
 static int tg3_mdio_init(struct tg3 *tp)
 {
        int i;
@@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp)
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
-               tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
        }
 }
 
@@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev)
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
 
-       spin_lock(&tp->lock);
+       spin_lock_bh(&tp->lock);
 
        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);
@@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev)
        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;
 
-       spin_unlock(&tp->lock);
+       spin_unlock_bh(&tp->lock);
 
        if (linkmesg)
                tg3_link_report(tp);
@@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp)
 
        tg3_nvram_lock(tp);
 
-       tg3_mdio_stop(tp);
-
        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
 
        /* No matching tg3_nvram_unlock() after this because
@@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev)
 
        del_timer_sync(&tp->timer);
 
+       tg3_phy_stop(tp);
+
        tg3_full_lock(tp, 1);
 #if 0
        tg3_dump_state(tp);
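
The tg3 hunks above remove the TG3_FLG3_MDIOBUS_PAUSED flag, under which MDIO accesses during a chip reset simply returned -EAGAIN, and instead serialize tg3_mdio_read()/tg3_mdio_write() and tg3_adjust_link() on tp->lock taken with bottom halves disabled; tg3_close() now stops the PHY before taking the full lock. Note how the early error returns become a single exit through a local variable so the unlock can never be skipped. A small sketch of that single-exit locking shape, with a generic pthread mutex standing in for the kernel spinlock and hypothetical names:

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a hardware PHY write that may fail. */
static int demo_phy_write(int reg, unsigned short val)
{
        (void)reg;
        (void)val;
        return 0;               /* pretend it succeeded */
}

/* Single exit point: the error is recorded in ret so the unlock
 * is never skipped, mirroring the reworked tg3_mdio_write(). */
static int demo_mdio_write(int reg, unsigned short val)
{
        int ret = 0;

        pthread_mutex_lock(&demo_lock);
        if (demo_phy_write(reg, val))
                ret = -5;       /* -EIO */
        pthread_mutex_unlock(&demo_lock);

        return ret;
}

int main(void)
{
        return demo_mdio_write(0, 0);
}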
index 82b45d8797b4eb6b2e6b8ad5ba6f08ed2a23709c..bab7940158e65d208ca734f22d12e432620f69bb 100644 (file)
@@ -2412,7 +2412,6 @@ struct ring_info {
 
 struct tx_ring_info {
        struct sk_buff                  *skb;
-       u32                             prev_vlan_tag;
 };
 
 struct tg3_config_info {
@@ -2749,7 +2748,6 @@ struct tg3 {
 #define TG3_FLG3_5701_DMA_BUG          0x00000008
 #define TG3_FLG3_USE_PHYLIB            0x00000010
 #define TG3_FLG3_MDIOBUS_INITED                0x00000020
-#define TG3_FLG3_MDIOBUS_PAUSED                0x00000040
 #define TG3_FLG3_PHY_CONNECTED         0x00000080
 #define TG3_FLG3_RGMII_STD_IBND_DISABLE        0x00000100
 #define TG3_FLG3_RGMII_EXT_IBND_RX_EN  0x00000200
index d032bba9bc4cde252ef23518428f96eaa33df113..0caa8008c51c0483b69e0bfa553555aa7fe05864 100644 (file)
@@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
                goto halt_fail_and_release;
        }
        memcpy(net->dev_addr, bp, ETH_ALEN);
+       memcpy(net->perm_addr, bp, ETH_ALEN);
 
        /* set a nonzero filter to enable data transfers */
        memset(u.set, 0, sizeof *u.set);
index d445845f2779ce1b57d6560fac856d4f9fe07888..8d009760277cede76fead3a54edf2c7669a7acbc 100644 (file)
@@ -948,7 +948,7 @@ free:
        return err;
 }
 
-static void virtnet_remove(struct virtio_device *vdev)
+static void __devexit virtnet_remove(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
        struct sk_buff *skb;
index 49ea9c92b7e6ea29e733ad2bac893660a074243a..d7a764a2fc1abc3e809af4ae277925e20b331ca6 100644 (file)
@@ -31,13 +31,12 @@ config STRIP
        ---help---
          Say Y if you have a Metricom radio and intend to use Starmode Radio
          IP. STRIP is a radio protocol developed for the MosquitoNet project
-         (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet
-         traffic using Metricom radios.  Metricom radios are small, battery
-         powered, 100kbit/sec packet radio transceivers, about the size and
-         weight of a cellular telephone. (You may also have heard them called
-         "Metricom modems" but we avoid the term "modem" because it misleads
-         many people into thinking that you can plug a Metricom modem into a
-         phone line and use it as a modem.)
+         to send Internet traffic using Metricom radios.  Metricom radios are
+         small, battery powered, 100kbit/sec packet radio transceivers, about
+         the size and weight of a cellular telephone. (You may also have heard
+         them called "Metricom modems" but we avoid the term "modem" because
+         it misleads many people into thinking that you can plug a Metricom
+         modem into a phone line and use it as a modem.)
 
          You can use STRIP on any Linux machine with a serial port, although
          it is obviously most useful for people with laptop computers. If you
index b3e5cf3735b00ddfc69e99f38d500c5a5e1e86fb..dbd488da18b1ffd8d3ce4ecdc61350b6fa1d046e 100644 (file)
@@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
        u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
        u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
        int chain, idx, i;
-       u8 f;
+       u32 phy_data = 0;
+       u8 f, tmp;
 
        switch (channel->band) {
        case IEEE80211_BAND_2GHZ:
@@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
                }
 
                for (i = 0; i < 76; i++) {
-                       u32 phy_data;
-                       u8 tmp;
-
                        if (i < 25) {
                                tmp = ar9170_interpolate_val(i, &pwrs[0][0],
                                                             &vpds[0][0]);
index e96091b314995ec0ebd2f66bfc860a61ca255ac9..9c1397996e0a57753e97f372bce2ad1296abcbb9 100644 (file)
@@ -340,10 +340,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
                        q->mmio_base + B43_PIO_TXDATA,
                        sizeof(u16));
        if (data_len & 1) {
+               u8 tail[2] = { 0, };
+
                /* Write the last byte. */
                ctl &= ~B43_PIO_TXCTL_WRITEHI;
                b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
-               b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]);
+               tail[0] = data[data_len - 1];
+               ssb_block_write(dev->dev, tail, 2,
+                               q->mmio_base + B43_PIO_TXDATA,
+                               sizeof(u16));
        }
 
        return ctl;
@@ -386,26 +391,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
                        q->mmio_base + B43_PIO8_TXDATA,
                        sizeof(u32));
        if (data_len & 3) {
-               u32 value = 0;
+               u8 tail[4] = { 0, };
 
                /* Write the last few bytes. */
                ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
                         B43_PIO8_TXCTL_24_31);
-               data = &(data[data_len - 1]);
                switch (data_len & 3) {
                case 3:
-                       ctl |= B43_PIO8_TXCTL_16_23;
-                       value |= (u32)(*data) << 16;
-                       data--;
+                       ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
+                       tail[0] = data[data_len - 3];
+                       tail[1] = data[data_len - 2];
+                       tail[2] = data[data_len - 1];
+                       break;
                case 2:
                        ctl |= B43_PIO8_TXCTL_8_15;
-                       value |= (u32)(*data) << 8;
-                       data--;
+                       tail[0] = data[data_len - 2];
+                       tail[1] = data[data_len - 1];
+                       break;
                case 1:
-                       value |= (u32)(*data);
+                       tail[0] = data[data_len - 1];
+                       break;
                }
                b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
-               b43_piotx_write32(q, B43_PIO8_TXDATA, value);
+               ssb_block_write(dev->dev, tail, 4,
+                               q->mmio_base + B43_PIO8_TXDATA,
+                               sizeof(u32));
        }
 
        return ctl;
@@ -693,21 +703,25 @@ data_ready:
                               q->mmio_base + B43_PIO8_RXDATA,
                               sizeof(u32));
                if (len & 3) {
-                       u32 value;
-                       char *data;
+                       u8 tail[4] = { 0, };
 
                        /* Read the last few bytes. */
-                       value = b43_piorx_read32(q, B43_PIO8_RXDATA);
-                       data = &(skb->data[len + padding - 1]);
+                       ssb_block_read(dev->dev, tail, 4,
+                                      q->mmio_base + B43_PIO8_RXDATA,
+                                      sizeof(u32));
                        switch (len & 3) {
                        case 3:
-                               *data = (value >> 16);
-                               data--;
+                               skb->data[len + padding - 3] = tail[0];
+                               skb->data[len + padding - 2] = tail[1];
+                               skb->data[len + padding - 1] = tail[2];
+                               break;
                        case 2:
-                               *data = (value >> 8);
-                               data--;
+                               skb->data[len + padding - 2] = tail[0];
+                               skb->data[len + padding - 1] = tail[1];
+                               break;
                        case 1:
-                               *data = value;
+                               skb->data[len + padding - 1] = tail[0];
+                               break;
                        }
                }
        } else {
@@ -715,11 +729,13 @@ data_ready:
                               q->mmio_base + B43_PIO_RXDATA,
                               sizeof(u16));
                if (len & 1) {
-                       u16 value;
+                       u8 tail[2] = { 0, };
 
                        /* Read the last byte. */
-                       value = b43_piorx_read16(q, B43_PIO_RXDATA);
-                       skb->data[len + padding - 1] = value;
+                       ssb_block_read(dev->dev, tail, 2,
+                                      q->mmio_base + B43_PIO_RXDATA,
+                                      sizeof(u16));
+                       skb->data[len + padding - 1] = tail[0];
                }
        }
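
The b43 hunks above change how the odd 1-3 trailing bytes of a PIO transfer are handled on both the TX and RX paths: instead of packing them into a shifted 16/32-bit register access, they are copied through a small zero-padded buffer and moved with the same ssb_block_write()/ssb_block_read() helpers as the aligned part of the frame, keeping every data access on one code path. A sketch of the TX-side tail copy, with plain memcpy standing in for the bus helper and hypothetical names:

#include <stdio.h>
#include <string.h>

/* Stand-in for ssb_block_write(): just capture what would go out on the bus. */
static unsigned char wire[4];

static void demo_block_write(const unsigned char *buf, size_t len)
{
        memcpy(wire, buf, len);
}

/* Write a buffer whose length is not a multiple of 4: the aligned part goes
 * out directly, the 1-3 remaining bytes via a zero-padded tail buffer. */
static void demo_tx(const unsigned char *data, size_t data_len)
{
        size_t aligned = data_len & ~(size_t)3;

        /* demo_block_write(data, aligned); ... aligned part elided here */

        if (data_len & 3) {
                unsigned char tail[4] = { 0, };

                memcpy(tail, data + aligned, data_len & 3);
                demo_block_write(tail, sizeof(tail));
        }
}

int main(void)
{
        const unsigned char frame[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };

        demo_tx(frame, sizeof(frame));
        printf("tail on wire: %02x %02x %02x %02x\n",
               wire[0], wire[1], wire[2], wire[3]);     /* 55 66 77 00 */
        return 0;
}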
 
index a95caa01414331bfabb356658b43c628c49cddb8..2716b91ba9fa753b879c80e0de57ae430356a575 100644 (file)
@@ -99,6 +99,8 @@ static struct iwl_lib_ops iwl1000_lib = {
        .setup_deferred_work = iwl5000_setup_deferred_work,
        .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
        .load_ucode = iwl5000_load_ucode,
+       .dump_nic_event_log = iwl_dump_nic_event_log,
+       .dump_nic_error_log = iwl_dump_nic_error_log,
        .init_alive_start = iwl5000_init_alive_start,
        .alive_notify = iwl5000_alive_notify,
        .send_tx_power = iwl5000_send_tx_power,
index e9a685d8e3a1650d9320ab06e6d49010607f5df4..e70c5b0af364d35b796a6bad2447aaedfd82e4d5 100644 (file)
@@ -2839,6 +2839,8 @@ static struct iwl_lib_ops iwl3945_lib = {
        .txq_free_tfd = iwl3945_hw_txq_free_tfd,
        .txq_init = iwl3945_hw_tx_queue_init,
        .load_ucode = iwl3945_load_bsm,
+       .dump_nic_event_log = iwl3945_dump_nic_event_log,
+       .dump_nic_error_log = iwl3945_dump_nic_error_log,
        .apm_ops = {
                .init = iwl3945_apm_init,
                .reset = iwl3945_apm_reset,
index f24036909916066d4724c59f68734c8c031727d4..21679bf3a1aaae4ac376338407ead09ec16ec29f 100644 (file)
@@ -209,6 +209,8 @@ extern int __must_check iwl3945_send_cmd(struct iwl_priv *priv,
                                         struct iwl_host_cmd *cmd);
 extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
                                        struct ieee80211_hdr *hdr,int left);
+extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv);
+extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
 
 /*
  * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
index 3259b88415445ca0428bc03c41fe4e5110f7b015..a22a0501c1901a1fa996f538e5f996fcceee378b 100644 (file)
@@ -2298,6 +2298,8 @@ static struct iwl_lib_ops iwl4965_lib = {
        .alive_notify = iwl4965_alive_notify,
        .init_alive_start = iwl4965_init_alive_start,
        .load_ucode = iwl4965_load_bsm,
+       .dump_nic_event_log = iwl_dump_nic_event_log,
+       .dump_nic_error_log = iwl_dump_nic_error_log,
        .apm_ops = {
                .init = iwl4965_apm_init,
                .reset = iwl4965_apm_reset,
index a6391c7fea532eb7e0d73b59beffec10e2abc322..eb08f4411000cdd8f430de8b0342f97dd8627521 100644 (file)
@@ -1535,6 +1535,8 @@ struct iwl_lib_ops iwl5000_lib = {
        .rx_handler_setup = iwl5000_rx_handler_setup,
        .setup_deferred_work = iwl5000_setup_deferred_work,
        .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+       .dump_nic_event_log = iwl_dump_nic_event_log,
+       .dump_nic_error_log = iwl_dump_nic_error_log,
        .load_ucode = iwl5000_load_ucode,
        .init_alive_start = iwl5000_init_alive_start,
        .alive_notify = iwl5000_alive_notify,
@@ -1585,6 +1587,8 @@ static struct iwl_lib_ops iwl5150_lib = {
        .rx_handler_setup = iwl5000_rx_handler_setup,
        .setup_deferred_work = iwl5000_setup_deferred_work,
        .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+       .dump_nic_event_log = iwl_dump_nic_event_log,
+       .dump_nic_error_log = iwl_dump_nic_error_log,
        .load_ucode = iwl5000_load_ucode,
        .init_alive_start = iwl5000_init_alive_start,
        .alive_notify = iwl5000_alive_notify,
index 82b9c93dff54670a59fdd3e41bbb75795c37ac9b..c295b8ee922896ab16be8812bef15b0641b7cf2f 100644 (file)
@@ -100,6 +100,8 @@ static struct iwl_lib_ops iwl6000_lib = {
        .setup_deferred_work = iwl5000_setup_deferred_work,
        .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
        .load_ucode = iwl5000_load_ucode,
+       .dump_nic_event_log = iwl_dump_nic_event_log,
+       .dump_nic_error_log = iwl_dump_nic_error_log,
        .init_alive_start = iwl5000_init_alive_start,
        .alive_notify = iwl5000_alive_notify,
        .send_tx_power = iwl5000_send_tx_power,
index 00457bff1ed157c04a235af371daabddd2848ec8..cdc07c477457b557a82288dd676eaeca6a4eee71 100644 (file)
@@ -1526,6 +1526,191 @@ static int iwl_read_ucode(struct iwl_priv *priv)
        return ret;
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+static const char *desc_lookup_text[] = {
+       "OK",
+       "FAIL",
+       "BAD_PARAM",
+       "BAD_CHECKSUM",
+       "NMI_INTERRUPT_WDG",
+       "SYSASSERT",
+       "FATAL_ERROR",
+       "BAD_COMMAND",
+       "HW_ERROR_TUNE_LOCK",
+       "HW_ERROR_TEMPERATURE",
+       "ILLEGAL_CHAN_FREQ",
+       "VCC_NOT_STABLE",
+       "FH_ERROR",
+       "NMI_INTERRUPT_HOST",
+       "NMI_INTERRUPT_ACTION_PT",
+       "NMI_INTERRUPT_UNKNOWN",
+       "UCODE_VERSION_MISMATCH",
+       "HW_ERROR_ABS_LOCK",
+       "HW_ERROR_CAL_LOCK_FAIL",
+       "NMI_INTERRUPT_INST_ACTION_PT",
+       "NMI_INTERRUPT_DATA_ACTION_PT",
+       "NMI_TRM_HW_ER",
+       "NMI_INTERRUPT_TRM",
+       "NMI_INTERRUPT_BREAK_POINT"
+       "DEBUG_0",
+       "DEBUG_1",
+       "DEBUG_2",
+       "DEBUG_3",
+       "UNKNOWN"
+};
+
+static const char *desc_lookup(int i)
+{
+       int max = ARRAY_SIZE(desc_lookup_text) - 1;
+
+       if (i < 0 || i > max)
+               i = max;
+
+       return desc_lookup_text[i];
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+void iwl_dump_nic_error_log(struct iwl_priv *priv)
+{
+       u32 data2, line;
+       u32 desc, time, count, base, data1;
+       u32 blink1, blink2, ilink1, ilink2;
+
+       if (priv->ucode_type == UCODE_INIT)
+               base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+       else
+               base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+
+       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
+               return;
+       }
+
+       count = iwl_read_targ_mem(priv, base);
+
+       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+               IWL_ERR(priv, "Start IWL Error Log Dump:\n");
+               IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
+                       priv->status, count);
+       }
+
+       desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
+       blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
+       blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
+       ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
+       ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
+       data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
+       data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
+       line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
+       time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
+
+       IWL_ERR(priv, "Desc                               Time       "
+               "data1      data2      line\n");
+       IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
+               desc_lookup(desc), desc, time, data1, data2, line);
+       IWL_ERR(priv, "blink1  blink2  ilink1  ilink2\n");
+       IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
+               ilink1, ilink2);
+
+}
+
+#define EVENT_START_OFFSET  (4 * sizeof(u32))
+
+/**
+ * iwl_print_event_log - Dump error event log to syslog
+ *
+ */
+static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+                               u32 num_events, u32 mode)
+{
+       u32 i;
+       u32 base;       /* SRAM byte address of event log header */
+       u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+       u32 ptr;        /* SRAM byte address of log data */
+       u32 ev, time, data; /* event log data */
+
+       if (num_events == 0)
+               return;
+       if (priv->ucode_type == UCODE_INIT)
+               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+       else
+               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+
+       if (mode == 0)
+               event_size = 2 * sizeof(u32);
+       else
+               event_size = 3 * sizeof(u32);
+
+       ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+       /* "time" is actually "data" for mode 0 (no timestamp).
+       * place event id # at far right for easier visual parsing. */
+       for (i = 0; i < num_events; i++) {
+               ev = iwl_read_targ_mem(priv, ptr);
+               ptr += sizeof(u32);
+               time = iwl_read_targ_mem(priv, ptr);
+               ptr += sizeof(u32);
+               if (mode == 0) {
+                       /* data, ev */
+                       IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
+               } else {
+                       data = iwl_read_targ_mem(priv, ptr);
+                       ptr += sizeof(u32);
+                       IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+                                       time, data, ev);
+               }
+       }
+}
+
+void iwl_dump_nic_event_log(struct iwl_priv *priv)
+{
+       u32 base;       /* SRAM byte address of event log header */
+       u32 capacity;   /* event log capacity in # entries */
+       u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+       u32 num_wraps;  /* # times uCode wrapped to top of log */
+       u32 next_entry; /* index of next entry to be written by uCode */
+       u32 size;       /* # entries that we'll print */
+
+       if (priv->ucode_type == UCODE_INIT)
+               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+       else
+               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+
+       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
+               return;
+       }
+
+       /* event log header */
+       capacity = iwl_read_targ_mem(priv, base);
+       mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
+       num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
+       next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+
+       size = num_wraps ? capacity : next_entry;
+
+       /* bail out if nothing in log */
+       if (size == 0) {
+               IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
+               return;
+       }
+
+       IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
+                       size, num_wraps);
+
+       /* if uCode has wrapped back to top of log, start at the oldest entry,
+        * i.e the next one that uCode would fill. */
+       if (num_wraps)
+               iwl_print_event_log(priv, next_entry,
+                                       capacity - next_entry, mode);
+       /* (then/else) start at top of log */
+       iwl_print_event_log(priv, 0, next_entry, mode);
+
+}
+#endif
+
 /**
  * iwl_alive_start - called after REPLY_ALIVE notification received
  *                   from protocol/runtime uCode (initialization uCode's
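
One detail worth flagging in the desc_lookup_text[] table added above (and in the identical copy removed from the second file below): there is no comma between "NMI_INTERRUPT_BREAK_POINT" and "DEBUG_0", so C string-literal concatenation merges them into a single entry and every later name is shifted down by one index. A tiny standalone illustration of the pitfall, using a made-up table:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical reduced table: the missing comma makes the compiler
 * concatenate the two adjacent literals into one element. */
static const char *demo_lookup[] = {
        "OK",
        "FAIL",
        "BREAK_POINT"   /* <-- missing comma */
        "DEBUG_0",
        "UNKNOWN",
};

int main(void)
{
        printf("entries: %zu\n", ARRAY_SIZE(demo_lookup));      /* 4, not 5 */
        printf("index 2: %s\n", demo_lookup[2]);                /* BREAK_POINTDEBUG_0 */
        return 0;
}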
index fd26c0dc9c544124b3bfb861519b058fdcb5e2c8..484d5c1a7312053121ac09ac75d39b435492701a 100644 (file)
@@ -1309,189 +1309,6 @@ static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
        IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
        IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
 }
-
-static const char *desc_lookup_text[] = {
-       "OK",
-       "FAIL",
-       "BAD_PARAM",
-       "BAD_CHECKSUM",
-       "NMI_INTERRUPT_WDG",
-       "SYSASSERT",
-       "FATAL_ERROR",
-       "BAD_COMMAND",
-       "HW_ERROR_TUNE_LOCK",
-       "HW_ERROR_TEMPERATURE",
-       "ILLEGAL_CHAN_FREQ",
-       "VCC_NOT_STABLE",
-       "FH_ERROR",
-       "NMI_INTERRUPT_HOST",
-       "NMI_INTERRUPT_ACTION_PT",
-       "NMI_INTERRUPT_UNKNOWN",
-       "UCODE_VERSION_MISMATCH",
-       "HW_ERROR_ABS_LOCK",
-       "HW_ERROR_CAL_LOCK_FAIL",
-       "NMI_INTERRUPT_INST_ACTION_PT",
-       "NMI_INTERRUPT_DATA_ACTION_PT",
-       "NMI_TRM_HW_ER",
-       "NMI_INTERRUPT_TRM",
-       "NMI_INTERRUPT_BREAK_POINT"
-       "DEBUG_0",
-       "DEBUG_1",
-       "DEBUG_2",
-       "DEBUG_3",
-       "UNKNOWN"
-};
-
-static const char *desc_lookup(int i)
-{
-       int max = ARRAY_SIZE(desc_lookup_text) - 1;
-
-       if (i < 0 || i > max)
-               i = max;
-
-       return desc_lookup_text[i];
-}
-
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-static void iwl_dump_nic_error_log(struct iwl_priv *priv)
-{
-       u32 data2, line;
-       u32 desc, time, count, base, data1;
-       u32 blink1, blink2, ilink1, ilink2;
-
-       if (priv->ucode_type == UCODE_INIT)
-               base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
-       else
-               base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
-       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
-               IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
-               return;
-       }
-
-       count = iwl_read_targ_mem(priv, base);
-
-       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
-               IWL_ERR(priv, "Start IWL Error Log Dump:\n");
-               IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
-                       priv->status, count);
-       }
-
-       desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
-       blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
-       blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
-       ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
-       ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
-       data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
-       data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
-       line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
-       time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
-
-       IWL_ERR(priv, "Desc                               Time       "
-               "data1      data2      line\n");
-       IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
-               desc_lookup(desc), desc, time, data1, data2, line);
-       IWL_ERR(priv, "blink1  blink2  ilink1  ilink2\n");
-       IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
-               ilink1, ilink2);
-
-}
-
-#define EVENT_START_OFFSET  (4 * sizeof(u32))
-
-/**
- * iwl_print_event_log - Dump error event log to syslog
- *
- */
-static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
-                               u32 num_events, u32 mode)
-{
-       u32 i;
-       u32 base;       /* SRAM byte address of event log header */
-       u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
-       u32 ptr;        /* SRAM byte address of log data */
-       u32 ev, time, data; /* event log data */
-
-       if (num_events == 0)
-               return;
-       if (priv->ucode_type == UCODE_INIT)
-               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
-       else
-               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-
-       if (mode == 0)
-               event_size = 2 * sizeof(u32);
-       else
-               event_size = 3 * sizeof(u32);
-
-       ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
-
-       /* "time" is actually "data" for mode 0 (no timestamp).
-       * place event id # at far right for easier visual parsing. */
-       for (i = 0; i < num_events; i++) {
-               ev = iwl_read_targ_mem(priv, ptr);
-               ptr += sizeof(u32);
-               time = iwl_read_targ_mem(priv, ptr);
-               ptr += sizeof(u32);
-               if (mode == 0) {
-                       /* data, ev */
-                       IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
-               } else {
-                       data = iwl_read_targ_mem(priv, ptr);
-                       ptr += sizeof(u32);
-                       IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
-                                       time, data, ev);
-               }
-       }
-}
-
-void iwl_dump_nic_event_log(struct iwl_priv *priv)
-{
-       u32 base;       /* SRAM byte address of event log header */
-       u32 capacity;   /* event log capacity in # entries */
-       u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
-       u32 num_wraps;  /* # times uCode wrapped to top of log */
-       u32 next_entry; /* index of next entry to be written by uCode */
-       u32 size;       /* # entries that we'll print */
-
-       if (priv->ucode_type == UCODE_INIT)
-               base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
-       else
-               base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-
-       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
-               IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
-               return;
-       }
-
-       /* event log header */
-       capacity = iwl_read_targ_mem(priv, base);
-       mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
-       num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
-       next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
-
-       size = num_wraps ? capacity : next_entry;
-
-       /* bail out if nothing in log */
-       if (size == 0) {
-               IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
-               return;
-       }
-
-       IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
-                       size, num_wraps);
-
-       /* if uCode has wrapped back to top of log, start at the oldest entry,
-        * i.e the next one that uCode would fill. */
-       if (num_wraps)
-               iwl_print_event_log(priv, next_entry,
-                                       capacity - next_entry, mode);
-       /* (then/else) start at top of log */
-       iwl_print_event_log(priv, 0, next_entry, mode);
-
-}
 #endif
 /**
  * iwl_irq_handle_error - called for HW or SW error interrupt from card
@@ -1506,8 +1323,8 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
 
 #ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) {
-               iwl_dump_nic_error_log(priv);
-               iwl_dump_nic_event_log(priv);
+               priv->cfg->ops->lib->dump_nic_error_log(priv);
+               priv->cfg->ops->lib->dump_nic_event_log(priv);
                iwl_print_rx_config_cmd(priv);
        }
 #endif
index 7ff9ffb2b7026c2280e7623eaa77d95f4ad89e2d..e50103a956b1052015ddcd072e557e4f694c0540 100644 (file)
@@ -166,6 +166,8 @@ struct iwl_lib_ops {
        int (*is_valid_rtc_data_addr)(u32 addr);
        /* 1st ucode load */
        int (*load_ucode)(struct iwl_priv *priv);
+       void (*dump_nic_event_log)(struct iwl_priv *priv);
+       void (*dump_nic_error_log)(struct iwl_priv *priv);
        /* power management */
        struct iwl_apm_ops apm_ops;
 
@@ -540,7 +542,19 @@ int iwl_pci_resume(struct pci_dev *pdev);
 /*****************************************************
 *  Error Handling Debugging
 ******************************************************/
+#ifdef CONFIG_IWLWIFI_DEBUG
 void iwl_dump_nic_event_log(struct iwl_priv *priv);
+void iwl_dump_nic_error_log(struct iwl_priv *priv);
+#else
+static inline void iwl_dump_nic_event_log(struct iwl_priv *priv)
+{
+}
+
+static inline void iwl_dump_nic_error_log(struct iwl_priv *priv)
+{
+}
+#endif
+
 void iwl_clear_isr_stats(struct iwl_priv *priv);
 
 /*****************************************************
index fb844859a443733439e15019d11003dfbf8a5c99..a198bcf6102287121da544906fe333c6838099e6 100644 (file)
@@ -410,7 +410,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
                pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
                hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
                                   buf_size - pos, 0);
-               pos += strlen(buf);
+               pos += strlen(buf + pos);
                if (buf_size - pos > 0)
                        buf[pos++] = '\n';
        }
@@ -436,7 +436,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
        if (sscanf(buf, "%d", &event_log_flag) != 1)
                return -EFAULT;
        if (event_log_flag == 1)
-               iwl_dump_nic_event_log(priv);
+               priv->cfg->ops->lib->dump_nic_event_log(priv);
 
        return count;
 }
@@ -909,7 +909,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
                                                "0x%.4x ", ofs);
                                hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
                                                   buf + pos, bufsz - pos, 0);
-                               pos += strlen(buf);
+                               pos += strlen(buf + pos);
                                if (bufsz - pos > 0)
                                        buf[pos++] = '\n';
                        }
@@ -932,7 +932,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
                                                "0x%.4x ", ofs);
                                hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
                                                   buf + pos, bufsz - pos, 0);
-                               pos += strlen(buf);
+                               pos += strlen(buf + pos);
                                if (bufsz - pos > 0)
                                        buf[pos++] = '\n';
                        }
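
The three one-line debugfs changes above all fix the same position-accounting slip: hex_dump_to_buffer() appends its output at buf + pos, so the cursor must advance by the length of the freshly appended chunk, strlen(buf + pos); strlen(buf) also counts the pos bytes already in the buffer and makes the cursor run ahead of the data. A tiny sketch of the append-and-advance pattern with hypothetical content:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[64] = "";
        size_t pos = 0;

        /* First chunk. */
        snprintf(buf + pos, sizeof(buf) - pos, "0x0000 ");
        pos += strlen(buf + pos);       /* advance by what was just appended */

        /* Second chunk: with the old 'pos += strlen(buf)' the first chunk
         * would have been counted twice and pos would overshoot the data. */
        snprintf(buf + pos, sizeof(buf) - pos, "de ad be ef");
        pos += strlen(buf + pos);

        printf("%s (pos=%zu)\n", buf, pos);
        return 0;
}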
index a7422e52d8836f8333137b79c631f7f0d0467db9..c18907544701c6a81a31857145a29b6bc05b9129 100644 (file)
@@ -197,6 +197,12 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
                pci_free_consistent(dev, priv->hw_params.tfd_size *
                                    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
+       /* deallocate arrays */
+       kfree(txq->cmd);
+       kfree(txq->meta);
+       txq->cmd = NULL;
+       txq->meta = NULL;
+
        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
 }
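
The hunk above appears to plug a leak: iwl_cmd_queue_free() released the DMA-coherent descriptor memory but not the host-side txq->cmd and txq->meta arrays, which are now freed and their pointers cleared before the whole descriptor is zeroed. A tiny sketch of the free-and-clear idiom with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct demo_txq {
        void *cmd;
        void *meta;
        /* DMA descriptors, indices, and so on would follow here */
};

/* Free the host-side arrays, clear the pointers, then wipe the struct
 * so a later re-init starts from a known-empty state. */
static void demo_txq_free(struct demo_txq *txq)
{
        free(txq->cmd);
        free(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        memset(txq, 0, sizeof(*txq));
}

int main(void)
{
        struct demo_txq txq = {
                .cmd  = malloc(32),
                .meta = malloc(32),
        };

        demo_txq_free(&txq);
        return 0;
}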
index 4f2d43937283ed02c5a21b8dc4338b81eb6f3de7..c390dbd877e4a59000571e2064e7eba4611bddbd 100644 (file)
@@ -1481,6 +1481,7 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
        tasklet_kill(&priv->irq_tasklet);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
 static const char *desc_lookup(int i)
 {
        switch (i) {
@@ -1504,7 +1505,7 @@ static const char *desc_lookup(int i)
 #define ERROR_START_OFFSET  (1 * sizeof(u32))
 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
 
-static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
+void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
 {
        u32 i;
        u32 desc, time, count, base, data1;
@@ -1598,7 +1599,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
        }
 }
 
-static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
+void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
 {
        u32 base;       /* SRAM byte address of event log header */
        u32 capacity;   /* event log capacity in # entries */
@@ -1640,6 +1641,16 @@ static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
        iwl3945_print_event_log(priv, 0, next_entry, mode);
 
 }
+#else
+void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
+{
+}
+
+void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
+{
+}
+
+#endif
 
 static void iwl3945_irq_tasklet(struct iwl_priv *priv)
 {
@@ -3683,21 +3694,6 @@ static ssize_t dump_error_log(struct device *d,
 
 static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
 
-static ssize_t dump_event_log(struct device *d,
-                             struct device_attribute *attr,
-                             const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       char *p = (char *)buf;
-
-       if (p[0] == '1')
-               iwl3945_dump_nic_event_log(priv);
-
-       return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
-
 /*****************************************************************************
  *
  * driver setup and tear down
@@ -3742,7 +3738,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
        &dev_attr_antenna.attr,
        &dev_attr_channels.attr,
        &dev_attr_dump_errors.attr,
-       &dev_attr_dump_events.attr,
        &dev_attr_flags.attr,
        &dev_attr_filter_flags.attr,
 #ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
index 896f532182f06e27ba9ac1669ddd41364883d9a0..38cfd79e05902ceeff7f27731d5f39fe37503943 100644 (file)
@@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
                data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
                if (WARN_ON(!data->beacon_int))
                        data->beacon_int = 1;
+               if (data->started)
+                       mod_timer(&data->beacon_timer,
+                                 jiffies + data->beacon_int);
        }
 
        if (changed & BSS_CHANGED_ERP_CTS_PROT) {
index 1cbd9b4a3efc9c560784280e7c76f6160be00161..b8f5ee33445e15d877c662ad315e6598af01b794 100644 (file)
@@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = {
        /* Huawei-3Com */
        { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) },
        /* Hercules */
+       { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) },
        { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) },
        { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) },
        /* Linksys */
index fbf965b31c14b3cacf165a4b9f82c6e5a667508d..17f38a781d47b5d3a2510da5a2e680e7bf8f2346 100644 (file)
@@ -192,6 +192,10 @@ config PCMCIA_AU1X00
        tristate "Au1x00 pcmcia support"
        depends on SOC_AU1X00 && PCMCIA
 
+config PCMCIA_BCM63XX
+       tristate "bcm63xx pcmcia support"
+       depends on BCM63XX && PCMCIA
+
 config PCMCIA_SA1100
        tristate "SA1100 support"
        depends on ARM && ARCH_SA1100 && PCMCIA
index 3247828aa203b8f73c07cc1af1851805f696dc2d..a03a38acd77d5a1ed1b34f63a24b03611c67d47f 100644 (file)
@@ -27,6 +27,7 @@ obj-$(CONFIG_PCMCIA_SA1111)                   += sa11xx_core.o sa1111_cs.o
 obj-$(CONFIG_M32R_PCC)                         += m32r_pcc.o
 obj-$(CONFIG_M32R_CFC)                         += m32r_cfc.o
 obj-$(CONFIG_PCMCIA_AU1X00)                    += au1x00_ss.o
+obj-$(CONFIG_PCMCIA_BCM63XX)                   += bcm63xx_pcmcia.o
 obj-$(CONFIG_PCMCIA_VRC4171)                   += vrc4171_card.o
 obj-$(CONFIG_PCMCIA_VRC4173)                   += vrc4173_cardu.o
 obj-$(CONFIG_OMAP_CF)                          += omap_cf.o
index 9e1140f085fdfb70adca24d75e33d2a5b007b8e7..e1dccedc5960e30fd68c3e43e983090966d92833 100644 (file)
@@ -363,7 +363,7 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
        struct at91_cf_socket   *cf = platform_get_drvdata(pdev);
        struct at91_cf_data     *board = cf->board;
 
-       pcmcia_socket_dev_suspend(&pdev->dev, mesg);
+       pcmcia_socket_dev_suspend(&pdev->dev);
        if (device_may_wakeup(&pdev->dev)) {
                enable_irq_wake(board->det_pin);
                if (board->irq_pin)
index 90013341cd5f8bd90f7c34f228984a6325baa3cd..02088704ac2ca8fcac8115a6ca211227b899f982 100644 (file)
@@ -515,7 +515,7 @@ static int au1x00_drv_pcmcia_probe(struct platform_device *dev)
 static int au1x00_drv_pcmcia_suspend(struct platform_device *dev,
                                     pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int au1x00_drv_pcmcia_resume(struct platform_device *dev)
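
The at91_cf and au1x00 hunks above, together with the bfin_cf and core hunks near the end of this section, are one API change: pcmcia_socket_dev_suspend() loses its pm_message_t argument, so socket drivers simply stop forwarding the state they receive in their own suspend callbacks. A sketch of the resulting callback shape, with hypothetical demo types standing in for the kernel structures:

#include <stdio.h>

struct demo_device {
        const char *name;
};

struct demo_platform_device {
        struct demo_device dev;
};

/* Stand-in for the core helper, which now only needs the device. */
static int demo_socket_dev_suspend(struct demo_device *dev)
{
        printf("suspending sockets on %s\n", dev->name);
        return 0;
}

/* Driver suspend callback: the power-state argument is received but
 * no longer forwarded to the core. */
static int demo_drv_suspend(struct demo_platform_device *pdev, int state)
{
        (void)state;
        return demo_socket_dev_suspend(&pdev->dev);
}

int main(void)
{
        struct demo_platform_device pdev = { .dev = { .name = "demo_cf" } };

        return demo_drv_suspend(&pdev, 3);
}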
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
new file mode 100644 (file)
index 0000000..bc88a3b
--- /dev/null
@@ -0,0 +1,536 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+#include "bcm63xx_pcmcia.h"
+
+#define PFX    "bcm63xx_pcmcia: "
+
+#ifdef CONFIG_CARDBUS
+/* if cardbus is used, platform device needs reference to actual pci
+ * device */
+static struct pci_dev *bcm63xx_cb_dev;
+#endif
+
+/*
+ * read/write helper for pcmcia regs
+ */
+static inline u32 pcmcia_readl(struct bcm63xx_pcmcia_socket *skt, u32 off)
+{
+       return bcm_readl(skt->base + off);
+}
+
+static inline void pcmcia_writel(struct bcm63xx_pcmcia_socket *skt,
+                                u32 val, u32 off)
+{
+       bcm_writel(val, skt->base + off);
+}
+
+/*
+ * This callback should (re-)initialise the socket, turn on status
+ * interrupts and PCMCIA bus, and wait for power to stabilise so that
+ * the card status signals report correctly.
+ *
+ * Hardware cannot do that.
+ */
+static int bcm63xx_pcmcia_sock_init(struct pcmcia_socket *sock)
+{
+       return 0;
+}
+
+/*
+ * This callback should remove power on the socket, disable IRQs from
+ * the card, turn off status interrupts, and disable the PCMCIA bus.
+ *
+ * Hardware cannot do that.
+ */
+static int bcm63xx_pcmcia_suspend(struct pcmcia_socket *sock)
+{
+       return 0;
+}
+
+/*
+ * Implements the set_socket() operation for the in-kernel PCMCIA
+ * service (formerly SS_SetSocket in Card Services). We more or
+ * less punt all of this work and let the kernel handle the details
+ * of power configuration, reset, &c. We also record the value of
+ * `state' in order to regurgitate it to the PCMCIA core later.
+ */
+static int bcm63xx_pcmcia_set_socket(struct pcmcia_socket *sock,
+                                    socket_state_t *state)
+{
+       struct bcm63xx_pcmcia_socket *skt;
+       unsigned long flags;
+       u32 val;
+
+       skt = sock->driver_data;
+
+       spin_lock_irqsave(&skt->lock, flags);
+
+       /* note: hardware cannot control socket power, so we will
+        * always report SS_POWERON */
+
+       /* apply socket reset */
+       val = pcmcia_readl(skt, PCMCIA_C1_REG);
+       if (state->flags & SS_RESET)
+               val |= PCMCIA_C1_RESET_MASK;
+       else
+               val &= ~PCMCIA_C1_RESET_MASK;
+
+       /* reverse reset logic for cardbus card */
+       if (skt->card_detected && (skt->card_type & CARD_CARDBUS))
+               val ^= PCMCIA_C1_RESET_MASK;
+
+       pcmcia_writel(skt, val, PCMCIA_C1_REG);
+
+       /* keep requested state for event reporting */
+       skt->requested_state = *state;
+
+       spin_unlock_irqrestore(&skt->lock, flags);
+
+       return 0;
+}
+
+/*
+ * identify cardtype from VS[12] input, CD[12] input while only VS2 is
+ * floating, and CD[12] input while only VS1 is floating
+ */
+enum {
+       IN_VS1 = (1 << 0),
+       IN_VS2 = (1 << 1),
+       IN_CD1_VS2H = (1 << 2),
+       IN_CD2_VS2H = (1 << 3),
+       IN_CD1_VS1H = (1 << 4),
+       IN_CD2_VS1H = (1 << 5),
+};
+
+static const u8 vscd_to_cardtype[] = {
+
+       /* VS1 float, VS2 float */
+       [IN_VS1 | IN_VS2] = (CARD_PCCARD | CARD_5V),
+
+       /* VS1 grounded, VS2 float */
+       [IN_VS2] = (CARD_PCCARD | CARD_5V | CARD_3V),
+
+       /* VS1 grounded, VS2 grounded */
+       [0] = (CARD_PCCARD | CARD_5V | CARD_3V | CARD_XV),
+
+       /* VS1 tied to CD1, VS2 float */
+       [IN_VS1 | IN_VS2 | IN_CD1_VS1H] = (CARD_CARDBUS | CARD_3V),
+
+       /* VS1 grounded, VS2 tied to CD2 */
+       [IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V | CARD_XV),
+
+       /* VS1 tied to CD2, VS2 grounded */
+       [IN_VS1 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_3V | CARD_XV | CARD_YV),
+
+       /* VS1 float, VS2 grounded */
+       [IN_VS1] = (CARD_PCCARD | CARD_XV),
+
+       /* VS1 float, VS2 tied to CD2 */
+       [IN_VS1 | IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V),
+
+       /* VS1 float, VS2 tied to CD1 */
+       [IN_VS1 | IN_VS2 | IN_CD1_VS2H] = (CARD_CARDBUS | CARD_XV | CARD_YV),
+
+       /* VS1 tied to CD2, VS2 float */
+       [IN_VS1 | IN_VS2 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_YV),
+
+       /* VS2 grounded, VS1 is tied to CD1, CD2 is grounded */
+       [IN_VS1 | IN_CD1_VS1H] = 0, /* ignore cardbay */
+};
+
+/*
+ * poll hardware to check card insertion status
+ */
+static unsigned int __get_socket_status(struct bcm63xx_pcmcia_socket *skt)
+{
+       unsigned int stat;
+       u32 val;
+
+       stat = 0;
+
+       /* check CD for card presence */
+       val = pcmcia_readl(skt, PCMCIA_C1_REG);
+
+       if (!(val & PCMCIA_C1_CD1_MASK) && !(val & PCMCIA_C1_CD2_MASK))
+               stat |= SS_DETECT;
+
+       /* if new insertion, detect cardtype */
+       if ((stat & SS_DETECT) && !skt->card_detected) {
+               unsigned int stat = 0;
+
+               /* float VS1, float VS2 */
+               val |= PCMCIA_C1_VS1OE_MASK;
+               val |= PCMCIA_C1_VS2OE_MASK;
+               pcmcia_writel(skt, val, PCMCIA_C1_REG);
+
+               /* wait for output to stabilize and read VS[12] */
+               udelay(10);
+               val = pcmcia_readl(skt, PCMCIA_C1_REG);
+               stat |= (val & PCMCIA_C1_VS1_MASK) ? IN_VS1 : 0;
+               stat |= (val & PCMCIA_C1_VS2_MASK) ? IN_VS2 : 0;
+
+               /* drive VS1 low, float VS2 */
+               val &= ~PCMCIA_C1_VS1OE_MASK;
+               val |= PCMCIA_C1_VS2OE_MASK;
+               pcmcia_writel(skt, val, PCMCIA_C1_REG);
+
+               /* wait for output to stabilize and read CD[12] */
+               udelay(10);
+               val = pcmcia_readl(skt, PCMCIA_C1_REG);
+               stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS2H : 0;
+               stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS2H : 0;
+
+               /* float VS1, drive VS2 low */
+               val |= PCMCIA_C1_VS1OE_MASK;
+               val &= ~PCMCIA_C1_VS2OE_MASK;
+               pcmcia_writel(skt, val, PCMCIA_C1_REG);
+
+               /* wait for output to stabilize and read CD[12] */
+               udelay(10);
+               val = pcmcia_readl(skt, PCMCIA_C1_REG);
+               stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS1H : 0;
+               stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS1H : 0;
+
+               /* guess cardtype from all this */
+               skt->card_type = vscd_to_cardtype[stat];
+               if (!skt->card_type)
+                       dev_err(&skt->socket.dev, "unsupported card type\n");
+
+               /* drive both VS pin to 0 again */
+               val &= ~(PCMCIA_C1_VS1OE_MASK | PCMCIA_C1_VS2OE_MASK);
+
+               /* enable correct logic */
+               val &= ~(PCMCIA_C1_EN_PCMCIA_MASK | PCMCIA_C1_EN_CARDBUS_MASK);
+               if (skt->card_type & CARD_PCCARD)
+                       val |= PCMCIA_C1_EN_PCMCIA_MASK;
+               else
+                       val |= PCMCIA_C1_EN_CARDBUS_MASK;
+
+               pcmcia_writel(skt, val, PCMCIA_C1_REG);
+       }
+       skt->card_detected = (stat & SS_DETECT) ? 1 : 0;
+
+       /* report card type/voltage */
+       if (skt->card_type & CARD_CARDBUS)
+               stat |= SS_CARDBUS;
+       if (skt->card_type & CARD_3V)
+               stat |= SS_3VCARD;
+       if (skt->card_type & CARD_XV)
+               stat |= SS_XVCARD;
+       stat |= SS_POWERON;
+
+       if (gpio_get_value(skt->pd->ready_gpio))
+               stat |= SS_READY;
+
+       return stat;
+}
+
+/*
+ * core request to get current socket status
+ */
+static int bcm63xx_pcmcia_get_status(struct pcmcia_socket *sock,
+                                    unsigned int *status)
+{
+       struct bcm63xx_pcmcia_socket *skt;
+
+       skt = sock->driver_data;
+
+       spin_lock_bh(&skt->lock);
+       *status = __get_socket_status(skt);
+       spin_unlock_bh(&skt->lock);
+
+       return 0;
+}
+
+/*
+ * socket polling timer callback
+ */
+static void bcm63xx_pcmcia_poll(unsigned long data)
+{
+       struct bcm63xx_pcmcia_socket *skt;
+       unsigned int stat, events;
+
+       skt = (struct bcm63xx_pcmcia_socket *)data;
+
+       spin_lock_bh(&skt->lock);
+
+       stat = __get_socket_status(skt);
+
+       /* keep only changed bits, and mask with required one from the
+        * core */
+       events = (stat ^ skt->old_status) & skt->requested_state.csc_mask;
+       skt->old_status = stat;
+       spin_unlock_bh(&skt->lock);
+
+       if (events)
+               pcmcia_parse_events(&skt->socket, events);
+
+       mod_timer(&skt->timer,
+                 jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
+}
+
+static int bcm63xx_pcmcia_set_io_map(struct pcmcia_socket *sock,
+                                    struct pccard_io_map *map)
+{
+       /* this doesn't seem to be called by pcmcia layer if static
+        * mapping is used */
+       return 0;
+}
+
+static int bcm63xx_pcmcia_set_mem_map(struct pcmcia_socket *sock,
+                                     struct pccard_mem_map *map)
+{
+       struct bcm63xx_pcmcia_socket *skt;
+       struct resource *res;
+
+       skt = sock->driver_data;
+       if (map->flags & MAP_ATTRIB)
+               res = skt->attr_res;
+       else
+               res = skt->common_res;
+
+       map->static_start = res->start + map->card_start;
+       return 0;
+}
+
+static struct pccard_operations bcm63xx_pcmcia_operations = {
+       .init                   = bcm63xx_pcmcia_sock_init,
+       .suspend                = bcm63xx_pcmcia_suspend,
+       .get_status             = bcm63xx_pcmcia_get_status,
+       .set_socket             = bcm63xx_pcmcia_set_socket,
+       .set_io_map             = bcm63xx_pcmcia_set_io_map,
+       .set_mem_map            = bcm63xx_pcmcia_set_mem_map,
+};
+
+/*
+ * register pcmcia socket to core
+ */
+static int __devinit bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
+{
+       struct bcm63xx_pcmcia_socket *skt;
+       struct pcmcia_socket *sock;
+       struct resource *res, *irq_res;
+       unsigned int regmem_size = 0, iomem_size = 0;
+       u32 val;
+       int ret;
+
+       skt = kzalloc(sizeof(*skt), GFP_KERNEL);
+       if (!skt)
+               return -ENOMEM;
+       spin_lock_init(&skt->lock);
+       sock = &skt->socket;
+       sock->driver_data = skt;
+
+       /* make sure we have all resources we need */
+       skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+       irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       skt->pd = pdev->dev.platform_data;
+       if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       /* remap pcmcia registers */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       regmem_size = resource_size(res);
+       if (!request_mem_region(res->start, regmem_size, "bcm63xx_pcmcia")) {
+               ret = -EINVAL;
+               goto err;
+       }
+       skt->reg_res = res;
+
+       skt->base = ioremap(res->start, regmem_size);
+       if (!skt->base) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       /* remap io registers */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+       iomem_size = resource_size(res);
+       skt->io_base = ioremap(res->start, iomem_size);
+       if (!skt->io_base) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       /* resources are static */
+       sock->resource_ops = &pccard_static_ops;
+       sock->ops = &bcm63xx_pcmcia_operations;
+       sock->owner = THIS_MODULE;
+       sock->dev.parent = &pdev->dev;
+       sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
+       sock->io_offset = (unsigned long)skt->io_base;
+       sock->pci_irq = irq_res->start;
+
+#ifdef CONFIG_CARDBUS
+       sock->cb_dev = bcm63xx_cb_dev;
+       if (bcm63xx_cb_dev)
+               sock->features |= SS_CAP_CARDBUS;
+#endif
+
+       /* assume common & attribute memory have the same size */
+       sock->map_size = resource_size(skt->common_res);
+
+       /* initialize polling timer */
+       setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt);
+
+       /* initialize  pcmcia  control register,  drive  VS[12] to  0,
+        * leave CB IDSEL to the old  value since it is set by the PCI
+        * layer */
+       val = pcmcia_readl(skt, PCMCIA_C1_REG);
+       val &= PCMCIA_C1_CBIDSEL_MASK;
+       val |= PCMCIA_C1_EN_PCMCIA_GPIO_MASK;
+       pcmcia_writel(skt, val, PCMCIA_C1_REG);
+
+       /*
+        * Hardware has only one set of timings registers, not one for
+        * each memory access type, so we configure them for the
+        * slowest one: attribute memory.
+        */
+       val = PCMCIA_C2_DATA16_MASK;
+       val |= 10 << PCMCIA_C2_RWCOUNT_SHIFT;
+       val |= 6 << PCMCIA_C2_INACTIVE_SHIFT;
+       val |= 3 << PCMCIA_C2_SETUP_SHIFT;
+       val |= 3 << PCMCIA_C2_HOLD_SHIFT;
+       pcmcia_writel(skt, val, PCMCIA_C2_REG);
+
+       ret = pcmcia_register_socket(sock);
+       if (ret)
+               goto err;
+
+       /* start polling socket */
+       mod_timer(&skt->timer,
+                 jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
+
+       platform_set_drvdata(pdev, skt);
+       return 0;
+
+err:
+       if (skt->io_base)
+               iounmap(skt->io_base);
+       if (skt->base)
+               iounmap(skt->base);
+       if (skt->reg_res)
+               release_mem_region(skt->reg_res->start, regmem_size);
+       kfree(skt);
+       return ret;
+}
+
+static int __devexit bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
+{
+       struct bcm63xx_pcmcia_socket *skt;
+       struct resource *res;
+
+       skt = platform_get_drvdata(pdev);
+       del_timer_sync(&skt->timer);
+       iounmap(skt->base);
+       iounmap(skt->io_base);
+       res = skt->reg_res;
+       release_mem_region(res->start, resource_size(res));
+       kfree(skt);
+       return 0;
+}
+
+struct platform_driver bcm63xx_pcmcia_driver = {
+       .probe  = bcm63xx_drv_pcmcia_probe,
+       .remove = __devexit_p(bcm63xx_drv_pcmcia_remove),
+       .driver = {
+               .name   = "bcm63xx_pcmcia",
+               .owner  = THIS_MODULE,
+       },
+};
+
+#ifdef CONFIG_CARDBUS
+static int __devinit bcm63xx_cb_probe(struct pci_dev *dev,
+                                     const struct pci_device_id *id)
+{
+       /* keep pci device */
+       bcm63xx_cb_dev = dev;
+       return platform_driver_register(&bcm63xx_pcmcia_driver);
+}
+
+static void __devexit bcm63xx_cb_exit(struct pci_dev *dev)
+{
+       platform_driver_unregister(&bcm63xx_pcmcia_driver);
+       bcm63xx_cb_dev = NULL;
+}
+
+static struct pci_device_id bcm63xx_cb_table[] = {
+       {
+               .vendor         = PCI_VENDOR_ID_BROADCOM,
+               .device         = BCM6348_CPU_ID,
+               .subvendor      = PCI_VENDOR_ID_BROADCOM,
+               .subdevice      = PCI_ANY_ID,
+               .class          = PCI_CLASS_BRIDGE_CARDBUS << 8,
+               .class_mask     = ~0,
+       },
+
+       {
+               .vendor         = PCI_VENDOR_ID_BROADCOM,
+               .device         = BCM6358_CPU_ID,
+               .subvendor      = PCI_VENDOR_ID_BROADCOM,
+               .subdevice      = PCI_ANY_ID,
+               .class          = PCI_CLASS_BRIDGE_CARDBUS << 8,
+               .class_mask     = ~0,
+       },
+
+       { },
+};
+
+MODULE_DEVICE_TABLE(pci, bcm63xx_cb_table);
+
+static struct pci_driver bcm63xx_cardbus_driver = {
+       .name           = "bcm63xx_cardbus",
+       .id_table       = bcm63xx_cb_table,
+       .probe          = bcm63xx_cb_probe,
+       .remove         = __devexit_p(bcm63xx_cb_exit),
+};
+#endif
+
+/*
+ * if cardbus support is enabled, register our platform device after
+ * our fake cardbus bridge has been registered
+ */
+static int __init bcm63xx_pcmcia_init(void)
+{
+#ifdef CONFIG_CARDBUS
+       return pci_register_driver(&bcm63xx_cardbus_driver);
+#else
+       return platform_driver_register(&bcm63xx_pcmcia_driver);
+#endif
+}
+
+static void __exit bcm63xx_pcmcia_exit(void)
+{
+#ifdef CONFIG_CARDBUS
+       pci_unregister_driver(&bcm63xx_cardbus_driver);
+#else
+       platform_driver_unregister(&bcm63xx_pcmcia_driver);
+#endif
+}
+
+module_init(bcm63xx_pcmcia_init);
+module_exit(bcm63xx_pcmcia_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_DESCRIPTION("Linux PCMCIA Card Services: bcm63xx Socket Controller");
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.h b/drivers/pcmcia/bcm63xx_pcmcia.h
new file mode 100644 (file)
index 0000000..ed95739
--- /dev/null
@@ -0,0 +1,60 @@
+#ifndef BCM63XX_PCMCIA_H_
+#define BCM63XX_PCMCIA_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <pcmcia/ss.h>
+#include <bcm63xx_dev_pcmcia.h>
+
+/* socket polling rate in ms */
+#define BCM63XX_PCMCIA_POLL_RATE       500
+
+enum {
+       CARD_CARDBUS = (1 << 0),
+       CARD_PCCARD = (1 << 1),
+       CARD_5V = (1 << 2),
+       CARD_3V = (1 << 3),
+       CARD_XV = (1 << 4),
+       CARD_YV = (1 << 5),
+};
+
+struct bcm63xx_pcmcia_socket {
+       struct pcmcia_socket socket;
+
+       /* platform specific data */
+       struct bcm63xx_pcmcia_platform_data *pd;
+
+       /* all register accesses are protected by this spinlock */
+       spinlock_t lock;
+
+       /* pcmcia registers resource */
+       struct resource *reg_res;
+
+       /* base remapped address of registers */
+       void __iomem *base;
+
+       /* whether a card is detected at the moment */
+       int card_detected;
+
+       /* type of detected card (mask of above enum) */
+       u8 card_type;
+
+       /* keep last socket status to implement event reporting */
+       unsigned int old_status;
+
+       /* backup of requested socket state */
+       socket_state_t requested_state;
+
+       /* timer used for socket status polling */
+       struct timer_list timer;
+
+       /* attribute/common memory resources */
+       struct resource *attr_res;
+       struct resource *common_res;
+       struct resource *io_res;
+
+       /* base address of io memory */
+       void __iomem *io_base;
+};
+
+#endif /* BCM63XX_PCMCIA_H_ */
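
The card_type field above is a mask built from the CARD_* flags. Purely as a hedged illustration (this helper is hypothetical and not part of the patch; the real mapping lives in bcm63xx_pcmcia.c), such a mask would typically be folded into the SS_* status bits from <pcmcia/ss.h> that the socket core consumes:

#include <linux/types.h>
#include <pcmcia/ss.h>

/* hypothetical helper: translate the CARD_* detection mask kept in
 * bcm63xx_pcmcia_socket::card_type into SS_* socket status bits */
static unsigned int bcm63xx_card_type_to_ss(u8 card_type)
{
	unsigned int stat = 0;

	if (card_type & (CARD_CARDBUS | CARD_PCCARD))
		stat |= SS_DETECT;
	if (card_type & CARD_CARDBUS)
		stat |= SS_CARDBUS;
	if (card_type & CARD_3V)
		stat |= SS_3VCARD;
	if (card_type & (CARD_XV | CARD_YV))
		stat |= SS_XVCARD;
	return stat;
}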
index b59d4115d20f541e5176ba309cb694687c1c1d17..300b368605c92c43b2b2e2e7a324cc5645489ee4 100644 (file)
@@ -302,7 +302,7 @@ static int __devexit bfin_cf_remove(struct platform_device *pdev)
 
 static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
 {
-       return pcmcia_socket_dev_suspend(&pdev->dev, mesg);
+       return pcmcia_socket_dev_suspend(&pdev->dev);
 }
 
 static int bfin_cf_resume(struct platform_device *pdev)
index 0660ad18258953af06dfa7cc72afda3b9259f716..934d4bee39a09b040d670fd35b656977ec5ac168 100644 (file)
@@ -101,7 +101,7 @@ EXPORT_SYMBOL(pcmcia_socket_list_rwsem);
 static int socket_resume(struct pcmcia_socket *skt);
 static int socket_suspend(struct pcmcia_socket *skt);
 
-int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state)
+int pcmcia_socket_dev_suspend(struct device *dev)
 {
        struct pcmcia_socket *socket;
 
index 46561face128842e4a3bd8c7c3138e36a59d7fc9..a04f21c8170f38257c2c46545500c42e0c95bcb8 100644 (file)
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
 #ifdef CONFIG_PM
 static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int i82092aa_socket_resume (struct pci_dev *dev)
index 40d4953e4b12ee2be19effe5e8472a16686bbc7b..b906abe26ad08d49b972053b2ec24e2374415eb7 100644 (file)
@@ -1241,7 +1241,7 @@ static int pcic_init(struct pcmcia_socket *s)
 static int i82365_drv_pcmcia_suspend(struct platform_device *dev,
                                     pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int i82365_drv_pcmcia_resume(struct platform_device *dev)
index 62b4ecc97c46fe513014e04c91167cb64248080c..d1d89c4491ad185df3de5da8f56656c2eecad9d0 100644 (file)
@@ -699,7 +699,7 @@ static struct pccard_operations pcc_operations = {
 static int cfc_drv_pcmcia_suspend(struct platform_device *dev,
                                     pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int cfc_drv_pcmcia_resume(struct platform_device *dev)
index 12034b41d196a9e4a566977877b7bc5d84f091d2..a0655839c8d33ea7a755c940724e62a5f5e2626a 100644 (file)
@@ -675,7 +675,7 @@ static struct pccard_operations pcc_operations = {
 static int pcc_drv_pcmcia_suspend(struct platform_device *dev,
                                     pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int pcc_drv_pcmcia_resume(struct platform_device *dev)
index d1ad0966392dc5bc4fcf31b136ff8757525a2c50..c69f2c4fe5204dd19fb6c6034278042d785f7a36 100644 (file)
@@ -1296,7 +1296,7 @@ static int m8xx_remove(struct of_device *ofdev)
 #ifdef CONFIG_PM
 static int m8xx_suspend(struct platform_device *pdev, pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&pdev->dev, state);
+       return pcmcia_socket_dev_suspend(&pdev->dev);
 }
 
 static int m8xx_resume(struct platform_device *pdev)
index f3736398900eeb1b581f0c500d1877290855917d..68570bc3ac8630d204320fedee16b0bfdb81d3e1 100644 (file)
@@ -334,7 +334,7 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
 
 static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
 {
-       return pcmcia_socket_dev_suspend(&pdev->dev, mesg);
+       return pcmcia_socket_dev_suspend(&pdev->dev);
 }
 
 static int omap_cf_resume(struct platform_device *pdev)
index 8bed1dab903983e4f5d8a11f238b59567931cec0..1c39d3438f20c3a8cd8a6a7ca386e0c862e2cc5c 100644 (file)
@@ -758,7 +758,7 @@ static void __devexit pd6729_pci_remove(struct pci_dev *dev)
 #ifdef CONFIG_PM
 static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int pd6729_socket_resume(struct pci_dev *dev)
index 87e22ef8eb0213f75ec82efd30c151a564575cd9..0e35acb1366b118773e34db9578daf1b30582119 100644 (file)
@@ -302,7 +302,7 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
 
 static int pxa2xx_drv_pcmcia_suspend(struct device *dev)
 {
-       return pcmcia_socket_dev_suspend(dev, PMSG_SUSPEND);
+       return pcmcia_socket_dev_suspend(dev);
 }
 
 static int pxa2xx_drv_pcmcia_resume(struct device *dev)
index f424146a2bc946c6b8a9eb0683e4e7f8afd3c56f..ac8aa09ba0da56b33f12e3a9ba0168ef73e54779 100644 (file)
@@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = {
        .socket_suspend         = assabet_pcmcia_socket_suspend,
 };
 
-int __init pcmcia_assabet_init(struct device *dev)
+int pcmcia_assabet_init(struct device *dev)
 {
        int ret = -ENODEV;
 
index d8da5ac844e96b4739f130953e086216b9f2023b..2d0e997515308d931bb683d5f58ae8bd624ed5c3 100644 (file)
@@ -89,7 +89,7 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
 static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev,
                                     pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int sa11x0_drv_pcmcia_resume(struct platform_device *dev)
index 4c41e86ccff9c9180999b22aa815d1f4f4f73dcb..0c76d337815bca429b2ef094e19c01f1bd39e8a8 100644 (file)
@@ -123,7 +123,7 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
        .socket_suspend         = sa1111_pcmcia_socket_suspend,
 };
 
-int __init pcmcia_neponset_init(struct sa1111_dev *sadev)
+int pcmcia_neponset_init(struct sa1111_dev *sadev)
 {
        int ret = -ENODEV;
 
index 401052a21ce8f093300a33871a26d3a21d7697c0..4be4e172ffa180aebccd78bd0fa0e241e6b97f74 100644 (file)
@@ -159,7 +159,7 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
 
 static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int pcmcia_resume(struct sa1111_dev *dev)
index 8eb04230fec7a475c24bc012285dd86089b986ce..582413fcb62f5c4e4f0b1882e042771d5cf0a639 100644 (file)
@@ -366,7 +366,7 @@ static int __init get_tcic_id(void)
 static int tcic_drv_pcmcia_suspend(struct platform_device *dev,
                                     pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int tcic_drv_pcmcia_resume(struct platform_device *dev)
index d4ad50d737b060392fdf2c82bc80a211a57f5be1..c9fcbdc164eabcd812d12e808b310417795603f7 100644 (file)
@@ -707,7 +707,7 @@ __setup("vrc4171_card=", vrc4171_card_setup);
 static int vrc4171_card_suspend(struct platform_device *dev,
                                     pm_message_t state)
 {
-       return pcmcia_socket_dev_suspend(&dev->dev, state);
+       return pcmcia_socket_dev_suspend(&dev->dev);
 }
 
 static int vrc4171_card_resume(struct platform_device *dev)
index b459e87a30acf2938c4b2b01e9df276623c54937..abe0e44c6e9efb05bf16344fcf292700cfe316c0 100644 (file)
@@ -1225,60 +1225,71 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
 }
 
 #ifdef CONFIG_PM
-static int yenta_dev_suspend (struct pci_dev *dev, pm_message_t state)
+static int yenta_dev_suspend_noirq(struct device *dev)
 {
-       struct yenta_socket *socket = pci_get_drvdata(dev);
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct yenta_socket *socket = pci_get_drvdata(pdev);
        int ret;
 
-       ret = pcmcia_socket_dev_suspend(&dev->dev, state);
+       ret = pcmcia_socket_dev_suspend(dev);
 
-       if (socket) {
-               if (socket->type && socket->type->save_state)
-                       socket->type->save_state(socket);
+       if (!socket)
+               return ret;
 
-               /* FIXME: pci_save_state needs to have a better interface */
-               pci_save_state(dev);
-               pci_read_config_dword(dev, 16*4, &socket->saved_state[0]);
-               pci_read_config_dword(dev, 17*4, &socket->saved_state[1]);
-               pci_disable_device(dev);
+       if (socket->type && socket->type->save_state)
+               socket->type->save_state(socket);
 
-               /*
-                * Some laptops (IBM T22) do not like us putting the Cardbus
-                * bridge into D3.  At a guess, some other laptop will
-                * probably require this, so leave it commented out for now.
-                */
-               /* pci_set_power_state(dev, 3); */
-       }
+       pci_save_state(pdev);
+       pci_read_config_dword(pdev, 16*4, &socket->saved_state[0]);
+       pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]);
+       pci_disable_device(pdev);
+
+       /*
+        * Some laptops (IBM T22) do not like us putting the Cardbus
+        * bridge into D3.  At a guess, some other laptop will
+        * probably require this, so leave it commented out for now.
+        */
+       /* pci_set_power_state(dev, 3); */
 
        return ret;
 }
 
-
-static int yenta_dev_resume (struct pci_dev *dev)
+static int yenta_dev_resume_noirq(struct device *dev)
 {
-       struct yenta_socket *socket = pci_get_drvdata(dev);
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct yenta_socket *socket = pci_get_drvdata(pdev);
+       int ret;
 
-       if (socket) {
-               int rc;
+       if (!socket)
+               return 0;
 
-               pci_set_power_state(dev, 0);
-               /* FIXME: pci_restore_state needs to have a better interface */
-               pci_restore_state(dev);
-               pci_write_config_dword(dev, 16*4, socket->saved_state[0]);
-               pci_write_config_dword(dev, 17*4, socket->saved_state[1]);
+       pci_write_config_dword(pdev, 16*4, socket->saved_state[0]);
+       pci_write_config_dword(pdev, 17*4, socket->saved_state[1]);
 
-               rc = pci_enable_device(dev);
-               if (rc)
-                       return rc;
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
 
-               pci_set_master(dev);
+       pci_set_master(pdev);
 
-               if (socket->type && socket->type->restore_state)
-                       socket->type->restore_state(socket);
-       }
+       if (socket->type && socket->type->restore_state)
+               socket->type->restore_state(socket);
 
-       return pcmcia_socket_dev_resume(&dev->dev);
+       return pcmcia_socket_dev_resume(dev);
 }
+
+static struct dev_pm_ops yenta_pm_ops = {
+       .suspend_noirq = yenta_dev_suspend_noirq,
+       .resume_noirq = yenta_dev_resume_noirq,
+       .freeze_noirq = yenta_dev_suspend_noirq,
+       .thaw_noirq = yenta_dev_resume_noirq,
+       .poweroff_noirq = yenta_dev_suspend_noirq,
+       .restore_noirq = yenta_dev_resume_noirq,
+};
+
+#define YENTA_PM_OPS   (&yenta_pm_ops)
+#else
+#define YENTA_PM_OPS   NULL
 #endif
 
 #define CB_ID(vend,dev,type)                           \
@@ -1376,10 +1387,7 @@ static struct pci_driver yenta_cardbus_driver = {
        .id_table       = yenta_table,
        .probe          = yenta_probe,
        .remove         = __devexit_p(yenta_close),
-#ifdef CONFIG_PM
-       .suspend        = yenta_dev_suspend,
-       .resume         = yenta_dev_resume,
-#endif
+       .driver.pm      = YENTA_PM_OPS,
 };
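
The yenta conversion above is the pattern that the pcmcia_socket_dev_suspend() signature change across this series enables: the legacy PCI .suspend/.resume hooks, which carried a pm_message_t, are replaced by dev_pm_ops callbacks that only see a struct device. A minimal sketch of the same conversion for a hypothetical PCI driver "foo" (names invented; a const dev_pm_ops works just as well as the non-const one used above):

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* device-specific state saving would go here */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* no .suspend/.resume here any more; the pm ops hang off the
	 * embedded device_driver instead */
	.driver.pm	= &foo_pm_ops,
};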
 
 
index f9f68e0e73443b4c7348b31744ae1fe5cb860a7c..a2a742c8ff7e30dab430ef7ece4abc1c2a930dd3 100644 (file)
@@ -1041,6 +1041,9 @@ static int sony_nc_resume(struct acpi_device *device)
                        sony_backlight_update_status(sony_backlight_device) < 0)
                printk(KERN_WARNING DRV_PFX "unable to restore brightness level\n");
 
+       /* re-read rfkill state */
+       sony_nc_rfkill_update();
+
        return 0;
 }
 
@@ -1078,6 +1081,8 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
        struct rfkill *rfk;
        enum rfkill_type type;
        const char *name;
+       int result;
+       bool hwblock;
 
        switch (nc_type) {
        case SONY_WIFI:
@@ -1105,6 +1110,10 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
        if (!rfk)
                return -ENOMEM;
 
+       sony_call_snc_handle(0x124, 0x200, &result);
+       hwblock = !(result & 0x1);
+       rfkill_set_hw_state(rfk, hwblock);
+
        err = rfkill_register(rfk);
        if (err) {
                rfkill_destroy(rfk);
@@ -1202,15 +1211,6 @@ static int sony_nc_add(struct acpi_device *device)
                }
        }
 
-       /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1
-        * should be respected as we already checked for the device presence above */
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) {
-               dprintk("Invoking _INI\n");
-               if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI,
-                                               NULL, NULL)))
-                       dprintk("_INI Method failed\n");
-       }
-
        if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
                                         &handle))) {
                if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
@@ -1390,27 +1390,20 @@ struct sonypi_eventtypes {
        struct sonypi_event     *events;
 };
 
-struct device_ctrl {
+struct sony_pic_dev {
+       struct acpi_device              *acpi_dev;
+       struct sony_pic_irq             *cur_irq;
+       struct sony_pic_ioport          *cur_ioport;
+       struct list_head                interrupts;
+       struct list_head                ioports;
+       struct mutex                    lock;
+       struct sonypi_eventtypes        *event_types;
+       int                             (*handle_irq)(const u8, const u8);
        int                             model;
-       int                             (*handle_irq)(const u8, const u8);
        u16                             evport_offset;
-       u8                              has_camera;
-       u8                              has_bluetooth;
-       u8                              has_wwan;
-       struct sonypi_eventtypes        *event_types;
-};
-
-struct sony_pic_dev {
-       struct device_ctrl      *control;
-       struct acpi_device      *acpi_dev;
-       struct sony_pic_irq     *cur_irq;
-       struct sony_pic_ioport  *cur_ioport;
-       struct list_head        interrupts;
-       struct list_head        ioports;
-       struct mutex            lock;
-       u8                      camera_power;
-       u8                      bluetooth_power;
-       u8                      wwan_power;
+       u8                              camera_power;
+       u8                              bluetooth_power;
+       u8                              wwan_power;
 };
 
 static struct sony_pic_dev spic_dev = {
@@ -1418,6 +1411,8 @@ static struct sony_pic_dev spic_dev = {
        .ioports        = LIST_HEAD_INIT(spic_dev.ioports),
 };
 
+static int spic_drv_registered;
+
 /* Event masks */
 #define SONYPI_JOGGER_MASK                     0x00000001
 #define SONYPI_CAPTURE_MASK                    0x00000002
@@ -1715,27 +1710,6 @@ static int type3_handle_irq(const u8 data_mask, const u8 ev)
        return 1;
 }
 
-static struct device_ctrl spic_types[] = {
-       {
-               .model = SONYPI_DEVICE_TYPE1,
-               .handle_irq = NULL,
-               .evport_offset = SONYPI_TYPE1_OFFSET,
-               .event_types = type1_events,
-       },
-       {
-               .model = SONYPI_DEVICE_TYPE2,
-               .handle_irq = NULL,
-               .evport_offset = SONYPI_TYPE2_OFFSET,
-               .event_types = type2_events,
-       },
-       {
-               .model = SONYPI_DEVICE_TYPE3,
-               .handle_irq = type3_handle_irq,
-               .evport_offset = SONYPI_TYPE3_OFFSET,
-               .event_types = type3_events,
-       },
-};
-
 static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
 {
        struct pci_dev *pcidev;
@@ -1743,48 +1717,63 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
        pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
                        PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
        if (pcidev) {
-               dev->control = &spic_types[0];
+               dev->model = SONYPI_DEVICE_TYPE1;
+               dev->evport_offset = SONYPI_TYPE1_OFFSET;
+               dev->event_types = type1_events;
                goto out;
        }
 
        pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
                        PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
        if (pcidev) {
-               dev->control = &spic_types[2];
+               dev->model = SONYPI_DEVICE_TYPE2;
+               dev->evport_offset = SONYPI_TYPE2_OFFSET;
+               dev->event_types = type2_events;
                goto out;
        }
 
        pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
                        PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
        if (pcidev) {
-               dev->control = &spic_types[2];
+               dev->model = SONYPI_DEVICE_TYPE3;
+               dev->handle_irq = type3_handle_irq;
+               dev->evport_offset = SONYPI_TYPE3_OFFSET;
+               dev->event_types = type3_events;
                goto out;
        }
 
        pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
                        PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
        if (pcidev) {
-               dev->control = &spic_types[2];
+               dev->model = SONYPI_DEVICE_TYPE3;
+               dev->handle_irq = type3_handle_irq;
+               dev->evport_offset = SONYPI_TYPE3_OFFSET;
+               dev->event_types = type3_events;
                goto out;
        }
 
        pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
                        PCI_DEVICE_ID_INTEL_ICH9_1, NULL);
        if (pcidev) {
-               dev->control = &spic_types[2];
+               dev->model = SONYPI_DEVICE_TYPE3;
+               dev->handle_irq = type3_handle_irq;
+               dev->evport_offset = SONYPI_TYPE3_OFFSET;
+               dev->event_types = type3_events;
                goto out;
        }
 
        /* default */
-       dev->control = &spic_types[1];
+       dev->model = SONYPI_DEVICE_TYPE2;
+       dev->evport_offset = SONYPI_TYPE2_OFFSET;
+       dev->event_types = type2_events;
 
 out:
        if (pcidev)
                pci_dev_put(pcidev);
 
        printk(KERN_INFO DRV_PFX "detected Type%d model\n",
-                       dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 :
-                       dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
+                       dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
+                       dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
 }
 
 /* camera tests and poweron/poweroff */
@@ -2557,7 +2546,7 @@ static int sony_pic_enable(struct acpi_device *device,
        buffer.pointer = resource;
 
        /* setup Type 1 resources */
-       if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) {
+       if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
 
                /* setup io resources */
                resource->res1.type = ACPI_RESOURCE_TYPE_IO;
@@ -2640,29 +2629,28 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
                data_mask = inb_p(dev->cur_ioport->io2.minimum);
        else
                data_mask = inb_p(dev->cur_ioport->io1.minimum +
-                               dev->control->evport_offset);
+                               dev->evport_offset);
 
        dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
                        ev, data_mask, dev->cur_ioport->io1.minimum,
-                       dev->control->evport_offset);
+                       dev->evport_offset);
 
        if (ev == 0x00 || ev == 0xff)
                return IRQ_HANDLED;
 
-       for (i = 0; dev->control->event_types[i].mask; i++) {
+       for (i = 0; dev->event_types[i].mask; i++) {
 
-               if ((data_mask & dev->control->event_types[i].data) !=
-                   dev->control->event_types[i].data)
+               if ((data_mask & dev->event_types[i].data) !=
+                   dev->event_types[i].data)
                        continue;
 
-               if (!(mask & dev->control->event_types[i].mask))
+               if (!(mask & dev->event_types[i].mask))
                        continue;
 
-               for (j = 0; dev->control->event_types[i].events[j].event; j++) {
-                       if (ev == dev->control->event_types[i].events[j].data) {
+               for (j = 0; dev->event_types[i].events[j].event; j++) {
+                       if (ev == dev->event_types[i].events[j].data) {
                                device_event =
-                                       dev->control->
-                                               event_types[i].events[j].event;
+                                       dev->event_types[i].events[j].event;
                                goto found;
                        }
                }
@@ -2670,13 +2658,12 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
        /* Still not able to decode the event try to pass
         * it over to the minidriver
         */
-       if (dev->control->handle_irq &&
-                       dev->control->handle_irq(data_mask, ev) == 0)
+       if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0)
                return IRQ_HANDLED;
 
        dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
                        ev, data_mask, dev->cur_ioport->io1.minimum,
-                       dev->control->evport_offset);
+                       dev->evport_offset);
        return IRQ_HANDLED;
 
 found:
@@ -2807,7 +2794,7 @@ static int sony_pic_add(struct acpi_device *device)
        /* request IRQ */
        list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
                if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
-                                       IRQF_SHARED, "sony-laptop", &spic_dev)) {
+                                       IRQF_DISABLED, "sony-laptop", &spic_dev)) {
                        dprintk("IRQ: %d - triggering: %d - "
                                        "polarity: %d - shr: %d\n",
                                        irq->irq.interrupts[0],
@@ -2940,6 +2927,7 @@ static int __init sony_laptop_init(void)
                                        "Unable to register SPIC driver.");
                        goto out;
                }
+               spic_drv_registered = 1;
        }
 
        result = acpi_bus_register_driver(&sony_nc_driver);
@@ -2951,7 +2939,7 @@ static int __init sony_laptop_init(void)
        return 0;
 
 out_unregister_pic:
-       if (!no_spic)
+       if (spic_drv_registered)
                acpi_bus_unregister_driver(&sony_pic_driver);
 out:
        return result;
@@ -2960,7 +2948,7 @@ out:
 static void __exit sony_laptop_exit(void)
 {
        acpi_bus_unregister_driver(&sony_nc_driver);
-       if (!no_spic)
+       if (spic_drv_registered)
                acpi_bus_unregister_driver(&sony_pic_driver);
 }
 
index 1b78f639ead390547445c5e939d20235914e9894..76769978285f0cd28a15eb2004b633bce16a6418 100644 (file)
@@ -125,7 +125,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
                           filp->f_path.dentry->d_inode->i_private);
 }
 
-static struct file_operations debugfs_fops = {
+static const struct file_operations debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = qstat_seq_open,
        .read    = seq_read,
index eff943923c6fac1d3e0862c0134b3a2a9920efa0..968e3c7c263269bda6c7d04261871729ce8bbbe6 100644 (file)
@@ -84,7 +84,7 @@ static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
        return single_open(filp, qdio_perf_proc_show, NULL);
 }
 
-static struct file_operations qdio_perf_proc_fops = {
+static const struct file_operations qdio_perf_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = qdio_perf_seq_open,
        .read    = seq_read,
index 0cb049f5cc567c0f107b6b8f913ab4f258e58d4d..747a5e5c1276f62feb3f12f4c2ade154797e6d0b 100644 (file)
@@ -1317,7 +1317,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
        }
 }
 
-static struct file_operations sg_fops = {
+static const struct file_operations sg_fops = {
        .owner = THIS_MODULE,
        .read = sg_read,
        .write = sg_write,
@@ -2194,9 +2194,11 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v);
 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
                                  size_t count, loff_t *off);
-static struct file_operations adio_fops = {
-       /* .owner, .read and .llseek added in sg_proc_init() */
+static const struct file_operations adio_fops = {
+       .owner = THIS_MODULE,
        .open = sg_proc_single_open_adio,
+       .read = seq_read,
+       .llseek = seq_lseek,
        .write = sg_proc_write_adio,
        .release = single_release,
 };
@@ -2204,23 +2206,32 @@ static struct file_operations adio_fops = {
 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
 static ssize_t sg_proc_write_dressz(struct file *filp, 
                const char __user *buffer, size_t count, loff_t *off);
-static struct file_operations dressz_fops = {
+static const struct file_operations dressz_fops = {
+       .owner = THIS_MODULE,
        .open = sg_proc_single_open_dressz,
+       .read = seq_read,
+       .llseek = seq_lseek,
        .write = sg_proc_write_dressz,
        .release = single_release,
 };
 
 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
-static struct file_operations version_fops = {
+static const struct file_operations version_fops = {
+       .owner = THIS_MODULE,
        .open = sg_proc_single_open_version,
+       .read = seq_read,
+       .llseek = seq_lseek,
        .release = single_release,
 };
 
 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
-static struct file_operations devhdr_fops = {
+static const struct file_operations devhdr_fops = {
+       .owner = THIS_MODULE,
        .open = sg_proc_single_open_devhdr,
+       .read = seq_read,
+       .llseek = seq_lseek,
        .release = single_release,
 };
 
@@ -2229,8 +2240,11 @@ static int sg_proc_open_dev(struct inode *inode, struct file *file);
 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
 static void dev_seq_stop(struct seq_file *s, void *v);
-static struct file_operations dev_fops = {
+static const struct file_operations dev_fops = {
+       .owner = THIS_MODULE,
        .open = sg_proc_open_dev,
+       .read = seq_read,
+       .llseek = seq_lseek,
        .release = seq_release,
 };
 static const struct seq_operations dev_seq_ops = {
@@ -2242,8 +2256,11 @@ static const struct seq_operations dev_seq_ops = {
 
 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
-static struct file_operations devstrs_fops = {
+static const struct file_operations devstrs_fops = {
+       .owner = THIS_MODULE,
        .open = sg_proc_open_devstrs,
+       .read = seq_read,
+       .llseek = seq_lseek,
        .release = seq_release,
 };
 static const struct seq_operations devstrs_seq_ops = {
@@ -2255,8 +2272,11 @@ static const struct seq_operations devstrs_seq_ops = {
 
 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
 static int sg_proc_open_debug(struct inode *inode, struct file *file);
-static struct file_operations debug_fops = {
+static const struct file_operations debug_fops = {
+       .owner = THIS_MODULE,
        .open = sg_proc_open_debug,
+       .read = seq_read,
+       .llseek = seq_lseek,
        .release = seq_release,
 };
 static const struct seq_operations debug_seq_ops = {
@@ -2269,7 +2289,7 @@ static const struct seq_operations debug_seq_ops = {
 
 struct sg_proc_leaf {
        const char * name;
-       struct file_operations * fops;
+       const struct file_operations * fops;
 };
 
 static struct sg_proc_leaf sg_proc_leaf_arr[] = {
@@ -2295,9 +2315,6 @@ sg_proc_init(void)
        for (k = 0; k < num_leaves; ++k) {
                leaf = &sg_proc_leaf_arr[k];
                mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
-               leaf->fops->owner = THIS_MODULE;
-               leaf->fops->read = seq_read;
-               leaf->fops->llseek = seq_lseek;
                proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
        }
        return 0;
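
The sg changes above all follow from making the file_operations tables const: the fields that sg_proc_init() used to patch in at runtime (.owner, .read, .llseek) now have to appear in the initializers. For reference, a minimal sketch of the resulting pattern for a hypothetical single_open()-based proc entry (names invented):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *s, void *v)
{
	seq_printf(s, "hello\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

/* every field is set here; nothing is filled in later at runtime */
static const struct file_operations example_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = example_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};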
index 2209620d2349337a44b6a9b035c8cd6583f96b79..b1ae774016f12b5271ed4d90dc3150838f1415d6 100644 (file)
@@ -64,6 +64,8 @@ static int serial_index(struct uart_port *port)
        return (serial8250_reg.minor - 64) + port->line;
 }
 
+static unsigned int skip_txen_test; /* force skip of txen test at init time */
+
 /*
  * Debugging.
  */
@@ -2108,7 +2110,7 @@ static int serial8250_startup(struct uart_port *port)
           is variable. So, let's just don't test if we receive
           TX irq. This way, we'll never enable UART_BUG_TXEN.
         */
-       if (up->port.flags & UPF_NO_TXEN_TEST)
+       if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
                goto dont_test_tx_en;
 
        /*
@@ -3248,6 +3250,9 @@ MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
 module_param(nr_uarts, uint, 0644);
 MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
 
+module_param(skip_txen_test, uint, 0644);
+MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
+
 #ifdef CONFIG_SERIAL_8250_RSA
 module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
 MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
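
The new skip_txen_test parameter above is a global, boot-time counterpart to the per-port UPF_NO_TXEN_TEST flag that serial8250_startup() already honours. When the driver is built in, it would typically be passed as something like 8250.skip_txen_test=1 on the kernel command line (the exact prefix depends on how the module is named in a given tree). For comparison, a hedged sketch of the pre-existing per-port way, for a hypothetical board with made-up address and irq:

#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>

/* hypothetical board uart: UPF_NO_TXEN_TEST disables the txen test for
 * this one port only, instead of globally via skip_txen_test */
static struct plat_serial8250_port board_uart_data[] = {
	{
		.mapbase	= 0x10000000,	/* made-up register base */
		.irq		= 42,		/* made-up irq */
		.uartclk	= 1843200,
		.regshift	= 2,
		.iotype		= UPIO_MEM,
		.flags		= UPF_BOOT_AUTOCONF | UPF_NO_TXEN_TEST,
	},
	{ },
};

static struct platform_device board_uart_device = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev.platform_data	= board_uart_data,
};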
index 03422ce878cf0ffd08deeadac3b71c6699ebb5ce..e52257257279a5a7cc1ae50d134c7e08d91b7243 100644 (file)
@@ -862,7 +862,7 @@ config SERIAL_IMX_CONSOLE
 
 config SERIAL_UARTLITE
        tristate "Xilinx uartlite serial port support"
-       depends on PPC32 || MICROBLAZE
+       depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
        select SERIAL_CORE
        help
          Say Y here if you want to use the Xilinx uartlite serial controller.
@@ -1458,4 +1458,23 @@ config SERIAL_TIMBERDALE
        ---help---
        Add support for UART controller on timberdale.
 
+config SERIAL_BCM63XX
+       tristate "bcm63xx serial port support"
+       select SERIAL_CORE
+       depends on BCM63XX
+       help
+         If you have a bcm63xx CPU, you can enable its onboard
+         serial port by enabling this option.
+
+         To compile this driver as a module, choose M here: the
+         module will be called bcm63xx_uart.
+
+config SERIAL_BCM63XX_CONSOLE
+       bool "Console on bcm63xx serial port"
+       depends on SERIAL_BCM63XX=y
+       select SERIAL_CORE_CONSOLE
+       help
+         If you have enabled the serial port on the bcm63xx CPU
+         you can make it the console by answering Y to this option.
+
 endmenu
index 97f6fcc8b4320c0de348691198beb8f044aa8e60..d21d5dd5d0482bee8c211af57efaf821cce67246 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
 obj-$(CONFIG_SERIAL_PXA) += pxa.o
 obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
 obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
+obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
 obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
 obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
 obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
new file mode 100644 (file)
index 0000000..beddaa6
--- /dev/null
@@ -0,0 +1,890 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from many drivers using generic_serial interface.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ *
+ *  Serial driver for BCM63xx integrated UART.
+ *
+ * Hardware flow control was _not_ tested since I only have RX/TX on
+ * my board.
+ */
+
+#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/clk.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/sysrq.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#include <bcm63xx_clk.h>
+#include <bcm63xx_irq.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+
+#define BCM63XX_NR_UARTS       1
+
+static struct uart_port ports[BCM63XX_NR_UARTS];
+
+/*
+ * rx interrupt mask / stat
+ *
+ * mask:
+ *  - rx fifo full
+ *  - rx fifo above threshold
+ *  - rx fifo not empty for too long
+ */
+#define UART_RX_INT_MASK       (UART_IR_MASK(UART_IR_RXOVER) |         \
+                               UART_IR_MASK(UART_IR_RXTHRESH) |        \
+                               UART_IR_MASK(UART_IR_RXTIMEOUT))
+
+#define UART_RX_INT_STAT       (UART_IR_STAT(UART_IR_RXOVER) |         \
+                               UART_IR_STAT(UART_IR_RXTHRESH) |        \
+                               UART_IR_STAT(UART_IR_RXTIMEOUT))
+
+/*
+ * tx interrupt mask / stat
+ *
+ * mask:
+ * - tx fifo empty
+ * - tx fifo below threshold
+ */
+#define UART_TX_INT_MASK       (UART_IR_MASK(UART_IR_TXEMPTY) |        \
+                               UART_IR_MASK(UART_IR_TXTRESH))
+
+#define UART_TX_INT_STAT       (UART_IR_STAT(UART_IR_TXEMPTY) |        \
+                               UART_IR_STAT(UART_IR_TXTRESH))
+
+/*
+ * external input interrupt
+ *
+ * mask: any edge on CTS, DCD
+ */
+#define UART_EXTINP_INT_MASK   (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \
+                                UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD))
+
+/*
+ * handy uart register accessor
+ */
+static inline unsigned int bcm_uart_readl(struct uart_port *port,
+                                        unsigned int offset)
+{
+       return bcm_readl(port->membase + offset);
+}
+
+static inline void bcm_uart_writel(struct uart_port *port,
+                                 unsigned int value, unsigned int offset)
+{
+       bcm_writel(value, port->membase + offset);
+}
+
+/*
+ * serial core request to check if uart tx fifo is empty
+ */
+static unsigned int bcm_uart_tx_empty(struct uart_port *port)
+{
+       unsigned int val;
+
+       val = bcm_uart_readl(port, UART_IR_REG);
+       return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0;
+}
+
+/*
+ * serial core request to set RTS and DTR pin state and loopback mode
+ */
+static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+       unsigned int val;
+
+       val = bcm_uart_readl(port, UART_MCTL_REG);
+       val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK);
+       /* invert of written value is reflected on the pin */
+       if (!(mctrl & TIOCM_DTR))
+               val |= UART_MCTL_DTR_MASK;
+       if (!(mctrl & TIOCM_RTS))
+               val |= UART_MCTL_RTS_MASK;
+       bcm_uart_writel(port, val, UART_MCTL_REG);
+
+       val = bcm_uart_readl(port, UART_CTL_REG);
+       if (mctrl & TIOCM_LOOP)
+               val |= UART_CTL_LOOPBACK_MASK;
+       else
+               val &= ~UART_CTL_LOOPBACK_MASK;
+       bcm_uart_writel(port, val, UART_CTL_REG);
+}
+
+/*
+ * serial core request to return RI, CTS, DCD and DSR pin state
+ */
+static unsigned int bcm_uart_get_mctrl(struct uart_port *port)
+{
+       unsigned int val, mctrl;
+
+       mctrl = 0;
+       val = bcm_uart_readl(port, UART_EXTINP_REG);
+       if (val & UART_EXTINP_RI_MASK)
+               mctrl |= TIOCM_RI;
+       if (val & UART_EXTINP_CTS_MASK)
+               mctrl |= TIOCM_CTS;
+       if (val & UART_EXTINP_DCD_MASK)
+               mctrl |= TIOCM_CD;
+       if (val & UART_EXTINP_DSR_MASK)
+               mctrl |= TIOCM_DSR;
+       return mctrl;
+}
+
+/*
+ * serial core request to disable tx ASAP (used for flow control)
+ */
+static void bcm_uart_stop_tx(struct uart_port *port)
+{
+       unsigned int val;
+
+       val = bcm_uart_readl(port, UART_CTL_REG);
+       val &= ~(UART_CTL_TXEN_MASK);
+       bcm_uart_writel(port, val, UART_CTL_REG);
+
+       val = bcm_uart_readl(port, UART_IR_REG);
+       val &= ~UART_TX_INT_MASK;
+       bcm_uart_writel(port, val, UART_IR_REG);
+}
+
+/*
+ * serial core request to (re)enable tx
+ */
+static void bcm_uart_start_tx(struct uart_port *port)
+{
+       unsigned int val;
+
+       val = bcm_uart_readl(port, UART_IR_REG);
+       val |= UART_TX_INT_MASK;
+       bcm_uart_writel(port, val, UART_IR_REG);
+
+       val = bcm_uart_readl(port, UART_CTL_REG);
+       val |= UART_CTL_TXEN_MASK;
+       bcm_uart_writel(port, val, UART_CTL_REG);
+}
+
+/*
+ * serial core request to stop rx, called before port shutdown
+ */
+static void bcm_uart_stop_rx(struct uart_port *port)
+{
+       unsigned int val;
+
+       val = bcm_uart_readl(port, UART_IR_REG);
+       val &= ~UART_RX_INT_MASK;
+       bcm_uart_writel(port, val, UART_IR_REG);
+}
+
+/*
+ * serial core request to enable modem status interrupt reporting
+ */
+static void bcm_uart_enable_ms(struct uart_port *port)
+{
+       unsigned int val;
+
+       val = bcm_uart_readl(port, UART_IR_REG);
+       val |= UART_IR_MASK(UART_IR_EXTIP);
+       bcm_uart_writel(port, val, UART_IR_REG);
+}
+
+/*
+ * serial core request to start/stop emitting break char
+ */
+static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
+{
+       unsigned long flags;
+       unsigned int val;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       val = bcm_uart_readl(port, UART_CTL_REG);
+       if (ctl)
+               val |= UART_CTL_XMITBRK_MASK;
+       else
+               val &= ~UART_CTL_XMITBRK_MASK;
+       bcm_uart_writel(port, val, UART_CTL_REG);
+
+       spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ * return port type in string format
+ */
+static const char *bcm_uart_type(struct uart_port *port)
+{
+       return (port->type == PORT_BCM63XX) ? "bcm63xx_uart" : NULL;
+}
+
+/*
+ * read all chars in rx fifo and send them to core
+ */
+static void bcm_uart_do_rx(struct uart_port *port)
+{
+       struct tty_struct *tty;
+       unsigned int max_count;
+
+       /* limit the number of chars read per interrupt; it should not
+        * be higher than the fifo size anyway since we're much faster
+        * than the serial port */
+       max_count = 32;
+       tty = port->info->port.tty;
+       do {
+               unsigned int iestat, c, cstat;
+               char flag;
+
+               /* get overrun/fifo empty information from ier
+                * register */
+               iestat = bcm_uart_readl(port, UART_IR_REG);
+               if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
+                       break;
+
+               cstat = c = bcm_uart_readl(port, UART_FIFO_REG);
+               port->icount.rx++;
+               flag = TTY_NORMAL;
+               c &= 0xff;
+
+               if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) {
+                       /* do stats first */
+                       if (cstat & UART_FIFO_BRKDET_MASK) {
+                               port->icount.brk++;
+                               if (uart_handle_break(port))
+                                       continue;
+                       }
+
+                       if (cstat & UART_FIFO_PARERR_MASK)
+                               port->icount.parity++;
+                       if (cstat & UART_FIFO_FRAMEERR_MASK)
+                               port->icount.frame++;
+
+                       /* update flag wrt read_status_mask */
+                       cstat &= port->read_status_mask;
+                       if (cstat & UART_FIFO_BRKDET_MASK)
+                               flag = TTY_BREAK;
+                       if (cstat & UART_FIFO_FRAMEERR_MASK)
+                               flag = TTY_FRAME;
+                       if (cstat & UART_FIFO_PARERR_MASK)
+                               flag = TTY_PARITY;
+               }
+
+               if (uart_handle_sysrq_char(port, c))
+                       continue;
+
+               if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
+                       port->icount.overrun++;
+                       tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+               }
+
+               if ((cstat & port->ignore_status_mask) == 0)
+                       tty_insert_flip_char(tty, c, flag);
+
+       } while (--max_count);
+
+       tty_flip_buffer_push(tty);
+}
+
+/*
+ * fill tx fifo with chars to send, stop when fifo is about to be full
+ * or when all chars have been sent.
+ */
+static void bcm_uart_do_tx(struct uart_port *port)
+{
+       struct circ_buf *xmit;
+       unsigned int val, max_count;
+
+       if (port->x_char) {
+               bcm_uart_writel(port, port->x_char, UART_FIFO_REG);
+               port->icount.tx++;
+               port->x_char = 0;
+               return;
+       }
+
+       if (uart_tx_stopped(port)) {
+               bcm_uart_stop_tx(port);
+               return;
+       }
+
+       xmit = &port->info->xmit;
+       if (uart_circ_empty(xmit))
+               goto txq_empty;
+
+       val = bcm_uart_readl(port, UART_MCTL_REG);
+       val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
+       max_count = port->fifosize - val;
+
+       while (max_count--) {
+               unsigned int c;
+
+               c = xmit->buf[xmit->tail];
+               bcm_uart_writel(port, c, UART_FIFO_REG);
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               port->icount.tx++;
+               if (uart_circ_empty(xmit))
+                       break;
+       }
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(port);
+
+       if (uart_circ_empty(xmit))
+               goto txq_empty;
+       return;
+
+txq_empty:
+       /* nothing to send, disable transmit interrupt */
+       val = bcm_uart_readl(port, UART_IR_REG);
+       val &= ~UART_TX_INT_MASK;
+       bcm_uart_writel(port, val, UART_IR_REG);
+       return;
+}
+
+/*
+ * process uart interrupt
+ */
+static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
+{
+       struct uart_port *port;
+       unsigned int irqstat;
+
+       port = dev_id;
+       spin_lock(&port->lock);
+
+       irqstat = bcm_uart_readl(port, UART_IR_REG);
+       if (irqstat & UART_RX_INT_STAT)
+               bcm_uart_do_rx(port);
+
+       if (irqstat & UART_TX_INT_STAT)
+               bcm_uart_do_tx(port);
+
+       if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) {
+               unsigned int estat;
+
+               estat = bcm_uart_readl(port, UART_EXTINP_REG);
+               if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS))
+                       uart_handle_cts_change(port,
+                                              estat & UART_EXTINP_CTS_MASK);
+               if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD))
+                       uart_handle_dcd_change(port,
+                                              estat & UART_EXTINP_DCD_MASK);
+       }
+
+       spin_unlock(&port->lock);
+       return IRQ_HANDLED;
+}
+
+/*
+ * enable rx & tx operation on uart
+ */
+static void bcm_uart_enable(struct uart_port *port)
+{
+       unsigned int val;
+
+       val = bcm_uart_readl(port, UART_CTL_REG);
+       val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK);
+       bcm_uart_writel(port, val, UART_CTL_REG);
+}
+
+/*
+ * disable rx & tx operation on uart
+ */
+static void bcm_uart_disable(struct uart_port *port)
+{
+       unsigned int val;
+
+       val = bcm_uart_readl(port, UART_CTL_REG);
+       val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK |
+                UART_CTL_RXEN_MASK);
+       bcm_uart_writel(port, val, UART_CTL_REG);
+}
+
+/*
+ * clear all unread data in rx fifo and unsent data in tx fifo
+ */
+static void bcm_uart_flush(struct uart_port *port)
+{
+       unsigned int val;
+
+       /* empty rx and tx fifo */
+       val = bcm_uart_readl(port, UART_CTL_REG);
+       val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK;
+       bcm_uart_writel(port, val, UART_CTL_REG);
+
+       /* read any pending char to make sure all irq status are
+        * cleared */
+       (void)bcm_uart_readl(port, UART_FIFO_REG);
+}
+
+/*
+ * serial core request to initialize uart and start rx operation
+ */
+static int bcm_uart_startup(struct uart_port *port)
+{
+       unsigned int val;
+       int ret;
+
+       /* mask all irq and flush port */
+       bcm_uart_disable(port);
+       bcm_uart_writel(port, 0, UART_IR_REG);
+       bcm_uart_flush(port);
+
+       /* clear any pending external input interrupt */
+       (void)bcm_uart_readl(port, UART_EXTINP_REG);
+
+       /* set rx/tx fifo thresh to fifo half size */
+       val = bcm_uart_readl(port, UART_MCTL_REG);
+       val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK);
+       val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT;
+       val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT;
+       bcm_uart_writel(port, val, UART_MCTL_REG);
+
+       /* set rx fifo timeout to 1 char time */
+       val = bcm_uart_readl(port, UART_CTL_REG);
+       val &= ~UART_CTL_RXTMOUTCNT_MASK;
+       val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT;
+       bcm_uart_writel(port, val, UART_CTL_REG);
+
+       /* report any edge on dcd and cts */
+       val = UART_EXTINP_INT_MASK;
+       val |= UART_EXTINP_DCD_NOSENSE_MASK;
+       val |= UART_EXTINP_CTS_NOSENSE_MASK;
+       bcm_uart_writel(port, val, UART_EXTINP_REG);
+
+       /* register irq and enable rx interrupts */
+       ret = request_irq(port->irq, bcm_uart_interrupt, 0,
+                         bcm_uart_type(port), port);
+       if (ret)
+               return ret;
+       bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
+       bcm_uart_enable(port);
+       return 0;
+}
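
To make the numbers above concrete, assuming the 16-byte fifos that bcm_uart_probe() sets in port->fifosize:

/*
 * Worked example with port->fifosize = 16:
 *
 *   rx/tx fifo threshold = 16 / 2 = 8 chars
 *   rx fifo timeout      = 1 character time
 *
 * so, given UART_RX_INT_MASK above, an rx interrupt is raised once the
 * fifo fills past 8 chars, or once it has sat non-empty for roughly one
 * character time.
 */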
+
+/*
+ * serial core request to flush & disable uart
+ */
+static void bcm_uart_shutdown(struct uart_port *port)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&port->lock, flags);
+       bcm_uart_writel(port, 0, UART_IR_REG);
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       bcm_uart_disable(port);
+       bcm_uart_flush(port);
+       free_irq(port->irq, port);
+}
+
+/*
+ * serial core request to change current uart setting
+ */
+static void bcm_uart_set_termios(struct uart_port *port,
+                                struct ktermios *new,
+                                struct ktermios *old)
+{
+       unsigned int ctl, baud, quot, ier;
+       unsigned long flags;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       /* disable uart while changing speed */
+       bcm_uart_disable(port);
+       bcm_uart_flush(port);
+
+       /* update Control register */
+       ctl = bcm_uart_readl(port, UART_CTL_REG);
+       ctl &= ~UART_CTL_BITSPERSYM_MASK;
+
+       switch (new->c_cflag & CSIZE) {
+       case CS5:
+               ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT);
+               break;
+       case CS6:
+               ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT);
+               break;
+       case CS7:
+               ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT);
+               break;
+       default:
+               ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT);
+               break;
+       }
+
+       ctl &= ~UART_CTL_STOPBITS_MASK;
+       if (new->c_cflag & CSTOPB)
+               ctl |= UART_CTL_STOPBITS_2;
+       else
+               ctl |= UART_CTL_STOPBITS_1;
+
+       ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
+       if (new->c_cflag & PARENB)
+               ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
+       ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
+       if (new->c_cflag & PARODD)
+               ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
+       bcm_uart_writel(port, ctl, UART_CTL_REG);
+
+       /* update Baudword register */
+       baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+       quot = uart_get_divisor(port, baud) - 1;
+       bcm_uart_writel(port, quot, UART_BAUD_REG);
+
+       /* update Interrupt register */
+       ier = bcm_uart_readl(port, UART_IR_REG);
+
+       ier &= ~UART_IR_MASK(UART_IR_EXTIP);
+       if (UART_ENABLE_MS(port, new->c_cflag))
+               ier |= UART_IR_MASK(UART_IR_EXTIP);
+
+       bcm_uart_writel(port, ier, UART_IR_REG);
+
+       /* update read/ignore mask */
+       port->read_status_mask = UART_FIFO_VALID_MASK;
+       if (new->c_iflag & INPCK) {
+               port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
+               port->read_status_mask |= UART_FIFO_PARERR_MASK;
+       }
+       if (new->c_iflag & (BRKINT))
+               port->read_status_mask |= UART_FIFO_BRKDET_MASK;
+
+       port->ignore_status_mask = 0;
+       if (new->c_iflag & IGNPAR)
+               port->ignore_status_mask |= UART_FIFO_PARERR_MASK;
+       if (new->c_iflag & IGNBRK)
+               port->ignore_status_mask |= UART_FIFO_BRKDET_MASK;
+       if (!(new->c_cflag & CREAD))
+               port->ignore_status_mask |= UART_FIFO_VALID_MASK;
+
+       uart_update_timeout(port, new->c_cflag, baud);
+       bcm_uart_enable(port);
+       spin_unlock_irqrestore(&port->lock, flags);
+}
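
A worked example of the baud programming above, assuming a 100 MHz peripheral clock (so port->uartclk = 50 MHz after the divide-by-two in bcm_uart_probe()):

/*
 * baud = 115200
 * uart_get_divisor() ~ 50000000 / (16 * 115200) = 27 (rounded)
 * UART_BAUD_REG      = 27 - 1 = 26
 *
 * i.e. the register holds the 16x-oversampling divisor minus one, and
 * uart_get_baud_rate() caps the requested rate at uartclk / 16.
 */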
+
+/*
+ * serial core request to claim uart iomem
+ */
+static int bcm_uart_request_port(struct uart_port *port)
+{
+       unsigned int size;
+
+       size = RSET_UART_SIZE;
+       if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
+               dev_err(port->dev, "Memory region busy\n");
+               return -EBUSY;
+       }
+
+       port->membase = ioremap(port->mapbase, size);
+       if (!port->membase) {
+               dev_err(port->dev, "Unable to map registers\n");
+               release_mem_region(port->mapbase, size);
+               return -EBUSY;
+       }
+       return 0;
+}
+
+/*
+ * serial core request to release uart iomem
+ */
+static void bcm_uart_release_port(struct uart_port *port)
+{
+       release_mem_region(port->mapbase, RSET_UART_SIZE);
+       iounmap(port->membase);
+}
+
+/*
+ * serial core request to do any port required autoconfiguration
+ */
+static void bcm_uart_config_port(struct uart_port *port, int flags)
+{
+       if (flags & UART_CONFIG_TYPE) {
+               if (bcm_uart_request_port(port))
+                       return;
+               port->type = PORT_BCM63XX;
+       }
+}
+
+/*
+ * serial core request to check that port information in serinfo are
+ * suitable
+ */
+static int bcm_uart_verify_port(struct uart_port *port,
+                               struct serial_struct *serinfo)
+{
+       if (port->type != PORT_BCM63XX)
+               return -EINVAL;
+       if (port->irq != serinfo->irq)
+               return -EINVAL;
+       if (port->iotype != serinfo->io_type)
+               return -EINVAL;
+       if (port->mapbase != (unsigned long)serinfo->iomem_base)
+               return -EINVAL;
+       return 0;
+}
+
+/* serial core callbacks */
+static struct uart_ops bcm_uart_ops = {
+       .tx_empty       = bcm_uart_tx_empty,
+       .get_mctrl      = bcm_uart_get_mctrl,
+       .set_mctrl      = bcm_uart_set_mctrl,
+       .start_tx       = bcm_uart_start_tx,
+       .stop_tx        = bcm_uart_stop_tx,
+       .stop_rx        = bcm_uart_stop_rx,
+       .enable_ms      = bcm_uart_enable_ms,
+       .break_ctl      = bcm_uart_break_ctl,
+       .startup        = bcm_uart_startup,
+       .shutdown       = bcm_uart_shutdown,
+       .set_termios    = bcm_uart_set_termios,
+       .type           = bcm_uart_type,
+       .release_port   = bcm_uart_release_port,
+       .request_port   = bcm_uart_request_port,
+       .config_port    = bcm_uart_config_port,
+       .verify_port    = bcm_uart_verify_port,
+};
+
+
+
+#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE
+static inline void wait_for_xmitr(struct uart_port *port)
+{
+       unsigned int tmout;
+
+       /* Wait up to 10ms for the character(s) to be sent. */
+       tmout = 10000;
+       while (--tmout) {
+               unsigned int val;
+
+               val = bcm_uart_readl(port, UART_IR_REG);
+               if (val & UART_IR_STAT(UART_IR_TXEMPTY))
+                       break;
+               udelay(1);
+       }
+
+       /* Wait up to 1s for flow control if necessary */
+       if (port->flags & UPF_CONS_FLOW) {
+               tmout = 1000000;
+               while (--tmout) {
+                       unsigned int val;
+
+                       val = bcm_uart_readl(port, UART_EXTINP_REG);
+                       if (val & UART_EXTINP_CTS_MASK)
+                               break;
+                       udelay(1);
+               }
+       }
+}
+
+/*
+ * output given char
+ */
+static void bcm_console_putchar(struct uart_port *port, int ch)
+{
+       wait_for_xmitr(port);
+       bcm_uart_writel(port, ch, UART_FIFO_REG);
+}
+
+/*
+ * console core request to output given string
+ */
+static void bcm_console_write(struct console *co, const char *s,
+                             unsigned int count)
+{
+       struct uart_port *port;
+       unsigned long flags;
+       int locked;
+
+       port = &ports[co->index];
+
+       local_irq_save(flags);
+       if (port->sysrq) {
+               /* bcm_uart_interrupt() already took the lock */
+               locked = 0;
+       } else if (oops_in_progress) {
+               locked = spin_trylock(&port->lock);
+       } else {
+               spin_lock(&port->lock);
+               locked = 1;
+       }
+
+       /* call helper to deal with \r\n */
+       uart_console_write(port, s, count, bcm_console_putchar);
+
+       /* and wait for char to be transmitted */
+       wait_for_xmitr(port);
+
+       if (locked)
+               spin_unlock(&port->lock);
+       local_irq_restore(flags);
+}
+
+/*
+ * console core request to set up the given console: find the matching
+ * uart port and configure it.
+ */
+static int bcm_console_setup(struct console *co, char *options)
+{
+       struct uart_port *port;
+       int baud = 9600;
+       int bits = 8;
+       int parity = 'n';
+       int flow = 'n';
+
+       if (co->index < 0 || co->index >= BCM63XX_NR_UARTS)
+               return -EINVAL;
+       port = &ports[co->index];
+       if (!port->membase)
+               return -ENODEV;
+       if (options)
+               uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+       return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver bcm_uart_driver;
+
+static struct console bcm63xx_console = {
+       .name           = "ttyS",
+       .write          = bcm_console_write,
+       .device         = uart_console_device,
+       .setup          = bcm_console_setup,
+       .flags          = CON_PRINTBUFFER,
+       .index          = -1,
+       .data           = &bcm_uart_driver,
+};
+
+static int __init bcm63xx_console_init(void)
+{
+       register_console(&bcm63xx_console);
+       return 0;
+}
+
+console_initcall(bcm63xx_console_init);
+
+#define BCM63XX_CONSOLE        (&bcm63xx_console)
+#else
+#define BCM63XX_CONSOLE        NULL
+#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */
+
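+/* .cons below resolves to NULL when console support is compiled out */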
+static struct uart_driver bcm_uart_driver = {
+       .owner          = THIS_MODULE,
+       .driver_name    = "bcm63xx_uart",
+       .dev_name       = "ttyS",
+       .major          = TTY_MAJOR,
+       .minor          = 64,
+       .nr             = 1,
+       .cons           = BCM63XX_CONSOLE,
+};
+
+/*
+ * platform driver probe/remove callback
+ */
+static int __devinit bcm_uart_probe(struct platform_device *pdev)
+{
+       struct resource *res_mem, *res_irq;
+       struct uart_port *port;
+       struct clk *clk;
+       int ret;
+
+       if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
+               return -EINVAL;
+
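+       /* a non-NULL membase marks a slot already in use; remove() clears it again */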
+       if (ports[pdev->id].membase)
+               return -EBUSY;
+
+       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res_mem)
+               return -ENODEV;
+
+       res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res_irq)
+               return -ENODEV;
+
+       clk = clk_get(&pdev->dev, "periph");
+       if (IS_ERR(clk))
+               return -ENODEV;
+
+       port = &ports[pdev->id];
+       memset(port, 0, sizeof(*port));
+       port->iotype = UPIO_MEM;
+       port->mapbase = res_mem->start;
+       port->irq = res_irq->start;
+       port->ops = &bcm_uart_ops;
+       port->flags = UPF_BOOT_AUTOCONF;
+       port->dev = &pdev->dev;
+       port->fifosize = 16;
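+       /* the serial core computes baud divisors from uartclk: report half the "periph" clock rate */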
+       port->uartclk = clk_get_rate(clk) / 2;
+       clk_put(clk);
+
+       ret = uart_add_one_port(&bcm_uart_driver, port);
+       if (ret)
+               return ret;
+       platform_set_drvdata(pdev, port);
+       return 0;
+}
+
+static int __devexit bcm_uart_remove(struct platform_device *pdev)
+{
+       struct uart_port *port;
+
+       port = platform_get_drvdata(pdev);
+       uart_remove_one_port(&bcm_uart_driver, port);
+       platform_set_drvdata(pdev, NULL);
+       /* mark port as free */
+       ports[pdev->id].membase = 0;
+       return 0;
+}
+
+/*
+ * platform driver registration
+ */
+static struct platform_driver bcm_uart_platform_driver = {
+       .probe  = bcm_uart_probe,
+       .remove = __devexit_p(bcm_uart_remove),
+       .driver = {
+               .owner = THIS_MODULE,
+               .name  = "bcm63xx_uart",
+       },
+};
+
+static int __init bcm_uart_init(void)
+{
+       int ret;
+
+       ret = uart_register_driver(&bcm_uart_driver);
+       if (ret)
+               return ret;
+
+       ret = platform_driver_register(&bcm_uart_platform_driver);
+       if (ret)
+               uart_unregister_driver(&bcm_uart_driver);
+
+       return ret;
+}
+
+static void __exit bcm_uart_exit(void)
+{
+       platform_driver_unregister(&bcm_uart_platform_driver);
+       uart_unregister_driver(&bcm_uart_driver);
+}
+
+module_init(bcm_uart_init);
+module_exit(bcm_uart_exit);
+
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver");
+MODULE_LICENSE("GPL");
index 2d7feecaf492368294e92b884dc3fbd349f6a648..0028b6f89ce6aa7b48755e19c092e14b182da96e 100644 (file)
@@ -307,7 +307,7 @@ static void stop_processor(struct icom_port *icom_port)
        if (port < 4) {
                temp = readl(stop_proc[port].global_control_reg);
                temp =
-                       (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
+                       (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
                writel(temp, stop_proc[port].global_control_reg);
 
                /* write flush */
@@ -336,7 +336,7 @@ static void start_processor(struct icom_port *icom_port)
        if (port < 4) {
                temp = readl(start_proc[port].global_control_reg);
                temp =
-                       (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
+                       (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
                writel(temp, start_proc[port].global_control_reg);
 
                /* write flush */
@@ -509,8 +509,8 @@ static void load_code(struct icom_port *icom_port)
                dev_err(&icom_port->adapter->pci_dev->dev, "Port not operational\n");
        }
 
-      if (new_page != NULL)
-             pci_free_consistent(dev, 4096, new_page, temp_pci);
+       if (new_page != NULL)
+               pci_free_consistent(dev, 4096, new_page, temp_pci);
 }
 
 static int startup(struct icom_port *icom_port)
@@ -1493,15 +1493,15 @@ static int __devinit icom_probe(struct pci_dev *dev,
                                const struct pci_device_id *ent)
 {
        int index;
-        unsigned int command_reg;
-        int retval;
-        struct icom_adapter *icom_adapter;
-        struct icom_port *icom_port;
+       unsigned int command_reg;
+       int retval;
+       struct icom_adapter *icom_adapter;
+       struct icom_port *icom_port;
 
-        retval = pci_enable_device(dev);
-        if (retval) {
+       retval = pci_enable_device(dev);
+       if (retval) {
                dev_err(&dev->dev, "Device enable FAILED\n");
-                return retval;
+               return retval;
        }
 
        if ( (retval = pci_request_regions(dev, "icom"))) {
@@ -1510,23 +1510,23 @@ static int __devinit icom_probe(struct pci_dev *dev,
                 return retval;
         }
 
-        pci_set_master(dev);
+       pci_set_master(dev);
 
-        if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) {
+       if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) {
                dev_err(&dev->dev, "PCI Config read FAILED\n");
-                return retval;
-        }
+               return retval;
+       }
 
        pci_write_config_dword(dev, PCI_COMMAND,
                command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
                | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
 
-        if (ent->driver_data == ADAPTER_V1) {
+       if (ent->driver_data == ADAPTER_V1) {
                pci_write_config_dword(dev, 0x44, 0x8300830A);
-        } else {
+       } else {
                pci_write_config_dword(dev, 0x44, 0x42004200);
                pci_write_config_dword(dev, 0x48, 0x42004200);
-         }
+       }
 
 
        retval = icom_alloc_adapter(&icom_adapter);
@@ -1536,10 +1536,10 @@ static int __devinit icom_probe(struct pci_dev *dev,
                 goto probe_exit0;
        }
 
-        icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
-        icom_adapter->pci_dev = dev;
-        icom_adapter->version = ent->driver_data;
-        icom_adapter->subsystem_id = ent->subdevice;
+       icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
+       icom_adapter->pci_dev = dev;
+       icom_adapter->version = ent->driver_data;
+       icom_adapter->subsystem_id = ent->subdevice;
 
 
        retval = icom_init_ports(icom_adapter);
@@ -1548,7 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
                goto probe_exit1;
        }
 
-        icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
+       icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
 
        if (!icom_adapter->base_addr)
                goto probe_exit1;
@@ -1562,7 +1562,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
 
        retval = icom_load_ports(icom_adapter);
 
-        for (index = 0; index < icom_adapter->numb_ports; index++) {
+       for (index = 0; index < icom_adapter->numb_ports; index++) {
                icom_port = &icom_adapter->port_info[index];
 
                if (icom_port->status == ICOM_PORT_ACTIVE) {
@@ -1579,7 +1579,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
                                icom_port->status = ICOM_PORT_OFF;
                                dev_err(&dev->dev, "Device add failed\n");
                         } else
-                               dev_info(&dev->dev, "Device added\n");
+                               dev_info(&dev->dev, "Device added\n");
                }
        }
 
@@ -1595,9 +1595,7 @@ probe_exit0:
        pci_release_regions(dev);
        pci_disable_device(dev);
 
-        return retval;
-
-
+       return retval;
 }
 
 static void __devexit icom_remove(struct pci_dev *dev)
index 7f5e26873220b6f4b17fb5b6d34de3fbedd72d2a..2199d819a9870b2d8d52d4ab8bb6ae6832d56c00 100644 (file)
@@ -638,7 +638,7 @@ static void __init sa1100_init_ports(void)
        PPSR |= PPC_TXD1 | PPC_TXD3;
 }
 
-void __init sa1100_register_uart_fns(struct sa1100_port_fns *fns)
+void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns)
 {
        if (fns->get_mctrl)
                sa1100_pops.get_mctrl = fns->get_mctrl;
index a3bb49031a7f9b08373d7fee681f742bdb263c2c..ff4617e214267ac78071047b25e43aedfb220e86 100644 (file)
@@ -873,10 +873,10 @@ static struct pcmcia_device_id serial_ids[] = {
        PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
        PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
        PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
-       PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
-       PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
+       PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
+       PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
        PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
-       PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"),
+       PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"),
        PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
        PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
        PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"),  /* Sierra Wireless AC850 3G Network Adapter R1 */
@@ -884,9 +884,9 @@ static struct pcmcia_device_id serial_ids[] = {
        PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"),  /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
        PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"),  /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
        PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"),
-       PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "COMpad2.cis"),
-       PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"),
-       PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
+       PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"),
+       PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
+       PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
        PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
        PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
        PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100  1.00.",0x19ca78af,0xf964f42b),
index 0f7cf4c453e6edd0cfa79ea1f44fd3d40352ee87..c50e9fbbf743cbf5aa98f164afc759fd305e46d4 100644 (file)
@@ -221,21 +221,26 @@ sio_quot_set(struct uart_txx9_port *up, int quot)
                sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
 }
 
+static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port)
+{
+       return container_of(port, struct uart_txx9_port, port);
+}
+
 static void serial_txx9_stop_tx(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
 }
 
 static void serial_txx9_start_tx(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
 }
 
 static void serial_txx9_stop_rx(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        up->port.read_status_mask &= ~TXX9_SIDISR_RDIS;
 }
 
@@ -246,7 +251,7 @@ static void serial_txx9_enable_ms(struct uart_port *port)
 
 static void serial_txx9_initialize(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        unsigned int tmout = 10000;
 
        sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
@@ -414,7 +419,7 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
 
 static unsigned int serial_txx9_tx_empty(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        unsigned long flags;
        unsigned int ret;
 
@@ -427,7 +432,7 @@ static unsigned int serial_txx9_tx_empty(struct uart_port *port)
 
 static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        unsigned int ret;
 
        /* no modem control lines */
@@ -440,7 +445,7 @@ static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
 
 static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
 
        if (mctrl & TIOCM_RTS)
                sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
@@ -450,7 +455,7 @@ static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
 static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        unsigned long flags;
 
        spin_lock_irqsave(&up->port.lock, flags);
@@ -494,7 +499,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
 {
        unsigned int ier;
        unsigned char c;
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
 
        /*
         *      First save the IER then disable the interrupts
@@ -520,7 +525,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
 static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
 {
        unsigned int ier;
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
 
        /*
         *      First save the IER then disable the interrupts
@@ -551,7 +556,7 @@ static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
 
 static int serial_txx9_startup(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        unsigned long flags;
        int retval;
 
@@ -596,7 +601,7 @@ static int serial_txx9_startup(struct uart_port *port)
 
 static void serial_txx9_shutdown(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        unsigned long flags;
 
        /*
@@ -636,7 +641,7 @@ static void
 serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
                       struct ktermios *old)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        unsigned int cval, fcr = 0;
        unsigned long flags;
        unsigned int baud, quot;
@@ -814,19 +819,19 @@ static void serial_txx9_release_resource(struct uart_txx9_port *up)
 
 static void serial_txx9_release_port(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        serial_txx9_release_resource(up);
 }
 
 static int serial_txx9_request_port(struct uart_port *port)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        return serial_txx9_request_resource(up);
 }
 
 static void serial_txx9_config_port(struct uart_port *port, int uflags)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
        int ret;
 
        /*
@@ -897,7 +902,7 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv,
 
 static void serial_txx9_console_putchar(struct uart_port *port, int ch)
 {
-       struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+       struct uart_txx9_port *up = to_uart_txx9_port(port);
 
        wait_for_xmitr(up);
        sio_out(up, TXX9_SITFIFO, ch);
index d3b496800477b065b42b1b92a0a0b201e5503b78..b204a09291393406acd9450ba641d754fd0707ed 100644 (file)
@@ -90,7 +90,11 @@ static struct sfi_table_simple *syst_va __read_mostly;
  */
 static u32 sfi_use_ioremap __read_mostly;
 
-static void __iomem *sfi_map_memory(u64 phys, u32 size)
+/*
+ * sfi_un/map_memory() call early_ioremap()/iounmap(), which are __init
+ * functions and so introduce a section mismatch. Use __ref to silence
+ * the warning.
+ */
+static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
 {
        if (!phys || !size)
                return NULL;
@@ -101,7 +105,7 @@ static void __iomem *sfi_map_memory(u64 phys, u32 size)
                return early_ioremap(phys, size);
 }
 
-static void sfi_unmap_memory(void __iomem *virt, u32 size)
+static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
 {
        if (!virt || !size)
                return;
@@ -125,7 +129,7 @@ static void sfi_print_table_header(unsigned long long pa,
  * sfi_verify_table()
  * Sanity check table length, calculate checksum
  */
-static __init int sfi_verify_table(struct sfi_table_header *table)
+static int sfi_verify_table(struct sfi_table_header *table)
 {
 
        u8 checksum = 0;
@@ -213,12 +217,17 @@ static int sfi_table_check_key(struct sfi_table_header *th,
  *    the mapped virt address will be returned, and the virt space
  *    will be released by call sfi_put_table() later
  *
+ * These two cases come from two different functions in two different
+ * sections, which causes a section mismatch warning. So use __ref to
+ * tell modpost not to complain.
+ *
  * Return value:
  *     NULL:                   when can't find a table matching the key
  *     ERR_PTR(error):         error value
  *     virt table address:     when a matched table is found
  */
-struct sfi_table_header *sfi_check_table(u64 pa, struct sfi_table_key *key)
+struct sfi_table_header *
+ __ref sfi_check_table(u64 pa, struct sfi_table_key *key)
 {
        struct sfi_table_header *th;
        void *ret = NULL;
index 6d7a3f82c54b400e3d51adf59b3189857e699833..21a118269cac1feb030b23977eb6dd454740345b 100644 (file)
@@ -17,7 +17,7 @@ obj-$(CONFIG_SPI_BITBANG)             += spi_bitbang.o
 obj-$(CONFIG_SPI_AU1550)               += au1550_spi.o
 obj-$(CONFIG_SPI_BUTTERFLY)            += spi_butterfly.o
 obj-$(CONFIG_SPI_GPIO)                 += spi_gpio.o
-obj-$(CONFIG_SPI_IMX)                  += mxc_spi.o
+obj-$(CONFIG_SPI_IMX)                  += spi_imx.o
 obj-$(CONFIG_SPI_LM70_LLP)             += spi_lm70llp.o
 obj-$(CONFIG_SPI_PXA2XX)               += pxa2xx_spi.o
 obj-$(CONFIG_SPI_OMAP_UWIRE)           += omap_uwire.o
diff --git a/drivers/spi/mxc_spi.c b/drivers/spi/mxc_spi.c
deleted file mode 100644 (file)
index b144723..0000000
+++ /dev/null
@@ -1,685 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2008 Juergen Beisert
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation
- * 51 Franklin Street, Fifth Floor
- * Boston, MA  02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-#include <linux/types.h>
-
-#include <mach/spi.h>
-
-#define DRIVER_NAME "spi_imx"
-
-#define MXC_CSPIRXDATA         0x00
-#define MXC_CSPITXDATA         0x04
-#define MXC_CSPICTRL           0x08
-#define MXC_CSPIINT            0x0c
-#define MXC_RESET              0x1c
-
-/* generic defines to abstract from the different register layouts */
-#define MXC_INT_RR     (1 << 0) /* Receive data ready interrupt */
-#define MXC_INT_TE     (1 << 1) /* Transmit FIFO empty interrupt */
-
-struct mxc_spi_config {
-       unsigned int speed_hz;
-       unsigned int bpw;
-       unsigned int mode;
-       int cs;
-};
-
-struct mxc_spi_data {
-       struct spi_bitbang bitbang;
-
-       struct completion xfer_done;
-       void *base;
-       int irq;
-       struct clk *clk;
-       unsigned long spi_clk;
-       int *chipselect;
-
-       unsigned int count;
-       void (*tx)(struct mxc_spi_data *);
-       void (*rx)(struct mxc_spi_data *);
-       void *rx_buf;
-       const void *tx_buf;
-       unsigned int txfifo; /* number of words pushed in tx FIFO */
-
-       /* SoC specific functions */
-       void (*intctrl)(struct mxc_spi_data *, int);
-       int (*config)(struct mxc_spi_data *, struct mxc_spi_config *);
-       void (*trigger)(struct mxc_spi_data *);
-       int (*rx_available)(struct mxc_spi_data *);
-};
-
-#define MXC_SPI_BUF_RX(type)                                           \
-static void mxc_spi_buf_rx_##type(struct mxc_spi_data *mxc_spi)                \
-{                                                                      \
-       unsigned int val = readl(mxc_spi->base + MXC_CSPIRXDATA);       \
-                                                                       \
-       if (mxc_spi->rx_buf) {                                          \
-               *(type *)mxc_spi->rx_buf = val;                         \
-               mxc_spi->rx_buf += sizeof(type);                        \
-       }                                                               \
-}
-
-#define MXC_SPI_BUF_TX(type)                                           \
-static void mxc_spi_buf_tx_##type(struct mxc_spi_data *mxc_spi)                \
-{                                                                      \
-       type val = 0;                                                   \
-                                                                       \
-       if (mxc_spi->tx_buf) {                                          \
-               val = *(type *)mxc_spi->tx_buf;                         \
-               mxc_spi->tx_buf += sizeof(type);                        \
-       }                                                               \
-                                                                       \
-       mxc_spi->count -= sizeof(type);                                 \
-                                                                       \
-       writel(val, mxc_spi->base + MXC_CSPITXDATA);                    \
-}
-
-MXC_SPI_BUF_RX(u8)
-MXC_SPI_BUF_TX(u8)
-MXC_SPI_BUF_RX(u16)
-MXC_SPI_BUF_TX(u16)
-MXC_SPI_BUF_RX(u32)
-MXC_SPI_BUF_TX(u32)
-
-/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
- * (which is currently not the case in this driver)
- */
-static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
-       256, 384, 512, 768, 1024};
-
-/* MX21, MX27 */
-static unsigned int mxc_spi_clkdiv_1(unsigned int fin,
-               unsigned int fspi)
-{
-       int i, max;
-
-       if (cpu_is_mx21())
-               max = 18;
-       else
-               max = 16;
-
-       for (i = 2; i < max; i++)
-               if (fspi * mxc_clkdivs[i] >= fin)
-                       return i;
-
-       return max;
-}
-
-/* MX1, MX31, MX35 */
-static unsigned int mxc_spi_clkdiv_2(unsigned int fin,
-               unsigned int fspi)
-{
-       int i, div = 4;
-
-       for (i = 0; i < 7; i++) {
-               if (fspi * div >= fin)
-                       return i;
-               div <<= 1;
-       }
-
-       return 7;
-}
-
-#define MX31_INTREG_TEEN       (1 << 0)
-#define MX31_INTREG_RREN       (1 << 3)
-
-#define MX31_CSPICTRL_ENABLE   (1 << 0)
-#define MX31_CSPICTRL_MASTER   (1 << 1)
-#define MX31_CSPICTRL_XCH      (1 << 2)
-#define MX31_CSPICTRL_POL      (1 << 4)
-#define MX31_CSPICTRL_PHA      (1 << 5)
-#define MX31_CSPICTRL_SSCTL    (1 << 6)
-#define MX31_CSPICTRL_SSPOL    (1 << 7)
-#define MX31_CSPICTRL_BC_SHIFT 8
-#define MX35_CSPICTRL_BL_SHIFT 20
-#define MX31_CSPICTRL_CS_SHIFT 24
-#define MX35_CSPICTRL_CS_SHIFT 12
-#define MX31_CSPICTRL_DR_SHIFT 16
-
-#define MX31_CSPISTATUS                0x14
-#define MX31_STATUS_RR         (1 << 3)
-
-/* These functions also work for the i.MX35, but be aware that
- * the i.MX35 has a slightly different register layout for bits
- * we do not use here.
- */
-static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable)
-{
-       unsigned int val = 0;
-
-       if (enable & MXC_INT_TE)
-               val |= MX31_INTREG_TEEN;
-       if (enable & MXC_INT_RR)
-               val |= MX31_INTREG_RREN;
-
-       writel(val, mxc_spi->base + MXC_CSPIINT);
-}
-
-static void mx31_trigger(struct mxc_spi_data *mxc_spi)
-{
-       unsigned int reg;
-
-       reg = readl(mxc_spi->base + MXC_CSPICTRL);
-       reg |= MX31_CSPICTRL_XCH;
-       writel(reg, mxc_spi->base + MXC_CSPICTRL);
-}
-
-static int mx31_config(struct mxc_spi_data *mxc_spi,
-               struct mxc_spi_config *config)
-{
-       unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
-
-       reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) <<
-               MX31_CSPICTRL_DR_SHIFT;
-
-       if (cpu_is_mx31())
-               reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
-       else if (cpu_is_mx35()) {
-               reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
-               reg |= MX31_CSPICTRL_SSCTL;
-       }
-
-       if (config->mode & SPI_CPHA)
-               reg |= MX31_CSPICTRL_PHA;
-       if (config->mode & SPI_CPOL)
-               reg |= MX31_CSPICTRL_POL;
-       if (config->mode & SPI_CS_HIGH)
-               reg |= MX31_CSPICTRL_SSPOL;
-       if (config->cs < 0) {
-               if (cpu_is_mx31())
-                       reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT;
-               else if (cpu_is_mx35())
-                       reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
-       }
-
-       writel(reg, mxc_spi->base + MXC_CSPICTRL);
-
-       return 0;
-}
-
-static int mx31_rx_available(struct mxc_spi_data *mxc_spi)
-{
-       return readl(mxc_spi->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
-}
-
-#define MX27_INTREG_RR         (1 << 4)
-#define MX27_INTREG_TEEN       (1 << 9)
-#define MX27_INTREG_RREN       (1 << 13)
-
-#define MX27_CSPICTRL_POL      (1 << 5)
-#define MX27_CSPICTRL_PHA      (1 << 6)
-#define MX27_CSPICTRL_SSPOL    (1 << 8)
-#define MX27_CSPICTRL_XCH      (1 << 9)
-#define MX27_CSPICTRL_ENABLE   (1 << 10)
-#define MX27_CSPICTRL_MASTER   (1 << 11)
-#define MX27_CSPICTRL_DR_SHIFT 14
-#define MX27_CSPICTRL_CS_SHIFT 19
-
-static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable)
-{
-       unsigned int val = 0;
-
-       if (enable & MXC_INT_TE)
-               val |= MX27_INTREG_TEEN;
-       if (enable & MXC_INT_RR)
-               val |= MX27_INTREG_RREN;
-
-       writel(val, mxc_spi->base + MXC_CSPIINT);
-}
-
-static void mx27_trigger(struct mxc_spi_data *mxc_spi)
-{
-       unsigned int reg;
-
-       reg = readl(mxc_spi->base + MXC_CSPICTRL);
-       reg |= MX27_CSPICTRL_XCH;
-       writel(reg, mxc_spi->base + MXC_CSPICTRL);
-}
-
-static int mx27_config(struct mxc_spi_data *mxc_spi,
-               struct mxc_spi_config *config)
-{
-       unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
-
-       reg |= mxc_spi_clkdiv_1(mxc_spi->spi_clk, config->speed_hz) <<
-               MX27_CSPICTRL_DR_SHIFT;
-       reg |= config->bpw - 1;
-
-       if (config->mode & SPI_CPHA)
-               reg |= MX27_CSPICTRL_PHA;
-       if (config->mode & SPI_CPOL)
-               reg |= MX27_CSPICTRL_POL;
-       if (config->mode & SPI_CS_HIGH)
-               reg |= MX27_CSPICTRL_SSPOL;
-       if (config->cs < 0)
-               reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT;
-
-       writel(reg, mxc_spi->base + MXC_CSPICTRL);
-
-       return 0;
-}
-
-static int mx27_rx_available(struct mxc_spi_data *mxc_spi)
-{
-       return readl(mxc_spi->base + MXC_CSPIINT) & MX27_INTREG_RR;
-}
-
-#define MX1_INTREG_RR          (1 << 3)
-#define MX1_INTREG_TEEN                (1 << 8)
-#define MX1_INTREG_RREN                (1 << 11)
-
-#define MX1_CSPICTRL_POL       (1 << 4)
-#define MX1_CSPICTRL_PHA       (1 << 5)
-#define MX1_CSPICTRL_XCH       (1 << 8)
-#define MX1_CSPICTRL_ENABLE    (1 << 9)
-#define MX1_CSPICTRL_MASTER    (1 << 10)
-#define MX1_CSPICTRL_DR_SHIFT  13
-
-static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable)
-{
-       unsigned int val = 0;
-
-       if (enable & MXC_INT_TE)
-               val |= MX1_INTREG_TEEN;
-       if (enable & MXC_INT_RR)
-               val |= MX1_INTREG_RREN;
-
-       writel(val, mxc_spi->base + MXC_CSPIINT);
-}
-
-static void mx1_trigger(struct mxc_spi_data *mxc_spi)
-{
-       unsigned int reg;
-
-       reg = readl(mxc_spi->base + MXC_CSPICTRL);
-       reg |= MX1_CSPICTRL_XCH;
-       writel(reg, mxc_spi->base + MXC_CSPICTRL);
-}
-
-static int mx1_config(struct mxc_spi_data *mxc_spi,
-               struct mxc_spi_config *config)
-{
-       unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
-
-       reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) <<
-               MX1_CSPICTRL_DR_SHIFT;
-       reg |= config->bpw - 1;
-
-       if (config->mode & SPI_CPHA)
-               reg |= MX1_CSPICTRL_PHA;
-       if (config->mode & SPI_CPOL)
-               reg |= MX1_CSPICTRL_POL;
-
-       writel(reg, mxc_spi->base + MXC_CSPICTRL);
-
-       return 0;
-}
-
-static int mx1_rx_available(struct mxc_spi_data *mxc_spi)
-{
-       return readl(mxc_spi->base + MXC_CSPIINT) & MX1_INTREG_RR;
-}
-
-static void mxc_spi_chipselect(struct spi_device *spi, int is_active)
-{
-       struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master);
-       unsigned int cs = 0;
-       int gpio = mxc_spi->chipselect[spi->chip_select];
-       struct mxc_spi_config config;
-
-       if (spi->mode & SPI_CS_HIGH)
-               cs = 1;
-
-       if (is_active == BITBANG_CS_INACTIVE) {
-               if (gpio >= 0)
-                       gpio_set_value(gpio, !cs);
-               return;
-       }
-
-       config.bpw = spi->bits_per_word;
-       config.speed_hz = spi->max_speed_hz;
-       config.mode = spi->mode;
-       config.cs = mxc_spi->chipselect[spi->chip_select];
-
-       mxc_spi->config(mxc_spi, &config);
-
-       /* Initialize the functions for transfer */
-       if (config.bpw <= 8) {
-               mxc_spi->rx = mxc_spi_buf_rx_u8;
-               mxc_spi->tx = mxc_spi_buf_tx_u8;
-       } else if (config.bpw <= 16) {
-               mxc_spi->rx = mxc_spi_buf_rx_u16;
-               mxc_spi->tx = mxc_spi_buf_tx_u16;
-       } else if (config.bpw <= 32) {
-               mxc_spi->rx = mxc_spi_buf_rx_u32;
-               mxc_spi->tx = mxc_spi_buf_tx_u32;
-       } else
-               BUG();
-
-       if (gpio >= 0)
-               gpio_set_value(gpio, cs);
-
-       return;
-}
-
-static void mxc_spi_push(struct mxc_spi_data *mxc_spi)
-{
-       while (mxc_spi->txfifo < 8) {
-               if (!mxc_spi->count)
-                       break;
-               mxc_spi->tx(mxc_spi);
-               mxc_spi->txfifo++;
-       }
-
-       mxc_spi->trigger(mxc_spi);
-}
-
-static irqreturn_t mxc_spi_isr(int irq, void *dev_id)
-{
-       struct mxc_spi_data *mxc_spi = dev_id;
-
-       while (mxc_spi->rx_available(mxc_spi)) {
-               mxc_spi->rx(mxc_spi);
-               mxc_spi->txfifo--;
-       }
-
-       if (mxc_spi->count) {
-               mxc_spi_push(mxc_spi);
-               return IRQ_HANDLED;
-       }
-
-       if (mxc_spi->txfifo) {
-               /* No data left to push, but still waiting for rx data,
-                * enable receive data available interrupt.
-                */
-               mxc_spi->intctrl(mxc_spi, MXC_INT_RR);
-               return IRQ_HANDLED;
-       }
-
-       mxc_spi->intctrl(mxc_spi, 0);
-       complete(&mxc_spi->xfer_done);
-
-       return IRQ_HANDLED;
-}
-
-static int mxc_spi_setupxfer(struct spi_device *spi,
-                                struct spi_transfer *t)
-{
-       struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master);
-       struct mxc_spi_config config;
-
-       config.bpw = t ? t->bits_per_word : spi->bits_per_word;
-       config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
-       config.mode = spi->mode;
-
-       mxc_spi->config(mxc_spi, &config);
-
-       return 0;
-}
-
-static int mxc_spi_transfer(struct spi_device *spi,
-                               struct spi_transfer *transfer)
-{
-       struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master);
-
-       mxc_spi->tx_buf = transfer->tx_buf;
-       mxc_spi->rx_buf = transfer->rx_buf;
-       mxc_spi->count = transfer->len;
-       mxc_spi->txfifo = 0;
-
-       init_completion(&mxc_spi->xfer_done);
-
-       mxc_spi_push(mxc_spi);
-
-       mxc_spi->intctrl(mxc_spi, MXC_INT_TE);
-
-       wait_for_completion(&mxc_spi->xfer_done);
-
-       return transfer->len;
-}
-
-static int mxc_spi_setup(struct spi_device *spi)
-{
-       if (!spi->bits_per_word)
-               spi->bits_per_word = 8;
-
-       pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
-                spi->mode, spi->bits_per_word, spi->max_speed_hz);
-
-       mxc_spi_chipselect(spi, BITBANG_CS_INACTIVE);
-
-       return 0;
-}
-
-static void mxc_spi_cleanup(struct spi_device *spi)
-{
-}
-
-static int __init mxc_spi_probe(struct platform_device *pdev)
-{
-       struct spi_imx_master *mxc_platform_info;
-       struct spi_master *master;
-       struct mxc_spi_data *mxc_spi;
-       struct resource *res;
-       int i, ret;
-
-       mxc_platform_info = (struct spi_imx_master *)pdev->dev.platform_data;
-       if (!mxc_platform_info) {
-               dev_err(&pdev->dev, "can't get the platform data\n");
-               return -EINVAL;
-       }
-
-       master = spi_alloc_master(&pdev->dev, sizeof(struct mxc_spi_data));
-       if (!master)
-               return -ENOMEM;
-
-       platform_set_drvdata(pdev, master);
-
-       master->bus_num = pdev->id;
-       master->num_chipselect = mxc_platform_info->num_chipselect;
-
-       mxc_spi = spi_master_get_devdata(master);
-       mxc_spi->bitbang.master = spi_master_get(master);
-       mxc_spi->chipselect = mxc_platform_info->chipselect;
-
-       for (i = 0; i < master->num_chipselect; i++) {
-               if (mxc_spi->chipselect[i] < 0)
-                       continue;
-               ret = gpio_request(mxc_spi->chipselect[i], DRIVER_NAME);
-               if (ret) {
-                       i--;
-                       while (i > 0)
-                               if (mxc_spi->chipselect[i] >= 0)
-                                       gpio_free(mxc_spi->chipselect[i--]);
-                       dev_err(&pdev->dev, "can't get cs gpios");
-                       goto out_master_put;
-               }
-               gpio_direction_output(mxc_spi->chipselect[i], 1);
-       }
-
-       mxc_spi->bitbang.chipselect = mxc_spi_chipselect;
-       mxc_spi->bitbang.setup_transfer = mxc_spi_setupxfer;
-       mxc_spi->bitbang.txrx_bufs = mxc_spi_transfer;
-       mxc_spi->bitbang.master->setup = mxc_spi_setup;
-       mxc_spi->bitbang.master->cleanup = mxc_spi_cleanup;
-
-       init_completion(&mxc_spi->xfer_done);
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "can't get platform resource\n");
-               ret = -ENOMEM;
-               goto out_gpio_free;
-       }
-
-       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
-               dev_err(&pdev->dev, "request_mem_region failed\n");
-               ret = -EBUSY;
-               goto out_gpio_free;
-       }
-
-       mxc_spi->base = ioremap(res->start, resource_size(res));
-       if (!mxc_spi->base) {
-               ret = -EINVAL;
-               goto out_release_mem;
-       }
-
-       mxc_spi->irq = platform_get_irq(pdev, 0);
-       if (!mxc_spi->irq) {
-               ret = -EINVAL;
-               goto out_iounmap;
-       }
-
-       ret = request_irq(mxc_spi->irq, mxc_spi_isr, 0, DRIVER_NAME, mxc_spi);
-       if (ret) {
-               dev_err(&pdev->dev, "can't get irq%d: %d\n", mxc_spi->irq, ret);
-               goto out_iounmap;
-       }
-
-       if (cpu_is_mx31() || cpu_is_mx35()) {
-               mxc_spi->intctrl = mx31_intctrl;
-               mxc_spi->config = mx31_config;
-               mxc_spi->trigger = mx31_trigger;
-               mxc_spi->rx_available = mx31_rx_available;
-       } else  if (cpu_is_mx27() || cpu_is_mx21()) {
-               mxc_spi->intctrl = mx27_intctrl;
-               mxc_spi->config = mx27_config;
-               mxc_spi->trigger = mx27_trigger;
-               mxc_spi->rx_available = mx27_rx_available;
-       } else if (cpu_is_mx1()) {
-               mxc_spi->intctrl = mx1_intctrl;
-               mxc_spi->config = mx1_config;
-               mxc_spi->trigger = mx1_trigger;
-               mxc_spi->rx_available = mx1_rx_available;
-       } else
-               BUG();
-
-       mxc_spi->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(mxc_spi->clk)) {
-               dev_err(&pdev->dev, "unable to get clock\n");
-               ret = PTR_ERR(mxc_spi->clk);
-               goto out_free_irq;
-       }
-
-       clk_enable(mxc_spi->clk);
-       mxc_spi->spi_clk = clk_get_rate(mxc_spi->clk);
-
-       if (!cpu_is_mx31() || !cpu_is_mx35())
-               writel(1, mxc_spi->base + MXC_RESET);
-
-       mxc_spi->intctrl(mxc_spi, 0);
-
-       ret = spi_bitbang_start(&mxc_spi->bitbang);
-       if (ret) {
-               dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
-               goto out_clk_put;
-       }
-
-       dev_info(&pdev->dev, "probed\n");
-
-       return ret;
-
-out_clk_put:
-       clk_disable(mxc_spi->clk);
-       clk_put(mxc_spi->clk);
-out_free_irq:
-       free_irq(mxc_spi->irq, mxc_spi);
-out_iounmap:
-       iounmap(mxc_spi->base);
-out_release_mem:
-       release_mem_region(res->start, resource_size(res));
-out_gpio_free:
-       for (i = 0; i < master->num_chipselect; i++)
-               if (mxc_spi->chipselect[i] >= 0)
-                       gpio_free(mxc_spi->chipselect[i]);
-out_master_put:
-       spi_master_put(master);
-       kfree(master);
-       platform_set_drvdata(pdev, NULL);
-       return ret;
-}
-
-static int __exit mxc_spi_remove(struct platform_device *pdev)
-{
-       struct spi_master *master = platform_get_drvdata(pdev);
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       struct mxc_spi_data *mxc_spi = spi_master_get_devdata(master);
-       int i;
-
-       spi_bitbang_stop(&mxc_spi->bitbang);
-
-       writel(0, mxc_spi->base + MXC_CSPICTRL);
-       clk_disable(mxc_spi->clk);
-       clk_put(mxc_spi->clk);
-       free_irq(mxc_spi->irq, mxc_spi);
-       iounmap(mxc_spi->base);
-
-       for (i = 0; i < master->num_chipselect; i++)
-               if (mxc_spi->chipselect[i] >= 0)
-                       gpio_free(mxc_spi->chipselect[i]);
-
-       spi_master_put(master);
-
-       release_mem_region(res->start, resource_size(res));
-
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static struct platform_driver mxc_spi_driver = {
-       .driver = {
-                  .name = DRIVER_NAME,
-                  .owner = THIS_MODULE,
-                  },
-       .probe = mxc_spi_probe,
-       .remove = __exit_p(mxc_spi_remove),
-};
-
-static int __init mxc_spi_init(void)
-{
-       return platform_driver_register(&mxc_spi_driver);
-}
-
-static void __exit mxc_spi_exit(void)
-{
-       platform_driver_unregister(&mxc_spi_driver);
-}
-
-module_init(mxc_spi_init);
-module_exit(mxc_spi_exit);
-
-MODULE_DESCRIPTION("SPI Master Controller driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
new file mode 100644 (file)
index 0000000..89c22ef
--- /dev/null
@@ -0,0 +1,680 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Juergen Beisert
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+
+#include <mach/spi.h>
+
+#define DRIVER_NAME "spi_imx"
+
+#define MXC_CSPIRXDATA         0x00
+#define MXC_CSPITXDATA         0x04
+#define MXC_CSPICTRL           0x08
+#define MXC_CSPIINT            0x0c
+#define MXC_RESET              0x1c
+
+/* generic defines to abstract from the different register layouts */
+#define MXC_INT_RR     (1 << 0) /* Receive data ready interrupt */
+#define MXC_INT_TE     (1 << 1) /* Transmit FIFO empty interrupt */
+
+struct spi_imx_config {
+       unsigned int speed_hz;
+       unsigned int bpw;
+       unsigned int mode;
+       int cs;
+};
+
+struct spi_imx_data {
+       struct spi_bitbang bitbang;
+
+       struct completion xfer_done;
+       void *base;
+       int irq;
+       struct clk *clk;
+       unsigned long spi_clk;
+       int *chipselect;
+
+       unsigned int count;
+       void (*tx)(struct spi_imx_data *);
+       void (*rx)(struct spi_imx_data *);
+       void *rx_buf;
+       const void *tx_buf;
+       unsigned int txfifo; /* number of words pushed in tx FIFO */
+
+       /* SoC specific functions */
+       void (*intctrl)(struct spi_imx_data *, int);
+       int (*config)(struct spi_imx_data *, struct spi_imx_config *);
+       void (*trigger)(struct spi_imx_data *);
+       int (*rx_available)(struct spi_imx_data *);
+};
+
+#define MXC_SPI_BUF_RX(type)                                           \
+static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)                \
+{                                                                      \
+       unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);       \
+                                                                       \
+       if (spi_imx->rx_buf) {                                          \
+               *(type *)spi_imx->rx_buf = val;                         \
+               spi_imx->rx_buf += sizeof(type);                        \
+       }                                                               \
+}
+
+#define MXC_SPI_BUF_TX(type)                                           \
+static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)                \
+{                                                                      \
+       type val = 0;                                                   \
+                                                                       \
+       if (spi_imx->tx_buf) {                                          \
+               val = *(type *)spi_imx->tx_buf;                         \
+               spi_imx->tx_buf += sizeof(type);                        \
+       }                                                               \
+                                                                       \
+       spi_imx->count -= sizeof(type);                                 \
+                                                                       \
+       writel(val, spi_imx->base + MXC_CSPITXDATA);                    \
+}
+
+MXC_SPI_BUF_RX(u8)
+MXC_SPI_BUF_TX(u8)
+MXC_SPI_BUF_RX(u16)
+MXC_SPI_BUF_TX(u16)
+MXC_SPI_BUF_RX(u32)
+MXC_SPI_BUF_TX(u32)
+
+/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
+ * (which is currently not the case in this driver)
+ */
+static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
+       256, 384, 512, 768, 1024};
+
+/* MX21, MX27 */
+static unsigned int spi_imx_clkdiv_1(unsigned int fin,
+               unsigned int fspi)
+{
+       int i, max;
+
+       if (cpu_is_mx21())
+               max = 18;
+       else
+               max = 16;
+
+       for (i = 2; i < max; i++)
+               if (fspi * mxc_clkdivs[i] >= fin)
+                       return i;
+
+       return max;
+}
+
+/* MX1, MX31, MX35 */
+static unsigned int spi_imx_clkdiv_2(unsigned int fin,
+               unsigned int fspi)
+{
+       int i, div = 4;
+
+       for (i = 0; i < 7; i++) {
+               if (fspi * div >= fin)
+                       return i;
+               div <<= 1;
+       }
+
+       return 7;
+}
+
+#define MX31_INTREG_TEEN       (1 << 0)
+#define MX31_INTREG_RREN       (1 << 3)
+
+#define MX31_CSPICTRL_ENABLE   (1 << 0)
+#define MX31_CSPICTRL_MASTER   (1 << 1)
+#define MX31_CSPICTRL_XCH      (1 << 2)
+#define MX31_CSPICTRL_POL      (1 << 4)
+#define MX31_CSPICTRL_PHA      (1 << 5)
+#define MX31_CSPICTRL_SSCTL    (1 << 6)
+#define MX31_CSPICTRL_SSPOL    (1 << 7)
+#define MX31_CSPICTRL_BC_SHIFT 8
+#define MX35_CSPICTRL_BL_SHIFT 20
+#define MX31_CSPICTRL_CS_SHIFT 24
+#define MX35_CSPICTRL_CS_SHIFT 12
+#define MX31_CSPICTRL_DR_SHIFT 16
+
+#define MX31_CSPISTATUS                0x14
+#define MX31_STATUS_RR         (1 << 3)
+
+/* These functions also work for the i.MX35, but be aware that
+ * the i.MX35 has a slightly different register layout for bits
+ * we do not use here.
+ */
+static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+       unsigned int val = 0;
+
+       if (enable & MXC_INT_TE)
+               val |= MX31_INTREG_TEEN;
+       if (enable & MXC_INT_RR)
+               val |= MX31_INTREG_RREN;
+
+       writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void mx31_trigger(struct spi_imx_data *spi_imx)
+{
+       unsigned int reg;
+
+       reg = readl(spi_imx->base + MXC_CSPICTRL);
+       reg |= MX31_CSPICTRL_XCH;
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int mx31_config(struct spi_imx_data *spi_imx,
+               struct spi_imx_config *config)
+{
+       unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
+
+       reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
+               MX31_CSPICTRL_DR_SHIFT;
+
+       if (cpu_is_mx31())
+               reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
+       else if (cpu_is_mx35()) {
+               reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
+               reg |= MX31_CSPICTRL_SSCTL;
+       }
+
+       if (config->mode & SPI_CPHA)
+               reg |= MX31_CSPICTRL_PHA;
+       if (config->mode & SPI_CPOL)
+               reg |= MX31_CSPICTRL_POL;
+       if (config->mode & SPI_CS_HIGH)
+               reg |= MX31_CSPICTRL_SSPOL;
+       if (config->cs < 0) {
+               if (cpu_is_mx31())
+                       reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT;
+               else if (cpu_is_mx35())
+                       reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
+       }
+
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+       return 0;
+}
+
+static int mx31_rx_available(struct spi_imx_data *spi_imx)
+{
+       return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
+}
+
+#define MX27_INTREG_RR         (1 << 4)
+#define MX27_INTREG_TEEN       (1 << 9)
+#define MX27_INTREG_RREN       (1 << 13)
+
+#define MX27_CSPICTRL_POL      (1 << 5)
+#define MX27_CSPICTRL_PHA      (1 << 6)
+#define MX27_CSPICTRL_SSPOL    (1 << 8)
+#define MX27_CSPICTRL_XCH      (1 << 9)
+#define MX27_CSPICTRL_ENABLE   (1 << 10)
+#define MX27_CSPICTRL_MASTER   (1 << 11)
+#define MX27_CSPICTRL_DR_SHIFT 14
+#define MX27_CSPICTRL_CS_SHIFT 19
+
+static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+       unsigned int val = 0;
+
+       if (enable & MXC_INT_TE)
+               val |= MX27_INTREG_TEEN;
+       if (enable & MXC_INT_RR)
+               val |= MX27_INTREG_RREN;
+
+       writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void mx27_trigger(struct spi_imx_data *spi_imx)
+{
+       unsigned int reg;
+
+       reg = readl(spi_imx->base + MXC_CSPICTRL);
+       reg |= MX27_CSPICTRL_XCH;
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int mx27_config(struct spi_imx_data *spi_imx,
+               struct spi_imx_config *config)
+{
+       unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
+
+       reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) <<
+               MX27_CSPICTRL_DR_SHIFT;
+       reg |= config->bpw - 1;
+
+       if (config->mode & SPI_CPHA)
+               reg |= MX27_CSPICTRL_PHA;
+       if (config->mode & SPI_CPOL)
+               reg |= MX27_CSPICTRL_POL;
+       if (config->mode & SPI_CS_HIGH)
+               reg |= MX27_CSPICTRL_SSPOL;
+       if (config->cs < 0)
+               reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT;
+
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+       return 0;
+}
+
+static int mx27_rx_available(struct spi_imx_data *spi_imx)
+{
+       return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR;
+}
+
+#define MX1_INTREG_RR          (1 << 3)
+#define MX1_INTREG_TEEN                (1 << 8)
+#define MX1_INTREG_RREN                (1 << 11)
+
+#define MX1_CSPICTRL_POL       (1 << 4)
+#define MX1_CSPICTRL_PHA       (1 << 5)
+#define MX1_CSPICTRL_XCH       (1 << 8)
+#define MX1_CSPICTRL_ENABLE    (1 << 9)
+#define MX1_CSPICTRL_MASTER    (1 << 10)
+#define MX1_CSPICTRL_DR_SHIFT  13
+
+static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+       unsigned int val = 0;
+
+       if (enable & MXC_INT_TE)
+               val |= MX1_INTREG_TEEN;
+       if (enable & MXC_INT_RR)
+               val |= MX1_INTREG_RREN;
+
+       writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void mx1_trigger(struct spi_imx_data *spi_imx)
+{
+       unsigned int reg;
+
+       reg = readl(spi_imx->base + MXC_CSPICTRL);
+       reg |= MX1_CSPICTRL_XCH;
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int mx1_config(struct spi_imx_data *spi_imx,
+               struct spi_imx_config *config)
+{
+       unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
+
+       reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
+               MX1_CSPICTRL_DR_SHIFT;
+       reg |= config->bpw - 1;
+
+       if (config->mode & SPI_CPHA)
+               reg |= MX1_CSPICTRL_PHA;
+       if (config->mode & SPI_CPOL)
+               reg |= MX1_CSPICTRL_POL;
+
+       writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+       return 0;
+}
+
+static int mx1_rx_available(struct spi_imx_data *spi_imx)
+{
+       return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
+}
+
+static void spi_imx_chipselect(struct spi_device *spi, int is_active)
+{
+       struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+       int gpio = spi_imx->chipselect[spi->chip_select];
+       int active = is_active != BITBANG_CS_INACTIVE;
+       int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
+
+       if (gpio < 0)
+               return;
+
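+       /* XOR gives the CS level: low when selecting an active-low device, high when deselecting (inverted for SPI_CS_HIGH) */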
+       gpio_set_value(gpio, dev_is_lowactive ^ active);
+}
+
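+/* fill the transmit FIFO with up to 8 words, then trigger the exchange */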
+static void spi_imx_push(struct spi_imx_data *spi_imx)
+{
+       while (spi_imx->txfifo < 8) {
+               if (!spi_imx->count)
+                       break;
+               spi_imx->tx(spi_imx);
+               spi_imx->txfifo++;
+       }
+
+       spi_imx->trigger(spi_imx);
+}
+
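+/*
+ * Interrupt handler: drain whatever arrived in the rx FIFO, refill the tx
+ * FIFO while data remains, switch to the "rx ready" interrupt once the last
+ * word has been pushed, and signal completion when everything is back.
+ */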
+static irqreturn_t spi_imx_isr(int irq, void *dev_id)
+{
+       struct spi_imx_data *spi_imx = dev_id;
+
+       while (spi_imx->rx_available(spi_imx)) {
+               spi_imx->rx(spi_imx);
+               spi_imx->txfifo--;
+       }
+
+       if (spi_imx->count) {
+               spi_imx_push(spi_imx);
+               return IRQ_HANDLED;
+       }
+
+       if (spi_imx->txfifo) {
+               /* No data left to push, but still waiting for rx data,
+                * enable receive data available interrupt.
+                */
+               spi_imx->intctrl(spi_imx, MXC_INT_RR);
+               return IRQ_HANDLED;
+       }
+
+       spi_imx->intctrl(spi_imx, 0);
+       complete(&spi_imx->xfer_done);
+
+       return IRQ_HANDLED;
+}
+
+static int spi_imx_setupxfer(struct spi_device *spi,
+                                struct spi_transfer *t)
+{
+       struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+       struct spi_imx_config config;
+
+       config.bpw = t ? t->bits_per_word : spi->bits_per_word;
+       config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
+       config.mode = spi->mode;
+       config.cs = spi_imx->chipselect[spi->chip_select];
+
+       if (!config.speed_hz)
+               config.speed_hz = spi->max_speed_hz;
+       if (!config.bpw)
+               config.bpw = spi->bits_per_word;
+
+       /* Initialize the functions for transfer */
+       if (config.bpw <= 8) {
+               spi_imx->rx = spi_imx_buf_rx_u8;
+               spi_imx->tx = spi_imx_buf_tx_u8;
+       } else if (config.bpw <= 16) {
+               spi_imx->rx = spi_imx_buf_rx_u16;
+               spi_imx->tx = spi_imx_buf_tx_u16;
+       } else if (config.bpw <= 32) {
+               spi_imx->rx = spi_imx_buf_rx_u32;
+               spi_imx->tx = spi_imx_buf_tx_u32;
+       } else
+               BUG();
+
+       spi_imx->config(spi_imx, &config);
+
+       return 0;
+}
+
+static int spi_imx_transfer(struct spi_device *spi,
+                               struct spi_transfer *transfer)
+{
+       struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+
+       spi_imx->tx_buf = transfer->tx_buf;
+       spi_imx->rx_buf = transfer->rx_buf;
+       spi_imx->count = transfer->len;
+       spi_imx->txfifo = 0;
+
+       init_completion(&spi_imx->xfer_done);
+
+       spi_imx_push(spi_imx);
+
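+       /* Arm the TX-empty interrupt; from here on the ISR keeps the FIFO fed and completes xfer_done. */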
+       spi_imx->intctrl(spi_imx, MXC_INT_TE);
+
+       wait_for_completion(&spi_imx->xfer_done);
+
+       return transfer->len;
+}
+
+static int spi_imx_setup(struct spi_device *spi)
+{
+       struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+       int gpio = spi_imx->chipselect[spi->chip_select];
+
+       pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
+                spi->mode, spi->bits_per_word, spi->max_speed_hz);
+
+       if (gpio >= 0)
+               gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+
+       spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
+
+       return 0;
+}
+
+static void spi_imx_cleanup(struct spi_device *spi)
+{
+}
+
+static int __init spi_imx_probe(struct platform_device *pdev)
+{
+       struct spi_imx_master *mxc_platform_info;
+       struct spi_master *master;
+       struct spi_imx_data *spi_imx;
+       struct resource *res;
+       int i, ret;
+
+       mxc_platform_info = (struct spi_imx_master *)pdev->dev.platform_data;
+       if (!mxc_platform_info) {
+               dev_err(&pdev->dev, "can't get the platform data\n");
+               return -EINVAL;
+       }
+
+       master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+       if (!master)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, master);
+
+       master->bus_num = pdev->id;
+       master->num_chipselect = mxc_platform_info->num_chipselect;
+
+       spi_imx = spi_master_get_devdata(master);
+       spi_imx->bitbang.master = spi_master_get(master);
+       spi_imx->chipselect = mxc_platform_info->chipselect;
+
+       for (i = 0; i < master->num_chipselect; i++) {
+               if (spi_imx->chipselect[i] < 0)
+                       continue;
+               ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
+               if (ret) {
+                       /* Roll back the chip select GPIOs claimed so far. */
+                       while (--i >= 0)
+                               if (spi_imx->chipselect[i] >= 0)
+                                       gpio_free(spi_imx->chipselect[i]);
+                       dev_err(&pdev->dev, "can't get cs gpios\n");
+                       goto out_master_put;
+               }
+       }
+
+       spi_imx->bitbang.chipselect = spi_imx_chipselect;
+       spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
+       spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
+       spi_imx->bitbang.master->setup = spi_imx_setup;
+       spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
+       spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+       init_completion(&spi_imx->xfer_done);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "can't get platform resource\n");
+               ret = -ENOMEM;
+               goto out_gpio_free;
+       }
+
+       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+               dev_err(&pdev->dev, "request_mem_region failed\n");
+               ret = -EBUSY;
+               goto out_gpio_free;
+       }
+
+       spi_imx->base = ioremap(res->start, resource_size(res));
+       if (!spi_imx->base) {
+               ret = -EINVAL;
+               goto out_release_mem;
+       }
+
+       spi_imx->irq = platform_get_irq(pdev, 0);
+       if (spi_imx->irq <= 0) {
+               ret = -EINVAL;
+               goto out_iounmap;
+       }
+
+       ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
+       if (ret) {
+               dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
+               goto out_iounmap;
+       }
+
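+       /* Select the register-level accessors matching this SoC's CSPI variant (MX31/35, MX21/27 or MX1). */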
+       if (cpu_is_mx31() || cpu_is_mx35()) {
+               spi_imx->intctrl = mx31_intctrl;
+               spi_imx->config = mx31_config;
+               spi_imx->trigger = mx31_trigger;
+               spi_imx->rx_available = mx31_rx_available;
+       } else if (cpu_is_mx27() || cpu_is_mx21()) {
+               spi_imx->intctrl = mx27_intctrl;
+               spi_imx->config = mx27_config;
+               spi_imx->trigger = mx27_trigger;
+               spi_imx->rx_available = mx27_rx_available;
+       } else if (cpu_is_mx1()) {
+               spi_imx->intctrl = mx1_intctrl;
+               spi_imx->config = mx1_config;
+               spi_imx->trigger = mx1_trigger;
+               spi_imx->rx_available = mx1_rx_available;
+       } else
+               BUG();
+
+       spi_imx->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(spi_imx->clk)) {
+               dev_err(&pdev->dev, "unable to get clock\n");
+               ret = PTR_ERR(spi_imx->clk);
+               goto out_free_irq;
+       }
+
+       clk_enable(spi_imx->clk);
+       spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
+
+       if (!cpu_is_mx31() && !cpu_is_mx35())
+               writel(1, spi_imx->base + MXC_RESET);
+
+       spi_imx->intctrl(spi_imx, 0);
+
+       ret = spi_bitbang_start(&spi_imx->bitbang);
+       if (ret) {
+               dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+               goto out_clk_put;
+       }
+
+       dev_info(&pdev->dev, "probed\n");
+
+       return ret;
+
+out_clk_put:
+       clk_disable(spi_imx->clk);
+       clk_put(spi_imx->clk);
+out_free_irq:
+       free_irq(spi_imx->irq, spi_imx);
+out_iounmap:
+       iounmap(spi_imx->base);
+out_release_mem:
+       release_mem_region(res->start, resource_size(res));
+out_gpio_free:
+       for (i = 0; i < master->num_chipselect; i++)
+               if (spi_imx->chipselect[i] >= 0)
+                       gpio_free(spi_imx->chipselect[i]);
+out_master_put:
+       spi_master_put(master);
+       platform_set_drvdata(pdev, NULL);
+       return ret;
+}
+
+static int __exit spi_imx_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+       int i;
+
+       spi_bitbang_stop(&spi_imx->bitbang);
+
+       writel(0, spi_imx->base + MXC_CSPICTRL);
+       clk_disable(spi_imx->clk);
+       clk_put(spi_imx->clk);
+       free_irq(spi_imx->irq, spi_imx);
+       iounmap(spi_imx->base);
+
+       for (i = 0; i < master->num_chipselect; i++)
+               if (spi_imx->chipselect[i] >= 0)
+                       gpio_free(spi_imx->chipselect[i]);
+
+       spi_master_put(master);
+
+       release_mem_region(res->start, resource_size(res));
+
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static struct platform_driver spi_imx_driver = {
+       .driver = {
+                  .name = DRIVER_NAME,
+                  .owner = THIS_MODULE,
+                  },
+       .probe = spi_imx_probe,
+       .remove = __exit_p(spi_imx_remove),
+};
+
+static int __init spi_imx_init(void)
+{
+       return platform_driver_register(&spi_imx_driver);
+}
+
+static void __exit spi_imx_exit(void)
+{
+       platform_driver_unregister(&spi_imx_driver);
+}
+
+module_init(spi_imx_init);
+module_exit(spi_imx_exit);
+
+MODULE_DESCRIPTION("SPI Master Controller driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
index f921bd1109e1c1ff9a26e851e19a484860b19c08..5d23983f02fc7a70f69a1989f4bcccac46bba6a8 100644 (file)
@@ -537,7 +537,7 @@ static int spidev_release(struct inode *inode, struct file *filp)
        return status;
 }
 
-static struct file_operations spidev_fops = {
+static const struct file_operations spidev_fops = {
        .owner =        THIS_MODULE,
        /* REVISIT switch to aio primitives, so that userspace
         * gets more complete API coverage.  It'll simplify things
index ac8577358ba096126b38106e25c58d1268c25c37..c24e4e0367a276ce787140154156b25ac73707bb 100644 (file)
@@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio)
        struct dst_node *n = q->queuedata;
        int err = -EIO;
 
-       if (bio_empty_barrier(bio) && !q->prepare_discard_fn) {
+       if (bio_empty_barrier(bio) && !blk_queue_discard(q)) {
                /*
                 * This is a dirty^Wnice hack, but if we complete this
                 * operation with -EOPNOTSUPP like intended, XFS
@@ -847,7 +847,7 @@ static dst_command_func dst_commands[] = {
 /*
  * Configuration parser.
  */
-static void cn_dst_callback(struct cn_msg *msg)
+static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
        struct dst_ctl *ctl;
        int err;
@@ -855,6 +855,11 @@ static void cn_dst_callback(struct cn_msg *msg)
        struct dst_node *n = NULL, *tmp;
        unsigned int hash;
 
+       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
+               err = -EPERM;
+               goto out;
+       }
+
        if (msg->len < sizeof(struct dst_ctl)) {
                err = -EBADMSG;
                goto out;
index ea8a5efc19bca41120bd4e5e4d2789b9afb11c36..fc2107f4c049b4b88905bbe06ee06aa7f1c45f45 100644 (file)
@@ -239,10 +239,6 @@ static int __devexit tsl2561_remove(struct i2c_client *client)
        return tsl2561_powerdown(client);
 }
 
-static unsigned short normal_i2c[] = { 0x29, 0x39, 0x49, I2C_CLIENT_END };
-
-I2C_CLIENT_INSMOD;
-
 static const struct i2c_device_id tsl2561_id[] = {
        { "tsl2561", 0 },
        { }
index 90f962ee5fd8865a17622481a6a0d9c4e72d9966..5d04bf5b021a8c24b9534339ec194bc3f5659e05 100644 (file)
@@ -527,10 +527,13 @@ out_unlock:
        return err;
 }
 
-static void pohmelfs_cn_callback(struct cn_msg *msg)
+static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
        int err;
 
+       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+               return;
+
        switch (msg->flags) {
                case POHMELFS_FLAGS_ADD:
                case POHMELFS_FLAGS_DEL:
index 333ee02e7b2b2a7bcf94baf5a2af0245185c0da7..864f0ba6a344ada55735e0f06d02f6033edb1cdd 100644 (file)
@@ -993,7 +993,7 @@ skip_io_on_zombie:
        return retval;
 }
 
-static struct file_operations fops = {
+static const struct file_operations fops = {
        .owner          = THIS_MODULE,
        .read           = usbtmc_read,
        .write          = usbtmc_write,
index c44367fea185822919231f87e16fcf9a6d994461..bf0f6520c6df3946b91f2a40f9b1a6c786918f6d 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/wait.h>
 #include <linux/compiler.h>
 #include <asm/uaccess.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/poll.h>
 #include <linux/smp_lock.h>
index 29500154d00c57d5f74a299fcfaa90610fecad03..2d867fd22413f9ab0a9af24bacc9aa306cf6df85 100644 (file)
@@ -875,7 +875,7 @@ printer_ioctl(struct file *fd, unsigned int code, unsigned long arg)
 }
 
 /* used after endpoint configuration */
-static struct file_operations printer_io_operations = {
+static const struct file_operations printer_io_operations = {
        .owner =        THIS_MODULE,
        .open =         printer_open,
        .read =         printer_read,
index cf2d45946c57c67e783a9eb44238dda29dd9b771..2273c815941fe1cf4a886c19831bc63cb871d0f9 100644 (file)
@@ -134,7 +134,7 @@ static int pzl_open(struct inode *inode, struct file *file)
        return single_open(file, pzl_print, inode->i_private);
 }
 
-static struct file_operations di_fops = {
+static const struct file_operations di_fops = {
        .open    = di_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
@@ -142,7 +142,7 @@ static struct file_operations di_fops = {
        .owner   = THIS_MODULE,
 };
 
-static struct file_operations asl_fops = {
+static const struct file_operations asl_fops = {
        .open    = asl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
@@ -150,7 +150,7 @@ static struct file_operations asl_fops = {
        .owner   = THIS_MODULE,
 };
 
-static struct file_operations pzl_fops = {
+static const struct file_operations pzl_fops = {
        .open    = pzl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
index d645f3899fe1c52a09599e90d241006337591381..32d0199d0c3220a1c037856e5143bbd641b2cf38 100644 (file)
@@ -429,8 +429,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
        return read_count;
 }
 
-static struct
-file_operations usb_rio_fops = {
+static const struct file_operations usb_rio_fops = {
        .owner =        THIS_MODULE,
        .read =         read_rio,
        .write =        write_rio,
index 4a42993700c1aaadc3dbe55dd75c8d3738f04aff..2eecec0c13c9bf1bba5013865fdfdd799c175af0 100644 (file)
@@ -205,7 +205,7 @@ static ssize_t command_write(struct file *file, const char __user *buf,
        return ret < 0 ? ret : len;
 }
 
-static struct file_operations command_fops = {
+static const struct file_operations command_fops = {
        .open   = command_open,
        .write  = command_write,
        .read   = NULL,
@@ -255,7 +255,7 @@ static int reservations_open(struct inode *inode, struct file *file)
        return single_open(file, reservations_print, inode->i_private);
 }
 
-static struct file_operations reservations_fops = {
+static const struct file_operations reservations_fops = {
        .open    = reservations_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
@@ -283,7 +283,7 @@ static int drp_avail_open(struct inode *inode, struct file *file)
        return single_open(file, drp_avail_print, inode->i_private);
 }
 
-static struct file_operations drp_avail_fops = {
+static const struct file_operations drp_avail_fops = {
        .open    = drp_avail_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
index 42e1005e2916ca19be87b0f1ad806907f849a105..d065894ce38fbf952a46d2da87da67b34374289a 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/uaccess.h>
-#include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
 #include <video/da8xx-fb.h>
index a1f2e7ce730bd1881a4c2519617bb549761f4f72..99bbd282ce634186e5d28f9a13cd9ba78658ddc2 100644 (file)
@@ -1800,7 +1800,7 @@ static int __init video_setup(char *options)
                global = 1;
        }
 
-       if (!global && !strstr(options, "fb:")) {
+       if (!global && !strchr(options, ':')) {
                fb_mode_option = options;
                global = 1;
        }
index f2de5a1acd6def8375dcffa11df4887fd9a04b28..5c5a1ad1d397417f39747c60d3c603357fbd47ec 100644 (file)
@@ -27,8 +27,6 @@
 #include <mach/msm_iomap.h>
 #include <mach/irqs.h>
 #include <mach/board.h>
-#include <linux/delay.h>
-
 #include <mach/msm_fb.h>
 #include "mddi_hw.h"
 
index d5e59556f9e2d3a056432535129021a7d42d25ef..70dadf9d2334c9a26c88bd10dc43ac1d20251b52 100644 (file)
@@ -93,7 +93,7 @@ struct blizzard_reg_list {
 };
 
 /* These need to be saved / restored separately from the rest. */
-static struct blizzard_reg_list blizzard_pll_regs[] = {
+static const struct blizzard_reg_list blizzard_pll_regs[] = {
        {
                .start  = 0x04,         /* Don't save PLL ctrl (0x0C) */
                .end    = 0x0a,
@@ -104,7 +104,7 @@ static struct blizzard_reg_list blizzard_pll_regs[] = {
        },
 };
 
-static struct blizzard_reg_list blizzard_gen_regs[] = {
+static const struct blizzard_reg_list blizzard_gen_regs[] = {
        {
                .start  = 0x18,         /* SDRAM control */
                .end    = 0x20,
@@ -191,7 +191,7 @@ struct blizzard_struct {
 
        struct omapfb_device    *fbdev;
        struct lcd_ctrl_extif   *extif;
-       struct lcd_ctrl         *int_ctrl;
+       const struct lcd_ctrl   *int_ctrl;
 
        void                    (*power_up)(struct device *dev);
        void                    (*power_down)(struct device *dev);
@@ -1372,7 +1372,7 @@ static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
                           (1 << OMAPFB_COLOR_YUV420);
 }
 
-static void _save_regs(struct blizzard_reg_list *list, int cnt)
+static void _save_regs(const struct blizzard_reg_list *list, int cnt)
 {
        int i;
 
@@ -1383,7 +1383,7 @@ static void _save_regs(struct blizzard_reg_list *list, int cnt)
        }
 }
 
-static void _restore_regs(struct blizzard_reg_list *list, int cnt)
+static void _restore_regs(const struct blizzard_reg_list *list, int cnt)
 {
        int i;
 
index 125e605b8c6827e60c707a37939598b4468d482e..0d0c8c8b9b56dc95660f256624c3afe754939870 100644 (file)
@@ -393,7 +393,7 @@ static void omapfb_sync(struct fb_info *fbi)
  * Set fb_info.fix fields and also updates fbdev.
  * When calling this fb_info.var must be set up already.
  */
-static void set_fb_fix(struct fb_info *fbi)
+static void set_fb_fix(struct fb_info *fbi, int from_init)
 {
        struct fb_fix_screeninfo *fix = &fbi->fix;
        struct fb_var_screeninfo *var = &fbi->var;
@@ -403,10 +403,16 @@ static void set_fb_fix(struct fb_info *fbi)
 
        rg = &plane->fbdev->mem_desc.region[plane->idx];
        fbi->screen_base        = rg->vaddr;
-       mutex_lock(&fbi->mm_lock);
-       fix->smem_start         = rg->paddr;
-       fix->smem_len           = rg->size;
-       mutex_unlock(&fbi->mm_lock);
+
+       if (!from_init) {
+               mutex_lock(&fbi->mm_lock);
+               fix->smem_start         = rg->paddr;
+               fix->smem_len           = rg->size;
+               mutex_unlock(&fbi->mm_lock);
+       } else {
+               fix->smem_start         = rg->paddr;
+               fix->smem_len           = rg->size;
+       }
 
        fix->type = FB_TYPE_PACKED_PIXELS;
        bpp = var->bits_per_pixel;
@@ -704,7 +710,7 @@ static int omapfb_set_par(struct fb_info *fbi)
        int r = 0;
 
        omapfb_rqueue_lock(fbdev);
-       set_fb_fix(fbi);
+       set_fb_fix(fbi, 0);
        r = ctrl_change_mode(fbi);
        omapfb_rqueue_unlock(fbdev);
 
@@ -904,7 +910,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
                if (old_size != size) {
                        if (size) {
                                memcpy(&fbi->var, new_var, sizeof(fbi->var));
-                               set_fb_fix(fbi);
+                               set_fb_fix(fbi, 0);
                        } else {
                                /*
                                 * Set these explicitly to indicate that the
@@ -1504,7 +1510,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
        var->bits_per_pixel = fbdev->panel->bpp;
 
        set_fb_var(info, var);
-       set_fb_fix(info);
+       set_fb_fix(info, 1);
 
        r = fb_alloc_cmap(&info->cmap, 16, 0);
        if (r != 0)
index e98baf6916b88fd9c8d6f25b1161c5ce099a4d99..e35232a1857199926882234faf795e61cd755cc7 100644 (file)
@@ -67,11 +67,14 @@ static DEFINE_MUTEX(uvfb_lock);
  * find the kernel part of the task struct, copy the registers and
  * the buffer contents and then complete the task.
  */
-static void uvesafb_cn_callback(struct cn_msg *msg)
+static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
        struct uvesafb_task *utask;
        struct uvesafb_ktask *task;
 
+       if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
+               return;
+
        if (msg->seq >= UVESAFB_TASKS_MAX)
                return;
 
index df52cb355f7dd10b093bff43796c283c62c9bb26..406caa6a71cbb1bc40d54e44f957079a1b4f1b06 100644 (file)
 #include "../w1.h"
 #include "../w1_int.h"
 
-/**
- * Address is selected using 2 pins, resulting in 4 possible addresses.
- *  0x18, 0x19, 0x1a, 0x1b
- * However, the chip cannot be detected without doing an i2c write,
- * so use the force module parameter.
- */
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/**
- * Insmod parameters
- */
-I2C_CLIENT_INSMOD_1(ds2482);
-
 /**
  * The DS2482 registers - there are 3 registers that are addressed by a read
  * pointer. The read pointer is set by the last command executed.
@@ -96,8 +83,6 @@ static const u8 ds2482_chan_rd[8] =
 
 static int ds2482_probe(struct i2c_client *client,
                        const struct i2c_device_id *id);
-static int ds2482_detect(struct i2c_client *client, int kind,
-                        struct i2c_board_info *info);
 static int ds2482_remove(struct i2c_client *client);
 
 
@@ -117,8 +102,6 @@ static struct i2c_driver ds2482_driver = {
        .probe          = ds2482_probe,
        .remove         = ds2482_remove,
        .id_table       = ds2482_id,
-       .detect         = ds2482_detect,
-       .address_data   = &addr_data,
 };
 
 /*
@@ -425,19 +408,6 @@ static u8 ds2482_w1_reset_bus(void *data)
 }
 
 
-static int ds2482_detect(struct i2c_client *client, int kind,
-                        struct i2c_board_info *info)
-{
-       if (!i2c_check_functionality(client->adapter,
-                                    I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
-                                    I2C_FUNC_SMBUS_BYTE))
-               return -ENODEV;
-
-       strlcpy(info->type, "ds2482", I2C_NAME_SIZE);
-
-       return 0;
-}
-
 static int ds2482_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
@@ -446,6 +416,11 @@ static int ds2482_probe(struct i2c_client *client,
        int temp1;
        int idx;
 
+       if (!i2c_check_functionality(client->adapter,
+                                    I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+                                    I2C_FUNC_SMBUS_BYTE))
+               return -ENODEV;
+
        if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) {
                err = -ENOMEM;
                goto exit;
index 52ccb3d3a96378aee4fb4f21cd984f9bb43a2925..45c126fea31dcdad608a666964d316214386e1fb 100644 (file)
@@ -306,7 +306,7 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
        return error;
 }
 
-static void w1_cn_callback(struct cn_msg *msg)
+static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
        struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
        struct w1_netlink_cmd *cmd;
index a9592d981b107de1ca1447d1972cd4e12a3252b4..6c4269b836b7f22ddadc2ca39d5531e467cffc95 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/fs.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/mount.h>
 #include <linux/pagemap.h>
index 5ea80b19785bf5472bfb0cf1518ba739241f624a..a6c7c3e47e420c24ab45ecc7f69719be05954cca 100644 (file)
@@ -67,10 +67,13 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
 fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
                             e100/d102e_ucode.bin
 fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
-fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis
+fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \
+                                    cis/DP83903.cis cis/NE2K.cis \
+                                    cis/tamarack.cis
 fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis
 fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis
-fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis
+fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
+                                      cis/COMpad2.cis cis/COMpad4.cis
 fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
 fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
                                      advansys/3550.bin advansys/38C0800.bin
index 3f8c4f6bc43fbe8a48e23e7c2f4e5407ea50a036..c437e14f0b11ef72c73c771d1f52a0e635d10619 100644 (file)
@@ -597,6 +597,9 @@ Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter
 
 File: cis/LA-PCM.cis
       cis/PCMLM28.cis
+      cis/DP83903.cis
+      cis/NE2K.cis
+      cis/tamarack.cis
 
 Licence: GPL
 
@@ -628,6 +631,8 @@ Driver: SERIAL_8250_CS - Serial PCMCIA adapter
 
 File: cis/MT5634ZLX.cis
       cis/RS-COM-2P.cis
+      cis/COMpad2.cis
+      cis/COMpad4.cis
 
 Licence: GPL
 
diff --git a/firmware/cis/COMpad2.cis.ihex b/firmware/cis/COMpad2.cis.ihex
new file mode 100644 (file)
index 0000000..1671c5e
--- /dev/null
@@ -0,0 +1,11 @@
+:1000000001030000FF151F0401414456414E5445B1
+:10001000434800434F4D7061642D33322F38350013
+:10002000312E300000FF210202011A0501050001F6
+:10003000031B0EC18118AA61E80207E8030730B864
+:100040009E1B08820108AA6030030F1B0883010869
+:10005000AA6040030F1B08840108AA6050030F1B0D
+:0D00600008850108AA6060030F1400FF006E
+:00000001FF
+#
+# Replacement CIS for Advantech COMpad-32/85
+#
diff --git a/firmware/cis/COMpad4.cis.ihex b/firmware/cis/COMpad4.cis.ihex
new file mode 100644 (file)
index 0000000..27bbec1
--- /dev/null
@@ -0,0 +1,9 @@
+:1000000001030000FF151F0401414456414E5445B1
+:10001000434800434F4D7061642D33322F383542D1
+:100020002D34000000FF210202011A050102000127
+:10003000011B0BC18118AA6040021F30B89E1B082B
+:0C004000820108AA6040031F1400FF00AA
+:00000001FF
+#
+# Replacement CIS for Advantech COMpad-32/85B-4
+#
diff --git a/firmware/cis/DP83903.cis.ihex b/firmware/cis/DP83903.cis.ihex
new file mode 100644 (file)
index 0000000..6d73ea3
--- /dev/null
@@ -0,0 +1,14 @@
+:1000000001030000FF152904014D756C74696675C4
+:100010006E6374696F6E20436172640000004E531A
+:1000200043204D46204C414E2F4D6F64656D00FFBF
+:1000300020047501000021020000060B02004900A7
+:100040000000006A000000FF00130343495321022F
+:1000500006001A060517201077021B0C970179017C
+:10006000556530FFFF284000FF001303434953212B
+:100070000202001A060507401077021B09870119C2
+:0800800001552330FFFFFF00D2
+:00000001FF
+#
+# This CIS is for cards based on the National Semiconductor
+# DP83903 Multiple Function Interface Chip
+#
diff --git a/firmware/cis/NE2K.cis.ihex b/firmware/cis/NE2K.cis.ihex
new file mode 100644 (file)
index 0000000..1bb40fc
--- /dev/null
@@ -0,0 +1,8 @@
+:1000000001030000FF1515040150434D4349410011
+:1000100045746865726E6574000000FF2102060079
+:100020001A050120F803031B09E001190155653089
+:06003000FFFF1400FF00B9
+:00000001FF
+#
+# Replacement CIS for various busted NE2000-compatible cards
+#
diff --git a/firmware/cis/tamarack.cis.ihex b/firmware/cis/tamarack.cis.ihex
new file mode 100644 (file)
index 0000000..1e86547
--- /dev/null
@@ -0,0 +1,10 @@
+:100000000103D400FF17034100FF152404015441EC
+:100010004D415241434B0045746865726E657400F2
+:10002000410030303437343331313830303100FF33
+:10003000210206001A050120F803031B14E08119B0
+:100040003F554D5D06864626E551000F100F30FFE7
+:05005000FF1400FF0099
+:00000001FF
+#
+# Replacement CIS for Surecom, Tamarack NE2000 cards
+#
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
deleted file mode 100644 (file)
index 5c4f6b4..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* AFS local cache management interface
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/fscache.h>
index 106be66dafd2ca7edc9388dac55d8a218ab52dc6..6ece2a13bf711fd477098d18b60a34a4861fea59 100644 (file)
 #include <linux/key.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <linux/fscache.h>
 
 #include "afs.h"
 #include "afs_vl.h"
-#include "cache.h"
 
 #define AFS_CELL_MAX_ADDRS 15
 
index d11c51fc2a3fd144ac8de14702207078743cd353..2ca7a7cafdbf9359ccf1a706a5fdda11f3fe157e 100644 (file)
@@ -8,8 +8,10 @@
  *
  */
 
+#include <linux/cred.h>
 #include <linux/file.h>
 #include <linux/poll.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/fs.h>
index 76738005c8e8af43c3b084a48d4f752e36e2186d..402cb84a92a1dbd538dc4eead68cd474bea3389b 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -249,6 +249,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 
        mempool_free(p, bs->bio_pool);
 }
+EXPORT_SYMBOL(bio_free);
 
 void bio_init(struct bio *bio)
 {
@@ -257,6 +258,7 @@ void bio_init(struct bio *bio)
        bio->bi_comp_cpu = -1;
        atomic_set(&bio->bi_cnt, 1);
 }
+EXPORT_SYMBOL(bio_init);
 
 /**
  * bio_alloc_bioset - allocate a bio for I/O
@@ -311,6 +313,7 @@ err_free:
        mempool_free(p, bs->bio_pool);
        return NULL;
 }
+EXPORT_SYMBOL(bio_alloc_bioset);
 
 static void bio_fs_destructor(struct bio *bio)
 {
@@ -337,6 +340,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 
        return bio;
 }
+EXPORT_SYMBOL(bio_alloc);
 
 static void bio_kmalloc_destructor(struct bio *bio)
 {
@@ -380,6 +384,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 
        return bio;
 }
+EXPORT_SYMBOL(bio_kmalloc);
 
 void zero_fill_bio(struct bio *bio)
 {
@@ -416,6 +421,7 @@ void bio_put(struct bio *bio)
                bio->bi_destructor(bio);
        }
 }
+EXPORT_SYMBOL(bio_put);
 
 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 {
@@ -424,6 +430,7 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 
        return bio->bi_phys_segments;
 }
+EXPORT_SYMBOL(bio_phys_segments);
 
 /**
  *     __bio_clone     -       clone a bio
@@ -451,6 +458,7 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
        bio->bi_size = bio_src->bi_size;
        bio->bi_idx = bio_src->bi_idx;
 }
+EXPORT_SYMBOL(__bio_clone);
 
 /**
  *     bio_clone       -       clone a bio
@@ -482,6 +490,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 
        return b;
 }
+EXPORT_SYMBOL(bio_clone);
 
 /**
  *     bio_get_nr_vecs         - return approx number of vecs
@@ -505,6 +514,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 
        return nr_pages;
 }
+EXPORT_SYMBOL(bio_get_nr_vecs);
 
 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                          *page, unsigned int len, unsigned int offset,
@@ -635,6 +645,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
        return __bio_add_page(q, bio, page, len, offset,
                              queue_max_hw_sectors(q));
 }
+EXPORT_SYMBOL(bio_add_pc_page);
 
 /**
  *     bio_add_page    -       attempt to add page to bio
@@ -655,6 +666,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
 }
+EXPORT_SYMBOL(bio_add_page);
 
 struct bio_map_data {
        struct bio_vec *iovecs;
@@ -776,6 +788,7 @@ int bio_uncopy_user(struct bio *bio)
        bio_put(bio);
        return ret;
 }
+EXPORT_SYMBOL(bio_uncopy_user);
 
 /**
  *     bio_copy_user_iov       -       copy user data to bio
@@ -920,6 +933,7 @@ struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
 
        return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
 }
+EXPORT_SYMBOL(bio_copy_user);
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
                                      struct block_device *bdev,
@@ -1050,6 +1064,7 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
 
        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
 }
+EXPORT_SYMBOL(bio_map_user);
 
 /**
  *     bio_map_user_iov - map user sg_iovec table into bio
@@ -1117,13 +1132,13 @@ void bio_unmap_user(struct bio *bio)
        __bio_unmap_user(bio);
        bio_put(bio);
 }
+EXPORT_SYMBOL(bio_unmap_user);
 
 static void bio_map_kern_endio(struct bio *bio, int err)
 {
        bio_put(bio);
 }
 
-
 static struct bio *__bio_map_kern(struct request_queue *q, void *data,
                                  unsigned int len, gfp_t gfp_mask)
 {
@@ -1189,6 +1204,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
        bio_put(bio);
        return ERR_PTR(-EINVAL);
 }
+EXPORT_SYMBOL(bio_map_kern);
 
 static void bio_copy_kern_endio(struct bio *bio, int err)
 {
@@ -1250,6 +1266,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 
        return bio;
 }
+EXPORT_SYMBOL(bio_copy_kern);
 
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
@@ -1400,6 +1417,7 @@ void bio_endio(struct bio *bio, int error)
        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
 }
+EXPORT_SYMBOL(bio_endio);
 
 void bio_pair_release(struct bio_pair *bp)
 {
@@ -1410,6 +1428,7 @@ void bio_pair_release(struct bio_pair *bp)
                mempool_free(bp, bp->bio2.bi_private);
        }
 }
+EXPORT_SYMBOL(bio_pair_release);
 
 static void bio_pair_end_1(struct bio *bi, int err)
 {
@@ -1477,6 +1496,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 
        return bp;
 }
+EXPORT_SYMBOL(bio_split);
 
 /**
  *      bio_sector_offset - Find hardware sector offset in bio
@@ -1547,6 +1567,7 @@ void bioset_free(struct bio_set *bs)
 
        kfree(bs);
 }
+EXPORT_SYMBOL(bioset_free);
 
 /**
  * bioset_create  - Create a bio_set
@@ -1592,6 +1613,7 @@ bad:
        bioset_free(bs);
        return NULL;
 }
+EXPORT_SYMBOL(bioset_create);
 
 static void __init biovec_init_slabs(void)
 {
@@ -1636,29 +1658,4 @@ static int __init init_bio(void)
 
        return 0;
 }
-
 subsys_initcall(init_bio);
-
-EXPORT_SYMBOL(bio_alloc);
-EXPORT_SYMBOL(bio_kmalloc);
-EXPORT_SYMBOL(bio_put);
-EXPORT_SYMBOL(bio_free);
-EXPORT_SYMBOL(bio_endio);
-EXPORT_SYMBOL(bio_init);
-EXPORT_SYMBOL(__bio_clone);
-EXPORT_SYMBOL(bio_clone);
-EXPORT_SYMBOL(bio_phys_segments);
-EXPORT_SYMBOL(bio_add_page);
-EXPORT_SYMBOL(bio_add_pc_page);
-EXPORT_SYMBOL(bio_get_nr_vecs);
-EXPORT_SYMBOL(bio_map_user);
-EXPORT_SYMBOL(bio_unmap_user);
-EXPORT_SYMBOL(bio_map_kern);
-EXPORT_SYMBOL(bio_copy_kern);
-EXPORT_SYMBOL(bio_pair_release);
-EXPORT_SYMBOL(bio_split);
-EXPORT_SYMBOL(bio_copy_user);
-EXPORT_SYMBOL(bio_uncopy_user);
-EXPORT_SYMBOL(bioset_create);
-EXPORT_SYMBOL(bioset_free);
-EXPORT_SYMBOL(bio_alloc_bioset);
index f128427b995b108382fde3b3fea6d0582f841666..69b355ae7f49139d928ec398807b9f0bc908c307 100644 (file)
@@ -27,7 +27,7 @@
 #include "btrfs_inode.h"
 #include "xattr.h"
 
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_BTRFS_POSIX_ACL
 
 static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 {
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = {
        .set    = btrfs_xattr_acl_access_set,
 };
 
-#else /* CONFIG_FS_POSIX_ACL */
+#else /* CONFIG_BTRFS_POSIX_ACL */
 
 int btrfs_acl_chmod(struct inode *inode)
 {
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
        return 0;
 }
 
-#endif /* CONFIG_FS_POSIX_ACL */
+#endif /* CONFIG_BTRFS_POSIX_ACL */
index 82ee56bba29966e5dee552d764bb0e60fbc8bca9..a54d354cefcb71894a89975fae23ff94a9b939ad 100644 (file)
@@ -127,6 +127,14 @@ struct btrfs_inode {
         */
        u64 last_unlink_trans;
 
+       /*
+        * These two counters are for delalloc metadata reservations.  We keep
+        * track of how many extents we've accounted for vs how many extents we
+        * have.
+        */
+       int delalloc_reserved_extents;
+       int delalloc_extents;
+
        /*
         * ordered_data_close is set by truncate when a file that used
         * to have good data has been truncated to zero.  When it is set
index 80599b4e42bd350f5e6b7a094c3711a3dd32e8a8..dd8ced9814c4a5d0bc35e9bd8ef12cd8745382b9 100644 (file)
@@ -675,18 +675,19 @@ struct btrfs_space_info {
                                   current allocations */
        u64 bytes_readonly;     /* total bytes that are read only */
        u64 bytes_super;        /* total bytes reserved for the super blocks */
-
-       /* delalloc accounting */
-       u64 bytes_delalloc;     /* number of bytes reserved for allocation,
-                                  this space is not necessarily reserved yet
-                                  by the allocator */
+       u64 bytes_root;         /* the number of bytes needed to commit a
+                                  transaction */
        u64 bytes_may_use;      /* number of bytes that may be used for
-                                  delalloc */
+                                  delalloc/allocations */
+       u64 bytes_delalloc;     /* number of bytes currently reserved for
+                                  delayed allocation */
 
        int full;               /* indicates that we cannot allocate any more
                                   chunks for this space */
        int force_alloc;        /* set if we need to force a chunk alloc for
                                   this space */
+       int force_delalloc;     /* make people start doing filemap_flush until
+                                  we're under a threshold */
 
        struct list_head list;
 
@@ -695,6 +696,9 @@ struct btrfs_space_info {
        spinlock_t lock;
        struct rw_semaphore groups_sem;
        atomic_t caching_threads;
+
+       int allocating_chunk;
+       wait_queue_head_t wait;
 };
 
 /*
@@ -2022,7 +2026,12 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 
-int btrfs_check_metadata_free_space(struct btrfs_root *root);
+int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items);
+int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items);
+int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
+                                         struct inode *inode, int num_items);
+int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
+                                       struct inode *inode, int num_items);
 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
                                u64 bytes);
 void btrfs_free_reserved_data_space(struct btrfs_root *root,
@@ -2326,7 +2335,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync);
 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                            int skip_pinned);
 int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
-extern struct file_operations btrfs_file_operations;
+extern const struct file_operations btrfs_file_operations;
 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode,
                       u64 start, u64 end, u64 locked_end,
@@ -2357,7 +2366,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options);
 int btrfs_sync_fs(struct super_block *sb, int wait);
 
 /* acl.c */
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_BTRFS_POSIX_ACL
 int btrfs_check_acl(struct inode *inode, int mask);
 #else
 #define btrfs_check_acl NULL
index 644e796fd643e045ca0b0ed057743b7eb5e88438..af0435f79fa605e96f0461efd496599c576eb7e2 100644 (file)
@@ -822,14 +822,14 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 
 int btrfs_write_tree_block(struct extent_buffer *buf)
 {
-       return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
-                                     buf->start + buf->len - 1, WB_SYNC_ALL);
+       return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
+                                       buf->start + buf->len - 1);
 }
 
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
-       return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
-                                 buf->start, buf->start + buf->len - 1);
+       return filemap_fdatawait_range(buf->first_page->mapping,
+                                      buf->start, buf->start + buf->len - 1);
 }
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
@@ -1630,7 +1630,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->sb = sb;
        fs_info->max_extent = (u64)-1;
        fs_info->max_inline = 8192 * 1024;
-       fs_info->metadata_ratio = 8;
+       fs_info->metadata_ratio = 0;
 
        fs_info->thread_pool_size = min_t(unsigned long,
                                          num_online_cpus() + 2, 8);
index 993f93ff7ba695c97b490f5e2d5d6d3b5939980c..359a754c782cd8338f0a1bf0d456306e3240c32c 100644 (file)
@@ -68,6 +68,8 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
                          struct extent_buffer **must_clean);
 static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
+static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
+                           int dump_block_groups);
 
 static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -2765,67 +2767,346 @@ void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
                                                       alloc_target);
 }
 
+static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
+{
+       u64 num_bytes;
+       int level;
+
+       level = BTRFS_MAX_LEVEL - 2;
+       /*
+        * NOTE: these calculations are absolutely the worst possible case.
+        * This assumes that _every_ item we insert will require a new leaf, and
+        * that the tree has grown to its maximum level size.
+        */
+
+       /*
+        * for every item we insert we could insert both an extent item and an
+        * extent ref item.  Then for every item we insert, we will need to cow
+        * both the original leaf, plus the leaves to the left and right of it.
+        *
+        * Unless we are talking about the extent root, in which case we just
+        * want num_items * 2: the extent item plus its ref.
+        */
+       if (root == root->fs_info->extent_root)
+               num_bytes = num_items * 2;
+       else
+               num_bytes = (num_items + (2 * num_items)) * 3;
+
+       /*
+        * num_bytes is total number of leaves we could need times the leaf
+        * size, and then for every leaf we could end up cow'ing 2 nodes per
+        * level, down to the leaf level.
+        */
+       num_bytes = (num_bytes * root->leafsize) +
+               (num_bytes * (level * 2)) * root->nodesize;
+
+       return num_bytes;
+}
+
 /*
- * for now this just makes sure we have at least 5% of our metadata space free
- * for use.
+ * Unreserve metadata space for delalloc.  If we have fewer reserved credits
+ * than we have extents, this function does nothing.
  */
-int btrfs_check_metadata_free_space(struct btrfs_root *root)
+int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
+                                         struct inode *inode, int num_items)
 {
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_space_info *meta_sinfo;
-       u64 alloc_target, thresh;
-       int committed = 0, ret;
+       u64 num_bytes;
+       u64 alloc_target;
+       bool bug = false;
 
        /* get the space info for where the metadata will live */
        alloc_target = btrfs_get_alloc_profile(root, 0);
        meta_sinfo = __find_space_info(info, alloc_target);
-       if (!meta_sinfo)
-               goto alloc;
 
-again:
+       num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
+                                          num_items);
+
        spin_lock(&meta_sinfo->lock);
-       if (!meta_sinfo->full)
-               thresh = meta_sinfo->total_bytes * 80;
-       else
-               thresh = meta_sinfo->total_bytes * 95;
+       if (BTRFS_I(inode)->delalloc_reserved_extents <=
+           BTRFS_I(inode)->delalloc_extents) {
+               spin_unlock(&meta_sinfo->lock);
+               return 0;
+       }
+
+       BTRFS_I(inode)->delalloc_reserved_extents--;
+       BUG_ON(BTRFS_I(inode)->delalloc_reserved_extents < 0);
+
+       if (meta_sinfo->bytes_delalloc < num_bytes) {
+               bug = true;
+               meta_sinfo->bytes_delalloc = 0;
+       } else {
+               meta_sinfo->bytes_delalloc -= num_bytes;
+       }
+       spin_unlock(&meta_sinfo->lock);
 
+       BUG_ON(bug);
+
+       return 0;
+}
+
+static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
+{
+       u64 thresh;
+
+       thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
+               meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
+               meta_sinfo->bytes_super + meta_sinfo->bytes_root +
+               meta_sinfo->bytes_may_use;
+
+       thresh = meta_sinfo->total_bytes - thresh;
+       thresh *= 80;
        do_div(thresh, 100);
+       if (thresh <= meta_sinfo->bytes_delalloc)
+               meta_sinfo->force_delalloc = 1;
+       else
+               meta_sinfo->force_delalloc = 0;
+}
 
-       if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
-           meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
-           meta_sinfo->bytes_super > thresh) {
-               struct btrfs_trans_handle *trans;
-               if (!meta_sinfo->full) {
-                       meta_sinfo->force_alloc = 1;
+static int maybe_allocate_chunk(struct btrfs_root *root,
+                                struct btrfs_space_info *info)
+{
+       struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
+       struct btrfs_trans_handle *trans;
+       bool wait = false;
+       int ret = 0;
+       u64 min_metadata;
+       u64 free_space;
+
+       free_space = btrfs_super_total_bytes(disk_super);
+       /*
+        * we allow the metadata to grow to a max of either 5gb or 5% of the
+        * space in the volume.
+        */
+       min_metadata = min((u64)5 * 1024 * 1024 * 1024,
+                            div64_u64(free_space * 5, 100));
+       if (info->total_bytes >= min_metadata) {
+               spin_unlock(&info->lock);
+               return 0;
+       }
+
+       if (info->full) {
+               spin_unlock(&info->lock);
+               return 0;
+       }
+
+       if (!info->allocating_chunk) {
+               info->force_alloc = 1;
+               info->allocating_chunk = 1;
+               init_waitqueue_head(&info->wait);
+       } else {
+               wait = true;
+       }
+
+       spin_unlock(&info->lock);
+
+       if (wait) {
+               wait_event(info->wait,
+                          !info->allocating_chunk);
+               return 1;
+       }
+
+       trans = btrfs_start_transaction(root, 1);
+       if (!trans) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+                            4096 + 2 * 1024 * 1024,
+                            info->flags, 0);
+       btrfs_end_transaction(trans, root);
+       if (ret)
+               goto out;
+out:
+       spin_lock(&info->lock);
+       info->allocating_chunk = 0;
+       spin_unlock(&info->lock);
+       wake_up(&info->wait);
+
+       if (ret)
+               return 0;
+       return 1;
+}
+
+/*
+ * Reserve metadata space for delalloc.
+ */
+int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
+                                       struct inode *inode, int num_items)
+{
+       struct btrfs_fs_info *info = root->fs_info;
+       struct btrfs_space_info *meta_sinfo;
+       u64 num_bytes;
+       u64 used;
+       u64 alloc_target;
+       int flushed = 0;
+       int force_delalloc;
+
+       /* get the space info for where the metadata will live */
+       alloc_target = btrfs_get_alloc_profile(root, 0);
+       meta_sinfo = __find_space_info(info, alloc_target);
+
+       num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
+                                          num_items);
+again:
+       spin_lock(&meta_sinfo->lock);
+
+       force_delalloc = meta_sinfo->force_delalloc;
+
+       if (unlikely(!meta_sinfo->bytes_root))
+               meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
+
+       if (!flushed)
+               meta_sinfo->bytes_delalloc += num_bytes;
+
+       used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
+               meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
+               meta_sinfo->bytes_super + meta_sinfo->bytes_root +
+               meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
+
+       if (used > meta_sinfo->total_bytes) {
+               flushed++;
+
+               if (flushed == 1) {
+                       if (maybe_allocate_chunk(root, meta_sinfo))
+                               goto again;
+                       flushed++;
+               } else {
                        spin_unlock(&meta_sinfo->lock);
-alloc:
-                       trans = btrfs_start_transaction(root, 1);
-                       if (!trans)
-                               return -ENOMEM;
+               }
 
-                       ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-                                            2 * 1024 * 1024, alloc_target, 0);
-                       btrfs_end_transaction(trans, root);
-                       if (!meta_sinfo) {
-                               meta_sinfo = __find_space_info(info,
-                                                              alloc_target);
-                       }
+               if (flushed == 2) {
+                       filemap_flush(inode->i_mapping);
+                       goto again;
+               } else if (flushed == 3) {
+                       btrfs_start_delalloc_inodes(root);
+                       btrfs_wait_ordered_extents(root, 0);
                        goto again;
                }
+               spin_lock(&meta_sinfo->lock);
+               meta_sinfo->bytes_delalloc -= num_bytes;
                spin_unlock(&meta_sinfo->lock);
+               printk(KERN_ERR "enospc, has %d, reserved %d\n",
+                      BTRFS_I(inode)->delalloc_extents,
+                      BTRFS_I(inode)->delalloc_reserved_extents);
+               dump_space_info(meta_sinfo, 0, 0);
+               return -ENOSPC;
+       }
 
-               if (!committed) {
-                       committed = 1;
-                       trans = btrfs_join_transaction(root, 1);
-                       if (!trans)
-                               return -ENOMEM;
-                       ret = btrfs_commit_transaction(trans, root);
-                       if (ret)
-                               return ret;
+       BTRFS_I(inode)->delalloc_reserved_extents++;
+       check_force_delalloc(meta_sinfo);
+       spin_unlock(&meta_sinfo->lock);
+
+       if (!flushed && force_delalloc)
+               filemap_flush(inode->i_mapping);
+
+       return 0;
+}
+
+/*
+ * Unreserve num_items items' worth of metadata space.  This needs to be
+ * paired with btrfs_reserve_metadata_space.
+ *
+ * NOTE: if you have the option, run this _AFTER_ you do a
+ * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
+ * operations which will result in more used metadata, so we want to make sure
+ * we can do that without issue.
+ */
+int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
+{
+       struct btrfs_fs_info *info = root->fs_info;
+       struct btrfs_space_info *meta_sinfo;
+       u64 num_bytes;
+       u64 alloc_target;
+       bool bug = false;
+
+       /* get the space info for where the metadata will live */
+       alloc_target = btrfs_get_alloc_profile(root, 0);
+       meta_sinfo = __find_space_info(info, alloc_target);
+
+       num_bytes = calculate_bytes_needed(root, num_items);
+
+       spin_lock(&meta_sinfo->lock);
+       if (meta_sinfo->bytes_may_use < num_bytes) {
+               bug = true;
+               meta_sinfo->bytes_may_use = 0;
+       } else {
+               meta_sinfo->bytes_may_use -= num_bytes;
+       }
+       spin_unlock(&meta_sinfo->lock);
+
+       BUG_ON(bug);
+
+       return 0;
+}
+
+/*
+ * Reserve some metadata space for use.  We'll calculate the worst case number
+ * of bytes that would be needed to modify num_items items.  If we
+ * have space, fantastic; if not, you get -ENOSPC.  Please call
+ * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
+ * items you reserved, since whatever metadata you needed should have already
+ * been allocated.
+ *
+ * This will commit the transaction to make more space if we don't have enough
+ * metadata space.  The only time we don't do this is if we're reserving space
+ * inside of a transaction, in which case we just return -ENOSPC and it is the
+ * caller's responsibility to handle it properly.
+ */
+int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
+{
+       struct btrfs_fs_info *info = root->fs_info;
+       struct btrfs_space_info *meta_sinfo;
+       u64 num_bytes;
+       u64 used;
+       u64 alloc_target;
+       int retries = 0;
+
+       /* get the space info for where the metadata will live */
+       alloc_target = btrfs_get_alloc_profile(root, 0);
+       meta_sinfo = __find_space_info(info, alloc_target);
+
+       num_bytes = calculate_bytes_needed(root, num_items);
+again:
+       spin_lock(&meta_sinfo->lock);
+
+       if (unlikely(!meta_sinfo->bytes_root))
+               meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
+
+       if (!retries)
+               meta_sinfo->bytes_may_use += num_bytes;
+
+       used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
+               meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
+               meta_sinfo->bytes_super + meta_sinfo->bytes_root +
+               meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
+
+       if (used > meta_sinfo->total_bytes) {
+               retries++;
+               if (retries == 1) {
+                       if (maybe_allocate_chunk(root, meta_sinfo))
+                               goto again;
+                       retries++;
+               } else {
+                       spin_unlock(&meta_sinfo->lock);
+               }
+
+               if (retries == 2) {
+                       btrfs_start_delalloc_inodes(root);
+                       btrfs_wait_ordered_extents(root, 0);
                        goto again;
                }
+               spin_lock(&meta_sinfo->lock);
+               meta_sinfo->bytes_may_use -= num_bytes;
+               spin_unlock(&meta_sinfo->lock);
+
+               dump_space_info(meta_sinfo, 0, 0);
                return -ENOSPC;
        }
+
+       check_force_delalloc(meta_sinfo);
        spin_unlock(&meta_sinfo->lock);
 
        return 0;
@@ -2888,7 +3169,7 @@ alloc:
                spin_unlock(&data_sinfo->lock);
 
                /* commit the current transaction and try again */
-               if (!committed) {
+               if (!committed && !root->fs_info->open_ioctl_trans) {
                        committed = 1;
                        trans = btrfs_join_transaction(root, 1);
                        if (!trans)
@@ -2916,7 +3197,7 @@ alloc:
        BTRFS_I(inode)->reserved_bytes += bytes;
        spin_unlock(&data_sinfo->lock);
 
-       return btrfs_check_metadata_free_space(root);
+       return 0;
 }
 
 /*
@@ -3015,17 +3296,15 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        BUG_ON(!space_info);
 
        spin_lock(&space_info->lock);
-       if (space_info->force_alloc) {
+       if (space_info->force_alloc)
                force = 1;
-               space_info->force_alloc = 0;
-       }
        if (space_info->full) {
                spin_unlock(&space_info->lock);
                goto out;
        }
 
        thresh = space_info->total_bytes - space_info->bytes_readonly;
-       thresh = div_factor(thresh, 6);
+       thresh = div_factor(thresh, 8);
        if (!force &&
           (space_info->bytes_used + space_info->bytes_pinned +
            space_info->bytes_reserved + alloc_bytes) < thresh) {
@@ -3039,7 +3318,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
         * we keep a reasonable number of metadata chunks allocated in the
         * FS as well.
         */
-       if (flags & BTRFS_BLOCK_GROUP_DATA) {
+       if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
                fs_info->data_chunk_allocations++;
                if (!(fs_info->data_chunk_allocations %
                      fs_info->metadata_ratio))
@@ -3047,8 +3326,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        }
 
        ret = btrfs_alloc_chunk(trans, extent_root, flags);
+       spin_lock(&space_info->lock);
        if (ret)
                space_info->full = 1;
+       space_info->force_alloc = 0;
+       spin_unlock(&space_info->lock);
 out:
        mutex_unlock(&extent_root->fs_info->chunk_mutex);
        return ret;
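
The threshold change in this hunk raises the point at which do_chunk_alloc() actually allocates.  Assuming div_factor() keeps its meaning of num * factor / 10, as defined elsewhere in extent-tree.c, the rough numbers look like this:

/* Assuming div_factor(num, factor) == num * factor / 10.
 * With 100 GiB usable (total_bytes - bytes_readonly):
 *
 *   old:  thresh = div_factor(100 GiB, 6) = 60 GiB
 *   new:  thresh = div_factor(100 GiB, 8) = 80 GiB
 *
 * So an unforced allocation is skipped until
 * bytes_used + bytes_pinned + bytes_reserved + alloc_bytes
 * would reach 80% of the usable space instead of 60%.
 */
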
@@ -4063,21 +4345,32 @@ loop:
        return ret;
 }
 
-static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
+static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
+                           int dump_block_groups)
 {
        struct btrfs_block_group_cache *cache;
 
+       spin_lock(&info->lock);
        printk(KERN_INFO "space_info has %llu free, is %sfull\n",
               (unsigned long long)(info->total_bytes - info->bytes_used -
-                                   info->bytes_pinned - info->bytes_reserved),
+                                   info->bytes_pinned - info->bytes_reserved -
+                                   info->bytes_super),
               (info->full) ? "" : "not ");
        printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
-              " may_use=%llu, used=%llu\n",
+              " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
+              "\n",
               (unsigned long long)info->total_bytes,
               (unsigned long long)info->bytes_pinned,
               (unsigned long long)info->bytes_delalloc,
               (unsigned long long)info->bytes_may_use,
-              (unsigned long long)info->bytes_used);
+              (unsigned long long)info->bytes_used,
+              (unsigned long long)info->bytes_root,
+              (unsigned long long)info->bytes_super,
+              (unsigned long long)info->bytes_reserved);
+       spin_unlock(&info->lock);
+
+       if (!dump_block_groups)
+               return;
 
        down_read(&info->groups_sem);
        list_for_each_entry(cache, &info->block_groups, list) {
@@ -4145,7 +4438,7 @@ again:
                printk(KERN_ERR "btrfs allocation failed flags %llu, "
                       "wanted %llu\n", (unsigned long long)data,
                       (unsigned long long)num_bytes);
-               dump_space_info(sinfo, num_bytes);
+               dump_space_info(sinfo, num_bytes, 1);
        }
 
        return ret;
index 0cb88f8146ea85efa5bc944c9d95995a3d0ad772..de1793ba004aa472afc8c983521cea7380ef4720 100644 (file)
@@ -280,6 +280,14 @@ static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
        return NULL;
 }
 
+static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
+                    struct extent_state *other)
+{
+       if (tree->ops && tree->ops->merge_extent_hook)
+               tree->ops->merge_extent_hook(tree->mapping->host, new,
+                                            other);
+}
+
 /*
  * utility function to look for merge candidates inside a given range.
  * Any extents with matching state are merged together into a single
@@ -303,6 +311,7 @@ static int merge_state(struct extent_io_tree *tree,
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->end == state->start - 1 &&
                    other->state == state->state) {
+                       merge_cb(tree, state, other);
                        state->start = other->start;
                        other->tree = NULL;
                        rb_erase(&other->rb_node, &tree->state);
@@ -314,33 +323,37 @@ static int merge_state(struct extent_io_tree *tree,
                other = rb_entry(other_node, struct extent_state, rb_node);
                if (other->start == state->end + 1 &&
                    other->state == state->state) {
+                       merge_cb(tree, state, other);
                        other->start = state->start;
                        state->tree = NULL;
                        rb_erase(&state->rb_node, &tree->state);
                        free_extent_state(state);
+                       state = NULL;
                }
        }
+
        return 0;
 }
 
-static void set_state_cb(struct extent_io_tree *tree,
+static int set_state_cb(struct extent_io_tree *tree,
                         struct extent_state *state,
                         unsigned long bits)
 {
        if (tree->ops && tree->ops->set_bit_hook) {
-               tree->ops->set_bit_hook(tree->mapping->host, state->start,
-                                       state->end, state->state, bits);
+               return tree->ops->set_bit_hook(tree->mapping->host,
+                                              state->start, state->end,
+                                              state->state, bits);
        }
+
+       return 0;
 }
 
 static void clear_state_cb(struct extent_io_tree *tree,
                           struct extent_state *state,
                           unsigned long bits)
 {
-       if (tree->ops && tree->ops->clear_bit_hook) {
-               tree->ops->clear_bit_hook(tree->mapping->host, state->start,
-                                         state->end, state->state, bits);
-       }
+       if (tree->ops && tree->ops->clear_bit_hook)
+               tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 }
 
 /*
@@ -358,6 +371,7 @@ static int insert_state(struct extent_io_tree *tree,
                        int bits)
 {
        struct rb_node *node;
+       int ret;
 
        if (end < start) {
                printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -365,11 +379,14 @@ static int insert_state(struct extent_io_tree *tree,
                       (unsigned long long)start);
                WARN_ON(1);
        }
-       if (bits & EXTENT_DIRTY)
-               tree->dirty_bytes += end - start + 1;
        state->start = start;
        state->end = end;
-       set_state_cb(tree, state, bits);
+       ret = set_state_cb(tree, state, bits);
+       if (ret)
+               return ret;
+
+       if (bits & EXTENT_DIRTY)
+               tree->dirty_bytes += end - start + 1;
        state->state |= bits;
        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
@@ -387,6 +404,15 @@ static int insert_state(struct extent_io_tree *tree,
        return 0;
 }
 
+static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
+                    u64 split)
+{
+       if (tree->ops && tree->ops->split_extent_hook)
+               return tree->ops->split_extent_hook(tree->mapping->host,
+                                                   orig, split);
+       return 0;
+}
+
 /*
  * split a given extent state struct in two, inserting the preallocated
  * struct 'prealloc' as the newly created second half.  'split' indicates an
@@ -405,6 +431,9 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
                       struct extent_state *prealloc, u64 split)
 {
        struct rb_node *node;
+
+       split_cb(tree, orig, split);
+
        prealloc->start = orig->start;
        prealloc->end = split - 1;
        prealloc->state = orig->state;
@@ -542,8 +571,8 @@ hit_next:
                if (err)
                        goto out;
                if (state->end <= end) {
-                       set |= clear_state_bit(tree, state, bits,
-                                       wake, delete);
+                       set |= clear_state_bit(tree, state, bits, wake,
+                                              delete);
                        if (last_end == (u64)-1)
                                goto out;
                        start = last_end + 1;
@@ -561,12 +590,11 @@ hit_next:
                        prealloc = alloc_extent_state(GFP_ATOMIC);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
-
                if (wake)
                        wake_up(&state->wq);
 
-               set |= clear_state_bit(tree, prealloc, bits,
-                                      wake, delete);
+               set |= clear_state_bit(tree, prealloc, bits, wake, delete);
+
                prealloc = NULL;
                goto out;
        }
@@ -667,16 +695,23 @@ out:
        return 0;
 }
 
-static void set_state_bits(struct extent_io_tree *tree,
+static int set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
                           int bits)
 {
+       int ret;
+
+       ret = set_state_cb(tree, state, bits);
+       if (ret)
+               return ret;
+
        if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
                tree->dirty_bytes += range;
        }
-       set_state_cb(tree, state, bits);
        state->state |= bits;
+
+       return 0;
 }
 
 static void cache_state(struct extent_state *state,
@@ -758,7 +793,10 @@ hit_next:
                        goto out;
                }
 
-               set_state_bits(tree, state, bits);
+               err = set_state_bits(tree, state, bits);
+               if (err)
+                       goto out;
+
                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
@@ -805,7 +843,9 @@ hit_next:
                if (err)
                        goto out;
                if (state->end <= end) {
-                       set_state_bits(tree, state, bits);
+                       err = set_state_bits(tree, state, bits);
+                       if (err)
+                               goto out;
                        cache_state(state, cached_state);
                        merge_state(tree, state);
                        if (last_end == (u64)-1)
@@ -829,11 +869,13 @@ hit_next:
                        this_end = last_start - 1;
                err = insert_state(tree, prealloc, start, this_end,
                                   bits);
-               cache_state(prealloc, cached_state);
-               prealloc = NULL;
                BUG_ON(err == -EEXIST);
-               if (err)
+               if (err) {
+                       prealloc = NULL;
                        goto out;
+               }
+               cache_state(prealloc, cached_state);
+               prealloc = NULL;
                start = this_end + 1;
                goto search_again;
        }
@@ -852,7 +894,11 @@ hit_next:
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
 
-               set_state_bits(tree, prealloc, bits);
+               err = set_state_bits(tree, prealloc, bits);
+               if (err) {
+                       prealloc = NULL;
+                       goto out;
+               }
                cache_state(prealloc, cached_state);
                merge_state(tree, prealloc);
                prealloc = NULL;
index 14ed16fd862df22a93b7286c4c4811a8c12fa6ba..4794ec891fed31a3998098cb1999818627aacb12 100644 (file)
@@ -60,8 +60,13 @@ struct extent_io_ops {
                                      struct extent_state *state, int uptodate);
        int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
                            unsigned long old, unsigned long bits);
-       int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end,
-                           unsigned long old, unsigned long bits);
+       int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
+                             unsigned long bits);
+       int (*merge_extent_hook)(struct inode *inode,
+                                struct extent_state *new,
+                                struct extent_state *other);
+       int (*split_extent_hook)(struct inode *inode,
+                                struct extent_state *orig, u64 split);
        int (*write_cache_pages_lock_hook)(struct page *page);
 };
 
@@ -79,10 +84,14 @@ struct extent_state {
        u64 start;
        u64 end; /* inclusive */
        struct rb_node rb_node;
+
+       /* ADD NEW ELEMENTS AFTER THIS */
        struct extent_io_tree *tree;
        wait_queue_head_t wq;
        atomic_t refs;
        unsigned long state;
+       u64 split_start;
+       u64 split_end;
 
        /* for use by the FS */
        u64 private;
index a3492a3ad96b960fecec513d4023f7ee641fcf81..f19e1259a971d52d62a9979ba311d3f136289e05 100644 (file)
@@ -123,7 +123,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 
        end_of_last_block = start_pos + num_bytes - 1;
-       btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
+       err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
+       if (err)
+               return err;
+
        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
@@ -917,21 +920,35 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
        start_pos = pos;
 
        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+       /* do the reserve before the mutex lock in case we have to do some
+        * flushing.  We wouldn't deadlock, but this is more polite.
+        */
+       err = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
+       if (err)
+               goto out_nolock;
+
+       mutex_lock(&inode->i_mutex);
+
        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
-               goto out_nolock;
+               goto out;
+
        if (count == 0)
-               goto out_nolock;
+               goto out;
 
        err = file_remove_suid(file);
        if (err)
-               goto out_nolock;
+               goto out;
+
        file_update_time(file);
 
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
 
-       mutex_lock(&inode->i_mutex);
+       /* generic_write_checks can change our pos */
+       start_pos = pos;
+
        BTRFS_I(inode)->sequence++;
        first_index = pos >> PAGE_CACHE_SHIFT;
        last_index = (pos + count) >> PAGE_CACHE_SHIFT;
@@ -1005,9 +1022,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                }
 
                if (will_write) {
-                       btrfs_fdatawrite_range(inode->i_mapping, pos,
-                                              pos + write_bytes - 1,
-                                              WB_SYNC_ALL);
+                       filemap_fdatawrite_range(inode->i_mapping, pos,
+                                                pos + write_bytes - 1);
                } else {
                        balance_dirty_pages_ratelimited_nr(inode->i_mapping,
                                                           num_pages);
@@ -1028,6 +1044,7 @@ out:
        mutex_unlock(&inode->i_mutex);
        if (ret)
                err = ret;
+       btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
 
 out_nolock:
        kfree(pages);
@@ -1196,7 +1213,7 @@ static int btrfs_file_mmap(struct file    *filp, struct vm_area_struct *vma)
        return 0;
 }
 
-struct file_operations btrfs_file_operations = {
+const struct file_operations btrfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
index e9b76bcd1c129e0d1f4664f54fefd33caba72e08..112e5aa85892c35dbaf921ba16bba119d4c2c49e 100644 (file)
@@ -62,7 +62,7 @@ static const struct inode_operations btrfs_special_inode_operations;
 static const struct inode_operations btrfs_file_inode_operations;
 static const struct address_space_operations btrfs_aops;
 static const struct address_space_operations btrfs_symlink_aops;
-static struct file_operations btrfs_dir_file_operations;
+static const struct file_operations btrfs_dir_file_operations;
 static struct extent_io_ops btrfs_extent_io_ops;
 
 static struct kmem_cache *btrfs_inode_cachep;
@@ -1159,6 +1159,83 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
        return ret;
 }
 
+static int btrfs_split_extent_hook(struct inode *inode,
+                                   struct extent_state *orig, u64 split)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       u64 size;
+
+       if (!(orig->state & EXTENT_DELALLOC))
+               return 0;
+
+       size = orig->end - orig->start + 1;
+       if (size > root->fs_info->max_extent) {
+               u64 num_extents;
+               u64 new_size;
+
+               new_size = orig->end - split + 1;
+               num_extents = div64_u64(size + root->fs_info->max_extent - 1,
+                                       root->fs_info->max_extent);
+
+               /*
+                * if we break a large extent up then leave delalloc_extents be,
+                * since we've already accounted for the large extent.
+                */
+               if (div64_u64(new_size + root->fs_info->max_extent - 1,
+                             root->fs_info->max_extent) < num_extents)
+                       return 0;
+       }
+
+       BTRFS_I(inode)->delalloc_extents++;
+
+       return 0;
+}
+
+/*
+ * extent_io.c merge_extent_hook, used to track merged delayed allocation
+ * extents.  This lets us notice when new extents are merged onto old ones,
+ * such as when we are doing sequential writes, so we can properly account
+ * for the metadata space we'll need.
+ */
+static int btrfs_merge_extent_hook(struct inode *inode,
+                                  struct extent_state *new,
+                                  struct extent_state *other)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       u64 new_size, old_size;
+       u64 num_extents;
+
+       /* not delalloc, ignore it */
+       if (!(other->state & EXTENT_DELALLOC))
+               return 0;
+
+       old_size = other->end - other->start + 1;
+       if (new->start < other->start)
+               new_size = other->end - new->start + 1;
+       else
+               new_size = new->end - other->start + 1;
+
+       /* we're not bigger than the max, unreserve the space and go */
+       if (new_size <= root->fs_info->max_extent) {
+               BTRFS_I(inode)->delalloc_extents--;
+               return 0;
+       }
+
+       /*
+        * If we grew by another max_extent, just return, we want to keep that
+        * reserved amount.
+        */
+       num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
+                               root->fs_info->max_extent);
+       if (div64_u64(new_size + root->fs_info->max_extent - 1,
+                     root->fs_info->max_extent) > num_extents)
+               return 0;
+
+       BTRFS_I(inode)->delalloc_extents--;
+
+       return 0;
+}
+
 /*
  * extent_io.c set_bit_hook, used to track delayed allocation
  * bytes in this file, and to maintain the list of inodes that
@@ -1167,6 +1244,7 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
                       unsigned long old, unsigned long bits)
 {
+
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
@@ -1174,6 +1252,8 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
         */
        if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
+
+               BTRFS_I(inode)->delalloc_extents++;
                btrfs_delalloc_reserve_space(root, inode, end - start + 1);
                spin_lock(&root->fs_info->delalloc_lock);
                BTRFS_I(inode)->delalloc_bytes += end - start + 1;
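
Both hooks above size their accounting in units of fs_info->max_extent, using round-up division (div64_u64(size + max_extent - 1, max_extent)).  A few worked cases, with max_extent assumed to be 128 MiB purely for illustration:

/* Assume fs_info->max_extent == 128 MiB.
 *
 * merge: a 96 MiB delalloc extent meets a 16 MiB neighbour.
 *        new_size = 112 MiB <= 128 MiB, so the two reservations
 *        collapse into one (delalloc_extents--).
 *
 * merge: a 200 MiB extent (counted as ceil(200/128) = 2) grows by
 *        64 MiB to 264 MiB.  ceil(264/128) = 3 > 2, so the extra
 *        reservation is kept and nothing is decremented.
 *
 * split: a 300 MiB extent (counted as ceil(300/128) = 3) is split so
 *        the right-hand piece is 100 MiB.  ceil(100/128) = 1 < 3, so
 *        delalloc_extents is left alone; the large extent was already
 *        accounted for.
 */
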
@@ -1190,22 +1270,27 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
 /*
  * extent_io.c clear_bit_hook, see set_bit_hook for why
  */
-static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
-                        unsigned long old, unsigned long bits)
+static int btrfs_clear_bit_hook(struct inode *inode,
+                               struct extent_state *state, unsigned long bits)
 {
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
         * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
-       if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
+       if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
 
+               BTRFS_I(inode)->delalloc_extents--;
+               btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
+
                spin_lock(&root->fs_info->delalloc_lock);
-               if (end - start + 1 > root->fs_info->delalloc_bytes) {
+               if (state->end - state->start + 1 >
+                   root->fs_info->delalloc_bytes) {
                        printk(KERN_INFO "btrfs warning: delalloc account "
                               "%llu %llu\n",
-                              (unsigned long long)end - start + 1,
+                              (unsigned long long)
+                              state->end - state->start + 1,
                               (unsigned long long)
                               root->fs_info->delalloc_bytes);
                        btrfs_delalloc_free_space(root, inode, (u64)-1);
@@ -1213,9 +1298,12 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
                        BTRFS_I(inode)->delalloc_bytes = 0;
                } else {
                        btrfs_delalloc_free_space(root, inode,
-                                                 end - start + 1);
-                       root->fs_info->delalloc_bytes -= end - start + 1;
-                       BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
+                                                 state->end -
+                                                 state->start + 1);
+                       root->fs_info->delalloc_bytes -= state->end -
+                               state->start + 1;
+                       BTRFS_I(inode)->delalloc_bytes -= state->end -
+                               state->start + 1;
                }
                if (BTRFS_I(inode)->delalloc_bytes == 0 &&
                    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
@@ -2950,7 +3038,12 @@ again:
                goto again;
        }
 
-       btrfs_set_extent_delalloc(inode, page_start, page_end);
+       ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
+       if (ret) {
+               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+               goto out_unlock;
+       }
+
        ret = 0;
        if (offset != PAGE_CACHE_SIZE) {
                kaddr = kmap(page);
@@ -2981,15 +3074,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
        u64 last_byte;
        u64 cur_offset;
        u64 hole_size;
-       int err;
+       int err = 0;
 
        if (size <= hole_start)
                return 0;
 
-       err = btrfs_check_metadata_free_space(root);
-       if (err)
-               return err;
-
        btrfs_truncate_page(inode->i_mapping, inode->i_size);
 
        while (1) {
@@ -3024,12 +3113,18 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
                                                 cur_offset, &hint_byte, 1);
                        if (err)
                                break;
+
+                       err = btrfs_reserve_metadata_space(root, 1);
+                       if (err)
+                               break;
+
                        err = btrfs_insert_file_extent(trans, root,
                                        inode->i_ino, cur_offset, 0,
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
                        btrfs_drop_extent_cache(inode, hole_start,
                                        last_byte - 1, 0);
+                       btrfs_unreserve_metadata_space(root, 1);
                }
                free_extent_map(em);
                cur_offset = last_byte;
@@ -3990,11 +4085,18 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        if (!new_valid_dev(rdev))
                return -EINVAL;
 
-       err = btrfs_check_metadata_free_space(root);
+       /*
+        * 2 for inode item and ref
+        * 2 for dir items
+        * 1 for xattr if selinux is on
+        */
+       err = btrfs_reserve_metadata_space(root, 5);
        if (err)
-               goto fail;
+               return err;
 
        trans = btrfs_start_transaction(root, 1);
+       if (!trans)
+               goto fail;
        btrfs_set_trans_block_group(trans, dir);
 
        err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -4032,6 +4134,7 @@ out_unlock:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
 fail:
+       btrfs_unreserve_metadata_space(root, 5);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -4052,10 +4155,18 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        u64 objectid;
        u64 index = 0;
 
-       err = btrfs_check_metadata_free_space(root);
+       /*
+        * 2 for inode item and ref
+        * 2 for dir items
+        * 1 for xattr if selinux is on
+        */
+       err = btrfs_reserve_metadata_space(root, 5);
        if (err)
-               goto fail;
+               return err;
+
        trans = btrfs_start_transaction(root, 1);
+       if (!trans)
+               goto fail;
        btrfs_set_trans_block_group(trans, dir);
 
        err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -4096,6 +4207,7 @@ out_unlock:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
 fail:
+       btrfs_unreserve_metadata_space(root, 5);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -4118,10 +4230,16 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        if (inode->i_nlink == 0)
                return -ENOENT;
 
-       btrfs_inc_nlink(inode);
-       err = btrfs_check_metadata_free_space(root);
+       /*
+        * 1 item for inode ref
+        * 2 items for dir items
+        */
+       err = btrfs_reserve_metadata_space(root, 3);
        if (err)
-               goto fail;
+               return err;
+
+       btrfs_inc_nlink(inode);
+
        err = btrfs_set_inode_index(dir, &index);
        if (err)
                goto fail;
@@ -4145,6 +4263,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
 fail:
+       btrfs_unreserve_metadata_space(root, 3);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -4164,17 +4283,21 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        u64 index = 0;
        unsigned long nr = 1;
 
-       err = btrfs_check_metadata_free_space(root);
+       /*
+        * 2 items for inode and ref
+        * 2 items for dir items
+        * 1 for xattr if selinux is on
+        */
+       err = btrfs_reserve_metadata_space(root, 5);
        if (err)
-               goto out_unlock;
+               return err;
 
        trans = btrfs_start_transaction(root, 1);
-       btrfs_set_trans_block_group(trans, dir);
-
-       if (IS_ERR(trans)) {
-               err = PTR_ERR(trans);
+       if (!trans) {
+               err = -ENOMEM;
                goto out_unlock;
        }
+       btrfs_set_trans_block_group(trans, dir);
 
        err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
        if (err) {
@@ -4223,6 +4346,7 @@ out_fail:
        btrfs_end_transaction_throttle(trans, root);
 
 out_unlock:
+       btrfs_unreserve_metadata_space(root, 5);
        if (drop_on_err)
                iput(inode);
        btrfs_btree_balance_dirty(root, nr);
@@ -4747,6 +4871,13 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto out;
        }
 
+       ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
+       if (ret) {
+               btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
+               ret = VM_FAULT_SIGBUS;
+               goto out;
+       }
+
        ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
 again:
        lock_page(page);
@@ -4778,7 +4909,23 @@ again:
                goto again;
        }
 
-       btrfs_set_extent_delalloc(inode, page_start, page_end);
+       /*
+        * XXX - page_mkwrite gets called every time the page is dirtied, even
+        * if it was already dirty, so for space accounting reasons we need to
+        * clear any delalloc bits for the range we are about to save.  There
+        * is probably a better way to do this, but for now keep consistent with
+        * prepare_pages in the normal write path.
+        */
+       clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
+                         EXTENT_DIRTY | EXTENT_DELALLOC, GFP_NOFS);
+
+       ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
+       if (ret) {
+               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+               ret = VM_FAULT_SIGBUS;
+               btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
+               goto out_unlock;
+       }
        ret = 0;
 
        /* page is wholly or partially inside EOF */
@@ -4801,6 +4948,7 @@ again:
        unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
 
 out_unlock:
+       btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
        if (!ret)
                return VM_FAULT_LOCKED;
        unlock_page(page);
@@ -4917,6 +5065,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
                return NULL;
        ei->last_trans = 0;
        ei->logged_trans = 0;
+       ei->delalloc_extents = 0;
+       ei->delalloc_reserved_extents = 0;
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
        INIT_LIST_HEAD(&ei->i_orphan);
        INIT_LIST_HEAD(&ei->ordered_operations);
@@ -5070,7 +5220,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
            new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
                return -ENOTEMPTY;
 
-       ret = btrfs_check_metadata_free_space(root);
+       /*
+        * 2 items for dir items
+        * 1 item for orphan entry
+        * 1 item for ref
+        */
+       ret = btrfs_reserve_metadata_space(root, 4);
        if (ret)
                return ret;
 
@@ -5185,6 +5340,8 @@ out_fail:
 
        if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&root->fs_info->subvol_sem);
+
+       btrfs_unreserve_metadata_space(root, 4);
        return ret;
 }
 
@@ -5256,11 +5413,18 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
                return -ENAMETOOLONG;
 
-       err = btrfs_check_metadata_free_space(root);
+       /*
+        * 2 items for inode item and ref
+        * 2 items for dir items
+        * 1 item for xattr if selinux is on
+        */
+       err = btrfs_reserve_metadata_space(root, 5);
        if (err)
-               goto out_fail;
+               return err;
 
        trans = btrfs_start_transaction(root, 1);
+       if (!trans)
+               goto out_fail;
        btrfs_set_trans_block_group(trans, dir);
 
        err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -5341,6 +5505,7 @@ out_unlock:
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
 out_fail:
+       btrfs_unreserve_metadata_space(root, 5);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -5362,6 +5527,11 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
 
        while (num_bytes > 0) {
                alloc_size = min(num_bytes, root->fs_info->max_extent);
+
+               ret = btrfs_reserve_metadata_space(root, 1);
+               if (ret)
+                       goto out;
+
                ret = btrfs_reserve_extent(trans, root, alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           (u64)-1, &ins, 1);
@@ -5381,6 +5551,7 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
                num_bytes -= ins.offset;
                cur_offset += ins.offset;
                alloc_hint = ins.objectid + ins.offset;
+               btrfs_unreserve_metadata_space(root, 1);
        }
 out:
        if (cur_offset > start) {
@@ -5544,7 +5715,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .permission     = btrfs_permission,
 };
 
-static struct file_operations btrfs_dir_file_operations = {
+static const struct file_operations btrfs_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .readdir        = btrfs_real_readdir,
@@ -5566,6 +5737,8 @@ static struct extent_io_ops btrfs_extent_io_ops = {
        .readpage_io_failed_hook = btrfs_io_failed_hook,
        .set_bit_hook = btrfs_set_bit_hook,
        .clear_bit_hook = btrfs_clear_bit_hook,
+       .merge_extent_hook = btrfs_merge_extent_hook,
+       .split_extent_hook = btrfs_split_extent_hook,
 };
 
 /*
index a8577a7f26ab248984357a0b8b6505013cff3dba..9a780c8d0ac83df7bc2444d90830e3214649e8a4 100644 (file)
@@ -239,7 +239,13 @@ static noinline int create_subvol(struct btrfs_root *root,
        u64 index = 0;
        unsigned long nr = 1;
 
-       ret = btrfs_check_metadata_free_space(root);
+       /*
+        * 1 - inode item
+        * 2 - refs
+        * 1 - root item
+        * 2 - dir items
+        */
+       ret = btrfs_reserve_metadata_space(root, 6);
        if (ret)
                return ret;
 
@@ -340,6 +346,9 @@ fail:
        err = btrfs_commit_transaction(trans, root);
        if (err && !ret)
                ret = err;
+
+       btrfs_unreserve_metadata_space(root, 6);
+       btrfs_btree_balance_dirty(root, nr);
        return ret;
 }
 
@@ -355,19 +364,27 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
        if (!root->ref_cows)
                return -EINVAL;
 
-       ret = btrfs_check_metadata_free_space(root);
+       /*
+        * 1 - inode item
+        * 2 - refs
+        * 1 - root item
+        * 2 - dir items
+        */
+       ret = btrfs_reserve_metadata_space(root, 6);
        if (ret)
                goto fail_unlock;
 
        pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
        if (!pending_snapshot) {
                ret = -ENOMEM;
+               btrfs_unreserve_metadata_space(root, 6);
                goto fail_unlock;
        }
        pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
        if (!pending_snapshot->name) {
                ret = -ENOMEM;
                kfree(pending_snapshot);
+               btrfs_unreserve_metadata_space(root, 6);
                goto fail_unlock;
        }
        memcpy(pending_snapshot->name, name, namelen);
@@ -1215,15 +1232,15 @@ static long btrfs_ioctl_trans_start(struct file *file)
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
-       int ret = 0;
+       int ret;
 
+       ret = -EPERM;
        if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
+               goto out;
 
-       if (file->private_data) {
-               ret = -EINPROGRESS;
+       ret = -EINPROGRESS;
+       if (file->private_data)
                goto out;
-       }
 
        ret = mnt_want_write(file->f_path.mnt);
        if (ret)
@@ -1233,12 +1250,19 @@ static long btrfs_ioctl_trans_start(struct file *file)
        root->fs_info->open_ioctl_trans++;
        mutex_unlock(&root->fs_info->trans_mutex);
 
+       ret = -ENOMEM;
        trans = btrfs_start_ioctl_transaction(root, 0);
-       if (trans)
-               file->private_data = trans;
-       else
-               ret = -ENOMEM;
-       /*printk(KERN_INFO "btrfs_ioctl_trans_start on %p\n", file);*/
+       if (!trans)
+               goto out_drop;
+
+       file->private_data = trans;
+       return 0;
+
+out_drop:
+       mutex_lock(&root->fs_info->trans_mutex);
+       root->fs_info->open_ioctl_trans--;
+       mutex_unlock(&root->fs_info->trans_mutex);
+       mnt_drop_write(file->f_path.mnt);
 out:
        return ret;
 }
@@ -1254,24 +1278,20 @@ long btrfs_ioctl_trans_end(struct file *file)
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
-       int ret = 0;
 
        trans = file->private_data;
-       if (!trans) {
-               ret = -EINVAL;
-               goto out;
-       }
-       btrfs_end_transaction(trans, root);
+       if (!trans)
+               return -EINVAL;
        file->private_data = NULL;
 
+       btrfs_end_transaction(trans, root);
+
        mutex_lock(&root->fs_info->trans_mutex);
        root->fs_info->open_ioctl_trans--;
        mutex_unlock(&root->fs_info->trans_mutex);
 
        mnt_drop_write(file->f_path.mnt);
-
-out:
-       return ret;
+       return 0;
 }
 
 long btrfs_ioctl(struct file *file, unsigned int
index b5d6d24726b0014463f15041bbc9ddd74fdb110e..897fba835f897a0d02f1d4677447fdcf70d115d7 100644 (file)
@@ -458,7 +458,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for pdflush to find them
         */
-       btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
+       filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
@@ -488,17 +488,15 @@ again:
        /* start IO across the range first to instantiate any delalloc
         * extents
         */
-       btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
        /* The compression code will leave pages locked but return from
         * writepage without setting the page writeback.  Starting again
         * with WB_SYNC_ALL will end up waiting for the IO to actually start.
         */
-       btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+       filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
-       btrfs_wait_on_page_writeback_range(inode->i_mapping,
-                                          start >> PAGE_CACHE_SHIFT,
-                                          orig_end >> PAGE_CACHE_SHIFT);
+       filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 
        end = orig_end;
        found = 0;
@@ -716,89 +714,6 @@ out:
 }
 
 
-/**
- * taken from mm/filemap.c because it isn't exported
- *
- * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
- * @mapping:   address space structure to write
- * @start:     offset in bytes where the range starts
- * @end:       offset in bytes where the range ends (inclusive)
- * @sync_mode: enable synchronous operation
- *
- * Start writeback against all of a mapping's dirty pages that lie
- * within the byte offsets <start, end> inclusive.
- *
- * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory cleansing writeback.  The difference between
- * these two operations is that if a dirty page/buffer is encountered, it must
- * be waited upon, and not just skipped over.
- */
-int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
-                          loff_t end, int sync_mode)
-{
-       struct writeback_control wbc = {
-               .sync_mode = sync_mode,
-               .nr_to_write = mapping->nrpages * 2,
-               .range_start = start,
-               .range_end = end,
-       };
-       return btrfs_writepages(mapping, &wbc);
-}
-
-/**
- * taken from mm/filemap.c because it isn't exported
- *
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping:   target address_space
- * @start:     beginning page index
- * @end:       ending page index
- *
- * Wait for writeback to complete against pages indexed by start->end
- * inclusive
- */
-int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
-                                      pgoff_t start, pgoff_t end)
-{
-       struct pagevec pvec;
-       int nr_pages;
-       int ret = 0;
-       pgoff_t index;
-
-       if (end < start)
-               return 0;
-
-       pagevec_init(&pvec, 0);
-       index = start;
-       while ((index <= end) &&
-                       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                       PAGECACHE_TAG_WRITEBACK,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
-               unsigned i;
-
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-
-                       /* until radix tree lookup accepts end_index */
-                       if (page->index > end)
-                               continue;
-
-                       wait_on_page_writeback(page);
-                       if (PageError(page))
-                               ret = -EIO;
-               }
-               pagevec_release(&pvec);
-               cond_resched();
-       }
-
-       /* Check for outstanding write errors */
-       if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
-               ret = -ENOSPC;
-       if (test_and_clear_bit(AS_EIO, &mapping->flags))
-               ret = -EIO;
-
-       return ret;
-}
-
 /*
  * add a given inode to the list of inodes that must be fully on
  * disk before a transaction commit finishes.
index 993a7ea45c702a580c784908584408684913e233..f82e87488ca8f47d158c37281a3aafa746d5031d 100644 (file)
@@ -153,10 +153,6 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
 int btrfs_ordered_update_i_size(struct inode *inode,
                                struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
-                                      pgoff_t start, pgoff_t end);
-int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
-                          loff_t end, int sync_mode);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
 int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
 int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
index 67035385444cf29c31a59951311af66702fa80a8..9de9b22364190de829c4479cc74b81578f41b18a 100644 (file)
@@ -344,7 +344,9 @@ static int btrfs_fill_super(struct super_block *sb,
        sb->s_export_op = &btrfs_export_ops;
        sb->s_xattr = btrfs_xattr_handlers;
        sb->s_time_gran = 1;
+#ifdef CONFIG_BTRFS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
+#endif
 
        tree_root = open_ctree(sb, fs_devices, (char *)data);
 
index 88f866f85e7affa4b387cc3cc72fae5a472bb786..0b8f36d4400a70919d055cac8b467b7a0f9c24d7 100644 (file)
@@ -186,6 +186,9 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
        h->alloc_exclude_start = 0;
        h->delayed_ref_updates = 0;
 
+       if (!current->journal_info)
+               current->journal_info = h;
+
        root->fs_info->running_transaction->use_count++;
        record_root_in_trans(h, root);
        mutex_unlock(&root->fs_info->trans_mutex);
@@ -317,6 +320,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);
        mutex_unlock(&info->trans_mutex);
+
+       if (current->journal_info == trans)
+               current->journal_info = NULL;
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
@@ -743,6 +749,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        memcpy(&pending->root_key, &key, sizeof(key));
 fail:
        kfree(new_root_item);
+       btrfs_unreserve_metadata_space(root, 6);
        return ret;
 }
 
@@ -1059,6 +1066,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        mutex_unlock(&root->fs_info->trans_mutex);
 
+       if (current->journal_info == trans)
+               current->journal_info = NULL;
+
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return ret;
 }
index 23e7d36ff32554eb7e781ab7c8e9b8102fddc235..7eda483d7b5aa0cd5e5bd96d149cca6fe2fc8f16 100644 (file)
@@ -446,8 +446,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                        goto error;
 
                device->name = kstrdup(orig_dev->name, GFP_NOFS);
-               if (!device->name)
+               if (!device->name) {
+                       kfree(device);
                        goto error;
+               }
 
                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
index a9d3bf4d2689a5924f1de8765bc38e3c5481c215..b0fc93f95fd0c7f18ac60471c60d767221df37af 100644 (file)
@@ -260,7 +260,7 @@ err:
  * attributes are handled directly.
  */
 struct xattr_handler *btrfs_xattr_handlers[] = {
-#ifdef CONFIG_FS_POSIX_ACL
+#ifdef CONFIG_BTRFS_POSIX_ACL
        &btrfs_xattr_acl_access_handler,
        &btrfs_xattr_acl_default_handler,
 #endif
index 0376ac66c44a6127199c5389f33f138f73c53fba..be4392ca20983345ae180a450fd0497442b135a8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/major.h>
 #include <linux/time.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/fcntl.h>
index d5c0ea2e8f2da53412fae66e6b758cf8c0e6850a..9f2d45d75b1a70084e5f153c94d49a2b5f863321 100644 (file)
@@ -26,20 +26,6 @@ config EXT4_FS
 
          If unsure, say N.
 
-config EXT4DEV_COMPAT
-       bool "Enable ext4dev compatibility"
-       depends on EXT4_FS
-       help
-         Starting with 2.6.28, the name of the ext4 filesystem was
-         renamed from ext4dev to ext4.  Unfortunately there are some
-         legacy userspace programs (such as klibc's fstype) have
-         "ext4dev" hardcoded.
-
-         To enable backwards compatibility so that systems that are
-         still expecting to mount ext4 filesystems using ext4dev,
-         choose Y here.   This feature will go away by 2.6.31, so
-         please arrange to get your userspace programs fixed!
-
 config EXT4_FS_XATTR
        bool "Ext4 extended attributes"
        depends on EXT4_FS
index e227eea23f052c063547b5e4cd6a8b7a5ba59801..984ca0cb38c38be74fb091d19e8d811fcc1d95ca 100644 (file)
@@ -65,6 +65,12 @@ typedef __u32 ext4_lblk_t;
 /* data type for block group number */
 typedef unsigned int ext4_group_t;
 
+/*
+ * Flags used in mballoc's allocation_context flags field.  
+ *
+ * Also used to show what's going on for debugging purposes when the
+ * flags field is exported via the tracepoint interface
+ */
 
 /* prefer goal again. length */
 #define EXT4_MB_HINT_MERGE             0x0001
@@ -127,6 +133,16 @@ struct mpage_da_data {
        int pages_written;
        int retval;
 };
+#define        DIO_AIO_UNWRITTEN       0x1
+typedef struct ext4_io_end {
+       struct list_head        list;           /* per-file finished AIO list */
+       struct inode            *inode;         /* file being written to */
+       unsigned int            flag;           /* unwritten or not */
+       int                     error;          /* I/O error code */
+       ext4_lblk_t             offset;         /* offset in the file */
+       size_t                  size;           /* size of the extent */
+       struct work_struct      work;           /* data work queue */
+} ext4_io_end_t;
 
 /*
  * Special inodes numbers
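
The ext4_io_end structure above carries everything a deferred unwritten-extent conversion needs.  A rough sketch of the intended flow, using only names visible in this patch; the worker function below is illustrative, not ext4's actual handler:

/* Illustrative worker: convert the extent covered by a completed async
 * DIO write, then record any error in the io_end. */
static void example_dio_unwritten_work(struct work_struct *work)
{
        ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);

        if (io->flag == DIO_AIO_UNWRITTEN)
                io->error = ext4_convert_unwritten_extents(io->inode,
                                                           io->offset,
                                                           io->size);
        /* ... unlink io from EXT4_I(io->inode)->i_aio_dio_complete_list
         *     and free it ... */
}

/* On I/O completion the io_end would be handed to the per-superblock
 * workqueue added by this patch:
 *
 *      INIT_WORK(&io->work, example_dio_unwritten_work);
 *      queue_work(EXT4_SB(io->inode->i_sb)->dio_unwritten_wq, &io->work);
 */
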
@@ -347,7 +363,16 @@ struct ext4_new_group_data {
        /* Call ext4_da_update_reserve_space() after successfully 
           allocating the blocks */
 #define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE   0x0008
-
+       /* caller is from the direct IO path; request creation of an
+       uninitialized extent if not allocated, and split the uninitialized
+       extent if blocks have been preallocated already */
+#define EXT4_GET_BLOCKS_DIO                    0x0010
+#define EXT4_GET_BLOCKS_CONVERT                        0x0020
+#define EXT4_GET_BLOCKS_DIO_CREATE_EXT         (EXT4_GET_BLOCKS_DIO|\
+                                        EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+       /* Convert extent to initialized after direct IO complete */
+#define EXT4_GET_BLOCKS_DIO_CONVERT_EXT                (EXT4_GET_BLOCKS_CONVERT|\
+                                        EXT4_GET_BLOCKS_DIO_CREATE_EXT)
 
 /*
  * ioctl commands
@@ -500,8 +525,8 @@ struct move_extent {
 static inline __le32 ext4_encode_extra_time(struct timespec *time)
 {
        return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
-                          time->tv_sec >> 32 : 0) |
-                          ((time->tv_nsec << 2) & EXT4_NSEC_MASK));
+                          (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) |
+                          ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK));
 }
 
 static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
@@ -509,7 +534,7 @@ static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
        if (sizeof(time->tv_sec) > 4)
               time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
                               << 32;
-       time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> 2;
+       time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
 }
 
 #define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode)                         \
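
The encoding fix above matters for post-2038 timestamps.  A worked round trip, assuming the usual ext4.h definitions EXT4_EPOCH_BITS == 2, EXT4_EPOCH_MASK == 0x3 and EXT4_NSEC_MASK == (~0UL << 2):

/*   tv_sec  = 0x123456789      (needs 33 bits, i.e. one epoch bit set)
 *   tv_nsec = 999999999        (0x3b9ac9ff)
 *
 *   encode:  extra = ((tv_sec >> 32) & 0x3) | ((tv_nsec << 2) & ~0x3)
 *                  = 0x1 | 0xee6b27fc
 *                  = 0xee6b27fd
 *
 *   decode:  tv_sec  |= (u64)(extra & 0x3) << 32   -> high bit restored
 *            tv_nsec  = (extra & ~0x3) >> 2        -> 999999999
 *
 * Without the new "& EXT4_EPOCH_MASK", any bits of tv_sec above the two
 * stored epoch bits would have been OR-ed into the nanosecond field.
 */
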
@@ -672,6 +697,11 @@ struct ext4_inode_info {
        __u16 i_extra_isize;
 
        spinlock_t i_block_reservation_lock;
+
+       /* completed async DIOs that might need unwritten extents handling */
+       struct list_head i_aio_dio_complete_list;
+       /* current io_end structure for async DIO write */
+       ext4_io_end_t *cur_aio_dio;
 };
 
 /*
@@ -942,18 +972,11 @@ struct ext4_sb_info {
        unsigned int s_mb_stats;
        unsigned int s_mb_order2_reqs;
        unsigned int s_mb_group_prealloc;
+       unsigned int s_max_writeback_mb_bump;
        /* where last allocation was done - for stream allocation */
        unsigned long s_mb_last_group;
        unsigned long s_mb_last_start;
 
-       /* history to debug policy */
-       struct ext4_mb_history *s_mb_history;
-       int s_mb_history_cur;
-       int s_mb_history_max;
-       int s_mb_history_num;
-       spinlock_t s_mb_history_lock;
-       int s_mb_history_filter;
-
        /* stats for buddy allocator */
        spinlock_t s_mb_pa_lock;
        atomic_t s_bal_reqs;    /* number of reqs with len > 1 */
@@ -980,6 +1003,9 @@ struct ext4_sb_info {
 
        unsigned int s_log_groups_per_flex;
        struct flex_groups *s_flex_groups;
+
+       /* workqueue for dio unwritten */
+       struct workqueue_struct *dio_unwritten_wq;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1397,7 +1423,7 @@ extern int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t ext4_get_reserved_space(struct inode *inode);
-
+extern int flush_aio_dio_completed_IO(struct inode *inode);
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
 extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
@@ -1699,6 +1725,8 @@ extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
 extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
                          loff_t len);
+extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+                         loff_t len);
 extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
                           sector_t block, unsigned int max_blocks,
                           struct buffer_head *bh, int flags);
index 61652f1d15e67cdc945b9de935550d189ce33b56..2ca686454e875c976ff969d5ecf7e437d0dadd40 100644 (file)
@@ -220,6 +220,11 @@ static inline int ext4_ext_get_actual_len(struct ext4_extent *ext)
                (le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN));
 }
 
+static inline void ext4_ext_mark_initialized(struct ext4_extent *ext)
+{
+       ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
+}
+
 extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks);
 extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
 extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
@@ -235,7 +240,7 @@ extern int ext4_ext_try_to_merge(struct inode *inode,
                                 struct ext4_ext_path *path,
                                 struct ext4_extent *);
 extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
-extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *);
+extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *, int);
 extern int ext4_ext_walk_space(struct inode *, ext4_lblk_t, ext4_lblk_t,
                                                        ext_prepare_callback, void *);
 extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
index 139fb8cb87e40dabf2538ff28d163849ed8d2bfa..a2865980342f90c21aed6d731fcf1d66d72e0312 100644 (file)
@@ -161,11 +161,13 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
 int __ext4_journal_stop(const char *where, handle_t *handle);
 
-#define EXT4_NOJOURNAL_HANDLE  ((handle_t *) 0x1)
+#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
 
+/* Note:  Do not use this for NULL handles.  This is only to determine if
+ * a properly allocated handle is using a journal or not. */
 static inline int ext4_handle_valid(handle_t *handle)
 {
-       if (handle == EXT4_NOJOURNAL_HANDLE)
+       if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT)
                return 0;
        return 1;
 }
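
The widened check reflects that, in no-journal mode, the value stored in current->journal_info is now a small reference count rather than a single magic pointer.  A sketch of what such a pseudo-handle could look like; the helper is illustrative only and assumes the counting scheme implied by EXT4_NOJOURNAL_MAX_REF_COUNT:

/* Illustrative only: bump a fake "handle" that is really a ref count
 * living in current->journal_info.  Any such value stays below
 * EXT4_NOJOURNAL_MAX_REF_COUNT, so ext4_handle_valid() returns 0. */
static handle_t *example_get_nojournal_handle(void)
{
        unsigned long ref_cnt = (unsigned long)current->journal_info;

        BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
        ref_cnt++;
        current->journal_info = (handle_t *)ref_cnt;
        return current->journal_info;
}
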
index 7a3832577923700ebb62bcca4f5f0bf7266eaae5..10539e364283ad9e0f21eaffe5eb5c9a65f2fed4 100644 (file)
@@ -723,7 +723,7 @@ err:
  * insert new index [@logical;@ptr] into the block at @curp;
  * check where to insert: before @curp or after @curp
  */
-static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *curp,
                                int logical, ext4_fsblk_t ptr)
 {
@@ -1586,7 +1586,7 @@ out:
  */
 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
-                               struct ext4_extent *newext)
+                               struct ext4_extent *newext, int flag)
 {
        struct ext4_extent_header *eh;
        struct ext4_extent *ex, *fex;
@@ -1602,7 +1602,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
        BUG_ON(path[depth].p_hdr == NULL);
 
        /* try to insert block into found extent and return */
-       if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
+       if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
+               && ext4_can_extents_be_merged(inode, ex, newext)) {
                ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
                                ext4_ext_is_uninitialized(newext),
                                ext4_ext_get_actual_len(newext),
@@ -1722,7 +1723,8 @@ has_space:
 
 merge:
        /* try to merge extents to the right */
-       ext4_ext_try_to_merge(inode, path, nearex);
+       if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
+               ext4_ext_try_to_merge(inode, path, nearex);
 
        /* try to merge extents to the left */
 
@@ -2378,6 +2380,7 @@ void ext4_ext_init(struct super_block *sb)
         */
 
        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
+#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
                printk(KERN_INFO "EXT4-fs: file extents enabled");
 #ifdef AGGRESSIVE_TEST
                printk(", aggressive tests");
@@ -2389,6 +2392,7 @@ void ext4_ext_init(struct super_block *sb)
                printk(", stats");
 #endif
                printk("\n");
+#endif
 #ifdef EXTENTS_STATS
                spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
                EXT4_SB(sb)->s_ext_min = 1 << 30;
@@ -2490,7 +2494,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
 }
 
 #define EXT4_EXT_ZERO_LEN 7
-
 /*
  * This function is called by ext4_ext_get_blocks() if someone tries to write
  * to an uninitialized extent. It may result in splitting the uninitialized
@@ -2583,7 +2586,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                        ex3->ee_block = cpu_to_le32(iblock);
                        ext4_ext_store_pblock(ex3, newblock);
                        ex3->ee_len = cpu_to_le16(allocated);
-                       err = ext4_ext_insert_extent(handle, inode, path, ex3);
+                       err = ext4_ext_insert_extent(handle, inode, path,
+                                                       ex3, 0);
                        if (err == -ENOSPC) {
                                err =  ext4_ext_zeroout(inode, &orig_ex);
                                if (err)
@@ -2639,7 +2643,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                ext4_ext_store_pblock(ex3, newblock + max_blocks);
                ex3->ee_len = cpu_to_le16(allocated - max_blocks);
                ext4_ext_mark_uninitialized(ex3);
-               err = ext4_ext_insert_extent(handle, inode, path, ex3);
+               err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
                if (err == -ENOSPC) {
                        err =  ext4_ext_zeroout(inode, &orig_ex);
                        if (err)
@@ -2757,7 +2761,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        err = ext4_ext_dirty(handle, inode, path + depth);
        goto out;
 insert:
-       err = ext4_ext_insert_extent(handle, inode, path, &newex);
+       err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
        if (err == -ENOSPC) {
                err =  ext4_ext_zeroout(inode, &orig_ex);
                if (err)
@@ -2784,6 +2788,324 @@ fix_extent_len:
        return err;
 }
 
+/*
+ * This function is called by ext4_ext_get_blocks() from
+ * ext4_get_blocks_dio_write() when DIO is used to write
+ * to an uninitialized extent.
+ *
+ * Writing to an uninitialized extent may result in splitting the
+ * uninitialized extent into multiple initialized/uninitialized extents
+ * (up to three). There are three possibilities:
+ *   a> No split required: the entire extent stays uninitialized
+ *   b> Split into two extents: the write happens at either end of the extent
+ *   c> Split into three extents: someone is writing in the middle of the extent
+ *
+ * One or more index blocks may be needed if the extent tree grows after
+ * the uninitialized extent is split. To prevent ENOSPC from occurring when
+ * the IO completes, we split the uninitialized extent before the DIO code
+ * submits the IO. The uninitialized extent is split into at most three
+ * uninitialized extents at this time. After the IO completes, the part
+ * being filled will be converted to initialized by the end_io callback
+ * via ext4_convert_unwritten_extents().
+ */
+static int ext4_split_unwritten_extents(handle_t *handle,
+                                       struct inode *inode,
+                                       struct ext4_ext_path *path,
+                                       ext4_lblk_t iblock,
+                                       unsigned int max_blocks,
+                                       int flags)
+{
+       struct ext4_extent *ex, newex, orig_ex;
+       struct ext4_extent *ex1 = NULL;
+       struct ext4_extent *ex2 = NULL;
+       struct ext4_extent *ex3 = NULL;
+       struct ext4_extent_header *eh;
+       ext4_lblk_t ee_block;
+       unsigned int allocated, ee_len, depth;
+       ext4_fsblk_t newblock;
+       int err = 0;
+       int ret = 0;
+
+       ext_debug("ext4_split_unwritten_extents: inode %lu,"
+                 "iblock %llu, max_blocks %u\n", inode->i_ino,
+                 (unsigned long long)iblock, max_blocks);
+       depth = ext_depth(inode);
+       eh = path[depth].p_hdr;
+       ex = path[depth].p_ext;
+       ee_block = le32_to_cpu(ex->ee_block);
+       ee_len = ext4_ext_get_actual_len(ex);
+       allocated = ee_len - (iblock - ee_block);
+       newblock = iblock - ee_block + ext_pblock(ex);
+       ex2 = ex;
+       orig_ex.ee_block = ex->ee_block;
+       orig_ex.ee_len   = cpu_to_le16(ee_len);
+       ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
+
+       /*
+        * If the entire uninitialized extent length is less than
+        * the size of the extent to write, there is no need to split
+        * the uninitialized extent.
+        */
+       if (allocated <= max_blocks)
+               return ret;
+
+       err = ext4_ext_get_access(handle, inode, path + depth);
+       if (err)
+               goto out;
+       /* ex1: ee_block to iblock - 1 : uninitialized */
+       if (iblock > ee_block) {
+               ex1 = ex;
+               ex1->ee_len = cpu_to_le16(iblock - ee_block);
+               ext4_ext_mark_uninitialized(ex1);
+               ex2 = &newex;
+       }
+       /*
+        * for sanity, update the length of the ex2 extent before
+        * we insert ex3, if ex1 is NULL. This is to avoid temporary
+        * overlap of blocks.
+        */
+       if (!ex1 && allocated > max_blocks)
+               ex2->ee_len = cpu_to_le16(max_blocks);
+       /* ex3: to ee_block + ee_len : uninitialised */
+       if (allocated > max_blocks) {
+               unsigned int newdepth;
+               ex3 = &newex;
+               ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+               ext4_ext_store_pblock(ex3, newblock + max_blocks);
+               ex3->ee_len = cpu_to_le16(allocated - max_blocks);
+               ext4_ext_mark_uninitialized(ex3);
+               err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
+               if (err == -ENOSPC) {
+                       err =  ext4_ext_zeroout(inode, &orig_ex);
+                       if (err)
+                               goto fix_extent_len;
+                       /* update the extent length and mark as initialized */
+                       ex->ee_block = orig_ex.ee_block;
+                       ex->ee_len   = orig_ex.ee_len;
+                       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                       ext4_ext_dirty(handle, inode, path + depth);
+                       /* zeroed the full extent */
+                       /* blocks available from iblock */
+                       return allocated;
+
+               } else if (err)
+                       goto fix_extent_len;
+               /*
+                * The depth, and hence eh & ex might change
+                * as part of the insert above.
+                */
+               newdepth = ext_depth(inode);
+               /*
+                * update the extent length after successful insert of the
+                * split extent
+                */
+               orig_ex.ee_len = cpu_to_le16(ee_len -
+                                               ext4_ext_get_actual_len(ex3));
+               depth = newdepth;
+               ext4_ext_drop_refs(path);
+               path = ext4_ext_find_extent(inode, iblock, path);
+               if (IS_ERR(path)) {
+                       err = PTR_ERR(path);
+                       goto out;
+               }
+               eh = path[depth].p_hdr;
+               ex = path[depth].p_ext;
+               if (ex2 != &newex)
+                       ex2 = ex;
+
+               err = ext4_ext_get_access(handle, inode, path + depth);
+               if (err)
+                       goto out;
+
+               allocated = max_blocks;
+       }
+       /*
+        * If there was a change of depth as part of the
+        * insertion of ex3 above, we need to update the length
+        * of the ex1 extent again here
+        */
+       if (ex1 && ex1 != ex) {
+               ex1 = ex;
+               ex1->ee_len = cpu_to_le16(iblock - ee_block);
+               ext4_ext_mark_uninitialized(ex1);
+               ex2 = &newex;
+       }
+       /*
+        * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
+        * uninitialised still.
+        */
+       ex2->ee_block = cpu_to_le32(iblock);
+       ext4_ext_store_pblock(ex2, newblock);
+       ex2->ee_len = cpu_to_le16(allocated);
+       ext4_ext_mark_uninitialized(ex2);
+       if (ex2 != ex)
+               goto insert;
+       /* Mark modified extent as dirty */
+       err = ext4_ext_dirty(handle, inode, path + depth);
+       ext_debug("out here\n");
+       goto out;
+insert:
+       err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+       if (err == -ENOSPC) {
+               err =  ext4_ext_zeroout(inode, &orig_ex);
+               if (err)
+                       goto fix_extent_len;
+               /* update the extent length and mark as initialized */
+               ex->ee_block = orig_ex.ee_block;
+               ex->ee_len   = orig_ex.ee_len;
+               ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+               ext4_ext_dirty(handle, inode, path + depth);
+               /* zero out the first half */
+               return allocated;
+       } else if (err)
+               goto fix_extent_len;
+out:
+       ext4_ext_show_leaf(inode, path);
+       return err ? err : allocated;
+
+fix_extent_len:
+       ex->ee_block = orig_ex.ee_block;
+       ex->ee_len   = orig_ex.ee_len;
+       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+       ext4_ext_mark_uninitialized(ex);
+       ext4_ext_dirty(handle, inode, path + depth);
+       return err;
+}
+static int ext4_convert_unwritten_extents_dio(handle_t *handle,
+                                             struct inode *inode,
+                                             struct ext4_ext_path *path)
+{
+       struct ext4_extent *ex;
+       struct ext4_extent_header *eh;
+       int depth;
+       int err = 0;
+       int ret = 0;
+
+       depth = ext_depth(inode);
+       eh = path[depth].p_hdr;
+       ex = path[depth].p_ext;
+
+       err = ext4_ext_get_access(handle, inode, path + depth);
+       if (err)
+               goto out;
+       /* first mark the extent as initialized */
+       ext4_ext_mark_initialized(ex);
+
+       /*
+        * We have to see if it can be merged with the extent
+        * on the left.
+        */
+       if (ex > EXT_FIRST_EXTENT(eh)) {
+               /*
+                * To merge left, pass "ex - 1" to try_to_merge(),
+                * since it merges towards right _only_.
+                */
+               ret = ext4_ext_try_to_merge(inode, path, ex - 1);
+               if (ret) {
+                       err = ext4_ext_correct_indexes(handle, inode, path);
+                       if (err)
+                               goto out;
+                       depth = ext_depth(inode);
+                       ex--;
+               }
+       }
+       /*
+        * Try to Merge towards right.
+        */
+       ret = ext4_ext_try_to_merge(inode, path, ex);
+       if (ret) {
+               err = ext4_ext_correct_indexes(handle, inode, path);
+               if (err)
+                       goto out;
+               depth = ext_depth(inode);
+       }
+       /* Mark modified extent as dirty */
+       err = ext4_ext_dirty(handle, inode, path + depth);
+out:
+       ext4_ext_show_leaf(inode, path);
+       return err;
+}
+
+static int
+ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+                       ext4_lblk_t iblock, unsigned int max_blocks,
+                       struct ext4_ext_path *path, int flags,
+                       unsigned int allocated, struct buffer_head *bh_result,
+                       ext4_fsblk_t newblock)
+{
+       int ret = 0;
+       int err = 0;
+       ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
+
+       ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
+                 "block %llu, max_blocks %u, flags %d, allocated %u",
+                 inode->i_ino, (unsigned long long)iblock, max_blocks,
+                 flags, allocated);
+       ext4_ext_show_leaf(inode, path);
+
+       /* DIO get_block() called before submitting the IO: split the extent */
+       if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
+               ret = ext4_split_unwritten_extents(handle,
+                                               inode, path, iblock,
+                                               max_blocks, flags);
+               /* flag the io_end struct so we know to convert when the IO is done */
+               if (io)
+                       io->flag = DIO_AIO_UNWRITTEN;
+               goto out;
+       }
+       /* DIO end_io complete, convert the filled extent to written */
+       if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
+               ret = ext4_convert_unwritten_extents_dio(handle, inode,
+                                                       path);
+               goto out2;
+       }
+       /* buffered IO case */
+       /*
+        * repeat fallocate creation request
+        * we already have an unwritten extent
+        */
+       if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
+               goto map_out;
+
+       /* buffered READ or buffered write_begin() lookup */
+       if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
+               /*
+                * We have blocks reserved already.  We
+                * return allocated blocks so that delalloc
+                * won't do block reservation for us.  But
+                * the buffer head will be unmapped so that
+                * a read from the block returns 0s.
+                */
+               set_buffer_unwritten(bh_result);
+               goto out1;
+       }
+
+       /* buffered write, writepage time, convert */
+       ret = ext4_ext_convert_to_initialized(handle, inode,
+                                               path, iblock,
+                                               max_blocks);
+out:
+       if (ret <= 0) {
+               err = ret;
+               goto out2;
+       } else
+               allocated = ret;
+       set_buffer_new(bh_result);
+map_out:
+       set_buffer_mapped(bh_result);
+out1:
+       if (allocated > max_blocks)
+               allocated = max_blocks;
+       ext4_ext_show_leaf(inode, path);
+       bh_result->b_bdev = inode->i_sb->s_bdev;
+       bh_result->b_blocknr = newblock;
+out2:
+       if (path) {
+               ext4_ext_drop_refs(path);
+               kfree(path);
+       }
+       return err ? err : allocated;
+}
 /*
  * Block allocation/map/preallocation routine for extents based files
  *
@@ -2814,6 +3136,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
        int err = 0, depth, ret, cache_type;
        unsigned int allocated = 0;
        struct ext4_allocation_request ar;
+       ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
 
        __clear_bit(BH_New, &bh_result->b_state);
        ext_debug("blocks %u/%u requested for inode %lu\n",
@@ -2889,33 +3212,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                                                        EXT4_EXT_CACHE_EXTENT);
                                goto out;
                        }
-                       if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
-                               goto out;
-                       if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
-                               if (allocated > max_blocks)
-                                       allocated = max_blocks;
-                               /*
-                                * We have blocks reserved already.  We
-                                * return allocated blocks so that delalloc
-                                * won't do block reservation for us.  But
-                                * the buffer head will be unmapped so that
-                                * a read from the block returns 0s.
-                                */
-                               set_buffer_unwritten(bh_result);
-                               bh_result->b_bdev = inode->i_sb->s_bdev;
-                               bh_result->b_blocknr = newblock;
-                               goto out2;
-                       }
-
-                       ret = ext4_ext_convert_to_initialized(handle, inode,
-                                                               path, iblock,
-                                                               max_blocks);
-                       if (ret <= 0) {
-                               err = ret;
-                               goto out2;
-                       } else
-                               allocated = ret;
-                       goto outnew;
+                       ret = ext4_ext_handle_uninitialized_extents(handle,
+                                       inode, iblock, max_blocks, path,
+                                       flags, allocated, bh_result, newblock);
+                       return ret;
                }
        }
 
@@ -2986,9 +3286,21 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
        /* try to insert new extent into found leaf and return */
        ext4_ext_store_pblock(&newex, newblock);
        newex.ee_len = cpu_to_le16(ar.len);
-       if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)  /* Mark uninitialized */
+       /* Mark uninitialized */
+       if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
                ext4_ext_mark_uninitialized(&newex);
-       err = ext4_ext_insert_extent(handle, inode, path, &newex);
+               /*
+                * An io_end structure is created for every async
+                * direct IO write to the middle of the file.
+                * To avoid an unnecessary conversion for every aio dio
+                * rewrite to the middle of the file, flag only the IO
+                * that really needs the conversion.
+                *
+                */
+               if (io && flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT)
+                       io->flag = DIO_AIO_UNWRITTEN;
+       }
+       err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
        if (err) {
                /* free data blocks we just allocated */
                /* not a good idea to call discard here directly,
@@ -3002,7 +3314,6 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
        /* previous routine could use block we allocated */
        newblock = ext_pblock(&newex);
        allocated = ext4_ext_get_actual_len(&newex);
-outnew:
        set_buffer_new(bh_result);
 
        /* Cache only when it is _not_ an uninitialized extent */
@@ -3200,6 +3511,63 @@ retry:
        return ret > 0 ? ret2 : ret;
 }
 
+/*
+ * This function converts a range of blocks to written extents.
+ * The caller passes the start offset and the size; all unwritten
+ * extents within this range will be converted to written extents.
+ *
+ * This function is called from the direct IO end_io callback
+ * function, to convert the fallocated extents after the IO is
+ * completed.
+ */
+int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+                                   loff_t len)
+{
+       handle_t *handle;
+       ext4_lblk_t block;
+       unsigned int max_blocks;
+       int ret = 0;
+       int ret2 = 0;
+       struct buffer_head map_bh;
+       unsigned int credits, blkbits = inode->i_blkbits;
+
+       block = offset >> blkbits;
+       /*
+        * We can't just convert len to max_blocks: for example, blocksize =
+        * 4096, offset = 3072 and len = 2048 still spans two blocks.
+        */
+       max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
+                                                       - block;
+       /*
+        * credits to insert 1 extent into extent tree
+        */
+       credits = ext4_chunk_trans_blocks(inode, max_blocks);
+       while (ret >= 0 && ret < max_blocks) {
+               block = block + ret;
+               max_blocks = max_blocks - ret;
+               handle = ext4_journal_start(inode, credits);
+               if (IS_ERR(handle)) {
+                       ret = PTR_ERR(handle);
+                       break;
+               }
+               map_bh.b_state = 0;
+               ret = ext4_get_blocks(handle, inode, block,
+                                     max_blocks, &map_bh,
+                                     EXT4_GET_BLOCKS_DIO_CONVERT_EXT);
+               if (ret <= 0) {
+                       WARN_ON(ret <= 0);
+                       printk(KERN_ERR "%s: ext4_ext_get_blocks "
+                                   "returned error inode#%lu, block=%u, "
+                                   "max_blocks=%u", __func__,
+                                   inode->i_ino, block, max_blocks);
+               }
+               ext4_mark_inode_dirty(handle, inode);
+               ret2 = ext4_journal_stop(handle);
+               if (ret <= 0 || ret2 )
+                       break;
+       }
+       return ret > 0 ? ret2 : ret;
+}
 /*
  * Callback function called for each extent to gather FIEMAP information.
  */
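ext4_convert_unwritten_extents() above walks the requested byte range in block units, starting a fresh journal handle for each pass until the whole range is converted. Below is a minimal userspace sketch of that loop shape, assuming 4 KiB blocks; convert_some_blocks() is a hypothetical stand-in for ext4_get_blocks() called with EXT4_GET_BLOCKS_DIO_CONVERT_EXT, and the transaction start/stop is elided.

#include <stdio.h>

#define BLKBITS 12                        /* assume 4 KiB blocks */

/* hypothetical stand-in: pretend each "transaction" converts at most 3 blocks */
static int convert_some_blocks(unsigned long block, unsigned int max_blocks)
{
        unsigned int done = max_blocks < 3 ? max_blocks : 3;

        printf("transaction: convert blocks %lu..%lu\n", block, block + done - 1);
        return (int)done;                 /* like ext4_get_blocks(): blocks handled */
}

static int convert_unwritten_range(long long offset, long long len)
{
        unsigned long block = (unsigned long)(offset >> BLKBITS);
        /* round the end up: offset = 3072, len = 2048 must still cover 2 blocks */
        unsigned int max_blocks = (unsigned int)
                (((offset + len + (1 << BLKBITS) - 1) >> BLKBITS) - block);
        int ret = 0;

        while (ret >= 0 && (unsigned int)ret < max_blocks) {
                block += ret;
                max_blocks -= ret;
                ret = convert_some_blocks(block, max_blocks);
                if (ret <= 0)
                        return ret ? ret : -1;
        }
        return 0;
}

int main(void)
{
        return convert_unwritten_range(3072, 2048 + 3 * 4096) ? 1 : 0;
}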
index 07475740b512535ec6d30aa3b36d6892b765e839..2b1531266ee25010776dcebe8921103da29d9c3a 100644 (file)
@@ -44,6 +44,8 @@
  *
  * What we do is just kick off a commit and wait on it.  This will snapshot the
  * inode to disk.
+ *
+ * i_mutex lock is held when entering and exiting this function
  */
 
 int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
@@ -56,6 +58,9 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
 
        trace_ext4_sync_file(file, dentry, datasync);
 
+       ret = flush_aio_dio_completed_IO(inode);
+       if (ret < 0)
+               goto out;
        /*
         * data=writeback:
         *  The caller's filemap_fdatawrite()/wait will sync the data.
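The ordering added in the hunk above matters: fsync must first force any queued unwritten-to-written conversions from completed async DIO, and only then commit, otherwise it could return with extents still marked unwritten on disk. A rough userspace sketch of that ordering follows; flush_completed_aio_dio and commit_metadata are stand-in names for flush_aio_dio_completed_IO() and the journal commit path.

#include <stdio.h>

struct inode_sketch { int pending_conversions; };

static int flush_completed_aio_dio(struct inode_sketch *inode)
{
        /* walk the per-inode list of completed AIO DIO and convert extents */
        printf("converting %d pending unwritten ranges\n",
               inode->pending_conversions);
        inode->pending_conversions = 0;
        return 0;
}

static int commit_metadata(struct inode_sketch *inode)
{
        (void)inode;
        printf("kick off / wait on the journal commit\n");
        return 0;
}

static int sync_file_sketch(struct inode_sketch *inode)
{
        int ret = flush_completed_aio_dio(inode);  /* must happen first */

        if (ret < 0)
                return ret;
        return commit_metadata(inode);
}

int main(void)
{
        struct inode_sketch inode = { .pending_conversions = 2 };

        return sync_file_sketch(&inode);
}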
index 064746fad5812e693ef6d3ef2578822a3007cadb..5c5bc5dafff818991b26f5fd7ebdf6ebf4e83c67 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/namei.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
+#include <linux/workqueue.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -1144,6 +1145,64 @@ static int check_block_validity(struct inode *inode, const char *msg,
        return 0;
 }
 
+/*
+ * Return the number of contiguous dirty pages in a given inode
+ * starting at page frame idx.
+ */
+static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
+                                   unsigned int max_pages)
+{
+       struct address_space *mapping = inode->i_mapping;
+       pgoff_t index;
+       struct pagevec pvec;
+       pgoff_t num = 0;
+       int i, nr_pages, done = 0;
+
+       if (max_pages == 0)
+               return 0;
+       pagevec_init(&pvec, 0);
+       while (!done) {
+               index = idx;
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                             PAGECACHE_TAG_DIRTY,
+                                             (pgoff_t)PAGEVEC_SIZE);
+               if (nr_pages == 0)
+                       break;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+                       struct buffer_head *bh, *head;
+
+                       lock_page(page);
+                       if (unlikely(page->mapping != mapping) ||
+                           !PageDirty(page) ||
+                           PageWriteback(page) ||
+                           page->index != idx) {
+                               done = 1;
+                               unlock_page(page);
+                               break;
+                       }
+                       if (page_has_buffers(page)) {
+                               bh = head = page_buffers(page);
+                               do {
+                                       if (!buffer_delay(bh) &&
+                                           !buffer_unwritten(bh))
+                                               done = 1;
+                                       bh = bh->b_this_page;
+                               } while (!done && (bh != head));
+                       }
+                       unlock_page(page);
+                       if (done)
+                               break;
+                       idx++;
+                       num++;
+                       if (num >= max_pages)
+                               break;
+               }
+               pagevec_release(&pvec);
+       }
+       return num;
+}
+
 /*
  * The ext4_get_blocks() function tries to look up the requested blocks,
  * and returns if the blocks are already mapped.
@@ -1175,6 +1234,9 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
        clear_buffer_mapped(bh);
        clear_buffer_unwritten(bh);
 
+       ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
+                 "logical block %lu\n", inode->i_ino, flags, max_blocks,
+                 (unsigned long)block);
        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
@@ -1796,11 +1858,11 @@ repeat:
 
        if (ext4_claim_free_blocks(sbi, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+               vfs_dq_release_reservation_block(inode, total);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
-               vfs_dq_release_reservation_block(inode, total);
                return -ENOSPC;
        }
        EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -2092,18 +2154,18 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
 static void ext4_print_free_blocks(struct inode *inode)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       printk(KERN_EMERG "Total free blocks count %lld\n",
-                       ext4_count_free_blocks(inode->i_sb));
-       printk(KERN_EMERG "Free/Dirty block details\n");
-       printk(KERN_EMERG "free_blocks=%lld\n",
-                       (long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
-       printk(KERN_EMERG "dirty_blocks=%lld\n",
-                       (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
-       printk(KERN_EMERG "Block reservation details\n");
-       printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
-                       EXT4_I(inode)->i_reserved_data_blocks);
-       printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
-                       EXT4_I(inode)->i_reserved_meta_blocks);
+       printk(KERN_CRIT "Total free blocks count %lld\n",
+              ext4_count_free_blocks(inode->i_sb));
+       printk(KERN_CRIT "Free/Dirty block details\n");
+       printk(KERN_CRIT "free_blocks=%lld\n",
+              (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
+       printk(KERN_CRIT "dirty_blocks=%lld\n",
+              (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+       printk(KERN_CRIT "Block reservation details\n");
+       printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
+              EXT4_I(inode)->i_reserved_data_blocks);
+       printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
+              EXT4_I(inode)->i_reserved_meta_blocks);
        return;
 }
 
@@ -2189,14 +2251,14 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                 * writepage and writepages will again try to write
                 * the same.
                 */
-               printk(KERN_EMERG "%s block allocation failed for inode %lu "
-                                 "at logical offset %llu with max blocks "
-                                 "%zd with error %d\n",
-                                 __func__, mpd->inode->i_ino,
-                                 (unsigned long long)next,
-                                 mpd->b_size >> mpd->inode->i_blkbits, err);
-               printk(KERN_EMERG "This should not happen.!! "
-                                       "Data will be lost\n");
+               ext4_msg(mpd->inode->i_sb, KERN_CRIT,
+                        "delayed block allocation failed for inode %lu at "
+                        "logical offset %llu with max blocks %zd with "
+                        "error %d\n", mpd->inode->i_ino,
+                        (unsigned long long) next,
+                        mpd->b_size >> mpd->inode->i_blkbits, err);
+               printk(KERN_CRIT "This should not happen!!  "
+                      "Data will be lost\n");
                if (err == -ENOSPC) {
                        ext4_print_free_blocks(mpd->inode);
                }
@@ -2743,8 +2805,10 @@ static int ext4_da_writepages(struct address_space *mapping,
        int no_nrwrite_index_update;
        int pages_written = 0;
        long pages_skipped;
+       unsigned int max_pages;
        int range_cyclic, cycled = 1, io_done = 0;
-       int needed_blocks, ret = 0, nr_to_writebump = 0;
+       int needed_blocks, ret = 0;
+       long desired_nr_to_write, nr_to_writebump = 0;
        loff_t range_start = wbc->range_start;
        struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
@@ -2771,16 +2835,6 @@ static int ext4_da_writepages(struct address_space *mapping,
        if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
                return -EROFS;
 
-       /*
-        * Make sure nr_to_write is >= sbi->s_mb_stream_request
-        * This make sure small files blocks are allocated in
-        * single attempt. This ensure that small files
-        * get less fragmented.
-        */
-       if (wbc->nr_to_write < sbi->s_mb_stream_request) {
-               nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
-               wbc->nr_to_write = sbi->s_mb_stream_request;
-       }
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                range_whole = 1;
 
@@ -2795,6 +2849,36 @@ static int ext4_da_writepages(struct address_space *mapping,
        } else
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
 
+       /*
+        * This works around two forms of stupidity.  The first is in
+        * the writeback code, which caps the maximum number of pages
+        * written to be 1024 pages.  This is wrong on multiple
+        * levels; different architectures have different page sizes,
+        * which changes the maximum amount of data which gets
+        * written.  Secondly, 4 megabytes is way too small.  XFS
+        * forces this value to be 16 megabytes by multiplying
+        * nr_to_write parameter by four, and then relies on its
+        * allocator to allocate larger extents to make them
+        * contiguous.  Unfortunately this brings us to the second
+        * stupidity, which is that ext4's mballoc code only allocates
+        * at most 2048 blocks.  So we force contiguous writes up to
+        * the number of dirty blocks in the inode, or
+        * sbi->s_max_writeback_mb_bump, whichever is smaller.
+        */
+       max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
+       if (!range_cyclic && range_whole)
+               desired_nr_to_write = wbc->nr_to_write * 8;
+       else
+               desired_nr_to_write = ext4_num_dirty_pages(inode, index,
+                                                          max_pages);
+       if (desired_nr_to_write > max_pages)
+               desired_nr_to_write = max_pages;
+
+       if (wbc->nr_to_write < desired_nr_to_write) {
+               nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
+               wbc->nr_to_write = desired_nr_to_write;
+       }
+
        mpd.wbc = wbc;
        mpd.inode = mapping->host;
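To make the arithmetic in the writeback comment above concrete, here is a small standalone calculation. The value of s_max_writeback_mb_bump is not shown in this hunk, so 128 MB is assumed purely for illustration, as are the 4 KiB page size and the dirty-page count.

#include <stdio.h>

int main(void)
{
        unsigned int page_shift = 12;                    /* 4 KiB pages (assumed) */
        unsigned int max_writeback_mb_bump = 128;        /* example value only */
        long nr_to_write = 1024;                         /* typical wbc default */
        long desired = 8192;     /* say ext4_num_dirty_pages() found 8192 pages */

        /* 128 MB expressed in pages: 128 << (20 - 12) = 32768 pages */
        unsigned int max_pages = max_writeback_mb_bump << (20 - page_shift);

        if (desired > (long)max_pages)
                desired = max_pages;
        if (nr_to_write < desired)
                nr_to_write = desired;                   /* bumped from 1024 to 8192 */

        printf("max_pages=%u nr_to_write=%ld\n", max_pages, nr_to_write);
        return 0;
}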
 
@@ -2822,10 +2906,9 @@ retry:
                handle = ext4_journal_start(inode, needed_blocks);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
-                       printk(KERN_CRIT "%s: jbd2_start: "
+                       ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
                               "%ld pages, ino %lu; err %d\n", __func__,
                                wbc->nr_to_write, inode->i_ino, ret);
-                       dump_stack();
                        goto out_writepages;
                }
 
@@ -2897,9 +2980,10 @@ retry:
                goto retry;
        }
        if (pages_skipped != wbc->pages_skipped)
-               printk(KERN_EMERG "This should not happen leaving %s "
-                               "with nr_to_write = %ld ret = %d\n",
-                               __func__, wbc->nr_to_write, ret);
+               ext4_msg(inode->i_sb, KERN_CRIT,
+                        "This should not happen leaving %s "
+                        "with nr_to_write = %ld ret = %d\n",
+                        __func__, wbc->nr_to_write, ret);
 
        /* Update index */
        index += pages_written;
@@ -2914,7 +2998,8 @@ retry:
 out_writepages:
        if (!no_nrwrite_index_update)
                wbc->no_nrwrite_index_update = 0;
-       wbc->nr_to_write -= nr_to_writebump;
+       if (wbc->nr_to_write > nr_to_writebump)
+               wbc->nr_to_write -= nr_to_writebump;
        wbc->range_start = range_start;
        trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
        return ret;
@@ -3272,6 +3357,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 }
 
 /*
+ * O_DIRECT for ext3 (or indirect map) based files
+ *
  * If the O_DIRECT write will extend the file then add this inode to the
  * orphan list.  So recovery will truncate it back to the original size
  * if the machine crashes during the write.
@@ -3280,7 +3367,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
 {
@@ -3291,6 +3378,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        ssize_t ret;
        int orphan = 0;
        size_t count = iov_length(iov, nr_segs);
+       int retries = 0;
 
        if (rw == WRITE) {
                loff_t final_size = offset + count;
@@ -3313,9 +3401,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
                }
        }
 
+retry:
        ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext4_get_block, NULL);
+       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
 
        if (orphan) {
                int err;
@@ -3354,6 +3445,359 @@ out:
        return ret;
 }
 
+/* Maximum number of blocks we map for direct IO at once. */
+
+static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
+                  struct buffer_head *bh_result, int create)
+{
+       handle_t *handle = NULL;
+       int ret = 0;
+       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+       int dio_credits;
+
+       ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
+                  inode->i_ino, create);
+       /*
+        * The DIO VFS code passes create = 0 for a write to the
+        * middle of the file. It does this to avoid block allocation
+        * for holes, so that stale data is not exposed to a parallel
+        * buffered read (which does not hold the i_mutex lock) while
+        * the direct IO write has not completed. A DIO request on
+        * holes therefore normally ends up falling back to buffered
+        * IO.
+        *
+        * For ext4 extent-based files, since we support fallocate,
+        * newly allocated extents are marked uninitialized; for holes,
+        * we can fallocate the blocks, so a parallel buffered read
+        * will zero out the page when it reads a hole whose parallel
+        * DIO write has not yet completed.
+        *
+        * When we come here, we know it is a direct IO write to
+        * the middle of the file (< i_size), so it is safe to
+        * override the create flag from the VFS.
+        */
+       create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
+
+       if (max_blocks > DIO_MAX_BLOCKS)
+               max_blocks = DIO_MAX_BLOCKS;
+       dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+       handle = ext4_journal_start(inode, dio_credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
+       }
+       ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
+                             create);
+       if (ret > 0) {
+               bh_result->b_size = (ret << inode->i_blkbits);
+               ret = 0;
+       }
+       ext4_journal_stop(handle);
+out:
+       return ret;
+}
+
+static void ext4_free_io_end(ext4_io_end_t *io)
+{
+       BUG_ON(!io);
+       iput(io->inode);
+       kfree(io);
+}
+static void dump_aio_dio_list(struct inode * inode)
+{
+#ifdef EXT4_DEBUG
+       struct list_head *cur, *before, *after;
+       ext4_io_end_t *io, *io0, *io1;
+
+       if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
+               ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
+               return;
+       }
+
+       ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino);
+       list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){
+               cur = &io->list;
+               before = cur->prev;
+               io0 = container_of(before, ext4_io_end_t, list);
+               after = cur->next;
+               io1 = container_of(after, ext4_io_end_t, list);
+
+               ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
+                           io, inode->i_ino, io0, io1);
+       }
+#endif
+}
+
+/*
+ * check a range of space and convert unwritten extents to written.
+ */
+static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
+{
+       struct inode *inode = io->inode;
+       loff_t offset = io->offset;
+       size_t size = io->size;
+       int ret = 0;
+
+       ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
+                  "list->prev 0x%p\n",
+                  io, inode->i_ino, io->list.next, io->list.prev);
+
+       if (list_empty(&io->list))
+               return ret;
+
+       if (io->flag != DIO_AIO_UNWRITTEN)
+               return ret;
+
+       if (offset + size <= i_size_read(inode))
+               ret = ext4_convert_unwritten_extents(inode, offset, size);
+
+       if (ret < 0) {
+               printk(KERN_EMERG "%s: failed to convert unwritten"
+                       "extents to written extents, error is %d"
+                       " io is still on inode %lu aio dio list\n",
+                       __func__, ret, inode->i_ino);
+               return ret;
+       }
+
+       /* clear the DIO AIO unwritten flag */
+       io->flag = 0;
+       return ret;
+}
+/*
+ * work on completed aio dio IO, to convert unwritten extents to written extents
+ */
+static void ext4_end_aio_dio_work(struct work_struct *work)
+{
+       ext4_io_end_t *io  = container_of(work, ext4_io_end_t, work);
+       struct inode *inode = io->inode;
+       int ret = 0;
+
+       mutex_lock(&inode->i_mutex);
+       ret = ext4_end_aio_dio_nolock(io);
+       if (ret >= 0) {
+               if (!list_empty(&io->list))
+                       list_del_init(&io->list);
+               ext4_free_io_end(io);
+       }
+       mutex_unlock(&inode->i_mutex);
+}
+/*
+ * This function is called from ext4_sync_file().
+ *
+ * When an AIO DIO IO is completed, the work to convert unwritten
+ * extents to written is queued on a workqueue but may not get
+ * scheduled immediately. When fsync is called, we need to ensure
+ * the conversion is complete before fsync returns.
+ * The inode keeps track of a list of completed AIO from the DIO
+ * path that might need the conversion. This function walks through
+ * the list and converts the related unwritten extents to written.
+ */
+int flush_aio_dio_completed_IO(struct inode *inode)
+{
+       ext4_io_end_t *io;
+       int ret = 0;
+       int ret2 = 0;
+
+       if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
+               return ret;
+
+       dump_aio_dio_list(inode);
+       while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
+               io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
+                               ext4_io_end_t, list);
+               /*
+                * Calling ext4_end_aio_dio_nolock() to convert completed
+                * IO to written.
+                *
+                * When ext4_sync_file() is called, run_queue() may already
+                * about to flush the work corresponding to this io structure.
+                * It will be upset if it founds the io structure related
+                * to the work-to-be schedule is freed.
+                *
+                * Thus we need to keep the io structure still valid here after
+                * convertion finished. The io structure has a flag to
+                * avoid double converting from both fsync and background work
+                * queue work.
+                */
+               ret = ext4_end_aio_dio_nolock(io);
+               if (ret < 0)
+                       ret2 = ret;
+               else
+                       list_del_init(&io->list);
+       }
+       return (ret2 < 0) ? ret2 : 0;
+}
+
+static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
+{
+       ext4_io_end_t *io = NULL;
+
+       io = kmalloc(sizeof(*io), GFP_NOFS);
+
+       if (io) {
+               igrab(inode);
+               io->inode = inode;
+               io->flag = 0;
+               io->offset = 0;
+               io->size = 0;
+               io->error = 0;
+               INIT_WORK(&io->work, ext4_end_aio_dio_work);
+               INIT_LIST_HEAD(&io->list);
+       }
+
+       return io;
+}
+
+static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+                           ssize_t size, void *private)
+{
+        ext4_io_end_t *io_end = iocb->private;
+       struct workqueue_struct *wq;
+
+       ext_debug("ext4_end_io_dio(): io_end 0x%p"
+                 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
+                 iocb->private, io_end->inode->i_ino, iocb, offset,
+                 size);
+       /* if not async direct IO or dio with 0 bytes write, just return */
+       if (!io_end || !size)
+               return;
+
+       /* if not aio dio with unwritten extents, just free io and return */
+       if (io_end->flag != DIO_AIO_UNWRITTEN){
+               ext4_free_io_end(io_end);
+               iocb->private = NULL;
+               return;
+       }
+
+       io_end->offset = offset;
+       io_end->size = size;
+       wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
+
+       /* queue the work to convert unwritten extents to written */
+       queue_work(wq, &io_end->work);
+
+       /* Add the io_end to the per-inode completed aio dio list */
+       list_add_tail(&io_end->list,
+                &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
+       iocb->private = NULL;
+}
+/*
+ * For ext4 extent files, ext4 will do direct-io writes to holes,
+ * to preallocated extents, and to writes that extend the file, with
+ * no need to fall back to buffered IO.
+ *
+ * For holes, we fallocate those blocks and mark them as uninitialized.
+ * If the blocks were already preallocated, we make sure they are split
+ * as needed, but still keep the range to write as uninitialized.
+ *
+ * The unwritten extents will be converted to written when the DIO is
+ * completed. For async direct IO, since the IO may still be pending
+ * when we return, we set up an end_io callback function, which will
+ * do the conversion when the async direct IO completes.
+ *
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list.  So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ *
+ */
+static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       ssize_t ret;
+       size_t count = iov_length(iov, nr_segs);
+
+       loff_t final_size = offset + count;
+       if (rw == WRITE && final_size <= inode->i_size) {
+               /*
+                * We can do direct writes to holes and to fallocated
+                * extents.
+                *
+                * Blocks allocated to fill a hole are marked as uninitialized
+                * to prevent a parallel buffered read from exposing stale
+                * data before the DIO completes the data IO.
+                *
+                * For previously fallocated extents, ext4 get_block will
+                * simply mark the buffer mapped but still keep the extents
+                * uninitialized.
+                *
+                * For the non-AIO case, we convert those unwritten extents
+                * to written after returning from blockdev_direct_IO.
+                *
+                * For async DIO, the conversion needs to be deferred until
+                * the IO is completed. The ext4 end_io callback function will
+                * be called to take care of the conversion work. Here, for the
+                * async case, we allocate an io_end structure to hook to the iocb.
+                */
+               iocb->private = NULL;
+               EXT4_I(inode)->cur_aio_dio = NULL;
+               if (!is_sync_kiocb(iocb)) {
+                       iocb->private = ext4_init_io_end(inode);
+                       if (!iocb->private)
+                               return -ENOMEM;
+                       /*
+                        * We save the io structure for the current async
+                        * direct IO, so that later ext4_get_blocks() can
+                        * flag in the io structure whether there are
+                        * unwritten extents that need to be converted
+                        * when the IO is completed.
+                        */
+                       EXT4_I(inode)->cur_aio_dio = iocb->private;
+               }
+
+               ret = blockdev_direct_IO(rw, iocb, inode,
+                                        inode->i_sb->s_bdev, iov,
+                                        offset, nr_segs,
+                                        ext4_get_block_dio_write,
+                                        ext4_end_io_dio);
+               if (iocb->private)
+                       EXT4_I(inode)->cur_aio_dio = NULL;
+               /*
+                * The io_end structure takes a reference to the inode;
+                * that structure needs to be destroyed and the reference
+                * to the inode dropped when the IO is complete, even for
+                * a 0-byte write or a failed write.
+                *
+                * In the successful AIO DIO case, the io_end structure will
+                * be destroyed and the reference to the inode will be
+                * dropped after the end_io callback function is called.
+                *
+                * In the 0-byte write or error case, since VFS direct IO
+                * won't invoke the end_io callback function, we need to
+                * free the io_end structure here.
+                */
+               if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+                       ext4_free_io_end(iocb->private);
+                       iocb->private = NULL;
+               } else if (ret > 0)
+                       /*
+                        * for non AIO case, since the IO is already
+                        * completed, we could do the convertion right here
+                        */
+                       ret = ext4_convert_unwritten_extents(inode,
+                                                               offset, ret);
+               return ret;
+       }
+
+       /* for a write to the end of the file, we fall back to the old way */
+       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
+static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+               return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+
+       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
 /*
  * Pages can be marked dirty completely asynchronously from ext4's journalling
  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
@@ -4551,8 +4995,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
  */
 static int ext4_do_update_inode(handle_t *handle,
                                struct inode *inode,
-                               struct ext4_iloc *iloc,
-                               int do_sync)
+                               struct ext4_iloc *iloc)
 {
        struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
        struct ext4_inode_info *ei = EXT4_I(inode);
@@ -4653,22 +5096,10 @@ static int ext4_do_update_inode(handle_t *handle,
                raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
        }
 
-       /*
-        * If we're not using a journal and we were called from
-        * ext4_write_inode() to sync the inode (making do_sync true),
-        * we can just use sync_dirty_buffer() directly to do our dirty
-        * work.  Testing s_journal here is a bit redundant but it's
-        * worth it to avoid potential future trouble.
-        */
-       if (EXT4_SB(inode->i_sb)->s_journal == NULL && do_sync) {
-               BUFFER_TRACE(bh, "call sync_dirty_buffer");
-               sync_dirty_buffer(bh);
-       } else {
-               BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-               rc = ext4_handle_dirty_metadata(handle, inode, bh);
-               if (!err)
-                       err = rc;
-       }
+       BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+       rc = ext4_handle_dirty_metadata(handle, inode, bh);
+       if (!err)
+               err = rc;
        ei->i_state &= ~EXT4_STATE_NEW;
 
 out_brelse:
@@ -4736,8 +5167,16 @@ int ext4_write_inode(struct inode *inode, int wait)
                err = ext4_get_inode_loc(inode, &iloc);
                if (err)
                        return err;
-               err = ext4_do_update_inode(EXT4_NOJOURNAL_HANDLE,
-                                          inode, &iloc, wait);
+               if (wait)
+                       sync_dirty_buffer(iloc.bh);
+               if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
+                       ext4_error(inode->i_sb, __func__,
+                                  "IO error syncing inode, "
+                                  "inode=%lu, block=%llu",
+                                  inode->i_ino,
+                                  (unsigned long long)iloc.bh->b_blocknr);
+                       err = -EIO;
+               }
        }
        return err;
 }
@@ -5033,7 +5472,7 @@ int ext4_mark_iloc_dirty(handle_t *handle,
        get_bh(iloc->bh);
 
        /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
-       err = ext4_do_update_inode(handle, inode, iloc, 0);
+       err = ext4_do_update_inode(handle, inode, iloc);
        put_bh(iloc->bh);
        return err;
 }
@@ -5177,27 +5616,14 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
  */
 void ext4_dirty_inode(struct inode *inode)
 {
-       handle_t *current_handle = ext4_journal_current_handle();
        handle_t *handle;
 
-       if (!ext4_handle_valid(current_handle)) {
-               ext4_mark_inode_dirty(current_handle, inode);
-               return;
-       }
-
        handle = ext4_journal_start(inode, 2);
        if (IS_ERR(handle))
                goto out;
-       if (current_handle &&
-               current_handle->h_transaction != handle->h_transaction) {
-               /* This task has a transaction open against a different fs */
-               printk(KERN_EMERG "%s: transactions do not match!\n",
-                      __func__);
-       } else {
-               jbd_debug(5, "marking dirty.  outer handle=%p\n",
-                               current_handle);
-               ext4_mark_inode_dirty(handle, inode);
-       }
+
+       ext4_mark_inode_dirty(handle, inode);
+
        ext4_journal_stop(handle);
 out:
        return;
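Tying the inode.c additions together, the sketch below models the completion side of the async DIO path in userspace: the end_io callback frees the io_end immediately unless the write touched unwritten extents, in which case the conversion is deferred. All names here (end_io_dio, queue_conversion, convert_range, IO_FLAG_UNWRITTEN) are illustrative stand-ins for ext4_end_io_dio(), queue_work() on dio_unwritten_wq, ext4_convert_unwritten_extents() and DIO_AIO_UNWRITTEN; the sketch runs the deferred work inline instead of on a workqueue and omits the per-inode completion list.

#include <stdio.h>
#include <stdlib.h>

enum { IO_FLAG_NONE = 0, IO_FLAG_UNWRITTEN = 1 };   /* ~ DIO_AIO_UNWRITTEN */

struct io_end {
        int flag;
        long long offset;
        long long size;
};

static void convert_range(long long offset, long long size)
{
        printf("convert unwritten extents: offset=%lld size=%lld\n", offset, size);
}

static void queue_conversion(struct io_end *io)
{
        /* in the kernel this work runs later on a workqueue; here it runs inline */
        convert_range(io->offset, io->size);
        io->flag = IO_FLAG_NONE;
        free(io);
}

/* ~ ext4_end_io_dio(): called when the direct IO finishes */
static void end_io_dio(struct io_end *io, long long offset, long long size)
{
        if (!io || size == 0)
                return;
        if (io->flag != IO_FLAG_UNWRITTEN) {      /* nothing to convert */
                free(io);
                return;
        }
        io->offset = offset;
        io->size = size;
        queue_conversion(io);
}

int main(void)
{
        struct io_end *io = calloc(1, sizeof(*io));

        if (!io)
                return 1;
        io->flag = IO_FLAG_UNWRITTEN;  /* set by get_blocks for a hole/prealloc */
        end_io_dio(io, 4096, 8192);    /* completion of an 8 KiB write at 4 KiB */
        return 0;
}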
index e9c61896d6055a7817190bfe748903fa808cf959..bba12824defad2c8d5cee1baaff488576beda8d7 100644 (file)
@@ -2096,207 +2096,6 @@ out:
        return err;
 }
 
-#ifdef EXT4_MB_HISTORY
-struct ext4_mb_proc_session {
-       struct ext4_mb_history *history;
-       struct super_block *sb;
-       int start;
-       int max;
-};
-
-static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
-                                       struct ext4_mb_history *hs,
-                                       int first)
-{
-       if (hs == s->history + s->max)
-               hs = s->history;
-       if (!first && hs == s->history + s->start)
-               return NULL;
-       while (hs->orig.fe_len == 0) {
-               hs++;
-               if (hs == s->history + s->max)
-                       hs = s->history;
-               if (hs == s->history + s->start)
-                       return NULL;
-       }
-       return hs;
-}
-
-static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
-{
-       struct ext4_mb_proc_session *s = seq->private;
-       struct ext4_mb_history *hs;
-       int l = *pos;
-
-       if (l == 0)
-               return SEQ_START_TOKEN;
-       hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
-       if (!hs)
-               return NULL;
-       while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
-       return hs;
-}
-
-static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
-                                     loff_t *pos)
-{
-       struct ext4_mb_proc_session *s = seq->private;
-       struct ext4_mb_history *hs = v;
-
-       ++*pos;
-       if (v == SEQ_START_TOKEN)
-               return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
-       else
-               return ext4_mb_history_skip_empty(s, ++hs, 0);
-}
-
-static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
-{
-       char buf[25], buf2[25], buf3[25], *fmt;
-       struct ext4_mb_history *hs = v;
-
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
-                               "%-5s %-2s %-6s %-5s %-5s %-6s\n",
-                         "pid", "inode", "original", "goal", "result", "found",
-                          "grps", "cr", "flags", "merge", "tail", "broken");
-               return 0;
-       }
-
-       if (hs->op == EXT4_MB_HISTORY_ALLOC) {
-               fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
-                       "0x%04x %-5s %-5u %-6u\n";
-               sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
-                       hs->result.fe_start, hs->result.fe_len,
-                       hs->result.fe_logical);
-               sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
-                       hs->orig.fe_start, hs->orig.fe_len,
-                       hs->orig.fe_logical);
-               sprintf(buf3, "%u/%d/%u@%u", hs->goal.fe_group,
-                       hs->goal.fe_start, hs->goal.fe_len,
-                       hs->goal.fe_logical);
-               seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
-                               hs->found, hs->groups, hs->cr, hs->flags,
-                               hs->merged ? "M" : "", hs->tail,
-                               hs->buddy ? 1 << hs->buddy : 0);
-       } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
-               fmt = "%-5u %-8u %-23s %-23s %-23s\n";
-               sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group,
-                       hs->result.fe_start, hs->result.fe_len,
-                       hs->result.fe_logical);
-               sprintf(buf, "%u/%d/%u@%u", hs->orig.fe_group,
-                       hs->orig.fe_start, hs->orig.fe_len,
-                       hs->orig.fe_logical);
-               seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
-       } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
-               sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
-                       hs->result.fe_start, hs->result.fe_len);
-               seq_printf(seq, "%-5u %-8u %-23s discard\n",
-                               hs->pid, hs->ino, buf2);
-       } else if (hs->op == EXT4_MB_HISTORY_FREE) {
-               sprintf(buf2, "%u/%d/%u", hs->result.fe_group,
-                       hs->result.fe_start, hs->result.fe_len);
-               seq_printf(seq, "%-5u %-8u %-23s free\n",
-                               hs->pid, hs->ino, buf2);
-       }
-       return 0;
-}
-
-static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
-{
-}
-
-static const struct seq_operations ext4_mb_seq_history_ops = {
-       .start  = ext4_mb_seq_history_start,
-       .next   = ext4_mb_seq_history_next,
-       .stop   = ext4_mb_seq_history_stop,
-       .show   = ext4_mb_seq_history_show,
-};
-
-static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
-{
-       struct super_block *sb = PDE(inode)->data;
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_mb_proc_session *s;
-       int rc;
-       int size;
-
-       if (unlikely(sbi->s_mb_history == NULL))
-               return -ENOMEM;
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
-       if (s == NULL)
-               return -ENOMEM;
-       s->sb = sb;
-       size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
-       s->history = kmalloc(size, GFP_KERNEL);
-       if (s->history == NULL) {
-               kfree(s);
-               return -ENOMEM;
-       }
-
-       spin_lock(&sbi->s_mb_history_lock);
-       memcpy(s->history, sbi->s_mb_history, size);
-       s->max = sbi->s_mb_history_max;
-       s->start = sbi->s_mb_history_cur % s->max;
-       spin_unlock(&sbi->s_mb_history_lock);
-
-       rc = seq_open(file, &ext4_mb_seq_history_ops);
-       if (rc == 0) {
-               struct seq_file *m = (struct seq_file *)file->private_data;
-               m->private = s;
-       } else {
-               kfree(s->history);
-               kfree(s);
-       }
-       return rc;
-
-}
-
-static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *seq = (struct seq_file *)file->private_data;
-       struct ext4_mb_proc_session *s = seq->private;
-       kfree(s->history);
-       kfree(s);
-       return seq_release(inode, file);
-}
-
-static ssize_t ext4_mb_seq_history_write(struct file *file,
-                               const char __user *buffer,
-                               size_t count, loff_t *ppos)
-{
-       struct seq_file *seq = (struct seq_file *)file->private_data;
-       struct ext4_mb_proc_session *s = seq->private;
-       struct super_block *sb = s->sb;
-       char str[32];
-       int value;
-
-       if (count >= sizeof(str)) {
-               printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
-                               "mb_history", (int)sizeof(str));
-               return -EOVERFLOW;
-       }
-
-       if (copy_from_user(str, buffer, count))
-               return -EFAULT;
-
-       value = simple_strtol(str, NULL, 0);
-       if (value < 0)
-               return -ERANGE;
-       EXT4_SB(sb)->s_mb_history_filter = value;
-
-       return count;
-}
-
-static const struct file_operations ext4_mb_seq_history_fops = {
-       .owner          = THIS_MODULE,
-       .open           = ext4_mb_seq_history_open,
-       .read           = seq_read,
-       .write          = ext4_mb_seq_history_write,
-       .llseek         = seq_lseek,
-       .release        = ext4_mb_seq_history_release,
-};
-
 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
 {
        struct super_block *sb = seq->private;
@@ -2396,82 +2195,6 @@ static const struct file_operations ext4_mb_seq_groups_fops = {
        .release        = seq_release,
 };
 
-static void ext4_mb_history_release(struct super_block *sb)
-{
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-
-       if (sbi->s_proc != NULL) {
-               remove_proc_entry("mb_groups", sbi->s_proc);
-               if (sbi->s_mb_history_max)
-                       remove_proc_entry("mb_history", sbi->s_proc);
-       }
-       kfree(sbi->s_mb_history);
-}
-
-static void ext4_mb_history_init(struct super_block *sb)
-{
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-       int i;
-
-       if (sbi->s_proc != NULL) {
-               if (sbi->s_mb_history_max)
-                       proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
-                                        &ext4_mb_seq_history_fops, sb);
-               proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
-                                &ext4_mb_seq_groups_fops, sb);
-       }
-
-       sbi->s_mb_history_cur = 0;
-       spin_lock_init(&sbi->s_mb_history_lock);
-       i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
-       sbi->s_mb_history = i ? kzalloc(i, GFP_KERNEL) : NULL;
-       /* if we can't allocate history, then we simple won't use it */
-}
-
-static noinline_for_stack void
-ext4_mb_store_history(struct ext4_allocation_context *ac)
-{
-       struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
-       struct ext4_mb_history h;
-
-       if (sbi->s_mb_history == NULL)
-               return;
-
-       if (!(ac->ac_op & sbi->s_mb_history_filter))
-               return;
-
-       h.op = ac->ac_op;
-       h.pid = current->pid;
-       h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
-       h.orig = ac->ac_o_ex;
-       h.result = ac->ac_b_ex;
-       h.flags = ac->ac_flags;
-       h.found = ac->ac_found;
-       h.groups = ac->ac_groups_scanned;
-       h.cr = ac->ac_criteria;
-       h.tail = ac->ac_tail;
-       h.buddy = ac->ac_buddy;
-       h.merged = 0;
-       if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
-               if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
-                               ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-                       h.merged = 1;
-               h.goal = ac->ac_g_ex;
-               h.result = ac->ac_f_ex;
-       }
-
-       spin_lock(&sbi->s_mb_history_lock);
-       memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
-       if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
-               sbi->s_mb_history_cur = 0;
-       spin_unlock(&sbi->s_mb_history_lock);
-}
-
-#else
-#define ext4_mb_history_release(sb)
-#define ext4_mb_history_init(sb)
-#endif
-
 
 /* Create and initialize ext4_group_info data for the given group. */
 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
@@ -2690,7 +2413,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
        sbi->s_mb_stats = MB_DEFAULT_STATS;
        sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
        sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
-       sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
        sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
 
        sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
@@ -2708,12 +2430,12 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
                spin_lock_init(&lg->lg_prealloc_lock);
        }
 
-       ext4_mb_history_init(sb);
+       if (sbi->s_proc)
+               proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
+                                &ext4_mb_seq_groups_fops, sb);
 
        if (sbi->s_journal)
                sbi->s_journal->j_commit_callback = release_blocks_on_commit;
-
-       printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
        return 0;
 }
 
@@ -2790,7 +2512,8 @@ int ext4_mb_release(struct super_block *sb)
        }
 
        free_percpu(sbi->s_locality_groups);
-       ext4_mb_history_release(sb);
+       if (sbi->s_proc)
+               remove_proc_entry("mb_groups", sbi->s_proc);
 
        return 0;
 }
@@ -3276,7 +2999,10 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
                        atomic_inc(&sbi->s_bal_breaks);
        }
 
-       ext4_mb_store_history(ac);
+       if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
+               trace_ext4_mballoc_alloc(ac);
+       else
+               trace_ext4_mballoc_prealloc(ac);
 }
 
 /*
@@ -3776,7 +3502,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
        if (ac) {
                ac->ac_sb = sb;
                ac->ac_inode = pa->pa_inode;
-               ac->ac_op = EXT4_MB_HISTORY_DISCARD;
        }
 
        while (bit < end) {
@@ -3796,7 +3521,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
                        ac->ac_b_ex.fe_start = bit;
                        ac->ac_b_ex.fe_len = next - bit;
                        ac->ac_b_ex.fe_logical = 0;
-                       ext4_mb_store_history(ac);
+                       trace_ext4_mballoc_discard(ac);
                }
 
                trace_ext4_mb_release_inode_pa(ac, pa, grp_blk_start + bit,
@@ -3831,9 +3556,6 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
        ext4_group_t group;
        ext4_grpblk_t bit;
 
-       if (ac)
-               ac->ac_op = EXT4_MB_HISTORY_DISCARD;
-
        trace_ext4_mb_release_group_pa(ac, pa);
        BUG_ON(pa->pa_deleted == 0);
        ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
@@ -3848,7 +3570,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
                ac->ac_b_ex.fe_start = bit;
                ac->ac_b_ex.fe_len = pa->pa_len;
                ac->ac_b_ex.fe_logical = 0;
-               ext4_mb_store_history(ac);
+               trace_ext4_mballoc_discard(ac);
        }
 
        return 0;
@@ -4189,7 +3911,6 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
        size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
        isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
                >> bsbits;
-       size = max(size, isize);
 
        if ((size == isize) &&
            !ext4_fs_is_busy(sbi) &&
@@ -4199,6 +3920,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
        }
 
        /* don't use group allocation for large files */
+       size = max(size, isize);
        if (size >= sbi->s_mb_stream_request) {
                ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
                return;
@@ -4739,7 +4461,6 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
 
        ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
        if (ac) {
-               ac->ac_op = EXT4_MB_HISTORY_FREE;
                ac->ac_inode = inode;
                ac->ac_sb = sb;
        }
@@ -4806,7 +4527,7 @@ do_more:
                ac->ac_b_ex.fe_group = block_group;
                ac->ac_b_ex.fe_start = bit;
                ac->ac_b_ex.fe_len = count;
-               ext4_mb_store_history(ac);
+               trace_ext4_mballoc_free(ac);
        }
 
        err = ext4_mb_load_buddy(sb, block_group, &e4b);
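With the mb_history buffer and its /proc interface removed, the same allocator events are exported through the new ext4_mballoc_* tracepoints used above. Below is a minimal user-space sketch of consuming them; it assumes debugfs is mounted at /sys/kernel/debug and the usual events/ext4/ layout (the paths and mount point are assumptions, not taken from this patch).

        #include <stdio.h>

        static int enable_event(const char *name)
        {
                char path[256];
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/kernel/debug/tracing/events/ext4/%s/enable", name);
                f = fopen(path, "w");
                if (!f) {
                        perror(path);
                        return -1;
                }
                fputs("1\n", f);
                fclose(f);
                return 0;
        }

        int main(void)
        {
                const char *events[] = {
                        "ext4_mballoc_alloc", "ext4_mballoc_prealloc",
                        "ext4_mballoc_discard", "ext4_mballoc_free",
                };
                char line[512];
                FILE *pipe;
                unsigned i;

                for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
                        enable_event(events[i]);

                /* Stream formatted events as the allocator emits them. */
                pipe = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
                if (!pipe) {
                        perror("trace_pipe");
                        return 1;
                }
                while (fgets(line, sizeof(line), pipe))
                        fputs(line, stdout);
                fclose(pipe);
                return 0;
        }

This replaces the old habit of polling /proc/fs/ext4/<dev>/mb_history; filtering now happens in the tracer rather than via the removed mb_history_filter knob.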
index 188d3d709b24252d9ec68b1d384af0c6fd4c4ed4..0ca811061bc72eca18b46ca025b660f32ce87138 100644 (file)
@@ -52,18 +52,8 @@ extern u8 mb_enable_debug;
 #define mb_debug(n, fmt, a...)
 #endif
 
-/*
- * with EXT4_MB_HISTORY mballoc stores last N allocations in memory
- * and you can monitor it in /proc/fs/ext4/<dev>/mb_history
- */
-#define EXT4_MB_HISTORY
 #define EXT4_MB_HISTORY_ALLOC          1       /* allocation */
 #define EXT4_MB_HISTORY_PREALLOC       2       /* preallocated blocks used */
-#define EXT4_MB_HISTORY_DISCARD                4       /* preallocation discarded */
-#define EXT4_MB_HISTORY_FREE           8       /* free */
-
-#define EXT4_MB_HISTORY_DEFAULT                (EXT4_MB_HISTORY_ALLOC | \
-                                        EXT4_MB_HISTORY_PREALLOC)
 
 /*
  * How long mballoc can look for a best extent (in found extents)
@@ -84,7 +74,7 @@ extern u8 mb_enable_debug;
  * with 'ext4_mb_stats' allocator will collect stats that will be
  * shown at umount. The collecting costs though!
  */
-#define MB_DEFAULT_STATS               1
+#define MB_DEFAULT_STATS               0
 
 /*
  * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
@@ -217,22 +207,6 @@ struct ext4_allocation_context {
 #define AC_STATUS_FOUND                2
 #define AC_STATUS_BREAK                3
 
-struct ext4_mb_history {
-       struct ext4_free_extent orig;   /* orig allocation */
-       struct ext4_free_extent goal;   /* goal allocation */
-       struct ext4_free_extent result; /* result allocation */
-       unsigned pid;
-       unsigned ino;
-       __u16 found;    /* how many extents have been found */
-       __u16 groups;   /* how many groups have been scanned */
-       __u16 tail;     /* what tail broke some buddy */
-       __u16 buddy;    /* buddy the tail ^^^ broke */
-       __u16 flags;
-       __u8 cr:3;      /* which phase the result extent was found at */
-       __u8 op:4;
-       __u8 merged:1;
-};
-
 struct ext4_buddy {
        struct page *bd_buddy_page;
        void *bd_buddy;
@@ -247,13 +221,6 @@ struct ext4_buddy {
 #define EXT4_MB_BITMAP(e4b)    ((e4b)->bd_bitmap)
 #define EXT4_MB_BUDDY(e4b)     ((e4b)->bd_buddy)
 
-#ifndef EXT4_MB_HISTORY
-static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
-{
-       return;
-}
-#endif
-
 #define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
 
 static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
index bf519f239ae6b5d02930a46fbe43311af111bbf3..a93d5b80f3e2f02b1c33592cbd1abc93193677a5 100644 (file)
@@ -75,7 +75,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
                                goto err_out;
                }
        }
-       retval = ext4_ext_insert_extent(handle, inode, path, &newext);
+       retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
 err_out:
        if (path) {
                ext4_ext_drop_refs(path);
index c07a2915e40b63d0bf5e0ffa4ad08146e2749e36..25b6b1457360d2ed1df3ab943a99bac01d4a5e11 100644 (file)
@@ -322,7 +322,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
                        goto out;
 
                if (ext4_ext_insert_extent(handle, orig_inode,
-                                       orig_path, new_ext))
+                                       orig_path, new_ext, 0))
                        goto out;
        }
 
@@ -333,7 +333,7 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
                        goto out;
 
                if (ext4_ext_insert_extent(handle, orig_inode,
-                                          orig_path, end_ext))
+                                          orig_path, end_ext, 0))
                        goto out;
        }
 out:
@@ -1001,14 +1001,6 @@ mext_check_arguments(struct inode *orig_inode,
                return -EINVAL;
        }
 
-       /* orig and donor should be different file */
-       if (orig_inode->i_ino == donor_inode->i_ino) {
-               ext4_debug("ext4 move extent: The argument files should not "
-                       "be same file [ino:orig %lu, donor %lu]\n",
-                       orig_inode->i_ino, donor_inode->i_ino);
-               return -EINVAL;
-       }
-
        /* Ext4 move extent supports only extent based file */
        if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) {
                ext4_debug("ext4 move extent: orig file is not extents "
@@ -1232,6 +1224,14 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
        int block_len_in_page;
        int uninit;
 
+       /* orig and donor should be different file */
+       if (orig_inode->i_ino == donor_inode->i_ino) {
+               ext4_debug("ext4 move extent: The argument files should not "
+                       "be same file [ino:orig %lu, donor %lu]\n",
+                       orig_inode->i_ino, donor_inode->i_ino);
+               return -EINVAL;
+       }
+
        /* protect orig and donor against a truncate */
        ret1 = mext_inode_double_lock(orig_inode, donor_inode);
        if (ret1 < 0)
index 42f81d285cd5212d85372d3efb5b05a656bd9bad..7c8fe80bacdd93ef64b5cec3f300690099c56a37 100644 (file)
@@ -2076,7 +2076,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
        struct ext4_iloc iloc;
        int err = 0;
 
-       if (!ext4_handle_valid(handle))
+       /* ext4_handle_valid() assumes a valid handle_t pointer */
+       if (handle && !ext4_handle_valid(handle))
                return 0;
 
        mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
index df539ba277798daa6ded137dd79efab7906f0a52..312211ee05af39618274736a49096cb88ff067de 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/ext4.h>
 
-static int default_mb_history_length = 1000;
-
-module_param_named(default_mb_history_length, default_mb_history_length,
-                  int, 0644);
-MODULE_PARM_DESC(default_mb_history_length,
-                "Default number of entries saved for mb_history");
-
 struct proc_dir_entry *ext4_proc_root;
 static struct kset *ext4_kset;
 
@@ -189,6 +182,36 @@ void ext4_itable_unused_set(struct super_block *sb,
                bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
 }
 
+
+/* Just increment the non-pointer handle value */
+static handle_t *ext4_get_nojournal(void)
+{
+       handle_t *handle = current->journal_info;
+       unsigned long ref_cnt = (unsigned long)handle;
+
+       BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
+
+       ref_cnt++;
+       handle = (handle_t *)ref_cnt;
+
+       current->journal_info = handle;
+       return handle;
+}
+
+
+/* Decrement the non-pointer handle value */
+static void ext4_put_nojournal(handle_t *handle)
+{
+       unsigned long ref_cnt = (unsigned long)handle;
+
+       BUG_ON(ref_cnt == 0);
+
+       ref_cnt--;
+       handle = (handle_t *)ref_cnt;
+
+       current->journal_info = handle;
+}
+
 /*
  * Wrappers for jbd2_journal_start/end.
  *
@@ -215,11 +238,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
                }
                return jbd2_journal_start(journal, nblocks);
        }
-       /*
-        * We're not journaling, return the appropriate indication.
-        */
-       current->journal_info = EXT4_NOJOURNAL_HANDLE;
-       return current->journal_info;
+       return ext4_get_nojournal();
 }
 
 /*
@@ -235,11 +254,7 @@ int __ext4_journal_stop(const char *where, handle_t *handle)
        int rc;
 
        if (!ext4_handle_valid(handle)) {
-               /*
-                * Do this here since we don't call jbd2_journal_stop() in
-                * no-journal mode.
-                */
-               current->journal_info = NULL;
+               ext4_put_nojournal(handle);
                return 0;
        }
        sb = handle->h_transaction->t_journal->j_private;
@@ -580,6 +595,9 @@ static void ext4_put_super(struct super_block *sb)
        struct ext4_super_block *es = sbi->s_es;
        int i, err;
 
+       flush_workqueue(sbi->dio_unwritten_wq);
+       destroy_workqueue(sbi->dio_unwritten_wq);
+
        lock_super(sb);
        lock_kernel();
        if (sb->s_dirt)
@@ -684,6 +702,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->i_allocated_meta_blocks = 0;
        ei->i_delalloc_reserved_flag = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
+       INIT_LIST_HEAD(&ei->i_aio_dio_complete_list);
+       ei->cur_aio_dio = NULL;
 
        return &ei->vfs_inode;
 }
@@ -1052,7 +1072,7 @@ enum {
        Opt_journal_update, Opt_journal_dev,
        Opt_journal_checksum, Opt_journal_async_commit,
        Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
-       Opt_data_err_abort, Opt_data_err_ignore, Opt_mb_history_length,
+       Opt_data_err_abort, Opt_data_err_ignore,
        Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
        Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
        Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize,
@@ -1099,7 +1119,6 @@ static const match_table_t tokens = {
        {Opt_data_writeback, "data=writeback"},
        {Opt_data_err_abort, "data_err=abort"},
        {Opt_data_err_ignore, "data_err=ignore"},
-       {Opt_mb_history_length, "mb_history_length=%u"},
        {Opt_offusrjquota, "usrjquota="},
        {Opt_usrjquota, "usrjquota=%s"},
        {Opt_offgrpjquota, "grpjquota="},
@@ -1340,13 +1359,6 @@ static int parse_options(char *options, struct super_block *sb,
                case Opt_data_err_ignore:
                        clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
                        break;
-               case Opt_mb_history_length:
-                       if (match_int(&args[0], &option))
-                               return 0;
-                       if (option < 0)
-                               return 0;
-                       sbi->s_mb_history_max = option;
-                       break;
 #ifdef CONFIG_QUOTA
                case Opt_usrjquota:
                        qtype = USRQUOTA;
@@ -1646,13 +1658,6 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
                        EXT4_INODES_PER_GROUP(sb),
                        sbi->s_mount_opt);
 
-       if (EXT4_SB(sb)->s_journal) {
-               ext4_msg(sb, KERN_INFO, "%s journal on %s",
-                      EXT4_SB(sb)->s_journal->j_inode ? "internal" :
-                      "external", EXT4_SB(sb)->s_journal->j_devname);
-       } else {
-               ext4_msg(sb, KERN_INFO, "no journal");
-       }
        return res;
 }
 
@@ -2197,6 +2202,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
 EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
+EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
 
 static struct attribute *ext4_attrs[] = {
        ATTR_LIST(delayed_allocation_blocks),
@@ -2210,6 +2216,7 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(mb_order2_req),
        ATTR_LIST(mb_stream_req),
        ATTR_LIST(mb_group_prealloc),
+       ATTR_LIST(max_writeback_mb_bump),
        NULL,
 };
 
@@ -2413,7 +2420,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
        sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
        sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
-       sbi->s_mb_history_max = default_mb_history_length;
 
        set_opt(sbi->s_mount_opt, BARRIER);
 
@@ -2679,6 +2685,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        sbi->s_stripe = ext4_get_stripe_size(sbi);
+       sbi->s_max_writeback_mb_bump = 128;
 
        /*
         * set up enough so that it can read an inode
@@ -2798,6 +2805,12 @@ no_journal:
                        clear_opt(sbi->s_mount_opt, NOBH);
                }
        }
+       EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
+       if (!EXT4_SB(sb)->dio_unwritten_wq) {
+               printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
+               goto failed_mount_wq;
+       }
+
        /*
         * The jbd2_journal_load will have done any necessary log recovery,
         * so we can safely mount the rest of the filesystem now.
@@ -2849,12 +2862,12 @@ no_journal:
                         "available");
        }
 
-       if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+       if (test_opt(sb, DELALLOC) &&
+           (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
                ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
                         "requested data journaling mode");
                clear_opt(sbi->s_mount_opt, DELALLOC);
-       } else if (test_opt(sb, DELALLOC))
-               ext4_msg(sb, KERN_INFO, "delayed allocation enabled");
+       }
 
        err = ext4_setup_system_zone(sb);
        if (err) {
@@ -2910,6 +2923,8 @@ cantfind_ext4:
 
 failed_mount4:
        ext4_msg(sb, KERN_ERR, "mount failed");
+       destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
+failed_mount_wq:
        ext4_release_system_zone(sb);
        if (sbi->s_journal) {
                jbd2_journal_destroy(sbi->s_journal);
@@ -3164,9 +3179,7 @@ static int ext4_load_journal(struct super_block *sb,
                        return -EINVAL;
        }
 
-       if (journal->j_flags & JBD2_BARRIER)
-               ext4_msg(sb, KERN_INFO, "barriers enabled");
-       else
+       if (!(journal->j_flags & JBD2_BARRIER))
                ext4_msg(sb, KERN_INFO, "barriers disabled");
 
        if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
@@ -3361,11 +3374,13 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 {
        int ret = 0;
        tid_t target;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
 
        trace_ext4_sync_fs(sb, wait);
-       if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, &target)) {
+       flush_workqueue(sbi->dio_unwritten_wq);
+       if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
                if (wait)
-                       jbd2_log_wait_commit(EXT4_SB(sb)->s_journal, target);
+                       jbd2_log_wait_commit(sbi->s_journal, target);
        }
        return ret;
 }
@@ -3951,27 +3966,6 @@ static struct file_system_type ext4_fs_type = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-#ifdef CONFIG_EXT4DEV_COMPAT
-static int ext4dev_get_sb(struct file_system_type *fs_type, int flags,
-                         const char *dev_name, void *data,struct vfsmount *mnt)
-{
-       printk(KERN_WARNING "EXT4-fs (%s): Update your userspace programs "
-              "to mount using ext4\n", dev_name);
-       printk(KERN_WARNING "EXT4-fs (%s): ext4dev backwards compatibility "
-              "will go away by 2.6.31\n", dev_name);
-       return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super,mnt);
-}
-
-static struct file_system_type ext4dev_fs_type = {
-       .owner          = THIS_MODULE,
-       .name           = "ext4dev",
-       .get_sb         = ext4dev_get_sb,
-       .kill_sb        = kill_block_super,
-       .fs_flags       = FS_REQUIRES_DEV,
-};
-MODULE_ALIAS("ext4dev");
-#endif
-
 static int __init init_ext4_fs(void)
 {
        int err;
@@ -3996,13 +3990,6 @@ static int __init init_ext4_fs(void)
        err = register_filesystem(&ext4_fs_type);
        if (err)
                goto out;
-#ifdef CONFIG_EXT4DEV_COMPAT
-       err = register_filesystem(&ext4dev_fs_type);
-       if (err) {
-               unregister_filesystem(&ext4_fs_type);
-               goto out;
-       }
-#endif
        return 0;
 out:
        destroy_inodecache();
@@ -4021,9 +4008,6 @@ out4:
 static void __exit exit_ext4_fs(void)
 {
        unregister_filesystem(&ext4_fs_type);
-#ifdef CONFIG_EXT4DEV_COMPAT
-       unregister_filesystem(&ext4dev_fs_type);
-#endif
        destroy_inodecache();
        exit_ext4_xattr();
        exit_ext4_mballoc();
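The super.c hunks above replace the single no-journal sentinel with a small reference count encoded directly in current->journal_info, so nested no-journal "handles" nest and unwind correctly. The following plain C sketch models that tagged-value pattern only; MAX_REF_COUNT and the static slot are stand-ins for the kernel's definitions (the real EXT4_NOJOURNAL_MAX_REF_COUNT value and the per-task journal_info field are assumptions here).

        #include <assert.h>
        #include <stdio.h>

        typedef struct handle handle_t;         /* opaque, never dereferenced */

        #define MAX_REF_COUNT 4096UL            /* assumed limit */

        static handle_t *journal_info;          /* stands in for current->journal_info */

        static handle_t *get_nojournal(void)
        {
                unsigned long ref = (unsigned long)journal_info;

                assert(ref < MAX_REF_COUNT);    /* guard against counter overflow */
                journal_info = (handle_t *)(ref + 1);
                return journal_info;
        }

        static void put_nojournal(handle_t *handle)
        {
                unsigned long ref = (unsigned long)handle;

                assert(ref > 0);
                journal_info = (handle_t *)(ref - 1);
        }

        int main(void)
        {
                handle_t *h1 = get_nojournal();
                handle_t *h2 = get_nojournal();  /* nested "transaction" */

                printf("nesting depth: %lu\n", (unsigned long)h2);
                put_nojournal(h2);
                put_nojournal(h1);
                printf("depth after release: %lu\n", (unsigned long)journal_info);
                return 0;
        }

The value is never dereferenced; ext4_handle_valid() only has to distinguish these small integers from real jbd2 handle pointers, which is why __ext4_journal_stop() can now simply call ext4_put_nojournal() instead of clearing journal_info unconditionally.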
index adb0e72a176dd42bb8bdd1b4d099e021a02b02b1..7db0979c6b72f0d16e19c77d6e73af5642992f83 100644 (file)
@@ -323,7 +323,7 @@ extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
 /* fat/misc.c */
 extern void fat_fs_error(struct super_block *s, const char *fmt, ...)
        __attribute__ ((format (printf, 2, 3))) __cold;
-extern void fat_clusters_flush(struct super_block *sb);
+extern int fat_clusters_flush(struct super_block *sb);
 extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
 extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
                              __le16 __time, __le16 __date, u8 time_cs);
index 04629d1302fc45e42aef40b180cfd1fcc4b0f73b..76b7961ab6632e5d955a9139eaa4b0652e11ff8c 100644 (file)
@@ -451,12 +451,16 @@ static void fat_write_super(struct super_block *sb)
 
 static int fat_sync_fs(struct super_block *sb, int wait)
 {
-       lock_super(sb);
-       fat_clusters_flush(sb);
-       sb->s_dirt = 0;
-       unlock_super(sb);
+       int err = 0;
 
-       return 0;
+       if (sb->s_dirt) {
+               lock_super(sb);
+               sb->s_dirt = 0;
+               err = fat_clusters_flush(sb);
+               unlock_super(sb);
+       }
+
+       return err;
 }
 
 static void fat_put_super(struct super_block *sb)
@@ -812,7 +816,7 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
                        seq_puts(m, ",shortname=mixed");
                        break;
                case VFAT_SFN_DISPLAY_LOWER | VFAT_SFN_CREATE_WIN95:
-                       /* seq_puts(m, ",shortname=lower"); */
+                       seq_puts(m, ",shortname=lower");
                        break;
                default:
                        seq_puts(m, ",shortname=unknown");
@@ -963,7 +967,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
        opts->codepage = fat_default_codepage;
        opts->iocharset = fat_default_iocharset;
        if (is_vfat) {
-               opts->shortname = VFAT_SFN_DISPLAY_LOWER|VFAT_SFN_CREATE_WIN95;
+               opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
                opts->rodir = 0;
        } else {
                opts->shortname = 0;
index 4e35be873e0990df86c3d510f0fd82297be5d22c..0f55f5cb732f24078af99a605e4fa764eabad6bf 100644 (file)
@@ -43,19 +43,19 @@ EXPORT_SYMBOL_GPL(fat_fs_error);
 
 /* Flushes the number of free clusters on FAT32 */
 /* XXX: Need to write one per FSINFO block.  Currently only writes 1 */
-void fat_clusters_flush(struct super_block *sb)
+int fat_clusters_flush(struct super_block *sb)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct buffer_head *bh;
        struct fat_boot_fsinfo *fsinfo;
 
        if (sbi->fat_bits != 32)
-               return;
+               return 0;
 
        bh = sb_bread(sb, sbi->fsinfo_sector);
        if (bh == NULL) {
                printk(KERN_ERR "FAT: bread failed in fat_clusters_flush\n");
-               return;
+               return -EIO;
        }
 
        fsinfo = (struct fat_boot_fsinfo *)bh->b_data;
@@ -74,6 +74,8 @@ void fat_clusters_flush(struct super_block *sb)
                mark_buffer_dirty(bh);
        }
        brelse(bh);
+
+       return 0;
 }
 
 /*
index cb6e83557112d74e03cf00f37d6b3bddfa66cd83..f565f24019b585c0d52d6934f4e3c32f22b6f420 100644 (file)
@@ -499,17 +499,10 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
        int charlen;
 
        if (utf8) {
-               int name_len = strlen(name);
-
-               *outlen = utf8s_to_utf16s(name, PATH_MAX, (wchar_t *) outname);
-
-               /*
-                * We stripped '.'s before and set len appropriately,
-                * but utf8s_to_utf16s doesn't care about len
-                */
-               *outlen -= (name_len - len);
-
-               if (*outlen > 255)
+               *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+               if (*outlen < 0)
+                       return *outlen;
+               else if (*outlen > 255)
                        return -ENAMETOOLONG;
 
                op = &outname[*outlen * sizeof(wchar_t)];
index 5d70b3e6d49bb105e3c000888c42ec835647add5..ca0f5eb62b20e450b38ef84b52c64bc2a214bc17 100644 (file)
@@ -643,6 +643,7 @@ out:
 
 int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
 {
+       struct transaction_chp_stats_s *stats;
        transaction_t *transaction;
        journal_t *journal;
        int ret = 0;
@@ -679,6 +680,12 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
 
        /* OK, that was the last buffer for the transaction: we can now
           safely remove this transaction from the log */
+       stats = &transaction->t_chp_stats;
+       if (stats->cs_chp_time)
+               stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time,
+                                                   jiffies);
+       trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev,
+                                   transaction->t_tid, stats);
 
        __jbd2_journal_drop_transaction(journal, transaction);
        kfree(transaction);
index 26d991ddc1e6bf30b3abee615f597b7109119671..d4cfd6d2779e07fe1d5381975840ccb3e3486566 100644 (file)
@@ -410,10 +410,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        if (commit_transaction->t_synchronous_commit)
                write_op = WRITE_SYNC_PLUG;
        trace_jbd2_commit_locking(journal, commit_transaction);
-       stats.u.run.rs_wait = commit_transaction->t_max_wait;
-       stats.u.run.rs_locked = jiffies;
-       stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
-                                               stats.u.run.rs_locked);
+       stats.run.rs_wait = commit_transaction->t_max_wait;
+       stats.run.rs_locked = jiffies;
+       stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
+                                             stats.run.rs_locked);
 
        spin_lock(&commit_transaction->t_handle_lock);
        while (commit_transaction->t_updates) {
@@ -486,9 +486,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        jbd2_journal_switch_revoke_table(journal);
 
        trace_jbd2_commit_flushing(journal, commit_transaction);
-       stats.u.run.rs_flushing = jiffies;
-       stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked,
-                                              stats.u.run.rs_flushing);
+       stats.run.rs_flushing = jiffies;
+       stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
+                                            stats.run.rs_flushing);
 
        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
@@ -523,11 +523,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        spin_unlock(&journal->j_state_lock);
 
        trace_jbd2_commit_logging(journal, commit_transaction);
-       stats.u.run.rs_logging = jiffies;
-       stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing,
-                                                stats.u.run.rs_logging);
-       stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
-       stats.u.run.rs_blocks_logged = 0;
+       stats.run.rs_logging = jiffies;
+       stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
+                                              stats.run.rs_logging);
+       stats.run.rs_blocks = commit_transaction->t_outstanding_credits;
+       stats.run.rs_blocks_logged = 0;
 
        J_ASSERT(commit_transaction->t_nr_buffers <=
                 commit_transaction->t_outstanding_credits);
@@ -695,7 +695,7 @@ start_journal_io:
                                submit_bh(write_op, bh);
                        }
                        cond_resched();
-                       stats.u.run.rs_blocks_logged += bufs;
+                       stats.run.rs_blocks_logged += bufs;
 
                        /* Force a new descriptor to be generated next
                            time round the loop. */
@@ -988,33 +988,30 @@ restart_loop:
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
 
        commit_transaction->t_start = jiffies;
-       stats.u.run.rs_logging = jbd2_time_diff(stats.u.run.rs_logging,
-                                               commit_transaction->t_start);
+       stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
+                                             commit_transaction->t_start);
 
        /*
-        * File the transaction for history
+        * File the transaction statistics
         */
-       stats.ts_type = JBD2_STATS_RUN;
        stats.ts_tid = commit_transaction->t_tid;
-       stats.u.run.rs_handle_count = commit_transaction->t_handle_count;
-       spin_lock(&journal->j_history_lock);
-       memcpy(journal->j_history + journal->j_history_cur, &stats,
-                       sizeof(stats));
-       if (++journal->j_history_cur == journal->j_history_max)
-               journal->j_history_cur = 0;
+       stats.run.rs_handle_count = commit_transaction->t_handle_count;
+       trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
+                            commit_transaction->t_tid, &stats.run);
 
        /*
         * Calculate overall stats
         */
+       spin_lock(&journal->j_history_lock);
        journal->j_stats.ts_tid++;
-       journal->j_stats.u.run.rs_wait += stats.u.run.rs_wait;
-       journal->j_stats.u.run.rs_running += stats.u.run.rs_running;
-       journal->j_stats.u.run.rs_locked += stats.u.run.rs_locked;
-       journal->j_stats.u.run.rs_flushing += stats.u.run.rs_flushing;
-       journal->j_stats.u.run.rs_logging += stats.u.run.rs_logging;
-       journal->j_stats.u.run.rs_handle_count += stats.u.run.rs_handle_count;
-       journal->j_stats.u.run.rs_blocks += stats.u.run.rs_blocks;
-       journal->j_stats.u.run.rs_blocks_logged += stats.u.run.rs_blocks_logged;
+       journal->j_stats.run.rs_wait += stats.run.rs_wait;
+       journal->j_stats.run.rs_running += stats.run.rs_running;
+       journal->j_stats.run.rs_locked += stats.run.rs_locked;
+       journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
+       journal->j_stats.run.rs_logging += stats.run.rs_logging;
+       journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
+       journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
+       journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
        spin_unlock(&journal->j_history_lock);
 
        commit_transaction->t_state = T_FINISHED;
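With the per-transaction history ring gone, the ts_type discriminator disappears and run and checkpoint statistics no longer have to share a union; the s/u.run./run./ churn above follows from that. A minimal layout sketch of the before/after shape (field lists abbreviated, not the kernel's actual structs):

        #include <stdio.h>

        /* Old shape: a discriminated union, one record type per history slot. */
        struct stats_old {
                int ts_type;                    /* run or checkpoint record */
                unsigned long ts_tid;
                union {
                        struct { unsigned long rs_wait, rs_running; } run;
                        struct { unsigned long cs_chp_time, cs_dropped; } chp;
                } u;
        };

        /* New shape: both kinds of stats can coexist for one transaction,
         * since they are now emitted immediately via tracepoints rather
         * than stored as alternating history records. */
        struct stats_new {
                unsigned long ts_tid;
                struct { unsigned long rs_wait, rs_running; } run;
                struct { unsigned long cs_chp_time, cs_dropped; } chp;
        };

        int main(void)
        {
                printf("old: %zu bytes, new: %zu bytes\n",
                       sizeof(struct stats_old), sizeof(struct stats_new));
                return 0;
        }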
index 53b86e16e5fe3a4bc29f8825db8b2a54b7e60fde..b0ab5219becbd92e34c6676a93716e102e70abe9 100644 (file)
@@ -136,10 +136,6 @@ static int kjournald2(void *arg)
        journal->j_task = current;
        wake_up(&journal->j_wait_done_commit);
 
-       printk(KERN_INFO "kjournald2 starting: pid %d, dev %s, "
-              "commit interval %ld seconds\n", current->pid,
-              journal->j_devname, journal->j_commit_interval / HZ);
-
        /*
         * And now, wait forever for commit wakeup events.
         */
@@ -223,7 +219,8 @@ static int jbd2_journal_start_thread(journal_t *journal)
 {
        struct task_struct *t;
 
-       t = kthread_run(kjournald2, journal, "kjournald2");
+       t = kthread_run(kjournald2, journal, "jbd2/%s",
+                       journal->j_devname);
        if (IS_ERR(t))
                return PTR_ERR(t);
 
@@ -679,153 +676,6 @@ struct jbd2_stats_proc_session {
        int max;
 };
 
-static void *jbd2_history_skip_empty(struct jbd2_stats_proc_session *s,
-                                       struct transaction_stats_s *ts,
-                                       int first)
-{
-       if (ts == s->stats + s->max)
-               ts = s->stats;
-       if (!first && ts == s->stats + s->start)
-               return NULL;
-       while (ts->ts_type == 0) {
-               ts++;
-               if (ts == s->stats + s->max)
-                       ts = s->stats;
-               if (ts == s->stats + s->start)
-                       return NULL;
-       }
-       return ts;
-
-}
-
-static void *jbd2_seq_history_start(struct seq_file *seq, loff_t *pos)
-{
-       struct jbd2_stats_proc_session *s = seq->private;
-       struct transaction_stats_s *ts;
-       int l = *pos;
-
-       if (l == 0)
-               return SEQ_START_TOKEN;
-       ts = jbd2_history_skip_empty(s, s->stats + s->start, 1);
-       if (!ts)
-               return NULL;
-       l--;
-       while (l) {
-               ts = jbd2_history_skip_empty(s, ++ts, 0);
-               if (!ts)
-                       break;
-               l--;
-       }
-       return ts;
-}
-
-static void *jbd2_seq_history_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       struct jbd2_stats_proc_session *s = seq->private;
-       struct transaction_stats_s *ts = v;
-
-       ++*pos;
-       if (v == SEQ_START_TOKEN)
-               return jbd2_history_skip_empty(s, s->stats + s->start, 1);
-       else
-               return jbd2_history_skip_empty(s, ++ts, 0);
-}
-
-static int jbd2_seq_history_show(struct seq_file *seq, void *v)
-{
-       struct transaction_stats_s *ts = v;
-       if (v == SEQ_START_TOKEN) {
-               seq_printf(seq, "%-4s %-5s %-5s %-5s %-5s %-5s %-5s %-6s %-5s "
-                               "%-5s %-5s %-5s %-5s %-5s\n", "R/C", "tid",
-                               "wait", "run", "lock", "flush", "log", "hndls",
-                               "block", "inlog", "ctime", "write", "drop",
-                               "close");
-               return 0;
-       }
-       if (ts->ts_type == JBD2_STATS_RUN)
-               seq_printf(seq, "%-4s %-5lu %-5u %-5u %-5u %-5u %-5u "
-                               "%-6lu %-5lu %-5lu\n", "R", ts->ts_tid,
-                               jiffies_to_msecs(ts->u.run.rs_wait),
-                               jiffies_to_msecs(ts->u.run.rs_running),
-                               jiffies_to_msecs(ts->u.run.rs_locked),
-                               jiffies_to_msecs(ts->u.run.rs_flushing),
-                               jiffies_to_msecs(ts->u.run.rs_logging),
-                               ts->u.run.rs_handle_count,
-                               ts->u.run.rs_blocks,
-                               ts->u.run.rs_blocks_logged);
-       else if (ts->ts_type == JBD2_STATS_CHECKPOINT)
-               seq_printf(seq, "%-4s %-5lu %48s %-5u %-5lu %-5lu %-5lu\n",
-                               "C", ts->ts_tid, " ",
-                               jiffies_to_msecs(ts->u.chp.cs_chp_time),
-                               ts->u.chp.cs_written, ts->u.chp.cs_dropped,
-                               ts->u.chp.cs_forced_to_close);
-       else
-               J_ASSERT(0);
-       return 0;
-}
-
-static void jbd2_seq_history_stop(struct seq_file *seq, void *v)
-{
-}
-
-static const struct seq_operations jbd2_seq_history_ops = {
-       .start  = jbd2_seq_history_start,
-       .next   = jbd2_seq_history_next,
-       .stop   = jbd2_seq_history_stop,
-       .show   = jbd2_seq_history_show,
-};
-
-static int jbd2_seq_history_open(struct inode *inode, struct file *file)
-{
-       journal_t *journal = PDE(inode)->data;
-       struct jbd2_stats_proc_session *s;
-       int rc, size;
-
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
-       if (s == NULL)
-               return -ENOMEM;
-       size = sizeof(struct transaction_stats_s) * journal->j_history_max;
-       s->stats = kmalloc(size, GFP_KERNEL);
-       if (s->stats == NULL) {
-               kfree(s);
-               return -ENOMEM;
-       }
-       spin_lock(&journal->j_history_lock);
-       memcpy(s->stats, journal->j_history, size);
-       s->max = journal->j_history_max;
-       s->start = journal->j_history_cur % s->max;
-       spin_unlock(&journal->j_history_lock);
-
-       rc = seq_open(file, &jbd2_seq_history_ops);
-       if (rc == 0) {
-               struct seq_file *m = file->private_data;
-               m->private = s;
-       } else {
-               kfree(s->stats);
-               kfree(s);
-       }
-       return rc;
-
-}
-
-static int jbd2_seq_history_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *seq = file->private_data;
-       struct jbd2_stats_proc_session *s = seq->private;
-
-       kfree(s->stats);
-       kfree(s);
-       return seq_release(inode, file);
-}
-
-static struct file_operations jbd2_seq_history_fops = {
-       .owner          = THIS_MODULE,
-       .open           = jbd2_seq_history_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = jbd2_seq_history_release,
-};
-
 static void *jbd2_seq_info_start(struct seq_file *seq, loff_t *pos)
 {
        return *pos ? NULL : SEQ_START_TOKEN;
@@ -842,29 +692,29 @@ static int jbd2_seq_info_show(struct seq_file *seq, void *v)
 
        if (v != SEQ_START_TOKEN)
                return 0;
-       seq_printf(seq, "%lu transaction, each upto %u blocks\n",
+       seq_printf(seq, "%lu transaction, each up to %u blocks\n",
                        s->stats->ts_tid,
                        s->journal->j_max_transaction_buffers);
        if (s->stats->ts_tid == 0)
                return 0;
        seq_printf(seq, "average: \n  %ums waiting for transaction\n",
-           jiffies_to_msecs(s->stats->u.run.rs_wait / s->stats->ts_tid));
+           jiffies_to_msecs(s->stats->run.rs_wait / s->stats->ts_tid));
        seq_printf(seq, "  %ums running transaction\n",
-           jiffies_to_msecs(s->stats->u.run.rs_running / s->stats->ts_tid));
+           jiffies_to_msecs(s->stats->run.rs_running / s->stats->ts_tid));
        seq_printf(seq, "  %ums transaction was being locked\n",
-           jiffies_to_msecs(s->stats->u.run.rs_locked / s->stats->ts_tid));
+           jiffies_to_msecs(s->stats->run.rs_locked / s->stats->ts_tid));
        seq_printf(seq, "  %ums flushing data (in ordered mode)\n",
-           jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid));
+           jiffies_to_msecs(s->stats->run.rs_flushing / s->stats->ts_tid));
        seq_printf(seq, "  %ums logging transaction\n",
-           jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid));
+           jiffies_to_msecs(s->stats->run.rs_logging / s->stats->ts_tid));
        seq_printf(seq, "  %lluus average transaction commit time\n",
                   div_u64(s->journal->j_average_commit_time, 1000));
        seq_printf(seq, "  %lu handles per transaction\n",
-           s->stats->u.run.rs_handle_count / s->stats->ts_tid);
+           s->stats->run.rs_handle_count / s->stats->ts_tid);
        seq_printf(seq, "  %lu blocks per transaction\n",
-           s->stats->u.run.rs_blocks / s->stats->ts_tid);
+           s->stats->run.rs_blocks / s->stats->ts_tid);
        seq_printf(seq, "  %lu logged blocks per transaction\n",
-           s->stats->u.run.rs_blocks_logged / s->stats->ts_tid);
+           s->stats->run.rs_blocks_logged / s->stats->ts_tid);
        return 0;
 }
 
@@ -920,7 +770,7 @@ static int jbd2_seq_info_release(struct inode *inode, struct file *file)
        return seq_release(inode, file);
 }
 
-static struct file_operations jbd2_seq_info_fops = {
+static const struct file_operations jbd2_seq_info_fops = {
        .owner          = THIS_MODULE,
        .open           = jbd2_seq_info_open,
        .read           = seq_read,
@@ -934,8 +784,6 @@ static void jbd2_stats_proc_init(journal_t *journal)
 {
        journal->j_proc_entry = proc_mkdir(journal->j_devname, proc_jbd2_stats);
        if (journal->j_proc_entry) {
-               proc_create_data("history", S_IRUGO, journal->j_proc_entry,
-                                &jbd2_seq_history_fops, journal);
                proc_create_data("info", S_IRUGO, journal->j_proc_entry,
                                 &jbd2_seq_info_fops, journal);
        }
@@ -944,27 +792,9 @@ static void jbd2_stats_proc_init(journal_t *journal)
 static void jbd2_stats_proc_exit(journal_t *journal)
 {
        remove_proc_entry("info", journal->j_proc_entry);
-       remove_proc_entry("history", journal->j_proc_entry);
        remove_proc_entry(journal->j_devname, proc_jbd2_stats);
 }
 
-static void journal_init_stats(journal_t *journal)
-{
-       int size;
-
-       if (!proc_jbd2_stats)
-               return;
-
-       journal->j_history_max = 100;
-       size = sizeof(struct transaction_stats_s) * journal->j_history_max;
-       journal->j_history = kzalloc(size, GFP_KERNEL);
-       if (!journal->j_history) {
-               journal->j_history_max = 0;
-               return;
-       }
-       spin_lock_init(&journal->j_history_lock);
-}
-
 /*
  * Management for journal control blocks: functions to create and
  * destroy journal_t structures, and to initialise and read existing
@@ -1009,7 +839,7 @@ static journal_t * journal_init_common (void)
                goto fail;
        }
 
-       journal_init_stats(journal);
+       spin_lock_init(&journal->j_history_lock);
 
        return journal;
 fail:
@@ -1115,7 +945,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
        while ((p = strchr(p, '/')))
                *p = '!';
        p = journal->j_devname + strlen(journal->j_devname);
-       sprintf(p, ":%lu", journal->j_inode->i_ino);
+       sprintf(p, "-%lu", journal->j_inode->i_ino);
        jbd_debug(1,
                  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
                  journal, inode->i_sb->s_id, inode->i_ino,
index 00388d2a3c9970a3fdfccec1c4dda85904b6fd4e..5c01fc148ce880e02a557a3ade47a00056d563f3 100644 (file)
@@ -176,7 +176,7 @@ static const struct file_operations exports_operations = {
 extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
 extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
 
-static struct file_operations pool_stats_operations = {
+static const struct file_operations pool_stats_operations = {
        .open           = nfsd_pool_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
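Several hunks in this series only add const to file_operations tables that are never written after initialization. A tiny user-space stand-in for the pattern, showing why it is safe and desirable: the table can live in read-only memory and cannot be patched at runtime.

        #include <stdio.h>

        struct ops {
                int (*open)(const char *name);
                long (*read)(char *buf, long len);
        };

        static int demo_open(const char *name)
        {
                printf("open %s\n", name);
                return 0;
        }

        static long demo_read(char *buf, long len)
        {
                (void)buf;
                return len;
        }

        /* const: the table ends up in .rodata, like the kernel tables above. */
        static const struct ops demo_ops = {
                .open = demo_open,
                .read = demo_read,
        };

        int main(void)
        {
                demo_ops.open("pool_stats");
                printf("read -> %ld\n", demo_ops.read(NULL, 0));
                return 0;
        }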
index 6a2711f4c32122e9900c209f0521ec6b5f6d59f3..5941958f1e47325e9497cef3101065f3efd73f46 100644 (file)
@@ -36,6 +36,7 @@
 
 void nilfs_btnode_cache_init_once(struct address_space *btnc)
 {
+       memset(btnc, 0, sizeof(*btnc));
        INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
        spin_lock_init(&btnc->tree_lock);
        INIT_LIST_HEAD(&btnc->private_list);
index 1a4fa04cf071bd8a82ae9f21d079612e963b419b..e097099bfc8f0923480e110243979d9ace0be924 100644 (file)
@@ -697,7 +697,7 @@ not_empty:
        return 0;
 }
 
-struct file_operations nilfs_dir_operations = {
+const struct file_operations nilfs_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .readdir        = nilfs_readdir,
index 7d7b4983dee3d631c22456d0c2d6581bf4239b9c..30292df443cea081b6b4c94690f41aaf8cad01ec 100644 (file)
@@ -134,7 +134,7 @@ static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
  * We have mostly NULL's here: the current defaults are ok for
  * the nilfs filesystem.
  */
-struct file_operations nilfs_file_operations = {
+const struct file_operations nilfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
index 2d2c501deb54e8e88fac474267eac501d9964581..5040220c3732866ad2f9bd7b4c79a81ee8c5883f 100644 (file)
@@ -400,6 +400,7 @@ int nilfs_read_inode_common(struct inode *inode,
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
 #endif
+       ii->i_dir_start_lookup = 0;
        ii->i_cno = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
 
index b18c4998f8d0c1834727ecf089d48d3235851766..f6326112d647537c5ea0419d36ec4b73d466fbbd 100644 (file)
@@ -433,7 +433,7 @@ static const struct address_space_operations def_mdt_aops = {
 };
 
 static const struct inode_operations def_mdt_iops;
-static struct file_operations def_mdt_fops;
+static const struct file_operations def_mdt_fops;
 
 /*
  * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile,
index bad7368782d08a7759ca5964c9dda18b130f4819..4da6f67e9a915aeb2096cc03cb47ec09a40f0338 100644 (file)
@@ -294,9 +294,9 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *);
 /*
  * Inodes and files operations
  */
-extern struct file_operations nilfs_dir_operations;
+extern const struct file_operations nilfs_dir_operations;
 extern const struct inode_operations nilfs_file_inode_operations;
-extern struct file_operations nilfs_file_operations;
+extern const struct file_operations nilfs_file_operations;
 extern const struct address_space_operations nilfs_aops;
 extern const struct inode_operations nilfs_dir_inode_operations;
 extern const struct inode_operations nilfs_special_inode_operations;
index 2224b4d07bf0116b478f9d8cf565c424f9b64b78..44a88a9fa2c87724408fb1268cb46b84b87b3746 100644 (file)
@@ -124,10 +124,10 @@ int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
        while (*s && len > 0) {
                if (*s & 0x80) {
                        size = utf8_to_utf32(s, len, &u);
-                       if (size < 0) {
-                               /* Ignore character and move on */
-                               size = 1;
-                       } else if (u >= PLANE_SIZE) {
+                       if (size < 0)
+                               return -EINVAL;
+
+                       if (u >= PLANE_SIZE) {
                                u -= PLANE_SIZE;
                                *op++ = (wchar_t) (SURROGATE_PAIR |
                                                ((u >> 10) & SURROGATE_BITS));
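utf8s_to_utf16s() now rejects an invalid sequence with -EINVAL instead of silently skipping bytes, and the VFAT caller earlier in this series propagates that error (or -ENAMETOOLONG once the result exceeds 255 units). Below is a user-space sketch of the same fail-fast calling pattern; the decoder is a simplified stand-in and, unlike real validation, does not reject overlong or surrogate encodings.

        #include <errno.h>
        #include <stdio.h>

        static int utf8_decode(const unsigned char *s, int len, unsigned int *cp)
        {
                unsigned int u;
                int n, i;

                if (len < 1)
                        return -1;
                if (s[0] < 0x80) {
                        *cp = s[0];
                        return 1;
                } else if ((s[0] & 0xe0) == 0xc0) {
                        u = s[0] & 0x1f; n = 2;
                } else if ((s[0] & 0xf0) == 0xe0) {
                        u = s[0] & 0x0f; n = 3;
                } else if ((s[0] & 0xf8) == 0xf0) {
                        u = s[0] & 0x07; n = 4;
                } else {
                        return -1;              /* stray continuation or bad lead byte */
                }
                if (len < n)
                        return -1;              /* truncated sequence */
                for (i = 1; i < n; i++) {
                        if ((s[i] & 0xc0) != 0x80)
                                return -1;
                        u = (u << 6) | (s[i] & 0x3f);
                }
                *cp = u;
                return n;
        }

        /* Mirrors the new caller pattern: abort on the first invalid sequence. */
        static int to_codepoints(const unsigned char *s, int len,
                                 unsigned int *out, int max)
        {
                int produced = 0;

                while (len > 0) {
                        unsigned int cp;
                        int n = utf8_decode(s, len, &cp);

                        if (n < 0)
                                return -EINVAL;
                        if (produced >= max)
                                return -ENAMETOOLONG;
                        out[produced++] = cp;
                        s += n;
                        len -= n;
                }
                return produced;
        }

        int main(void)
        {
                unsigned int buf[255];
                int r = to_codepoints((const unsigned char *)"na\xc3\xafve", 6,
                                      buf, 255);

                printf("decoded %d code points\n", r);
                r = to_codepoints((const unsigned char *)"bad\xc3", 4, buf, 255);
                printf("invalid input -> %d\n", r);     /* -EINVAL */
                return 0;
        }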
index 09cc25d04611bfac57adfd422ba519a1c7a5664b..c452d116b89251690bcc393681340bf743a5ac20 100644 (file)
@@ -966,7 +966,7 @@ static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
 }
 #endif  /* CONFIG_DEBUG_FS */
 
-static struct file_operations o2hb_debug_fops = {
+static const struct file_operations o2hb_debug_fops = {
        .open =         o2hb_debug_open,
        .release =      o2hb_debug_release,
        .read =         o2hb_debug_read,
index cfb2be708abe33b255ed8f3208f93569a92339e8..da794bc07a6c27ba82b964179a968c6b720317fb 100644 (file)
@@ -207,7 +207,7 @@ static int nst_fop_release(struct inode *inode, struct file *file)
        return seq_release_private(inode, file);
 }
 
-static struct file_operations nst_seq_fops = {
+static const struct file_operations nst_seq_fops = {
        .open = nst_fop_open,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -388,7 +388,7 @@ static int sc_fop_release(struct inode *inode, struct file *file)
        return seq_release_private(inode, file);
 }
 
-static struct file_operations sc_seq_fops = {
+static const struct file_operations sc_seq_fops = {
        .open = sc_fop_open,
        .read = seq_read,
        .llseek = seq_lseek,
index ca46002ec10ea7ae1fa3da82e27a5f010f47bd92..42b0bad7a612459c8f3e6857cbf829cab964bd57 100644 (file)
@@ -478,7 +478,7 @@ bail:
        return -ENOMEM;
 }
 
-static struct file_operations debug_purgelist_fops = {
+static const struct file_operations debug_purgelist_fops = {
        .open =         debug_purgelist_open,
        .release =      debug_buffer_release,
        .read =         debug_buffer_read,
@@ -538,7 +538,7 @@ bail:
        return -ENOMEM;
 }
 
-static struct file_operations debug_mle_fops = {
+static const struct file_operations debug_mle_fops = {
        .open =         debug_mle_open,
        .release =      debug_buffer_release,
        .read =         debug_buffer_read,
@@ -741,7 +741,7 @@ static int debug_lockres_release(struct inode *inode, struct file *file)
        return seq_release_private(inode, file);
 }
 
-static struct file_operations debug_lockres_fops = {
+static const struct file_operations debug_lockres_fops = {
        .open =         debug_lockres_open,
        .release =      debug_lockres_release,
        .read =         seq_read,
@@ -925,7 +925,7 @@ bail:
        return -ENOMEM;
 }
 
-static struct file_operations debug_state_fops = {
+static const struct file_operations debug_state_fops = {
        .open =         debug_state_open,
        .release =      debug_buffer_release,
        .read =         debug_buffer_read,
index 4cc3c890a2cd48a0d13dc8cb7da4efabf0c22e16..c0e48aeebb1c3877862853ec1761bed05e6f9355 100644 (file)
@@ -373,7 +373,7 @@ static ssize_t ocfs2_debug_read(struct file *file, char __user *buf,
 }
 #endif /* CONFIG_DEBUG_FS */
 
-static struct file_operations ocfs2_osb_debug_fops = {
+static const struct file_operations ocfs2_osb_debug_fops = {
        .open =         ocfs2_osb_debug_open,
        .release =      ocfs2_debug_release,
        .read =         ocfs2_debug_read,
index 3680bae335b5b37464dbf6ba126855be69e8d021..b42d6241903472820723c5c2cb693f1b040158e0 100644 (file)
@@ -498,7 +498,7 @@ const struct inode_operations omfs_dir_inops = {
        .rmdir = omfs_rmdir,
 };
 
-struct file_operations omfs_dir_operations = {
+const struct file_operations omfs_dir_operations = {
        .read = generic_read_dir,
        .readdir = omfs_readdir,
        .llseek = generic_file_llseek,
index 4845fbb18e6e96c579384e3dfa53271a0b577343..399487c093646a86ea910756d127d53e4e813668 100644 (file)
@@ -322,7 +322,7 @@ static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, omfs_get_block);
 }
 
-struct file_operations omfs_file_operations = {
+const struct file_operations omfs_file_operations = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
index df71039945ac2f15124bdba6e2a7c3c16ac485dc..ebe2fdbe535ec07f95aa0981e2cc53f12d912cdf 100644 (file)
@@ -44,14 +44,14 @@ extern int omfs_allocate_range(struct super_block *sb, int min_request,
 extern int omfs_clear_range(struct super_block *sb, u64 block, int count);
 
 /* dir.c */
-extern struct file_operations omfs_dir_operations;
+extern const struct file_operations omfs_dir_operations;
 extern const struct inode_operations omfs_dir_inops;
 extern int omfs_make_empty(struct inode *inode, struct super_block *sb);
 extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
                        u64 fsblock);
 
 /* file.c */
-extern struct file_operations omfs_file_operations;
+extern const struct file_operations omfs_file_operations;
 extern const struct inode_operations omfs_file_inops;
 extern const struct address_space_operations omfs_aops;
 extern void omfs_make_empty_table(struct buffer_head *bh, int offset);
index 7b685e10cbad8cc9b052cec48737fbf7bf6f7afb..f38fee0311a7a82b3ee19ece135c3e17b38e1cdd 100644 (file)
@@ -248,19 +248,11 @@ ssize_t part_stat_show(struct device *dev,
                part_stat_read(p, merges[WRITE]),
                (unsigned long long)part_stat_read(p, sectors[WRITE]),
                jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
-               part_in_flight(p),
+               p->in_flight,
                jiffies_to_msecs(part_stat_read(p, io_ticks)),
                jiffies_to_msecs(part_stat_read(p, time_in_queue)));
 }
 
-ssize_t part_inflight_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
-{
-       struct hd_struct *p = dev_to_part(dev);
-
-       return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
-}
-
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 ssize_t part_fail_show(struct device *dev,
                       struct device_attribute *attr, char *buf)
@@ -289,7 +281,6 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
-static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
        __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -301,7 +292,6 @@ static struct attribute *part_attrs[] = {
        &dev_attr_size.attr,
        &dev_attr_alignment_offset.attr,
        &dev_attr_stat.attr,
-       &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
        &dev_attr_fail.attr,
 #endif
index 56013371f9f3cdcd8c89a4455233522a415291aa..a44a7897fd4d20988aa933a89fab2d9c1600d53a 100644 (file)
@@ -23,7 +23,6 @@
 #include <asm/io.h>
 #include <linux/list.h>
 #include <linux/ioport.h>
-#include <linux/mm.h>
 #include <linux/memory.h>
 #include <asm/sections.h>
 
index 2281c2cbfe2b4ddeefc5d4417de1e40dd7df4dfb..5033ce0d254b90dcd0ca19162c6cbe9df254c87d 100644 (file)
@@ -94,6 +94,7 @@ static const struct file_operations proc_kpagecount_operations = {
 #define KPF_COMPOUND_TAIL      16
 #define KPF_HUGE               17
 #define KPF_UNEVICTABLE                18
+#define KPF_HWPOISON           19
 #define KPF_NOPAGE             20
 
 #define KPF_KSM                        21
@@ -180,6 +181,10 @@ static u64 get_uflags(struct page *page)
        u |= kpf_copy_bit(k, KPF_UNEVICTABLE,   PG_unevictable);
        u |= kpf_copy_bit(k, KPF_MLOCKED,       PG_mlocked);
 
+#ifdef CONFIG_MEMORY_FAILURE
+       u |= kpf_copy_bit(k, KPF_HWPOISON,      PG_hwpoison);
+#endif
+
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
        u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
 #endif
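
[editor's note] KPF_HWPOISON (bit 19) is now reported through /proc/kpageflags when CONFIG_MEMORY_FAILURE is set. A hedged userspace sketch of testing that bit for a given page frame number; the helper name is made up and error handling is trimmed, but the 64-bit-per-PFN layout of /proc/kpageflags is as documented in Documentation/vm/pagemap.txt:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	#define KPF_HWPOISON	19

	/* fd = open("/proc/kpageflags", O_RDONLY); returns 1, 0 or -1 on error */
	static int pfn_is_hwpoisoned(int fd, uint64_t pfn)
	{
		uint64_t flags;

		if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags))
			return -1;
		return (flags >> KPF_HWPOISON) & 1;
	}
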
index a201fc370223b01c918ae9767432c3060c6de95e..fd38ce2e32e349ba3c418dbc454957dd0e3ad958 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/syscalls.h>
 #include <linux/module.h>
 #include <linux/slab.h>
index 9cca3785cab881c854236b00a37fa5e478f06a3a..66d6106a2067fefc1f4154e500d93ea889f94cd6 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_GENERIC_GPIO_H
 #define _ASM_GENERIC_GPIO_H
 
+#include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 
index ae1e9e16695946e7d79eb6ea91d034f047c004f9..b69347b8904ff29f465a5d2adb29b1385b939e02 100644 (file)
@@ -387,6 +387,7 @@ struct drm_crtc {
  * @get_modes: get mode list for this connector
  * @set_property: property for this connector may need update
  * @destroy: make object go away
+ * @force: notify the driver the connector is forced on
  *
  * Each CRTC may have one or more connectors attached to it.  The functions
  * below allow the core DRM code to control connectors, enumerate available modes,
@@ -401,6 +402,7 @@ struct drm_connector_funcs {
        int (*set_property)(struct drm_connector *connector, struct drm_property *property,
                             uint64_t val);
        void (*destroy)(struct drm_connector *connector);
+       void (*force)(struct drm_connector *connector);
 };
 
 struct drm_encoder_funcs {
@@ -429,6 +431,13 @@ struct drm_encoder {
        void *helper_private;
 };
 
+enum drm_connector_force {
+       DRM_FORCE_UNSPECIFIED,
+       DRM_FORCE_OFF,
+       DRM_FORCE_ON,         /* force on analog part normally */
+       DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
 /**
  * drm_connector - central DRM connector control structure
  * @crtc: CRTC this connector is currently connected to, NULL if none
@@ -478,9 +487,12 @@ struct drm_connector {
 
        void *helper_private;
 
+       /* forced on connector */
+       enum drm_connector_force force;
        uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
        uint32_t force_encoder_id;
        struct drm_encoder *encoder; /* currently active encoder */
+       void *fb_helper_private;
 };
 
 /**
@@ -746,7 +758,7 @@ extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
 extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
                                int hdisplay, int vdisplay, int vrefresh,
-                               bool reduced, bool interlaced);
+                               bool reduced, bool interlaced, bool margins);
 extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
                                int hdisplay, int vdisplay, int vrefresh,
                                bool interlaced, int margins);
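
[editor's note] The new connector->force field and ->force() hook let the core tell a driver that a connector has been forced on or off (for instance from the video= command line) and skip real detection. A rough sketch of how a driver and the probe helper might honour it; my_connector_force() and the exact status mapping are assumptions for illustration, not code from this diff:

	static void my_connector_force(struct drm_connector *connector)
	{
		/* assumption: program the hardware as if the connector were
		 * physically present, since detect() will not be called */
	}

	/* a probe path might short-circuit detection when forced: */
	if (connector->force == DRM_FORCE_ON ||
	    connector->force == DRM_FORCE_ON_DIGITAL)
		status = connector_status_connected;
	else if (connector->force == DRM_FORCE_OFF)
		status = connector_status_disconnected;
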
index 4c8dacaf4f583314966340e470aaee5430018389..b29e20168b5f76d33fe572d42333070069647a1f 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <linux/fb.h>
 
+#include "drm_fb_helper.h"
 struct drm_crtc_helper_funcs {
        /*
         * Control power levels on the CRTC.  If the mode passed in is
@@ -60,6 +61,9 @@ struct drm_crtc_helper_funcs {
        /* Move the crtc on the current fb to the given position *optional* */
        int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
                             struct drm_framebuffer *old_fb);
+
+       /* reload the current crtc LUT */
+       void (*load_lut)(struct drm_crtc *crtc);
 };
 
 struct drm_encoder_helper_funcs {
@@ -119,10 +123,11 @@ static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
        encoder->helper_private = (void *)funcs;
 }
 
-static inline void drm_connector_helper_add(struct drm_connector *connector,
+static inline int drm_connector_helper_add(struct drm_connector *connector,
                                            const struct drm_connector_helper_funcs *funcs)
 {
        connector->helper_private = (void *)funcs;
+       return drm_fb_helper_add_connector(connector);
 }
 
 extern int drm_helper_resume_force_mode(struct drm_device *dev);
index 88fffbdfa26f95a07460c2f8d7db8c12f4eba967..58c892a2cbfac410cfee2613acfec0dd66b750c6 100644 (file)
@@ -35,9 +35,30 @@ struct drm_fb_helper_crtc {
        struct drm_mode_set mode_set;
 };
 
+
 struct drm_fb_helper_funcs {
        void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
                          u16 blue, int regno);
+       void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+                         u16 *blue, int regno);
+};
+
+/* mode specified on the command line */
+struct drm_fb_helper_cmdline_mode {
+       bool specified;
+       bool refresh_specified;
+       bool bpp_specified;
+       int xres, yres;
+       int bpp;
+       int refresh;
+       bool rb;
+       bool interlace;
+       bool cvt;
+       bool margins;
+};
+
+struct drm_fb_helper_connector {
+       struct drm_fb_helper_cmdline_mode cmdline_mode;
 };
 
 struct drm_fb_helper {
@@ -52,11 +73,14 @@ struct drm_fb_helper {
 };
 
 int drm_fb_helper_single_fb_probe(struct drm_device *dev,
+                                 int preferred_bpp,
                                  int (*fb_create)(struct drm_device *dev,
                                                   uint32_t fb_width,
                                                   uint32_t fb_height,
                                                   uint32_t surface_width,
                                                   uint32_t surface_height,
+                                                  uint32_t surface_depth,
+                                                  uint32_t surface_bpp,
                                                   struct drm_framebuffer **fb_ptr));
 int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count,
                                  int max_conn);
@@ -77,6 +101,11 @@ int drm_fb_helper_setcolreg(unsigned regno,
 void drm_fb_helper_restore(void);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
                            uint32_t fb_width, uint32_t fb_height);
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+                           uint32_t depth);
+
+int drm_fb_helper_add_connector(struct drm_connector *connector);
+int drm_fb_helper_parse_command_line(struct drm_device *dev);
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
 #endif
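
[editor's note] drm_fb_helper_fill_fix() now takes the framebuffer depth in addition to the pitch, and drivers are expected to parse command-line modes before probing an fbdev framebuffer. A minimal sketch of the driver-side calls under the new prototypes, assuming dev, info and fb are the usual drm_device, fb_info and drm_framebuffer pointers and fb_width/fb_height come from the fb_create callback:

	/* once at load time, before the first fbdev probe */
	drm_fb_helper_parse_command_line(dev);

	/* when the fbdev framebuffer is set up */
	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
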
index 3f6e545609be892f79374033d79a851372d1e2fd..e6f3b120f51a5e4f9d51bf694b8ecba0b8d7503c 100644 (file)
@@ -80,7 +80,7 @@
        {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
        {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
        {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
-       {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+       {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
        {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
        {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
        {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
        {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
-       {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+       {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
        {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
        {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
        {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
index 086e5c362d3a7c57064278135d36064f3a6c2a2a..817b23705c91f62d1ed3a066eddcffc724da77c4 100644 (file)
@@ -397,7 +397,7 @@ struct atmdev_ops { /* only send is required */
        int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
            void __user *optval,int optlen);
        int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
-           void __user *optval,int optlen);
+           void __user *optval,unsigned int optlen);
        int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
        int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
        void (*phy_put)(struct atm_dev *dev,unsigned char value,
index e23a86cae5ac9a328fd2fe1cc7c4eecc0741760c..25119041e034c9f51bdbbbfe4d3ad146ac9ad9c4 100644 (file)
@@ -82,7 +82,6 @@ enum rq_cmd_type_bits {
 enum {
        REQ_LB_OP_EJECT = 0x40,         /* eject request */
        REQ_LB_OP_FLUSH = 0x41,         /* flush request */
-       REQ_LB_OP_DISCARD = 0x42,       /* discard sectors */
 };
 
 /*
@@ -261,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unplug_fn) (struct request_queue *);
-typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -313,6 +311,7 @@ struct queue_limits {
        unsigned int            alignment_offset;
        unsigned int            io_min;
        unsigned int            io_opt;
+       unsigned int            max_discard_sectors;
 
        unsigned short          logical_block_size;
        unsigned short          max_hw_segments;
@@ -340,7 +339,6 @@ struct request_queue
        make_request_fn         *make_request_fn;
        prep_rq_fn              *prep_rq_fn;
        unplug_fn               *unplug_fn;
-       prepare_discard_fn      *prepare_discard_fn;
        merge_bvec_fn           *merge_bvec_fn;
        prepare_flush_fn        *prepare_flush_fn;
        softirq_done_fn         *softirq_done_fn;
@@ -460,6 +458,7 @@ struct request_queue
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15      /* do IO stats */
 #define QUEUE_FLAG_CQ         16       /* hardware does queuing */
+#define QUEUE_FLAG_DISCARD     17      /* supports DISCARD */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_CLUSTER) |            \
@@ -591,6 +590,7 @@ enum {
 #define blk_queue_flushing(q)  ((q)->ordseq)
 #define blk_queue_stackable(q) \
        test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
+#define blk_queue_discard(q)   test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 
 #define blk_fs_request(rq)     ((rq)->cmd_type == REQ_TYPE_FS)
 #define blk_pc_request(rq)     ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -929,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+               unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -955,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
@@ -1080,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
        return q->limits.physical_block_size;
 }
 
+static inline int bdev_physical_block_size(struct block_device *bdev)
+{
+       return queue_physical_block_size(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_min(struct request_queue *q)
 {
        return q->limits.io_min;
 }
 
+static inline int bdev_io_min(struct block_device *bdev)
+{
+       return queue_io_min(bdev_get_queue(bdev));
+}
+
 static inline unsigned int queue_io_opt(struct request_queue *q)
 {
        return q->limits.io_opt;
 }
 
+static inline int bdev_io_opt(struct block_device *bdev)
+{
+       return queue_io_opt(bdev_get_queue(bdev));
+}
+
 static inline int queue_alignment_offset(struct request_queue *q)
 {
-       if (q && q->limits.misaligned)
+       if (q->limits.misaligned)
                return -1;
 
-       if (q && q->limits.alignment_offset)
-               return q->limits.alignment_offset;
-
-       return 0;
+       return q->limits.alignment_offset;
 }
 
 static inline int queue_sector_alignment_offset(struct request_queue *q,
@@ -1108,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q,
                & (q->limits.io_min - 1);
 }
 
+static inline int bdev_alignment_offset(struct block_device *bdev)
+{
+       struct request_queue *q = bdev_get_queue(bdev);
+
+       if (q->limits.misaligned)
+               return -1;
+
+       if (bdev != bdev->bd_contains)
+               return bdev->bd_part->alignment_offset;
+
+       return q->limits.alignment_offset;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
        return q ? q->dma_alignment : 511;
@@ -1146,7 +1172,11 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
+struct delayed_work;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q,
+                                       struct delayed_work *work,
+                                       unsigned long delay);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
        MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
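
[editor's note] With prepare_discard_fn removed, discard support is now advertised purely through the queue limits plus QUEUE_FLAG_DISCARD, and consumers test it with blk_queue_discard(). A hedged sketch of what a driver able to throw away sectors might do at probe time; the helper name and the 8 MiB (16384-sector) limit are made up for illustration:

	static void my_driver_enable_discard(struct request_queue *q)
	{
		/* largest single discard the device accepts, in 512-byte sectors */
		blk_queue_max_discard_sectors(q, 16384);
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	}

	/* submitters and stacking drivers can then check the flag: */
	if (blk_queue_discard(q))
		/* safe to issue DISCARD requests to this queue */;
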
index 7e4350ece0f8dd495398ca4cbf41c95e995b6525..3b73b9992b261125a22ef70a46bebc88a2eba23a 100644 (file)
@@ -198,6 +198,7 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                           char __user *arg);
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);
+extern void blk_trace_remove_sysfs(struct device *dev);
 extern int blk_trace_init_sysfs(struct device *dev);
 
 extern struct attribute_group blk_trace_attr_group;
@@ -211,6 +212,7 @@ extern struct attribute_group blk_trace_attr_group;
 # define blk_trace_startstop(q, start)                 (-ENOTTY)
 # define blk_trace_remove(q)                           (-ENOTTY)
 # define blk_add_trace_msg(q, fmt, ...)                        do { } while (0)
+# define blk_trace_remove_sysfs(dev)                   do { } while (0)
 static inline int blk_trace_init_sysfs(struct device *dev)
 {
        return 0;
index b62bb9294d0c594618515c5f0214e963f7e3a904..0008dee665148850f2b46af2918b23635cedc7c0 100644 (file)
@@ -37,7 +37,7 @@ extern void cgroup_exit(struct task_struct *p, int run_callbacks);
 extern int cgroupstats_build(struct cgroupstats *stats,
                                struct dentry *dentry);
 
-extern struct file_operations proc_cgroup_operations;
+extern const struct file_operations proc_cgroup_operations;
 
 /* Define the enumeration of all cgroup subsystems */
 #define SUBSYS(_x) _x ## _subsys_id,
index 47ebf416f5129aa67026eee08fc64948d2f69259..3a14615fd35c57cb2c42940c8f3e0e3dd9c78c9b 100644 (file)
@@ -132,11 +132,8 @@ struct cn_callback_id {
 };
 
 struct cn_callback_data {
-       void (*destruct_data) (void *);
-       void *ddata;
-       
-       void *callback_priv;
-       void (*callback) (struct cn_msg *);
+       struct sk_buff *skb;
+       void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
 
        void *free;
 };
@@ -167,11 +164,11 @@ struct cn_dev {
        struct cn_queue_dev *cbdev;
 };
 
-int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *));
+int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
 void cn_del_callback(struct cb_id *);
 int cn_netlink_send(struct cn_msg *, u32, gfp_t);
 
-int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *));
+int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
 
 int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
index 2adaa2529f184fda637a6a2aeea55bda41693104..2620a8c63571a673f29a802a8144fec5014575f3 100644 (file)
@@ -300,6 +300,10 @@ struct inodes_stat_t {
 #define BLKTRACESTOP _IO(0x12,117)
 #define BLKTRACETEARDOWN _IO(0x12,118)
 #define BLKDISCARD _IO(0x12,119)
+#define BLKIOMIN _IO(0x12,120)
+#define BLKIOOPT _IO(0x12,121)
+#define BLKALIGNOFF _IO(0x12,122)
+#define BLKPBSZGET _IO(0x12,123)
 
 #define BMAP_IOCTL 1           /* obsolete - kept for compatibility */
 #define FIBMAP    _IO(0x00,1)  /* bmap access */
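
[editor's note] The four new ioctls expose the I/O topology limits (minimum and optimal I/O size, alignment offset, physical block size) to userspace. A small userspace sketch, assuming fd is an open block device and the updated linux/fs.h header is in use; each value comes back through an int-sized argument:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	static void print_topology(int fd)
	{
		unsigned int io_min = 0, io_opt = 0, pbsz = 0;
		int align = 0;

		ioctl(fd, BLKIOMIN, &io_min);
		ioctl(fd, BLKIOOPT, &io_opt);
		ioctl(fd, BLKPBSZGET, &pbsz);
		ioctl(fd, BLKALIGNOFF, &align);
		printf("io_min=%u io_opt=%u physical=%u align=%d\n",
		       io_min, io_opt, pbsz, align);
	}
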
@@ -2446,7 +2450,7 @@ static int __fops ## _open(struct inode *inode, struct file *file)        \
        __simple_attr_check_format(__fmt, 0ull);                        \
        return simple_attr_open(inode, file, __get, __set, __fmt);      \
 }                                                                      \
-static struct file_operations __fops = {                               \
+static const struct file_operations __fops = {                         \
        .owner   = THIS_MODULE,                                         \
        .open    = __fops ## _open,                                     \
        .release = simple_attr_release,                                 \
index 297df45ffd0ad27e8219aaf3715a00d030f5bf54..7beaa21b3880051c341d55358b5312cdfb10c65a 100644 (file)
@@ -98,7 +98,7 @@ struct hd_struct {
        int make_it_fail;
 #endif
        unsigned long stamp;
-       int in_flight[2];
+       int in_flight;
 #ifdef CONFIG_SMP
        struct disk_stats *dkstats;
 #else
@@ -322,23 +322,18 @@ static inline void free_part_stats(struct hd_struct *part)
 #define part_stat_sub(cpu, gendiskp, field, subnd)                     \
        part_stat_add(cpu, gendiskp, field, -subnd)
 
-static inline void part_inc_in_flight(struct hd_struct *part, int rw)
+static inline void part_inc_in_flight(struct hd_struct *part)
 {
-       part->in_flight[rw]++;
+       part->in_flight++;
        if (part->partno)
-               part_to_disk(part)->part0.in_flight[rw]++;
+               part_to_disk(part)->part0.in_flight++;
 }
 
-static inline void part_dec_in_flight(struct hd_struct *part, int rw)
+static inline void part_dec_in_flight(struct hd_struct *part)
 {
-       part->in_flight[rw]--;
+       part->in_flight--;
        if (part->partno)
-               part_to_disk(part)->part0.in_flight[rw]--;
-}
-
-static inline int part_in_flight(struct hd_struct *part)
-{
-       return part->in_flight[0] + part->in_flight[1];
+               part_to_disk(part)->part0.in_flight--;
 }
 
 /* block/blk-core.c */
@@ -551,8 +546,6 @@ extern ssize_t part_size_show(struct device *dev,
                              struct device_attribute *attr, char *buf);
 extern ssize_t part_stat_show(struct device *dev,
                              struct device_attribute *attr, char *buf);
-extern ssize_t part_inflight_show(struct device *dev,
-                             struct device_attribute *attr, char *buf);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 extern ssize_t part_fail_show(struct device *dev,
                              struct device_attribute *attr, char *buf);
index 5eb9b0f857e0720569eb04373ef42fb51bb04c9a..5a9aae4adb444a3c8a63b5b9c5afd2c4456396de 100644 (file)
@@ -44,7 +44,7 @@ struct ip_tunnel_prl {
        __u16                   flags;
        __u16                   __reserved;
        __u32                   datalen;
-       __u32                   rs_delay;
+       __u32                   __reserved2;
        /* data follows */
 };
 
index 52695d3dfd0b2e672951902dac0830fc14a2a537..f1011f7f3d4142bf5771007661cec4cd8a6c3315 100644 (file)
@@ -464,9 +464,9 @@ struct handle_s
  */
 struct transaction_chp_stats_s {
        unsigned long           cs_chp_time;
-       unsigned long           cs_forced_to_close;
-       unsigned long           cs_written;
-       unsigned long           cs_dropped;
+       __u32                   cs_forced_to_close;
+       __u32                   cs_written;
+       __u32                   cs_dropped;
 };
 
 /* The transaction_t type is the guts of the journaling mechanism.  It
@@ -668,23 +668,16 @@ struct transaction_run_stats_s {
        unsigned long           rs_flushing;
        unsigned long           rs_logging;
 
-       unsigned long           rs_handle_count;
-       unsigned long           rs_blocks;
-       unsigned long           rs_blocks_logged;
+       __u32                   rs_handle_count;
+       __u32                   rs_blocks;
+       __u32                   rs_blocks_logged;
 };
 
 struct transaction_stats_s {
-       int                     ts_type;
        unsigned long           ts_tid;
-       union {
-               struct transaction_run_stats_s run;
-               struct transaction_chp_stats_s chp;
-       } u;
+       struct transaction_run_stats_s run;
 };
 
-#define JBD2_STATS_RUN         1
-#define JBD2_STATS_CHECKPOINT  2
-
 static inline unsigned long
 jbd2_time_diff(unsigned long start, unsigned long end)
 {
@@ -988,12 +981,6 @@ struct journal_s
        /*
         * Journal statistics
         */
-       struct transaction_stats_s *j_history;
-       int                     j_history_max;
-       int                     j_history_cur;
-       /*
-        * Protect the transactions statistics history
-        */
        spinlock_t              j_history_lock;
        struct proc_dir_entry   *j_proc_entry;
        struct transaction_stats_s j_stats;
index 0d45b4e8d367e0ec6219f2a98a2b642d9f7e85f4..08bc776d05e27952c92d400eee106cd5d75d8cbe 100644 (file)
@@ -145,14 +145,14 @@ static inline int ip_mroute_opt(int opt)
 #endif
 
 #ifdef CONFIG_IP_MROUTE
-extern int ip_mroute_setsockopt(struct sock *, int, char __user *, int);
+extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
 extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
 extern int ip_mr_init(void);
 #else
 static inline
 int ip_mroute_setsockopt(struct sock *sock,
-                        int optname, char __user *optval, int optlen)
+                        int optname, char __user *optval, unsigned int optlen)
 {
        return -ENOPROTOOPT;
 }
index 43dc97e32183b3f7ac2f444b65966bb62e6140e1..b191865a6ca37e71cb287db4151002523b018d1f 100644 (file)
@@ -134,7 +134,7 @@ static inline int ip6_mroute_opt(int opt)
 struct sock;
 
 #ifdef CONFIG_IPV6_MROUTE
-extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int);
+extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
 extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ip6_mr_input(struct sk_buff *skb);
 extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
@@ -143,7 +143,7 @@ extern void ip6_mr_cleanup(void);
 #else
 static inline
 int ip6_mroute_setsockopt(struct sock *sock,
-                         int optname, char __user *optval, int optlen)
+                         int optname, char __user *optval, unsigned int optlen)
 {
        return -ENOPROTOOPT;
 }
index 9040a10584f7fe0b11ae5012d42e3cf5bec9fcf2..529a0931711da5875cc70e88953dbe9621afd5d3 100644 (file)
@@ -178,11 +178,11 @@ struct proto_ops {
        int             (*listen)    (struct socket *sock, int len);
        int             (*shutdown)  (struct socket *sock, int flags);
        int             (*setsockopt)(struct socket *sock, int level,
-                                     int optname, char __user *optval, int optlen);
+                                     int optname, char __user *optval, unsigned int optlen);
        int             (*getsockopt)(struct socket *sock, int level,
                                      int optname, char __user *optval, int __user *optlen);
        int             (*compat_setsockopt)(struct socket *sock, int level,
-                                     int optname, char __user *optval, int optlen);
+                                     int optname, char __user *optval, unsigned int optlen);
        int             (*compat_getsockopt)(struct socket *sock, int level,
                                      int optname, char __user *optval, int __user *optlen);
        int             (*sendmsg)   (struct kiocb *iocb, struct socket *sock,
@@ -256,7 +256,7 @@ extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
 extern int kernel_getsockopt(struct socket *sock, int level, int optname,
                             char *optval, int *optlen);
 extern int kernel_setsockopt(struct socket *sock, int level, int optname,
-                            char *optval, int optlen);
+                            char *optval, unsigned int optlen);
 extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
                           size_t size, int flags);
 extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
@@ -313,7 +313,7 @@ SOCKCALL_WRAP(name, compat_ioctl, (struct socket *sock, unsigned int cmd, \
 SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \
 SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \
 SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \
-                        char __user *optval, int optlen), (sock, level, optname, optval, optlen)) \
+                        char __user *optval, unsigned int optlen), (sock, level, optname, optval, optlen)) \
 SOCKCALL_WRAP(name, getsockopt, (struct socket *sock, int level, int optname, \
                         char __user *optval, int __user *optlen), (sock, level, optname, optval, optlen)) \
 SOCKCALL_WRAP(name, sendmsg, (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len), \
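
[editor's note] The optlen parameter flips from int to unsigned int across the whole setsockopt chain (proto_ops, struct proto, kernel_setsockopt and the per-protocol helpers below), so every handler changes in lockstep and the old negative-length checks become unnecessary. A minimal sketch of a protocol method written against the new signature; my_proto_setsockopt is an illustrative name only:

	static int my_proto_setsockopt(struct sock *sk, int level, int optname,
				       char __user *optval, unsigned int optlen)
	{
		int val;

		if (optlen < sizeof(int))	/* no "< 0" check needed any more */
			return -EINVAL;
		if (get_user(val, (int __user *)optval))
			return -EFAULT;
		/* ... apply val for optname ... */
		return 0;
	}
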
index 48cfe51bfddcd989396d5c3fc2c71b2581f3caa2..6132b5e6d9d336e80571b4c5e3f4e99ac6427a61 100644 (file)
@@ -221,12 +221,12 @@ __ret;})
 
 /* Call setsockopt() */
 int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
-                 int len);
+                 unsigned int len);
 int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  int *len);
 
 int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
-               char __user *opt, int len);
+               char __user *opt, unsigned int len);
 int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
                char __user *opt, int *len);
 
index fa287f25138dc2a243e27825f92d670b210a4b6b..6673743946f776d2bd7a61bacd68afdc6fd94f36 100644 (file)
@@ -6,10 +6,10 @@
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
+#include <linux/ktime.h>
 #include <linux/wait.h>
 #include <linux/string.h>
 #include <linux/fs.h>
-#include <linux/sched.h>
 #include <asm/uaccess.h>
 
 /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
index 731af71cddc9b1a8f3fb326f08f4d0bec048e08e..fcb9884df6181827120dcb64efd7a2104fd157d7 100644 (file)
@@ -114,8 +114,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
 int __must_check res_counter_charge_locked(struct res_counter *counter,
                unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
-               unsigned long val, struct res_counter **limit_fail_at,
-               struct res_counter **soft_limit_at);
+               unsigned long val, struct res_counter **limit_fail_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
@@ -128,8 +127,7 @@ int __must_check res_counter_charge(struct res_counter *counter,
  */
 
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
-void res_counter_uncharge(struct res_counter *counter, unsigned long val,
-                               bool *was_soft_limit_excess);
+void res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
 static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
 {
index fe661afe071325f635e7d7211035ff77bddbf8e0..db532ce288beec2f2a57bb9eb03912ede8f8afc9 100644 (file)
 /* Qualcomm MSM SoCs */
 #define PORT_MSM       88
 
+/* BCM63xx family SoCs */
+#define PORT_BCM63XX   89
+
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
index 3b461dffe244a120f012294ebf46ba718526e04c..3273a0c5043b96321bf6dda00bc44d4d4c254003 100644 (file)
@@ -16,7 +16,7 @@ struct __kernel_sockaddr_storage {
                                /* _SS_MAXSIZE value minus size of ss_family */
 } __attribute__ ((aligned(_K_SS_ALIGNSIZE)));  /* force desired alignment */
 
-#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+#ifdef __KERNEL__
 
 #include <asm/socket.h>                        /* arch-dependent defines       */
 #include <linux/sockios.h>             /* the SIOCxxx I/O controls     */
@@ -100,21 +100,6 @@ struct cmsghdr {
                             ((mhdr)->msg_controllen - \
                              ((char *)(cmsg) - (char *)(mhdr)->msg_control)))
 
-/*
- *     This mess will go away with glibc
- */
-#ifdef __KERNEL__
-#define __KINLINE static inline
-#elif  defined(__GNUC__) 
-#define __KINLINE static __inline__
-#elif defined(__cplusplus)
-#define __KINLINE static inline
-#else
-#define __KINLINE static
-#endif
-
-
 /*
  *     Get the next cmsg header
  *
@@ -128,7 +113,7 @@ struct cmsghdr {
  *     ancillary object DATA.                          --ANK (980731)
  */
  
-__KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
+static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
                                               struct cmsghdr *__cmsg)
 {
        struct cmsghdr * __ptr;
@@ -140,7 +125,7 @@ __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
        return __ptr;
 }
 
-__KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
+static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
 {
        return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
 }
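
[editor's note] With the header now kernel-only, the __KINLINE compatibility shim disappears and cmsg_nxthdr() becomes a plain static inline; in-kernel users keep walking control messages the same way. A short sketch, where msg is a struct msghdr * and handle_cmsg() is a hypothetical per-type handler:

	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		handle_cmsg(cmsg);	/* hypothetical helper */
	}
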
index 5bbf8bf9efea73f5eb714cf97d8e8d2c1a5ef95c..7c3002832d05a62f98be2fcc9145807f618e9095 100644 (file)
@@ -40,8 +40,8 @@ extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
 
 extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
 
-extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, int,
-       int (*)(struct sock *, int, int, char __user *, int));
+extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
+       int (*)(struct sock *, int, int, char __user *, unsigned int));
 extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
        int __user *, int (*)(struct sock *, int, int, char __user *,
                                int __user *));
index 03cffd9f64e3ee8413caf2d5f72368f00d5fbecb..696d6e4ce68a0e72b857b1d569c1bb98c5ceeca0 100644 (file)
@@ -48,13 +48,13 @@ struct inet_connection_sock_af_ops {
        u16         net_header_len;
        u16         sockaddr_len;
        int         (*setsockopt)(struct sock *sk, int level, int optname, 
-                                 char __user *optval, int optlen);
+                                 char __user *optval, unsigned int optlen);
        int         (*getsockopt)(struct sock *sk, int level, int optname, 
                                  char __user *optval, int __user *optlen);
 #ifdef CONFIG_COMPAT
        int         (*compat_setsockopt)(struct sock *sk,
                                int level, int optname,
-                               char __user *optval, int optlen);
+                               char __user *optval, unsigned int optlen);
        int         (*compat_getsockopt)(struct sock *sk,
                                int level, int optname,
                                char __user *optval, int __user *optlen);
@@ -332,5 +332,5 @@ extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
                                      char __user *optval, int __user *optlen);
 extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, int optlen);
+                                     char __user *optval, unsigned int optlen);
 #endif /* _INET_CONNECTION_SOCK_H */
index 5b26a0bd178ecc031d357786f519bfdacde0f8eb..2f47e5482b552a1d5bf95a54d5101ab80ca0899a 100644 (file)
@@ -381,10 +381,10 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
 extern void    ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
 extern int     ip_cmsg_send(struct net *net,
                             struct msghdr *msg, struct ipcm_cookie *ipc);
-extern int     ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen);
+extern int     ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
 extern int     ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
 extern int     compat_ip_setsockopt(struct sock *sk, int level,
-                       int optname, char __user *optval, int optlen);
+                       int optname, char __user *optval, unsigned int optlen);
 extern int     compat_ip_getsockopt(struct sock *sk, int level,
                        int optname, char __user *optval, int __user *optlen);
 extern int     ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
index 76e3ea6e2fe5248b6cb44a2bb9d68ded3b7f4d28..87acf8f3a15527cd0446e1dd6f75bdd60a636ba4 100644 (file)
@@ -27,18 +27,11 @@ struct ip_tunnel
        unsigned int                    prl_count;      /* # of entries in PRL */
 };
 
-/* ISATAP: default interval between RS in secondy */
-#define IPTUNNEL_RS_DEFAULT_DELAY      (900)
-
 struct ip_tunnel_prl_entry
 {
        struct ip_tunnel_prl_entry      *next;
        __be32                          addr;
        u16                             flags;
-       unsigned long                   rs_delay;
-       struct timer_list               rs_timer;
-       struct ip_tunnel                *tunnel;
-       spinlock_t                      lock;
 };
 
 #define IPTUNNEL_XMIT() do {                                           \
index ad9a5113025444d77d24fb0721e312c6ac83e9f6..8c31d8a0c1fe8812104bb91049cf208aea412b4a 100644 (file)
@@ -550,7 +550,7 @@ extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
 extern int                     ipv6_setsockopt(struct sock *sk, int level, 
                                                int optname,
                                                char __user *optval, 
-                                               int optlen);
+                                               unsigned int optlen);
 extern int                     ipv6_getsockopt(struct sock *sk, int level, 
                                                int optname,
                                                char __user *optval, 
@@ -559,7 +559,7 @@ extern int                  compat_ipv6_setsockopt(struct sock *sk,
                                                int level,
                                                int optname,
                                                char __user *optval,
-                                               int optlen);
+                                               unsigned int optlen);
 extern int                     compat_ipv6_getsockopt(struct sock *sk,
                                                int level,
                                                int optname,
index 42d00ced5eb876da48c61fcac021bd1658e532ca..6e5f0e0c7967e9f78589ba4f5d1f56ab09c2e2aa 100644 (file)
@@ -544,7 +544,7 @@ struct sctp_af {
                                         int level,
                                         int optname,
                                         char __user *optval,
-                                        int optlen);
+                                        unsigned int optlen);
        int             (*getsockopt)   (struct sock *sk,
                                         int level,
                                         int optname,
@@ -554,7 +554,7 @@ struct sctp_af {
                                         int level,
                                         int optname,
                                         char __user *optval,
-                                        int optlen);
+                                        unsigned int optlen);
        int             (*compat_getsockopt)    (struct sock *sk,
                                         int level,
                                         int optname,
index 950409dcec3d5763bf4822c4fb8599e20808c728..1621935aad5bc5b3e69299c136c685dd42dbff92 100644 (file)
@@ -624,7 +624,7 @@ struct proto {
        void                    (*shutdown)(struct sock *sk, int how);
        int                     (*setsockopt)(struct sock *sk, int level, 
                                        int optname, char __user *optval,
-                                       int optlen);
+                                       unsigned int optlen);
        int                     (*getsockopt)(struct sock *sk, int level, 
                                        int optname, char __user *optval, 
                                        int __user *option);     
@@ -632,7 +632,7 @@ struct proto {
        int                     (*compat_setsockopt)(struct sock *sk,
                                        int level,
                                        int optname, char __user *optval,
-                                       int optlen);
+                                       unsigned int optlen);
        int                     (*compat_getsockopt)(struct sock *sk,
                                        int level,
                                        int optname, char __user *optval,
@@ -951,7 +951,7 @@ extern void                 sock_rfree(struct sk_buff *skb);
 
 extern int                     sock_setsockopt(struct socket *sock, int level,
                                                int op, char __user *optval,
-                                               int optlen);
+                                               unsigned int optlen);
 
 extern int                     sock_getsockopt(struct socket *sock, int level,
                                                int op, char __user *optval, 
@@ -993,7 +993,7 @@ extern int                      sock_no_shutdown(struct socket *, int);
 extern int                     sock_no_getsockopt(struct socket *, int , int,
                                                   char __user *, int __user *);
 extern int                     sock_no_setsockopt(struct socket *, int, int,
-                                                  char __user *, int);
+                                                  char __user *, unsigned int);
 extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
                                                struct msghdr *, size_t);
 extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
@@ -1015,11 +1015,11 @@ extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
 extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size, int flags);
 extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
-                                 char __user *optval, int optlen);
+                                 char __user *optval, unsigned int optlen);
 extern int compat_sock_common_getsockopt(struct socket *sock, int level,
                int optname, char __user *optval, int __user *optlen);
 extern int compat_sock_common_setsockopt(struct socket *sock, int level,
-               int optname, char __user *optval, int optlen);
+               int optname, char __user *optval, unsigned int optlen);
 
 extern void sk_common_release(struct sock *sk);
 
index 56b76027b85eeb4c7e4de930750e9584f6326489..03a49c7033774c9769921e69cd233d55882ad667 100644 (file)
@@ -394,13 +394,13 @@ extern int                        tcp_getsockopt(struct sock *sk, int level,
                                               int __user *optlen);
 extern int                     tcp_setsockopt(struct sock *sk, int level, 
                                               int optname, char __user *optval, 
-                                              int optlen);
+                                              unsigned int optlen);
 extern int                     compat_tcp_getsockopt(struct sock *sk,
                                        int level, int optname,
                                        char __user *optval, int __user *optlen);
 extern int                     compat_tcp_setsockopt(struct sock *sk,
                                        int level, int optname,
-                                       char __user *optval, int optlen);
+                                       char __user *optval, unsigned int optlen);
 extern void                    tcp_set_keepalive(struct sock *sk, int val);
 extern int                     tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
                                            struct msghdr *msg,
index 5fb029f817a3574fb8b7648dbb793d2ff29248b5..f98abd2ce709bd5e784aa0ca79cd483a8ebbbbd5 100644 (file)
@@ -144,7 +144,7 @@ extern unsigned int udp_poll(struct file *file, struct socket *sock,
 extern int     udp_lib_getsockopt(struct sock *sk, int level, int optname,
                                   char __user *optval, int __user *optlen);
 extern int     udp_lib_setsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, int optlen,
+                                  char __user *optval, unsigned int optlen,
                                   int (*push_pending_frames)(struct sock *));
 
 extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
index 6d76a39a9c5b4cdf08842fd6e46c482afbfe72f3..3f2b94de2cfa6863b7c947884616b503c47e77d1 100644 (file)
@@ -14,6 +14,7 @@ extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cm
                             void __user *arg);
 extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
                                    unsigned long arg);
+extern struct iw_statistics *get_wireless_stats(struct net_device *dev);
 #else
 static inline int wext_proc_init(struct net *net)
 {
index 9a3b4986517324cf50d98db5cfdb169b32ff3711..d696a692d94a677af807292b8aef92c80785ea50 100644 (file)
@@ -279,7 +279,7 @@ extern struct pccard_resource_ops pccard_iodyn_ops;
 extern struct pccard_resource_ops pccard_nonstatic_ops;
 
 /* socket drivers are expected to use these callbacks in their .drv struct */
-extern int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state);
+extern int pcmcia_socket_dev_suspend(struct device *dev);
 extern int pcmcia_socket_dev_resume(struct device *dev);
 
 /* socket drivers use this callback in their IRQ handler */
index d86af94691c2c4148711f29880187b0587409581..00405b5f624a2d742a1d95d2a6fb59d530b6e369 100644 (file)
@@ -488,6 +488,39 @@ TRACE_EVENT(block_remap,
                  (unsigned long long)__entry->old_sector)
 );
 
+TRACE_EVENT(block_rq_remap,
+
+       TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
+                sector_t from),
+
+       TP_ARGS(q, rq, dev, from),
+
+       TP_STRUCT__entry(
+               __field( dev_t,         dev             )
+               __field( sector_t,      sector          )
+               __field( unsigned int,  nr_sector       )
+               __field( dev_t,         old_dev         )
+               __field( sector_t,      old_sector      )
+               __array( char,          rwbs,   6       )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = disk_devt(rq->rq_disk);
+               __entry->sector         = blk_rq_pos(rq);
+               __entry->nr_sector      = blk_rq_sectors(rq);
+               __entry->old_dev        = dev;
+               __entry->old_sector     = from;
+               blk_fill_rwbs_rq(__entry->rwbs, rq);
+       ),
+
+       TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+                 (unsigned long long)__entry->sector,
+                 __entry->nr_sector,
+                 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
+                 (unsigned long long)__entry->old_sector)
+);
+
 #endif /* _TRACE_BLOCK_H */
 
 /* This part must be outside protection */
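
[editor's note] TRACE_EVENT(block_rq_remap, ...) generates a trace_block_rq_remap() call that a stacking driver fires when it redirects an already-formed request to another device, recording both the new and the original location. A hedged sketch of a call site; clone, original_disk and orig_pos stand in for the remapper's own state and are not from this diff:

	/* after retargeting 'clone' at the underlying device */
	trace_block_rq_remap(clone->q, clone,
			     disk_devt(original_disk),	/* device the request came from */
			     orig_pos);			/* its original starting sector */
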
index c1bd8f1e8b94e0eefcf67b45a398c0bc07ccc37c..d09550bf3f951ec4a9230f9156a5fd29190b9e13 100644 (file)
@@ -11,6 +11,7 @@ struct ext4_allocation_context;
 struct ext4_allocation_request;
 struct ext4_prealloc_space;
 struct ext4_inode_info;
+struct mpage_da_data;
 
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
@@ -236,6 +237,7 @@ TRACE_EVENT(ext4_da_writepages,
                __field(        char,   for_kupdate             )
                __field(        char,   for_reclaim             )
                __field(        char,   range_cyclic            )
+               __field(       pgoff_t, writeback_index         )
        ),
 
        TP_fast_assign(
@@ -249,15 +251,17 @@ TRACE_EVENT(ext4_da_writepages,
                __entry->for_kupdate    = wbc->for_kupdate;
                __entry->for_reclaim    = wbc->for_reclaim;
                __entry->range_cyclic   = wbc->range_cyclic;
+               __entry->writeback_index = inode->i_mapping->writeback_index;
        ),
 
-       TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d",
+       TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu",
                  jbd2_dev_to_name(__entry->dev),
                  (unsigned long) __entry->ino, __entry->nr_to_write,
                  __entry->pages_skipped, __entry->range_start,
                  __entry->range_end, __entry->nonblocking,
                  __entry->for_kupdate, __entry->for_reclaim,
-                 __entry->range_cyclic)
+                 __entry->range_cyclic,
+                 (unsigned long) __entry->writeback_index)
 );
 
 TRACE_EVENT(ext4_da_write_pages,
@@ -309,6 +313,7 @@ TRACE_EVENT(ext4_da_writepages_result,
                __field(        char,   encountered_congestion  )
                __field(        char,   more_io                 )       
                __field(        char,   no_nrwrite_index_update )
+               __field(       pgoff_t, writeback_index         )
        ),
 
        TP_fast_assign(
@@ -320,14 +325,16 @@ TRACE_EVENT(ext4_da_writepages_result,
                __entry->encountered_congestion = wbc->encountered_congestion;
                __entry->more_io        = wbc->more_io;
                __entry->no_nrwrite_index_update = wbc->no_nrwrite_index_update;
+               __entry->writeback_index = inode->i_mapping->writeback_index;
        ),
 
-       TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d",
+       TP_printk("dev %s ino %lu ret %d pages_written %d pages_skipped %ld congestion %d more_io %d no_nrwrite_index_update %d writeback_index %lu",
                  jbd2_dev_to_name(__entry->dev),
                  (unsigned long) __entry->ino, __entry->ret,
                  __entry->pages_written, __entry->pages_skipped,
                  __entry->encountered_congestion, __entry->more_io,
-                 __entry->no_nrwrite_index_update)
+                 __entry->no_nrwrite_index_update,
+                 (unsigned long) __entry->writeback_index)
 );
 
 TRACE_EVENT(ext4_da_write_begin,
@@ -737,6 +744,169 @@ TRACE_EVENT(ext4_alloc_da_blocks,
                  __entry->data_blocks, __entry->meta_blocks)
 );
 
+TRACE_EVENT(ext4_mballoc_alloc,
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u16,  found                   )
+               __field(        __u16,  groups                  )
+               __field(        __u16,  buddy                   )
+               __field(        __u16,  flags                   )
+               __field(        __u16,  tail                    )
+               __field(        __u8,   cr                      )
+               __field(        __u32,  orig_logical            )
+               __field(          int,  orig_start              )
+               __field(        __u32,  orig_group              )
+               __field(          int,  orig_len                )
+               __field(        __u32,  goal_logical            )
+               __field(          int,  goal_start              )
+               __field(        __u32,  goal_group              )
+               __field(          int,  goal_len                )
+               __field(        __u32,  result_logical          )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ac->ac_inode->i_sb->s_dev;
+               __entry->ino            = ac->ac_inode->i_ino;
+               __entry->found          = ac->ac_found;
+               __entry->flags          = ac->ac_flags;
+               __entry->groups         = ac->ac_groups_scanned;
+               __entry->buddy          = ac->ac_buddy;
+               __entry->tail           = ac->ac_tail;
+               __entry->cr             = ac->ac_criteria;
+               __entry->orig_logical   = ac->ac_o_ex.fe_logical;
+               __entry->orig_start     = ac->ac_o_ex.fe_start;
+               __entry->orig_group     = ac->ac_o_ex.fe_group;
+               __entry->orig_len       = ac->ac_o_ex.fe_len;
+               __entry->goal_logical   = ac->ac_g_ex.fe_logical;
+               __entry->goal_start     = ac->ac_g_ex.fe_start;
+               __entry->goal_group     = ac->ac_g_ex.fe_group;
+               __entry->goal_len       = ac->ac_g_ex.fe_len;
+               __entry->result_logical = ac->ac_f_ex.fe_logical;
+               __entry->result_start   = ac->ac_f_ex.fe_start;
+               __entry->result_group   = ac->ac_f_ex.fe_group;
+               __entry->result_len     = ac->ac_f_ex.fe_len;
+       ),
+
+       TP_printk("dev %s inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
+                 "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
+                 "tail %u broken %u",
+                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+                 __entry->orig_group, __entry->orig_start,
+                 __entry->orig_len, __entry->orig_logical,
+                 __entry->goal_group, __entry->goal_start,
+                 __entry->goal_len, __entry->goal_logical,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len, __entry->result_logical,
+                 __entry->found, __entry->groups, __entry->cr,
+                 __entry->flags, __entry->tail,
+                 __entry->buddy ? 1 << __entry->buddy : 0)
+);
+
+TRACE_EVENT(ext4_mballoc_prealloc,
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u32,  orig_logical            )
+               __field(          int,  orig_start              )
+               __field(        __u32,  orig_group              )
+               __field(          int,  orig_len                )
+               __field(        __u32,  result_logical          )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ac->ac_inode->i_sb->s_dev;
+               __entry->ino            = ac->ac_inode->i_ino;
+               __entry->orig_logical   = ac->ac_o_ex.fe_logical;
+               __entry->orig_start     = ac->ac_o_ex.fe_start;
+               __entry->orig_group     = ac->ac_o_ex.fe_group;
+               __entry->orig_len       = ac->ac_o_ex.fe_len;
+               __entry->result_logical = ac->ac_b_ex.fe_logical;
+               __entry->result_start   = ac->ac_b_ex.fe_start;
+               __entry->result_group   = ac->ac_b_ex.fe_group;
+               __entry->result_len     = ac->ac_b_ex.fe_len;
+       ),
+
+       TP_printk("dev %s inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
+                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+                 __entry->orig_group, __entry->orig_start,
+                 __entry->orig_len, __entry->orig_logical,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len, __entry->result_logical)
+);
+
+TRACE_EVENT(ext4_mballoc_discard,
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u32,  result_logical          )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ac->ac_inode->i_sb->s_dev;
+               __entry->ino            = ac->ac_inode->i_ino;
+               __entry->result_logical = ac->ac_b_ex.fe_logical;
+               __entry->result_start   = ac->ac_b_ex.fe_start;
+               __entry->result_group   = ac->ac_b_ex.fe_group;
+               __entry->result_len     = ac->ac_b_ex.fe_len;
+       ),
+
+       TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
+                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len, __entry->result_logical)
+);
+
+TRACE_EVENT(ext4_mballoc_free,
+       TP_PROTO(struct ext4_allocation_context *ac),
+
+       TP_ARGS(ac),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        __u32,  result_logical          )
+               __field(          int,  result_start            )
+               __field(        __u32,  result_group            )
+               __field(          int,  result_len              )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = ac->ac_inode->i_sb->s_dev;
+               __entry->ino            = ac->ac_inode->i_ino;
+               __entry->result_logical = ac->ac_b_ex.fe_logical;
+               __entry->result_start   = ac->ac_b_ex.fe_start;
+               __entry->result_group   = ac->ac_b_ex.fe_group;
+               __entry->result_len     = ac->ac_b_ex.fe_len;
+       ),
+
+       TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
+                 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+                 __entry->result_group, __entry->result_start,
+                 __entry->result_len, __entry->result_logical)
+);
+
 #endif /* _TRACE_EXT4_H */
 
 /* This part must be outside protection */
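
Note on the new ext4 mballoc tracepoints above: each TRACE_EVENT(ext4_mballoc_*) definition expands into a callable trace_ext4_mballoc_*() hook. The fragment below is only an illustrative sketch of how a caller in fs/ext4/mballoc.c might fire one of them once an allocation context has been filled in; the wrapper name is a hypothetical stand-in, and the real mballoc.c hunks are not part of this excerpt.

#include <trace/events/ext4.h>

/* hypothetical call site: report a completed multi-block allocation */
static void example_report_mballoc(struct ext4_allocation_context *ac)
{
        trace_ext4_mballoc_alloc(ac);   /* generated by TRACE_EVENT(ext4_mballoc_alloc, ...) */
}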
index b851f0b4701cf6bc8c1497003025e2c877d32142..3c60b75adb9e226de91d34cb9c635fbc999a9fa6 100644 (file)
@@ -7,6 +7,9 @@
 #include <linux/jbd2.h>
 #include <linux/tracepoint.h>
 
+struct transaction_chp_stats_s;
+struct transaction_run_stats_s;
+
 TRACE_EVENT(jbd2_checkpoint,
 
        TP_PROTO(journal_t *journal, int result),
@@ -162,6 +165,81 @@ TRACE_EVENT(jbd2_submit_inode_data,
                  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
 );
 
+TRACE_EVENT(jbd2_run_stats,
+       TP_PROTO(dev_t dev, unsigned long tid,
+                struct transaction_run_stats_s *stats),
+
+       TP_ARGS(dev, tid, stats),
+
+       TP_STRUCT__entry(
+               __field(                dev_t,  dev             )
+               __field(        unsigned long,  tid             )
+               __field(        unsigned long,  wait            )
+               __field(        unsigned long,  running         )
+               __field(        unsigned long,  locked          )
+               __field(        unsigned long,  flushing        )
+               __field(        unsigned long,  logging         )
+               __field(                __u32,  handle_count    )
+               __field(                __u32,  blocks          )
+               __field(                __u32,  blocks_logged   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = dev;
+               __entry->tid            = tid;
+               __entry->wait           = stats->rs_wait;
+               __entry->running        = stats->rs_running;
+               __entry->locked         = stats->rs_locked;
+               __entry->flushing       = stats->rs_flushing;
+               __entry->logging        = stats->rs_logging;
+               __entry->handle_count   = stats->rs_handle_count;
+               __entry->blocks         = stats->rs_blocks;
+               __entry->blocks_logged  = stats->rs_blocks_logged;
+       ),
+
+       TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u "
+                 "logging %u handle_count %u blocks %u blocks_logged %u",
+                 jbd2_dev_to_name(__entry->dev), __entry->tid,
+                 jiffies_to_msecs(__entry->wait),
+                 jiffies_to_msecs(__entry->running),
+                 jiffies_to_msecs(__entry->locked),
+                 jiffies_to_msecs(__entry->flushing),
+                 jiffies_to_msecs(__entry->logging),
+                 __entry->handle_count, __entry->blocks,
+                 __entry->blocks_logged)
+);
+
+TRACE_EVENT(jbd2_checkpoint_stats,
+       TP_PROTO(dev_t dev, unsigned long tid,
+                struct transaction_chp_stats_s *stats),
+
+       TP_ARGS(dev, tid, stats),
+
+       TP_STRUCT__entry(
+               __field(                dev_t,  dev             )
+               __field(        unsigned long,  tid             )
+               __field(        unsigned long,  chp_time        )
+               __field(                __u32,  forced_to_close )
+               __field(                __u32,  written         )
+               __field(                __u32,  dropped         )
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = dev;
+               __entry->tid            = tid;
+               __entry->chp_time       = stats->cs_chp_time;
+               __entry->forced_to_close= stats->cs_forced_to_close;
+               __entry->written        = stats->cs_written;
+               __entry->dropped        = stats->cs_dropped;
+       ),
+
+       TP_printk("dev %s tid %lu chp_time %u forced_to_close %u "
+                 "written %u dropped %u",
+                 jbd2_dev_to_name(__entry->dev), __entry->tid,
+                 jiffies_to_msecs(__entry->chp_time),
+                 __entry->forced_to_close, __entry->written, __entry->dropped)
+);
+
 #endif /* _TRACE_JBD2_H */
 
 /* This part must be outside protection */
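
The jbd2_run_stats and jbd2_checkpoint_stats events above take the device, the transaction id and a stats structure directly. As an illustrative sketch only (the fs/jbd2 call sites are not part of this excerpt, and the wrapper name below is hypothetical), a commit path would report its timings roughly like this:

#include <linux/jbd2.h>
#include <trace/events/jbd2.h>

/* hypothetical wrapper: emit per-transaction timing once a commit finishes */
static void example_report_commit(journal_t *journal, unsigned long tid,
                                  struct transaction_run_stats_s *run)
{
        trace_jbd2_run_stats(journal->j_fs_dev->bd_dev, tid, run);
}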
index 7ccba4bc5e3b815a9ac9a9c5a0e40d8e70b5c780..ca83b73fba195ac643a594f9cae3abf4347c2f42 100644 (file)
@@ -703,7 +703,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
 static const struct inode_operations cgroup_dir_inode_operations;
-static struct file_operations proc_cgroupstats_operations;
+static const struct file_operations proc_cgroupstats_operations;
 
 static struct backing_dev_info cgroup_backing_dev_info = {
        .name           = "cgroup",
@@ -1863,7 +1863,7 @@ static int cgroup_seqfile_release(struct inode *inode, struct file *file)
        return single_release(inode, file);
 }
 
-static struct file_operations cgroup_seqfile_operations = {
+static const struct file_operations cgroup_seqfile_operations = {
        .read = seq_read,
        .write = cgroup_file_write,
        .llseek = seq_lseek,
@@ -1922,7 +1922,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
        return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
 }
 
-static struct file_operations cgroup_file_operations = {
+static const struct file_operations cgroup_file_operations = {
        .read = cgroup_file_read,
        .write = cgroup_file_write,
        .llseek = generic_file_llseek,
@@ -3369,7 +3369,7 @@ static int cgroup_open(struct inode *inode, struct file *file)
        return single_open(file, proc_cgroup_show, pid);
 }
 
-struct file_operations proc_cgroup_operations = {
+const struct file_operations proc_cgroup_operations = {
        .open           = cgroup_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -3398,7 +3398,7 @@ static int cgroupstats_open(struct inode *inode, struct file *file)
        return single_open(file, proc_cgroupstats_show, NULL);
 }
 
-static struct file_operations proc_cgroupstats_operations = {
+static const struct file_operations proc_cgroupstats_operations = {
        .open = cgroupstats_open,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -3708,8 +3708,10 @@ static void check_for_release(struct cgroup *cgrp)
 void __css_put(struct cgroup_subsys_state *css)
 {
        struct cgroup *cgrp = css->cgroup;
+       int val;
        rcu_read_lock();
-       if (atomic_dec_return(&css->refcnt) == 1) {
+       val = atomic_dec_return(&css->refcnt);
+       if (val == 1) {
                if (notify_on_release(cgrp)) {
                        set_bit(CGRP_RELEASABLE, &cgrp->flags);
                        check_for_release(cgrp);
@@ -3717,6 +3719,7 @@ void __css_put(struct cgroup_subsys_state *css)
                cgroup_wakeup_rmdir_waiter(cgrp);
        }
        rcu_read_unlock();
+       WARN_ON_ONCE(val < 1);
 }
 
 /*
index 6d7020490f94fdc3563f81c0582935492f724a34..3e1c36e7998fdbeffa17142bade5c358c61a2857 100644 (file)
@@ -726,8 +726,6 @@ static int hrtimer_switch_to_hres(void)
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
-       printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
-              smp_processor_id());
        return 1;
 }
 
index cfadc1291d0badb247275feacb720fa3408b9c44..5240d75f4c60e95f95eb55e0cc46b448e2d6eb7c 100644 (file)
@@ -1333,7 +1333,7 @@ static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
        return seq_open(filp, &kprobes_seq_ops);
 }
 
-static struct file_operations debugfs_kprobes_operations = {
+static const struct file_operations debugfs_kprobes_operations = {
        .open           = kprobes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -1515,7 +1515,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
        return count;
 }
 
-static struct file_operations fops_kp = {
+static const struct file_operations fops_kp = {
        .read =         read_enabled_file_bool,
        .write =        write_enabled_file_bool,
 };
index fe748a86d452030b8d44fcb7731c1badeb93c33c..8b7d8805819d07d998467db967c777c04021ec9e 100644 (file)
@@ -1992,12 +1992,14 @@ static inline unsigned long layout_symtab(struct module *mod,
                                          Elf_Shdr *sechdrs,
                                          unsigned int symindex,
                                          unsigned int strindex,
-                                         const Elf_Hdr *hdr,
+                                         const Elf_Ehdr *hdr,
                                          const char *secstrings,
                                          unsigned long *pstroffs,
                                          unsigned long *strmap)
 {
+       return 0;
 }
+
 static inline void add_kallsyms(struct module *mod,
                                Elf_Shdr *sechdrs,
                                unsigned int shnum,
@@ -2081,9 +2083,8 @@ static noinline struct module *load_module(void __user *umod,
        struct module *mod;
        long err = 0;
        void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
-#ifdef CONFIG_KALLSYMS
        unsigned long symoffs, stroffs, *strmap;
-#endif
+
        mm_segment_t old_fs;
 
        DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
index c89f5e9fd17397130836e77f9c178d7f2e280f0b..179e6ad80dc03a44a853641175a547c6153f360e 100644 (file)
@@ -93,7 +93,7 @@ static int rcudata_open(struct inode *inode, struct file *file)
        return single_open(file, show_rcudata, NULL);
 }
 
-static struct file_operations rcudata_fops = {
+static const struct file_operations rcudata_fops = {
        .owner = THIS_MODULE,
        .open = rcudata_open,
        .read = seq_read,
@@ -145,7 +145,7 @@ static int rcudata_csv_open(struct inode *inode, struct file *file)
        return single_open(file, show_rcudata_csv, NULL);
 }
 
-static struct file_operations rcudata_csv_fops = {
+static const struct file_operations rcudata_csv_fops = {
        .owner = THIS_MODULE,
        .open = rcudata_csv_open,
        .read = seq_read,
@@ -196,7 +196,7 @@ static int rcuhier_open(struct inode *inode, struct file *file)
        return single_open(file, show_rcuhier, NULL);
 }
 
-static struct file_operations rcuhier_fops = {
+static const struct file_operations rcuhier_fops = {
        .owner = THIS_MODULE,
        .open = rcuhier_open,
        .read = seq_read,
@@ -222,7 +222,7 @@ static int rcugp_open(struct inode *inode, struct file *file)
        return single_open(file, show_rcugp, NULL);
 }
 
-static struct file_operations rcugp_fops = {
+static const struct file_operations rcugp_fops = {
        .owner = THIS_MODULE,
        .open = rcugp_open,
        .read = seq_read,
@@ -276,7 +276,7 @@ static int rcu_pending_open(struct inode *inode, struct file *file)
        return single_open(file, show_rcu_pending, NULL);
 }
 
-static struct file_operations rcu_pending_fops = {
+static const struct file_operations rcu_pending_fops = {
        .owner = THIS_MODULE,
        .open = rcu_pending_open,
        .read = seq_read,
index 88faec23e83301e5b64490318aba2ac722dfb00c..bcdabf37c40b58165d29a2e7dc33b41ef9a6d547 100644 (file)
@@ -37,27 +37,17 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
 }
 
 int res_counter_charge(struct res_counter *counter, unsigned long val,
-                       struct res_counter **limit_fail_at,
-                       struct res_counter **soft_limit_fail_at)
+                       struct res_counter **limit_fail_at)
 {
        int ret;
        unsigned long flags;
        struct res_counter *c, *u;
 
        *limit_fail_at = NULL;
-       if (soft_limit_fail_at)
-               *soft_limit_fail_at = NULL;
        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
                ret = res_counter_charge_locked(c, val);
-               /*
-                * With soft limits, we return the highest ancestor
-                * that exceeds its soft limit
-                */
-               if (soft_limit_fail_at &&
-                       !res_counter_soft_limit_check_locked(c))
-                       *soft_limit_fail_at = c;
                spin_unlock(&c->lock);
                if (ret < 0) {
                        *limit_fail_at = c;
@@ -85,8 +75,7 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
        counter->usage -= val;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val,
-                               bool *was_soft_limit_excess)
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
 {
        unsigned long flags;
        struct res_counter *c;
@@ -94,9 +83,6 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val,
        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
-               if (was_soft_limit_excess)
-                       *was_soft_limit_excess =
-                               !res_counter_soft_limit_check_locked(c);
                res_counter_uncharge_locked(c, val);
                spin_unlock(&c->lock);
        }
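
With the soft-limit plumbing removed, res_counter_charge() keeps only the hard-limit failure pointer and res_counter_uncharge() loses its output flag entirely. Below is a minimal sketch of the resulting calling convention; the function and the counter it charges are hypothetical, not hunks from this patch.

#include <linux/errno.h>
#include <linux/res_counter.h>

/* hypothetical caller: charge one page, then give the charge back */
static int example_charge_one_page(struct res_counter *cnt)
{
        struct res_counter *fail_at;

        if (res_counter_charge(cnt, PAGE_SIZE, &fail_at))
                return -ENOMEM; /* fail_at names the ancestor that refused the charge */

        /* ... caller would use the charged page here ... */
        res_counter_uncharge(cnt, PAGE_SIZE);
        return 0;
}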
index ee61f454a98b9005ed7055b6f3e4bba5f2036ebe..1535f3884b88ebd7c33bff7c0e5970a96980a29d 100644 (file)
@@ -780,7 +780,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
        return single_open(filp, sched_feat_show, NULL);
 }
 
-static struct file_operations sched_feat_fops = {
+static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
index ac2e1dc708bdfb4f090aa1b6a743861bc5941c21..479ce5682d7c41c858557086de9113b0d71fa00c 100644 (file)
@@ -127,7 +127,7 @@ again:
        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);
 
-       if (cmpxchg(&scd->clock, old_clock, clock) != old_clock)
+       if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
                goto again;
 
        return clock;
@@ -163,7 +163,7 @@ again:
                val = remote_clock;
        }
 
-       if (cmpxchg(ptr, old_val, val) != old_val)
+       if (cmpxchg64(ptr, old_val, val) != old_val)
                goto again;
 
        return val;
index e0f59a21c06110b0dad7c0a5d031b40c9d3c660a..89aed5933ed4f23b4edef0e8ef6026dfe777dae7 100644 (file)
@@ -231,6 +231,13 @@ void tick_nohz_stop_sched_tick(int inidle)
        if (!inidle && !ts->inidle)
                goto end;
 
+       /*
+        * Set ts->inidle unconditionally. Even if the system did not
+        * switch to NOHZ mode, the cpu frequency governors rely on the
+        * update of the idle time accounting in tick_nohz_start_idle().
+        */
+       ts->inidle = 1;
+
        now = tick_nohz_start_idle(ts);
 
        /*
@@ -248,8 +255,6 @@ void tick_nohz_stop_sched_tick(int inidle)
        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
                goto end;
 
-       ts->inidle = 1;
-
        if (need_resched())
                goto end;
 
index fddd69d16e039a5d75f9093856d4abec8c6d10b6..1b5b7aa2fdfd094fec018efbc533b65a686b1339 100644 (file)
@@ -275,7 +275,7 @@ static int timer_list_open(struct inode *inode, struct file *filp)
        return single_open(filp, timer_list_show, NULL);
 }
 
-static struct file_operations timer_list_fops = {
+static const struct file_operations timer_list_fops = {
        .open           = timer_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
index 4cde8b9c716f39b778ab7dfe5ead8985c0eb2c97..ee5681f8d7ecd2f1429d5455ea2839f12921b42f 100644 (file)
@@ -395,7 +395,7 @@ static int tstats_open(struct inode *inode, struct file *filp)
        return single_open(filp, tstats_show, NULL);
 }
 
-static struct file_operations tstats_fops = {
+static const struct file_operations tstats_fops = {
        .open           = tstats_open,
        .read           = seq_read,
        .write          = tstats_write,
index 3eb159c277c85a7a9d30f3b94d1a68bfa52661b0..d9d6206e0b148f5c67e5ecda22a7c7992eec76d7 100644 (file)
@@ -855,6 +855,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
                        sizeof(r), &r);
 }
 
+/**
+ * blk_add_trace_rq_remap - Add a trace for a request-remap operation
+ * @q:         queue the io is for
+ * @rq:                the source request
+ * @dev:       target device
+ * @from:      source sector
+ *
+ * Description:
+ *     Device mapper remaps request to other devices.
+ *     Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_rq_remap(struct request_queue *q,
+                                  struct request *rq, dev_t dev,
+                                  sector_t from)
+{
+       struct blk_trace *bt = q->blk_trace;
+       struct blk_io_trace_remap r;
+
+       if (likely(!bt))
+               return;
+
+       r.device_from = cpu_to_be32(dev);
+       r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
+       r.sector_from = cpu_to_be64(from);
+
+       __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+                       rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
+                       sizeof(r), &r);
+}
+
 /**
  * blk_add_driver_data - Add binary message with driver-specific data
  * @q:         queue the io is for
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
        WARN_ON(ret);
        ret = register_trace_block_remap(blk_add_trace_remap);
        WARN_ON(ret);
+       ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
+       WARN_ON(ret);
 }
 
 static void blk_unregister_tracepoints(void)
 {
+       unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
        unregister_trace_block_remap(blk_add_trace_remap);
        unregister_trace_block_split(blk_add_trace_split);
        unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
        return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
 }
 
+void blk_trace_remove_sysfs(struct device *dev)
+{
+       sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
+}
+
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 
 #ifdef CONFIG_EVENT_TRACING
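
blk_add_trace_rq_remap() above is registered on the block_rq_remap tracepoint, so request-based remapping shows up in blktrace alongside the existing bio remap events. The sketch below shows how a request-based remapper might fire that tracepoint; the function and the orig_dev/orig_sector names are illustrative assumptions, not code from this patch.

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* hypothetical remapper hook: record where the request used to live */
static void example_note_rq_remap(struct request_queue *q, struct request *clone,
                                  dev_t orig_dev, sector_t orig_sector)
{
        trace_block_rq_remap(q, clone, orig_dev, orig_sector);
}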
index 46592feab5a6b0cf47e5cc147f43e1669efcf712..3724756e41ca9771d311101a38d2f408d416ef6a 100644 (file)
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
        if (ftrace_trace_function == ftrace_stub)
                return;
 
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        func = ftrace_trace_function;
+#else
+       func = __ftrace_trace_function;
+#endif
 
        if (ftrace_pid_trace) {
                set_ftrace_pid_function(func);
index 81b1645c85490175beeea335fcebecb12fd4783a..a91da69f153ad0c859997356d53db6548006db7c 100644 (file)
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
                return 1;
        }
 
-       if (!register_tracer(&kmem_tracer)) {
+       if (register_tracer(&kmem_tracer) != 0) {
                pr_warning("Warning: could not register the kmem tracer\n");
                return 1;
        }
index b91839e9e892a08eb7726dc7b668d289c16f09ad..33bed5e67a2110b2201856b8828b457110147728 100644 (file)
@@ -1771,7 +1771,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
                 * advance both strings to next white space
                 */
                if (*fmt == '*') {
-                       while (!isspace(*fmt) && *fmt)
+                       while (!isspace(*fmt) && *fmt != '%' && *fmt)
                                fmt++;
                        while (!isspace(*str) && *str)
                                str++;
index edd300aca17309cfb6c2e1ef9cef2fcf36de1d74..57963c6063d1be3158bbc01c3e69dc0473b1d524 100644 (file)
@@ -224,7 +224,9 @@ config KSM
          the many instances by a single resident page with that content, so
          saving memory until one or another app needs to modify the content.
          Recommended for use with KVM, or with other duplicative applications.
-         See Documentation/vm/ksm.txt for more information.
+         See Documentation/vm/ksm.txt for more information: KSM is inactive
+         until a program has madvised that an area is MADV_MERGEABLE, and
+         root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
 config DEFAULT_MMAP_MIN_ADDR
         int "Low address space to protect from user allocation"
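
The KSM help text above describes a two-step activation: a program marks a region MADV_MERGEABLE, then root writes 1 to /sys/kernel/mm/ksm/run. As a small userspace illustration of the first step (the mapping is assumed to exist elsewhere; this is not part of the patch):

#include <sys/mman.h>

/* mark an existing anonymous mapping as a candidate for KSM merging */
static int example_mark_mergeable(void *addr, size_t len)
{
        return madvise(addr, len, MADV_MERGEABLE);
}

Root would then enable scanning with: echo 1 > /sys/kernel/mm/ksm/run (available only when CONFIG_SYSFS is set, per the help text).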
index f7edac356f465275031110db70c1e57aafbc5cda..bef1af4f77e36186503c9fe2f91530954a456f3f 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
                sizeof(struct __struct), __alignof__(struct __struct),\
                (__flags), NULL)
 
-static void __init ksm_init_max_kernel_pages(void)
-{
-       ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
-}
-
 static int __init ksm_slab_init(void)
 {
        rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1673,7 +1668,7 @@ static int __init ksm_init(void)
        struct task_struct *ksm_thread;
        int err;
 
-       ksm_init_max_kernel_pages();
+       ksm_max_kernel_pages = totalram_pages / 4;
 
        err = ksm_slab_init();
        if (err)
@@ -1697,6 +1692,9 @@ static int __init ksm_init(void)
                kthread_stop(ksm_thread);
                goto out_free2;
        }
+#else
+       ksm_run = KSM_RUN_MERGE;        /* no way for user to start it */
+
 #endif /* CONFIG_SYSFS */
 
        return 0;
index e2b98a6875c079b36f103b724130462591094735..f99f5991d6bba1b5ec224f0631124144a6c0a8bb 100644 (file)
@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page)
 static void
 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
-                               struct mem_cgroup_tree_per_zone *mctz)
+                               struct mem_cgroup_tree_per_zone *mctz,
+                               unsigned long long new_usage_in_excess)
 {
        struct rb_node **p = &mctz->rb_root.rb_node;
        struct rb_node *parent = NULL;
@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
        if (mz->on_tree)
                return;
 
-       mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res);
+       mz->usage_in_excess = new_usage_in_excess;
+       if (!mz->usage_in_excess)
+               return;
        while (*p) {
                parent = *p;
                mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
@@ -352,16 +355,6 @@ __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
        mz->on_tree = false;
 }
 
-static void
-mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
-                               struct mem_cgroup_per_zone *mz,
-                               struct mem_cgroup_tree_per_zone *mctz)
-{
-       spin_lock(&mctz->lock);
-       __mem_cgroup_insert_exceeded(mem, mz, mctz);
-       spin_unlock(&mctz->lock);
-}
-
 static void
 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
@@ -392,34 +385,36 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
 
 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
 {
-       unsigned long long prev_usage_in_excess, new_usage_in_excess;
-       bool updated_tree = false;
+       unsigned long long excess;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;
-
-       mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page));
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
        mctz = soft_limit_tree_from_page(page);
 
        /*
-        * We do updates in lazy mode, mem's are removed
-        * lazily from the per-zone, per-node rb tree
+        * Necessary to update all ancestors when hierarchy is used,
+        * because their event counter is not touched.
         */
-       prev_usage_in_excess = mz->usage_in_excess;
-
-       new_usage_in_excess = res_counter_soft_limit_excess(&mem->res);
-       if (prev_usage_in_excess) {
-               mem_cgroup_remove_exceeded(mem, mz, mctz);
-               updated_tree = true;
-       }
-       if (!new_usage_in_excess)
-               goto done;
-       mem_cgroup_insert_exceeded(mem, mz, mctz);
-
-done:
-       if (updated_tree) {
-               spin_lock(&mctz->lock);
-               mz->usage_in_excess = new_usage_in_excess;
-               spin_unlock(&mctz->lock);
+       for (; mem; mem = parent_mem_cgroup(mem)) {
+               mz = mem_cgroup_zoneinfo(mem, nid, zid);
+               excess = res_counter_soft_limit_excess(&mem->res);
+               /*
+                * We have to update the tree if mz is on RB-tree or
+                * mem is over its softlimit.
+                */
+               if (excess || mz->on_tree) {
+                       spin_lock(&mctz->lock);
+                       /* if on-tree, remove it */
+                       if (mz->on_tree)
+                               __mem_cgroup_remove_exceeded(mem, mz, mctz);
+                       /*
+                        * Insert again. mz->usage_in_excess will be updated.
+                        * If excess is 0, no tree ops.
+                        */
+                       __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
+                       spin_unlock(&mctz->lock);
+               }
        }
 }
 
@@ -447,9 +442,10 @@ static struct mem_cgroup_per_zone *
 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 {
        struct rb_node *rightmost = NULL;
-       struct mem_cgroup_per_zone *mz = NULL;
+       struct mem_cgroup_per_zone *mz;
 
 retry:
+       mz = NULL;
        rightmost = rb_last(&mctz->rb_root);
        if (!rightmost)
                goto done;              /* Nothing to reclaim from */
@@ -1270,9 +1266,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        gfp_t gfp_mask, struct mem_cgroup **memcg,
                        bool oom, struct page *page)
 {
-       struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit;
+       struct mem_cgroup *mem, *mem_over_limit;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
-       struct res_counter *fail_res, *soft_fail_res = NULL;
+       struct res_counter *fail_res;
 
        if (unlikely(test_thread_flag(TIF_MEMDIE))) {
                /* Don't account this! */
@@ -1304,17 +1300,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 
                if (mem_cgroup_is_root(mem))
                        goto done;
-               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res,
-                                               &soft_fail_res);
+               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
                if (likely(!ret)) {
                        if (!do_swap_account)
                                break;
                        ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
-                                                       &fail_res, NULL);
+                                                       &fail_res);
                        if (likely(!ret))
                                break;
                        /* mem+swap counter fails */
-                       res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&mem->res, PAGE_SIZE);
                        flags |= MEM_CGROUP_RECLAIM_NOSWAP;
                        mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                        memsw);
@@ -1353,16 +1348,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                }
        }
        /*
-        * Insert just the ancestor, we should trickle down to the correct
-        * cgroup for reclaim, since the other nodes will be below their
-        * soft limit
+        * Insert ancestor (and ancestor's ancestors) into the softlimit
+        * RB-tree if they exceed their softlimit.
         */
-       if (soft_fail_res) {
-               mem_over_soft_limit =
-                       mem_cgroup_from_res_counter(soft_fail_res, res);
-               if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
-                       mem_cgroup_update_tree(mem_over_soft_limit, page);
-       }
+       if (mem_cgroup_soft_limit_check(mem))
+               mem_cgroup_update_tree(mem, page);
 done:
        return 0;
 nomem:
@@ -1437,10 +1427,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
                if (!mem_cgroup_is_root(mem)) {
-                       res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&mem->res, PAGE_SIZE);
                        if (do_swap_account)
-                               res_counter_uncharge(&mem->memsw, PAGE_SIZE,
-                                                       NULL);
+                               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
                }
                css_put(&mem->css);
                return;
@@ -1519,7 +1508,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
                goto out;
 
        if (!mem_cgroup_is_root(from))
-               res_counter_uncharge(&from->res, PAGE_SIZE, NULL);
+               res_counter_uncharge(&from->res, PAGE_SIZE);
        mem_cgroup_charge_statistics(from, pc, false);
 
        page = pc->page;
@@ -1539,7 +1528,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
        }
 
        if (do_swap_account && !mem_cgroup_is_root(from))
-               res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL);
+               res_counter_uncharge(&from->memsw, PAGE_SIZE);
        css_put(&from->css);
 
        css_get(&to->css);
@@ -1610,9 +1599,9 @@ uncharge:
        css_put(&parent->css);
        /* uncharge if move fails */
        if (!mem_cgroup_is_root(parent)) {
-               res_counter_uncharge(&parent->res, PAGE_SIZE, NULL);
+               res_counter_uncharge(&parent->res, PAGE_SIZE);
                if (do_swap_account)
-                       res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&parent->memsw, PAGE_SIZE);
        }
        return ret;
 }
@@ -1803,8 +1792,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                         * calling css_tryget
                         */
                        if (!mem_cgroup_is_root(memcg))
-                               res_counter_uncharge(&memcg->memsw, PAGE_SIZE,
-                                                       NULL);
+                               res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                        mem_cgroup_swap_statistics(memcg, false);
                        mem_cgroup_put(memcg);
                }
@@ -1831,9 +1819,9 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
        if (!mem)
                return;
        if (!mem_cgroup_is_root(mem)) {
-               res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+               res_counter_uncharge(&mem->res, PAGE_SIZE);
                if (do_swap_account)
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
        }
        css_put(&mem->css);
 }
@@ -1848,7 +1836,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        struct page_cgroup *pc;
        struct mem_cgroup *mem = NULL;
        struct mem_cgroup_per_zone *mz;
-       bool soft_limit_excess = false;
 
        if (mem_cgroup_disabled())
                return NULL;
@@ -1888,10 +1875,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        }
 
        if (!mem_cgroup_is_root(mem)) {
-               res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess);
+               res_counter_uncharge(&mem->res, PAGE_SIZE);
                if (do_swap_account &&
                                (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
        }
        if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
                mem_cgroup_swap_statistics(mem, true);
@@ -1908,7 +1895,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        mz = page_cgroup_zoneinfo(pc);
        unlock_page_cgroup(pc);
 
-       if (soft_limit_excess && mem_cgroup_soft_limit_check(mem))
+       if (mem_cgroup_soft_limit_check(mem))
                mem_cgroup_update_tree(mem, page);
        /* at swapout, this memcg will be accessed to record to swap */
        if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
@@ -1986,7 +1973,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
                 * This memcg can be obsolete one. We avoid calling css_tryget
                 */
                if (!mem_cgroup_is_root(memcg))
-                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL);
+                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
                mem_cgroup_swap_statistics(memcg, false);
                mem_cgroup_put(memcg);
        }
@@ -2233,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
        unsigned long reclaimed;
        int loop = 0;
        struct mem_cgroup_tree_per_zone *mctz;
+       unsigned long long excess;
 
        if (order > 0)
                return 0;
@@ -2284,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                        break;
                        } while (1);
                }
-               mz->usage_in_excess =
-                       res_counter_soft_limit_excess(&mz->mem->res);
                __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+               excess = res_counter_soft_limit_excess(&mz->mem->res);
                /*
                 * One school of thought says that we should not add
                 * back the node to the tree if reclaim returns 0.
@@ -2295,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                 * memory to reclaim from. Consider this as a longer
                 * term TODO.
                 */
-               if (mz->usage_in_excess)
-                       __mem_cgroup_insert_exceeded(mz->mem, mz, mctz);
+               /* If excess == 0, no tree ops */
+               __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
                spin_unlock(&mctz->lock);
                css_put(&mz->mem->css);
                loop++;
index 43d8cacfdaa5e4f0e1e3ba0b5e8275570e948292..4a048abad0436d2bd883fd25395b1e44c0b6b99f 100644 (file)
@@ -1043,7 +1043,9 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  */
 static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
+       static int warn_limit = 10;
        struct pcpu_chunk *chunk;
+       const char *err;
        int slot, off;
 
        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
@@ -1059,11 +1061,14 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
        if (reserved && pcpu_reserved_chunk) {
                chunk = pcpu_reserved_chunk;
                if (size > chunk->contig_hint ||
-                   pcpu_extend_area_map(chunk) < 0)
+                   pcpu_extend_area_map(chunk) < 0) {
+                       err = "failed to extend area map of reserved chunk";
                        goto fail_unlock;
+               }
                off = pcpu_alloc_area(chunk, size, align);
                if (off >= 0)
                        goto area_found;
+               err = "alloc from reserved chunk failed";
                goto fail_unlock;
        }
 
@@ -1080,6 +1085,7 @@ restart:
                        case 1:
                                goto restart;   /* pcpu_lock dropped, restart */
                        default:
+                               err = "failed to extend area map";
                                goto fail_unlock;
                        }
 
@@ -1093,8 +1099,10 @@ restart:
        spin_unlock_irq(&pcpu_lock);
 
        chunk = alloc_pcpu_chunk();
-       if (!chunk)
+       if (!chunk) {
+               err = "failed to allocate new chunk";
                goto fail_unlock_mutex;
+       }
 
        spin_lock_irq(&pcpu_lock);
        pcpu_chunk_relocate(chunk, -1);
@@ -1107,6 +1115,7 @@ area_found:
        if (pcpu_populate_chunk(chunk, off, size)) {
                spin_lock_irq(&pcpu_lock);
                pcpu_free_area(chunk, off);
+               err = "failed to populate";
                goto fail_unlock;
        }
 
@@ -1119,6 +1128,13 @@ fail_unlock:
        spin_unlock_irq(&pcpu_lock);
 fail_unlock_mutex:
        mutex_unlock(&pcpu_alloc_mutex);
+       if (warn_limit) {
+               pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
+                          "%s\n", size, align, err);
+               dump_stack();
+               if (!--warn_limit)
+                       pr_info("PERCPU: limit reached, disable warning\n");
+       }
        return NULL;
 }
 
@@ -1347,6 +1363,10 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
        struct pcpu_alloc_info *ai;
        unsigned int *cpu_map;
 
+       /* this function may be called multiple times */
+       memset(group_map, 0, sizeof(group_map));
+       memset(group_cnt, 0, sizeof(group_cnt));
+
        /*
         * Determine min_unit_size, alloc_size and max_upa such that
         * alloc_size is multiple of atom_size and is the smallest
@@ -1574,6 +1594,7 @@ static void pcpu_dump_alloc_info(const char *lvl,
 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                                  void *base_addr)
 {
+       static char cpus_buf[4096] __initdata;
        static int smap[2], dmap[2];
        size_t dyn_size = ai->dyn_size;
        size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
@@ -1585,17 +1606,26 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        int *unit_map;
        int group, unit, i;
 
+       cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
+
+#define PCPU_SETUP_BUG_ON(cond)        do {                                    \
+       if (unlikely(cond)) {                                           \
+               pr_emerg("PERCPU: failed to initialize, %s\n", #cond);  \
+               pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);   \
+               pcpu_dump_alloc_info(KERN_EMERG, ai);                   \
+               BUG();                                                  \
+       }                                                               \
+} while (0)
+
        /* sanity checks */
        BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
                     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
-       BUG_ON(ai->nr_groups <= 0);
-       BUG_ON(!ai->static_size);
-       BUG_ON(!base_addr);
-       BUG_ON(ai->unit_size < size_sum);
-       BUG_ON(ai->unit_size & ~PAGE_MASK);
-       BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
-
-       pcpu_dump_alloc_info(KERN_DEBUG, ai);
+       PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
+       PCPU_SETUP_BUG_ON(!ai->static_size);
+       PCPU_SETUP_BUG_ON(!base_addr);
+       PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
+       PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
+       PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
 
        /* process group information and build config tables accordingly */
        group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
@@ -1604,7 +1634,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
 
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-               unit_map[cpu] = NR_CPUS;
+               unit_map[cpu] = UINT_MAX;
        pcpu_first_unit_cpu = NR_CPUS;
 
        for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
@@ -1618,8 +1648,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                        if (cpu == NR_CPUS)
                                continue;
 
-                       BUG_ON(cpu > nr_cpu_ids || !cpu_possible(cpu));
-                       BUG_ON(unit_map[cpu] != NR_CPUS);
+                       PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
+                       PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
+                       PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
 
                        unit_map[cpu] = unit + i;
                        unit_off[cpu] = gi->base_offset + i * ai->unit_size;
@@ -1632,7 +1663,11 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        pcpu_nr_units = unit;
 
        for_each_possible_cpu(cpu)
-               BUG_ON(unit_map[cpu] == NR_CPUS);
+               PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
+
+       /* we're done parsing the input, undefine BUG macro and dump config */
+#undef PCPU_SETUP_BUG_ON
+       pcpu_dump_alloc_info(KERN_INFO, ai);
 
        pcpu_nr_groups = ai->nr_groups;
        pcpu_group_offsets = group_offsets;
@@ -1782,7 +1817,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
        void *base = (void *)ULONG_MAX;
        void **areas = NULL;
        struct pcpu_alloc_info *ai;
-       size_t size_sum, areas_size;
+       size_t size_sum, areas_size, max_distance;
        int group, i, rc;
 
        ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
@@ -1832,8 +1867,24 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
        }
 
        /* base address is now known, determine group base offsets */
-       for (group = 0; group < ai->nr_groups; group++)
+       max_distance = 0;
+       for (group = 0; group < ai->nr_groups; group++) {
                ai->groups[group].base_offset = areas[group] - base;
+               max_distance = max(max_distance, ai->groups[group].base_offset);
+       }
+       max_distance += ai->unit_size;
+
+       /* warn if maximum distance is further than 75% of vmalloc space */
+       if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
+               pr_warning("PERCPU: max_distance=0x%lx too large for vmalloc "
+                          "space 0x%lx\n",
+                          max_distance, VMALLOC_END - VMALLOC_START);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+               /* and fail if we have fallback */
+               rc = -EINVAL;
+               goto out_free;
+#endif
+       }
 
        pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
                PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
index 28aafe2b530668b03c766619a83873ee2a91087e..dd43373a483fa764ce35a3bdc541ea4148653af4 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -242,8 +242,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 }
 
 /*
- * At what user virtual address is page expected in vma? checking that the
- * page matches the vma: currently only used on anon pages, by unuse_vma;
+ * At what user virtual address is page expected in vma?
+ * checking that the page matches the vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
index 4de7f02f820b03bfcf36b5fc8d6827b5eecd38cb..a1bc6b9af9a23634f4185e85775d94a6500bbb0e 100644 (file)
@@ -1974,12 +1974,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                goto bad_swap;
        }
 
-       if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
-               p->flags |= SWP_SOLIDSTATE;
-               p->cluster_next = 1 + (random32() % p->highest_bit);
+       if (p->bdev) {
+               if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+                       p->flags |= SWP_SOLIDSTATE;
+                       p->cluster_next = 1 + (random32() % p->highest_bit);
+               }
+               if (discard_swap(p) == 0)
+                       p->flags |= SWP_DISCARDABLE;
        }
-       if (discard_swap(p) == 0)
-               p->flags |= SWP_DISCARDABLE;
 
        mutex_lock(&swapon_mutex);
        spin_lock(&swap_lock);
index 4ecbbded98f277218987086ca46d954433384554..5e7aed0802bf933baaf9ea0441166ea406850495 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
-#include <linux/highmem.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
index 343146e1bceb0998e1b2a0804b5a798944669ea5..a91504850195ff91ef178a390492fd50375e25c3 100644 (file)
@@ -169,6 +169,7 @@ static size_t vlan_get_size(const struct net_device *dev)
        struct vlan_dev_info *vlan = vlan_dev_info(dev);
 
        return nla_total_size(2) +      /* IFLA_VLAN_ID */
+              sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
               vlan_qos_map_size(vlan->nr_ingress_mappings) +
               vlan_qos_map_size(vlan->nr_egress_mappings);
 }
index 8c4d843eb17f46bbec50847aa1352580adcd1c26..950bd16d2383463851c1f3f919c254ac8f89fdae 100644 (file)
@@ -679,7 +679,7 @@ static int check_qos(const struct atm_qos *qos)
 }
 
 int vcc_setsockopt(struct socket *sock, int level, int optname,
-                  char __user *optval, int optlen)
+                  char __user *optval, unsigned int optlen)
 {
        struct atm_vcc *vcc;
        unsigned long value;
index 92e2981f479f920f0dfe3815cd9c5734af81a3aa..f48a76b6cdf48845a5834800f6716113c9c95914 100644 (file)
@@ -21,7 +21,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
 int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_setsockopt(struct socket *sock, int level, int optname,
-                  char __user *optval, int optlen);
+                  char __user *optval, unsigned int optlen);
 int vcc_getsockopt(struct socket *sock, int level, int optname,
                   char __user *optval, int __user *optlen);
 
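The run of setsockopt() signature changes across the networking code (atm, ax25, bluetooth, can, compat, and the files that follow) all track the same socket-layer change: the optlen argument handed to these hooks is now an unsigned int. As an illustrative fragment only (the real hook lives in struct proto_ops in include/linux/net.h, which is not part of this excerpt), the shape each implementation must now match is:

#include <linux/net.h>

/* illustrative typedef mirroring the updated setsockopt hook signature */
typedef int (*example_setsockopt_t)(struct socket *sock, int level, int optname,
                                    char __user *optval, unsigned int optlen);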
index e1d22d9430dd2c0b40469d134b82ab7ad5942bc8..d4c024504f998b719d7cdb47a52abaa8830be44d 100644 (file)
@@ -59,7 +59,7 @@ static int pvc_connect(struct socket *sock,struct sockaddr *sockaddr,
 }
 
 static int pvc_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int error;
index 7b831b526d0bce4fccb9204a4c9ee7adc3c3bafc..f90d143c4b2592d6ed2145724d580911b9839c28 100644 (file)
@@ -446,7 +446,7 @@ int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
 
 
 static int svc_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct atm_vcc *vcc = ATM_SD(sock);
index fbcac76fdc0dd651f293ffe833123f00a303caff..f454607303717d2f1339cfd901e6b2ae5ec8437f 100644 (file)
@@ -534,7 +534,7 @@ ax25_cb *ax25_create_cb(void)
  */
 
 static int ax25_setsockopt(struct socket *sock, int level, int optname,
-       char __user *optval, int optlen)
+       char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        ax25_cb *ax25;
@@ -641,15 +641,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
 
        case SO_BINDTODEVICE:
                if (optlen > IFNAMSIZ)
-                       optlen=IFNAMSIZ;
-               if (copy_from_user(devname, optval, optlen)) {
-               res = -EFAULT;
-                       break;
-               }
+                       optlen = IFNAMSIZ;
 
-               dev = dev_get_by_name(&init_net, devname);
-               if (dev == NULL) {
-                       res = -ENODEV;
+               if (copy_from_user(devname, optval, optlen)) {
+                       res = -EFAULT;
                        break;
                }
 
@@ -657,12 +652,18 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                   (sock->state != SS_UNCONNECTED ||
                    sk->sk_state == TCP_LISTEN)) {
                        res = -EADDRNOTAVAIL;
-                       dev_put(dev);
+                       break;
+               }
+
+               dev = dev_get_by_name(&init_net, devname);
+               if (!dev) {
+                       res = -ENODEV;
                        break;
                }
 
                ax25->ax25_dev = ax25_dev_ax25dev(dev);
                ax25_fillin_cb(ax25, ax25->ax25_dev);
+               dev_put(dev);
                break;
 
        default:
@@ -900,7 +901,6 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
 
        sock_init_data(NULL, sk);
 
-       sk->sk_destruct = ax25_free_sock;
        sk->sk_type     = osk->sk_type;
        sk->sk_priority = osk->sk_priority;
        sk->sk_protocol = osk->sk_protocol;
@@ -938,6 +938,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
        }
 
        sk->sk_protinfo = ax25;
+       sk->sk_destruct = ax25_free_sock;
        ax25->sk    = sk;
 
        return sk;
index 4f9621f759a010d7e2403660103a780341f77e14..75302a986067daebc76e4fba40c9c6a92d6de3bf 100644 (file)
@@ -466,7 +466,7 @@ drop:
        goto done;
 }
 
-static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int len)
+static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
 {
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
index b03012564647dc21216059e4ffbcacb78da7389a..555d9da1869b2fa3daca87eef3f88974a6edb0c8 100644 (file)
@@ -1698,7 +1698,7 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
        return bt_sock_recvmsg(iocb, sock, msg, len, flags);
 }
 
-static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
+static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct l2cap_options opts;
@@ -1755,7 +1755,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
        return err;
 }
 
-static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
+static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct bt_security sec;
index 0b85e8116859b21592d52d09f32e1ceb1bdad1f7..8a20aaf1f2316676564501c49bb59c3fe56c898b 100644 (file)
@@ -730,7 +730,7 @@ out:
        return copied ? : err;
 }
 
-static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
+static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int err = 0;
@@ -766,7 +766,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u
        return err;
 }
 
-static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
+static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct bt_security sec;
index 13c27f17192c6f3aa4fe1bee6c1a12643497b850..77f4153bdb5e5ebe434487497d1e8d3ed0253cbf 100644 (file)
@@ -644,7 +644,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        return err;
 }
 
-static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
+static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int err = 0;
index 142ebac141764b5deffdbde15c3c6510d6e4d770..b1b3b0fbf41c1b168c67f7ebdf93e3c8f9e32cf5 100644 (file)
@@ -432,6 +432,7 @@ err2:
        br_fdb_delete_by_port(br, p, 1);
 err1:
        kobject_put(&p->kobj);
+       p = NULL; /* kobject_put frees */
 err0:
        dev_set_promiscuity(dev, -1);
 put_back:
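
The bridge hunk above NULLs p once kobject_put() has released the port, because the shared cleanup path that runs afterwards also frees it. A small userspace sketch of that ownership idiom (the struct, helpers and error labels are made up for the example; free(NULL) being a no-op is standard C, just as kfree(NULL) is in the kernel):

/* Illustrative: once ownership of 'p' has been handed to a release
 * function, NULL the pointer so a later shared cleanup's free() is a
 * harmless no-op instead of a double free.
 */
#include <stdio.h>
#include <stdlib.h>

struct port { int id; };

static void release_port(struct port *p)
{
	free(p);                    /* stands in for kobject_put() dropping the last reference */
}

static int add_port(int fail_late)
{
	struct port *p = malloc(sizeof(*p));

	if (!p)
		return -1;

	if (fail_late)
		goto err_release;       /* error after ownership moved to release_port() */

	printf("added port\n");
	return 0;

err_release:
	release_port(p);
	p = NULL;                   /* the shared path below must not free it again */
	/* shared cleanup, analogous to the put_back label above */
	free(p);                    /* no-op on NULL */
	return -1;
}

int main(void)
{
	add_port(1);
	return 0;
}
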
index db3152df7d2b627fd2484c14734e9a084d3e93a0..b5e897922d32e85986e5a2ea949f43fca3d32884 100644 (file)
@@ -411,7 +411,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
 }
 
 static int raw_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
index 12728b17a226ebfc6e76351dd131f09e3d1de2e4..a407c3addbae432913a20dce2560d2bab1d248f2 100644 (file)
@@ -331,7 +331,7 @@ struct compat_sock_fprog {
 };
 
 static int do_set_attach_filter(struct socket *sock, int level, int optname,
-                               char __user *optval, int optlen)
+                               char __user *optval, unsigned int optlen)
 {
        struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
        struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -351,7 +351,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
 }
 
 static int do_set_sock_timeout(struct socket *sock, int level,
-               int optname, char __user *optval, int optlen)
+               int optname, char __user *optval, unsigned int optlen)
 {
        struct compat_timeval __user *up = (struct compat_timeval __user *) optval;
        struct timeval ktime;
@@ -373,7 +373,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
 }
 
 static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
-                               char __user *optval, int optlen)
+                               char __user *optval, unsigned int optlen)
 {
        if (optname == SO_ATTACH_FILTER)
                return do_set_attach_filter(sock, level, optname,
@@ -385,7 +385,7 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
 }
 
 asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
-                               char __user *optval, int optlen)
+                               char __user *optval, unsigned int optlen)
 {
        int err;
        struct socket *sock;
@@ -558,8 +558,8 @@ struct compat_group_filter {
 
 
 int compat_mc_setsockopt(struct sock *sock, int level, int optname,
-       char __user *optval, int optlen,
-       int (*setsockopt)(struct sock *,int,int,char __user *,int))
+       char __user *optval, unsigned int optlen,
+       int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int))
 {
        char __user     *koptval = optval;
        int             koptlen = optlen;
index 560c8c9c03ab3ccfef96bfd1eed5fc195c4b1a75..b8f74cfb1bfdf1365494e87a1b712834f936f287 100644 (file)
@@ -2288,6 +2288,9 @@ int netif_receive_skb(struct sk_buff *skb)
        int ret = NET_RX_DROP;
        __be16 type;
 
+       if (!skb->tstamp.tv64)
+               net_timestamp(skb);
+
        if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
                return NET_RX_SUCCESS;
 
@@ -2295,9 +2298,6 @@ int netif_receive_skb(struct sk_buff *skb)
        if (netpoll_receive_skb(skb))
                return NET_RX_DROP;
 
-       if (!skb->tstamp.tv64)
-               net_timestamp(skb);
-
        if (!skb->iif)
                skb->iif = skb->dev->ifindex;
 
index 7d4c57523b09b69e63abfb7583446ddeea057a80..427ded841224e6b7ce79cc91158efec24ab54b4a 100644 (file)
@@ -16,7 +16,7 @@
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
 #include <linux/wireless.h>
-#include <net/iw_handler.h>
+#include <net/wext.h>
 
 #include "net-sysfs.h"
 
@@ -363,18 +363,16 @@ static ssize_t wireless_show(struct device *d, char *buf,
                                               char *))
 {
        struct net_device *dev = to_net_dev(d);
-       const struct iw_statistics *iw = NULL;
+       const struct iw_statistics *iw;
        ssize_t ret = -EINVAL;
 
-       read_lock(&dev_base_lock);
+       rtnl_lock();
        if (dev_isalive(dev)) {
-               if (dev->wireless_handlers &&
-                   dev->wireless_handlers->get_wireless_stats)
-                       iw = dev->wireless_handlers->get_wireless_stats(dev);
-               if (iw != NULL)
+               iw = get_wireless_stats(dev);
+               if (iw)
                        ret = (*format)(iw, buf);
        }
-       read_unlock(&dev_base_lock);
+       rtnl_unlock();
 
        return ret;
 }
@@ -505,7 +503,7 @@ int netdev_register_kobject(struct net_device *net)
        *groups++ = &netstat_group;
 
 #ifdef CONFIG_WIRELESS_EXT_SYSFS
-       if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)
+       if (net->wireless_handlers || net->ieee80211_ptr)
                *groups++ = &wireless_group;
 #endif
 #endif /* CONFIG_SYSFS */
index 4d11c28ca8ca0f1b2dbbedb3a9e569a983087f40..86acdba0a97d657201349491a53b04ba198e4c98 100644 (file)
@@ -964,7 +964,7 @@ static ssize_t pktgen_if_write(struct file *file,
                if (value == 0x7FFFFFFF)
                        pkt_dev->delay = ULLONG_MAX;
                else
-                       pkt_dev->delay = (u64)value * NSEC_PER_USEC;
+                       pkt_dev->delay = (u64)value;
 
                sprintf(pg_result, "OK: delay=%llu",
                        (unsigned long long) pkt_dev->delay);
@@ -2105,15 +2105,17 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 {
        ktime_t start_time, end_time;
-       s32 remaining;
+       s64 remaining;
        struct hrtimer_sleeper t;
 
        hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        hrtimer_set_expires(&t.timer, spin_until);
 
        remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
-       if (remaining <= 0)
+       if (remaining <= 0) {
+               pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
                return;
+       }
 
        start_time = ktime_now();
        if (remaining < 100)
@@ -2210,7 +2212,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
        if (pkt_dev->flags & F_QUEUE_MAP_CPU)
                pkt_dev->cur_queue_map = smp_processor_id();
 
-       else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+       else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
                __u16 t;
                if (pkt_dev->flags & F_QUEUE_MAP_RND) {
                        t = random32() %
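
One of the pktgen hunks above widens 'remaining' from s32 to s64: with larger delays now representable, the microsecond difference returned by ktime_to_us() can exceed what 32 bits hold. A stand-alone sketch of the truncation (fixed-width types stand in for the kernel's s32/s64; the cast behaviour shown is the usual two's-complement result):

/* Illustrative: a time difference kept in microseconds no longer fits
 * a 32-bit signed value once it passes roughly 35.8 minutes.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t one_hour_us = 3600LL * 1000 * 1000;   /* 3.6e9 microseconds */
	int32_t truncated   = (int32_t)one_hour_us;   /* wraps, typically to a negative value */

	printf("64-bit: %lld us\n", (long long)one_hour_us);
	printf("32-bit: %d us\n", truncated);
	printf("INT32_MAX covers about %.1f minutes of microseconds\n",
	       INT32_MAX / 1e6 / 60);
	return 0;
}
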
index 524712a7b154c4627df17c1a6655eb941711cce4..7626b6aacd685551cf13bf5fe8fbbbbc6105e56f 100644 (file)
@@ -446,7 +446,7 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
  */
 
 int sock_setsockopt(struct socket *sock, int level, int optname,
-                   char __user *optval, int optlen)
+                   char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int val;
@@ -1228,17 +1228,22 @@ void __init sk_init(void)
 void sock_wfree(struct sk_buff *skb)
 {
        struct sock *sk = skb->sk;
-       int res;
+       unsigned int len = skb->truesize;
 
-       /* In case it might be waiting for more memory. */
-       res = atomic_sub_return(skb->truesize, &sk->sk_wmem_alloc);
-       if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
+       if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
+               /*
+                * Keep a reference on sk_wmem_alloc, this will be released
+                * after sk_write_space() call
+                */
+               atomic_sub(len - 1, &sk->sk_wmem_alloc);
                sk->sk_write_space(sk);
+               len = 1;
+       }
        /*
-        * if sk_wmem_alloc reached 0, we are last user and should
-        * free this sock, as sk_free() call could not do it.
+        * if sk_wmem_alloc reaches 0, we must finish what sk_free()
+        * could not do because of in-flight packets
         */
-       if (res == 0)
+       if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
                __sk_free(sk);
 }
 EXPORT_SYMBOL(sock_wfree);
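
The sock_wfree() hunk above keeps one unit of sk_wmem_alloc across the sk_write_space() callback and frees the socket only when atomic_sub_and_test() drops the count to zero. A hedged userspace sketch of that hold-one-reference-across-a-callback shape using C11 atomics (the struct, callback and helper names are invented for the example):

/* Illustrative: drop all but one reference before running a callback
 * that may still touch the object, then release the last reference and
 * free only if the count really reached zero.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_uint refs;
};

static void callback(struct obj *o)
{
	/* may still read o; the reference we kept guarantees it is alive */
	printf("callback, refs now %u\n", atomic_load(&o->refs));
}

static void put_many(struct obj *o, unsigned int n, int run_callback)
{
	if (run_callback) {
		atomic_fetch_sub(&o->refs, n - 1);  /* keep one reference for the callback */
		callback(o);
		n = 1;                              /* only the kept reference is left to drop */
	}
	/* atomic_fetch_sub returns the previous value; old == n means we were last */
	if (atomic_fetch_sub(&o->refs, n) == n) {
		printf("last reference, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refs, 5);
	put_many(o, 5, 1);
	return 0;
}
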
@@ -1697,7 +1702,7 @@ int sock_no_shutdown(struct socket *sock, int how)
 EXPORT_SYMBOL(sock_no_shutdown);
 
 int sock_no_setsockopt(struct socket *sock, int level, int optname,
-                   char __user *optval, int optlen)
+                   char __user *optval, unsigned int optlen)
 {
        return -EOPNOTSUPP;
 }
@@ -2018,7 +2023,7 @@ EXPORT_SYMBOL(sock_common_recvmsg);
  *     Set socket options on an inet socket.
  */
 int sock_common_setsockopt(struct socket *sock, int level, int optname,
-                          char __user *optval, int optlen)
+                          char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
 
@@ -2028,7 +2033,7 @@ EXPORT_SYMBOL(sock_common_setsockopt);
 
 #ifdef CONFIG_COMPAT
 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
-                                 char __user *optval, int optlen)
+                                 char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
 
index e0879bfb7dd56dc6857ea16aacb82f258df61e18..ac1205df6c86327155d79f81da345e8b8411bec7 100644 (file)
@@ -194,7 +194,7 @@ static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
        nlmsg_end(dcbnl_skb, nlh);
        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
-               goto err;
+               return -EINVAL;
 
        return 0;
 nlmsg_failure:
@@ -275,7 +275,7 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
 
        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
-               goto err;
+               goto err_out;
 
        return 0;
 nlmsg_failure:
@@ -316,12 +316,11 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
 
        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
-               goto err;
+               goto err_out;
 
        return 0;
 
 nlmsg_failure:
-err:
        kfree_skb(dcbnl_skb);
 err_out:
        return -EINVAL;
@@ -383,7 +382,7 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
 
        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
-               goto err;
+               goto err_out;
 
        return 0;
 nlmsg_failure:
@@ -460,7 +459,7 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret) {
                ret = -EINVAL;
-               goto err;
+               goto err_out;
        }
 
        return 0;
@@ -799,7 +798,7 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
 
        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
-               goto err;
+               goto err_out;
 
        return 0;
 
@@ -1063,7 +1062,7 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
 
        ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
        if (ret)
-               goto err;
+               goto err_out;
 
        return 0;
 
index d6bc47363b1c0b75006b4fc95556515d90e3088c..5ef32c2f0d6a964a9ab35e813798adbe209b71a6 100644 (file)
@@ -290,14 +290,14 @@ extern int           dccp_disconnect(struct sock *sk, int flags);
 extern int        dccp_getsockopt(struct sock *sk, int level, int optname,
                                   char __user *optval, int __user *optlen);
 extern int        dccp_setsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, int optlen);
+                                  char __user *optval, unsigned int optlen);
 #ifdef CONFIG_COMPAT
 extern int        compat_dccp_getsockopt(struct sock *sk,
                                int level, int optname,
                                char __user *optval, int __user *optlen);
 extern int        compat_dccp_setsockopt(struct sock *sk,
                                int level, int optname,
-                               char __user *optval, int optlen);
+                               char __user *optval, unsigned int optlen);
 #endif
 extern int        dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int        dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
index bc4467082a00830f98fdd0c3f0c6b4738a4c0590..a156319fd0acd53a65cfae08992d97c27c9a1058 100644 (file)
@@ -393,7 +393,7 @@ out:
 EXPORT_SYMBOL_GPL(dccp_ioctl);
 
 static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
-                                  char __user *optval, int optlen)
+                                  char __user *optval, unsigned int optlen)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;
@@ -464,7 +464,7 @@ static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
 }
 
 static int dccp_setsockopt_ccid(struct sock *sk, int type,
-                               char __user *optval, int optlen)
+                               char __user *optval, unsigned int optlen)
 {
        u8 *val;
        int rc = 0;
@@ -494,7 +494,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type,
 }
 
 static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
-               char __user *optval, int optlen)
+               char __user *optval, unsigned int optlen)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        int val, err = 0;
@@ -546,7 +546,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
 }
 
 int dccp_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int optlen)
+                   char __user *optval, unsigned int optlen)
 {
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
@@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(dccp_setsockopt);
 
 #ifdef CONFIG_COMPAT
 int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, int optlen)
+                          char __user *optval, unsigned int optlen)
 {
        if (level != SOL_DCCP)
                return inet_csk_compat_setsockopt(sk, level, optname,
index 77d40289653cf0e739e5893e0e053c2145d6156a..7a58c87baf172f9384fab475f8243eab58716e58 100644 (file)
@@ -157,7 +157,7 @@ static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
 static struct hlist_head dn_wild_sk;
 static atomic_t decnet_memory_allocated;
 
-static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen, int flags);
+static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
 
 static struct hlist_head *dn_find_list(struct sock *sk)
@@ -1325,7 +1325,7 @@ out:
        return err;
 }
 
-static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
+static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int err;
@@ -1337,7 +1337,7 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
        return err;
 }
 
-static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags)
+static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
 {
        struct  sock *sk = sock->sk;
        struct dn_scp *scp = DN_SK(sk);
index 51593a48f2ddae396a97395239ca7f7c1aad5859..a413b1bf4465828860d9052d7fad828330edce89 100644 (file)
@@ -414,7 +414,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
 }
 
 static int dgram_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int optlen)
+                   char __user *optval, unsigned int optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
        int val;
index 13198859982e64baf80b97463af6e0b70bc75a50..30e74eee07d664755882ef4f16b4ecf901baf7c2 100644 (file)
@@ -244,7 +244,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
 }
 
 static int raw_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int optlen)
+                   char __user *optval, unsigned int optlen)
 {
        return -EOPNOTSUPP;
 }
index 58c4b0f7c4aae333e5767e331b84bebbe4845bd0..57737b8d1711a9348ca7000963eb4e65f3680db7 100644 (file)
@@ -1119,6 +1119,7 @@ int inet_sk_rebuild_header(struct sock *sk)
 {
        struct flowi fl = {
                .oif = sk->sk_bound_dev_if,
+               .mark = sk->sk_mark,
                .nl_u = {
                        .ip4_u = {
                                .daddr  = daddr,
index e92f1fd28aa5161bc28969ba25b3d9d0bf95418e..5df2f6a0b0f01da83bead89554bb7e7492298646 100644 (file)
@@ -1077,12 +1077,16 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
                ip_mc_up(in_dev);
                /* fall through */
        case NETDEV_CHANGEADDR:
-               if (IN_DEV_ARP_NOTIFY(in_dev))
-                       arp_send(ARPOP_REQUEST, ETH_P_ARP,
-                                in_dev->ifa_list->ifa_address,
-                                dev,
-                                in_dev->ifa_list->ifa_address,
-                                NULL, dev->dev_addr, NULL);
+               /* Send gratuitous ARP to notify of link change */
+               if (IN_DEV_ARP_NOTIFY(in_dev)) {
+                       struct in_ifaddr *ifa = in_dev->ifa_list;
+
+                       if (ifa)
+                               arp_send(ARPOP_REQUEST, ETH_P_ARP,
+                                        ifa->ifa_address, dev,
+                                        ifa->ifa_address, NULL,
+                                        dev->dev_addr, NULL);
+               }
                break;
        case NETDEV_DOWN:
                ip_mc_down(in_dev);
index 22cd19ee44e52f5b94788e37dd6b5dc02f018eae..4351ca2cf0b8554eadd258295d44a93455f3e506 100644 (file)
@@ -714,7 +714,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
 
 int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, int optlen)
+                              char __user *optval, unsigned int optlen)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
index 9fe5d7b81580fe8b8768b03e4fff05f9059d14c6..f9895180f4818cf73f55bd1183c07dfbf852477e 100644 (file)
@@ -335,6 +335,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
 
                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
+                                           .mark = sk->sk_mark,
                                            .nl_u = { .ip4_u =
                                                      { .daddr = daddr,
                                                        .saddr = inet->saddr,
index 5a0693576e821c628edfb3f436243a0f8b107d4a..0c0b6e363a20a9ac2e16b0ba22e816b7327ee6bc 100644 (file)
@@ -440,7 +440,7 @@ out:
  */
 
 static int do_ip_setsockopt(struct sock *sk, int level,
-                           int optname, char __user *optval, int optlen)
+                           int optname, char __user *optval, unsigned int optlen)
 {
        struct inet_sock *inet = inet_sk(sk);
        int val = 0, err;
@@ -950,7 +950,7 @@ e_inval:
 }
 
 int ip_setsockopt(struct sock *sk, int level,
-               int optname, char __user *optval, int optlen)
+               int optname, char __user *optval, unsigned int optlen)
 {
        int err;
 
@@ -975,7 +975,7 @@ EXPORT_SYMBOL(ip_setsockopt);
 
 #ifdef CONFIG_COMPAT
 int compat_ip_setsockopt(struct sock *sk, int level, int optname,
-                        char __user *optval, int optlen)
+                        char __user *optval, unsigned int optlen)
 {
        int err;
 
index c43ec2d51ce21dcb8666785f52bbd5417d868a0e..630a56df7b47ee80c9c77dcbb62048dc73ed9fc5 100644 (file)
@@ -931,7 +931,7 @@ static void mrtsock_destruct(struct sock *sk)
  *     MOSPF/PIM router set up we can clean this up.
  */
 
-int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
+int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 {
        int ret;
        struct vifctl vif;
index ebb1e5848bc6639703d88029ea40231b343a06c1..757c9171e7c25b021c21d26a681d5238fa0017c1 100644 (file)
@@ -741,7 +741,7 @@ out:        return ret;
 }
 
 static int do_raw_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        if (optname == ICMP_FILTER) {
                if (inet_sk(sk)->num != IPPROTO_ICMP)
@@ -753,7 +753,7 @@ static int do_raw_setsockopt(struct sock *sk, int level, int optname,
 }
 
 static int raw_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        if (level != SOL_RAW)
                return ip_setsockopt(sk, level, optname, optval, optlen);
@@ -762,7 +762,7 @@ static int raw_setsockopt(struct sock *sk, int level, int optname,
 
 #ifdef CONFIG_COMPAT
 static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, int optlen)
+                                char __user *optval, unsigned int optlen)
 {
        if (level != SOL_RAW)
                return compat_ip_setsockopt(sk, level, optname, optval, optlen);
index 21387ebabf00e491f428c11131a467c61c9150b9..64d0af675823c7fe18d15f53c8d48224105f7c1b 100644 (file)
@@ -580,7 +580,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 
        lock_sock(sk);
 
-       timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
+       timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
        while (tss.len) {
                ret = __tcp_splice_read(sk, &tss);
                if (ret < 0)
@@ -2032,7 +2032,7 @@ int tcp_disconnect(struct sock *sk, int flags)
  *     Socket option code for TCP.
  */
 static int do_tcp_setsockopt(struct sock *sk, int level,
-               int optname, char __user *optval, int optlen)
+               int optname, char __user *optval, unsigned int optlen)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2047,7 +2047,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                        return -EINVAL;
 
                val = strncpy_from_user(name, optval,
-                                       min(TCP_CA_NAME_MAX-1, optlen));
+                                       min_t(long, TCP_CA_NAME_MAX-1, optlen));
                if (val < 0)
                        return -EFAULT;
                name[val] = 0;
@@ -2220,7 +2220,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 }
 
 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
-                  int optlen)
+                  unsigned int optlen)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -2232,7 +2232,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 
 #ifdef CONFIG_COMPAT
 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        if (level != SOL_TCP)
                return inet_csk_compat_setsockopt(sk, level, optname,
index 5200aab0ca978866de729079af5c640eee7473fb..fcd278a7080e1656060cd5f57e38764bec3bdb74 100644 (file)
@@ -361,6 +361,7 @@ static inline int tcp_urg_mode(const struct tcp_sock *tp)
 #define OPTION_SACK_ADVERTISE  (1 << 0)
 #define OPTION_TS              (1 << 1)
 #define OPTION_MD5             (1 << 2)
+#define OPTION_WSCALE          (1 << 3)
 
 struct tcp_out_options {
        u8 options;             /* bit field of OPTION_* */
@@ -427,7 +428,7 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
                               TCPOLEN_SACK_PERM);
        }
 
-       if (unlikely(opts->ws)) {
+       if (unlikely(OPTION_WSCALE & opts->options)) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_WINDOW << 16) |
                               (TCPOLEN_WINDOW << 8) |
@@ -494,8 +495,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
        }
        if (likely(sysctl_tcp_window_scaling)) {
                opts->ws = tp->rx_opt.rcv_wscale;
-               if (likely(opts->ws))
-                       size += TCPOLEN_WSCALE_ALIGNED;
+               opts->options |= OPTION_WSCALE;
+               size += TCPOLEN_WSCALE_ALIGNED;
        }
        if (likely(sysctl_tcp_sack)) {
                opts->options |= OPTION_SACK_ADVERTISE;
@@ -537,8 +538,8 @@ static unsigned tcp_synack_options(struct sock *sk,
 
        if (likely(ireq->wscale_ok)) {
                opts->ws = ireq->rcv_wscale;
-               if (likely(opts->ws))
-                       size += TCPOLEN_WSCALE_ALIGNED;
+               opts->options |= OPTION_WSCALE;
+               size += TCPOLEN_WSCALE_ALIGNED;
        }
        if (likely(doing_ts)) {
                opts->options |= OPTION_TS;
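
The tcp_output.c hunks above stop treating opts->ws == 0 as "no window scaling": zero is a legal shift, so presence is now tracked with a separate OPTION_WSCALE bit. A small illustrative C sketch of keeping a "value present" flag apart from the value itself (the names are made up, only the pattern mirrors the change):

/* Illustrative: when 0 is a legal value, test a presence bit rather
 * than the value, otherwise the zero case is silently dropped.
 */
#include <stdio.h>

#define OPT_TS     (1 << 0)
#define OPT_WSCALE (1 << 1)

struct opts {
	unsigned int flags;
	unsigned char ws;   /* TCP window scale shift; 0 through 14 are all valid */
};

static void write_options(const struct opts *o)
{
	if (o->flags & OPT_WSCALE)   /* presence bit: a shift of 0 is still advertised */
		printf("window scale option, shift=%u\n", (unsigned int)o->ws);
	else
		printf("no window scale option\n");
}

int main(void)
{
	struct opts o = { .flags = OPT_WSCALE, .ws = 0 };

	write_options(&o);           /* emitted even though the shift is zero */

	if (!o.ws)                   /* the old value-based test would have skipped it */
		printf("a value-based check would drop this option\n");
	return 0;
}
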
index ebaaa7f973d70a8dd4038b30c621adf01a4c6268..6ec6a8a4a224236acae5393d997727c7804c07cb 100644 (file)
@@ -696,6 +696,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        if (rt == NULL) {
                struct flowi fl = { .oif = ipc.oif,
+                                   .mark = sk->sk_mark,
                                    .nl_u = { .ip4_u =
                                              { .daddr = faddr,
                                                .saddr = saddr,
@@ -1359,7 +1360,7 @@ void udp_destroy_sock(struct sock *sk)
  *     Socket option code for UDP
  */
 int udp_lib_setsockopt(struct sock *sk, int level, int optname,
-                      char __user *optval, int optlen,
+                      char __user *optval, unsigned int optlen,
                       int (*push_pending_frames)(struct sock *))
 {
        struct udp_sock *up = udp_sk(sk);
@@ -1441,7 +1442,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL(udp_lib_setsockopt);
 
 int udp_setsockopt(struct sock *sk, int level, int optname,
-                  char __user *optval, int optlen)
+                  char __user *optval, unsigned int optlen)
 {
        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
@@ -1451,7 +1452,7 @@ int udp_setsockopt(struct sock *sk, int level, int optname,
 
 #ifdef CONFIG_COMPAT
 int compat_udp_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
index 9f4a6165f7229ca68deaf98aeadae777a61437a0..aaad650d47d9ca38a703c368734cdf1f93ecf86a 100644 (file)
@@ -11,13 +11,13 @@ extern void         __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
 extern int     udp_v4_get_port(struct sock *sk, unsigned short snum);
 
 extern int     udp_setsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, int optlen);
+                              char __user *optval, unsigned int optlen);
 extern int     udp_getsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, int __user *optlen);
 
 #ifdef CONFIG_COMPAT
 extern int     compat_udp_setsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, int optlen);
+                                     char __user *optval, unsigned int optlen);
 extern int     compat_udp_getsockopt(struct sock *sk, int level, int optname,
                                      char __user *optval, int __user *optlen);
 #endif
index 090675e269ee7b62ffea095dd4c728de61853ab4..716153941fc47ef43fba2acede50b522806f020b 100644 (file)
@@ -1281,7 +1281,7 @@ int ip6mr_sk_done(struct sock *sk)
  *     MOSPF/PIM router set up we can clean this up.
  */
 
-int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
+int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
 {
        int ret;
        struct mif6ctl vif;
index f5e0682b402dd115eb4119c999f4b427b10a7970..14f54eb5a7fc89e1ab9b3d2c7a83e84a54378dbe 100644 (file)
@@ -123,7 +123,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
 }
 
 static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int optlen)
+                   char __user *optval, unsigned int optlen)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net *net = sock_net(sk);
@@ -773,7 +773,7 @@ e_inval:
 }
 
 int ipv6_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int optlen)
+                   char __user *optval, unsigned int optlen)
 {
        int err;
 
@@ -801,7 +801,7 @@ EXPORT_SYMBOL(ipv6_setsockopt);
 
 #ifdef CONFIG_COMPAT
 int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, int optlen)
+                          char __user *optval, unsigned int optlen)
 {
        int err;
 
index 498b9b0b0fade607c0ece1b1ce3edaacc03ef968..f74e4e2cdd061433345929399b7a7266b32dacec 100644 (file)
@@ -658,7 +658,6 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
                     &icmp6h, NULL,
                     send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0);
 }
-EXPORT_SYMBOL(ndisc_send_rs);
 
 
 static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb)
index 7d675b8d82d36813e2229d6abfe31ffdeec7d4be..4f24570b0869bd5093b18884866d4c67f20dcf41 100644 (file)
@@ -957,7 +957,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
 
 
 static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, int optlen)
+                           char __user *optval, unsigned int optlen)
 {
        struct raw6_sock *rp = raw6_sk(sk);
        int val;
@@ -1000,7 +1000,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 }
 
 static int rawv6_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        switch(level) {
                case SOL_RAW:
@@ -1024,7 +1024,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
 
 #ifdef CONFIG_COMPAT
 static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, int optlen)
+                                  char __user *optval, unsigned int optlen)
 {
        switch (level) {
        case SOL_RAW:
index fcb53962884797c8508dc71a85547d7fe21333a4..dbd19a78ca73ad32b048b8e9c8b89f3600195c90 100644 (file)
@@ -15,7 +15,6 @@
  * Roger Venning <r.venning@telstra.com>:      6to4 support
  * Nate Thompson <nate@thebog.net>:            6to4 support
  * Fred Templin <fred.l.templin@boeing.com>:   isatap support
- * Sascha Hlusiak <mail@saschahlusiak.de>:     stateless autoconf for isatap
  */
 
 #include <linux/module.h>
@@ -223,44 +222,6 @@ failed:
        return NULL;
 }
 
-static void ipip6_tunnel_rs_timer(unsigned long data)
-{
-       struct ip_tunnel_prl_entry *p = (struct ip_tunnel_prl_entry *) data;
-       struct inet6_dev *ifp;
-       struct inet6_ifaddr *addr;
-
-       spin_lock(&p->lock);
-       ifp = __in6_dev_get(p->tunnel->dev);
-
-       read_lock_bh(&ifp->lock);
-       for (addr = ifp->addr_list; addr; addr = addr->if_next) {
-               struct in6_addr rtr;
-
-               if (!(ipv6_addr_type(&addr->addr) & IPV6_ADDR_LINKLOCAL))
-                       continue;
-
-               /* Send RS to guessed linklocal address of router
-                *
-                * Better: send to ff02::2 encapsuled in unicast directly
-                * to router-v4 instead of guessing the v6 address.
-                *
-                * Cisco/Windows seem to not set the u/l bit correctly,
-                * so we won't guess right.
-                */
-               ipv6_addr_set(&rtr,  htonl(0xFE800000), 0, 0, 0);
-               if (!__ipv6_isatap_ifid(rtr.s6_addr + 8,
-                                       p->addr)) {
-                       ndisc_send_rs(p->tunnel->dev, &addr->addr, &rtr);
-               }
-       }
-       read_unlock_bh(&ifp->lock);
-
-       mod_timer(&p->rs_timer, jiffies + HZ * p->rs_delay);
-       spin_unlock(&p->lock);
-
-       return;
-}
-
 static struct ip_tunnel_prl_entry *
 __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
 {
@@ -313,13 +274,12 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
 
        c = 0;
        for (prl = t->prl; prl; prl = prl->next) {
-               if (c > cmax)
+               if (c >= cmax)
                        break;
                if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
                        continue;
                kp[c].addr = prl->addr;
                kp[c].flags = prl->flags;
-               kp[c].rs_delay = prl->rs_delay;
                c++;
                if (kprl.addr != htonl(INADDR_ANY))
                        break;
@@ -369,23 +329,11 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
        }
 
        p->next = t->prl;
-       p->tunnel = t;
        t->prl = p;
        t->prl_count++;
-
-       spin_lock_init(&p->lock);
-       setup_timer(&p->rs_timer, ipip6_tunnel_rs_timer, (unsigned long) p);
 update:
        p->addr = a->addr;
        p->flags = a->flags;
-       p->rs_delay = a->rs_delay;
-       if (p->rs_delay == 0)
-               p->rs_delay = IPTUNNEL_RS_DEFAULT_DELAY;
-       spin_lock(&p->lock);
-       del_timer(&p->rs_timer);
-       if (p->flags & PRL_DEFAULT)
-               mod_timer(&p->rs_timer, jiffies + 1);
-       spin_unlock(&p->lock);
 out:
        write_unlock(&ipip6_lock);
        return err;
@@ -404,9 +352,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
                        if ((*p)->addr == a->addr) {
                                x = *p;
                                *p = x->next;
-                               spin_lock(&x->lock);
-                               del_timer(&x->rs_timer);
-                               spin_unlock(&x->lock);
                                kfree(x);
                                t->prl_count--;
                                goto out;
@@ -417,9 +362,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
                while (t->prl) {
                        x = t->prl;
                        t->prl = t->prl->next;
-                       spin_lock(&x->lock);
-                       del_timer(&x->rs_timer);
-                       spin_unlock(&x->lock);
                        kfree(x);
                        t->prl_count--;
                }
index b265b7047d3e3b9795c348c92306c3b2b2970255..3a60f12b34ed053da8cf4261952d6d35ff879f28 100644 (file)
@@ -1044,7 +1044,7 @@ void udpv6_destroy_sock(struct sock *sk)
  *     Socket option code for UDP
  */
 int udpv6_setsockopt(struct sock *sk, int level, int optname,
-                    char __user *optval, int optlen)
+                    char __user *optval, unsigned int optlen)
 {
        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
@@ -1054,7 +1054,7 @@ int udpv6_setsockopt(struct sock *sk, int level, int optname,
 
 #ifdef CONFIG_COMPAT
 int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, int optlen)
+                           char __user *optval, unsigned int optlen)
 {
        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
index 6bb303471e204ade2e31d025cb9d3ec1b80fbd60..d7571046bfc440386a52b71bdb2c5295e4360001 100644 (file)
@@ -16,10 +16,10 @@ extern int  udp_v6_get_port(struct sock *sk, unsigned short snum);
 extern int     udpv6_getsockopt(struct sock *sk, int level, int optname,
                                 char __user *optval, int __user *optlen);
 extern int     udpv6_setsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, int optlen);
+                                char __user *optval, unsigned int optlen);
 #ifdef CONFIG_COMPAT
 extern int     compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
-                                       char __user *optval, int optlen);
+                                       char __user *optval, unsigned int optlen);
 extern int     compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
                                       char __user *optval, int __user *optlen);
 #endif
index f1118d92a191dc07acd9bfda34ce81c7df8613f0..66c7a20011f36b3147bb66c5169f5c292236f597 100644 (file)
@@ -1292,7 +1292,7 @@ const char *ipx_device_name(struct ipx_interface *intrfc)
  * socket object. */
 
 static int ipx_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int opt;
index 50b43c57d5d8e01c94796b988fa6e98a7f4aee2b..dd35641835f408d161a7d1c70550a15f1a0309ac 100644 (file)
@@ -1826,7 +1826,7 @@ static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
  *
  */
 static int irda_setsockopt(struct socket *sock, int level, int optname,
-                          char __user *optval, int optlen)
+                          char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct irda_sock *self = irda_sk(sk);
index d985d163dcfc12590948cefcb5af50af3d08754e..bada1b9c670bd555fa10833165326492762b3d97 100644 (file)
@@ -1387,7 +1387,7 @@ static int iucv_sock_release(struct socket *sock)
 
 /* getsockopt and setsockopt */
 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
-                               char __user *optval, int optlen)
+                               char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
index c45eee1c0e8d46719df66883caa4d874f806ec5f..7aa4fd170104452be2d9daa1194bc313dfd759eb 100644 (file)
@@ -973,7 +973,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
  *     Set various connection specific parameters.
  */
 static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
-                            char __user *optval, int optlen)
+                            char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
index 97a278a2f48ee1a88b4cc1e585ac8e2a4e19ce45..8d26e9bf896448167f9f2af8b8a9aa9354d85129 100644 (file)
@@ -1388,8 +1388,8 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
 
        reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
 
-       printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n",
-                       sdata->dev->name, reason_code);
+       printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
+                       sdata->dev->name, mgmt->sa, reason_code);
 
        ieee80211_set_disassoc(sdata, false);
        return RX_MGMT_CFG80211_DISASSOC;
@@ -1675,7 +1675,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
 
        /* direct probe may be part of the association flow */
        if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
-               printk(KERN_DEBUG "%s direct probe responded\n",
+               printk(KERN_DEBUG "%s: direct probe responded\n",
                       sdata->dev->name);
                wk->tries = 0;
                wk->state = IEEE80211_MGD_STATE_AUTH;
@@ -2502,9 +2502,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_mgd_work *wk;
        const u8 *bssid = NULL;
 
-       printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
-              sdata->dev->name, req->reason_code);
-
        mutex_lock(&ifmgd->mtx);
 
        if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) {
@@ -2532,6 +2529,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 
        mutex_unlock(&ifmgd->mtx);
 
+       printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
+              sdata->dev->name, bssid, req->reason_code);
+
        ieee80211_send_deauth_disassoc(sdata, bssid,
                        IEEE80211_STYPE_DEAUTH, req->reason_code,
                        cookie);
@@ -2545,9 +2545,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-       printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
-              sdata->dev->name, req->reason_code);
-
        mutex_lock(&ifmgd->mtx);
 
        /*
@@ -2561,6 +2558,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
                return -ENOLINK;
        }
 
+       printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
+              sdata->dev->name, req->bss->bssid, req->reason_code);
+
        ieee80211_set_disassoc(sdata, false);
 
        mutex_unlock(&ifmgd->mtx);
index 5143d203256b06a9c6e418f3d7611d4b89352d49..fd40282966132d9c76719bda986732ce4d511bc5 100644 (file)
@@ -367,7 +367,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
        u32 staflags;
 
-       if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)))
+       if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)
+                       || ieee80211_is_auth(hdr->frame_control)
+                       || ieee80211_is_assoc_resp(hdr->frame_control)
+                       || ieee80211_is_reassoc_resp(hdr->frame_control)))
                return TX_CONTINUE;
 
        staflags = get_sta_flags(sta);
index 8ab829f865741c4a9f782f7dc793642528f13d95..f042ae521557b340be5252aa41ccdc67f5202a29 100644 (file)
@@ -113,7 +113,7 @@ static int nf_sockopt(struct sock *sk, u_int8_t pf, int val,
 }
 
 int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
-                 int len)
+                 unsigned int len)
 {
        return nf_sockopt(sk, pf, val, opt, &len, 0);
 }
@@ -154,7 +154,7 @@ static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val,
 }
 
 int compat_nf_setsockopt(struct sock *sk, u_int8_t pf,
-               int val, char __user *opt, int len)
+               int val, char __user *opt, unsigned int len)
 {
        return compat_nf_sockopt(sk, pf, val, opt, &len, 0);
 }
index a4bafbf150974da5daa3ba622f10b7b589604272..19e98007691cce17b68e3fc29896b7dc3626a526 100644 (file)
@@ -1150,7 +1150,7 @@ static void netlink_update_socket_mc(struct netlink_sock *nlk,
 }
 
 static int netlink_setsockopt(struct socket *sock, int level, int optname,
-                             char __user *optval, int optlen)
+                             char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -1788,7 +1788,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
        }
 
        rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
-                         NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
+                         NLMSG_ERROR, payload, 0);
        errmsg = nlmsg_data(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
index ce1a34b99c2328f16648d5cf96cb8ff5edc25ed6..7a834952f67fe75f72558b7e3c8477bc8ac259a6 100644 (file)
@@ -301,7 +301,7 @@ void nr_destroy_socket(struct sock *sk)
  */
 
 static int nr_setsockopt(struct socket *sock, int level, int optname,
-       char __user *optval, int optlen)
+       char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct nr_sock *nr = nr_sk(sk);
index 103d5611b8181dba6f3cb523dae9af20b17257e6..d7ecca0a0c0787d674cc3e28bfab7ed4cc4534a4 100644 (file)
@@ -1701,7 +1701,7 @@ static void packet_flush_mclist(struct sock *sk)
 }
 
 static int
-packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
+packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
index b8252d289cd70475d16ce661d4c120b722ff3cc6..5f32d217535b4695667dc3e75ea3e0bcaab9a15e 100644 (file)
@@ -742,7 +742,7 @@ static int pep_init(struct sock *sk)
 }
 
 static int pep_setsockopt(struct sock *sk, int level, int optname,
-                               char __user *optval, int optlen)
+                               char __user *optval, unsigned int optlen)
 {
        struct pep_sock *pn = pep_sk(sk);
        int val = 0, err = 0;
index 07aa9f08d5fbdd0a66eb4713103b2f687089a8ea..aa5b5a972bff7a3e40700b8969e3b145bca428eb 100644 (file)
@@ -407,7 +407,6 @@ int pn_sock_get_port(struct sock *sk, unsigned short sport)
        return -EADDRINUSE;
 
 found:
-       mutex_unlock(&port_mutex);
        pn->sobject = pn_object(pn_addr(pn->sobject), sport);
        return 0;
 }
index 6b58aeff4c7ab72b4072799b63e2f0dd8d466a5c..98e05382fd3cf19e01fe2315674e0e84ff8da145 100644 (file)
@@ -248,7 +248,7 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
 }
 
 static int rds_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        struct rds_sock *rs = rds_sk_to_rs(sock->sk);
        int ret;
index dbeaf2983822e99a36377017b16e33e1fae32a03..ba2efb960c6007ecbd9b5b0a49372d21d32ed68a 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/rfkill.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/miscdevice.h>
 #include <linux/wait.h>
index 1e166c9685aac9c479eb7720a28f7dff4ecaa6f2..502cce76621d07abdd624798c5c76e3802db63aa 100644 (file)
@@ -370,7 +370,7 @@ void rose_destroy_socket(struct sock *sk)
  */
 
 static int rose_setsockopt(struct socket *sock, int level, int optname,
-       char __user *optval, int optlen)
+       char __user *optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct rose_sock *rose = rose_sk(sk);
index bfe493ebf27c36a7a82974af5a596ee8f75528c9..a86afceaa94fadd8a1137c7688a03cb5714cf18e 100644 (file)
@@ -507,7 +507,7 @@ out:
  * set RxRPC socket options
  */
 static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
-                           char __user *optval, int optlen)
+                           char __user *optval, unsigned int optlen)
 {
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        unsigned min_sec_level;
index 89af37a6c87168a4b68f60e5211b57882fb9c25c..c8d05758661d96cf09c41f3babb983c4f2194ce9 100644 (file)
@@ -2027,7 +2027,8 @@ out:
  * instead a error will be indicated to the user.
  */
 static int sctp_setsockopt_disable_fragments(struct sock *sk,
-                                           char __user *optval, int optlen)
+                                            char __user *optval,
+                                            unsigned int optlen)
 {
        int val;
 
@@ -2043,7 +2044,7 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
 }
 
 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
-                                       int optlen)
+                                 unsigned int optlen)
 {
        if (optlen > sizeof(struct sctp_event_subscribe))
                return -EINVAL;
@@ -2064,7 +2065,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
  * association is closed.
  */
 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
-                                           int optlen)
+                                    unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
 
@@ -2318,7 +2319,8 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 }
 
 static int sctp_setsockopt_peer_addr_params(struct sock *sk,
-                                           char __user *optval, int optlen)
+                                           char __user *optval,
+                                           unsigned int optlen)
 {
        struct sctp_paddrparams  params;
        struct sctp_transport   *trans = NULL;
@@ -2430,7 +2432,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
  */
 
 static int sctp_setsockopt_delayed_ack(struct sock *sk,
-                                           char __user *optval, int optlen)
+                                      char __user *optval, unsigned int optlen)
 {
        struct sctp_sack_info    params;
        struct sctp_transport   *trans = NULL;
@@ -2546,7 +2548,7 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
  * by the change).  With TCP-style sockets, this option is inherited by
  * sockets derived from a listener socket.
  */
-static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int optlen)
+static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
 {
        struct sctp_initmsg sinit;
        struct sctp_sock *sp = sctp_sk(sk);
@@ -2583,7 +2585,8 @@ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int opt
  *   to this call if the caller is using the UDP model.
  */
 static int sctp_setsockopt_default_send_param(struct sock *sk,
-                                               char __user *optval, int optlen)
+                                             char __user *optval,
+                                             unsigned int optlen)
 {
        struct sctp_sndrcvinfo info;
        struct sctp_association *asoc;
@@ -2622,7 +2625,7 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
  * association peer's addresses.
  */
 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
-                                       int optlen)
+                                       unsigned int optlen)
 {
        struct sctp_prim prim;
        struct sctp_transport *trans;
@@ -2651,7 +2654,7 @@ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
  *  integer boolean flag.
  */
 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
-                                       int optlen)
+                                  unsigned int optlen)
 {
        int val;
 
@@ -2676,7 +2679,8 @@ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
  * be changed.
  *
  */
-static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int optlen) {
+static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
+{
        struct sctp_rtoinfo rtoinfo;
        struct sctp_association *asoc;
 
@@ -2728,7 +2732,7 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int opt
  * See [SCTP] for more information.
  *
  */
-static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int optlen)
+static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
 {
 
        struct sctp_assocparams assocparams;
@@ -2800,7 +2804,7 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o
  * addresses and a user will receive both PF_INET6 and PF_INET type
  * addresses on the socket.
  */
-static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int optlen)
+static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
 {
        int val;
        struct sctp_sock *sp = sctp_sk(sk);
@@ -2844,7 +2848,7 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int op
  *    changed (effecting future associations only).
  * assoc_value:  This parameter specifies the maximum size in bytes.
  */
-static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optlen)
+static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
 {
        struct sctp_assoc_value params;
        struct sctp_association *asoc;
@@ -2899,7 +2903,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl
  *   set primary request:
  */
 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
-                                            int optlen)
+                                            unsigned int optlen)
 {
        struct sctp_sock        *sp;
        struct sctp_endpoint    *ep;
@@ -2950,7 +2954,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
 }
 
 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
-                                         int optlen)
+                                           unsigned int optlen)
 {
        struct sctp_setadaptation adaptation;
 
@@ -2979,7 +2983,7 @@ static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval
  * saved with outbound messages.
  */
 static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
-                                  int optlen)
+                                  unsigned int optlen)
 {
        struct sctp_assoc_value params;
        struct sctp_sock *sp;
@@ -3030,7 +3034,7 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
  */
 static int sctp_setsockopt_fragment_interleave(struct sock *sk,
                                               char __user *optval,
-                                              int optlen)
+                                              unsigned int optlen)
 {
        int val;
 
@@ -3063,7 +3067,7 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
  */
 static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
                                                  char __user *optval,
-                                                 int optlen)
+                                                 unsigned int optlen)
 {
        u32 val;
 
@@ -3096,7 +3100,7 @@ static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
  */
 static int sctp_setsockopt_maxburst(struct sock *sk,
                                    char __user *optval,
-                                   int optlen)
+                                   unsigned int optlen)
 {
        struct sctp_assoc_value params;
        struct sctp_sock *sp;
@@ -3140,8 +3144,8 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
  * will only effect future associations on the socket.
  */
 static int sctp_setsockopt_auth_chunk(struct sock *sk,
-                                   char __user *optval,
-                                   int optlen)
+                                     char __user *optval,
+                                     unsigned int optlen)
 {
        struct sctp_authchunk val;
 
@@ -3172,8 +3176,8 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
  * endpoint requires the peer to use.
  */
 static int sctp_setsockopt_hmac_ident(struct sock *sk,
-                                   char __user *optval,
-                                   int optlen)
+                                     char __user *optval,
+                                     unsigned int optlen)
 {
        struct sctp_hmacalgo *hmacs;
        u32 idents;
@@ -3215,7 +3219,7 @@ out:
  */
 static int sctp_setsockopt_auth_key(struct sock *sk,
                                    char __user *optval,
-                                   int optlen)
+                                   unsigned int optlen)
 {
        struct sctp_authkey *authkey;
        struct sctp_association *asoc;
@@ -3260,8 +3264,8 @@ out:
  * the association shared key.
  */
 static int sctp_setsockopt_active_key(struct sock *sk,
-                                       char __user *optval,
-                                       int optlen)
+                                     char __user *optval,
+                                     unsigned int optlen)
 {
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
@@ -3288,8 +3292,8 @@ static int sctp_setsockopt_active_key(struct sock *sk,
  * This set option will delete a shared secret key from use.
  */
 static int sctp_setsockopt_del_key(struct sock *sk,
-                                       char __user *optval,
-                                       int optlen)
+                                  char __user *optval,
+                                  unsigned int optlen)
 {
        struct sctp_authkeyid val;
        struct sctp_association *asoc;
@@ -3332,7 +3336,7 @@ static int sctp_setsockopt_del_key(struct sock *sk,
  *   optlen  - the size of the buffer.
  */
 SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
-                               char __user *optval, int optlen)
+                               char __user *optval, unsigned int optlen)
 {
        int retval = 0;
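The SCTP hunks above are part of the tree-wide change that makes the optlen argument of the setsockopt() handlers unsigned. With a signed int, every handler had to reason separately about negative lengths; as an unsigned value, a bogus length simply fails the ordinary size check. Below is a standalone userspace sketch of that reasoning, not kernel code; set_option() and its 16-byte buffer are invented for the example.

/* Why an unsigned option length makes one range check sufficient. */
#include <stdio.h>
#include <string.h>

static int set_option(const void *optval, unsigned int optlen)
{
	char buf[16];

	/* A caller-supplied -1 has already become UINT_MAX at the call
	 * boundary, so the single "> sizeof(buf)" test rejects it; with a
	 * signed parameter each handler had to treat negatives specially. */
	if (optlen > sizeof(buf))
		return -1;		/* -EINVAL in the kernel */

	memcpy(buf, optval, optlen);	/* copy_from_user() in the kernel */
	return 0;
}

int main(void)
{
	int val = 1;

	printf("%d\n", set_option(&val, sizeof(val)));		/* 0  */
	printf("%d\n", set_option(&val, (unsigned int)-1));	/* -1 */
	return 0;
}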
 
index 49917a1cac7d921f6fae418bb89a19a4e9e02a1f..75655365b5fd2041d9bdf97d936275b1f74b217e 100644 (file)
@@ -2098,12 +2098,17 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
        unsigned long a[6];
        unsigned long a0, a1;
        int err;
+       unsigned int len;
 
        if (call < 1 || call > SYS_ACCEPT4)
                return -EINVAL;
 
+       len = nargs[call];
+       if (len > sizeof(a))
+               return -EINVAL;
+
        /* copy_from_user should be SMP safe. */
-       if (copy_from_user(a, args, nargs[call]))
+       if (copy_from_user(a, args, len))
                return -EFAULT;
 
        audit_socketcall(nargs[call] / sizeof(unsigned long), a);
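The socketcall() hunk reads the per-call argument size out of nargs[] once, rejects anything that could not fit the on-stack array, and only then calls copy_from_user(); even though nargs[] is a compile-time table, clamping against sizeof(a) means a future table edit cannot silently overflow the stack buffer. A hypothetical userspace reduction of the same defensive pattern (the 5-entry table and do_call() are invented; the real table covers every call up to SYS_ACCEPT4):

#include <stdio.h>
#include <string.h>

#define AL(x) ((x) * sizeof(unsigned long))

/* bytes of argument data expected for each call number */
static const unsigned char nargs[5] = { AL(0), AL(3), AL(3), AL(3), AL(2) };

static int do_call(int call, const unsigned long *args)
{
	unsigned long a[6];
	unsigned int len;

	if (call < 1 || call >= 5)
		return -1;		/* -EINVAL */

	len = nargs[call];
	if (len > sizeof(a))		/* the guard added by this hunk */
		return -1;

	memcpy(a, args, len);		/* copy_from_user() in the kernel */
	return (int)a[0];
}

int main(void)
{
	unsigned long args[3] = { 42, 0, 0 };

	printf("%d\n", do_call(1, args));	/* 42 */
	return 0;
}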
@@ -2386,7 +2391,7 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
 }
 
 int kernel_setsockopt(struct socket *sock, int level, int optname,
-                       char *optval, int optlen)
+                       char *optval, unsigned int optlen)
 {
        mm_segment_t oldfs = get_fs();
        int err;
index e8254e809b790a76267fddeabd36995daeb80412..e6d9abf7440e09dac80152770f6a22b83a1c8201 100644 (file)
@@ -1658,7 +1658,7 @@ restart:
  */
 
 static int setsockopt(struct socket *sock,
-                     int lvl, int opt, char __user *ov, int ol)
+                     int lvl, int opt, char __user *ov, unsigned int ol)
 {
        struct sock *sk = sock->sk;
        struct tipc_port *tport = tipc_sk_port(sk);
index 7fae7eee65def0b24d17aa1046c37a496bcf04b6..93c3ed329204f034c18a1ec08b9d54bb75364abe 100644 (file)
@@ -762,9 +762,8 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
                wdev->conn->params.ssid = wdev->ssid;
                wdev->conn->params.ssid_len = connect->ssid_len;
 
-               /* don't care about result -- but fill bssid & channel */
-               if (!wdev->conn->params.bssid || !wdev->conn->params.channel)
-                       bss = cfg80211_get_conn_bss(wdev);
+               /* see if we have the bss already */
+               bss = cfg80211_get_conn_bss(wdev);
 
                wdev->sme_state = CFG80211_SME_CONNECTING;
                wdev->connect_keys = connkeys;
index bf725275eb8d2e97788fa636de465f30a3a17ba0..5615a88025367f5beaee4e90e9040d5e4deb9f15 100644 (file)
@@ -30,7 +30,8 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
        if (wdev->wext.keys) {
                wdev->wext.keys->def = wdev->wext.default_key;
                wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
-               wdev->wext.connect.privacy = true;
+               if (wdev->wext.default_key != -1)
+                       wdev->wext.connect.privacy = true;
        }
 
        if (!wdev->wext.connect.ssid_len)
@@ -229,8 +230,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
                data->flags = 1;
                data->length = wdev->wext.connect.ssid_len;
                memcpy(ssid, wdev->wext.connect.ssid, data->length);
-       } else
-               data->flags = 0;
+       }
        wdev_unlock(wdev);
 
        return 0;
@@ -306,8 +306,6 @@ int cfg80211_mgd_wext_giwap(struct net_device *dev,
        wdev_lock(wdev);
        if (wdev->current_bss)
                memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
-       else if (wdev->wext.connect.bssid)
-               memcpy(ap_addr->sa_data, wdev->wext.connect.bssid, ETH_ALEN);
        else
                memset(ap_addr->sa_data, 0, ETH_ALEN);
        wdev_unlock(wdev);
index 5b4a0cee4418543a3f034821b32c022c03a34e0c..60fe57761ca94bb5d9faf0638c00d373618301e2 100644 (file)
@@ -470,7 +470,7 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
 /*
  * Get statistics out of the driver
  */
-static struct iw_statistics *get_wireless_stats(struct net_device *dev)
+struct iw_statistics *get_wireless_stats(struct net_device *dev)
 {
        /* New location */
        if ((dev->wireless_handlers != NULL) &&
@@ -773,10 +773,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
                        essid_compat = 1;
                else if (IW_IS_SET(cmd) && (iwp->length != 0)) {
                        char essid[IW_ESSID_MAX_SIZE + 1];
+                       unsigned int len;
+                       len = iwp->length * descr->token_size;
 
-                       err = copy_from_user(essid, iwp->pointer,
-                                            iwp->length *
-                                            descr->token_size);
+                       if (len > IW_ESSID_MAX_SIZE)
+                               return -EFAULT;
+
+                       err = copy_from_user(essid, iwp->pointer, len);
                        if (err)
                                return -EFAULT;
 
index 5e6c072c64d3d8b3380cc68247565fa291237952..7fa9c7ad3d3b5b7efbe4c612511425628900a138 100644 (file)
@@ -409,7 +409,7 @@ static void x25_destroy_socket(struct sock *sk)
  */
 
 static int x25_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        int opt;
        struct sock *sk = sock->sk;
index 9cf80a11e8b68aa23d6b744e3cdb0da81e438981..26fab33ffa8cccc13a02e9d3b4573c974f71e86e 100644 (file)
@@ -28,7 +28,7 @@ static int my_open(struct inode *inode, struct file *file)
        return -EPERM;
 }
 
-static struct file_operations mark_ops = {
+static const struct file_operations mark_ops = {
        .open = my_open,
 };
 
index 8e9777b764052a1088d38fc3385c4dd1dfb25107..0c72c9c38956f1ac949f049db87fc17a3d8eed39 100644 (file)
@@ -43,7 +43,7 @@ static ssize_t ima_show_htable_violations(struct file *filp,
        return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
 }
 
-static struct file_operations ima_htable_violations_ops = {
+static const struct file_operations ima_htable_violations_ops = {
        .read = ima_show_htable_violations
 };
 
@@ -55,7 +55,7 @@ static ssize_t ima_show_measurements_count(struct file *filp,
 
 }
 
-static struct file_operations ima_measurements_count_ops = {
+static const struct file_operations ima_measurements_count_ops = {
        .read = ima_show_measurements_count
 };
 
@@ -158,7 +158,7 @@ static int ima_measurements_open(struct inode *inode, struct file *file)
        return seq_open(file, &ima_measurments_seqops);
 }
 
-static struct file_operations ima_measurements_ops = {
+static const struct file_operations ima_measurements_ops = {
        .open = ima_measurements_open,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -233,7 +233,7 @@ static int ima_ascii_measurements_open(struct inode *inode, struct file *file)
        return seq_open(file, &ima_ascii_measurements_seqops);
 }
 
-static struct file_operations ima_ascii_measurements_ops = {
+static const struct file_operations ima_ascii_measurements_ops = {
        .open = ima_ascii_measurements_open,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -313,7 +313,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)
        return 0;
 }
 
-static struct file_operations ima_measure_policy_ops = {
+static const struct file_operations ima_measure_policy_ops = {
        .open = ima_open_policy,
        .write = ima_write_policy,
        .release = ima_release_policy
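The tracing sample above and these IMA securityfs hunks make the same change: the struct file_operations tables are never modified after initialisation, so declaring them const lets the compiler place them in read-only data and turns accidental writes into build errors. A plain-C sketch of the idea (struct file_ops and the callbacks are invented stand-ins, not the kernel API):

#include <stdio.h>

struct file_ops {
	int (*open)(const char *name);
	int (*release)(const char *name);
};

static int my_open(const char *name)
{
	printf("open %s\n", name);
	return 0;
}

static int my_release(const char *name)
{
	printf("release %s\n", name);
	return 0;
}

/* const: the table lives in read-only data and cannot be re-pointed */
static const struct file_ops mark_ops = {
	.open    = my_open,
	.release = my_release,
};

int main(void)
{
	mark_ops.open("policy");
	mark_ops.release("policy");
	/* mark_ops.open = NULL;  -- would now fail to compile */
	return 0;
}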
index f0ebc971c6865041855e27b9b3cc3423e20c1cfa..1dd66ddffcaf6b97cf46ef2fe2f055a7ca317d37 100644 (file)
@@ -897,6 +897,15 @@ static int tas_create(struct i2c_adapter *adapter,
        client = i2c_new_device(adapter, &info);
        if (!client)
                return -ENODEV;
+       /*
+        * We know the driver is already loaded, so the device should be
+        * already bound. If not, binding failed and there is
+        * no point in keeping the device instantiated.
+        */
+       if (!client->driver) {
+               i2c_unregister_device(client);
+               return -ENODEV;
+       }
 
        /*
         * Let i2c-core delete that device on driver removal.
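After i2c_new_device() the tas hook now also verifies that a driver actually bound to the freshly created client, and unregisters it otherwise (the keywest hunk further down does the same). A condensed sketch of the pattern; i2c_new_bound_device() is a hypothetical helper that merely bundles the calls used in these hunks and relies on the 2.6.31-era client->driver field:

static struct i2c_client *i2c_new_bound_device(struct i2c_adapter *adapter,
					       struct i2c_board_info *info)
{
	struct i2c_client *client = i2c_new_device(adapter, info);

	if (!client)
		return NULL;
	if (!client->driver) {		/* no driver bound: probe failed */
		i2c_unregister_device(client);
		return NULL;
	}
	return client;			/* bound and ready to use */
}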
index 6e7d09ae0e82310fa053155c43ec2b9c11d1d084..7d722a025d0d7783d775c824a0076b8160b11304 100644 (file)
@@ -29,6 +29,8 @@ extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];
 
 extern int use_internal_drums;
 
+static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
+                                    struct snd_midi_channel *chan);
 /*
  * The next table looks magical, but it certainly is not. Its values have
  * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception
@@ -242,16 +244,20 @@ void snd_opl3_timer_func(unsigned long data)
        int again = 0;
        int i;
 
-       spin_lock_irqsave(&opl3->sys_timer_lock, flags);
+       spin_lock_irqsave(&opl3->voice_lock, flags);
        for (i = 0; i < opl3->max_voices; i++) {
                struct snd_opl3_voice *vp = &opl3->voices[i];
                if (vp->state > 0 && vp->note_off_check) {
                        if (vp->note_off == jiffies)
-                               snd_opl3_note_off(opl3, vp->note, 0, vp->chan);
+                               snd_opl3_note_off_unsafe(opl3, vp->note, 0,
+                                                        vp->chan);
                        else
                                again++;
                }
        }
+       spin_unlock_irqrestore(&opl3->voice_lock, flags);
+
+       spin_lock_irqsave(&opl3->sys_timer_lock, flags);
        if (again) {
                opl3->tlist.expires = jiffies + 1;      /* invoke again */
                add_timer(&opl3->tlist);
@@ -658,15 +664,14 @@ static void snd_opl3_kill_voice(struct snd_opl3 *opl3, int voice)
 /*
  * Release a note in response to a midi note off.
  */
-void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan)
+static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
+                                    struct snd_midi_channel *chan)
 {
        struct snd_opl3 *opl3;
 
        int voice;
        struct snd_opl3_voice *vp;
 
-       unsigned long flags;
-
        opl3 = p;
 
 #ifdef DEBUG_MIDI
@@ -674,12 +679,9 @@ void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan
                   chan->number, chan->midi_program, note);
 #endif
 
-       spin_lock_irqsave(&opl3->voice_lock, flags);
-
        if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
                if (chan->drum_channel && use_internal_drums) {
                        snd_opl3_drum_switch(opl3, note, vel, 0, chan);
-                       spin_unlock_irqrestore(&opl3->voice_lock, flags);
                        return;
                }
                /* this loop will hopefully kill all extra voices, because
@@ -697,6 +699,16 @@ void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan
                        snd_opl3_kill_voice(opl3, voice);
                }
        }
+}
+
+void snd_opl3_note_off(void *p, int note, int vel,
+                      struct snd_midi_channel *chan)
+{
+       struct snd_opl3 *opl3 = p;
+       unsigned long flags;
+
+       spin_lock_irqsave(&opl3->voice_lock, flags);
+       snd_opl3_note_off_unsafe(p, note, vel, chan);
        spin_unlock_irqrestore(&opl3->voice_lock, flags);
 }
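The opl3 hunks fix a race by making the timer callback hold voice_lock for the whole voice scan; since the public snd_opl3_note_off() takes that same lock, the logic is split into a lock-free snd_opl3_note_off_unsafe() plus a thin locking wrapper, and the callback uses the former. A standalone pthread sketch of that wrapper/helper split (all names and the voice counter are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t voice_lock = PTHREAD_MUTEX_INITIALIZER;
static int active_voices = 3;

static void note_off_unsafe(void)	/* caller must hold voice_lock */
{
	if (active_voices > 0)
		active_voices--;
}

static void note_off(void)		/* public entry point */
{
	pthread_mutex_lock(&voice_lock);
	note_off_unsafe();
	pthread_mutex_unlock(&voice_lock);
}

static void timer_callback(void)	/* scans voices under the lock */
{
	pthread_mutex_lock(&voice_lock);
	note_off_unsafe();		/* calling note_off() here would deadlock */
	pthread_mutex_unlock(&voice_lock);
}

int main(void)
{
	note_off();
	timer_callback();
	printf("voices left: %d\n", active_voices);	/* 1 */
	return 0;
}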
 
index c52691c2fc46df746aee9136545f1c4ed6aa1447..9a88cdfd952a0c609847442b24da8746d8aee51e 100644 (file)
@@ -915,7 +915,7 @@ static int __devinit hal2_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit hal2_remove(struct platform_device *pdev)
+static int __devexit hal2_remove(struct platform_device *pdev)
 {
        struct snd_card *card = platform_get_drvdata(pdev);
 
index e497525bc11bcf50612c071bdfc3c5ba186bacf1..8691f4cf6191fc8500d4995c42e227bd8680a6c6 100644 (file)
@@ -973,7 +973,7 @@ static int __devinit snd_sgio2audio_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit snd_sgio2audio_remove(struct platform_device *pdev)
+static int __devexit snd_sgio2audio_remove(struct platform_device *pdev)
 {
        struct snd_card *card = platform_get_drvdata(pdev);
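Both audio platform drivers above move their remove callbacks from __exit to __devexit: remove can run at any time through sysfs unbind, not only at module unload, so it must not live in a section that may be discarded. A minimal sketch of the convention as used in this kernel generation; example_probe(), example_remove() and the driver name are hypothetical:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <sound/core.h>

static int __devinit example_probe(struct platform_device *pdev)
{
	/* allocate and register the card, then platform_set_drvdata() it */
	return 0;
}

static int __devexit example_remove(struct platform_device *pdev)
{
	struct snd_card *card = platform_get_drvdata(pdev);

	snd_card_free(card);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove),	/* NULL when hotplug is configured out */
	.driver	= {
		.name = "example-audio",
	},
};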
 
index b1b3a644f73804a186263df3241f5c49650c5f48..75454648d50c6efb10d79357804c322c6f7aa80e 100644 (file)
@@ -1037,7 +1037,7 @@ static int atc_line_front_unmute(struct ct_atc *atc, unsigned char state)
 
 static int atc_line_surround_unmute(struct ct_atc *atc, unsigned char state)
 {
-       return atc_daio_unmute(atc, state, LINEO4);
+       return atc_daio_unmute(atc, state, LINEO2);
 }
 
 static int atc_line_clfe_unmute(struct ct_atc *atc, unsigned char state)
@@ -1047,7 +1047,7 @@ static int atc_line_clfe_unmute(struct ct_atc *atc, unsigned char state)
 
 static int atc_line_rear_unmute(struct ct_atc *atc, unsigned char state)
 {
-       return atc_daio_unmute(atc, state, LINEO2);
+       return atc_daio_unmute(atc, state, LINEO4);
 }
 
 static int atc_line_in_unmute(struct ct_atc *atc, unsigned char state)
index da2065cd2c0d5c3ffd5783b98c90ebfbc4ee244c..1305f7ca02c3c2726049dc448dec6b7fa36a63cd 100644 (file)
@@ -950,7 +950,7 @@ static int __devinit snd_echo_new_pcm(struct echoaudio *chip)
        Control interface
 ******************************************************************************/
 
-#ifndef ECHOCARD_HAS_VMIXER
+#if !defined(ECHOCARD_HAS_VMIXER) || defined(ECHOCARD_HAS_LINE_OUT_GAIN)
 
 /******************* PCM output volume *******************/
 static int snd_echo_output_gain_info(struct snd_kcontrol *kcontrol,
@@ -1003,6 +1003,19 @@ static int snd_echo_output_gain_put(struct snd_kcontrol *kcontrol,
        return changed;
 }
 
+#ifdef ECHOCARD_HAS_LINE_OUT_GAIN
+/* On the Mia this one controls the line-out volume */
+static struct snd_kcontrol_new snd_echo_line_output_gain __devinitdata = {
+       .name = "Line Playback Volume",
+       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+       .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+                 SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+       .info = snd_echo_output_gain_info,
+       .get = snd_echo_output_gain_get,
+       .put = snd_echo_output_gain_put,
+       .tlv = {.p = db_scale_output_gain},
+};
+#else
 static struct snd_kcontrol_new snd_echo_pcm_output_gain __devinitdata = {
        .name = "PCM Playback Volume",
        .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -1012,9 +1025,10 @@ static struct snd_kcontrol_new snd_echo_pcm_output_gain __devinitdata = {
        .put = snd_echo_output_gain_put,
        .tlv = {.p = db_scale_output_gain},
 };
-
 #endif
 
+#endif /* !ECHOCARD_HAS_VMIXER || ECHOCARD_HAS_LINE_OUT_GAIN */
+
 
 
 #ifdef ECHOCARD_HAS_INPUT_GAIN
@@ -2030,10 +2044,18 @@ static int __devinit snd_echo_probe(struct pci_dev *pci,
        snd_echo_vmixer.count = num_pipes_out(chip) * num_busses_out(chip);
        if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vmixer, chip))) < 0)
                goto ctl_error;
-#else
-       if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_pcm_output_gain, chip))) < 0)
+#ifdef ECHOCARD_HAS_LINE_OUT_GAIN
+       err = snd_ctl_add(chip->card,
+                         snd_ctl_new1(&snd_echo_line_output_gain, chip));
+       if (err < 0)
                goto ctl_error;
 #endif
+#else /* ECHOCARD_HAS_VMIXER */
+       err = snd_ctl_add(chip->card,
+                         snd_ctl_new1(&snd_echo_pcm_output_gain, chip));
+       if (err < 0)
+               goto ctl_error;
+#endif /* ECHOCARD_HAS_VMIXER */
 
 #ifdef ECHOCARD_HAS_INPUT_GAIN
        if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_line_input_gain, chip))) < 0)
index f3b9b45c9c1be7ad82b3c9249e66dfe84ef15833..f05c8c097aa8196de2f9fe0a62e6e45e2efbd66c 100644 (file)
@@ -29,6 +29,7 @@
 #define ECHOCARD_HAS_ADAT      FALSE
 #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32
 #define ECHOCARD_HAS_MIDI
+#define ECHOCARD_HAS_LINE_OUT_GAIN
 
 /* Pipe indexes */
 #define PX_ANALOG_OUT  0       /* 8 */
index 20a66f85f0a4247b317d8bbd9e68dafe107e892e..c9ad182e1b4b6a3ddd23b422f2e4f821bde90604 100644 (file)
@@ -2303,6 +2303,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
  * white-list for enable_msi
  */
 static struct snd_pci_quirk msi_white_list[] __devinitdata = {
+       SND_PCI_QUIRK(0x103c, 0x30f7, "HP Pavilion dv4t-1300", 1),
        SND_PCI_QUIRK(0x103c, 0x3607, "HP Compa CQ40", 1),
        {}
 };
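The hda_intel hunk adds one more PCI subsystem ID to the MSI white-list. The standalone sketch below shows the table-lookup idea in plain C; the real SND_PCI_QUIRK entries also carry a match mask and are resolved by snd_pci_quirk_lookup() in the ALSA core, which is simplified away here:

#include <stdio.h>

struct pci_quirk {
	unsigned short subvendor;
	unsigned short subdevice;
	const char *name;
	int value;
};

static const struct pci_quirk msi_white_list[] = {
	{ 0x103c, 0x30f7, "HP Pavilion dv4t-1300", 1 },
	{ 0x103c, 0x3607, "HP Compa CQ40", 1 },
	{ 0 }					/* terminator */
};

static int quirk_lookup(const struct pci_quirk *q,
			unsigned short subvendor, unsigned short subdevice)
{
	for (; q->subvendor; q++)
		if (q->subvendor == subvendor && q->subdevice == subdevice)
			return q->value;
	return -1;				/* not listed */
}

int main(void)
{
	printf("%d\n", quirk_lookup(msi_white_list, 0x103c, 0x30f7));	/* 1  */
	printf("%d\n", quirk_lookup(msi_white_list, 0x8086, 0x0001));	/* -1 */
	return 0;
}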
index 215e72a8711306567bf3c898b5ef02caeb5ef5a4..2d603f6aba6319bbf149a8dc5b966aec328aba8e 100644 (file)
@@ -4031,6 +4031,127 @@ static int ad1984a_thinkpad_init(struct hda_codec *codec)
        return 0;
 }
 
+/*
+ * HP Touchsmart
+ * port-A (0x11)      - front hp-out
+ * port-B (0x14)      - unused
+ * port-C (0x15)      - unused
+ * port-D (0x12)      - rear line out
+ * port-E (0x1c)      - front mic-in
+ * port-F (0x16)      - Internal speakers
+ * digital-mic (0x17) - Internal mic
+ */
+
+static struct hda_verb ad1984a_touchsmart_verbs[] = {
+       /* DACs; unmute as default */
+       {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
+       {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
+       /* Port-A (HP) mixer - route only from analog mixer */
+       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+       {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
+       /* Port-A pin */
+       {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+       /* Port-A (HP) pin - always unmuted */
+       {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+       /* Port-E (int speaker) mixer - route only from analog mixer */
+       {0x25, AC_VERB_SET_AMP_GAIN_MUTE, 0x03},
+       /* Port-E pin */
+       {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+       {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+       {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+       /* Port-F (int speaker) mixer - route only from analog mixer */
+       {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+       {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
+       /* Port-F pin */
+       {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+       {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+       /* Analog mixer; mute as default */
+       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
+       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
+       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
+       {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
+       /* Analog Mix output amp */
+       {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+       /* capture sources */
+       /* {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0}, */ /* set via unsol */
+       {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+       {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
+       {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+       /* unsolicited event for pin-sense */
+       {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
+       {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
+       /* allow to touch GPIO1 (for mute control) */
+       {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
+       {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
+       {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
+       /* internal mic - dmic */
+       {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+       /* set magic COEFs for dmic */
+       {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
+       {0x01, AC_VERB_SET_PROC_COEF, 0x08},
+       { } /* end */
+};
+
+static struct snd_kcontrol_new ad1984a_touchsmart_mixers[] = {
+       HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
+/*     HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
+       {
+               .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+               .name = "Master Playback Switch",
+               .info = snd_hda_mixer_amp_switch_info,
+               .get = snd_hda_mixer_amp_switch_get,
+               .put = ad1884a_mobile_master_sw_put,
+               .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
+       },
+       HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
+       HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
+       HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
+       HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
+       HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+       HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT),
+       { } /* end */
+};
+
+/* switch to external mic if plugged */
+static void ad1984a_touchsmart_automic(struct hda_codec *codec)
+{
+       if (snd_hda_codec_read(codec, 0x1c, 0,
+                                    AC_VERB_GET_PIN_SENSE, 0) & 0x80000000) {
+               snd_hda_codec_write(codec, 0x0c, 0,
+                                    AC_VERB_SET_CONNECT_SEL, 0x4);
+       } else {
+               snd_hda_codec_write(codec, 0x0c, 0,
+                                    AC_VERB_SET_CONNECT_SEL, 0x5);
+       }
+}
+
+
+/* unsolicited event for HP jack sensing */
+static void ad1984a_touchsmart_unsol_event(struct hda_codec *codec,
+       unsigned int res)
+{
+       switch (res >> 26) {
+       case AD1884A_HP_EVENT:
+               ad1884a_hp_automute(codec);
+               break;
+       case AD1884A_MIC_EVENT:
+               ad1984a_touchsmart_automic(codec);
+               break;
+       }
+}
+
+/* initialize jack-sensing, too */
+static int ad1984a_touchsmart_init(struct hda_codec *codec)
+{
+       ad198x_init(codec);
+       ad1884a_hp_automute(codec);
+       ad1984a_touchsmart_automic(codec);
+       return 0;
+}
+
+
 /*
  */
 
@@ -4039,6 +4160,7 @@ enum {
        AD1884A_LAPTOP,
        AD1884A_MOBILE,
        AD1884A_THINKPAD,
+       AD1984A_TOUCHSMART,
        AD1884A_MODELS
 };
 
@@ -4047,6 +4169,7 @@ static const char *ad1884a_models[AD1884A_MODELS] = {
        [AD1884A_LAPTOP]        = "laptop",
        [AD1884A_MOBILE]        = "mobile",
        [AD1884A_THINKPAD]      = "thinkpad",
+       [AD1984A_TOUCHSMART]    = "touchsmart",
 };
 
 static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
@@ -4059,6 +4182,7 @@ static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
        SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP),
        SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x7010, "HP laptop", AD1884A_MOBILE),
        SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
+       SND_PCI_QUIRK(0x103c, 0x2a82, "Touchsmart", AD1984A_TOUCHSMART),
        {}
 };
 
@@ -4142,6 +4266,21 @@ static int patch_ad1884a(struct hda_codec *codec)
                codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
                codec->patch_ops.init = ad1984a_thinkpad_init;
                break;
+       case AD1984A_TOUCHSMART:
+               spec->mixers[0] = ad1984a_touchsmart_mixers;
+               spec->init_verbs[0] = ad1984a_touchsmart_verbs;
+               spec->multiout.dig_out_nid = 0;
+               codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
+               codec->patch_ops.init = ad1984a_touchsmart_init;
+               /* set the upper limit of the mixer amp to 0dB to avoid
+                * possible damage from overloading
+                */
+               snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
+                                         (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
+                                         (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
+                                         (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+                                         (1 << AC_AMPCAP_MUTE_SHIFT));
+               break;
        }
 
        return 0;
index 9d899eda44d779695a1506598f8d5662d8a4a0f1..3fbbc8c01e703845e590345ca0f564cd02499d12 100644 (file)
@@ -682,11 +682,13 @@ static struct hda_input_mux cxt5045_capture_source = {
 };
 
 static struct hda_input_mux cxt5045_capture_source_benq = {
-       .num_items = 3,
+       .num_items = 5,
        .items = {
                { "IntMic", 0x1 },
                { "ExtMic", 0x2 },
                { "LineIn", 0x3 },
+               { "CD",     0x4 },
+               { "Mixer",  0x0 },
        }
 };
 
@@ -811,11 +813,19 @@ static struct snd_kcontrol_new cxt5045_mixers[] = {
 };
 
 static struct snd_kcontrol_new cxt5045_benq_mixers[] = {
+       HDA_CODEC_VOLUME("CD Capture Volume", 0x1a, 0x04, HDA_INPUT),
+       HDA_CODEC_MUTE("CD Capture Switch", 0x1a, 0x04, HDA_INPUT),
+       HDA_CODEC_VOLUME("CD Playback Volume", 0x17, 0x4, HDA_INPUT),
+       HDA_CODEC_MUTE("CD Playback Switch", 0x17, 0x4, HDA_INPUT),
+
        HDA_CODEC_VOLUME("Line In Capture Volume", 0x1a, 0x03, HDA_INPUT),
        HDA_CODEC_MUTE("Line In Capture Switch", 0x1a, 0x03, HDA_INPUT),
        HDA_CODEC_VOLUME("Line In Playback Volume", 0x17, 0x3, HDA_INPUT),
        HDA_CODEC_MUTE("Line In Playback Switch", 0x17, 0x3, HDA_INPUT),
 
+       HDA_CODEC_VOLUME("Mixer Capture Volume", 0x1a, 0x0, HDA_INPUT),
+       HDA_CODEC_MUTE("Mixer Capture Switch", 0x1a, 0x0, HDA_INPUT),
+
        {}
 };
 
index 129605819560073d542de35b1b265d4cd0696ae0..470fd74a0a1ad6697f073ee9bdd6bd03bf699aef 100644 (file)
@@ -1332,15 +1332,20 @@ do_sku:
         *              when the external headphone out jack is plugged"
         */
        if (!spec->autocfg.hp_pins[0]) {
+               hda_nid_t nid;
                tmp = (ass >> 11) & 0x3;        /* HP to chassis */
                if (tmp == 0)
-                       spec->autocfg.hp_pins[0] = porta;
+                       nid = porta;
                else if (tmp == 1)
-                       spec->autocfg.hp_pins[0] = porte;
+                       nid = porte;
                else if (tmp == 2)
-                       spec->autocfg.hp_pins[0] = portd;
+                       nid = portd;
                else
                        return 1;
+               for (i = 0; i < spec->autocfg.line_outs; i++)
+                       if (spec->autocfg.line_out_pins[i] == nid)
+                               return 1;
+               spec->autocfg.hp_pins[0] = nid;
        }
 
        alc_init_auto_hp(codec);
@@ -1362,7 +1367,7 @@ static void alc_ssid_check(struct hda_codec *codec,
 }
 
 /*
- * Fix-up pin default configurations
+ * Fix-up pin default configurations and add default verbs
  */
 
 struct alc_pincfg {
@@ -1370,9 +1375,14 @@ struct alc_pincfg {
        u32 val;
 };
 
-static void alc_fix_pincfg(struct hda_codec *codec,
+struct alc_fixup {
+       const struct alc_pincfg *pins;
+       const struct hda_verb *verbs;
+};
+
+static void alc_pick_fixup(struct hda_codec *codec,
                           const struct snd_pci_quirk *quirk,
-                          const struct alc_pincfg **pinfix)
+                          const struct alc_fixup *fix)
 {
        const struct alc_pincfg *cfg;
 
@@ -1380,9 +1390,14 @@ static void alc_fix_pincfg(struct hda_codec *codec,
        if (!quirk)
                return;
 
-       cfg = pinfix[quirk->value];
-       for (; cfg->nid; cfg++)
-               snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
+       fix += quirk->value;
+       cfg = fix->pins;
+       if (cfg) {
+               for (; cfg->nid; cfg++)
+                       snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
+       }
+       if (fix->verbs)
+               add_verb(codec->spec, fix->verbs);
 }
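alc_fix_pincfg() grows into alc_pick_fixup(): a fix-up entry may now carry optional pin-default overrides, optional extra init verbs, or both, and the later hunks use each variant (pins only for the Abit AW9D-MAX, verbs only for the ASUS GPIO1 reset). A hypothetical entry with both members populated; the NID and pin-default value are invented:

static const struct alc_pincfg example_pinfix[] = {
	{ 0x15, 0x99130110 },	/* hypothetical: retype pin 0x15 as a speaker */
	{ }			/* zero NID terminates the list */
};

static const struct hda_verb example_verbs[] = {
	{ 0x01, AC_VERB_SET_GPIO_MASK, 0x03 },
	{ }
};

static const struct alc_fixup example_fixups[] = {
	[0] = {
		.pins  = example_pinfix,	/* applied via snd_hda_codec_set_pincfg() */
		.verbs = example_verbs,		/* queued with add_verb() at parse time */
	},
};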
 
 /*
@@ -9593,11 +9608,13 @@ static struct alc_pincfg alc882_abit_aw9d_pinfix[] = {
        { }
 };
 
-static const struct alc_pincfg *alc882_pin_fixes[] = {
-       [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix,
+static const struct alc_fixup alc882_fixups[] = {
+       [PINFIX_ABIT_AW9D_MAX] = {
+               .pins = alc882_abit_aw9d_pinfix
+       },
 };
 
-static struct snd_pci_quirk alc882_pinfix_tbl[] = {
+static struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
        {}
 };
@@ -9869,7 +9886,7 @@ static int patch_alc882(struct hda_codec *codec)
                board_config = ALC882_AUTO;
        }
 
-       alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes);
+       alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups);
 
        if (board_config == ALC882_AUTO) {
                /* automatic parse from the BIOS config */
@@ -12660,7 +12677,7 @@ static struct alc_config_preset alc268_presets[] = {
                .init_hook = alc268_toshiba_automute,
        },
        [ALC268_ACER] = {
-               .mixers = { alc268_acer_mixer, alc268_capture_nosrc_mixer,
+               .mixers = { alc268_acer_mixer, alc268_capture_alt_mixer,
                            alc268_beep_mixer },
                .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
                                alc268_acer_verbs },
@@ -12842,12 +12859,15 @@ static int patch_alc268(struct hda_codec *codec)
                unsigned int wcap = get_wcaps(codec, 0x07);
                int i;
 
+               spec->capsrc_nids = alc268_capsrc_nids;
                /* get type */
                wcap = get_wcaps_type(wcap);
                if (spec->auto_mic ||
                    wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) {
                        spec->adc_nids = alc268_adc_nids_alt;
                        spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt);
+                       if (spec->auto_mic)
+                               fixup_automic_adc(codec);
                        if (spec->auto_mic || spec->input_mux->num_items == 1)
                                add_mixer(spec, alc268_capture_nosrc_mixer);
                        else
@@ -12857,7 +12877,6 @@ static int patch_alc268(struct hda_codec *codec)
                        spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids);
                        add_mixer(spec, alc268_capture_mixer);
                }
-               spec->capsrc_nids = alc268_capsrc_nids;
                /* set default input source */
                for (i = 0; i < spec->num_adc_nids; i++)
                        snd_hda_codec_write_cache(codec, alc268_capsrc_nids[i],
@@ -14357,15 +14376,16 @@ static void alc861_auto_init_multi_out(struct hda_codec *codec)
 static void alc861_auto_init_hp_out(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t pin;
 
-       pin = spec->autocfg.hp_pins[0];
-       if (pin)
-               alc861_auto_set_output_and_unmute(codec, pin, PIN_HP,
+       if (spec->autocfg.hp_outs)
+               alc861_auto_set_output_and_unmute(codec,
+                                                 spec->autocfg.hp_pins[0],
+                                                 PIN_HP,
                                                  spec->multiout.hp_nid);
-       pin = spec->autocfg.speaker_pins[0];
-       if (pin)
-               alc861_auto_set_output_and_unmute(codec, pin, PIN_OUT,
+       if (spec->autocfg.speaker_outs)
+               alc861_auto_set_output_and_unmute(codec,
+                                                 spec->autocfg.speaker_pins[0],
+                                                 PIN_OUT,
                                                  spec->multiout.dac_nids[0]);
 }
 
@@ -15158,7 +15178,7 @@ static struct snd_pci_quirk alc861vd_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1019, 0xa88d, "Realtek ALC660 demo", ALC660VD_3ST),
        SND_PCI_QUIRK(0x103c, 0x30bf, "HP TX1000", ALC861VD_HP),
        SND_PCI_QUIRK(0x1043, 0x12e2, "Asus z35m", ALC660VD_3ST),
-       SND_PCI_QUIRK(0x1043, 0x1339, "Asus G1", ALC660VD_3ST),
+       /*SND_PCI_QUIRK(0x1043, 0x1339, "Asus G1", ALC660VD_3ST),*/ /* auto */
        SND_PCI_QUIRK(0x1043, 0x1633, "Asus V1Sn", ALC660VD_ASUS_V1S),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS", ALC660VD_3ST_DIG),
        SND_PCI_QUIRK(0x10de, 0x03f0, "Realtek ALC660 demo", ALC660VD_3ST),
@@ -15551,6 +15571,29 @@ static void alc861vd_auto_init(struct hda_codec *codec)
                alc_inithook(codec);
 }
 
+enum {
+       ALC660VD_FIX_ASUS_GPIO1
+};
+
+/* reset GPIO1 */
+static const struct hda_verb alc660vd_fix_asus_gpio1_verbs[] = {
+       {0x01, AC_VERB_SET_GPIO_MASK, 0x03},
+       {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01},
+       {0x01, AC_VERB_SET_GPIO_DATA, 0x01},
+       { }
+};
+
+static const struct alc_fixup alc861vd_fixups[] = {
+       [ALC660VD_FIX_ASUS_GPIO1] = {
+               .verbs = alc660vd_fix_asus_gpio1_verbs,
+       },
+};
+
+static struct snd_pci_quirk alc861vd_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x1339, "ASUS A7-K", ALC660VD_FIX_ASUS_GPIO1),
+       {}
+};
+
 static int patch_alc861vd(struct hda_codec *codec)
 {
        struct alc_spec *spec;
@@ -15572,6 +15615,8 @@ static int patch_alc861vd(struct hda_codec *codec)
                board_config = ALC861VD_AUTO;
        }
 
+       alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups);
+
        if (board_config == ALC861VD_AUTO) {
                /* automatic parse from the BIOS config */
                err = alc861vd_parse_auto_config(codec);
@@ -16852,6 +16897,7 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
        SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_ECS),
        SND_PCI_QUIRK(0x105b, 0x0d47, "Foxconn 45CMX/45GMX/45CMX-K",
                      ALC662_3ST_6ch_DIG),
+       SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba NB200", ALC663_ASUS_MODE4),
        SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10),
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L",
                      ALC662_3ST_6ch_DIG),
@@ -17145,70 +17191,145 @@ static struct alc_config_preset alc662_presets[] = {
  * BIOS auto configuration
  */
 
+/* convert from MIX nid to DAC */
+static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
+{
+       if (nid == 0x0f)
+               return 0x02;
+       else if (nid >= 0x0c && nid <= 0x0e)
+               return nid - 0x0c + 0x02;
+       else
+               return 0;
+}
+
+/* get MIX nid connected to the given pin targeted to DAC */
+static hda_nid_t alc662_dac_to_mix(struct hda_codec *codec, hda_nid_t pin,
+                                  hda_nid_t dac)
+{
+       hda_nid_t mix[4];
+       int i, num;
+
+       num = snd_hda_get_connections(codec, pin, mix, ARRAY_SIZE(mix));
+       for (i = 0; i < num; i++) {
+               if (alc662_mix_to_dac(mix[i]) == dac)
+                       return mix[i];
+       }
+       return 0;
+}
+
+/* look for an empty DAC slot */
+static hda_nid_t alc662_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
+{
+       struct alc_spec *spec = codec->spec;
+       hda_nid_t srcs[5];
+       int i, j, num;
+
+       num = snd_hda_get_connections(codec, pin, srcs, ARRAY_SIZE(srcs));
+       if (num < 0)
+               return 0;
+       for (i = 0; i < num; i++) {
+               hda_nid_t nid = alc662_mix_to_dac(srcs[i]);
+               if (!nid)
+                       continue;
+               for (j = 0; j < spec->multiout.num_dacs; j++)
+                       if (spec->multiout.dac_nids[j] == nid)
+                               break;
+               if (j >= spec->multiout.num_dacs)
+                       return nid;
+       }
+       return 0;
+}
+
+/* fill in the dac_nids table from the parsed pin configuration */
+static int alc662_auto_fill_dac_nids(struct hda_codec *codec,
+                                    const struct auto_pin_cfg *cfg)
+{
+       struct alc_spec *spec = codec->spec;
+       int i;
+       hda_nid_t dac;
+
+       spec->multiout.dac_nids = spec->private_dac_nids;
+       for (i = 0; i < cfg->line_outs; i++) {
+               dac = alc662_look_for_dac(codec, cfg->line_out_pins[i]);
+               if (!dac)
+                       continue;
+               spec->multiout.dac_nids[spec->multiout.num_dacs++] = dac;
+       }
+       return 0;
+}
+
+static int alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx,
+                             hda_nid_t nid, unsigned int chs)
+{
+       char name[32];
+       sprintf(name, "%s Playback Volume", pfx);
+       return add_control(spec, ALC_CTL_WIDGET_VOL, name,
+                          HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
+}
+
+static int alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx,
+                            hda_nid_t nid, unsigned int chs)
+{
+       char name[32];
+       sprintf(name, "%s Playback Switch", pfx);
+       return add_control(spec, ALC_CTL_WIDGET_MUTE, name,
+                          HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_INPUT));
+}
+
+#define alc662_add_stereo_vol(spec, pfx, nid) \
+       alc662_add_vol_ctl(spec, pfx, nid, 3)
+#define alc662_add_stereo_sw(spec, pfx, nid) \
+       alc662_add_sw_ctl(spec, pfx, nid, 3)
+
 /* add playback controls from the parsed DAC table */
-static int alc662_auto_create_multi_out_ctls(struct alc_spec *spec,
+static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec,
                                             const struct auto_pin_cfg *cfg)
 {
-       char name[32];
+       struct alc_spec *spec = codec->spec;
        static const char *chname[4] = {
                "Front", "Surround", NULL /*CLFE*/, "Side"
        };
-       hda_nid_t nid;
+       hda_nid_t nid, mix;
        int i, err;
 
        for (i = 0; i < cfg->line_outs; i++) {
-               if (!spec->multiout.dac_nids[i])
+               nid = spec->multiout.dac_nids[i];
+               if (!nid)
+                       continue;
+               mix = alc662_dac_to_mix(codec, cfg->line_out_pins[i], nid);
+               if (!mix)
                        continue;
-               nid = alc880_idx_to_dac(i);
                if (i == 2) {
                        /* Center/LFE */
-                       err = add_control(spec, ALC_CTL_WIDGET_VOL,
-                                         "Center Playback Volume",
-                                         HDA_COMPOSE_AMP_VAL(nid, 1, 0,
-                                                             HDA_OUTPUT));
+                       err = alc662_add_vol_ctl(spec, "Center", nid, 1);
                        if (err < 0)
                                return err;
-                       err = add_control(spec, ALC_CTL_WIDGET_VOL,
-                                         "LFE Playback Volume",
-                                         HDA_COMPOSE_AMP_VAL(nid, 2, 0,
-                                                             HDA_OUTPUT));
+                       err = alc662_add_vol_ctl(spec, "LFE", nid, 2);
                        if (err < 0)
                                return err;
-                       err = add_control(spec, ALC_CTL_WIDGET_MUTE,
-                                         "Center Playback Switch",
-                                         HDA_COMPOSE_AMP_VAL(0x0e, 1, 0,
-                                                             HDA_INPUT));
+                       err = alc662_add_sw_ctl(spec, "Center", mix, 1);
                        if (err < 0)
                                return err;
-                       err = add_control(spec, ALC_CTL_WIDGET_MUTE,
-                                         "LFE Playback Switch",
-                                         HDA_COMPOSE_AMP_VAL(0x0e, 2, 0,
-                                                             HDA_INPUT));
+                       err = alc662_add_sw_ctl(spec, "LFE", mix, 2);
                        if (err < 0)
                                return err;
                } else {
                        const char *pfx;
                        if (cfg->line_outs == 1 &&
                            cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
-                               if (!cfg->hp_pins)
+                               if (cfg->hp_outs)
                                        pfx = "Speaker";
                                else
                                        pfx = "PCM";
                        } else
                                pfx = chname[i];
-                       sprintf(name, "%s Playback Volume", pfx);
-                       err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
-                                         HDA_COMPOSE_AMP_VAL(nid, 3, 0,
-                                                             HDA_OUTPUT));
+                       err = alc662_add_vol_ctl(spec, pfx, nid, 3);
                        if (err < 0)
                                return err;
                        if (cfg->line_outs == 1 &&
                            cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
                                pfx = "Speaker";
-                       sprintf(name, "%s Playback Switch", pfx);
-                       err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
-                               HDA_COMPOSE_AMP_VAL(alc880_idx_to_mixer(i),
-                                                   3, 0, HDA_INPUT));
+                       err = alc662_add_sw_ctl(spec, pfx, mix, 3);
                        if (err < 0)
                                return err;
                }
@@ -17217,54 +17338,38 @@ static int alc662_auto_create_multi_out_ctls(struct alc_spec *spec,
 }
 
 /* add playback controls for speaker and HP outputs */
-static int alc662_auto_create_extra_out(struct alc_spec *spec, hda_nid_t pin,
+/* return DAC nid if any new DAC is assigned */
+static int alc662_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
                                        const char *pfx)
 {
-       hda_nid_t nid;
+       struct alc_spec *spec = codec->spec;
+       hda_nid_t nid, mix;
        int err;
-       char name[32];
 
        if (!pin)
                return 0;
-
-       if (pin == 0x17) {
-               /* ALC663 has a mono output pin on 0x17 */
+       nid = alc662_look_for_dac(codec, pin);
+       if (!nid) {
+               char name[32];
+               /* the corresponding DAC is already occupied */
+               if (!(get_wcaps(codec, pin) & AC_WCAP_OUT_AMP))
+                       return 0; /* no way */
+               /* create a switch only */
                sprintf(name, "%s Playback Switch", pfx);
-               err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
-                                 HDA_COMPOSE_AMP_VAL(pin, 2, 0, HDA_OUTPUT));
-               return err;
+               return add_control(spec, ALC_CTL_WIDGET_MUTE, name,
+                                  HDA_COMPOSE_AMP_VAL(pin, 3, 0, HDA_OUTPUT));
        }
 
-       if (alc880_is_fixed_pin(pin)) {
-               nid = alc880_idx_to_dac(alc880_fixed_pin_idx(pin));
-               /* printk(KERN_DEBUG "DAC nid=%x\n",nid); */
-               /* specify the DAC as the extra output */
-               if (!spec->multiout.hp_nid)
-                       spec->multiout.hp_nid = nid;
-               else
-                       spec->multiout.extra_out_nid[0] = nid;
-               /* control HP volume/switch on the output mixer amp */
-               nid = alc880_idx_to_dac(alc880_fixed_pin_idx(pin));
-               sprintf(name, "%s Playback Volume", pfx);
-               err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
-                                 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT));
-               if (err < 0)
-                       return err;
-               sprintf(name, "%s Playback Switch", pfx);
-               err = add_control(spec, ALC_CTL_BIND_MUTE, name,
-                                 HDA_COMPOSE_AMP_VAL(nid, 3, 2, HDA_INPUT));
-               if (err < 0)
-                       return err;
-       } else if (alc880_is_multi_pin(pin)) {
-               /* set manual connection */
-               /* we have only a switch on HP-out PIN */
-               sprintf(name, "%s Playback Switch", pfx);
-               err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
-                                 HDA_COMPOSE_AMP_VAL(pin, 3, 0, HDA_OUTPUT));
-               if (err < 0)
-                       return err;
-       }
-       return 0;
+       mix = alc662_dac_to_mix(codec, pin, nid);
+       if (!mix)
+               return 0;
+       err = alc662_add_vol_ctl(spec, pfx, nid, 3);
+       if (err < 0)
+               return err;
+       err = alc662_add_sw_ctl(spec, pfx, mix, 3);
+       if (err < 0)
+               return err;
+       return nid;
 }
 
 /* create playback/capture controls for input pins */
@@ -17273,30 +17378,35 @@ static int alc662_auto_create_extra_out(struct alc_spec *spec, hda_nid_t pin,
 
 static void alc662_auto_set_output_and_unmute(struct hda_codec *codec,
                                              hda_nid_t nid, int pin_type,
-                                             int dac_idx)
+                                             hda_nid_t dac)
 {
+       int i, num;
+       hda_nid_t srcs[4];
+
        alc_set_pin_output(codec, nid, pin_type);
        /* need the manual connection? */
-       if (alc880_is_multi_pin(nid)) {
-               struct alc_spec *spec = codec->spec;
-               int idx = alc880_multi_pin_idx(nid);
-               snd_hda_codec_write(codec, alc880_idx_to_selector(idx), 0,
-                                   AC_VERB_SET_CONNECT_SEL,
-                                   alc880_dac_to_idx(spec->multiout.dac_nids[dac_idx]));
+       num = snd_hda_get_connections(codec, nid, srcs, ARRAY_SIZE(srcs));
+       if (num <= 1)
+               return;
+       for (i = 0; i < num; i++) {
+               if (alc662_mix_to_dac(srcs[i]) != dac)
+                       continue;
+               snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, i);
+               return;
        }
 }
 
 static void alc662_auto_init_multi_out(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
+       int pin_type = get_pin_type(spec->autocfg.line_out_type);
        int i;
 
        for (i = 0; i <= HDA_SIDE; i++) {
                hda_nid_t nid = spec->autocfg.line_out_pins[i];
-               int pin_type = get_pin_type(spec->autocfg.line_out_type);
                if (nid)
                        alc662_auto_set_output_and_unmute(codec, nid, pin_type,
-                                                         i);
+                                       spec->multiout.dac_nids[i]);
        }
 }
 
@@ -17306,12 +17416,13 @@ static void alc662_auto_init_hp_out(struct hda_codec *codec)
        hda_nid_t pin;
 
        pin = spec->autocfg.hp_pins[0];
-       if (pin) /* connect to front */
-               /* use dac 0 */
-               alc662_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
+       if (pin)
+               alc662_auto_set_output_and_unmute(codec, pin, PIN_HP,
+                                                 spec->multiout.hp_nid);
        pin = spec->autocfg.speaker_pins[0];
        if (pin)
-               alc662_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
+               alc662_auto_set_output_and_unmute(codec, pin, PIN_OUT,
+                                       spec->multiout.extra_out_nid[0]);
 }
 
 #define ALC662_PIN_CD_NID              ALC880_PIN_CD_NID
@@ -17349,21 +17460,25 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
        if (!spec->autocfg.line_outs)
                return 0; /* can't find valid BIOS pin config */
 
-       err = alc880_auto_fill_dac_nids(spec, &spec->autocfg);
+       err = alc662_auto_fill_dac_nids(codec, &spec->autocfg);
        if (err < 0)
                return err;
-       err = alc662_auto_create_multi_out_ctls(spec, &spec->autocfg);
+       err = alc662_auto_create_multi_out_ctls(codec, &spec->autocfg);
        if (err < 0)
                return err;
-       err = alc662_auto_create_extra_out(spec,
+       err = alc662_auto_create_extra_out(codec,
                                           spec->autocfg.speaker_pins[0],
                                           "Speaker");
        if (err < 0)
                return err;
-       err = alc662_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
+       if (err)
+               spec->multiout.extra_out_nid[0] = err;
+       err = alc662_auto_create_extra_out(codec, spec->autocfg.hp_pins[0],
                                           "Headphone");
        if (err < 0)
                return err;
+       if (err)
+               spec->multiout.hp_nid = err;
        err = alc662_auto_create_input_ctls(codec, &spec->autocfg);
        if (err < 0)
                return err;
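The largest part of this Realtek update reworks the ALC662 auto-parser so it stops reusing the ALC880 fixed pin-to-DAC mapping: each output pin's connection list is read, mixer widgets are translated to their DACs, and the first DAC not yet claimed is taken; the extra speaker/headphone helpers return the DAC they grabbed so hp_nid and extra_out_nid get filled in. A standalone sketch of the selection logic (the NID arithmetic follows alc662_mix_to_dac() in the hunk; everything else is simplified):

#include <stdio.h>

/* mixers 0x0c..0x0e feed DACs 0x02..0x04; mixer 0x0f shares DAC 0x02 */
static unsigned char mix_to_dac(unsigned char nid)
{
	if (nid == 0x0f)
		return 0x02;
	if (nid >= 0x0c && nid <= 0x0e)
		return nid - 0x0c + 0x02;
	return 0;			/* not a mixer widget */
}

static unsigned char look_for_dac(const unsigned char *srcs, int num,
				  const unsigned char *used, int nused)
{
	int i, j;

	for (i = 0; i < num; i++) {
		unsigned char dac = mix_to_dac(srcs[i]);

		if (!dac)
			continue;
		for (j = 0; j < nused; j++)
			if (used[j] == dac)
				break;
		if (j >= nused)
			return dac;	/* first DAC nobody claimed yet */
	}
	return 0;
}

int main(void)
{
	unsigned char srcs[] = { 0x0c, 0x0d, 0x0e };	/* a pin's connections */
	unsigned char used[] = { 0x02 };		/* front DAC already taken */

	printf("0x%02x\n", look_for_dac(srcs, 3, used, 1));	/* 0x03 */
	return 0;
}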
index 826137ec3002021dba56de29169f01740b3008e1..a9b26828a65168fdd9fee7aed1c6e4ae0033fc55 100644 (file)
@@ -182,8 +182,8 @@ struct sigmatel_jack {
 
 struct sigmatel_mic_route {
        hda_nid_t pin;
-       unsigned char mux_idx;
-       unsigned char dmux_idx;
+       signed char mux_idx;
+       signed char dmux_idx;
 };
 
 struct sigmatel_spec {
@@ -3469,18 +3469,26 @@ static int set_mic_route(struct hda_codec *codec,
                        break;
        if (i <= AUTO_PIN_FRONT_MIC) {
                /* analog pin */
-               mic->dmux_idx = 0;
                i = get_connection_index(codec, spec->mux_nids[0], pin);
                if (i < 0)
                        return -1;
                mic->mux_idx = i;
+               mic->dmux_idx = -1;
+               if (spec->dmux_nids)
+                       mic->dmux_idx = get_connection_index(codec,
+                                                            spec->dmux_nids[0],
+                                                            spec->mux_nids[0]);
        }  else if (spec->dmux_nids) {
                /* digital pin */
-               mic->mux_idx = 0;
                i = get_connection_index(codec, spec->dmux_nids[0], pin);
                if (i < 0)
                        return -1;
                mic->dmux_idx = i;
+               mic->mux_idx = -1;
+               if (spec->mux_nids)
+                       mic->mux_idx = get_connection_index(codec,
+                                                           spec->mux_nids[0],
+                                                           spec->dmux_nids[0]);
        }
        return 0;
 }
@@ -4557,11 +4565,11 @@ static void stac92xx_mic_detect(struct hda_codec *codec)
                mic = &spec->ext_mic;
        else
                mic = &spec->int_mic;
-       if (mic->dmux_idx)
+       if (mic->dmux_idx >= 0)
                snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0,
                                          AC_VERB_SET_CONNECT_SEL,
                                          mic->dmux_idx);
-       else
+       if (mic->mux_idx >= 0)
                snd_hda_codec_write_cache(codec, spec->mux_nids[0], 0,
                                          AC_VERB_SET_CONNECT_SEL,
                                          mic->mux_idx);
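The sigmatel hunks turn mux_idx/dmux_idx into signed chars so that -1 can mean "this selector is not involved" while index 0 remains a valid connection index; mic switching then programs whichever selectors carry a non-negative index instead of treating 0 as "unset". A small standalone illustration of the sentinel change (struct and values invented):

#include <stdio.h>

struct mic_route {
	signed char mux_idx;	/* -1: analog selector not involved */
	signed char dmux_idx;	/* -1: digital selector not involved */
};

static void apply_route(const struct mic_route *mic)
{
	if (mic->dmux_idx >= 0)
		printf("program dmux connect-sel %d\n", mic->dmux_idx);
	if (mic->mux_idx >= 0)
		printf("program mux connect-sel %d\n", mic->mux_idx);
}

int main(void)
{
	struct mic_route analog_mic  = { .mux_idx = 0,  .dmux_idx = 2 };
	struct mic_route digital_mic = { .mux_idx = -1, .dmux_idx = 1 };

	apply_route(&analog_mic);	/* both selectors get programmed */
	apply_route(&digital_mic);	/* only the digital one */
	return 0;
}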
index cecf1ffeeaaa448c50d499101fa890ce2a921db4..d74033a2cfbe900553acfff24249127f97057098 100644 (file)
@@ -2259,7 +2259,7 @@ static int snd_ice1712_pro_peak_get(struct snd_kcontrol *kcontrol,
 }
 
 static struct snd_kcontrol_new snd_ice1712_mixer_pro_peak __devinitdata = {
-       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+       .iface = SNDRV_CTL_ELEM_IFACE_PCM,
        .name = "Multi Track Peak",
        .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
        .info = snd_ice1712_pro_peak_info,
index af6e001486219083b172b345ef94c987cd74d242..76b717dae4b65c22aa2834795d4ad26b02216fee 100644 (file)
@@ -1294,7 +1294,7 @@ static int __devinit snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device)
 
        snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                                              snd_dma_pci_data(ice->pci),
-                                             64*1024, 64*1024);
+                                             256*1024, 256*1024);
 
        ice->pcm = pcm;
 
@@ -1408,7 +1408,7 @@ static int __devinit snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
 
        snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                                              snd_dma_pci_data(ice->pci),
-                                             64*1024, 64*1024);
+                                             256*1024, 256*1024);
 
        ice->pcm_ds = pcm;
 
@@ -2110,7 +2110,7 @@ static int snd_vt1724_pro_peak_get(struct snd_kcontrol *kcontrol,
 }
 
 static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = {
-       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+       .iface = SNDRV_CTL_ELEM_IFACE_PCM,
        .name = "Multi Track Peak",
        .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
        .info = snd_vt1724_pro_peak_info,
index 171ada535209a2974c8151597b0e1ceb4489736c..754867ed4785f248dc15f9d2680ba4e607932776 100644 (file)
@@ -1954,6 +1954,18 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
                .name = "Sony S1XP",
                .type = AC97_TUNE_INV_EAPD
        },
+       {
+               .subvendor = 0x104d,
+               .subdevice = 0x81c0,
+               .name = "Sony VAIO VGN-T350P", /*AD1981B*/
+               .type = AC97_TUNE_INV_EAPD
+       },
+       {
+               .subvendor = 0x104d,
+               .subdevice = 0x81c5,
+               .name = "Sony VAIO VGN-B1VP", /*AD1981B*/
+               .type = AC97_TUNE_INV_EAPD
+       },
        {
                .subvendor = 0x1043,
                .subdevice = 0x80f3,
index acfa4760da49e3afb147b438897fbdb03b547239..91683a34903543a3bded43fa140cf49b45294922 100644 (file)
@@ -1626,7 +1626,7 @@ static int snd_via8233_dxs_volume_get(struct snd_kcontrol *kcontrol,
                                      struct snd_ctl_elem_value *ucontrol)
 {
        struct via82xx *chip = snd_kcontrol_chip(kcontrol);
-       unsigned int idx = snd_ctl_get_ioff(kcontrol, &ucontrol->id);
+       unsigned int idx = kcontrol->id.subdevice;
 
        ucontrol->value.integer.value[0] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][0];
        ucontrol->value.integer.value[1] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][1];
@@ -1646,7 +1646,7 @@ static int snd_via8233_dxs_volume_put(struct snd_kcontrol *kcontrol,
                                      struct snd_ctl_elem_value *ucontrol)
 {
        struct via82xx *chip = snd_kcontrol_chip(kcontrol);
-       unsigned int idx = snd_ctl_get_ioff(kcontrol, &ucontrol->id);
+       unsigned int idx = kcontrol->id.subdevice;
        unsigned long port = chip->port + 0x10 * idx;
        unsigned char val;
        int i, change = 0;
@@ -1705,11 +1705,12 @@ static struct snd_kcontrol_new snd_via8233_pcmdxs_volume_control __devinitdata =
 };
 
 static struct snd_kcontrol_new snd_via8233_dxs_volume_control __devinitdata = {
-       .name = "VIA DXS Playback Volume",
-       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+       .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+       .device = 0,
+       /* .subdevice set later */
+       .name = "PCM Playback Volume",
        .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
                   SNDRV_CTL_ELEM_ACCESS_TLV_READ),
-       .count = 4,
        .info = snd_via8233_dxs_volume_info,
        .get = snd_via8233_dxs_volume_get,
        .put = snd_via8233_dxs_volume_put,
@@ -1936,10 +1937,18 @@ static int __devinit snd_via8233_init_misc(struct via82xx *chip)
                }
                else /* Using DXS when PCM emulation is enabled is really weird */
                {
-                       /* Standalone DXS controls */
-                       err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_via8233_dxs_volume_control, chip));
-                       if (err < 0)
-                               return err;
+                       for (i = 0; i < 4; ++i) {
+                               struct snd_kcontrol *kctl;
+
+                               kctl = snd_ctl_new1(
+                                       &snd_via8233_dxs_volume_control, chip);
+                               if (!kctl)
+                                       return -ENOMEM;
+                               kctl->id.subdevice = i;
+                               err = snd_ctl_add(chip->card, kctl);
+                               if (err < 0)
+                                       return err;
+                       }
                }
        }
        /* select spdif data slot 10/11 */
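
The rewritten block replaces one 4-element "VIA DXS Playback Volume" mixer control with four single "PCM Playback Volume" controls on the PCM interface, distinguished by kctl->id.subdevice, which is what the updated get/put callbacks now key on. The same pattern lifted into a generic helper (the helper name and signature are made up; snd_ctl_new1(), snd_ctl_add() and the subdevice assignment come from the hunk):

/* Sketch: register "count" copies of a control template, one per PCM
 * subdevice, so callbacks can use kcontrol->id.subdevice as the index. */
static int add_per_subdevice_controls(struct snd_card *card,
                                      struct snd_kcontrol_new *tmpl,
                                      void *private_data, int count)
{
        int i, err;

        for (i = 0; i < count; i++) {
                struct snd_kcontrol *kctl = snd_ctl_new1(tmpl, private_data);

                if (!kctl)
                        return -ENOMEM;
                kctl->id.subdevice = i;
                err = snd_ctl_add(card, kctl);
                if (err < 0)
                        return err;
        }
        return 0;
}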
index 835fa19ed461864018fb152aa3c41eb57261cb4b..d06f780bd7e84ad088163eb52e134c8c57188956 100644 (file)
@@ -59,6 +59,18 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
        strlcpy(info.type, "keywest", I2C_NAME_SIZE);
        info.addr = keywest_ctx->addr;
        keywest_ctx->client = i2c_new_device(adapter, &info);
+       if (!keywest_ctx->client)
+               return -ENODEV;
+       /*
+        * We know the driver is already loaded, so the device should be
+        * already bound. If not it means binding failed, and then there
+        * is no point in keeping the device instantiated.
+        */
+       if (!keywest_ctx->client->driver) {
+               i2c_unregister_device(keywest_ctx->client);
+               keywest_ctx->client = NULL;
+               return -ENODEV;
+       }
        
        /*
         * Let i2c-core delete that device on driver removal.
@@ -86,7 +98,7 @@ static const struct i2c_device_id keywest_i2c_id[] = {
        { }
 };
 
-struct i2c_driver keywest_driver = {
+static struct i2c_driver keywest_driver = {
        .driver = {
                .name = "PMac Keywest Audio",
        },
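
The keywest hunk adds an instantiate-and-verify step: after i2c_new_device() the code checks that a driver actually bound to the new client (client->driver is non-NULL) and unregisters the device again if not, instead of leaving an unbound client around; it also makes the driver struct static since nothing outside the file references it. The instantiate-then-verify idiom, condensed (only the i2c calls and fields come from the hunk; the surrounding variables are illustrative):

/* Sketch of the pattern used above. */
struct i2c_board_info info;
struct i2c_client *client;

memset(&info, 0, sizeof(info));
strlcpy(info.type, "keywest", I2C_NAME_SIZE);
info.addr = addr;                       /* bus address found earlier (assumed) */

client = i2c_new_device(adapter, &info);
if (!client)
        return -ENODEV;
if (!client->driver) {                  /* binding failed: do not keep it */
        i2c_unregister_device(client);
        return -ENODEV;
}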
index ac927ffdc9614d59808f8aabcb22b17dcab9ceed..97f1a251e446e5329c31c7f155d0c76ad29436ba 100644 (file)
@@ -7,15 +7,6 @@ config SND_BF5XX_I2S
          mode (supports single stereo In/Out).
          You will also need to select the audio interfaces to support below.
 
-config SND_BF5XX_TDM
-       tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
-       depends on (BLACKFIN && SND_SOC)
-       help
-         Say Y or M if you want to add support for codecs attached to
-         the Blackfin SPORT (synchronous serial ports) interface in TDM
-         mode.
-         You will also need to select the audio interfaces to support below.
-
 config SND_BF5XX_SOC_SSM2602
        tristate "SoC SSM2602 Audio support for BF52x ezkit"
        depends on SND_BF5XX_I2S
@@ -41,6 +32,31 @@ config SND_BFIN_AD73311_SE
          Enter the GPIO used to control AD73311's SE pin. Acceptable
          values are 0 to 7
 
+config SND_BF5XX_TDM
+       tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
+       depends on (BLACKFIN && SND_SOC)
+       help
+         Say Y or M if you want to add support for codecs attached to
+         the Blackfin SPORT (synchronous serial ports) interface in TDM
+         mode.
+         You will also need to select the audio interfaces to support below.
+
+config SND_BF5XX_SOC_AD1836
+       tristate "SoC AD1836 Audio support for BF5xx"
+       depends on SND_BF5XX_TDM
+       select SND_BF5XX_SOC_TDM
+       select SND_SOC_AD1836
+       help
+         Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
+
+config SND_BF5XX_SOC_AD1938
+       tristate "SoC AD1938 Audio support for Blackfin"
+       depends on SND_BF5XX_TDM
+       select SND_BF5XX_SOC_TDM
+       select SND_SOC_AD1938
+       help
+         Say Y if you want to add support for AD1938 codec on Blackfin.
+
 config SND_BF5XX_AC97
        tristate "SoC AC97 Audio for the ADI BF5xx chip"
        depends on BLACKFIN
@@ -71,6 +87,30 @@ config SND_BF5XX_MULTICHAN_SUPPORT
          Say y if you want AC97 driver to support up to 5.1 channel audio.
          this mode will consume much more memory for DMA.
 
+config SND_BF5XX_HAVE_COLD_RESET
+       bool "BOARD has COLD Reset GPIO"
+       depends on SND_BF5XX_AC97
+       default y if BFIN548_EZKIT
+       default n if !BFIN548_EZKIT
+
+config SND_BF5XX_RESET_GPIO_NUM
+       int "Set a GPIO for cold reset"
+       depends on SND_BF5XX_HAVE_COLD_RESET
+       range 0 159
+       default 19 if BFIN548_EZKIT
+       default 5 if BFIN537_STAMP
+       default 0
+       help
+         Set the correct GPIO for RESET the sound chip.
+
+config SND_BF5XX_SOC_AD1980
+       tristate "SoC AD1980/1 Audio support for BF5xx"
+       depends on SND_BF5XX_AC97
+       select SND_BF5XX_SOC_AC97
+       select SND_SOC_AD1980
+       help
+         Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
+
 config SND_BF5XX_SOC_SPORT
        tristate
 
@@ -88,30 +128,6 @@ config SND_BF5XX_SOC_AC97
        select SND_SOC_AC97_BUS
        select SND_BF5XX_SOC_SPORT
 
-config SND_BF5XX_SOC_AD1836
-       tristate "SoC AD1836 Audio support for BF5xx"
-       depends on SND_BF5XX_TDM
-       select SND_BF5XX_SOC_TDM
-       select SND_SOC_AD1836
-       help
-         Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
-
-config SND_BF5XX_SOC_AD1980
-       tristate "SoC AD1980/1 Audio support for BF5xx"
-       depends on SND_BF5XX_AC97
-       select SND_BF5XX_SOC_AC97
-       select SND_SOC_AD1980
-       help
-         Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
-
-config SND_BF5XX_SOC_AD1938
-        tristate "SoC AD1938 Audio support for Blackfin"
-        depends on SND_BF5XX_TDM
-        select SND_BF5XX_SOC_TDM
-        select SND_SOC_AD1938
-        help
-          Say Y if you want to add support for AD1938 codec on Blackfin.
-
 config SND_BF5XX_SPORT_NUM
        int "Set a SPORT for Sound chip"
        depends on (SND_BF5XX_I2S || SND_BF5XX_AC97 || SND_BF5XX_TDM)
@@ -120,19 +136,3 @@ config SND_BF5XX_SPORT_NUM
        default 0
        help
          Set the correct SPORT for sound chip.
-
-config SND_BF5XX_HAVE_COLD_RESET
-       bool "BOARD has COLD Reset GPIO"
-       depends on SND_BF5XX_AC97
-       default y if BFIN548_EZKIT
-       default n if !BFIN548_EZKIT
-
-config SND_BF5XX_RESET_GPIO_NUM
-       int "Set a GPIO for cold reset"
-       depends on SND_BF5XX_HAVE_COLD_RESET
-       range 0 159
-       default 19 if BFIN548_EZKIT
-       default 5 if BFIN537_STAMP
-       default 0
-       help
-         Set the correct GPIO for RESET the sound chip.
index 1e9d161c76c4ff9b74e8f34b6cb923db76378850..084b68884adaa7d407e0e67ded3aa07b41edd9ee 100644 (file)
@@ -77,12 +77,12 @@ static struct sport_param sport_params[2] = {
  * TFS. When Port G is selected and EMAC then there is a conflict between
  * the PHY interrupt line and TFS.  Current settings prevent the conflict
  * by ignoring the TFS pin when Port G is selected. This allows both
- * ssm2602 using Port G and EMAC concurrently.
+ * codecs and EMAC using Port G concurrently.
  */
-#ifdef CONFIG_BF527_SPORT0_PORTF
-#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
-#else
+#ifdef CONFIG_BF527_SPORT0_PORTG
 #define LOCAL_SPORT0_TFS (0)
+#else
+#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
 #endif
 
 static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
index 3096badf09a565eb508f01d28579a1e48d4f7b87..ff546e91a22ef711484aaf07b155a23607667257 100644 (file)
@@ -78,12 +78,12 @@ static struct sport_param sport_params[2] = {
  * TFS. When Port G is selected and EMAC then there is a conflict between
  * the PHY interrupt line and TFS.  Current settings prevent the conflict
  * by ignoring the TFS pin when Port G is selected. This allows both
- * ssm2602 using Port G and EMAC concurrently.
+ * codecs and EMAC using Port G concurrently.
  */
-#ifdef CONFIG_BF527_SPORT0_PORTF
-#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
-#else
+#ifdef CONFIG_BF527_SPORT0_PORTG
 #define LOCAL_SPORT0_TFS (0)
+#else
+#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
 #endif
 
 static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
index 3ff0373dff8904faeb2f1c522495e2c1ff32e63e..593d5b9c9f033ca79e0ee97ab907d29ab05ac0db 100644 (file)
@@ -579,7 +579,7 @@ static const struct snd_kcontrol_new wm8350_left_capt_mixer_controls[] = {
        SOC_DAPM_SINGLE_TLV("L3 Capture Volume",
                            WM8350_INPUT_MIXER_VOLUME_L, 9, 7, 0, out_mix_tlv),
        SOC_DAPM_SINGLE("PGA Capture Switch",
-                       WM8350_LEFT_INPUT_VOLUME, 14, 1, 0),
+                       WM8350_LEFT_INPUT_VOLUME, 14, 1, 1),
 };
 
 /* Right Input Mixer */
@@ -589,7 +589,7 @@ static const struct snd_kcontrol_new wm8350_right_capt_mixer_controls[] = {
        SOC_DAPM_SINGLE_TLV("L3 Capture Volume",
                            WM8350_INPUT_MIXER_VOLUME_R, 13, 7, 0, out_mix_tlv),
        SOC_DAPM_SINGLE("PGA Capture Switch",
-                       WM8350_RIGHT_INPUT_VOLUME, 14, 1, 0),
+                       WM8350_RIGHT_INPUT_VOLUME, 14, 1, 1),
 };
 
 /* Left Mic Mixer */
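
Changing the last SOC_DAPM_SINGLE() argument from 0 to 1 marks the PGA capture switches as inverted controls, presumably because bit 14 of the input-volume registers is a mute bit: with invert set, a register value of 1 is reported to user space as "switch off" and vice versa. Assuming the usual macro signature SOC_DAPM_SINGLE(xname, reg, shift, max, invert), the change reads as:

/* Sketch: the final argument is the invert flag. */
SOC_DAPM_SINGLE("PGA Capture Switch",
                WM8350_LEFT_INPUT_VOLUME, 14, 1, 1),    /* max = 1, invert = 1 */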
index da97aae475a2ed1eb89582401558165074f96ef7..1ef2454c52054357789cb3b14e1fea77072b86a6 100644 (file)
@@ -790,7 +790,7 @@ static int wm8940_register(struct wm8940_priv *wm8940,
        codec->reg_cache = &wm8940->reg_cache;
 
        ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
-       if (ret == 0) {
+       if (ret < 0) {
                dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
                return ret;
        }
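
The wm8940 hunk fixes an inverted error check: snd_soc_codec_set_cache_io() returns 0 on success and a negative errno on failure, so the old "ret == 0" branch logged an error and bailed out precisely when the call had succeeded. The corrected idiom:

/* Sketch: treat only negative return values as errors. */
ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
if (ret < 0) {
        dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
        return ret;
}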
index 12a6c549ee6e56857110204c1fbab964dbfbe2ea..4ae707048021886c45533ec6c5f6ca5cadf3999c 100644 (file)
@@ -97,22 +97,19 @@ enum {
        DAVINCI_MCBSP_WORD_32,
 };
 
-static struct davinci_pcm_dma_params davinci_i2s_pcm_out = {
-       .name = "I2S PCM Stereo out",
-};
-
-static struct davinci_pcm_dma_params davinci_i2s_pcm_in = {
-       .name = "I2S PCM Stereo in",
-};
-
 struct davinci_mcbsp_dev {
+       /*
+        * dma_params must be first because rtd->dai->cpu_dai->private_data
+        * is cast to a pointer of an array of struct davinci_pcm_dma_params in
+        * davinci_pcm_open.
+        */
+       struct davinci_pcm_dma_params   dma_params[2];
        void __iomem                    *base;
 #define MOD_DSP_A      0
 #define MOD_DSP_B      1
        int                             mode;
        u32                             pcr;
        struct clk                      *clk;
-       struct davinci_pcm_dma_params   *dma_params[2];
 };
 
 static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev,
@@ -215,14 +212,6 @@ static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback)
        toggle_clock(dev, playback);
 }
 
-static int davinci_i2s_startup(struct snd_pcm_substream *substream,
-                              struct snd_soc_dai *cpu_dai)
-{
-       struct davinci_mcbsp_dev *dev = cpu_dai->private_data;
-       cpu_dai->dma_data = dev->dma_params[substream->stream];
-       return 0;
-}
-
 #define DEFAULT_BITPERSAMPLE   16
 
 static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
@@ -353,8 +342,9 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params,
                                 struct snd_soc_dai *dai)
 {
-       struct davinci_pcm_dma_params *dma_params = dai->dma_data;
        struct davinci_mcbsp_dev *dev = dai->private_data;
+       struct davinci_pcm_dma_params *dma_params =
+                                       &dev->dma_params[substream->stream];
        struct snd_interval *i = NULL;
        int mcbsp_word_length;
        unsigned int rcr, xcr, srgr;
@@ -472,7 +462,6 @@ static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
 #define DAVINCI_I2S_RATES      SNDRV_PCM_RATE_8000_96000
 
 static struct snd_soc_dai_ops davinci_i2s_dai_ops = {
-       .startup        = davinci_i2s_startup,
        .shutdown       = davinci_i2s_shutdown,
        .prepare        = davinci_i2s_prepare,
        .trigger        = davinci_i2s_trigger,
@@ -534,12 +523,10 @@ static int davinci_i2s_probe(struct platform_device *pdev)
 
        dev->base = (void __iomem *)IO_ADDRESS(mem->start);
 
-       dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &davinci_i2s_pcm_out;
-       dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->dma_addr =
+       dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr =
            (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DXR_REG);
 
-       dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &davinci_i2s_pcm_in;
-       dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->dma_addr =
+       dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr =
            (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DRR_REG);
 
        /* first TX, then RX */
@@ -549,7 +536,7 @@ static int davinci_i2s_probe(struct platform_device *pdev)
                ret = -ENXIO;
                goto err_free_mem;
        }
-       dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->channel = res->start;
+       dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel = res->start;
 
        res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (!res) {
@@ -557,7 +544,7 @@ static int davinci_i2s_probe(struct platform_device *pdev)
                ret = -ENXIO;
                goto err_free_mem;
        }
-       dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->channel = res->start;
+       dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start;
 
        davinci_i2s_dai.private_data = dev;
        ret = snd_soc_register_dai(&davinci_i2s_dai);
index 7a06c0a86665a806e0dfbda55f3bb34b509e6f7a..5d1f98a4c97869fa41d446327a879f383b11136c 100644 (file)
@@ -332,14 +332,6 @@ static inline void mcasp_set_ctl_reg(void __iomem *regs, u32 val)
                printk(KERN_ERR "GBLCTL write error\n");
 }
 
-static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
-                                               struct snd_soc_dai *cpu_dai)
-{
-       struct davinci_audio_dev *dev = cpu_dai->private_data;
-       cpu_dai->dma_data = dev->dma_params[substream->stream];
-       return 0;
-}
-
 static void mcasp_start_rx(struct davinci_audio_dev *dev)
 {
        mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
@@ -386,17 +378,17 @@ static void mcasp_start_tx(struct davinci_audio_dev *dev)
 
 static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
 {
-       if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+       if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               if (dev->txnumevt)      /* enable FIFO */
+                       mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+                                                               FIFO_ENABLE);
                mcasp_start_tx(dev);
-       else
+       } else {
+               if (dev->rxnumevt)      /* enable FIFO */
+                       mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+                                                               FIFO_ENABLE);
                mcasp_start_rx(dev);
-
-       /* enable FIFO */
-       if (dev->txnumevt)
-               mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
-
-       if (dev->rxnumevt)
-               mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
+       }
 }
 
 static void mcasp_stop_rx(struct davinci_audio_dev *dev)
@@ -413,17 +405,17 @@ static void mcasp_stop_tx(struct davinci_audio_dev *dev)
 
 static void davinci_mcasp_stop(struct davinci_audio_dev *dev, int stream)
 {
-       if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+       if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               if (dev->txnumevt)      /* disable FIFO */
+                       mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+                                                               FIFO_ENABLE);
                mcasp_stop_tx(dev);
-       else
+       } else {
+               if (dev->rxnumevt)      /* disable FIFO */
+                       mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+                                                               FIFO_ENABLE);
                mcasp_stop_rx(dev);
-
-       /* disable FIFO */
-       if (dev->txnumevt)
-               mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
-
-       if (dev->rxnumevt)
-               mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
+       }
 }
 
 static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
@@ -720,7 +712,7 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
 {
        struct davinci_audio_dev *dev = cpu_dai->private_data;
        struct davinci_pcm_dma_params *dma_params =
-                                       dev->dma_params[substream->stream];
+                                       &dev->dma_params[substream->stream];
        int word_length;
        u8 numevt;
 
@@ -798,7 +790,6 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
 }
 
 static struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
-       .startup        = davinci_mcasp_startup,
        .trigger        = davinci_mcasp_trigger,
        .hw_params      = davinci_mcasp_hw_params,
        .set_fmt        = davinci_mcasp_set_dai_fmt,
@@ -849,20 +840,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
        struct resource *mem, *ioarea, *res;
        struct snd_platform_data *pdata;
        struct davinci_audio_dev *dev;
-       int count = 0;
        int ret = 0;
 
        dev = kzalloc(sizeof(struct davinci_audio_dev), GFP_KERNEL);
        if (!dev)
                return  -ENOMEM;
 
-       dma_data = kzalloc(sizeof(struct davinci_pcm_dma_params) * 2,
-                                                               GFP_KERNEL);
-       if (!dma_data) {
-               ret = -ENOMEM;
-               goto err_release_dev;
-       }
-
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem) {
                dev_err(&pdev->dev, "no mem resource?\n");
@@ -897,11 +880,10 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
        dev->txnumevt = pdata->txnumevt;
        dev->rxnumevt = pdata->rxnumevt;
 
-       dma_data[count].name = "I2S PCM Stereo out";
-       dma_data[count].eventq_no = pdata->eventq_no;
-       dma_data[count].dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
+       dma_data = &dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
+       dma_data->eventq_no = pdata->eventq_no;
+       dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
                                                        io_v2p(dev->base));
-       dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &dma_data[count];
 
        /* first TX, then RX */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -910,13 +892,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
                goto err_release_region;
        }
 
-       dma_data[count].channel = res->start;
-       count++;
-       dma_data[count].name = "I2S PCM Stereo in";
-       dma_data[count].eventq_no = pdata->eventq_no;
-       dma_data[count].dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
+       dma_data->channel = res->start;
+
+       dma_data = &dev->dma_params[SNDRV_PCM_STREAM_CAPTURE];
+       dma_data->eventq_no = pdata->eventq_no;
+       dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
                                                        io_v2p(dev->base));
-       dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &dma_data[count];
 
        res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (!res) {
@@ -924,7 +905,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
                goto err_release_region;
        }
 
-       dma_data[count].channel = res->start;
+       dma_data->channel = res->start;
        davinci_mcasp_dai[pdata->op_mode].private_data = dev;
        davinci_mcasp_dai[pdata->op_mode].dev = &pdev->dev;
        ret = snd_soc_register_dai(&davinci_mcasp_dai[pdata->op_mode]);
@@ -936,8 +917,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 err_release_region:
        release_mem_region(mem->start, (mem->end - mem->start) + 1);
 err_release_data:
-       kfree(dma_data);
-err_release_dev:
        kfree(dev);
 
        return ret;
@@ -946,7 +925,6 @@ err_release_dev:
 static int davinci_mcasp_remove(struct platform_device *pdev)
 {
        struct snd_platform_data *pdata = pdev->dev.platform_data;
-       struct davinci_pcm_dma_params *dma_data;
        struct davinci_audio_dev *dev;
        struct resource *mem;
 
@@ -959,8 +937,6 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(mem->start, (mem->end - mem->start) + 1);
 
-       dma_data = dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
-       kfree(dma_data);
        kfree(dev);
 
        return 0;
index 554354c1cc2f1bf786bf006bdca62310a53c568b..9d179cc88f7b84a9614753c0af34aef28a16c74d 100644 (file)
@@ -39,10 +39,15 @@ enum {
 };
 
 struct davinci_audio_dev {
+       /*
+        * dma_params must be first because rtd->dai->cpu_dai->private_data
+        * is cast to a pointer of an array of struct davinci_pcm_dma_params in
+        * davinci_pcm_open.
+        */
+       struct davinci_pcm_dma_params dma_params[2];
        void __iomem *base;
        int sample_rate;
        struct clk *clk;
-       struct davinci_pcm_dma_params *dma_params[2];
        unsigned int codec_fmt;
 
        /* McASP specific data */
index 2f7da49ed34fd1cd6219ad634f55c224f25fa8a6..c73a915f233f3d397639bfcb669ffbd834f3aee1 100644 (file)
@@ -126,16 +126,9 @@ static void davinci_pcm_dma_irq(unsigned lch, u16 ch_status, void *data)
 static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
 {
        struct davinci_runtime_data *prtd = substream->runtime->private_data;
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct davinci_pcm_dma_params *dma_data = rtd->dai->cpu_dai->dma_data;
        struct edmacc_param p_ram;
        int ret;
 
-       if (!dma_data)
-               return -ENODEV;
-
-       prtd->params = dma_data;
-
        /* Request master DMA channel */
        ret = edma_alloc_channel(prtd->params->channel,
                                  davinci_pcm_dma_irq, substream,
@@ -244,6 +237,11 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream)
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct davinci_runtime_data *prtd;
        int ret = 0;
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct davinci_pcm_dma_params *pa = rtd->dai->cpu_dai->private_data;
+       struct davinci_pcm_dma_params *params = &pa[substream->stream];
+       if (!params)
+               return -ENODEV;
 
        snd_soc_set_runtime_hwparams(substream, &davinci_pcm_hardware);
        /* ensure that buffer size is a multiple of period size */
@@ -257,6 +255,7 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream)
                return -ENOMEM;
 
        spin_lock_init(&prtd->lock);
+       prtd->params = params;
 
        runtime->private_data = prtd;
 
index 63d96253c73a13dd6825245d6c34f6a669b1f291..8746606efc893114a10f4135fe2420006da96321 100644 (file)
@@ -17,7 +17,6 @@
 
 
 struct davinci_pcm_dma_params {
-       char *name;                     /* stream identifier */
        int channel;                    /* sync dma channel ID */
        unsigned short acnt;
        dma_addr_t dma_addr;            /* device physical address for DMA */
index 3806ff2c0cd429ac675658774268a75c33806b8a..ccdefe60e752f32bece04a58c33ecbf1d3f9f247 100644 (file)
@@ -397,14 +397,6 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                break;
        }
 
-       /* sync */
-       if (!(fmt & SND_SOC_DAIFMT_ASYNC))
-               scr |= SSI_SCR_SYN;
-
-       /* tdm - only for stereo atm */
-       if (fmt & SND_SOC_DAIFMT_TDM)
-               scr |= SSI_SCR_NET;
-
        if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
                SSI1_STCR = stcr;
                SSI1_SRCR = srcr;
index 6375b4ea525dc572ca4feae6a8b69db703ecce1a..dcb3181bb3406a66fc1c5843a8b9be743a87fc78 100644 (file)
@@ -138,7 +138,7 @@ config SND_PXA2XX_SOC_MIOA701
 
 config SND_PXA2XX_SOC_IMOTE2
        tristate "SoC Audio support for IMote 2"
-       depends on SND_PXA2XX_SOC && MACH_INTELMOTE2
+       depends on SND_PXA2XX_SOC && MACH_INTELMOTE2 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8940
        help
index f79711b9fa5b9c58de8c156b30ff0e613bf26664..8de6f9dec4a225a495825c86f2dcf03e167403fb 100644 (file)
@@ -524,7 +524,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget)
 
                /* connected jack or spk ? */
                if (widget->id == snd_soc_dapm_hp || widget->id == snd_soc_dapm_spk ||
-                       widget->id == snd_soc_dapm_line)
+                   (widget->id == snd_soc_dapm_line && !list_empty(&widget->sources)))
                        return 1;
        }
 
@@ -573,7 +573,8 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget)
                        return 1;
 
                /* connected jack ? */
-               if (widget->id == snd_soc_dapm_mic || widget->id == snd_soc_dapm_line)
+               if (widget->id == snd_soc_dapm_mic ||
+                   (widget->id == snd_soc_dapm_line && !list_empty(&widget->sinks)))
                        return 1;
        }
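
The two DAPM hunks stop treating every snd_soc_dapm_line widget as an endpoint: a line widget now counts as an output endpoint only if it has source routes, and as an input endpoint only if it has sink routes, since line widgets are bidirectional and the direction is defined by the board's routing. A hedged machine-driver fragment showing how the route direction populates those lists (widget and route names are invented; SND_SOC_DAPM_LINE() and struct snd_soc_dapm_route are assumed to be available as usual):

/* Sketch: "Line Out" gets a source (output endpoint), "Line In" gets
 * a sink (input endpoint), purely from the direction of the routes. */
static const struct snd_soc_dapm_widget board_widgets[] = {
        SND_SOC_DAPM_LINE("Line Out", NULL),
        SND_SOC_DAPM_LINE("Line In", NULL),
};

static const struct snd_soc_dapm_route board_routes[] = {
        /* { sink, control, source } */
        { "Line Out", NULL, "Output Mixer" },   /* fills Line Out's sources */
        { "Input Mixer", NULL, "Line In" },     /* fills Line In's sinks */
};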
 
index ab5a3ac2ac47c5ee9aa086d87481e5446777438d..9efcfd08d747b1c430ae17f9429fda3b57974a0b 100644 (file)
@@ -898,6 +898,11 @@ static struct snd_kcontrol_new usb_feature_unit_ctl = {
  * build a feature control
  */
 
+static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
+{
+       return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
+}
+
 static void build_feature_ctl(struct mixer_build *state, unsigned char *desc,
                              unsigned int ctl_mask, int control,
                              struct usb_audio_term *iterm, int unitid)
@@ -978,13 +983,13 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc,
                 */
                if (! mapped_name && ! (state->oterm.type >> 16)) {
                        if ((state->oterm.type & 0xff00) == 0x0100) {
-                               len = strlcat(kctl->id.name, " Capture", sizeof(kctl->id.name));
+                               len = append_ctl_name(kctl, " Capture");
                        } else {
-                               len = strlcat(kctl->id.name + len, " Playback", sizeof(kctl->id.name));
+                               len = append_ctl_name(kctl, " Playback");
                        }
                }
-               strlcat(kctl->id.name + len, control == USB_FEATURE_MUTE ? " Switch" : " Volume",
-                       sizeof(kctl->id.name));
+               append_ctl_name(kctl, control == USB_FEATURE_MUTE ?
+                               " Switch" : " Volume");
                if (control == USB_FEATURE_VOLUME) {
                        kctl->tlv.c = mixer_vol_tlv;
                        kctl->vd[0].access |= 
@@ -1143,7 +1148,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state, unsigned char *desc,
                len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 0);
        if (! len)
                len = sprintf(kctl->id.name, "Mixer Source %d", in_ch + 1);
-       strlcat(kctl->id.name + len, " Volume", sizeof(kctl->id.name));
+       append_ctl_name(kctl, " Volume");
 
        snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n",
                    cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
@@ -1400,8 +1405,8 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned
                        if (! len)
                                strlcpy(kctl->id.name, name, sizeof(kctl->id.name));
                }
-               strlcat(kctl->id.name, " ", sizeof(kctl->id.name));
-               strlcat(kctl->id.name, valinfo->suffix, sizeof(kctl->id.name));
+               append_ctl_name(kctl, " ");
+               append_ctl_name(kctl, valinfo->suffix);
 
                snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n",
                            cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
@@ -1610,9 +1615,9 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi
                        strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
 
                if ((state->oterm.type & 0xff00) == 0x0100)
-                       strlcat(kctl->id.name, " Capture Source", sizeof(kctl->id.name));
+                       append_ctl_name(kctl, " Capture Source");
                else
-                       strlcat(kctl->id.name, " Playback Source", sizeof(kctl->id.name));
+                       append_ctl_name(kctl, " Playback Source");
        }
 
        snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
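
The new append_ctl_name() helper concatenates onto kctl->id.name with the full buffer size, which is how strlcat() is meant to be used; the removed call sites passed kctl->id.name + len together with sizeof(kctl->id.name), overstating the remaining space and allowing writes past the end of the fixed-size name field once len > 0. The safe pattern, isolated:

/* Sketch: strlcat() appends at the current end of dst by itself, so
 * pass the start of the buffer and its full size - never dst + len. */
static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
{
        return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
}

/* e.g. building "<term name> Capture Volume" piece by piece: */
append_ctl_name(kctl, " Capture");
append_ctl_name(kctl, " Volume");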
index b5e7e3f1183f24236fcaef9d16c4cfad63326f83..b7c78a403dc209dc12b72c2b769352cd4400187b 100644 (file)
@@ -850,6 +850,19 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 
 }
 
+static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+                                       struct mm_struct *mm,
+                                       unsigned long address,
+                                       pte_t pte)
+{
+       struct kvm *kvm = mmu_notifier_to_kvm(mn);
+
+       spin_lock(&kvm->mmu_lock);
+       kvm->mmu_notifier_seq++;
+       kvm_set_spte_hva(kvm, address, pte);
+       spin_unlock(&kvm->mmu_lock);
+}
+
 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
@@ -929,6 +942,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
+       .change_pte             = kvm_mmu_notifier_change_pte,
        .release                = kvm_mmu_notifier_release,
 };
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
@@ -2625,7 +2639,7 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
 
-static struct file_operations *stat_fops[] = {
+static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
 };
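
The stat_fops change only adds const to the pointed-to type, so the file_operations structures cannot be modified through this table; the array slots themselves stay writable unless a second const is added after the *. A small sketch of the distinction (array contents are illustrative):

/* Pointers to const objects: the fops cannot be modified through
 * these pointers, but the array slots themselves still can be. */
static const struct file_operations *stat_fops_sketch[] = {
        &vcpu_stat_fops,
        &vm_stat_fops,
};

/* Also make the slots read-only: pointer-to-const plus const pointer. */
static const struct file_operations *const stat_fops_ro[] = {
        &vcpu_stat_fops,
        &vm_stat_fops,
};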